author     Linus Torvalds <torvalds@linux-foundation.org>   2011-05-20 16:43:21 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2011-05-20 16:43:21 -0400
commit     06f4e926d256d902dd9a53dcb400fd74974ce087 (patch)
tree       0b438b67f5f0eff6fd617bc497a9dace6164a488 /net
parent     8e7bfcbab3825d1b404d615cb1b54f44ff81f981 (diff)
parent     d93515611bbc70c2fe4db232e5feb448ed8e4cc9 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6: (1446 commits)
  macvlan: fix panic if lowerdev in a bond
  tg3: Add braces around 5906 workaround.
  tg3: Fix NETIF_F_LOOPBACK error
  macvlan: remove one synchronize_rcu() call
  networking: NET_CLS_ROUTE4 depends on INET
  irda: Fix error propagation in ircomm_lmp_connect_response()
  irda: Kill set but unused variable 'bytes' in irlan_check_command_param()
  irda: Kill set but unused variable 'clen' in ircomm_connect_indication()
  rxrpc: Fix set but unused variable 'usage' in rxrpc_get_transport()
  be2net: Kill set but unused variable 'req' in lancer_fw_download()
  irda: Kill set but unused vars 'saddr' and 'daddr' in irlan_provider_connect_indication()
  atl1c: atl1c_resume() is only used when CONFIG_PM_SLEEP is defined.
  rxrpc: Fix set but unused variable 'usage' in rxrpc_get_peer().
  rxrpc: Kill set but unused variable 'local' in rxrpc_UDP_error_handler()
  rxrpc: Kill set but unused variable 'sp' in rxrpc_process_connection()
  rxrpc: Kill set but unused variable 'sp' in rxrpc_rotate_tx_window()
  pkt_sched: Kill set but unused variable 'protocol' in tc_classify()
  isdn: capi: Use pr_debug() instead of ifdefs.
  tg3: Update version to 3.119
  tg3: Apply rx_discards fix to 5719/5720
  ...

Fix up trivial conflicts in arch/x86/Kconfig and net/mac80211/agg-tx.c as per Davem.
Diffstat (limited to 'net')
-rw-r--r--  net/802/garp.c | 22
-rw-r--r--  net/8021q/vlan.c | 38
-rw-r--r--  net/8021q/vlan.h | 2
-rw-r--r--  net/8021q/vlan_core.c | 85
-rw-r--r--  net/8021q/vlan_dev.c | 225
-rw-r--r--  net/9p/client.c | 9
-rw-r--r--  net/9p/trans_fd.c | 2
-rw-r--r--  net/9p/trans_rdma.c | 2
-rw-r--r--  net/Kconfig | 14
-rw-r--r--  net/atm/lec.c | 4
-rw-r--r--  net/ax25/af_ax25.c | 16
-rw-r--r--  net/ax25/ax25_iface.c | 3
-rw-r--r--  net/batman-adv/aggregation.c | 31
-rw-r--r--  net/batman-adv/aggregation.h | 4
-rw-r--r--  net/batman-adv/bat_debugfs.c | 4
-rw-r--r--  net/batman-adv/bat_sysfs.c | 16
-rw-r--r--  net/batman-adv/gateway_client.c | 296
-rw-r--r--  net/batman-adv/gateway_client.h | 2
-rw-r--r--  net/batman-adv/hard-interface.c | 107
-rw-r--r--  net/batman-adv/hard-interface.h | 18
-rw-r--r--  net/batman-adv/icmp_socket.c | 37
-rw-r--r--  net/batman-adv/main.c | 20
-rw-r--r--  net/batman-adv/main.h | 44
-rw-r--r--  net/batman-adv/originator.c | 82
-rw-r--r--  net/batman-adv/originator.h | 1
-rw-r--r--  net/batman-adv/packet.h | 5
-rw-r--r--  net/batman-adv/routing.c | 572
-rw-r--r--  net/batman-adv/routing.h | 6
-rw-r--r--  net/batman-adv/send.c | 72
-rw-r--r--  net/batman-adv/send.h | 2
-rw-r--r--  net/batman-adv/soft-interface.c | 463
-rw-r--r--  net/batman-adv/translation-table.c | 474
-rw-r--r--  net/batman-adv/translation-table.h | 24
-rw-r--r--  net/batman-adv/types.h | 56
-rw-r--r--  net/batman-adv/unicast.c | 20
-rw-r--r--  net/batman-adv/vis.c | 146
-rw-r--r--  net/bluetooth/bnep/bnep.h | 148
-rw-r--r--  net/bluetooth/bnep/core.c | 71
-rw-r--r--  net/bluetooth/bnep/sock.c | 2
-rw-r--r--  net/bluetooth/cmtp/capi.c | 6
-rw-r--r--  net/bluetooth/cmtp/cmtp.h | 11
-rw-r--r--  net/bluetooth/cmtp/core.c | 28
-rw-r--r--  net/bluetooth/cmtp/sock.c | 2
-rw-r--r--  net/bluetooth/hci_conn.c | 95
-rw-r--r--  net/bluetooth/hci_core.c | 154
-rw-r--r--  net/bluetooth/hci_event.c | 267
-rw-r--r--  net/bluetooth/hci_sysfs.c | 71
-rw-r--r--  net/bluetooth/hidp/core.c | 96
-rw-r--r--  net/bluetooth/hidp/hidp.h | 6
-rw-r--r--  net/bluetooth/hidp/sock.c | 7
-rw-r--r--  net/bluetooth/l2cap_core.c | 1966
-rw-r--r--  net/bluetooth/l2cap_sock.c | 330
-rw-r--r--  net/bluetooth/mgmt.c | 612
-rw-r--r--  net/bluetooth/rfcomm/core.c | 23
-rw-r--r--  net/bluetooth/rfcomm/sock.c | 5
-rw-r--r--  net/bridge/br.c | 1
-rw-r--r--  net/bridge/br_device.c | 100
-rw-r--r--  net/bridge/br_fdb.c | 311
-rw-r--r--  net/bridge/br_if.c | 104
-rw-r--r--  net/bridge/br_input.c | 5
-rw-r--r--  net/bridge/br_ioctl.c | 40
-rw-r--r--  net/bridge/br_multicast.c | 12
-rw-r--r--  net/bridge/br_netfilter.c | 4
-rw-r--r--  net/bridge/br_netlink.c | 60
-rw-r--r--  net/bridge/br_notify.c | 11
-rw-r--r--  net/bridge/br_private.h | 22
-rw-r--r--  net/bridge/br_private_stp.h | 13
-rw-r--r--  net/bridge/br_stp.c | 48
-rw-r--r--  net/bridge/br_stp_if.c | 21
-rw-r--r--  net/bridge/br_sysfs_br.c | 39
-rw-r--r--  net/bridge/br_sysfs_if.c | 26
-rw-r--r--  net/caif/Makefile | 2
-rw-r--r--  net/caif/caif_config_util.c | 99
-rw-r--r--  net/caif/caif_dev.c | 387
-rw-r--r--  net/caif/caif_socket.c | 106
-rw-r--r--  net/caif/cfcnfg.c | 507
-rw-r--r--  net/caif/cfctrl.c | 196
-rw-r--r--  net/caif/cfdgml.c | 7
-rw-r--r--  net/caif/cffrml.c | 60
-rw-r--r--  net/caif/cfmuxl.c | 162
-rw-r--r--  net/caif/cfpkt_skbuff.c | 205
-rw-r--r--  net/caif/cfrfml.c | 4
-rw-r--r--  net/caif/cfserl.c | 7
-rw-r--r--  net/caif/cfsrvl.c | 40
-rw-r--r--  net/caif/cfutill.c | 7
-rw-r--r--  net/caif/cfveil.c | 11
-rw-r--r--  net/caif/cfvidl.c | 5
-rw-r--r--  net/caif/chnl_net.c | 45
-rw-r--r--  net/can/af_can.c | 67
-rw-r--r--  net/can/bcm.c | 2
-rw-r--r--  net/can/raw.c | 2
-rw-r--r--  net/compat.c | 16
-rw-r--r--  net/core/dev.c | 187
-rw-r--r--  net/core/dst.c | 54
-rw-r--r--  net/core/ethtool.c | 196
-rw-r--r--  net/core/fib_rules.c | 3
-rw-r--r--  net/core/filter.c | 65
-rw-r--r--  net/core/net-sysfs.c | 26
-rw-r--r--  net/core/net_namespace.c | 12
-rw-r--r--  net/core/netpoll.c | 28
-rw-r--r--  net/core/pktgen.c | 199
-rw-r--r--  net/core/rtnetlink.c | 25
-rw-r--r--  net/core/skbuff.c | 3
-rw-r--r--  net/core/sysctl_net_core.c | 9
-rw-r--r--  net/core/utils.c | 24
-rw-r--r--  net/dccp/ipv4.c | 53
-rw-r--r--  net/dccp/ipv6.c | 10
-rw-r--r--  net/dccp/output.c | 4
-rw-r--r--  net/decnet/dn_dev.c | 10
-rw-r--r--  net/decnet/dn_route.c | 15
-rw-r--r--  net/decnet/dn_table.c | 4
-rw-r--r--  net/dsa/slave.c | 1
-rw-r--r--  net/econet/af_econet.c | 8
-rw-r--r--  net/ipv4/Makefile | 2
-rw-r--r--  net/ipv4/af_inet.c | 53
-rw-r--r--  net/ipv4/ah4.c | 7
-rw-r--r--  net/ipv4/cipso_ipv4.c | 113
-rw-r--r--  net/ipv4/datagram.c | 22
-rw-r--r--  net/ipv4/devinet.c | 4
-rw-r--r--  net/ipv4/esp4.c | 7
-rw-r--r--  net/ipv4/fib_frontend.c | 16
-rw-r--r--  net/ipv4/fib_trie.c | 110
-rw-r--r--  net/ipv4/icmp.c | 133
-rw-r--r--  net/ipv4/igmp.c | 22
-rw-r--r--  net/ipv4/inet_connection_sock.c | 59
-rw-r--r--  net/ipv4/inet_diag.c | 2
-rw-r--r--  net/ipv4/inet_lro.c | 4
-rw-r--r--  net/ipv4/ip_forward.c | 2
-rw-r--r--  net/ipv4/ip_fragment.c | 58
-rw-r--r--  net/ipv4/ip_gre.c | 70
-rw-r--r--  net/ipv4/ip_input.c | 4
-rw-r--r--  net/ipv4/ip_options.c | 57
-rw-r--r--  net/ipv4/ip_output.c | 158
-rw-r--r--  net/ipv4/ip_sockglue.c | 37
-rw-r--r--  net/ipv4/ipcomp.c | 4
-rw-r--r--  net/ipv4/ipconfig.c | 35
-rw-r--r--  net/ipv4/ipip.c | 36
-rw-r--r--  net/ipv4/ipmr.c | 39
-rw-r--r--  net/ipv4/netfilter/arp_tables.c | 18
-rw-r--r--  net/ipv4/netfilter/ip_tables.c | 28
-rw-r--r--  net/ipv4/netfilter/nf_nat_helper.c | 2
-rw-r--r--  net/ipv4/ping.c | 935
-rw-r--r--  net/ipv4/raw.c | 92
-rw-r--r--  net/ipv4/route.c | 385
-rw-r--r--  net/ipv4/syncookies.c | 22
-rw-r--r--  net/ipv4/sysctl_net_ipv4.c | 68
-rw-r--r--  net/ipv4/tcp.c | 7
-rw-r--r--  net/ipv4/tcp_ipv4.c | 98
-rw-r--r--  net/ipv4/tcp_output.c | 2
-rw-r--r--  net/ipv4/udp.c | 78
-rw-r--r--  net/ipv4/xfrm4_policy.c | 38
-rw-r--r--  net/ipv4/xfrm4_state.c | 2
-rw-r--r--  net/ipv6/addrconf.c | 42
-rw-r--r--  net/ipv6/af_inet6.c | 2
-rw-r--r--  net/ipv6/anycast.c | 16
-rw-r--r--  net/ipv6/esp6.c | 5
-rw-r--r--  net/ipv6/icmp.c | 8
-rw-r--r--  net/ipv6/inet6_connection_sock.c | 2
-rw-r--r--  net/ipv6/ip6_fib.c | 20
-rw-r--r--  net/ipv6/ip6_input.c | 6
-rw-r--r--  net/ipv6/ip6_output.c | 50
-rw-r--r--  net/ipv6/ip6_tunnel.c | 46
-rw-r--r--  net/ipv6/ip6mr.c | 4
-rw-r--r--  net/ipv6/ipcomp6.c | 5
-rw-r--r--  net/ipv6/mcast.c | 36
-rw-r--r--  net/ipv6/mip6.c | 8
-rw-r--r--  net/ipv6/ndisc.c | 51
-rw-r--r--  net/ipv6/netfilter.c | 10
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c | 21
-rw-r--r--  net/ipv6/netfilter/ip6table_mangle.c | 3
-rw-r--r--  net/ipv6/proc.c | 40
-rw-r--r--  net/ipv6/raw.c | 18
-rw-r--r--  net/ipv6/reassembly.c | 4
-rw-r--r--  net/ipv6/route.c | 157
-rw-r--r--  net/ipv6/sit.c | 40
-rw-r--r--  net/ipv6/syncookies.c | 13
-rw-r--r--  net/ipv6/tcp_ipv6.c | 50
-rw-r--r--  net/ipv6/udp.c | 22
-rw-r--r--  net/ipv6/xfrm6_mode_beet.c | 2
-rw-r--r--  net/ipv6/xfrm6_mode_tunnel.c | 6
-rw-r--r--  net/ipv6/xfrm6_policy.c | 2
-rw-r--r--  net/ipv6/xfrm6_tunnel.c | 10
-rw-r--r--  net/irda/ircomm/ircomm_core.c | 6
-rw-r--r--  net/irda/ircomm/ircomm_lmp.c | 5
-rw-r--r--  net/irda/ircomm/ircomm_tty.c | 14
-rw-r--r--  net/irda/irlan/irlan_filter.c | 4
-rw-r--r--  net/irda/irlan/irlan_provider.c | 3
-rw-r--r--  net/irda/irlap_event.c | 3
-rw-r--r--  net/irda/irproc.c | 5
-rw-r--r--  net/iucv/af_iucv.c | 9
-rw-r--r--  net/iucv/iucv.c | 77
-rw-r--r--  net/key/af_key.c | 2
-rw-r--r--  net/l2tp/l2tp_core.c | 28
-rw-r--r--  net/l2tp/l2tp_ip.c | 52
-rw-r--r--  net/l2tp/l2tp_netlink.c | 3
-rw-r--r--  net/mac80211/Kconfig | 1
-rw-r--r--  net/mac80211/aes_ccm.c | 6
-rw-r--r--  net/mac80211/agg-rx.c | 3
-rw-r--r--  net/mac80211/agg-tx.c | 59
-rw-r--r--  net/mac80211/cfg.c | 188
-rw-r--r--  net/mac80211/debugfs.c | 91
-rw-r--r--  net/mac80211/debugfs_key.c | 21
-rw-r--r--  net/mac80211/debugfs_sta.c | 26
-rw-r--r--  net/mac80211/driver-ops.h | 87
-rw-r--r--  net/mac80211/driver-trace.h | 275
-rw-r--r--  net/mac80211/ht.c | 27
-rw-r--r--  net/mac80211/ibss.c | 21
-rw-r--r--  net/mac80211/ieee80211_i.h | 51
-rw-r--r--  net/mac80211/iface.c | 7
-rw-r--r--  net/mac80211/key.c | 51
-rw-r--r--  net/mac80211/key.h | 4
-rw-r--r--  net/mac80211/main.c | 69
-rw-r--r--  net/mac80211/mesh.c | 62
-rw-r--r--  net/mac80211/mesh.h | 9
-rw-r--r--  net/mac80211/mesh_hwmp.c | 42
-rw-r--r--  net/mac80211/mesh_pathtbl.c | 166
-rw-r--r--  net/mac80211/mesh_plink.c | 112
-rw-r--r--  net/mac80211/mlme.c | 48
-rw-r--r--  net/mac80211/pm.c | 29
-rw-r--r--  net/mac80211/rc80211_minstrel.c | 4
-rw-r--r--  net/mac80211/rc80211_minstrel_ht.c | 27
-rw-r--r--  net/mac80211/rx.c | 94
-rw-r--r--  net/mac80211/scan.c | 122
-rw-r--r--  net/mac80211/sta_info.c | 52
-rw-r--r--  net/mac80211/sta_info.h | 56
-rw-r--r--  net/mac80211/status.c | 19
-rw-r--r--  net/mac80211/tkip.c | 4
-rw-r--r--  net/mac80211/tkip.h | 4
-rw-r--r--  net/mac80211/tx.c | 27
-rw-r--r--  net/mac80211/util.c | 21
-rw-r--r--  net/mac80211/wep.c | 34
-rw-r--r--  net/mac80211/wep.h | 4
-rw-r--r--  net/mac80211/work.c | 6
-rw-r--r--  net/mac80211/wpa.c | 62
-rw-r--r--  net/netfilter/ipset/ip_set_getport.c | 16
-rw-r--r--  net/netfilter/ipset/ip_set_hash_ipport.c | 2
-rw-r--r--  net/netfilter/ipset/ip_set_hash_ipportip.c | 2
-rw-r--r--  net/netfilter/ipset/ip_set_hash_ipportnet.c | 2
-rw-r--r--  net/netfilter/ipset/ip_set_hash_netport.c | 2
-rw-r--r--  net/netfilter/ipvs/ip_vs_core.c | 24
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c | 3
-rw-r--r--  net/netfilter/ipvs/ip_vs_xmit.c | 97
-rw-r--r--  net/netfilter/nf_conntrack_sip.c | 16
-rw-r--r--  net/netfilter/nf_conntrack_standalone.c | 2
-rw-r--r--  net/netfilter/nfnetlink_log.c | 2
-rw-r--r--  net/netfilter/x_tables.c | 9
-rw-r--r--  net/netlabel/netlabel_cipso_v4.c | 4
-rw-r--r--  net/netrom/af_netrom.c | 12
-rw-r--r--  net/packet/af_packet.c | 2
-rw-r--r--  net/phonet/pn_dev.c | 6
-rw-r--r--  net/phonet/pn_netlink.c | 4
-rw-r--r--  net/phonet/socket.c | 45
-rw-r--r--  net/rfkill/Kconfig | 11
-rw-r--r--  net/rfkill/Makefile | 1
-rw-r--r--  net/rfkill/core.c | 2
-rw-r--r--  net/rfkill/rfkill-regulator.c | 164
-rw-r--r--  net/rose/af_rose.c | 16
-rw-r--r--  net/rxrpc/ar-ack.c | 2
-rw-r--r--  net/rxrpc/ar-connevent.c | 3
-rw-r--r--  net/rxrpc/ar-error.c | 5
-rw-r--r--  net/rxrpc/ar-peer.c | 6
-rw-r--r--  net/rxrpc/ar-transport.c | 3
-rw-r--r--  net/sched/Kconfig | 12
-rw-r--r--  net/sched/Makefile | 1
-rw-r--r--  net/sched/sch_api.c | 2
-rw-r--r--  net/sched/sch_qfq.c | 1137
-rw-r--r--  net/sched/sch_sfq.c | 2
-rw-r--r--  net/sctp/bind_addr.c | 10
-rw-r--r--  net/sctp/debug.c | 1
-rw-r--r--  net/sctp/endpointola.c | 20
-rw-r--r--  net/sctp/input.c | 19
-rw-r--r--  net/sctp/ipv6.c | 185
-rw-r--r--  net/sctp/outqueue.c | 19
-rw-r--r--  net/sctp/protocol.c | 71
-rw-r--r--  net/sctp/sm_make_chunk.c | 62
-rw-r--r--  net/sctp/sm_sideeffect.c | 9
-rw-r--r--  net/sctp/sm_statefuns.c | 89
-rw-r--r--  net/sctp/sm_statetable.c | 78
-rw-r--r--  net/sctp/socket.c | 95
-rw-r--r--  net/sctp/transport.c | 27
-rw-r--r--  net/sctp/ulpevent.c | 30
-rw-r--r--  net/socket.c | 219
-rw-r--r--  net/tipc/addr.h | 7
-rw-r--r--  net/tipc/bcast.c | 22
-rw-r--r--  net/tipc/bearer.c | 45
-rw-r--r--  net/tipc/core.c | 3
-rw-r--r--  net/tipc/discover.c | 150
-rw-r--r--  net/tipc/discover.h | 11
-rw-r--r--  net/tipc/link.c | 104
-rw-r--r--  net/tipc/link.h | 1
-rw-r--r--  net/tipc/msg.c | 25
-rw-r--r--  net/tipc/msg.h | 161
-rw-r--r--  net/tipc/port.c | 55
-rw-r--r--  net/tipc/port.h | 14
-rw-r--r--  net/tipc/socket.c | 27
-rw-r--r--  net/tipc/subscr.c | 4
-rw-r--r--  net/wireless/core.c | 106
-rw-r--r--  net/wireless/core.h | 33
-rw-r--r--  net/wireless/lib80211_crypt_wep.c | 3
-rw-r--r--  net/wireless/mesh.c | 23
-rw-r--r--  net/wireless/mlme.c | 19
-rw-r--r--  net/wireless/nl80211.c | 790
-rw-r--r--  net/wireless/nl80211.h | 11
-rw-r--r--  net/wireless/reg.c | 74
-rw-r--r--  net/wireless/scan.c | 77
-rw-r--r--  net/wireless/sysfs.c | 2
-rw-r--r--  net/wireless/util.c | 126
-rw-r--r--  net/xfrm/xfrm_policy.c | 3
-rw-r--r--  net/xfrm/xfrm_state.c | 12
309 files changed, 13444 insertions, 7259 deletions
diff --git a/net/802/garp.c b/net/802/garp.c
index c1df2dad8c6b..f8300a8b5fbc 100644
--- a/net/802/garp.c
+++ b/net/802/garp.c
@@ -544,6 +544,11 @@ static int garp_init_port(struct net_device *dev)
 	return 0;
 }
 
+static void garp_kfree_rcu(struct rcu_head *head)
+{
+	kfree(container_of(head, struct garp_port, rcu));
+}
+
 static void garp_release_port(struct net_device *dev)
 {
 	struct garp_port *port = rtnl_dereference(dev->garp_port);
@@ -554,8 +559,7 @@ static void garp_release_port(struct net_device *dev)
 			return;
 	}
 	rcu_assign_pointer(dev->garp_port, NULL);
-	synchronize_rcu();
-	kfree(port);
+	call_rcu(&port->rcu, garp_kfree_rcu);
 }
 
 int garp_init_applicant(struct net_device *dev, struct garp_application *appl)
@@ -599,6 +603,11 @@ err1:
 }
 EXPORT_SYMBOL_GPL(garp_init_applicant);
 
+static void garp_app_kfree_rcu(struct rcu_head *head)
+{
+	kfree(container_of(head, struct garp_applicant, rcu));
+}
+
 void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl)
 {
 	struct garp_port *port = rtnl_dereference(dev->garp_port);
@@ -607,7 +616,6 @@ void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl
 	ASSERT_RTNL();
 
 	rcu_assign_pointer(port->applicants[appl->type], NULL);
-	synchronize_rcu();
 
 	/* Delete timer and generate a final TRANSMIT_PDU event to flush out
 	 * all pending messages before the applicant is gone. */
@@ -617,7 +625,7 @@ void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl
 	garp_queue_xmit(app);
 
 	dev_mc_del(dev, appl->proto.group_address);
-	kfree(app);
+	call_rcu(&app->rcu, garp_app_kfree_rcu);
 	garp_release_port(dev);
 }
 EXPORT_SYMBOL_GPL(garp_uninit_applicant);
@@ -635,3 +643,9 @@ void garp_unregister_application(struct garp_application *appl)
 	stp_proto_unregister(&appl->proto);
 }
 EXPORT_SYMBOL_GPL(garp_unregister_application);
+
+static void __exit garp_cleanup_module(void)
+{
+	rcu_barrier(); /* Wait for completion of call_rcu()'s */
+}
+module_exit(garp_cleanup_module);
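
The garp change above is the standard RCU deferred-free idiom: unpublish the pointer, then let call_rcu() free the object after a grace period instead of blocking the updater in synchronize_rcu(), with rcu_barrier() at module exit to flush outstanding callbacks. A minimal out-of-tree sketch of the same pattern, with hypothetical names and a 2.6.37-or-later RCU API assumed (this is an illustration, not part of the patch):

/* Hypothetical module demonstrating the synchronize_rcu() -> call_rcu()
 * conversion applied to net/802/garp.c above. */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>

struct item {
	int value;
	struct rcu_head rcu;
};

static struct item __rcu *cur;

static void item_kfree_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct item, rcu));
}

static void item_release(void)
{
	/* Caller is assumed to hold the update-side lock. */
	struct item *it = rcu_dereference_protected(cur, 1);

	rcu_assign_pointer(cur, NULL);
	if (it)	/* was: synchronize_rcu(); kfree(it); */
		call_rcu(&it->rcu, item_kfree_rcu);
}

static int __init demo_init(void) { return 0; }
static void __exit demo_exit(void)
{
	item_release();
	rcu_barrier();	/* wait for pending call_rcu() callbacks */
}
module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
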
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 0eb1a886b370..b2274d1fd605 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -49,11 +49,6 @@ const char vlan_version[] = DRV_VERSION;
 static const char vlan_copyright[] = "Ben Greear <greearb@candelatech.com>";
 static const char vlan_buggyright[] = "David S. Miller <davem@redhat.com>";
 
-static struct packet_type vlan_packet_type __read_mostly = {
-	.type = cpu_to_be16(ETH_P_8021Q),
-	.func = vlan_skb_recv, /* VLAN receive method */
-};
-
 /* End of global variables definitions. */
 
 static void vlan_group_free(struct vlan_group *grp)
@@ -128,9 +123,10 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
 	vlan_gvrp_request_leave(dev);
 
 	vlan_group_set_device(grp, vlan_id, NULL);
-	if (!grp->killall)
-		synchronize_net();
-
+	/* Because unregister_netdevice_queue() makes sure at least one rcu
+	 * grace period is respected before device freeing,
+	 * we dont need to call synchronize_net() here.
+	 */
 	unregister_netdevice_queue(dev, head);
 
 	/* If the group is now empty, kill off the group. */
@@ -330,10 +326,6 @@ static void vlan_sync_address(struct net_device *dev,
 static void vlan_transfer_features(struct net_device *dev,
 				   struct net_device *vlandev)
 {
-	u32 old_features = vlandev->features;
-
-	vlandev->features &= ~dev->vlan_features;
-	vlandev->features |= dev->features & dev->vlan_features;
 	vlandev->gso_max_size = dev->gso_max_size;
 
 	if (dev->features & NETIF_F_HW_VLAN_TX)
@@ -344,8 +336,8 @@ static void vlan_transfer_features(struct net_device *dev,
 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
 	vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid;
 #endif
-	if (old_features != vlandev->features)
-		netdev_features_change(vlandev);
+
+	netdev_update_features(vlandev);
 }
 
 static void __vlan_device_event(struct net_device *dev, unsigned long event)
@@ -490,9 +482,6 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 		if (dev->reg_state != NETREG_UNREGISTERING)
 			break;
 
-		/* Delete all VLANs for this dev. */
-		grp->killall = 1;
-
 		for (i = 0; i < VLAN_N_VID; i++) {
 			vlandev = vlan_group_get_device(grp, i);
 			if (!vlandev)
@@ -511,6 +500,18 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 	case NETDEV_PRE_TYPE_CHANGE:
 		/* Forbid underlaying device to change its type. */
 		return NOTIFY_BAD;
+
+	case NETDEV_NOTIFY_PEERS:
+	case NETDEV_BONDING_FAILOVER:
+		/* Propagate to vlan devices */
+		for (i = 0; i < VLAN_N_VID; i++) {
+			vlandev = vlan_group_get_device(grp, i);
+			if (!vlandev)
+				continue;
+
+			call_netdevice_notifiers(event, vlandev);
+		}
+		break;
 	}
 
 out:
@@ -691,7 +692,6 @@ static int __init vlan_proto_init(void)
 	if (err < 0)
 		goto err4;
 
-	dev_add_pack(&vlan_packet_type);
 	vlan_ioctl_set(vlan_ioctl_handler);
 	return 0;
 
@@ -712,8 +712,6 @@ static void __exit vlan_cleanup_module(void)
 
 	unregister_netdevice_notifier(&vlan_notifier_block);
 
-	dev_remove_pack(&vlan_packet_type);
-
 	unregister_pernet_subsys(&vlan_net_ops);
 	rcu_barrier(); /* Wait for completion of call_rcu()'s */
 
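
The NETDEV_NOTIFY_PEERS/NETDEV_BONDING_FAILOVER hunk above forwards failover events to each vlan child via call_netdevice_notifiers(). The receiving side of that mechanism is an ordinary netdevice notifier; a hedged sketch with hypothetical names, using the pre-3.11 convention (as in this tree) where the notifier's void pointer is the struct net_device itself:

/* Hypothetical observer of the events the vlan layer now propagates. */
#include <linux/module.h>
#include <linux/netdevice.h>

static int demo_netdev_event(struct notifier_block *nb,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;	/* pre-3.11 calling convention */

	switch (event) {
	case NETDEV_NOTIFY_PEERS:
	case NETDEV_BONDING_FAILOVER:
		pr_info("%s: peers may hold stale state (event %lu)\n",
			dev->name, event);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block demo_notifier = {
	.notifier_call = demo_netdev_event,
};

static int __init demo_init(void)
{
	return register_netdevice_notifier(&demo_notifier);
}
static void __exit demo_exit(void)
{
	unregister_netdevice_notifier(&demo_notifier);
}
module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
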
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index 5687c9b95f33..c3408def8a19 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -75,8 +75,6 @@ static inline struct vlan_dev_info *vlan_dev_info(const struct net_device *dev)
 }
 
 /* found in vlan_dev.c */
-int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
-		  struct packet_type *ptype, struct net_device *orig_dev);
 void vlan_dev_set_ingress_priority(const struct net_device *dev,
 				   u32 skb_prio, u16 vlan_prio);
 int vlan_dev_set_egress_priority(const struct net_device *dev,
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index ce8e3ab3e7a5..41495dc2a4c9 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -4,7 +4,7 @@
 #include <linux/netpoll.h>
 #include "vlan.h"
 
-bool vlan_hwaccel_do_receive(struct sk_buff **skbp)
+bool vlan_do_receive(struct sk_buff **skbp)
 {
 	struct sk_buff *skb = *skbp;
 	u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
@@ -88,3 +88,86 @@ gro_result_t vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
 	return napi_gro_frags(napi);
 }
 EXPORT_SYMBOL(vlan_gro_frags);
+
+static struct sk_buff *vlan_check_reorder_header(struct sk_buff *skb)
+{
+	if (vlan_dev_info(skb->dev)->flags & VLAN_FLAG_REORDER_HDR) {
+		if (skb_cow(skb, skb_headroom(skb)) < 0)
+			skb = NULL;
+		if (skb) {
+			/* Lifted from Gleb's VLAN code... */
+			memmove(skb->data - ETH_HLEN,
+				skb->data - VLAN_ETH_HLEN, 12);
+			skb->mac_header += VLAN_HLEN;
+		}
+	}
+	return skb;
+}
+
+static void vlan_set_encap_proto(struct sk_buff *skb, struct vlan_hdr *vhdr)
+{
+	__be16 proto;
+	unsigned char *rawp;
+
+	/*
+	 * Was a VLAN packet, grab the encapsulated protocol, which the layer
+	 * three protocols care about.
+	 */
+
+	proto = vhdr->h_vlan_encapsulated_proto;
+	if (ntohs(proto) >= 1536) {
+		skb->protocol = proto;
+		return;
+	}
+
+	rawp = skb->data;
+	if (*(unsigned short *) rawp == 0xFFFF)
+		/*
+		 * This is a magic hack to spot IPX packets. Older Novell
+		 * breaks the protocol design and runs IPX over 802.3 without
+		 * an 802.2 LLC layer. We look for FFFF which isn't a used
+		 * 802.2 SSAP/DSAP. This won't work for fault tolerant netware
+		 * but does for the rest.
+		 */
+		skb->protocol = htons(ETH_P_802_3);
+	else
+		/*
+		 * Real 802.2 LLC
+		 */
+		skb->protocol = htons(ETH_P_802_2);
+}
+
+struct sk_buff *vlan_untag(struct sk_buff *skb)
+{
+	struct vlan_hdr *vhdr;
+	u16 vlan_tci;
+
+	if (unlikely(vlan_tx_tag_present(skb))) {
+		/* vlan_tci is already set-up so leave this for another time */
+		return skb;
+	}
+
+	skb = skb_share_check(skb, GFP_ATOMIC);
+	if (unlikely(!skb))
+		goto err_free;
+
+	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
+		goto err_free;
+
+	vhdr = (struct vlan_hdr *) skb->data;
+	vlan_tci = ntohs(vhdr->h_vlan_TCI);
+	__vlan_hwaccel_put_tag(skb, vlan_tci);
+
+	skb_pull_rcsum(skb, VLAN_HLEN);
+	vlan_set_encap_proto(skb, vhdr);
+
+	skb = vlan_check_reorder_header(skb);
+	if (unlikely(!skb))
+		goto err_free;
+
+	return skb;
+
+err_free:
+	kfree_skb(skb);
+	return NULL;
+}
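
vlan_set_encap_proto(), now shared via vlan_untag(), relies on the 802.3 rule that a type/length field of 1536 (0x600) or more is an EtherType while smaller values are frame lengths. A standalone userspace illustration of that check (not kernel code, just the arithmetic):

#include <stdio.h>
#include <arpa/inet.h>

int main(void)
{
	unsigned short ipv4 = htons(0x0800);	/* EtherType: IPv4    */
	unsigned short len  = htons(100);	/* 802.3 frame length */

	printf("0x0800 -> %s\n",
	       ntohs(ipv4) >= 1536 ? "EtherType" : "802.3 length");
	printf("100    -> %s\n",
	       ntohs(len) >= 1536 ? "EtherType" : "802.3 length");
	return 0;
}
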
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index b2ff6c8d3603..f247f5bff88d 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -65,179 +65,6 @@ static int vlan_dev_rebuild_header(struct sk_buff *skb)
 	return 0;
 }
 
-static inline struct sk_buff *vlan_check_reorder_header(struct sk_buff *skb)
-{
-	if (vlan_dev_info(skb->dev)->flags & VLAN_FLAG_REORDER_HDR) {
-		if (skb_cow(skb, skb_headroom(skb)) < 0)
-			skb = NULL;
-		if (skb) {
-			/* Lifted from Gleb's VLAN code... */
-			memmove(skb->data - ETH_HLEN,
-				skb->data - VLAN_ETH_HLEN, 12);
-			skb->mac_header += VLAN_HLEN;
-		}
-	}
-
-	return skb;
-}
-
-static inline void vlan_set_encap_proto(struct sk_buff *skb,
-		struct vlan_hdr *vhdr)
-{
-	__be16 proto;
-	unsigned char *rawp;
-
-	/*
-	 * Was a VLAN packet, grab the encapsulated protocol, which the layer
-	 * three protocols care about.
-	 */
-
-	proto = vhdr->h_vlan_encapsulated_proto;
-	if (ntohs(proto) >= 1536) {
-		skb->protocol = proto;
-		return;
-	}
-
-	rawp = skb->data;
-	if (*(unsigned short *)rawp == 0xFFFF)
-		/*
-		 * This is a magic hack to spot IPX packets. Older Novell
-		 * breaks the protocol design and runs IPX over 802.3 without
-		 * an 802.2 LLC layer. We look for FFFF which isn't a used
-		 * 802.2 SSAP/DSAP. This won't work for fault tolerant netware
-		 * but does for the rest.
-		 */
-		skb->protocol = htons(ETH_P_802_3);
-	else
-		/*
-		 * Real 802.2 LLC
-		 */
-		skb->protocol = htons(ETH_P_802_2);
-}
-
-/*
- * Determine the packet's protocol ID. The rule here is that we
- * assume 802.3 if the type field is short enough to be a length.
- * This is normal practice and works for any 'now in use' protocol.
- *
- * Also, at this point we assume that we ARE dealing exclusively with
- * VLAN packets, or packets that should be made into VLAN packets based
- * on a default VLAN ID.
- *
- * NOTE: Should be similar to ethernet/eth.c.
- *
- * SANITY NOTE: This method is called when a packet is moving up the stack
- *              towards userland. To get here, it would have already passed
- *              through the ethernet/eth.c eth_type_trans() method.
- * SANITY NOTE 2: We are referencing to the VLAN_HDR frields, which MAY be
- *                stored UNALIGNED in the memory. RISC systems don't like
- *                such cases very much...
- * SANITY NOTE 2a: According to Dave Miller & Alexey, it will always be
- *                 aligned, so there doesn't need to be any of the unaligned
- *                 stuff. It has been commented out now... --Ben
- *
- */
-int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
-		  struct packet_type *ptype, struct net_device *orig_dev)
-{
-	struct vlan_hdr *vhdr;
-	struct vlan_pcpu_stats *rx_stats;
-	struct net_device *vlan_dev;
-	u16 vlan_id;
-	u16 vlan_tci;
-
-	skb = skb_share_check(skb, GFP_ATOMIC);
-	if (skb == NULL)
-		goto err_free;
-
-	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
-		goto err_free;
-
-	vhdr = (struct vlan_hdr *)skb->data;
-	vlan_tci = ntohs(vhdr->h_vlan_TCI);
-	vlan_id = vlan_tci & VLAN_VID_MASK;
-
-	rcu_read_lock();
-	vlan_dev = vlan_find_dev(dev, vlan_id);
-
-	/* If the VLAN device is defined, we use it.
-	 * If not, and the VID is 0, it is a 802.1p packet (not
-	 * really a VLAN), so we will just netif_rx it later to the
-	 * original interface, but with the skb->proto set to the
-	 * wrapped proto: we do nothing here.
-	 */
-
-	if (!vlan_dev) {
-		if (vlan_id) {
-			pr_debug("%s: ERROR: No net_device for VID: %u on dev: %s\n",
-				 __func__, vlan_id, dev->name);
-			goto err_unlock;
-		}
-		rx_stats = NULL;
-	} else {
-		skb->dev = vlan_dev;
-
-		rx_stats = this_cpu_ptr(vlan_dev_info(skb->dev)->vlan_pcpu_stats);
-
-		u64_stats_update_begin(&rx_stats->syncp);
-		rx_stats->rx_packets++;
-		rx_stats->rx_bytes += skb->len;
-
-		skb->priority = vlan_get_ingress_priority(skb->dev, vlan_tci);
-
-		pr_debug("%s: priority: %u for TCI: %hu\n",
-			 __func__, skb->priority, vlan_tci);
-
-		switch (skb->pkt_type) {
-		case PACKET_BROADCAST:
-			/* Yeah, stats collect these together.. */
-			/* stats->broadcast ++; // no such counter :-( */
-			break;
-
-		case PACKET_MULTICAST:
-			rx_stats->rx_multicast++;
-			break;
-
-		case PACKET_OTHERHOST:
-			/* Our lower layer thinks this is not local, let's make
-			 * sure.
-			 * This allows the VLAN to have a different MAC than the
-			 * underlying device, and still route correctly.
-			 */
-			if (!compare_ether_addr(eth_hdr(skb)->h_dest,
-						skb->dev->dev_addr))
-				skb->pkt_type = PACKET_HOST;
-			break;
-		default:
-			break;
-		}
-		u64_stats_update_end(&rx_stats->syncp);
-	}
-
-	skb_pull_rcsum(skb, VLAN_HLEN);
-	vlan_set_encap_proto(skb, vhdr);
-
-	if (vlan_dev) {
-		skb = vlan_check_reorder_header(skb);
-		if (!skb) {
-			rx_stats->rx_errors++;
-			goto err_unlock;
-		}
-	}
-
-	netif_rx(skb);
-
-	rcu_read_unlock();
-	return NET_RX_SUCCESS;
-
-err_unlock:
-	rcu_read_unlock();
-err_free:
-	atomic_long_inc(&dev->rx_dropped);
-	kfree_skb(skb);
-	return NET_RX_DROP;
-}
-
 static inline u16
 vlan_dev_get_egress_qos_mask(struct net_device *dev, struct sk_buff *skb)
 {
@@ -701,8 +528,8 @@ static int vlan_dev_init(struct net_device *dev)
 	       (1<<__LINK_STATE_DORMANT))) |
 	       (1<<__LINK_STATE_PRESENT);
 
-	dev->features |= real_dev->features & real_dev->vlan_features;
-	dev->features |= NETIF_F_LLTX;
+	dev->hw_features = NETIF_F_ALL_TX_OFFLOADS;
+	dev->features |= real_dev->vlan_features | NETIF_F_LLTX;
 	dev->gso_max_size = real_dev->gso_max_size;
 
 	/* ipv6 shared card related stuff */
@@ -756,6 +583,19 @@ static void vlan_dev_uninit(struct net_device *dev)
 	}
 }
 
+static u32 vlan_dev_fix_features(struct net_device *dev, u32 features)
+{
+	struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+
+	features &= real_dev->features;
+	features &= real_dev->vlan_features;
+	if (dev_ethtool_get_rx_csum(real_dev))
+		features |= NETIF_F_RXCSUM;
+	features |= NETIF_F_LLTX;
+
+	return features;
+}
+
 static int vlan_ethtool_get_settings(struct net_device *dev,
 				     struct ethtool_cmd *cmd)
 {
@@ -771,18 +611,6 @@ static void vlan_ethtool_get_drvinfo(struct net_device *dev,
 	strcpy(info->fw_version, "N/A");
 }
 
-static u32 vlan_ethtool_get_rx_csum(struct net_device *dev)
-{
-	const struct vlan_dev_info *vlan = vlan_dev_info(dev);
-	return dev_ethtool_get_rx_csum(vlan->real_dev);
-}
-
-static u32 vlan_ethtool_get_flags(struct net_device *dev)
-{
-	const struct vlan_dev_info *vlan = vlan_dev_info(dev);
-	return dev_ethtool_get_flags(vlan->real_dev);
-}
-
 static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
 
@@ -820,32 +648,10 @@ static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, st
 	return stats;
 }
 
-static int vlan_ethtool_set_tso(struct net_device *dev, u32 data)
-{
-	if (data) {
-		struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
-
-		/* Underlying device must support TSO for VLAN-tagged packets
-		 * and must have TSO enabled now.
-		 */
-		if (!(real_dev->vlan_features & NETIF_F_TSO))
-			return -EOPNOTSUPP;
-		if (!(real_dev->features & NETIF_F_TSO))
-			return -EINVAL;
-		dev->features |= NETIF_F_TSO;
-	} else {
-		dev->features &= ~NETIF_F_TSO;
-	}
-	return 0;
-}
-
 static const struct ethtool_ops vlan_ethtool_ops = {
 	.get_settings		= vlan_ethtool_get_settings,
 	.get_drvinfo		= vlan_ethtool_get_drvinfo,
 	.get_link		= ethtool_op_get_link,
-	.get_rx_csum		= vlan_ethtool_get_rx_csum,
-	.get_flags		= vlan_ethtool_get_flags,
-	.set_tso		= vlan_ethtool_set_tso,
 };
 
 static const struct net_device_ops vlan_netdev_ops = {
@@ -871,6 +677,7 @@ static const struct net_device_ops vlan_netdev_ops = {
 	.ndo_fcoe_get_wwn	= vlan_dev_fcoe_get_wwn,
 	.ndo_fcoe_ddp_target	= vlan_dev_fcoe_ddp_target,
 #endif
+	.ndo_fix_features	= vlan_dev_fix_features,
 };
 
 void vlan_setup(struct net_device *dev)
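
The vlan_dev.c change replaces hand-rolled feature mangling (including the ad-hoc set_tso ethtool hook) with the ndo_fix_features contract: the core calls the hook from netdev_update_features() and applies whatever mask it returns, so constraints between features live in one place. A hedged sketch of a driver-side implementation, using the u32-based signature from this tree and a made-up driver name:

#include <linux/netdevice.h>

/* Hypothetical driver hook: the core passes in the requested feature
 * set and applies whatever this function returns. */
static u32 demo_fix_features(struct net_device *dev, u32 features)
{
	/* TSO without checksum offload is not a valid combination,
	 * so drop TSO whenever checksumming is disabled. */
	if (!(features & NETIF_F_ALL_CSUM))
		features &= ~NETIF_F_TSO;
	return features;
}

static const struct net_device_ops demo_netdev_ops = {
	.ndo_fix_features	= demo_fix_features,
};
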
diff --git a/net/9p/client.c b/net/9p/client.c
index a9aa2dd66482..ceab943dfc49 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -1281,7 +1281,7 @@ int
 p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
 		u32 count)
 {
-	int err, rsize, total;
+	int err, rsize;
 	struct p9_client *clnt;
 	struct p9_req_t *req;
 	char *dataptr;
@@ -1290,7 +1290,6 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
 		(long long unsigned) offset, count);
 	err = 0;
 	clnt = fid->clnt;
-	total = 0;
 
 	rsize = fid->iounit;
 	if (!rsize || rsize > clnt->msize-P9_IOHDRSZ)
@@ -1346,7 +1345,7 @@ int
 p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
 		u64 offset, u32 count)
 {
-	int err, rsize, total;
+	int err, rsize;
 	struct p9_client *clnt;
 	struct p9_req_t *req;
 
@@ -1354,7 +1353,6 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
 		fid->fid, (long long unsigned) offset, count);
 	err = 0;
 	clnt = fid->clnt;
-	total = 0;
 
 	rsize = fid->iounit;
 	if (!rsize || rsize > clnt->msize-P9_IOHDRSZ)
@@ -1745,7 +1743,7 @@ EXPORT_SYMBOL_GPL(p9_client_xattrcreate);
 
 int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
 {
-	int err, rsize, total;
+	int err, rsize;
 	struct p9_client *clnt;
 	struct p9_req_t *req;
 	char *dataptr;
@@ -1755,7 +1753,6 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
 
 	err = 0;
 	clnt = fid->clnt;
-	total = 0;
 
 	rsize = fid->iounit;
 	if (!rsize || rsize > clnt->msize-P9_READDIRHDRSZ)
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index aa5672b15eae..4a9084395d35 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -716,7 +716,6 @@ static int parse_opts(char *params, struct p9_fd_opts *opts)
 	substring_t args[MAX_OPT_ARGS];
 	int option;
 	char *options, *tmp_options;
-	int ret;
 
 	opts->port = P9_PORT;
 	opts->rfd = ~0;
@@ -744,7 +743,6 @@ static int parse_opts(char *params, struct p9_fd_opts *opts)
 		if (r < 0) {
 			P9_DPRINTK(P9_DEBUG_ERROR,
 				   "integer field, but no integer?\n");
-			ret = r;
 			continue;
 		}
 	}
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 150e0c4bbf40..844a7a5607e3 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -167,7 +167,6 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts)
 	substring_t args[MAX_OPT_ARGS];
 	int option;
 	char *options, *tmp_options;
-	int ret;
 
 	opts->port = P9_PORT;
 	opts->sq_depth = P9_RDMA_SQ_DEPTH;
@@ -195,7 +194,6 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts)
 		if (r < 0) {
 			P9_DPRINTK(P9_DEBUG_ERROR,
 				   "integer field, but no integer?\n");
-			ret = r;
 			continue;
 		}
 		switch (token) {
diff --git a/net/Kconfig b/net/Kconfig
index 79cabf1ee68b..878151c772c9 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -232,6 +232,20 @@ config XPS
 	depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
 	default y
 
+config HAVE_BPF_JIT
+	bool
+
+config BPF_JIT
+	bool "enable BPF Just In Time compiler"
+	depends on HAVE_BPF_JIT
+	depends on MODULES
+	---help---
+	  Berkeley Packet Filter filtering capabilities are normally handled
+	  by an interpreter. This option allows kernel to generate a native
+	  code when filter is loaded in memory. This should speedup
+	  packet sniffing (libpcap/tcpdump). Note : Admin should enable
+	  this feature changing /proc/sys/net/core/bpf_jit_enable
+
 menu "Network testing"
 
 config NET_PKTGEN
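
What the new BPF_JIT option accelerates is the classic socket-filter path: with CONFIG_BPF_JIT=y and /proc/sys/net/core/bpf_jit_enable set to 1, filters attached via SO_ATTACH_FILTER are compiled to native code instead of being interpreted per packet. A minimal userspace sketch of such a filter (hand-written classic BPF, roughly what tcpdump -dd udp emits for Ethernet; needs root for the packet socket, and error handling is trimmed):

#include <stdio.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <linux/filter.h>
#include <linux/if_ether.h>

int main(void)
{
	/* Accept IPv4 packets whose protocol field is UDP, drop the rest. */
	struct sock_filter code[] = {
		{ 0x28, 0, 0, 0x0000000c },	/* ldh [12]  (ethertype)   */
		{ 0x15, 0, 3, 0x00000800 },	/* jeq #ETH_P_IP, else drop */
		{ 0x30, 0, 0, 0x00000017 },	/* ldb [23]  (ip protocol) */
		{ 0x15, 0, 1, 0x00000011 },	/* jeq #IPPROTO_UDP        */
		{ 0x06, 0, 0, 0x0000ffff },	/* ret #65535 (accept)     */
		{ 0x06, 0, 0, 0x00000000 },	/* ret #0     (drop)       */
	};
	struct sock_fprog prog = { .len = 6, .filter = code };
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

	if (fd < 0 ||
	    setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog)) < 0)
		perror("SO_ATTACH_FILTER");
	return 0;
}
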
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 38754fdb88ba..25073b6ef474 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -129,7 +129,6 @@ static struct net_device *dev_lec[MAX_LEC_ITF];
 #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
 static void lec_handle_bridge(struct sk_buff *skb, struct net_device *dev)
 {
-	struct ethhdr *eth;
 	char *buff;
 	struct lec_priv *priv;
 
@@ -138,7 +137,6 @@ static void lec_handle_bridge(struct sk_buff *skb, struct net_device *dev)
 	 * LE_TOPOLOGY_REQUEST with the same value of Topology Change bit
 	 * as the Config BPDU has
 	 */
-	eth = (struct ethhdr *)skb->data;
 	buff = skb->data + skb->dev->hard_header_len;
 	if (*buff++ == 0x42 && *buff++ == 0x42 && *buff++ == 0x03) {
 		struct sock *sk;
@@ -1180,7 +1178,6 @@ static int __init lane_module_init(void)
 static void __exit lane_module_cleanup(void)
 {
 	int i;
-	struct lec_priv *priv;
 
 	remove_proc_entry("lec", atm_proc_root);
 
@@ -1188,7 +1185,6 @@ static void __exit lane_module_cleanup(void)
 
 	for (i = 0; i < MAX_LEC_ITF; i++) {
 		if (dev_lec[i] != NULL) {
-			priv = netdev_priv(dev_lec[i]);
 			unregister_netdev(dev_lec[i]);
 			free_netdev(dev_lec[i]);
 			dev_lec[i] = NULL;
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 6da5daeebab7..e7c69f4619ec 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1538,8 +1538,6 @@ static int ax25_sendmsg(struct kiocb *iocb, struct socket *sock,
 	}
 
 	/* Build a packet */
-	SOCK_DEBUG(sk, "AX.25: sendto: Addresses built. Building packet.\n");
-
 	/* Assume the worst case */
 	size = len + ax25->ax25_dev->dev->hard_header_len;
 
@@ -1549,8 +1547,6 @@ static int ax25_sendmsg(struct kiocb *iocb, struct socket *sock,
 
 	skb_reserve(skb, size - len);
 
-	SOCK_DEBUG(sk, "AX.25: Appending user data\n");
-
 	/* User data follows immediately after the AX.25 data */
 	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
 		err = -EFAULT;
@@ -1564,8 +1560,6 @@ static int ax25_sendmsg(struct kiocb *iocb, struct socket *sock,
 	if (!ax25->pidincl)
 		*skb_push(skb, 1) = sk->sk_protocol;
 
-	SOCK_DEBUG(sk, "AX.25: Transmitting buffer\n");
-
 	if (sk->sk_type == SOCK_SEQPACKET) {
 		/* Connected mode sockets go via the LAPB machine */
 		if (sk->sk_state != TCP_ESTABLISHED) {
@@ -1583,22 +1577,14 @@ static int ax25_sendmsg(struct kiocb *iocb, struct socket *sock,
 
 	skb_push(skb, 1 + ax25_addr_size(dp));
 
-	SOCK_DEBUG(sk, "Building AX.25 Header (dp=%p).\n", dp);
-
-	if (dp != NULL)
-		SOCK_DEBUG(sk, "Num digipeaters=%d\n", dp->ndigi);
+	/* Building AX.25 Header */
 
 	/* Build an AX.25 header */
 	lv = ax25_addr_build(skb->data, &ax25->source_addr, &sax.sax25_call,
 			     dp, AX25_COMMAND, AX25_MODULUS);
 
-	SOCK_DEBUG(sk, "Built header (%d bytes)\n",lv);
-
 	skb_set_transport_header(skb, lv);
 
-	SOCK_DEBUG(sk, "base=%p pos=%p\n",
-		   skb->data, skb_transport_header(skb));
-
 	*skb_transport_header(skb) = AX25_UI;
 
 	/* Datagram frames go straight out of the door as UI */
diff --git a/net/ax25/ax25_iface.c b/net/ax25/ax25_iface.c
index 5a0dda8df492..60b545e2822a 100644
--- a/net/ax25/ax25_iface.c
+++ b/net/ax25/ax25_iface.c
@@ -58,7 +58,7 @@ EXPORT_SYMBOL_GPL(ax25_register_pid);
 
 void ax25_protocol_release(unsigned int pid)
 {
-	struct ax25_protocol *s, *protocol;
+	struct ax25_protocol *protocol;
 
 	write_lock_bh(&protocol_list_lock);
 	protocol = protocol_list;
@@ -72,7 +72,6 @@ void ax25_protocol_release(unsigned int pid)
 
 	while (protocol != NULL && protocol->next != NULL) {
 		if (protocol->next->pid == pid) {
-			s = protocol->next;
 			protocol->next = protocol->next->next;
 			goto out;
 		}
diff --git a/net/batman-adv/aggregation.c b/net/batman-adv/aggregation.c
index af45d6b2031f..a8c32030527c 100644
--- a/net/batman-adv/aggregation.c
+++ b/net/batman-adv/aggregation.c
@@ -23,11 +23,12 @@
 #include "aggregation.h"
 #include "send.h"
 #include "routing.h"
+#include "hard-interface.h"
 
-/* calculate the size of the hna information for a given packet */
-static int hna_len(struct batman_packet *batman_packet)
+/* calculate the size of the tt information for a given packet */
+static int tt_len(struct batman_packet *batman_packet)
 {
-	return batman_packet->num_hna * ETH_ALEN;
+	return batman_packet->num_tt * ETH_ALEN;
 }
 
 /* return true if new_packet can be aggregated with forw_packet */
@@ -95,7 +96,6 @@ static bool can_aggregate_with(struct batman_packet *new_batman_packet,
 	return false;
 }
 
-#define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
 /* create a new aggregated packet and add this packet to it */
 static void new_aggregated_packet(unsigned char *packet_buff, int packet_len,
 				  unsigned long send_time, bool direct_link,
@@ -106,12 +106,15 @@ static void new_aggregated_packet(unsigned char *packet_buff, int packet_len,
 	struct forw_packet *forw_packet_aggr;
 	unsigned char *skb_buff;
 
+	if (!atomic_inc_not_zero(&if_incoming->refcount))
+		return;
+
 	/* own packet should always be scheduled */
 	if (!own_packet) {
 		if (!atomic_dec_not_zero(&bat_priv->batman_queue_left)) {
 			bat_dbg(DBG_BATMAN, bat_priv,
 				"batman packet queue full\n");
-			return;
+			goto out;
 		}
 	}
 
@@ -119,7 +122,7 @@ static void new_aggregated_packet(unsigned char *packet_buff, int packet_len,
 	if (!forw_packet_aggr) {
 		if (!own_packet)
 			atomic_inc(&bat_priv->batman_queue_left);
-		return;
+		goto out;
 	}
 
 	if ((atomic_read(&bat_priv->aggregated_ogms)) &&
@@ -134,7 +137,7 @@ static void new_aggregated_packet(unsigned char *packet_buff, int packet_len,
 		if (!own_packet)
 			atomic_inc(&bat_priv->batman_queue_left);
 		kfree(forw_packet_aggr);
-		return;
+		goto out;
 	}
 	skb_reserve(forw_packet_aggr->skb, sizeof(struct ethhdr));
 
@@ -165,6 +168,10 @@ static void new_aggregated_packet(unsigned char *packet_buff, int packet_len,
 	queue_delayed_work(bat_event_workqueue,
 			   &forw_packet_aggr->delayed_work,
 			   send_time - jiffies);
+
+	return;
+out:
+	hardif_free_ref(if_incoming);
 }
 
 /* aggregate a new packet into the existing aggregation */
@@ -251,7 +258,7 @@ void receive_aggr_bat_packet(struct ethhdr *ethhdr, unsigned char *packet_buff,
 {
 	struct batman_packet *batman_packet;
 	int buff_pos = 0;
-	unsigned char *hna_buff;
+	unsigned char *tt_buff;
 
 	batman_packet = (struct batman_packet *)packet_buff;
 
@@ -260,14 +267,14 @@ void receive_aggr_bat_packet(struct ethhdr *ethhdr, unsigned char *packet_buff,
 		   orig_interval. */
 		batman_packet->seqno = ntohl(batman_packet->seqno);
 
-		hna_buff = packet_buff + buff_pos + BAT_PACKET_LEN;
+		tt_buff = packet_buff + buff_pos + BAT_PACKET_LEN;
 		receive_bat_packet(ethhdr, batman_packet,
-				   hna_buff, hna_len(batman_packet),
+				   tt_buff, tt_len(batman_packet),
 				   if_incoming);
 
-		buff_pos += BAT_PACKET_LEN + hna_len(batman_packet);
+		buff_pos += BAT_PACKET_LEN + tt_len(batman_packet);
 		batman_packet = (struct batman_packet *)
 			(packet_buff + buff_pos);
 	} while (aggregated_packet(buff_pos, packet_len,
-				   batman_packet->num_hna));
+				   batman_packet->num_tt));
}
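
The new_aggregated_packet() hunks above enforce a reference-count discipline: take a reference on if_incoming with atomic_inc_not_zero() before queueing deferred work, and drop it on every failure path via goto out. The same shape in miniature, with hypothetical types and a stand-in for the work queueing (kernel-style C, illustration only):

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct iface {
	atomic_t refcount;
};

static void iface_free_ref(struct iface *iface)
{
	if (atomic_dec_and_test(&iface->refcount))
		kfree(iface);
}

/* Stand-in for queue_delayed_work() and friends. */
static int iface_queue_work(struct iface *iface)
{
	return 0;
}

static int iface_schedule(struct iface *iface)
{
	/* 0 -> 1 transitions are forbidden: a count that already hit
	 * zero means the object is being torn down. */
	if (!atomic_inc_not_zero(&iface->refcount))
		return -ENODEV;

	if (iface_queue_work(iface) < 0)
		goto out;	/* every error path drops the reference */

	return 0;	/* the queued work now owns the reference */
out:
	iface_free_ref(iface);
	return -EBUSY;
}
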
diff --git a/net/batman-adv/aggregation.h b/net/batman-adv/aggregation.h
index 062204289d1f..7e6d72fbf540 100644
--- a/net/batman-adv/aggregation.h
+++ b/net/batman-adv/aggregation.h
@@ -25,9 +25,9 @@
 #include "main.h"
 
 /* is there another aggregated packet here? */
-static inline int aggregated_packet(int buff_pos, int packet_len, int num_hna)
+static inline int aggregated_packet(int buff_pos, int packet_len, int num_tt)
 {
-	int next_buff_pos = buff_pos + BAT_PACKET_LEN + (num_hna * ETH_ALEN);
+	int next_buff_pos = buff_pos + BAT_PACKET_LEN + (num_tt * ETH_ALEN);
 
 	return (next_buff_pos <= packet_len) &&
 	       (next_buff_pos <= MAX_AGGREGATION_BYTES);
diff --git a/net/batman-adv/bat_debugfs.c b/net/batman-adv/bat_debugfs.c
index 0e9d43509935..abaeec5f6247 100644
--- a/net/batman-adv/bat_debugfs.c
+++ b/net/batman-adv/bat_debugfs.c
@@ -241,13 +241,13 @@ static int softif_neigh_open(struct inode *inode, struct file *file)
 static int transtable_global_open(struct inode *inode, struct file *file)
 {
 	struct net_device *net_dev = (struct net_device *)inode->i_private;
-	return single_open(file, hna_global_seq_print_text, net_dev);
+	return single_open(file, tt_global_seq_print_text, net_dev);
 }
 
 static int transtable_local_open(struct inode *inode, struct file *file)
 {
 	struct net_device *net_dev = (struct net_device *)inode->i_private;
-	return single_open(file, hna_local_seq_print_text, net_dev);
+	return single_open(file, tt_local_seq_print_text, net_dev);
 }
 
 static int vis_data_open(struct inode *inode, struct file *file)
diff --git a/net/batman-adv/bat_sysfs.c b/net/batman-adv/bat_sysfs.c
index e449bf6353e0..497a0700cc3c 100644
--- a/net/batman-adv/bat_sysfs.c
+++ b/net/batman-adv/bat_sysfs.c
@@ -488,22 +488,24 @@ static ssize_t store_mesh_iface(struct kobject *kobj, struct attribute *attr,
 	    (strncmp(hard_iface->soft_iface->name, buff, IFNAMSIZ) == 0))
 		goto out;
 
+	if (!rtnl_trylock()) {
+		ret = -ERESTARTSYS;
+		goto out;
+	}
+
 	if (status_tmp == IF_NOT_IN_USE) {
-		rtnl_lock();
 		hardif_disable_interface(hard_iface);
-		rtnl_unlock();
-		goto out;
+		goto unlock;
 	}
 
 	/* if the interface already is in use */
-	if (hard_iface->if_status != IF_NOT_IN_USE) {
-		rtnl_lock();
+	if (hard_iface->if_status != IF_NOT_IN_USE)
 		hardif_disable_interface(hard_iface);
-		rtnl_unlock();
-	}
 
 	ret = hardif_enable_interface(hard_iface, buff);
 
+unlock:
+	rtnl_unlock();
 out:
 	hardif_free_ref(hard_iface);
 	return ret;
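
The rtnl_trylock() conversion above is a common sysfs-store idiom: taking rtnl_lock() unconditionally from a store handler can deadlock against a path that already holds RTNL while tearing down the very attribute being written. This hunk backs off with -ERESTARTSYS; many drivers (bonding's sysfs code, for instance) achieve the same effect with restart_syscall(). A hedged generic sketch, with a made-up handler name:

#include <linux/device.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>

static ssize_t demo_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	/* Back off rather than sleep on RTNL; the write(2) will be
	 * transparently restarted and retried. */
	if (!rtnl_trylock())
		return restart_syscall();

	/* ... reconfigure the device under RTNL ... */

	rtnl_unlock();
	return count;
}
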
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 150b6ce23df3..61605a0f3f39 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -23,6 +23,7 @@
 #include "gateway_client.h"
 #include "gateway_common.h"
 #include "hard-interface.h"
+#include "originator.h"
 #include <linux/ip.h>
 #include <linux/ipv6.h>
 #include <linux/udp.h>
@@ -34,61 +35,76 @@ static void gw_node_free_ref(struct gw_node *gw_node)
 	kfree_rcu(gw_node, rcu);
 }
 
-void *gw_get_selected(struct bat_priv *bat_priv)
+static struct gw_node *gw_get_selected_gw_node(struct bat_priv *bat_priv)
 {
-	struct gw_node *curr_gateway_tmp;
-	struct orig_node *orig_node = NULL;
+	struct gw_node *gw_node;
 
 	rcu_read_lock();
-	curr_gateway_tmp = rcu_dereference(bat_priv->curr_gw);
-	if (!curr_gateway_tmp)
-		goto out;
-
-	orig_node = curr_gateway_tmp->orig_node;
-	if (!orig_node)
+	gw_node = rcu_dereference(bat_priv->curr_gw);
+	if (!gw_node)
 		goto out;
 
-	if (!atomic_inc_not_zero(&orig_node->refcount))
-		orig_node = NULL;
+	if (!atomic_inc_not_zero(&gw_node->refcount))
+		gw_node = NULL;
 
 out:
 	rcu_read_unlock();
-	return orig_node;
+	return gw_node;
 }
 
-void gw_deselect(struct bat_priv *bat_priv)
+struct orig_node *gw_get_selected_orig(struct bat_priv *bat_priv)
 {
 	struct gw_node *gw_node;
+	struct orig_node *orig_node = NULL;
 
-	spin_lock_bh(&bat_priv->gw_list_lock);
-	gw_node = rcu_dereference(bat_priv->curr_gw);
-	rcu_assign_pointer(bat_priv->curr_gw, NULL);
-	spin_unlock_bh(&bat_priv->gw_list_lock);
+	gw_node = gw_get_selected_gw_node(bat_priv);
+	if (!gw_node)
+		goto out;
+
+	rcu_read_lock();
+	orig_node = gw_node->orig_node;
+	if (!orig_node)
+		goto unlock;
+
+	if (!atomic_inc_not_zero(&orig_node->refcount))
+		orig_node = NULL;
 
+unlock:
+	rcu_read_unlock();
+out:
 	if (gw_node)
 		gw_node_free_ref(gw_node);
+	return orig_node;
 }
 
 static void gw_select(struct bat_priv *bat_priv, struct gw_node *new_gw_node)
 {
 	struct gw_node *curr_gw_node;
 
+	spin_lock_bh(&bat_priv->gw_list_lock);
+
 	if (new_gw_node && !atomic_inc_not_zero(&new_gw_node->refcount))
 		new_gw_node = NULL;
 
-	spin_lock_bh(&bat_priv->gw_list_lock);
-	curr_gw_node = rcu_dereference(bat_priv->curr_gw);
+	curr_gw_node = bat_priv->curr_gw;
 	rcu_assign_pointer(bat_priv->curr_gw, new_gw_node);
-	spin_unlock_bh(&bat_priv->gw_list_lock);
 
 	if (curr_gw_node)
 		gw_node_free_ref(curr_gw_node);
+
+	spin_unlock_bh(&bat_priv->gw_list_lock);
+}
+
+void gw_deselect(struct bat_priv *bat_priv)
+{
+	gw_select(bat_priv, NULL);
 }
 
 void gw_election(struct bat_priv *bat_priv)
 {
 	struct hlist_node *node;
-	struct gw_node *gw_node, *curr_gw, *curr_gw_tmp = NULL;
+	struct gw_node *gw_node, *curr_gw = NULL, *curr_gw_tmp = NULL;
+	struct neigh_node *router;
 	uint8_t max_tq = 0;
 	uint32_t max_gw_factor = 0, tmp_gw_factor = 0;
 	int down, up;
@@ -102,32 +118,25 @@ void gw_election(struct bat_priv *bat_priv)
 	if (atomic_read(&bat_priv->gw_mode) != GW_MODE_CLIENT)
 		return;
 
-	rcu_read_lock();
-	curr_gw = rcu_dereference(bat_priv->curr_gw);
-	if (curr_gw) {
-		rcu_read_unlock();
-		return;
-	}
+	curr_gw = gw_get_selected_gw_node(bat_priv);
+	if (curr_gw)
+		goto out;
 
+	rcu_read_lock();
 	if (hlist_empty(&bat_priv->gw_list)) {
-
-		if (curr_gw) {
-			rcu_read_unlock();
-			bat_dbg(DBG_BATMAN, bat_priv,
-				"Removing selected gateway - "
-				"no gateway in range\n");
-			gw_deselect(bat_priv);
-		} else
-			rcu_read_unlock();
-
-		return;
+		bat_dbg(DBG_BATMAN, bat_priv,
+			"Removing selected gateway - "
+			"no gateway in range\n");
+		gw_deselect(bat_priv);
+		goto unlock;
 	}
 
 	hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
-		if (!gw_node->orig_node->router)
+		if (gw_node->deleted)
 			continue;
 
-		if (gw_node->deleted)
+		router = orig_node_get_router(gw_node->orig_node);
+		if (!router)
 			continue;
 
 		switch (atomic_read(&bat_priv->gw_sel_class)) {
@@ -135,15 +144,14 @@ void gw_election(struct bat_priv *bat_priv)
 			gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags,
 					     &down, &up);
 
-			tmp_gw_factor = (gw_node->orig_node->router->tq_avg *
-					 gw_node->orig_node->router->tq_avg *
+			tmp_gw_factor = (router->tq_avg * router->tq_avg *
 					 down * 100 * 100) /
 					(TQ_LOCAL_WINDOW_SIZE *
 					 TQ_LOCAL_WINDOW_SIZE * 64);
 
 			if ((tmp_gw_factor > max_gw_factor) ||
 			    ((tmp_gw_factor == max_gw_factor) &&
-			     (gw_node->orig_node->router->tq_avg > max_tq)))
+			     (router->tq_avg > max_tq)))
 				curr_gw_tmp = gw_node;
 			break;
 
@@ -155,19 +163,25 @@ void gw_election(struct bat_priv *bat_priv)
 			 * soon as a better gateway appears which has
 			 * $routing_class more tq points)
 			 **/
-			if (gw_node->orig_node->router->tq_avg > max_tq)
+			if (router->tq_avg > max_tq)
 				curr_gw_tmp = gw_node;
 			break;
 		}
 
-		if (gw_node->orig_node->router->tq_avg > max_tq)
-			max_tq = gw_node->orig_node->router->tq_avg;
+		if (router->tq_avg > max_tq)
+			max_tq = router->tq_avg;
 
 		if (tmp_gw_factor > max_gw_factor)
 			max_gw_factor = tmp_gw_factor;
+
+		neigh_node_free_ref(router);
 	}
 
 	if (curr_gw != curr_gw_tmp) {
+		router = orig_node_get_router(curr_gw_tmp->orig_node);
+		if (!router)
+			goto unlock;
+
 		if ((curr_gw) && (!curr_gw_tmp))
 			bat_dbg(DBG_BATMAN, bat_priv,
 				"Removing selected gateway - "
@@ -178,48 +192,50 @@
 				"(gw_flags: %i, tq: %i)\n",
 				curr_gw_tmp->orig_node->orig,
 				curr_gw_tmp->orig_node->gw_flags,
-				curr_gw_tmp->orig_node->router->tq_avg);
+				router->tq_avg);
 		else
 			bat_dbg(DBG_BATMAN, bat_priv,
184 "Changing route to gateway %pM " 198 "Changing route to gateway %pM "
185 "(gw_flags: %i, tq: %i)\n", 199 "(gw_flags: %i, tq: %i)\n",
186 curr_gw_tmp->orig_node->orig, 200 curr_gw_tmp->orig_node->orig,
187 curr_gw_tmp->orig_node->gw_flags, 201 curr_gw_tmp->orig_node->gw_flags,
188 curr_gw_tmp->orig_node->router->tq_avg); 202 router->tq_avg);
189 203
204 neigh_node_free_ref(router);
190 gw_select(bat_priv, curr_gw_tmp); 205 gw_select(bat_priv, curr_gw_tmp);
191 } 206 }
192 207
208unlock:
193 rcu_read_unlock(); 209 rcu_read_unlock();
210out:
211 if (curr_gw)
212 gw_node_free_ref(curr_gw);
194} 213}
195 214
196void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node) 215void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node)
197{ 216{
198 struct gw_node *curr_gateway_tmp; 217 struct orig_node *curr_gw_orig;
218 struct neigh_node *router_gw = NULL, *router_orig = NULL;
199 uint8_t gw_tq_avg, orig_tq_avg; 219 uint8_t gw_tq_avg, orig_tq_avg;
200 220
201 rcu_read_lock(); 221 curr_gw_orig = gw_get_selected_orig(bat_priv);
202 curr_gateway_tmp = rcu_dereference(bat_priv->curr_gw); 222 if (!curr_gw_orig)
203 if (!curr_gateway_tmp) 223 goto deselect;
204 goto out_rcu;
205
206 if (!curr_gateway_tmp->orig_node)
207 goto deselect_rcu;
208 224
209 if (!curr_gateway_tmp->orig_node->router) 225 router_gw = orig_node_get_router(curr_gw_orig);
210 goto deselect_rcu; 226 if (!router_gw)
227 goto deselect;
211 228
212 /* this node already is the gateway */ 229 /* this node already is the gateway */
213 if (curr_gateway_tmp->orig_node == orig_node) 230 if (curr_gw_orig == orig_node)
214 goto out_rcu; 231 goto out;
215
216 if (!orig_node->router)
217 goto out_rcu;
218 232
219 gw_tq_avg = curr_gateway_tmp->orig_node->router->tq_avg; 233 router_orig = orig_node_get_router(orig_node);
220 rcu_read_unlock(); 234 if (!router_orig)
235 goto out;
221 236
222 orig_tq_avg = orig_node->router->tq_avg; 237 gw_tq_avg = router_gw->tq_avg;
238 orig_tq_avg = router_orig->tq_avg;
223 239
224 /* the TQ value has to be better */ 240 /* the TQ value has to be better */
225 if (orig_tq_avg < gw_tq_avg) 241 if (orig_tq_avg < gw_tq_avg)
@@ -237,16 +253,17 @@ void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node)
237 "Restarting gateway selection: better gateway found (tq curr: " 253 "Restarting gateway selection: better gateway found (tq curr: "
238 "%i, tq new: %i)\n", 254 "%i, tq new: %i)\n",
239 gw_tq_avg, orig_tq_avg); 255 gw_tq_avg, orig_tq_avg);
240 goto deselect;
241 256
242out_rcu:
243 rcu_read_unlock();
244 goto out;
245deselect_rcu:
246 rcu_read_unlock();
247deselect: 257deselect:
248 gw_deselect(bat_priv); 258 gw_deselect(bat_priv);
249out: 259out:
260 if (curr_gw_orig)
261 orig_node_free_ref(curr_gw_orig);
262 if (router_gw)
263 neigh_node_free_ref(router_gw);
264 if (router_orig)
265 neigh_node_free_ref(router_orig);
266
250 return; 267 return;
251} 268}
252 269
@@ -283,7 +300,15 @@ void gw_node_update(struct bat_priv *bat_priv,
283 struct orig_node *orig_node, uint8_t new_gwflags) 300 struct orig_node *orig_node, uint8_t new_gwflags)
284{ 301{
285 struct hlist_node *node; 302 struct hlist_node *node;
286 struct gw_node *gw_node; 303 struct gw_node *gw_node, *curr_gw;
304
305 /**
306 * Note: We don't need a NULL check here, since curr_gw never gets
307 * dereferenced. If curr_gw is NULL we also should not exit as we may
308 * have this gateway in our list (duplication check!) even though we
309 * have no currently selected gateway.
310 */
311 curr_gw = gw_get_selected_gw_node(bat_priv);
287 312
288 rcu_read_lock(); 313 rcu_read_lock();
289 hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) { 314 hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
@@ -304,22 +329,26 @@ void gw_node_update(struct bat_priv *bat_priv,
304 "Gateway %pM removed from gateway list\n", 329 "Gateway %pM removed from gateway list\n",
305 orig_node->orig); 330 orig_node->orig);
306 331
307 if (gw_node == rcu_dereference(bat_priv->curr_gw)) { 332 if (gw_node == curr_gw)
308 rcu_read_unlock(); 333 goto deselect;
309 gw_deselect(bat_priv);
310 return;
311 }
312 } 334 }
313 335
314 rcu_read_unlock(); 336 goto unlock;
315 return;
316 } 337 }
317 rcu_read_unlock();
318 338
319 if (new_gwflags == 0) 339 if (new_gwflags == 0)
320 return; 340 goto unlock;
321 341
322 gw_node_add(bat_priv, orig_node, new_gwflags); 342 gw_node_add(bat_priv, orig_node, new_gwflags);
343 goto unlock;
344
345deselect:
346 gw_deselect(bat_priv);
347unlock:
348 rcu_read_unlock();
349
350 if (curr_gw)
351 gw_node_free_ref(curr_gw);
323} 352}
324 353
325void gw_node_delete(struct bat_priv *bat_priv, struct orig_node *orig_node) 354void gw_node_delete(struct bat_priv *bat_priv, struct orig_node *orig_node)
@@ -329,9 +358,12 @@ void gw_node_delete(struct bat_priv *bat_priv, struct orig_node *orig_node)
329 358
330void gw_node_purge(struct bat_priv *bat_priv) 359void gw_node_purge(struct bat_priv *bat_priv)
331{ 360{
332 struct gw_node *gw_node; 361 struct gw_node *gw_node, *curr_gw;
333 struct hlist_node *node, *node_tmp; 362 struct hlist_node *node, *node_tmp;
334 unsigned long timeout = 2 * PURGE_TIMEOUT * HZ; 363 unsigned long timeout = 2 * PURGE_TIMEOUT * HZ;
364 char do_deselect = 0;
365
366 curr_gw = gw_get_selected_gw_node(bat_priv);
335 367
336 spin_lock_bh(&bat_priv->gw_list_lock); 368 spin_lock_bh(&bat_priv->gw_list_lock);
337 369
@@ -342,41 +374,56 @@ void gw_node_purge(struct bat_priv *bat_priv)
342 atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE) 374 atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE)
343 continue; 375 continue;
344 376
345 if (rcu_dereference(bat_priv->curr_gw) == gw_node) 377 if (curr_gw == gw_node)
346 gw_deselect(bat_priv); 378 do_deselect = 1;
347 379
348 hlist_del_rcu(&gw_node->list); 380 hlist_del_rcu(&gw_node->list);
349 gw_node_free_ref(gw_node); 381 gw_node_free_ref(gw_node);
350 } 382 }
351 383
352
353 spin_unlock_bh(&bat_priv->gw_list_lock); 384 spin_unlock_bh(&bat_priv->gw_list_lock);
385
386 /* gw_deselect() needs to acquire the gw_list_lock */
387 if (do_deselect)
388 gw_deselect(bat_priv);
389
390 if (curr_gw)
391 gw_node_free_ref(curr_gw);
354} 392}
355 393
394/**
395 * fails if orig_node has no router
396 */
356static int _write_buffer_text(struct bat_priv *bat_priv, 397static int _write_buffer_text(struct bat_priv *bat_priv,
357 struct seq_file *seq, struct gw_node *gw_node) 398 struct seq_file *seq, struct gw_node *gw_node)
358{ 399{
359 struct gw_node *curr_gw; 400 struct gw_node *curr_gw;
360 int down, up, ret; 401 struct neigh_node *router;
402 int down, up, ret = -1;
361 403
362 gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags, &down, &up); 404 gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags, &down, &up);
363 405
364 rcu_read_lock(); 406 router = orig_node_get_router(gw_node->orig_node);
365 curr_gw = rcu_dereference(bat_priv->curr_gw); 407 if (!router)
408 goto out;
366 409
367 ret = seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %3i - %i%s/%i%s\n", 410 curr_gw = gw_get_selected_gw_node(bat_priv);
368 (curr_gw == gw_node ? "=>" : " "),
369 gw_node->orig_node->orig,
370 gw_node->orig_node->router->tq_avg,
371 gw_node->orig_node->router->addr,
372 gw_node->orig_node->router->if_incoming->net_dev->name,
373 gw_node->orig_node->gw_flags,
374 (down > 2048 ? down / 1024 : down),
375 (down > 2048 ? "MBit" : "KBit"),
376 (up > 2048 ? up / 1024 : up),
377 (up > 2048 ? "MBit" : "KBit"));
378 411
379 rcu_read_unlock(); 412 ret = seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %3i - %i%s/%i%s\n",
413 (curr_gw == gw_node ? "=>" : " "),
414 gw_node->orig_node->orig,
415 router->tq_avg, router->addr,
416 router->if_incoming->net_dev->name,
417 gw_node->orig_node->gw_flags,
418 (down > 2048 ? down / 1024 : down),
419 (down > 2048 ? "MBit" : "KBit"),
420 (up > 2048 ? up / 1024 : up),
421 (up > 2048 ? "MBit" : "KBit"));
422
423 neigh_node_free_ref(router);
424 if (curr_gw)
425 gw_node_free_ref(curr_gw);
426out:
380 return ret; 427 return ret;
381} 428}
382 429
@@ -384,40 +431,42 @@ int gw_client_seq_print_text(struct seq_file *seq, void *offset)
384{ 431{
385 struct net_device *net_dev = (struct net_device *)seq->private; 432 struct net_device *net_dev = (struct net_device *)seq->private;
386 struct bat_priv *bat_priv = netdev_priv(net_dev); 433 struct bat_priv *bat_priv = netdev_priv(net_dev);
434 struct hard_iface *primary_if;
387 struct gw_node *gw_node; 435 struct gw_node *gw_node;
388 struct hlist_node *node; 436 struct hlist_node *node;
389 int gw_count = 0; 437 int gw_count = 0, ret = 0;
390
391 if (!bat_priv->primary_if) {
392 438
393 return seq_printf(seq, "BATMAN mesh %s disabled - please " 439 primary_if = primary_if_get_selected(bat_priv);
394 "specify interfaces to enable it\n", 440 if (!primary_if) {
395 net_dev->name); 441 ret = seq_printf(seq, "BATMAN mesh %s disabled - please "
442 "specify interfaces to enable it\n",
443 net_dev->name);
444 goto out;
396 } 445 }
397 446
398 if (bat_priv->primary_if->if_status != IF_ACTIVE) { 447 if (primary_if->if_status != IF_ACTIVE) {
399 448 ret = seq_printf(seq, "BATMAN mesh %s disabled - "
400 return seq_printf(seq, "BATMAN mesh %s disabled - " 449 "primary interface not active\n",
401 "primary interface not active\n", 450 net_dev->name);
402 net_dev->name); 451 goto out;
403 } 452 }
404 453
405 seq_printf(seq, " %-12s (%s/%i) %17s [%10s]: gw_class ... " 454 seq_printf(seq, " %-12s (%s/%i) %17s [%10s]: gw_class ... "
406 "[B.A.T.M.A.N. adv %s%s, MainIF/MAC: %s/%pM (%s)]\n", 455 "[B.A.T.M.A.N. adv %s%s, MainIF/MAC: %s/%pM (%s)]\n",
407 "Gateway", "#", TQ_MAX_VALUE, "Nexthop", 456 "Gateway", "#", TQ_MAX_VALUE, "Nexthop",
408 "outgoingIF", SOURCE_VERSION, REVISION_VERSION_STR, 457 "outgoingIF", SOURCE_VERSION, REVISION_VERSION_STR,
409 bat_priv->primary_if->net_dev->name, 458 primary_if->net_dev->name,
410 bat_priv->primary_if->net_dev->dev_addr, net_dev->name); 459 primary_if->net_dev->dev_addr, net_dev->name);
411 460
412 rcu_read_lock(); 461 rcu_read_lock();
413 hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) { 462 hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
414 if (gw_node->deleted) 463 if (gw_node->deleted)
415 continue; 464 continue;
416 465
417 if (!gw_node->orig_node->router) 466 /* fails if orig_node has no router */
467 if (_write_buffer_text(bat_priv, seq, gw_node) < 0)
418 continue; 468 continue;
419 469
420 _write_buffer_text(bat_priv, seq, gw_node);
421 gw_count++; 470 gw_count++;
422 } 471 }
423 rcu_read_unlock(); 472 rcu_read_unlock();
@@ -425,7 +474,10 @@ int gw_client_seq_print_text(struct seq_file *seq, void *offset)
425 if (gw_count == 0) 474 if (gw_count == 0)
426 seq_printf(seq, "No gateways in range ...\n"); 475 seq_printf(seq, "No gateways in range ...\n");
427 476
428 return 0; 477out:
478 if (primary_if)
479 hardif_free_ref(primary_if);
480 return ret;
429} 481}
430 482
431int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb) 483int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb)
@@ -434,6 +486,7 @@ int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb)
434 struct iphdr *iphdr; 486 struct iphdr *iphdr;
435 struct ipv6hdr *ipv6hdr; 487 struct ipv6hdr *ipv6hdr;
436 struct udphdr *udphdr; 488 struct udphdr *udphdr;
489 struct gw_node *curr_gw;
437 unsigned int header_len = 0; 490 unsigned int header_len = 0;
438 491
439 if (atomic_read(&bat_priv->gw_mode) == GW_MODE_OFF) 492 if (atomic_read(&bat_priv->gw_mode) == GW_MODE_OFF)
@@ -498,12 +551,11 @@ int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb)
498 if (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER) 551 if (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER)
499 return -1; 552 return -1;
500 553
501 rcu_read_lock(); 554 curr_gw = gw_get_selected_gw_node(bat_priv);
502 if (!rcu_dereference(bat_priv->curr_gw)) { 555 if (!curr_gw)
503 rcu_read_unlock();
504 return 0; 556 return 0;
505 }
506 rcu_read_unlock();
507 557
558 if (curr_gw)
559 gw_node_free_ref(curr_gw);
508 return 1; 560 return 1;
509} 561}
diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h
index 2aa439124ee3..1ce8c6066da1 100644
--- a/net/batman-adv/gateway_client.h
+++ b/net/batman-adv/gateway_client.h
@@ -24,7 +24,7 @@
 
 void gw_deselect(struct bat_priv *bat_priv);
 void gw_election(struct bat_priv *bat_priv);
-void *gw_get_selected(struct bat_priv *bat_priv);
+struct orig_node *gw_get_selected_orig(struct bat_priv *bat_priv);
 void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node);
 void gw_node_update(struct bat_priv *bat_priv,
 		    struct orig_node *orig_node, uint8_t new_gwflags);
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index b3058e46ee6b..dfbfccc9fe40 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -31,9 +31,6 @@
 
 #include <linux/if_arp.h>
 
-/* protect update critical side of hardif_list - but not the content */
-static DEFINE_SPINLOCK(hardif_list_lock);
-
 
 static int batman_skb_recv(struct sk_buff *skb,
 			   struct net_device *dev,
@@ -110,47 +107,57 @@ out:
 	return hard_iface;
 }
 
-static void update_primary_addr(struct bat_priv *bat_priv)
+static void primary_if_update_addr(struct bat_priv *bat_priv)
 {
 	struct vis_packet *vis_packet;
+	struct hard_iface *primary_if;
+
+	primary_if = primary_if_get_selected(bat_priv);
+	if (!primary_if)
+		goto out;
 
 	vis_packet = (struct vis_packet *)
 				bat_priv->my_vis_info->skb_packet->data;
-	memcpy(vis_packet->vis_orig,
-	       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
+	memcpy(vis_packet->vis_orig, primary_if->net_dev->dev_addr, ETH_ALEN);
 	memcpy(vis_packet->sender_orig,
-	       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
+	       primary_if->net_dev->dev_addr, ETH_ALEN);
+
+out:
+	if (primary_if)
+		hardif_free_ref(primary_if);
 }
 
-static void set_primary_if(struct bat_priv *bat_priv,
-			   struct hard_iface *hard_iface)
+static void primary_if_select(struct bat_priv *bat_priv,
+			      struct hard_iface *new_hard_iface)
 {
+	struct hard_iface *curr_hard_iface;
 	struct batman_packet *batman_packet;
-	struct hard_iface *old_if;
 
-	if (hard_iface && !atomic_inc_not_zero(&hard_iface->refcount))
-		hard_iface = NULL;
+	ASSERT_RTNL();
+
+	if (new_hard_iface && !atomic_inc_not_zero(&new_hard_iface->refcount))
+		new_hard_iface = NULL;
 
-	old_if = bat_priv->primary_if;
-	bat_priv->primary_if = hard_iface;
+	curr_hard_iface = bat_priv->primary_if;
+	rcu_assign_pointer(bat_priv->primary_if, new_hard_iface);
 
-	if (old_if)
-		hardif_free_ref(old_if);
+	if (curr_hard_iface)
+		hardif_free_ref(curr_hard_iface);
 
-	if (!bat_priv->primary_if)
+	if (!new_hard_iface)
 		return;
 
-	batman_packet = (struct batman_packet *)(hard_iface->packet_buff);
+	batman_packet = (struct batman_packet *)(new_hard_iface->packet_buff);
 	batman_packet->flags = PRIMARIES_FIRST_HOP;
 	batman_packet->ttl = TTL;
 
-	update_primary_addr(bat_priv);
+	primary_if_update_addr(bat_priv);
 
 	/***
-	 * hacky trick to make sure that we send the HNA information via
+	 * hacky trick to make sure that we send the TT information via
 	 * our new primary interface
 	 */
-	atomic_set(&bat_priv->hna_local_changed, 1);
+	atomic_set(&bat_priv->tt_local_changed, 1);
 }
 
 static bool hardif_is_iface_up(struct hard_iface *hard_iface)
@@ -236,9 +243,10 @@ void update_min_mtu(struct net_device *soft_iface)
 static void hardif_activate_interface(struct hard_iface *hard_iface)
 {
 	struct bat_priv *bat_priv;
+	struct hard_iface *primary_if = NULL;
 
 	if (hard_iface->if_status != IF_INACTIVE)
-		return;
+		goto out;
 
 	bat_priv = netdev_priv(hard_iface->soft_iface);
 
@@ -249,14 +257,18 @@ static void hardif_activate_interface(struct hard_iface *hard_iface)
 	 * the first active interface becomes our primary interface or
 	 * the next active interface after the old primay interface was removed
 	 */
-	if (!bat_priv->primary_if)
-		set_primary_if(bat_priv, hard_iface);
+	primary_if = primary_if_get_selected(bat_priv);
+	if (!primary_if)
+		primary_if_select(bat_priv, hard_iface);
 
 	bat_info(hard_iface->soft_iface, "Interface activated: %s\n",
 		 hard_iface->net_dev->name);
 
 	update_min_mtu(hard_iface->soft_iface);
-	return;
+
+out:
+	if (primary_if)
+		hardif_free_ref(primary_if);
 }
 
 static void hardif_deactivate_interface(struct hard_iface *hard_iface)
@@ -327,7 +339,7 @@ int hardif_enable_interface(struct hard_iface *hard_iface, char *iface_name)
 	batman_packet->flags = 0;
 	batman_packet->ttl = 2;
 	batman_packet->tq = TQ_MAX_VALUE;
-	batman_packet->num_hna = 0;
+	batman_packet->num_tt = 0;
 
 	hard_iface->if_num = bat_priv->num_ifaces;
 	bat_priv->num_ifaces++;
@@ -386,12 +398,13 @@ err:
 void hardif_disable_interface(struct hard_iface *hard_iface)
 {
 	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+	struct hard_iface *primary_if = NULL;
 
 	if (hard_iface->if_status == IF_ACTIVE)
 		hardif_deactivate_interface(hard_iface);
 
 	if (hard_iface->if_status != IF_INACTIVE)
-		return;
+		goto out;
 
 	bat_info(hard_iface->soft_iface, "Removing interface: %s\n",
 		 hard_iface->net_dev->name);
@@ -400,11 +413,12 @@ void hardif_disable_interface(struct hard_iface *hard_iface)
 	bat_priv->num_ifaces--;
 	orig_hash_del_if(hard_iface, bat_priv->num_ifaces);
 
-	if (hard_iface == bat_priv->primary_if) {
+	primary_if = primary_if_get_selected(bat_priv);
+	if (hard_iface == primary_if) {
 		struct hard_iface *new_if;
 
 		new_if = hardif_get_active(hard_iface->soft_iface);
-		set_primary_if(bat_priv, new_if);
+		primary_if_select(bat_priv, new_if);
 
 		if (new_if)
 			hardif_free_ref(new_if);
@@ -425,6 +439,10 @@ void hardif_disable_interface(struct hard_iface *hard_iface)
 
 	hard_iface->soft_iface = NULL;
 	hardif_free_ref(hard_iface);
+
+out:
+	if (primary_if)
+		hardif_free_ref(primary_if);
 }
 
 static struct hard_iface *hardif_add_interface(struct net_device *net_dev)
@@ -432,6 +450,8 @@ static struct hard_iface *hardif_add_interface(struct net_device *net_dev)
 	struct hard_iface *hard_iface;
 	int ret;
 
+	ASSERT_RTNL();
+
 	ret = is_valid_iface(net_dev);
 	if (ret != 1)
 		goto out;
@@ -458,10 +478,7 @@ static struct hard_iface *hardif_add_interface(struct net_device *net_dev)
 	atomic_set(&hard_iface->refcount, 2);
 
 	check_known_mac_addr(hard_iface->net_dev);
-
-	spin_lock(&hardif_list_lock);
 	list_add_tail_rcu(&hard_iface->list, &hardif_list);
-	spin_unlock(&hardif_list_lock);
 
 	return hard_iface;
 
@@ -475,6 +492,8 @@ out:
 
 static void hardif_remove_interface(struct hard_iface *hard_iface)
 {
+	ASSERT_RTNL();
+
 	/* first deactivate interface */
 	if (hard_iface->if_status != IF_NOT_IN_USE)
 		hardif_disable_interface(hard_iface);
@@ -490,20 +509,11 @@ static void hardif_remove_interface(struct hard_iface *hard_iface)
 void hardif_remove_interfaces(void)
 {
 	struct hard_iface *hard_iface, *hard_iface_tmp;
-	struct list_head if_queue;
-
-	INIT_LIST_HEAD(&if_queue);
 
-	spin_lock(&hardif_list_lock);
+	rtnl_lock();
 	list_for_each_entry_safe(hard_iface, hard_iface_tmp,
 				 &hardif_list, list) {
 		list_del_rcu(&hard_iface->list);
-		list_add_tail(&hard_iface->list, &if_queue);
-	}
-	spin_unlock(&hardif_list_lock);
-
-	rtnl_lock();
-	list_for_each_entry_safe(hard_iface, hard_iface_tmp, &if_queue, list) {
 		hardif_remove_interface(hard_iface);
 	}
 	rtnl_unlock();
@@ -514,6 +524,7 @@ static int hard_if_event(struct notifier_block *this,
 {
 	struct net_device *net_dev = (struct net_device *)ptr;
 	struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
+	struct hard_iface *primary_if = NULL;
 	struct bat_priv *bat_priv;
 
 	if (!hard_iface && event == NETDEV_REGISTER)
@@ -531,9 +542,7 @@ static int hard_if_event(struct notifier_block *this,
 		hardif_deactivate_interface(hard_iface);
 		break;
 	case NETDEV_UNREGISTER:
-		spin_lock(&hardif_list_lock);
 		list_del_rcu(&hard_iface->list);
-		spin_unlock(&hardif_list_lock);
 
 		hardif_remove_interface(hard_iface);
 		break;
@@ -549,8 +558,12 @@ static int hard_if_event(struct notifier_block *this,
 		update_mac_addresses(hard_iface);
 
 		bat_priv = netdev_priv(hard_iface->soft_iface);
-		if (hard_iface == bat_priv->primary_if)
-			update_primary_addr(bat_priv);
+		primary_if = primary_if_get_selected(bat_priv);
+		if (!primary_if)
+			goto hardif_put;
+
+		if (hard_iface == primary_if)
+			primary_if_update_addr(bat_priv);
 		break;
 	default:
 		break;
@@ -559,6 +572,8 @@ static int hard_if_event(struct notifier_block *this,
 hardif_put:
 	hardif_free_ref(hard_iface);
 out:
+	if (primary_if)
+		hardif_free_ref(primary_if);
 	return NOTIFY_DONE;
 }
 
diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h
index a9ddf36e51c8..64265991460b 100644
--- a/net/batman-adv/hard-interface.h
+++ b/net/batman-adv/hard-interface.h
@@ -45,4 +45,22 @@ static inline void hardif_free_ref(struct hard_iface *hard_iface)
 	call_rcu(&hard_iface->rcu, hardif_free_rcu);
 }
 
+static inline struct hard_iface *primary_if_get_selected(
+				struct bat_priv *bat_priv)
+{
+	struct hard_iface *hard_iface;
+
+	rcu_read_lock();
+	hard_iface = rcu_dereference(bat_priv->primary_if);
+	if (!hard_iface)
+		goto out;
+
+	if (!atomic_inc_not_zero(&hard_iface->refcount))
+		hard_iface = NULL;
+
+out:
+	rcu_read_unlock();
+	return hard_iface;
+}
+
 #endif /* _NET_BATMAN_ADV_HARD_INTERFACE_H_ */
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index 34ce56c358e5..fa22ba2bb832 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -153,6 +153,7 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
 {
 	struct socket_client *socket_client = file->private_data;
 	struct bat_priv *bat_priv = socket_client->bat_priv;
+	struct hard_iface *primary_if = NULL;
 	struct sk_buff *skb;
 	struct icmp_packet_rr *icmp_packet;
 
@@ -167,15 +168,21 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
 		return -EINVAL;
 	}
 
-	if (!bat_priv->primary_if)
-		return -EFAULT;
+	primary_if = primary_if_get_selected(bat_priv);
+
+	if (!primary_if) {
+		len = -EFAULT;
+		goto out;
+	}
 
 	if (len >= sizeof(struct icmp_packet_rr))
 		packet_len = sizeof(struct icmp_packet_rr);
 
 	skb = dev_alloc_skb(packet_len + sizeof(struct ethhdr));
-	if (!skb)
-		return -ENOMEM;
+	if (!skb) {
+		len = -ENOMEM;
+		goto out;
+	}
 
 	skb_reserve(skb, sizeof(struct ethhdr));
 	icmp_packet = (struct icmp_packet_rr *)skb_put(skb, packet_len);
@@ -218,23 +225,13 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
 	if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
 		goto dst_unreach;
 
-	rcu_read_lock();
 	orig_node = orig_hash_find(bat_priv, icmp_packet->dst);
-
 	if (!orig_node)
-		goto unlock;
-
-	neigh_node = orig_node->router;
+		goto dst_unreach;
 
+	neigh_node = orig_node_get_router(orig_node);
 	if (!neigh_node)
-		goto unlock;
-
-	if (!atomic_inc_not_zero(&neigh_node->refcount)) {
-		neigh_node = NULL;
-		goto unlock;
-	}
-
-	rcu_read_unlock();
+		goto dst_unreach;
 
 	if (!neigh_node->if_incoming)
 		goto dst_unreach;
@@ -243,7 +240,7 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
 		goto dst_unreach;
 
 	memcpy(icmp_packet->orig,
-	       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
+	       primary_if->net_dev->dev_addr, ETH_ALEN);
 
 	if (packet_len == sizeof(struct icmp_packet_rr))
 		memcpy(icmp_packet->rr,
@@ -252,14 +249,14 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
 	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
 	goto out;
 
-unlock:
-	rcu_read_unlock();
 dst_unreach:
 	icmp_packet->msg_type = DESTINATION_UNREACHABLE;
 	bat_socket_add_packet(socket_client, icmp_packet, packet_len);
 free_skb:
 	kfree_skb(skb);
 out:
+	if (primary_if)
+		hardif_free_ref(primary_if);
 	if (neigh_node)
 		neigh_node_free_ref(neigh_node);
 	if (orig_node)
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 709b33bbdf43..0a7cee0076f4 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -33,6 +33,9 @@
 #include "vis.h"
 #include "hash.h"
 
+
+/* List manipulations on hardif_list have to be rtnl_lock()'ed,
+ * list traversals just rcu-locked */
 struct list_head hardif_list;
 
 unsigned char broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
@@ -81,28 +84,29 @@ int mesh_init(struct net_device *soft_iface)
 
 	spin_lock_init(&bat_priv->forw_bat_list_lock);
 	spin_lock_init(&bat_priv->forw_bcast_list_lock);
-	spin_lock_init(&bat_priv->hna_lhash_lock);
-	spin_lock_init(&bat_priv->hna_ghash_lock);
+	spin_lock_init(&bat_priv->tt_lhash_lock);
+	spin_lock_init(&bat_priv->tt_ghash_lock);
 	spin_lock_init(&bat_priv->gw_list_lock);
 	spin_lock_init(&bat_priv->vis_hash_lock);
 	spin_lock_init(&bat_priv->vis_list_lock);
 	spin_lock_init(&bat_priv->softif_neigh_lock);
+	spin_lock_init(&bat_priv->softif_neigh_vid_lock);
 
 	INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
 	INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
 	INIT_HLIST_HEAD(&bat_priv->gw_list);
-	INIT_HLIST_HEAD(&bat_priv->softif_neigh_list);
+	INIT_HLIST_HEAD(&bat_priv->softif_neigh_vids);
 
 	if (originator_init(bat_priv) < 1)
 		goto err;
 
-	if (hna_local_init(bat_priv) < 1)
+	if (tt_local_init(bat_priv) < 1)
 		goto err;
 
-	if (hna_global_init(bat_priv) < 1)
+	if (tt_global_init(bat_priv) < 1)
 		goto err;
 
-	hna_local_add(soft_iface, soft_iface->dev_addr);
+	tt_local_add(soft_iface, soft_iface->dev_addr);
 
 	if (vis_init(bat_priv) < 1)
 		goto err;
@@ -133,8 +137,8 @@ void mesh_free(struct net_device *soft_iface)
 	gw_node_purge(bat_priv);
 	originator_free(bat_priv);
 
-	hna_local_free(bat_priv);
-	hna_global_free(bat_priv);
+	tt_local_free(bat_priv);
+	tt_global_free(bat_priv);
 
 	softif_neigh_purge(bat_priv);
 
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index dc248697de71..148b49e02642 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -34,16 +34,18 @@
 
 #define TQ_MAX_VALUE 255
 #define JITTER 20
-#define TTL 50 /* Time To Live of broadcast messages */
 
-#define PURGE_TIMEOUT 200 /* purge originators after time in seconds if no
-			   * valid packet comes in -> TODO: check
-			   * influence on TQ_LOCAL_WINDOW_SIZE */
-#define LOCAL_HNA_TIMEOUT 3600 /* in seconds */
+/* Time To Live of broadcast messages */
+#define TTL 50
 
-#define TQ_LOCAL_WINDOW_SIZE 64 /* sliding packet range of received originator
-				 * messages in squence numbers (should be a
-				 * multiple of our word size) */
+/* purge originators after time in seconds if no valid packet comes in
+ * -> TODO: check influence on TQ_LOCAL_WINDOW_SIZE */
+#define PURGE_TIMEOUT 200
+#define TT_LOCAL_TIMEOUT 3600 /* in seconds */
+
+/* sliding packet range of received originator messages in squence numbers
+ * (should be a multiple of our word size) */
+#define TQ_LOCAL_WINDOW_SIZE 64
 #define TQ_GLOBAL_WINDOW_SIZE 5
 #define TQ_LOCAL_BIDRECT_SEND_MINIMUM 1
 #define TQ_LOCAL_BIDRECT_RECV_MINIMUM 1
@@ -55,21 +57,20 @@
 
 #define VIS_INTERVAL 5000 /* 5 seconds */
 
-/* how much worse secondary interfaces may be to
- * to be considered as bonding candidates */
-
+/* how much worse secondary interfaces may be to be considered as bonding
+ * candidates */
 #define BONDING_TQ_THRESHOLD 50
 
-#define MAX_AGGREGATION_BYTES 512 /* should not be bigger than 512 bytes or
-				   * change the size of
-				   * forw_packet->direct_link_flags */
+/* should not be bigger than 512 bytes or change the size of
+ * forw_packet->direct_link_flags */
+#define MAX_AGGREGATION_BYTES 512
 #define MAX_AGGREGATION_MS 100
 
 #define SOFTIF_NEIGH_TIMEOUT 180000 /* 3 minutes */
 
+/* don't reset again within 30 seconds */
 #define RESET_PROTECTION_MS 30000
 #define EXPECTED_SEQNO_RANGE 65536
-/* don't reset again within 30 seconds */
 
 #define MESH_INACTIVE 0
 #define MESH_ACTIVE 1
@@ -84,12 +85,13 @@
 #ifdef pr_fmt
 #undef pr_fmt
 #endif
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* Append 'batman-adv: ' before
-					     * kernel messages */
+/* Append 'batman-adv: ' before kernel messages */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-#define DBG_BATMAN 1 /* all messages related to routing / flooding /
-		      * broadcasting / etc */
-#define DBG_ROUTES 2 /* route or hna added / changed / deleted */
+/* all messages related to routing / flooding / broadcasting / etc */
+#define DBG_BATMAN 1
+/* route or tt entry added / changed / deleted */
+#define DBG_ROUTES 2
 #define DBG_ALL 3
 
 
@@ -175,4 +177,6 @@ static inline int compare_eth(void *data1, void *data2)
 	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
 }
 
+#define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
+
 #endif /* _NET_BATMAN_ADV_MAIN_H_ */
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index ed23a5895d6c..40a30bbcd147 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -19,8 +19,6 @@
  *
  */
 
-/* increase the reference counter for this originator */
-
 #include "main.h"
 #include "originator.h"
 #include "hash.h"
@@ -62,6 +60,21 @@ void neigh_node_free_ref(struct neigh_node *neigh_node)
 	kfree_rcu(neigh_node, rcu);
 }
 
+/* increases the refcounter of a found router */
+struct neigh_node *orig_node_get_router(struct orig_node *orig_node)
+{
+	struct neigh_node *router;
+
+	rcu_read_lock();
+	router = rcu_dereference(orig_node->router);
+
+	if (router && !atomic_inc_not_zero(&router->refcount))
+		router = NULL;
+
+	rcu_read_unlock();
+	return router;
+}
+
 struct neigh_node *create_neighbor(struct orig_node *orig_node,
 				   struct orig_node *orig_neigh_node,
 				   uint8_t *neigh,
@@ -79,6 +92,7 @@ struct neigh_node *create_neighbor(struct orig_node *orig_node,
 
 	INIT_HLIST_NODE(&neigh_node->list);
 	INIT_LIST_HEAD(&neigh_node->bonding_list);
+	spin_lock_init(&neigh_node->tq_lock);
 
 	memcpy(neigh_node->addr, neigh, ETH_ALEN);
 	neigh_node->orig_node = orig_neigh_node;
@@ -120,7 +134,7 @@ static void orig_node_free_rcu(struct rcu_head *rcu)
 	spin_unlock_bh(&orig_node->neigh_list_lock);
 
 	frag_list_free(&orig_node->frag_list);
-	hna_global_del_orig(orig_node->bat_priv, orig_node,
-			    "originator timed out");
+	tt_global_del_orig(orig_node->bat_priv, orig_node,
+			   "originator timed out");
 
 	kfree(orig_node->bcast_own);
@@ -198,7 +212,7 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr)
 	orig_node->bat_priv = bat_priv;
 	memcpy(orig_node->orig, addr, ETH_ALEN);
 	orig_node->router = NULL;
-	orig_node->hna_buff = NULL;
+	orig_node->tt_buff = NULL;
 	orig_node->bcast_seqno_reset = jiffies - 1
 					- msecs_to_jiffies(RESET_PROTECTION_MS);
 	orig_node->batman_seqno_reset = jiffies - 1
@@ -309,8 +323,8 @@ static bool purge_orig_node(struct bat_priv *bat_priv,
 				&best_neigh_node)) {
 			update_routes(bat_priv, orig_node,
 				      best_neigh_node,
-				      orig_node->hna_buff,
-				      orig_node->hna_buff_len);
+				      orig_node->tt_buff,
+				      orig_node->tt_buff_len);
 		}
 	}
 
@@ -381,29 +395,34 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
 	struct hashtable_t *hash = bat_priv->orig_hash;
 	struct hlist_node *node, *node_tmp;
 	struct hlist_head *head;
+	struct hard_iface *primary_if;
 	struct orig_node *orig_node;
-	struct neigh_node *neigh_node;
+	struct neigh_node *neigh_node, *neigh_node_tmp;
 	int batman_count = 0;
 	int last_seen_secs;
 	int last_seen_msecs;
-	int i;
+	int i, ret = 0;
 
-	if ((!bat_priv->primary_if) ||
-	    (bat_priv->primary_if->if_status != IF_ACTIVE)) {
-		if (!bat_priv->primary_if)
-			return seq_printf(seq, "BATMAN mesh %s disabled - "
-				"please specify interfaces to enable it\n",
-				net_dev->name);
+	primary_if = primary_if_get_selected(bat_priv);
+
+	if (!primary_if) {
+		ret = seq_printf(seq, "BATMAN mesh %s disabled - "
+				 "please specify interfaces to enable it\n",
+				 net_dev->name);
+		goto out;
+	}
 
-		return seq_printf(seq, "BATMAN mesh %s "
-				  "disabled - primary interface not active\n",
-				  net_dev->name);
+	if (primary_if->if_status != IF_ACTIVE) {
+		ret = seq_printf(seq, "BATMAN mesh %s "
+				 "disabled - primary interface not active\n",
+				 net_dev->name);
+		goto out;
 	}
 
 	seq_printf(seq, "[B.A.T.M.A.N. adv %s%s, MainIF/MAC: %s/%pM (%s)]\n",
 		   SOURCE_VERSION, REVISION_VERSION_STR,
-		   bat_priv->primary_if->net_dev->name,
-		   bat_priv->primary_if->net_dev->dev_addr, net_dev->name);
+		   primary_if->net_dev->name,
+		   primary_if->net_dev->dev_addr, net_dev->name);
 	seq_printf(seq, " %-15s %s (%s/%i) %17s [%10s]: %20s ...\n",
 		   "Originator", "last-seen", "#", TQ_MAX_VALUE, "Nexthop",
 		   "outgoingIF", "Potential nexthops");
@@ -413,40 +432,47 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
 
 		rcu_read_lock();
 		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
-			if (!orig_node->router)
+			neigh_node = orig_node_get_router(orig_node);
+			if (!neigh_node)
 				continue;
 
-			if (orig_node->router->tq_avg == 0)
-				continue;
+			if (neigh_node->tq_avg == 0)
+				goto next;
 
 			last_seen_secs = jiffies_to_msecs(jiffies -
 						orig_node->last_valid) / 1000;
 			last_seen_msecs = jiffies_to_msecs(jiffies -
 						orig_node->last_valid) % 1000;
 
-			neigh_node = orig_node->router;
 			seq_printf(seq, "%pM %4i.%03is (%3i) %pM [%10s]:",
 				   orig_node->orig, last_seen_secs,
 				   last_seen_msecs, neigh_node->tq_avg,
 				   neigh_node->addr,
 				   neigh_node->if_incoming->net_dev->name);
 
-			hlist_for_each_entry_rcu(neigh_node, node_tmp,
+			hlist_for_each_entry_rcu(neigh_node_tmp, node_tmp,
 						 &orig_node->neigh_list, list) {
-				seq_printf(seq, " %pM (%3i)", neigh_node->addr,
-					   neigh_node->tq_avg);
+				seq_printf(seq, " %pM (%3i)",
+					   neigh_node_tmp->addr,
+					   neigh_node_tmp->tq_avg);
 			}
 
 			seq_printf(seq, "\n");
 			batman_count++;
+
+next:
+			neigh_node_free_ref(neigh_node);
 		}
 		rcu_read_unlock();
 	}
 
-	if ((batman_count == 0))
+	if (batman_count == 0)
 		seq_printf(seq, "No batman nodes in range ...\n");
 
-	return 0;
+out:
+	if (primary_if)
+		hardif_free_ref(primary_if);
+	return ret;
 }
 
 static int orig_node_add_if(struct orig_node *orig_node, int max_if_num)
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index 5cc011057da1..e1d641f27aa9 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -34,6 +34,7 @@ struct neigh_node *create_neighbor(struct orig_node *orig_node,
 				   uint8_t *neigh,
 				   struct hard_iface *if_incoming);
 void neigh_node_free_ref(struct neigh_node *neigh_node);
+struct neigh_node *orig_node_get_router(struct orig_node *orig_node);
 int orig_seq_print_text(struct seq_file *seq, void *offset);
 int orig_hash_add_if(struct hard_iface *hard_iface, int max_if_num);
 int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num);
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index e7571879af3f..eda99650e9f8 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -61,7 +61,7 @@ struct batman_packet {
 	uint8_t orig[6];
 	uint8_t prev_sender[6];
 	uint8_t ttl;
-	uint8_t num_hna;
+	uint8_t num_tt;
 	uint8_t gw_flags; /* flags related to gateway class */
 	uint8_t align;
 } __packed;
@@ -128,8 +128,7 @@ struct vis_packet {
 	uint8_t entries; /* number of entries behind this struct */
 	uint32_t seqno; /* sequence number */
 	uint8_t ttl; /* TTL */
-	uint8_t vis_orig[6]; /* originator that informs about its
-			      * neighbors */
+	uint8_t vis_orig[6]; /* originator that announces its neighbors */
 	uint8_t target_orig[6]; /* who should receive this packet */
 	uint8_t sender_orig[6]; /* who sent or rebroadcasted this packet */
 } __packed;
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index c172f5d0e05a..bb1c3ec7e3ff 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -64,80 +64,97 @@ void slide_own_bcast_window(struct hard_iface *hard_iface)
64 } 64 }
65} 65}
66 66
67static void update_HNA(struct bat_priv *bat_priv, struct orig_node *orig_node, 67static void update_TT(struct bat_priv *bat_priv, struct orig_node *orig_node,
68 unsigned char *hna_buff, int hna_buff_len) 68 unsigned char *tt_buff, int tt_buff_len)
69{ 69{
70 if ((hna_buff_len != orig_node->hna_buff_len) || 70 if ((tt_buff_len != orig_node->tt_buff_len) ||
71 ((hna_buff_len > 0) && 71 ((tt_buff_len > 0) &&
72 (orig_node->hna_buff_len > 0) && 72 (orig_node->tt_buff_len > 0) &&
73 (memcmp(orig_node->hna_buff, hna_buff, hna_buff_len) != 0))) { 73 (memcmp(orig_node->tt_buff, tt_buff, tt_buff_len) != 0))) {
74 74
75 if (orig_node->hna_buff_len > 0) 75 if (orig_node->tt_buff_len > 0)
76 hna_global_del_orig(bat_priv, orig_node, 76 tt_global_del_orig(bat_priv, orig_node,
77 "originator changed hna"); 77 "originator changed tt");
78 78
79 if ((hna_buff_len > 0) && (hna_buff)) 79 if ((tt_buff_len > 0) && (tt_buff))
80 hna_global_add_orig(bat_priv, orig_node, 80 tt_global_add_orig(bat_priv, orig_node,
81 hna_buff, hna_buff_len); 81 tt_buff, tt_buff_len);
82 } 82 }
83} 83}
84 84
85static void update_route(struct bat_priv *bat_priv, 85static void update_route(struct bat_priv *bat_priv,
86 struct orig_node *orig_node, 86 struct orig_node *orig_node,
87 struct neigh_node *neigh_node, 87 struct neigh_node *neigh_node,
88 unsigned char *hna_buff, int hna_buff_len) 88 unsigned char *tt_buff, int tt_buff_len)
89{ 89{
90 struct neigh_node *neigh_node_tmp; 90 struct neigh_node *curr_router;
91
92 curr_router = orig_node_get_router(orig_node);
91 93
92 /* route deleted */ 94 /* route deleted */
93 if ((orig_node->router) && (!neigh_node)) { 95 if ((curr_router) && (!neigh_node)) {
94 96
95 bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n", 97 bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n",
96 orig_node->orig); 98 orig_node->orig);
97 hna_global_del_orig(bat_priv, orig_node, 99 tt_global_del_orig(bat_priv, orig_node,
98 "originator timed out"); 100 "originator timed out");
99 101
100 /* route added */ 102 /* route added */
101 } else if ((!orig_node->router) && (neigh_node)) { 103 } else if ((!curr_router) && (neigh_node)) {
102 104
103 bat_dbg(DBG_ROUTES, bat_priv, 105 bat_dbg(DBG_ROUTES, bat_priv,
104 "Adding route towards: %pM (via %pM)\n", 106 "Adding route towards: %pM (via %pM)\n",
105 orig_node->orig, neigh_node->addr); 107 orig_node->orig, neigh_node->addr);
106 hna_global_add_orig(bat_priv, orig_node, 108 tt_global_add_orig(bat_priv, orig_node,
107 hna_buff, hna_buff_len); 109 tt_buff, tt_buff_len);
108 110
109 /* route changed */ 111 /* route changed */
110 } else { 112 } else {
111 bat_dbg(DBG_ROUTES, bat_priv, 113 bat_dbg(DBG_ROUTES, bat_priv,
112 "Changing route towards: %pM " 114 "Changing route towards: %pM "
113 "(now via %pM - was via %pM)\n", 115 "(now via %pM - was via %pM)\n",
114 orig_node->orig, neigh_node->addr, 116 orig_node->orig, neigh_node->addr,
115 orig_node->router->addr); 117 curr_router->addr);
116 } 118 }
117 119
120 if (curr_router)
121 neigh_node_free_ref(curr_router);
122
123 /* increase refcount of new best neighbor */
118 if (neigh_node && !atomic_inc_not_zero(&neigh_node->refcount)) 124 if (neigh_node && !atomic_inc_not_zero(&neigh_node->refcount))
119 neigh_node = NULL; 125 neigh_node = NULL;
120 neigh_node_tmp = orig_node->router; 126
121 orig_node->router = neigh_node; 127 spin_lock_bh(&orig_node->neigh_list_lock);
122 if (neigh_node_tmp) 128 rcu_assign_pointer(orig_node->router, neigh_node);
123 neigh_node_free_ref(neigh_node_tmp); 129 spin_unlock_bh(&orig_node->neigh_list_lock);
130
131 /* decrease refcount of previous best neighbor */
132 if (curr_router)
133 neigh_node_free_ref(curr_router);
124} 134}
125 135
126 136
127void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node, 137void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
128 struct neigh_node *neigh_node, unsigned char *hna_buff, 138 struct neigh_node *neigh_node, unsigned char *tt_buff,
129 int hna_buff_len) 139 int tt_buff_len)
130{ 140{
141 struct neigh_node *router = NULL;
131 142
132 if (!orig_node) 143 if (!orig_node)
133 return; 144 goto out;
145
146 router = orig_node_get_router(orig_node);
134 147
135 if (orig_node->router != neigh_node) 148 if (router != neigh_node)
136 update_route(bat_priv, orig_node, neigh_node, 149 update_route(bat_priv, orig_node, neigh_node,
137 hna_buff, hna_buff_len); 150 tt_buff, tt_buff_len);
138 /* may be just HNA changed */ 151 /* may be just TT changed */
139 else 152 else
140 update_HNA(bat_priv, orig_node, hna_buff, hna_buff_len); 153 update_TT(bat_priv, orig_node, tt_buff, tt_buff_len);
154
155out:
156 if (router)
157 neigh_node_free_ref(router);
141} 158}
142 159
143static int is_bidirectional_neigh(struct orig_node *orig_node, 160static int is_bidirectional_neigh(struct orig_node *orig_node,
@@ -152,65 +169,41 @@ static int is_bidirectional_neigh(struct orig_node *orig_node,
 	uint8_t orig_eq_count, neigh_rq_count, tq_own;
 	int tq_asym_penalty, ret = 0;
 
-	if (orig_node == orig_neigh_node) {
-		rcu_read_lock();
-		hlist_for_each_entry_rcu(tmp_neigh_node, node,
-					 &orig_node->neigh_list, list) {
-
-			if (!compare_eth(tmp_neigh_node->addr,
-					 orig_neigh_node->orig))
-				continue;
-
-			if (tmp_neigh_node->if_incoming != if_incoming)
-				continue;
-
-			if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
-				continue;
-
-			neigh_node = tmp_neigh_node;
-		}
-		rcu_read_unlock();
-
-		if (!neigh_node)
-			neigh_node = create_neighbor(orig_node,
-						     orig_neigh_node,
-						     orig_neigh_node->orig,
-						     if_incoming);
-		if (!neigh_node)
-			goto out;
-
-		neigh_node->last_valid = jiffies;
-	} else {
-		/* find packet count of corresponding one hop neighbor */
-		rcu_read_lock();
-		hlist_for_each_entry_rcu(tmp_neigh_node, node,
-					 &orig_neigh_node->neigh_list, list) {
-
-			if (!compare_eth(tmp_neigh_node->addr,
-					 orig_neigh_node->orig))
-				continue;
-
-			if (tmp_neigh_node->if_incoming != if_incoming)
-				continue;
-
-			if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
-				continue;
-
-			neigh_node = tmp_neigh_node;
-		}
-		rcu_read_unlock();
-
-		if (!neigh_node)
-			neigh_node = create_neighbor(orig_neigh_node,
-						     orig_neigh_node,
-						     orig_neigh_node->orig,
-						     if_incoming);
-		if (!neigh_node)
-			goto out;
-	}
+	/* find corresponding one hop neighbor */
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(tmp_neigh_node, node,
+				 &orig_neigh_node->neigh_list, list) {
+
+		if (!compare_eth(tmp_neigh_node->addr, orig_neigh_node->orig))
+			continue;
+
+		if (tmp_neigh_node->if_incoming != if_incoming)
+			continue;
+
+		if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
+			continue;
+
+		neigh_node = tmp_neigh_node;
+		break;
+	}
+	rcu_read_unlock();
+
+	if (!neigh_node)
+		neigh_node = create_neighbor(orig_neigh_node,
+					     orig_neigh_node,
+					     orig_neigh_node->orig,
+					     if_incoming);
+
+	if (!neigh_node)
+		goto out;
+
+	/* if orig_node is direct neighbour update neigh_node last_valid */
+	if (orig_node == orig_neigh_node)
+		neigh_node->last_valid = jiffies;
 
 	orig_node->last_valid = jiffies;
 
+	/* find packet count of corresponding one hop neighbor */
 	spin_lock_bh(&orig_node->ogm_cnt_lock);
 	orig_eq_count = orig_neigh_node->bcast_own_sum[if_incoming->if_num];
 	neigh_rq_count = neigh_node->real_packet_count;
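
[Editor's note] The merged lookup loop above is the idiom this series uses throughout: walk an RCU-protected list and take a reference with atomic_inc_not_zero() before leaving the read-side critical section. A condensed, self-contained sketch (my_neigh and the field names are illustrative):

    #include <linux/rcupdate.h>
    #include <linux/list.h>
    #include <linux/if_ether.h>

    struct my_neigh {
            struct hlist_node list;
            uint8_t addr[ETH_ALEN];
            atomic_t refcount;
    };

    /* Return a referenced entry or NULL.  atomic_inc_not_zero() refuses to
     * resurrect an entry whose refcount already dropped to zero (i.e. one
     * about to be freed), so the caller may keep using the result after
     * rcu_read_unlock(). */
    static struct my_neigh *lookup_neigh(struct hlist_head *head,
                                         const uint8_t *addr)
    {
            struct my_neigh *tmp, *found = NULL;
            struct hlist_node *node;

            rcu_read_lock();
            hlist_for_each_entry_rcu(tmp, node, head, list) {
                    if (memcmp(tmp->addr, addr, ETH_ALEN) != 0)
                            continue;

                    if (!atomic_inc_not_zero(&tmp->refcount))
                            continue;

                    found = tmp;
                    break;
            }
            rcu_read_unlock();

            return found;
    }
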
@@ -288,8 +281,8 @@ static void bonding_candidate_add(struct orig_node *orig_node,
 			  struct neigh_node *neigh_node)
 {
 	struct hlist_node *node;
-	struct neigh_node *tmp_neigh_node;
-	uint8_t best_tq, interference_candidate = 0;
+	struct neigh_node *tmp_neigh_node, *router = NULL;
+	uint8_t interference_candidate = 0;
 
 	spin_lock_bh(&orig_node->neigh_list_lock);
 
@@ -298,13 +291,12 @@ static void bonding_candidate_add(struct orig_node *orig_node,
 		    neigh_node->orig_node->primary_addr))
 		goto candidate_del;
 
-	if (!orig_node->router)
+	router = orig_node_get_router(orig_node);
+	if (!router)
 		goto candidate_del;
 
-	best_tq = orig_node->router->tq_avg;
-
 	/* ... and is good enough to be considered */
-	if (neigh_node->tq_avg < best_tq - BONDING_TQ_THRESHOLD)
+	if (neigh_node->tq_avg < router->tq_avg - BONDING_TQ_THRESHOLD)
 		goto candidate_del;
 
 	/**
@@ -350,7 +342,9 @@ candidate_del:
 
 out:
 	spin_unlock_bh(&orig_node->neigh_list_lock);
-	return;
+
+	if (router)
+		neigh_node_free_ref(router);
 }
 
 /* copy primary address for bonding */
@@ -369,13 +363,14 @@ static void update_orig(struct bat_priv *bat_priv,
 			struct ethhdr *ethhdr,
 			struct batman_packet *batman_packet,
 			struct hard_iface *if_incoming,
-			unsigned char *hna_buff, int hna_buff_len,
+			unsigned char *tt_buff, int tt_buff_len,
 			char is_duplicate)
 {
 	struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
+	struct neigh_node *router = NULL;
 	struct orig_node *orig_node_tmp;
 	struct hlist_node *node;
-	int tmp_hna_buff_len;
+	int tmp_tt_buff_len;
 	uint8_t bcast_own_sum_orig, bcast_own_sum_neigh;
 
 	bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): "
@@ -396,10 +391,12 @@ static void update_orig(struct bat_priv *bat_priv,
 		if (is_duplicate)
 			continue;
 
+		spin_lock_bh(&tmp_neigh_node->tq_lock);
 		ring_buffer_set(tmp_neigh_node->tq_recv,
 				&tmp_neigh_node->tq_index, 0);
 		tmp_neigh_node->tq_avg =
 			ring_buffer_avg(tmp_neigh_node->tq_recv);
+		spin_unlock_bh(&tmp_neigh_node->tq_lock);
 	}
 
 	if (!neigh_node) {
@@ -424,10 +421,12 @@ static void update_orig(struct bat_priv *bat_priv,
 	orig_node->flags = batman_packet->flags;
 	neigh_node->last_valid = jiffies;
 
+	spin_lock_bh(&neigh_node->tq_lock);
 	ring_buffer_set(neigh_node->tq_recv,
 			&neigh_node->tq_index,
 			batman_packet->tq);
 	neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv);
+	spin_unlock_bh(&neigh_node->tq_lock);
 
 	if (!is_duplicate) {
 		orig_node->last_ttl = batman_packet->ttl;
@@ -436,24 +435,23 @@ static void update_orig(struct bat_priv *bat_priv,
 
 	bonding_candidate_add(orig_node, neigh_node);
 
-	tmp_hna_buff_len = (hna_buff_len > batman_packet->num_hna * ETH_ALEN ?
-			    batman_packet->num_hna * ETH_ALEN : hna_buff_len);
+	tmp_tt_buff_len = (tt_buff_len > batman_packet->num_tt * ETH_ALEN ?
+			   batman_packet->num_tt * ETH_ALEN : tt_buff_len);
 
 	/* if this neighbor already is our next hop there is nothing
 	 * to change */
-	if (orig_node->router == neigh_node)
-		goto update_hna;
+	router = orig_node_get_router(orig_node);
+	if (router == neigh_node)
+		goto update_tt;
 
 	/* if this neighbor does not offer a better TQ we won't consider it */
-	if ((orig_node->router) &&
-	    (orig_node->router->tq_avg > neigh_node->tq_avg))
-		goto update_hna;
+	if (router && (router->tq_avg > neigh_node->tq_avg))
+		goto update_tt;
 
 	/* if the TQ is the same and the link not more symmetric we
 	 * won't consider it either */
-	if ((orig_node->router) &&
-	    (neigh_node->tq_avg == orig_node->router->tq_avg)) {
-		orig_node_tmp = orig_node->router->orig_node;
+	if (router && (neigh_node->tq_avg == router->tq_avg)) {
+		orig_node_tmp = router->orig_node;
 		spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
 		bcast_own_sum_orig =
 			orig_node_tmp->bcast_own_sum[if_incoming->if_num];
@@ -466,16 +464,16 @@ static void update_orig(struct bat_priv *bat_priv,
 		spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
 
 		if (bcast_own_sum_orig >= bcast_own_sum_neigh)
-			goto update_hna;
+			goto update_tt;
 	}
 
 	update_routes(bat_priv, orig_node, neigh_node,
-		      hna_buff, tmp_hna_buff_len);
+		      tt_buff, tmp_tt_buff_len);
 	goto update_gw;
 
-update_hna:
-	update_routes(bat_priv, orig_node, orig_node->router,
-		      hna_buff, tmp_hna_buff_len);
+update_tt:
+	update_routes(bat_priv, orig_node, router,
+		      tt_buff, tmp_tt_buff_len);
 
 update_gw:
 	if (orig_node->gw_flags != batman_packet->gw_flags)
@@ -496,6 +494,8 @@ unlock:
 out:
 	if (neigh_node)
 		neigh_node_free_ref(neigh_node);
+	if (router)
+		neigh_node_free_ref(router);
 }
 
 /* checks whether the host restarted and is in the protection time.
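
[Editor's note] The tq_lock added in the hunks above closes a small race: storing the new TQ sample and recomputing the sliding average are two steps, and two OGMs for the same neighbor could interleave them. A sketch of the locked update; the window size and the averaging rule (average over non-zero samples) follow the batman-adv ring buffer helpers but are illustrative here:

    #define TQ_WINDOW_SIZE 5                       /* illustrative */

    struct tq_ring {
            uint8_t recv[TQ_WINDOW_SIZE];
            uint8_t index;
            uint8_t avg;
            spinlock_t lock;
    };

    static uint8_t tq_ring_avg(const uint8_t *ring)
    {
            unsigned int i, count = 0, sum = 0;

            for (i = 0; i < TQ_WINDOW_SIZE; i++) {
                    if (ring[i] == 0)
                            continue;
                    count++;
                    sum += ring[i];
            }

            return count ? sum / count : 0;
    }

    static void tq_ring_add(struct tq_ring *tq, uint8_t sample)
    {
            spin_lock_bh(&tq->lock);
            tq->recv[tq->index] = sample;
            tq->index = (tq->index + 1) % TQ_WINDOW_SIZE;
            tq->avg = tq_ring_avg(tq->recv);       /* both steps under one lock */
            spin_unlock_bh(&tq->lock);
    }
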
@@ -597,12 +597,14 @@ out:
 
 void receive_bat_packet(struct ethhdr *ethhdr,
 			struct batman_packet *batman_packet,
-			unsigned char *hna_buff, int hna_buff_len,
+			unsigned char *tt_buff, int tt_buff_len,
 			struct hard_iface *if_incoming)
 {
 	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
 	struct hard_iface *hard_iface;
 	struct orig_node *orig_neigh_node, *orig_node;
+	struct neigh_node *router = NULL, *router_router = NULL;
+	struct neigh_node *orig_neigh_router = NULL;
 	char has_directlink_flag;
 	char is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
 	char is_broadcast = 0, is_bidirectional, is_single_hop_neigh;
@@ -747,14 +749,15 @@ void receive_bat_packet(struct ethhdr *ethhdr,
 		goto out;
 	}
 
+	router = orig_node_get_router(orig_node);
+	if (router)
+		router_router = orig_node_get_router(router->orig_node);
+
 	/* avoid temporary routing loops */
-	if ((orig_node->router) &&
-	    (orig_node->router->orig_node->router) &&
-	    (compare_eth(orig_node->router->addr,
-			 batman_packet->prev_sender)) &&
+	if (router && router_router &&
+	    (compare_eth(router->addr, batman_packet->prev_sender)) &&
 	    !(compare_eth(batman_packet->orig, batman_packet->prev_sender)) &&
-	    (compare_eth(orig_node->router->addr,
-			 orig_node->router->orig_node->router->addr))) {
+	    (compare_eth(router->addr, router_router->addr))) {
 		bat_dbg(DBG_BATMAN, bat_priv,
 			"Drop packet: ignoring all rebroadcast packets that "
 			"may make me loop (sender: %pM)\n", ethhdr->h_source);
@@ -769,9 +772,11 @@ void receive_bat_packet(struct ethhdr *ethhdr,
 	if (!orig_neigh_node)
 		goto out;
 
+	orig_neigh_router = orig_node_get_router(orig_neigh_node);
+
 	/* drop packet if sender is not a direct neighbor and if we
 	 * don't route towards it */
-	if (!is_single_hop_neigh && (!orig_neigh_node->router)) {
+	if (!is_single_hop_neigh && (!orig_neigh_router)) {
 		bat_dbg(DBG_BATMAN, bat_priv,
 			"Drop packet: OGM via unknown neighbor!\n");
 		goto out_neigh;
@@ -789,14 +794,14 @@ void receive_bat_packet(struct ethhdr *ethhdr,
 	    ((orig_node->last_real_seqno == batman_packet->seqno) &&
 	     (orig_node->last_ttl - 3 <= batman_packet->ttl))))
 		update_orig(bat_priv, orig_node, ethhdr, batman_packet,
-			    if_incoming, hna_buff, hna_buff_len, is_duplicate);
+			    if_incoming, tt_buff, tt_buff_len, is_duplicate);
 
 	/* is single hop (direct) neighbor */
 	if (is_single_hop_neigh) {
 
 		/* mark direct link on incoming interface */
 		schedule_forward_packet(orig_node, ethhdr, batman_packet,
-					1, hna_buff_len, if_incoming);
+					1, tt_buff_len, if_incoming);
 
 		bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: "
 			"rebroadcast neighbor packet with direct link flag\n");
@@ -819,12 +824,19 @@ void receive_bat_packet(struct ethhdr *ethhdr,
 	bat_dbg(DBG_BATMAN, bat_priv,
 		"Forwarding packet: rebroadcast originator packet\n");
 	schedule_forward_packet(orig_node, ethhdr, batman_packet,
-				0, hna_buff_len, if_incoming);
+				0, tt_buff_len, if_incoming);
 
 out_neigh:
 	if ((orig_neigh_node) && (!is_single_hop_neigh))
 		orig_node_free_ref(orig_neigh_node);
 out:
+	if (router)
+		neigh_node_free_ref(router);
+	if (router_router)
+		neigh_node_free_ref(router_router);
+	if (orig_neigh_router)
+		neigh_node_free_ref(orig_neigh_router);
+
 	orig_node_free_ref(orig_node);
 }
 
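[Editor's note] With router and router_router taken once and referenced, the rebroadcast-loop test becomes a readable three-part condition. Pulled out as a standalone predicate it would read as follows (would_loop() is illustrative; compare_eth() is batman-adv's "Ethernet addresses equal" helper, returning non-zero on a match):

    /* Drop a rebroadcast OGM that would bounce straight back through us:
     * 'router' is our next hop towards the originator, 'router_router'
     * that router's own next hop; both already referenced by the caller. */
    static bool would_loop(const struct batman_packet *packet,
                           const struct neigh_node *router,
                           const struct neigh_node *router_router)
    {
            if (!router || !router_router)
                    return false;

            /* the previous sender must be our own next hop ... */
            if (!compare_eth(router->addr, packet->prev_sender))
                    return false;

            /* ... the packet must be past its first hop ... */
            if (compare_eth(packet->orig, packet->prev_sender))
                    return false;

            /* ... and that next hop must route back via itself */
            return compare_eth(router->addr, router_router->addr);
    }
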
@@ -868,8 +880,9 @@ int recv_bat_packet(struct sk_buff *skb, struct hard_iface *hard_iface)
 static int recv_my_icmp_packet(struct bat_priv *bat_priv,
 			       struct sk_buff *skb, size_t icmp_len)
 {
+	struct hard_iface *primary_if = NULL;
 	struct orig_node *orig_node = NULL;
-	struct neigh_node *neigh_node = NULL;
+	struct neigh_node *router = NULL;
 	struct icmp_packet_rr *icmp_packet;
 	int ret = NET_RX_DROP;
 
@@ -881,28 +894,19 @@ static int recv_my_icmp_packet(struct bat_priv *bat_priv,
 		goto out;
 	}
 
-	if (!bat_priv->primary_if)
+	primary_if = primary_if_get_selected(bat_priv);
+	if (!primary_if)
 		goto out;
 
 	/* answer echo request (ping) */
 	/* get routing information */
-	rcu_read_lock();
 	orig_node = orig_hash_find(bat_priv, icmp_packet->orig);
-
 	if (!orig_node)
-		goto unlock;
-
-	neigh_node = orig_node->router;
-
-	if (!neigh_node)
-		goto unlock;
-
-	if (!atomic_inc_not_zero(&neigh_node->refcount)) {
-		neigh_node = NULL;
-		goto unlock;
-	}
+		goto out;
 
-	rcu_read_unlock();
+	router = orig_node_get_router(orig_node);
+	if (!router)
+		goto out;
 
 	/* create a copy of the skb, if needed, to modify it. */
 	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
@@ -911,20 +915,18 @@ static int recv_my_icmp_packet(struct bat_priv *bat_priv,
 	icmp_packet = (struct icmp_packet_rr *)skb->data;
 
 	memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
-	memcpy(icmp_packet->orig,
-	       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
+	memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
 	icmp_packet->msg_type = ECHO_REPLY;
 	icmp_packet->ttl = TTL;
 
-	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+	send_skb_packet(skb, router->if_incoming, router->addr);
 	ret = NET_RX_SUCCESS;
-	goto out;
 
-unlock:
-	rcu_read_unlock();
 out:
-	if (neigh_node)
-		neigh_node_free_ref(neigh_node);
+	if (primary_if)
+		hardif_free_ref(primary_if);
+	if (router)
+		neigh_node_free_ref(router);
 	if (orig_node)
 		orig_node_free_ref(orig_node);
 	return ret;
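
[Editor's note] primary_if_get_selected() replaces every bare bat_priv->primary_if access in these hunks; the caller now owns a reference and pairs it with hardif_free_ref(). The getter itself is the usual RCU-dereference-then-try-ref pattern; a sketch (the name is suffixed _sketch to mark it as illustrative):

    static struct hard_iface *primary_if_get_sketch(struct bat_priv *bat_priv)
    {
            struct hard_iface *primary_if;

            rcu_read_lock();
            primary_if = rcu_dereference(bat_priv->primary_if);

            /* hand out the pointer only if the interface is still live */
            if (primary_if && !atomic_inc_not_zero(&primary_if->refcount))
                    primary_if = NULL;

            rcu_read_unlock();
            return primary_if;
    }
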
@@ -933,8 +935,9 @@ out:
 static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
 				  struct sk_buff *skb)
 {
+	struct hard_iface *primary_if = NULL;
 	struct orig_node *orig_node = NULL;
-	struct neigh_node *neigh_node = NULL;
+	struct neigh_node *router = NULL;
 	struct icmp_packet *icmp_packet;
 	int ret = NET_RX_DROP;
 
@@ -948,27 +951,18 @@ static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
 		goto out;
 	}
 
-	if (!bat_priv->primary_if)
+	primary_if = primary_if_get_selected(bat_priv);
+	if (!primary_if)
 		goto out;
 
 	/* get routing information */
-	rcu_read_lock();
 	orig_node = orig_hash_find(bat_priv, icmp_packet->orig);
-
 	if (!orig_node)
-		goto unlock;
-
-	neigh_node = orig_node->router;
-
-	if (!neigh_node)
-		goto unlock;
-
-	if (!atomic_inc_not_zero(&neigh_node->refcount)) {
-		neigh_node = NULL;
-		goto unlock;
-	}
+		goto out;
 
-	rcu_read_unlock();
+	router = orig_node_get_router(orig_node);
+	if (!router)
+		goto out;
 
 	/* create a copy of the skb, if needed, to modify it. */
 	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
@@ -977,20 +971,18 @@ static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
 	icmp_packet = (struct icmp_packet *)skb->data;
 
 	memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
-	memcpy(icmp_packet->orig,
-	       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
+	memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
 	icmp_packet->msg_type = TTL_EXCEEDED;
 	icmp_packet->ttl = TTL;
 
-	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+	send_skb_packet(skb, router->if_incoming, router->addr);
 	ret = NET_RX_SUCCESS;
-	goto out;
 
-unlock:
-	rcu_read_unlock();
 out:
-	if (neigh_node)
-		neigh_node_free_ref(neigh_node);
+	if (primary_if)
+		hardif_free_ref(primary_if);
+	if (router)
+		neigh_node_free_ref(router);
 	if (orig_node)
 		orig_node_free_ref(orig_node);
 	return ret;
@@ -1003,7 +995,7 @@ int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if)
 	struct icmp_packet_rr *icmp_packet;
 	struct ethhdr *ethhdr;
 	struct orig_node *orig_node = NULL;
-	struct neigh_node *neigh_node = NULL;
+	struct neigh_node *router = NULL;
 	int hdr_size = sizeof(struct icmp_packet);
 	int ret = NET_RX_DROP;
 
@@ -1050,23 +1042,13 @@ int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if)
 		return recv_icmp_ttl_exceeded(bat_priv, skb);
 
 	/* get routing information */
-	rcu_read_lock();
 	orig_node = orig_hash_find(bat_priv, icmp_packet->dst);
-
 	if (!orig_node)
-		goto unlock;
-
-	neigh_node = orig_node->router;
-
-	if (!neigh_node)
-		goto unlock;
-
-	if (!atomic_inc_not_zero(&neigh_node->refcount)) {
-		neigh_node = NULL;
-		goto unlock;
-	}
+		goto out;
 
-	rcu_read_unlock();
+	router = orig_node_get_router(orig_node);
+	if (!router)
+		goto out;
 
 	/* create a copy of the skb, if needed, to modify it. */
 	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
@@ -1078,20 +1060,117 @@ int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if)
 	icmp_packet->ttl--;
 
 	/* route it */
-	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
+	send_skb_packet(skb, router->if_incoming, router->addr);
 	ret = NET_RX_SUCCESS;
-	goto out;
 
-unlock:
-	rcu_read_unlock();
 out:
-	if (neigh_node)
-		neigh_node_free_ref(neigh_node);
+	if (router)
+		neigh_node_free_ref(router);
 	if (orig_node)
 		orig_node_free_ref(orig_node);
 	return ret;
 }
 
+/* In the bonding case, send the packets in a round
+ * robin fashion over the remaining interfaces.
+ *
+ * This method rotates the bonding list and increases the
+ * returned router's refcount. */
+static struct neigh_node *find_bond_router(struct orig_node *primary_orig,
+					   struct hard_iface *recv_if)
+{
+	struct neigh_node *tmp_neigh_node;
+	struct neigh_node *router = NULL, *first_candidate = NULL;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(tmp_neigh_node, &primary_orig->bond_list,
+				bonding_list) {
+		if (!first_candidate)
+			first_candidate = tmp_neigh_node;
+
+		/* recv_if == NULL on the first node. */
+		if (tmp_neigh_node->if_incoming == recv_if)
+			continue;
+
+		if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
+			continue;
+
+		router = tmp_neigh_node;
+		break;
+	}
+
+	/* use the first candidate if nothing was found. */
+	if (!router && first_candidate &&
+	    atomic_inc_not_zero(&first_candidate->refcount))
+		router = first_candidate;
+
+	if (!router)
+		goto out;
+
+	/* selected should point to the next element
+	 * after the current router */
+	spin_lock_bh(&primary_orig->neigh_list_lock);
+	/* this is a list_move(), which unfortunately
+	 * does not exist as rcu version */
+	list_del_rcu(&primary_orig->bond_list);
+	list_add_rcu(&primary_orig->bond_list,
+		     &router->bonding_list);
+	spin_unlock_bh(&primary_orig->neigh_list_lock);
+
+out:
+	rcu_read_unlock();
+	return router;
+}
+
+/* Interface Alternating: Use the best of the
+ * remaining candidates which are not using
+ * this interface.
+ *
+ * Increases the returned router's refcount */
+static struct neigh_node *find_ifalter_router(struct orig_node *primary_orig,
+					      struct hard_iface *recv_if)
+{
+	struct neigh_node *tmp_neigh_node;
+	struct neigh_node *router = NULL, *first_candidate = NULL;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(tmp_neigh_node, &primary_orig->bond_list,
+				bonding_list) {
+		if (!first_candidate)
+			first_candidate = tmp_neigh_node;
+
+		/* recv_if == NULL on the first node. */
+		if (tmp_neigh_node->if_incoming == recv_if)
+			continue;
+
+		if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
+			continue;
+
+		/* if we don't have a router yet
+		 * or this one is better, choose it. */
+		if ((!router) ||
+		    (tmp_neigh_node->tq_avg > router->tq_avg)) {
+			/* decrement refcount of
+			 * previously selected router */
+			if (router)
+				neigh_node_free_ref(router);
+
+			router = tmp_neigh_node;
+			atomic_inc_not_zero(&router->refcount);
+		}
+
+		neigh_node_free_ref(tmp_neigh_node);
+	}
+
+	/* use the first candidate if nothing was found. */
+	if (!router && first_candidate &&
+	    atomic_inc_not_zero(&first_candidate->refcount))
+		router = first_candidate;
+
+	rcu_read_unlock();
+	return router;
+}
+
 /* find a suitable router for this originator, and use
  * bonding if possible. increases the found neighbors
  * refcount.*/
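
[Editor's note] find_bond_router() implements the round-robin by moving the per-originator list head behind the router it just picked; the next walk then starts at that router's successor. Since the kernel has no list_move_rcu(), the move is open-coded under the neighbor list lock. A generic sketch of that rotation:

    #include <linux/rculist.h>
    #include <linux/spinlock.h>

    /* Sketch: 'rotor' is the list head threaded through the candidates.
     * Re-inserting it right after the chosen entry makes that entry the
     * last one an RCU walk starting at 'rotor' will visit. */
    static void rotate_candidates(struct list_head *rotor,
                                  struct list_head *chosen,
                                  spinlock_t *lock)
    {
            spin_lock_bh(lock);
            /* open-coded list_move(): no RCU variant exists */
            list_del_rcu(rotor);
            list_add_rcu(rotor, chosen);
            spin_unlock_bh(lock);
    }
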
@@ -1101,15 +1180,16 @@ struct neigh_node *find_router(struct bat_priv *bat_priv,
 {
 	struct orig_node *primary_orig_node;
 	struct orig_node *router_orig;
-	struct neigh_node *router, *first_candidate, *tmp_neigh_node;
+	struct neigh_node *router;
 	static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
 	int bonding_enabled;
 
 	if (!orig_node)
 		return NULL;
 
-	if (!orig_node->router)
-		return NULL;
+	router = orig_node_get_router(orig_node);
+	if (!router)
+		goto err;
 
 	/* without bonding, the first node should
 	 * always choose the default router. */
@@ -1117,12 +1197,9 @@ struct neigh_node *find_router(struct bat_priv *bat_priv,
 
 	rcu_read_lock();
 	/* select default router to output */
-	router = orig_node->router;
-	router_orig = orig_node->router->orig_node;
-	if (!router_orig || !atomic_inc_not_zero(&router->refcount)) {
-		rcu_read_unlock();
-		return NULL;
-	}
+	router_orig = router->orig_node;
+	if (!router_orig)
+		goto err_unlock;
 
 	if ((!recv_if) && (!bonding_enabled))
 		goto return_router;
@@ -1151,91 +1228,26 @@ struct neigh_node *find_router(struct bat_priv *bat_priv,
 	if (atomic_read(&primary_orig_node->bond_candidates) < 2)
 		goto return_router;
 
-
 	/* all nodes between should choose a candidate which
 	 * is not on the interface where the packet came
 	 * in. */
 
 	neigh_node_free_ref(router);
-	first_candidate = NULL;
-	router = NULL;
-
-	if (bonding_enabled) {
-		/* in the bonding case, send the packets in a round
-		 * robin fashion over the remaining interfaces. */
-
-		list_for_each_entry_rcu(tmp_neigh_node,
-				&primary_orig_node->bond_list, bonding_list) {
-			if (!first_candidate)
-				first_candidate = tmp_neigh_node;
-			/* recv_if == NULL on the first node. */
-			if (tmp_neigh_node->if_incoming != recv_if &&
-			    atomic_inc_not_zero(&tmp_neigh_node->refcount)) {
-				router = tmp_neigh_node;
-				break;
-			}
-		}
-
-		/* use the first candidate if nothing was found. */
-		if (!router && first_candidate &&
-		    atomic_inc_not_zero(&first_candidate->refcount))
-			router = first_candidate;
 
-		if (!router) {
-			rcu_read_unlock();
-			return NULL;
-		}
-
-		/* selected should point to the next element
-		 * after the current router */
-		spin_lock_bh(&primary_orig_node->neigh_list_lock);
-		/* this is a list_move(), which unfortunately
-		 * does not exist as rcu version */
-		list_del_rcu(&primary_orig_node->bond_list);
-		list_add_rcu(&primary_orig_node->bond_list,
-			     &router->bonding_list);
-		spin_unlock_bh(&primary_orig_node->neigh_list_lock);
-
-	} else {
-		/* if bonding is disabled, use the best of the
-		 * remaining candidates which are not using
-		 * this interface. */
-		list_for_each_entry_rcu(tmp_neigh_node,
-				&primary_orig_node->bond_list, bonding_list) {
-			if (!first_candidate)
-				first_candidate = tmp_neigh_node;
-
-			/* recv_if == NULL on the first node. */
-			if (tmp_neigh_node->if_incoming == recv_if)
-				continue;
-
-			if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
-				continue;
-
-			/* if we don't have a router yet
-			 * or this one is better, choose it. */
-			if ((!router) ||
-			    (tmp_neigh_node->tq_avg > router->tq_avg)) {
-				/* decrement refcount of
-				 * previously selected router */
-				if (router)
-					neigh_node_free_ref(router);
-
-				router = tmp_neigh_node;
-				atomic_inc_not_zero(&router->refcount);
-			}
-
-			neigh_node_free_ref(tmp_neigh_node);
-		}
+	if (bonding_enabled)
+		router = find_bond_router(primary_orig_node, recv_if);
+	else
+		router = find_ifalter_router(primary_orig_node, recv_if);
 
-		/* use the first candidate if nothing was found. */
-		if (!router && first_candidate &&
-		    atomic_inc_not_zero(&first_candidate->refcount))
-			router = first_candidate;
-	}
return_router:
 	rcu_read_unlock();
 	return router;
+err_unlock:
+	rcu_read_unlock();
+err:
+	if (router)
+		neigh_node_free_ref(router);
+	return NULL;
 }
 
 static int check_unicast_packet(struct sk_buff *skb, int hdr_size)
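
[Editor's note] Whatever branch find_router() returns through, the contract is unchanged: a non-NULL result carries a reference that the caller must drop with neigh_node_free_ref(). A usage sketch (xmit_via_router() is illustrative; the called functions are the real ones from this file):

    static int xmit_via_router(struct bat_priv *bat_priv,
                               struct orig_node *orig_node,
                               struct sk_buff *skb)
    {
            struct neigh_node *router;
            int ret = NET_RX_DROP;

            /* find_router() increases the returned neighbor's refcount */
            router = find_router(bat_priv, orig_node, NULL);
            if (!router)
                    goto out;

            send_skb_packet(skb, router->if_incoming, router->addr);
            ret = NET_RX_SUCCESS;

            neigh_node_free_ref(router);
    out:
            return ret;
    }
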
@@ -1284,13 +1296,10 @@ int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
 	}
 
 	/* get routing information */
-	rcu_read_lock();
 	orig_node = orig_hash_find(bat_priv, unicast_packet->dest);
 
 	if (!orig_node)
-		goto unlock;
-
-	rcu_read_unlock();
+		goto out;
 
 	/* find_router() increases neigh_nodes refcount if found. */
 	neigh_node = find_router(bat_priv, orig_node, recv_if);
@@ -1336,10 +1345,7 @@ int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
 	/* route it */
 	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
 	ret = NET_RX_SUCCESS;
-	goto out;
 
-unlock:
-	rcu_read_unlock();
 out:
 	if (neigh_node)
 		neigh_node_free_ref(neigh_node);
@@ -1438,13 +1444,10 @@ int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
 	if (bcast_packet->ttl < 2)
 		goto out;
 
-	rcu_read_lock();
 	orig_node = orig_hash_find(bat_priv, bcast_packet->orig);
 
 	if (!orig_node)
-		goto rcu_unlock;
-
-	rcu_read_unlock();
+		goto out;
 
 	spin_lock_bh(&orig_node->bcast_seqno_lock);
 
@@ -1475,9 +1478,6 @@ int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
 	ret = NET_RX_SUCCESS;
 	goto out;
 
-rcu_unlock:
-	rcu_read_unlock();
-	goto out;
 spin_unlock:
 	spin_unlock_bh(&orig_node->bcast_seqno_lock);
 out:
diff --git a/net/batman-adv/routing.h b/net/batman-adv/routing.h
index b5a064c88a4f..870f29842b28 100644
--- a/net/batman-adv/routing.h
+++ b/net/batman-adv/routing.h
@@ -25,11 +25,11 @@
 void slide_own_bcast_window(struct hard_iface *hard_iface);
 void receive_bat_packet(struct ethhdr *ethhdr,
 			struct batman_packet *batman_packet,
-			unsigned char *hna_buff, int hna_buff_len,
+			unsigned char *tt_buff, int tt_buff_len,
 			struct hard_iface *if_incoming);
 void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
-		   struct neigh_node *neigh_node, unsigned char *hna_buff,
-		   int hna_buff_len);
+		   struct neigh_node *neigh_node, unsigned char *tt_buff,
+		   int tt_buff_len);
 int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
 int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if);
 int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index d49e54d932af..33779278f1b2 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -121,7 +121,7 @@ static void send_packet_to_if(struct forw_packet *forw_packet,
 	/* adjust all flags and log packets */
 	while (aggregated_packet(buff_pos,
 				 forw_packet->packet_len,
-				 batman_packet->num_hna)) {
+				 batman_packet->num_tt)) {
 
 		/* we might have aggregated direct link packets with an
 		 * ordinary base packet */
@@ -146,7 +146,7 @@ static void send_packet_to_if(struct forw_packet *forw_packet,
 			hard_iface->net_dev->dev_addr);
 
 		buff_pos += sizeof(struct batman_packet) +
-			(batman_packet->num_hna * ETH_ALEN);
+			(batman_packet->num_tt * ETH_ALEN);
 		packet_num++;
 		batman_packet = (struct batman_packet *)
 			(forw_packet->skb->data + buff_pos);
@@ -222,7 +222,7 @@ static void rebuild_batman_packet(struct bat_priv *bat_priv,
 	struct batman_packet *batman_packet;
 
 	new_len = sizeof(struct batman_packet) +
-			(bat_priv->num_local_hna * ETH_ALEN);
+			(bat_priv->num_local_tt * ETH_ALEN);
 	new_buff = kmalloc(new_len, GFP_ATOMIC);
 
 	/* keep old buffer if kmalloc should fail */
@@ -231,7 +231,7 @@ static void rebuild_batman_packet(struct bat_priv *bat_priv,
 		       sizeof(struct batman_packet));
 		batman_packet = (struct batman_packet *)new_buff;
 
-		batman_packet->num_hna = hna_local_fill_buffer(bat_priv,
+		batman_packet->num_tt = tt_local_fill_buffer(bat_priv,
 				new_buff + sizeof(struct batman_packet),
 				new_len - sizeof(struct batman_packet));
 
@@ -244,6 +244,7 @@ static void rebuild_batman_packet(struct bat_priv *bat_priv,
 void schedule_own_packet(struct hard_iface *hard_iface)
 {
 	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+	struct hard_iface *primary_if;
 	unsigned long send_time;
 	struct batman_packet *batman_packet;
 	int vis_server;
@@ -253,6 +254,7 @@ void schedule_own_packet(struct hard_iface *hard_iface)
 		return;
 
 	vis_server = atomic_read(&bat_priv->vis_mode);
+	primary_if = primary_if_get_selected(bat_priv);
 
 	/**
 	 * the interface gets activated here to avoid race conditions between
@@ -264,9 +266,9 @@ void schedule_own_packet(struct hard_iface *hard_iface)
 	if (hard_iface->if_status == IF_TO_BE_ACTIVATED)
 		hard_iface->if_status = IF_ACTIVE;
 
-	/* if local hna has changed and interface is a primary interface */
-	if ((atomic_read(&bat_priv->hna_local_changed)) &&
-	    (hard_iface == bat_priv->primary_if))
+	/* if local tt has changed and interface is a primary interface */
+	if ((atomic_read(&bat_priv->tt_local_changed)) &&
+	    (hard_iface == primary_if))
 		rebuild_batman_packet(bat_priv, hard_iface);
 
 	/**
@@ -284,7 +286,7 @@ void schedule_own_packet(struct hard_iface *hard_iface)
 	else
 		batman_packet->flags &= ~VIS_SERVER;
 
-	if ((hard_iface == bat_priv->primary_if) &&
+	if ((hard_iface == primary_if) &&
 	    (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER))
 		batman_packet->gw_flags =
 			(uint8_t)atomic_read(&bat_priv->gw_bandwidth);
@@ -299,15 +301,19 @@ void schedule_own_packet(struct hard_iface *hard_iface)
 			   hard_iface->packet_buff,
 			   hard_iface->packet_len,
 			   hard_iface, 1, send_time);
+
+	if (primary_if)
+		hardif_free_ref(primary_if);
 }
 
 void schedule_forward_packet(struct orig_node *orig_node,
 			     struct ethhdr *ethhdr,
 			     struct batman_packet *batman_packet,
-			     uint8_t directlink, int hna_buff_len,
+			     uint8_t directlink, int tt_buff_len,
 			     struct hard_iface *if_incoming)
 {
 	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
+	struct neigh_node *router;
 	unsigned char in_tq, in_ttl, tq_avg = 0;
 	unsigned long send_time;
 
@@ -316,6 +322,8 @@ void schedule_forward_packet(struct orig_node *orig_node,
 		return;
 	}
 
+	router = orig_node_get_router(orig_node);
+
 	in_tq = batman_packet->tq;
 	in_ttl = batman_packet->ttl;
 
@@ -324,20 +332,22 @@ void schedule_forward_packet(struct orig_node *orig_node,
 
 	/* rebroadcast tq of our best ranking neighbor to ensure the rebroadcast
 	 * of our best tq value */
-	if ((orig_node->router) && (orig_node->router->tq_avg != 0)) {
+	if (router && router->tq_avg != 0) {
 
 		/* rebroadcast ogm of best ranking neighbor as is */
-		if (!compare_eth(orig_node->router->addr, ethhdr->h_source)) {
-			batman_packet->tq = orig_node->router->tq_avg;
+		if (!compare_eth(router->addr, ethhdr->h_source)) {
+			batman_packet->tq = router->tq_avg;
 
-			if (orig_node->router->last_ttl)
-				batman_packet->ttl = orig_node->router->last_ttl
-							- 1;
+			if (router->last_ttl)
+				batman_packet->ttl = router->last_ttl - 1;
 		}
 
-		tq_avg = orig_node->router->tq_avg;
+		tq_avg = router->tq_avg;
 	}
 
+	if (router)
+		neigh_node_free_ref(router);
+
 	/* apply hop penalty */
 	batman_packet->tq = hop_penalty(batman_packet->tq, bat_priv);
 
@@ -359,7 +369,7 @@ void schedule_forward_packet(struct orig_node *orig_node,
 	send_time = forward_send_time();
 	add_bat_packet_to_list(bat_priv,
 			       (unsigned char *)batman_packet,
-			       sizeof(struct batman_packet) + hna_buff_len,
+			       sizeof(struct batman_packet) + tt_buff_len,
 			       if_incoming, 0, send_time);
 }
 
@@ -367,6 +377,8 @@ static void forw_packet_free(struct forw_packet *forw_packet)
 {
 	if (forw_packet->skb)
 		kfree_skb(forw_packet->skb);
+	if (forw_packet->if_incoming)
+		hardif_free_ref(forw_packet->if_incoming);
 	kfree(forw_packet);
 }
 
@@ -388,7 +400,6 @@ static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
 			   send_time);
 }
 
-#define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
 /* add a broadcast packet to the queue and setup timers. broadcast packets
  * are sent multiple times to increase probability of being received.
  *
@@ -399,6 +410,7 @@ static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
  * skb is freed. */
 int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb)
 {
+	struct hard_iface *primary_if = NULL;
 	struct forw_packet *forw_packet;
 	struct bcast_packet *bcast_packet;
 
@@ -407,8 +419,9 @@ int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb)
 		goto out;
 	}
 
-	if (!bat_priv->primary_if)
-		goto out;
+	primary_if = primary_if_get_selected(bat_priv);
+	if (!primary_if)
+		goto out_and_inc;
 
 	forw_packet = kmalloc(sizeof(struct forw_packet), GFP_ATOMIC);
 
@@ -426,7 +439,7 @@ int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb)
 	skb_reset_mac_header(skb);
 
 	forw_packet->skb = skb;
-	forw_packet->if_incoming = bat_priv->primary_if;
+	forw_packet->if_incoming = primary_if;
 
 	/* how often did we send the bcast packet ? */
 	forw_packet->num_packets = 0;
@@ -439,6 +452,8 @@ packet_free:
 out_and_inc:
 	atomic_inc(&bat_priv->bcast_queue_left);
 out:
+	if (primary_if)
+		hardif_free_ref(primary_if);
 	return NETDEV_TX_BUSY;
 }
 
@@ -526,6 +541,7 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
 {
 	struct forw_packet *forw_packet;
 	struct hlist_node *tmp_node, *safe_tmp_node;
+	bool pending;
 
 	if (hard_iface)
 		bat_dbg(DBG_BATMAN, bat_priv,
@@ -554,8 +570,13 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
 		 * send_outstanding_bcast_packet() will lock the list to
 		 * delete the item from the list
 		 */
-		cancel_delayed_work_sync(&forw_packet->delayed_work);
+		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
 		spin_lock_bh(&bat_priv->forw_bcast_list_lock);
+
+		if (pending) {
+			hlist_del(&forw_packet->list);
+			forw_packet_free(forw_packet);
+		}
 	}
 	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
 
@@ -578,8 +599,13 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
 		 * send_outstanding_bat_packet() will lock the list to
 		 * delete the item from the list
 		 */
-		cancel_delayed_work_sync(&forw_packet->delayed_work);
+		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
 		spin_lock_bh(&bat_priv->forw_bat_list_lock);
+
+		if (pending) {
+			hlist_del(&forw_packet->list);
+			forw_packet_free(forw_packet);
+		}
 	}
 	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
 }
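
[Editor's note] The purge fix hinges on the return value of cancel_delayed_work_sync(): true means the work item was still pending and its callback will now never run, so the cleanup that callback would have done (unlink and free) falls to the purger. A condensed sketch of the per-packet step:

    static void purge_one(struct bat_priv *bat_priv,
                          struct forw_packet *forw_packet)
    {
            bool pending;

            pending = cancel_delayed_work_sync(&forw_packet->delayed_work);

            spin_lock_bh(&bat_priv->forw_bcast_list_lock);
            if (pending) {
                    /* the work function never ran: do its cleanup here */
                    hlist_del(&forw_packet->list);
                    forw_packet_free(forw_packet);
            }
            spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
    }
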
diff --git a/net/batman-adv/send.h b/net/batman-adv/send.h
index 7b2ff19c05e7..247172d71e4b 100644
--- a/net/batman-adv/send.h
+++ b/net/batman-adv/send.h
@@ -29,7 +29,7 @@ void schedule_own_packet(struct hard_iface *hard_iface);
 void schedule_forward_packet(struct orig_node *orig_node,
 			     struct ethhdr *ethhdr,
 			     struct batman_packet *batman_packet,
-			     uint8_t directlink, int hna_buff_len,
+			     uint8_t directlink, int tt_buff_len,
 			     struct hard_iface *if_outgoing);
 int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb);
 void send_outstanding_bat_packet(struct work_struct *work);
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 04efe022c13b..d5aa60999e83 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -43,8 +43,6 @@ static void bat_get_drvinfo(struct net_device *dev,
 static u32 bat_get_msglevel(struct net_device *dev);
 static void bat_set_msglevel(struct net_device *dev, u32 value);
 static u32 bat_get_link(struct net_device *dev);
-static u32 bat_get_rx_csum(struct net_device *dev);
-static int bat_set_rx_csum(struct net_device *dev, u32 data);
 
 static const struct ethtool_ops bat_ethtool_ops = {
 	.get_settings = bat_get_settings,
@@ -52,8 +50,6 @@ static const struct ethtool_ops bat_ethtool_ops = {
 	.get_msglevel = bat_get_msglevel,
 	.set_msglevel = bat_set_msglevel,
 	.get_link = bat_get_link,
-	.get_rx_csum = bat_get_rx_csum,
-	.set_rx_csum = bat_set_rx_csum
 };
 
 int my_skb_head_push(struct sk_buff *skb, unsigned int len)
@@ -82,106 +78,365 @@ static void softif_neigh_free_ref(struct softif_neigh *softif_neigh)
 	kfree_rcu(softif_neigh, rcu);
 }
 
-void softif_neigh_purge(struct bat_priv *bat_priv)
+static void softif_neigh_vid_free_rcu(struct rcu_head *rcu)
 {
-	struct softif_neigh *softif_neigh, *softif_neigh_tmp;
+	struct softif_neigh_vid *softif_neigh_vid;
+	struct softif_neigh *softif_neigh;
 	struct hlist_node *node, *node_tmp;
+	struct bat_priv *bat_priv;
 
-	spin_lock_bh(&bat_priv->softif_neigh_lock);
+	softif_neigh_vid = container_of(rcu, struct softif_neigh_vid, rcu);
+	bat_priv = softif_neigh_vid->bat_priv;
 
+	spin_lock_bh(&bat_priv->softif_neigh_lock);
 	hlist_for_each_entry_safe(softif_neigh, node, node_tmp,
-				  &bat_priv->softif_neigh_list, list) {
+				  &softif_neigh_vid->softif_neigh_list, list) {
+		hlist_del_rcu(&softif_neigh->list);
+		softif_neigh_free_ref(softif_neigh);
+	}
+	spin_unlock_bh(&bat_priv->softif_neigh_lock);
 
-		if ((!time_after(jiffies, softif_neigh->last_seen +
-			msecs_to_jiffies(SOFTIF_NEIGH_TIMEOUT))) &&
-		    (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE))
-			continue;
+	kfree(softif_neigh_vid);
+}
 
-		hlist_del_rcu(&softif_neigh->list);
+static void softif_neigh_vid_free_ref(struct softif_neigh_vid *softif_neigh_vid)
+{
+	if (atomic_dec_and_test(&softif_neigh_vid->refcount))
+		call_rcu(&softif_neigh_vid->rcu, softif_neigh_vid_free_rcu);
+}
 
-		if (bat_priv->softif_neigh == softif_neigh) {
-			bat_dbg(DBG_ROUTES, bat_priv,
-				"Current mesh exit point '%pM' vanished "
-				"(vid: %d).\n",
-				softif_neigh->addr, softif_neigh->vid);
-			softif_neigh_tmp = bat_priv->softif_neigh;
-			bat_priv->softif_neigh = NULL;
-			softif_neigh_free_ref(softif_neigh_tmp);
-		}
+static struct softif_neigh_vid *softif_neigh_vid_get(struct bat_priv *bat_priv,
+						     short vid)
+{
+	struct softif_neigh_vid *softif_neigh_vid;
+	struct hlist_node *node;
 
-		softif_neigh_free_ref(softif_neigh);
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(softif_neigh_vid, node,
+				 &bat_priv->softif_neigh_vids, list) {
+		if (softif_neigh_vid->vid != vid)
+			continue;
+
+		if (!atomic_inc_not_zero(&softif_neigh_vid->refcount))
+			continue;
+
+		goto out;
 	}
 
-	spin_unlock_bh(&bat_priv->softif_neigh_lock);
+	softif_neigh_vid = kzalloc(sizeof(struct softif_neigh_vid),
+				   GFP_ATOMIC);
+	if (!softif_neigh_vid)
+		goto out;
+
+	softif_neigh_vid->vid = vid;
+	softif_neigh_vid->bat_priv = bat_priv;
+
+	/* initialize with 2 - caller decrements counter by one */
+	atomic_set(&softif_neigh_vid->refcount, 2);
+	INIT_HLIST_HEAD(&softif_neigh_vid->softif_neigh_list);
+	INIT_HLIST_NODE(&softif_neigh_vid->list);
+	spin_lock_bh(&bat_priv->softif_neigh_vid_lock);
+	hlist_add_head_rcu(&softif_neigh_vid->list,
+			   &bat_priv->softif_neigh_vids);
+	spin_unlock_bh(&bat_priv->softif_neigh_vid_lock);
+
+out:
+	rcu_read_unlock();
+	return softif_neigh_vid;
 }
 
 static struct softif_neigh *softif_neigh_get(struct bat_priv *bat_priv,
 					     uint8_t *addr, short vid)
 {
-	struct softif_neigh *softif_neigh;
+	struct softif_neigh_vid *softif_neigh_vid;
+	struct softif_neigh *softif_neigh = NULL;
 	struct hlist_node *node;
 
+	softif_neigh_vid = softif_neigh_vid_get(bat_priv, vid);
+	if (!softif_neigh_vid)
+		goto out;
+
 	rcu_read_lock();
 	hlist_for_each_entry_rcu(softif_neigh, node,
-				 &bat_priv->softif_neigh_list, list) {
+				 &softif_neigh_vid->softif_neigh_list,
+				 list) {
 		if (!compare_eth(softif_neigh->addr, addr))
 			continue;
 
-		if (softif_neigh->vid != vid)
-			continue;
-
 		if (!atomic_inc_not_zero(&softif_neigh->refcount))
 			continue;
 
 		softif_neigh->last_seen = jiffies;
-		goto out;
+		goto unlock;
 	}
 
 	softif_neigh = kzalloc(sizeof(struct softif_neigh), GFP_ATOMIC);
 	if (!softif_neigh)
-		goto out;
+		goto unlock;
 
 	memcpy(softif_neigh->addr, addr, ETH_ALEN);
-	softif_neigh->vid = vid;
 	softif_neigh->last_seen = jiffies;
 	/* initialize with 2 - caller decrements counter by one */
 	atomic_set(&softif_neigh->refcount, 2);
 
 	INIT_HLIST_NODE(&softif_neigh->list);
 	spin_lock_bh(&bat_priv->softif_neigh_lock);
-	hlist_add_head_rcu(&softif_neigh->list, &bat_priv->softif_neigh_list);
+	hlist_add_head_rcu(&softif_neigh->list,
+			   &softif_neigh_vid->softif_neigh_list);
 	spin_unlock_bh(&bat_priv->softif_neigh_lock);
 
+unlock:
+	rcu_read_unlock();
 out:
+	if (softif_neigh_vid)
+		softif_neigh_vid_free_ref(softif_neigh_vid);
+	return softif_neigh;
+}
+
+static struct softif_neigh *softif_neigh_get_selected(
+				struct softif_neigh_vid *softif_neigh_vid)
+{
+	struct softif_neigh *softif_neigh;
+
+	rcu_read_lock();
+	softif_neigh = rcu_dereference(softif_neigh_vid->softif_neigh);
+
+	if (softif_neigh && !atomic_inc_not_zero(&softif_neigh->refcount))
+		softif_neigh = NULL;
+
 	rcu_read_unlock();
 	return softif_neigh;
 }
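
[Editor's note] Both softif_neigh and the new softif_neigh_vid objects are born with refcount 2: one reference owned by the RCU list they are published on, one handed to the caller, who drops it with the matching _free_ref(). A sketch of the allocation side of that convention (my_entry/my_table are illustrative):

    static struct my_entry *my_entry_create(struct my_table *table)
    {
            struct my_entry *entry;

            entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
            if (!entry)
                    return NULL;

            /* initialize with 2 - one ref for the list, one for the caller */
            atomic_set(&entry->refcount, 2);
            INIT_HLIST_NODE(&entry->list);

            spin_lock_bh(&table->lock);
            hlist_add_head_rcu(&entry->list, &table->head);
            spin_unlock_bh(&table->lock);

            return entry;
    }
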
159 210
211static struct softif_neigh *softif_neigh_vid_get_selected(
212 struct bat_priv *bat_priv,
213 short vid)
214{
215 struct softif_neigh_vid *softif_neigh_vid;
216 struct softif_neigh *softif_neigh = NULL;
217
218 softif_neigh_vid = softif_neigh_vid_get(bat_priv, vid);
219 if (!softif_neigh_vid)
220 goto out;
221
222 softif_neigh = softif_neigh_get_selected(softif_neigh_vid);
223out:
224 if (softif_neigh_vid)
225 softif_neigh_vid_free_ref(softif_neigh_vid);
226 return softif_neigh;
227}
228
229static void softif_neigh_vid_select(struct bat_priv *bat_priv,
230 struct softif_neigh *new_neigh,
231 short vid)
232{
233 struct softif_neigh_vid *softif_neigh_vid;
234 struct softif_neigh *curr_neigh;
235
236 softif_neigh_vid = softif_neigh_vid_get(bat_priv, vid);
237 if (!softif_neigh_vid)
238 goto out;
239
240 spin_lock_bh(&bat_priv->softif_neigh_lock);
241
242 if (new_neigh && !atomic_inc_not_zero(&new_neigh->refcount))
243 new_neigh = NULL;
244
245 curr_neigh = softif_neigh_vid->softif_neigh;
246 rcu_assign_pointer(softif_neigh_vid->softif_neigh, new_neigh);
247
248 if ((curr_neigh) && (!new_neigh))
249 bat_dbg(DBG_ROUTES, bat_priv,
250 "Removing mesh exit point on vid: %d (prev: %pM).\n",
251 vid, curr_neigh->addr);
252 else if ((curr_neigh) && (new_neigh))
253 bat_dbg(DBG_ROUTES, bat_priv,
254 "Changing mesh exit point on vid: %d from %pM "
255 "to %pM.\n", vid, curr_neigh->addr, new_neigh->addr);
256 else if ((!curr_neigh) && (new_neigh))
257 bat_dbg(DBG_ROUTES, bat_priv,
258 "Setting mesh exit point on vid: %d to %pM.\n",
259 vid, new_neigh->addr);
260
261 if (curr_neigh)
262 softif_neigh_free_ref(curr_neigh);
263
264 spin_unlock_bh(&bat_priv->softif_neigh_lock);
265
266out:
267 if (softif_neigh_vid)
268 softif_neigh_vid_free_ref(softif_neigh_vid);
269}
270
271static void softif_neigh_vid_deselect(struct bat_priv *bat_priv,
272 struct softif_neigh_vid *softif_neigh_vid)
273{
274 struct softif_neigh *curr_neigh;
275 struct softif_neigh *softif_neigh = NULL, *softif_neigh_tmp;
276 struct hard_iface *primary_if = NULL;
277 struct hlist_node *node;
278
279 primary_if = primary_if_get_selected(bat_priv);
280 if (!primary_if)
281 goto out;
282
283 /* find new softif_neigh immediately to avoid temporary loops */
284 rcu_read_lock();
285 curr_neigh = rcu_dereference(softif_neigh_vid->softif_neigh);
286
287 hlist_for_each_entry_rcu(softif_neigh_tmp, node,
288 &softif_neigh_vid->softif_neigh_list,
289 list) {
290 if (softif_neigh_tmp == curr_neigh)
291 continue;
292
293 /* we got a neighbor but its mac is 'bigger' than ours */
294 if (memcmp(primary_if->net_dev->dev_addr,
295 softif_neigh_tmp->addr, ETH_ALEN) < 0)
296 continue;
297
298 if (!atomic_inc_not_zero(&softif_neigh_tmp->refcount))
299 continue;
300
301 softif_neigh = softif_neigh_tmp;
302 goto unlock;
303 }
304
305unlock:
306 rcu_read_unlock();
307out:
308 softif_neigh_vid_select(bat_priv, softif_neigh, softif_neigh_vid->vid);
309
310 if (primary_if)
311 hardif_free_ref(primary_if);
312 if (softif_neigh)
313 softif_neigh_free_ref(softif_neigh);
314}
315
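
softif_neigh_vid_deselect() above re-runs the same election as the receive path: a neighbour only qualifies as mesh exit point when its MAC address does not compare greater than the primary interface's own address. A standalone sketch of just that ordering test; everything beyond the memcmp() over ETH_ALEN bytes is made up for illustration:

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* Nonzero if 'candidate' may serve as exit point, i.e. its address does
 * not compare greater than our own primary address. */
static int may_be_exit_point(const unsigned char *own,
                             const unsigned char *candidate)
{
        return memcmp(own, candidate, ETH_ALEN) >= 0;
}

int main(void)
{
        const unsigned char own[ETH_ALEN]  = { 0x02, 0xba, 0x7d, 0, 0, 0x10 };
        const unsigned char cand[ETH_ALEN] = { 0x02, 0xba, 0x7d, 0, 0, 0x01 };

        printf("eligible: %d\n", may_be_exit_point(own, cand)); /* 1 */
        return 0;
}
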
160int softif_neigh_seq_print_text(struct seq_file *seq, void *offset) 316int softif_neigh_seq_print_text(struct seq_file *seq, void *offset)
161{ 317{
162 struct net_device *net_dev = (struct net_device *)seq->private; 318 struct net_device *net_dev = (struct net_device *)seq->private;
163 struct bat_priv *bat_priv = netdev_priv(net_dev); 319 struct bat_priv *bat_priv = netdev_priv(net_dev);
320 struct softif_neigh_vid *softif_neigh_vid;
164 struct softif_neigh *softif_neigh; 321 struct softif_neigh *softif_neigh;
165 struct hlist_node *node; 322 struct hard_iface *primary_if;
323 struct hlist_node *node, *node_tmp;
324 struct softif_neigh *curr_softif_neigh;
325 int ret = 0, last_seen_secs, last_seen_msecs;
326
327 primary_if = primary_if_get_selected(bat_priv);
328 if (!primary_if) {
329 ret = seq_printf(seq, "BATMAN mesh %s disabled - "
330 "please specify interfaces to enable it\n",
331 net_dev->name);
332 goto out;
333 }
166 334
167 if (!bat_priv->primary_if) { 335 if (primary_if->if_status != IF_ACTIVE) {
168 return seq_printf(seq, "BATMAN mesh %s disabled - " 336 ret = seq_printf(seq, "BATMAN mesh %s "
169 "please specify interfaces to enable it\n", 337 "disabled - primary interface not active\n",
170 net_dev->name); 338 net_dev->name);
339 goto out;
171 } 340 }
172 341
173 seq_printf(seq, "Softif neighbor list (%s)\n", net_dev->name); 342 seq_printf(seq, "Softif neighbor list (%s)\n", net_dev->name);
174 343
175 rcu_read_lock(); 344 rcu_read_lock();
176 hlist_for_each_entry_rcu(softif_neigh, node, 345 hlist_for_each_entry_rcu(softif_neigh_vid, node,
177 &bat_priv->softif_neigh_list, list) 346 &bat_priv->softif_neigh_vids, list) {
178 seq_printf(seq, "%s %pM (vid: %d)\n", 347 seq_printf(seq, " %-15s %s on vid: %d\n",
179 bat_priv->softif_neigh == softif_neigh 348 "Originator", "last-seen", softif_neigh_vid->vid);
180 ? "=>" : " ", softif_neigh->addr, 349
181 softif_neigh->vid); 350 curr_softif_neigh = softif_neigh_get_selected(softif_neigh_vid);
351
352 hlist_for_each_entry_rcu(softif_neigh, node_tmp,
353 &softif_neigh_vid->softif_neigh_list,
354 list) {
355 last_seen_secs = jiffies_to_msecs(jiffies -
356 softif_neigh->last_seen) / 1000;
357 last_seen_msecs = jiffies_to_msecs(jiffies -
358 softif_neigh->last_seen) % 1000;
359 seq_printf(seq, "%s %pM %3i.%03is\n",
360 curr_softif_neigh == softif_neigh
361 ? "=>" : " ", softif_neigh->addr,
362 last_seen_secs, last_seen_msecs);
363 }
364
365 if (curr_softif_neigh)
366 softif_neigh_free_ref(curr_softif_neigh);
367
368 seq_printf(seq, "\n");
369 }
182 rcu_read_unlock(); 370 rcu_read_unlock();
183 371
184 return 0; 372out:
373 if (primary_if)
374 hardif_free_ref(primary_if);
375 return ret;
376}
377
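
The new per-VID table prints a neighbour's age by converting the jiffies delta to milliseconds once and splitting it into seconds plus a millisecond remainder. The same split in plain C, with a fixed value standing in for the jiffies_to_msecs() result:

#include <stdio.h>

int main(void)
{
        unsigned long age_msecs = 12345; /* stand-in for jiffies_to_msecs() */
        unsigned long secs  = age_msecs / 1000;
        unsigned long msecs = age_msecs % 1000;

        printf("%3lu.%03lus\n", secs, msecs); /* prints " 12.345s" */
        return 0;
}
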
378void softif_neigh_purge(struct bat_priv *bat_priv)
379{
380 struct softif_neigh *softif_neigh, *curr_softif_neigh;
381 struct softif_neigh_vid *softif_neigh_vid;
382 struct hlist_node *node, *node_tmp, *node_tmp2;
383 char do_deselect;
384
385 rcu_read_lock();
386 hlist_for_each_entry_rcu(softif_neigh_vid, node,
387 &bat_priv->softif_neigh_vids, list) {
388 if (!atomic_inc_not_zero(&softif_neigh_vid->refcount))
389 continue;
390
391 curr_softif_neigh = softif_neigh_get_selected(softif_neigh_vid);
392 do_deselect = 0;
393
394 spin_lock_bh(&bat_priv->softif_neigh_lock);
395 hlist_for_each_entry_safe(softif_neigh, node_tmp, node_tmp2,
396 &softif_neigh_vid->softif_neigh_list,
397 list) {
398 if ((!time_after(jiffies, softif_neigh->last_seen +
399 msecs_to_jiffies(SOFTIF_NEIGH_TIMEOUT))) &&
400 (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE))
401 continue;
402
403 if (curr_softif_neigh == softif_neigh) {
404 bat_dbg(DBG_ROUTES, bat_priv,
405 "Current mesh exit point on vid: %d "
406 "'%pM' vanished.\n",
407 softif_neigh_vid->vid,
408 softif_neigh->addr);
409 do_deselect = 1;
410 }
411
412 hlist_del_rcu(&softif_neigh->list);
413 softif_neigh_free_ref(softif_neigh);
414 }
415 spin_unlock_bh(&bat_priv->softif_neigh_lock);
416
 417 /* softif_neigh_vid_deselect() needs to acquire the
418 * softif_neigh_lock */
419 if (do_deselect)
420 softif_neigh_vid_deselect(bat_priv, softif_neigh_vid);
421
422 if (curr_softif_neigh)
423 softif_neigh_free_ref(curr_softif_neigh);
424
425 softif_neigh_vid_free_ref(softif_neigh_vid);
426 }
427 rcu_read_unlock();
428
429 spin_lock_bh(&bat_priv->softif_neigh_vid_lock);
430 hlist_for_each_entry_safe(softif_neigh_vid, node, node_tmp,
431 &bat_priv->softif_neigh_vids, list) {
432 if (!hlist_empty(&softif_neigh_vid->softif_neigh_list))
433 continue;
434
435 hlist_del_rcu(&softif_neigh_vid->list);
436 softif_neigh_vid_free_ref(softif_neigh_vid);
437 }
438 spin_unlock_bh(&bat_priv->softif_neigh_vid_lock);
439
185} 440}
186 441
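
The purge above expires entries with time_after(jiffies, last_seen + timeout), which stays correct across jiffies wraparound because the test reduces to the sign of an unsigned difference. A self-contained sketch of that idiom (the real SOFTIF_NEIGH_TIMEOUT value is not visible in this hunk, so the constant below is arbitrary):

#include <stdbool.h>
#include <stdio.h>

/* Wraparound-safe "a is after b" -- the same trick behind the kernel's
 * time_after(): look at the sign of the (unsigned) difference. */
static bool time_after_ul(unsigned long a, unsigned long b)
{
        return (long)(b - a) < 0;
}

int main(void)
{
        unsigned long timeout = 8;
        unsigned long last_seen = (unsigned long)-5; /* counter about to wrap */
        unsigned long now = 10;                      /* ...and it wrapped */

        printf("expired: %d\n", time_after_ul(now, last_seen + timeout)); /* 1 */
        return 0;
}
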
187static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev, 442static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev,
@@ -190,7 +445,9 @@ static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev,
190 struct bat_priv *bat_priv = netdev_priv(dev); 445 struct bat_priv *bat_priv = netdev_priv(dev);
191 struct ethhdr *ethhdr = (struct ethhdr *)skb->data; 446 struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
192 struct batman_packet *batman_packet; 447 struct batman_packet *batman_packet;
193 struct softif_neigh *softif_neigh, *softif_neigh_tmp; 448 struct softif_neigh *softif_neigh = NULL;
449 struct hard_iface *primary_if = NULL;
450 struct softif_neigh *curr_softif_neigh = NULL;
194 451
195 if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) 452 if (ntohs(ethhdr->h_proto) == ETH_P_8021Q)
196 batman_packet = (struct batman_packet *) 453 batman_packet = (struct batman_packet *)
@@ -199,63 +456,52 @@ static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev,
199 batman_packet = (struct batman_packet *)(skb->data + ETH_HLEN); 456 batman_packet = (struct batman_packet *)(skb->data + ETH_HLEN);
200 457
201 if (batman_packet->version != COMPAT_VERSION) 458 if (batman_packet->version != COMPAT_VERSION)
202 goto err; 459 goto out;
203 460
204 if (batman_packet->packet_type != BAT_PACKET) 461 if (batman_packet->packet_type != BAT_PACKET)
205 goto err; 462 goto out;
206 463
207 if (!(batman_packet->flags & PRIMARIES_FIRST_HOP)) 464 if (!(batman_packet->flags & PRIMARIES_FIRST_HOP))
208 goto err; 465 goto out;
209 466
210 if (is_my_mac(batman_packet->orig)) 467 if (is_my_mac(batman_packet->orig))
211 goto err; 468 goto out;
212 469
213 softif_neigh = softif_neigh_get(bat_priv, batman_packet->orig, vid); 470 softif_neigh = softif_neigh_get(bat_priv, batman_packet->orig, vid);
214
215 if (!softif_neigh) 471 if (!softif_neigh)
216 goto err; 472 goto out;
217 473
218 if (bat_priv->softif_neigh == softif_neigh) 474 curr_softif_neigh = softif_neigh_vid_get_selected(bat_priv, vid);
475 if (curr_softif_neigh == softif_neigh)
476 goto out;
477
478 primary_if = primary_if_get_selected(bat_priv);
479 if (!primary_if)
219 goto out; 480 goto out;
220 481
221 /* we got a neighbor but its mac is 'bigger' than ours */ 482 /* we got a neighbor but its mac is 'bigger' than ours */
222 if (memcmp(bat_priv->primary_if->net_dev->dev_addr, 483 if (memcmp(primary_if->net_dev->dev_addr,
223 softif_neigh->addr, ETH_ALEN) < 0) 484 softif_neigh->addr, ETH_ALEN) < 0)
224 goto out; 485 goto out;
225 486
226 /* switch to new 'smallest neighbor' */
227 if ((bat_priv->softif_neigh) &&
228 (memcmp(softif_neigh->addr, bat_priv->softif_neigh->addr,
229 ETH_ALEN) < 0)) {
230 bat_dbg(DBG_ROUTES, bat_priv,
231 "Changing mesh exit point from %pM (vid: %d) "
232 "to %pM (vid: %d).\n",
233 bat_priv->softif_neigh->addr,
234 bat_priv->softif_neigh->vid,
235 softif_neigh->addr, softif_neigh->vid);
236 softif_neigh_tmp = bat_priv->softif_neigh;
237 bat_priv->softif_neigh = softif_neigh;
238 softif_neigh_free_ref(softif_neigh_tmp);
239 /* we need to hold the additional reference */
240 goto err;
241 }
242
243 /* close own batX device and use softif_neigh as exit node */ 487 /* close own batX device and use softif_neigh as exit node */
244 if ((!bat_priv->softif_neigh) && 488 if (!curr_softif_neigh) {
245 (memcmp(softif_neigh->addr, 489 softif_neigh_vid_select(bat_priv, softif_neigh, vid);
246 bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN) < 0)) { 490 goto out;
247 bat_dbg(DBG_ROUTES, bat_priv,
248 "Setting mesh exit point to %pM (vid: %d).\n",
249 softif_neigh->addr, softif_neigh->vid);
250 bat_priv->softif_neigh = softif_neigh;
251 /* we need to hold the additional reference */
252 goto err;
253 } 491 }
254 492
493 /* switch to new 'smallest neighbor' */
494 if (memcmp(softif_neigh->addr, curr_softif_neigh->addr, ETH_ALEN) < 0)
495 softif_neigh_vid_select(bat_priv, softif_neigh, vid);
496
255out: 497out:
256 softif_neigh_free_ref(softif_neigh);
257err:
258 kfree_skb(skb); 498 kfree_skb(skb);
499 if (softif_neigh)
500 softif_neigh_free_ref(softif_neigh);
501 if (curr_softif_neigh)
502 softif_neigh_free_ref(curr_softif_neigh);
503 if (primary_if)
504 hardif_free_ref(primary_if);
259 return; 505 return;
260} 506}
261 507
@@ -285,11 +531,11 @@ static int interface_set_mac_addr(struct net_device *dev, void *p)
285 if (!is_valid_ether_addr(addr->sa_data)) 531 if (!is_valid_ether_addr(addr->sa_data))
286 return -EADDRNOTAVAIL; 532 return -EADDRNOTAVAIL;
287 533
288 /* only modify hna-table if it has been initialised before */ 534 /* only modify transtable if it has been initialised before */
289 if (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE) { 535 if (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE) {
290 hna_local_remove(bat_priv, dev->dev_addr, 536 tt_local_remove(bat_priv, dev->dev_addr,
291 "mac address changed"); 537 "mac address changed");
292 hna_local_add(dev, addr->sa_data); 538 tt_local_add(dev, addr->sa_data);
293 } 539 }
294 540
295 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); 541 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
@@ -311,8 +557,10 @@ int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
311{ 557{
312 struct ethhdr *ethhdr = (struct ethhdr *)skb->data; 558 struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
313 struct bat_priv *bat_priv = netdev_priv(soft_iface); 559 struct bat_priv *bat_priv = netdev_priv(soft_iface);
560 struct hard_iface *primary_if = NULL;
314 struct bcast_packet *bcast_packet; 561 struct bcast_packet *bcast_packet;
315 struct vlan_ethhdr *vhdr; 562 struct vlan_ethhdr *vhdr;
563 struct softif_neigh *curr_softif_neigh = NULL;
316 int data_len = skb->len, ret; 564 int data_len = skb->len, ret;
317 short vid = -1; 565 short vid = -1;
318 bool do_bcast = false; 566 bool do_bcast = false;
@@ -340,11 +588,12 @@ int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
 340 * if we have another chosen mesh exit node in range 588
341 * it will transport the packets to the mesh 589 * it will transport the packets to the mesh
342 */ 590 */
343 if ((bat_priv->softif_neigh) && (bat_priv->softif_neigh->vid == vid)) 591 curr_softif_neigh = softif_neigh_vid_get_selected(bat_priv, vid);
592 if (curr_softif_neigh)
344 goto dropped; 593 goto dropped;
345 594
346 /* TODO: check this for locks */ 595 /* TODO: check this for locks */
347 hna_local_add(soft_iface, ethhdr->h_source); 596 tt_local_add(soft_iface, ethhdr->h_source);
348 597
349 if (is_multicast_ether_addr(ethhdr->h_dest)) { 598 if (is_multicast_ether_addr(ethhdr->h_dest)) {
350 ret = gw_is_target(bat_priv, skb); 599 ret = gw_is_target(bat_priv, skb);
@@ -358,7 +607,8 @@ int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
358 607
 359 /* Ethernet packet should be broadcast */ 608
360 if (do_bcast) { 609 if (do_bcast) {
361 if (!bat_priv->primary_if) 610 primary_if = primary_if_get_selected(bat_priv);
611 if (!primary_if)
362 goto dropped; 612 goto dropped;
363 613
364 if (my_skb_head_push(skb, sizeof(struct bcast_packet)) < 0) 614 if (my_skb_head_push(skb, sizeof(struct bcast_packet)) < 0)
@@ -374,7 +624,7 @@ int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
374 /* hw address of first interface is the orig mac because only 624 /* hw address of first interface is the orig mac because only
375 * this mac is known throughout the mesh */ 625 * this mac is known throughout the mesh */
376 memcpy(bcast_packet->orig, 626 memcpy(bcast_packet->orig,
377 bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN); 627 primary_if->net_dev->dev_addr, ETH_ALEN);
378 628
379 /* set broadcast sequence number */ 629 /* set broadcast sequence number */
380 bcast_packet->seqno = 630 bcast_packet->seqno =
@@ -402,6 +652,10 @@ dropped:
402dropped_freed: 652dropped_freed:
403 bat_priv->stats.tx_dropped++; 653 bat_priv->stats.tx_dropped++;
404end: 654end:
655 if (curr_softif_neigh)
656 softif_neigh_free_ref(curr_softif_neigh);
657 if (primary_if)
658 hardif_free_ref(primary_if);
405 return NETDEV_TX_OK; 659 return NETDEV_TX_OK;
406} 660}
407 661
@@ -413,6 +667,7 @@ void interface_rx(struct net_device *soft_iface,
413 struct unicast_packet *unicast_packet; 667 struct unicast_packet *unicast_packet;
414 struct ethhdr *ethhdr; 668 struct ethhdr *ethhdr;
415 struct vlan_ethhdr *vhdr; 669 struct vlan_ethhdr *vhdr;
670 struct softif_neigh *curr_softif_neigh = NULL;
416 short vid = -1; 671 short vid = -1;
417 int ret; 672 int ret;
418 673
@@ -442,7 +697,8 @@ void interface_rx(struct net_device *soft_iface,
 442 * if we have another chosen mesh exit node in range 697
443 * it will transport the packets to the non-mesh network 698 * it will transport the packets to the non-mesh network
444 */ 699 */
445 if ((bat_priv->softif_neigh) && (bat_priv->softif_neigh->vid == vid)) { 700 curr_softif_neigh = softif_neigh_vid_get_selected(bat_priv, vid);
701 if (curr_softif_neigh) {
446 skb_push(skb, hdr_size); 702 skb_push(skb, hdr_size);
447 unicast_packet = (struct unicast_packet *)skb->data; 703 unicast_packet = (struct unicast_packet *)skb->data;
448 704
@@ -453,7 +709,7 @@ void interface_rx(struct net_device *soft_iface,
453 skb_reset_mac_header(skb); 709 skb_reset_mac_header(skb);
454 710
455 memcpy(unicast_packet->dest, 711 memcpy(unicast_packet->dest,
456 bat_priv->softif_neigh->addr, ETH_ALEN); 712 curr_softif_neigh->addr, ETH_ALEN);
457 ret = route_unicast_packet(skb, recv_if); 713 ret = route_unicast_packet(skb, recv_if);
458 if (ret == NET_RX_DROP) 714 if (ret == NET_RX_DROP)
459 goto dropped; 715 goto dropped;
@@ -478,11 +734,13 @@ void interface_rx(struct net_device *soft_iface,
478 soft_iface->last_rx = jiffies; 734 soft_iface->last_rx = jiffies;
479 735
480 netif_rx(skb); 736 netif_rx(skb);
481 return; 737 goto out;
482 738
483dropped: 739dropped:
484 kfree_skb(skb); 740 kfree_skb(skb);
485out: 741out:
742 if (curr_softif_neigh)
743 softif_neigh_free_ref(curr_softif_neigh);
486 return; 744 return;
487} 745}
488 746
@@ -516,14 +774,15 @@ static void interface_setup(struct net_device *dev)
516 dev->hard_start_xmit = interface_tx; 774 dev->hard_start_xmit = interface_tx;
517#endif 775#endif
518 dev->destructor = free_netdev; 776 dev->destructor = free_netdev;
777 dev->tx_queue_len = 0;
519 778
520 /** 779 /**
521 * can't call min_mtu, because the needed variables 780 * can't call min_mtu, because the needed variables
522 * have not been initialized yet 781 * have not been initialized yet
523 */ 782 */
524 dev->mtu = ETH_DATA_LEN; 783 dev->mtu = ETH_DATA_LEN;
525 dev->hard_header_len = BAT_HEADER_LEN; /* reserve more space in the 784 /* reserve more space in the skbuff for our header */
526 * skbuff for our header */ 785 dev->hard_header_len = BAT_HEADER_LEN;
527 786
528 /* generate random address */ 787 /* generate random address */
529 random_ether_addr(dev_addr); 788 random_ether_addr(dev_addr);
@@ -548,7 +807,7 @@ struct net_device *softif_create(char *name)
548 goto out; 807 goto out;
549 } 808 }
550 809
551 ret = register_netdev(soft_iface); 810 ret = register_netdevice(soft_iface);
552 if (ret < 0) { 811 if (ret < 0) {
553 pr_err("Unable to register the batman interface '%s': %i\n", 812 pr_err("Unable to register the batman interface '%s': %i\n",
554 name, ret); 813 name, ret);
@@ -572,11 +831,10 @@ struct net_device *softif_create(char *name)
572 831
573 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE); 832 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
574 atomic_set(&bat_priv->bcast_seqno, 1); 833 atomic_set(&bat_priv->bcast_seqno, 1);
575 atomic_set(&bat_priv->hna_local_changed, 0); 834 atomic_set(&bat_priv->tt_local_changed, 0);
576 835
577 bat_priv->primary_if = NULL; 836 bat_priv->primary_if = NULL;
578 bat_priv->num_ifaces = 0; 837 bat_priv->num_ifaces = 0;
579 bat_priv->softif_neigh = NULL;
580 838
581 ret = sysfs_add_meshif(soft_iface); 839 ret = sysfs_add_meshif(soft_iface);
582 if (ret < 0) 840 if (ret < 0)
@@ -632,7 +890,7 @@ static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
632{ 890{
633 cmd->supported = 0; 891 cmd->supported = 0;
634 cmd->advertising = 0; 892 cmd->advertising = 0;
635 cmd->speed = SPEED_10; 893 ethtool_cmd_speed_set(cmd, SPEED_10);
636 cmd->duplex = DUPLEX_FULL; 894 cmd->duplex = DUPLEX_FULL;
637 cmd->port = PORT_TP; 895 cmd->port = PORT_TP;
638 cmd->phy_address = 0; 896 cmd->phy_address = 0;
@@ -667,12 +925,3 @@ static u32 bat_get_link(struct net_device *dev)
667 return 1; 925 return 1;
668} 926}
669 927
670static u32 bat_get_rx_csum(struct net_device *dev)
671{
672 return 0;
673}
674
675static int bat_set_rx_csum(struct net_device *dev, u32 data)
676{
677 return -EOPNOTSUPP;
678}
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 8d15b48d1692..7b729660cbfd 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -22,43 +22,44 @@
22#include "main.h" 22#include "main.h"
23#include "translation-table.h" 23#include "translation-table.h"
24#include "soft-interface.h" 24#include "soft-interface.h"
25#include "hard-interface.h"
25#include "hash.h" 26#include "hash.h"
26#include "originator.h" 27#include "originator.h"
27 28
28static void hna_local_purge(struct work_struct *work); 29static void tt_local_purge(struct work_struct *work);
29static void _hna_global_del_orig(struct bat_priv *bat_priv, 30static void _tt_global_del_orig(struct bat_priv *bat_priv,
30 struct hna_global_entry *hna_global_entry, 31 struct tt_global_entry *tt_global_entry,
31 char *message); 32 char *message);
32 33
33/* returns 1 if they are the same mac addr */ 34/* returns 1 if they are the same mac addr */
34static int compare_lhna(struct hlist_node *node, void *data2) 35static int compare_ltt(struct hlist_node *node, void *data2)
35{ 36{
36 void *data1 = container_of(node, struct hna_local_entry, hash_entry); 37 void *data1 = container_of(node, struct tt_local_entry, hash_entry);
37 38
38 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0); 39 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
39} 40}
40 41
41/* returns 1 if they are the same mac addr */ 42/* returns 1 if they are the same mac addr */
42static int compare_ghna(struct hlist_node *node, void *data2) 43static int compare_gtt(struct hlist_node *node, void *data2)
43{ 44{
44 void *data1 = container_of(node, struct hna_global_entry, hash_entry); 45 void *data1 = container_of(node, struct tt_global_entry, hash_entry);
45 46
46 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0); 47 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
47} 48}
48 49
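
compare_ltt() and compare_gtt() recover the enclosing entry from its embedded hlist_node via container_of() before comparing the leading MAC field. For readers outside the kernel tree: container_of() is plain pointer arithmetic over offsetof(); a freestanding sketch with the simplified one-line form of the macro:

#include <stddef.h>
#include <stdio.h>

/* Simplified form of the kernel macro: step back from the member to the
 * start of the enclosing structure. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct node { struct node *next; };

struct entry {
        unsigned char addr[6];
        struct node hash_entry;
};

int main(void)
{
        struct entry e = { { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x01 }, { NULL } };
        struct node *n = &e.hash_entry;

        printf("ok: %d\n", container_of(n, struct entry, hash_entry) == &e);
        return 0; /* prints "ok: 1" */
}
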
49static void hna_local_start_timer(struct bat_priv *bat_priv) 50static void tt_local_start_timer(struct bat_priv *bat_priv)
50{ 51{
51 INIT_DELAYED_WORK(&bat_priv->hna_work, hna_local_purge); 52 INIT_DELAYED_WORK(&bat_priv->tt_work, tt_local_purge);
52 queue_delayed_work(bat_event_workqueue, &bat_priv->hna_work, 10 * HZ); 53 queue_delayed_work(bat_event_workqueue, &bat_priv->tt_work, 10 * HZ);
53} 54}
54 55
55static struct hna_local_entry *hna_local_hash_find(struct bat_priv *bat_priv, 56static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv,
56 void *data) 57 void *data)
57{ 58{
58 struct hashtable_t *hash = bat_priv->hna_local_hash; 59 struct hashtable_t *hash = bat_priv->tt_local_hash;
59 struct hlist_head *head; 60 struct hlist_head *head;
60 struct hlist_node *node; 61 struct hlist_node *node;
61 struct hna_local_entry *hna_local_entry, *hna_local_entry_tmp = NULL; 62 struct tt_local_entry *tt_local_entry, *tt_local_entry_tmp = NULL;
62 int index; 63 int index;
63 64
64 if (!hash) 65 if (!hash)
@@ -68,26 +69,26 @@ static struct hna_local_entry *hna_local_hash_find(struct bat_priv *bat_priv,
68 head = &hash->table[index]; 69 head = &hash->table[index];
69 70
70 rcu_read_lock(); 71 rcu_read_lock();
71 hlist_for_each_entry_rcu(hna_local_entry, node, head, hash_entry) { 72 hlist_for_each_entry_rcu(tt_local_entry, node, head, hash_entry) {
72 if (!compare_eth(hna_local_entry, data)) 73 if (!compare_eth(tt_local_entry, data))
73 continue; 74 continue;
74 75
75 hna_local_entry_tmp = hna_local_entry; 76 tt_local_entry_tmp = tt_local_entry;
76 break; 77 break;
77 } 78 }
78 rcu_read_unlock(); 79 rcu_read_unlock();
79 80
80 return hna_local_entry_tmp; 81 return tt_local_entry_tmp;
81} 82}
82 83
83static struct hna_global_entry *hna_global_hash_find(struct bat_priv *bat_priv, 84static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv,
84 void *data) 85 void *data)
85{ 86{
86 struct hashtable_t *hash = bat_priv->hna_global_hash; 87 struct hashtable_t *hash = bat_priv->tt_global_hash;
87 struct hlist_head *head; 88 struct hlist_head *head;
88 struct hlist_node *node; 89 struct hlist_node *node;
89 struct hna_global_entry *hna_global_entry; 90 struct tt_global_entry *tt_global_entry;
90 struct hna_global_entry *hna_global_entry_tmp = NULL; 91 struct tt_global_entry *tt_global_entry_tmp = NULL;
91 int index; 92 int index;
92 93
93 if (!hash) 94 if (!hash)
@@ -97,125 +98,125 @@ static struct hna_global_entry *hna_global_hash_find(struct bat_priv *bat_priv,
97 head = &hash->table[index]; 98 head = &hash->table[index];
98 99
99 rcu_read_lock(); 100 rcu_read_lock();
100 hlist_for_each_entry_rcu(hna_global_entry, node, head, hash_entry) { 101 hlist_for_each_entry_rcu(tt_global_entry, node, head, hash_entry) {
101 if (!compare_eth(hna_global_entry, data)) 102 if (!compare_eth(tt_global_entry, data))
102 continue; 103 continue;
103 104
104 hna_global_entry_tmp = hna_global_entry; 105 tt_global_entry_tmp = tt_global_entry;
105 break; 106 break;
106 } 107 }
107 rcu_read_unlock(); 108 rcu_read_unlock();
108 109
109 return hna_global_entry_tmp; 110 return tt_global_entry_tmp;
110} 111}
111 112
112int hna_local_init(struct bat_priv *bat_priv) 113int tt_local_init(struct bat_priv *bat_priv)
113{ 114{
114 if (bat_priv->hna_local_hash) 115 if (bat_priv->tt_local_hash)
115 return 1; 116 return 1;
116 117
117 bat_priv->hna_local_hash = hash_new(1024); 118 bat_priv->tt_local_hash = hash_new(1024);
118 119
119 if (!bat_priv->hna_local_hash) 120 if (!bat_priv->tt_local_hash)
120 return 0; 121 return 0;
121 122
122 atomic_set(&bat_priv->hna_local_changed, 0); 123 atomic_set(&bat_priv->tt_local_changed, 0);
123 hna_local_start_timer(bat_priv); 124 tt_local_start_timer(bat_priv);
124 125
125 return 1; 126 return 1;
126} 127}
127 128
128void hna_local_add(struct net_device *soft_iface, uint8_t *addr) 129void tt_local_add(struct net_device *soft_iface, uint8_t *addr)
129{ 130{
130 struct bat_priv *bat_priv = netdev_priv(soft_iface); 131 struct bat_priv *bat_priv = netdev_priv(soft_iface);
131 struct hna_local_entry *hna_local_entry; 132 struct tt_local_entry *tt_local_entry;
132 struct hna_global_entry *hna_global_entry; 133 struct tt_global_entry *tt_global_entry;
133 int required_bytes; 134 int required_bytes;
134 135
135 spin_lock_bh(&bat_priv->hna_lhash_lock); 136 spin_lock_bh(&bat_priv->tt_lhash_lock);
136 hna_local_entry = hna_local_hash_find(bat_priv, addr); 137 tt_local_entry = tt_local_hash_find(bat_priv, addr);
137 spin_unlock_bh(&bat_priv->hna_lhash_lock); 138 spin_unlock_bh(&bat_priv->tt_lhash_lock);
138 139
139 if (hna_local_entry) { 140 if (tt_local_entry) {
140 hna_local_entry->last_seen = jiffies; 141 tt_local_entry->last_seen = jiffies;
141 return; 142 return;
142 } 143 }
143 144
144 /* only announce as many hosts as possible in the batman-packet and 145 /* only announce as many hosts as possible in the batman-packet and
 145 space in batman_packet->num_hna. That also should give a limit to 146 space in batman_packet->num_tt. That also should give a limit to
146 MAC-flooding. */ 147 MAC-flooding. */
147 required_bytes = (bat_priv->num_local_hna + 1) * ETH_ALEN; 148 required_bytes = (bat_priv->num_local_tt + 1) * ETH_ALEN;
148 required_bytes += BAT_PACKET_LEN; 149 required_bytes += BAT_PACKET_LEN;
149 150
150 if ((required_bytes > ETH_DATA_LEN) || 151 if ((required_bytes > ETH_DATA_LEN) ||
151 (atomic_read(&bat_priv->aggregated_ogms) && 152 (atomic_read(&bat_priv->aggregated_ogms) &&
152 required_bytes > MAX_AGGREGATION_BYTES) || 153 required_bytes > MAX_AGGREGATION_BYTES) ||
153 (bat_priv->num_local_hna + 1 > 255)) { 154 (bat_priv->num_local_tt + 1 > 255)) {
154 bat_dbg(DBG_ROUTES, bat_priv, 155 bat_dbg(DBG_ROUTES, bat_priv,
155 "Can't add new local hna entry (%pM): " 156 "Can't add new local tt entry (%pM): "
156 "number of local hna entries exceeds packet size\n", 157 "number of local tt entries exceeds packet size\n",
157 addr); 158 addr);
158 return; 159 return;
159 } 160 }
160 161
161 bat_dbg(DBG_ROUTES, bat_priv, 162 bat_dbg(DBG_ROUTES, bat_priv,
162 "Creating new local hna entry: %pM\n", addr); 163 "Creating new local tt entry: %pM\n", addr);
163 164
164 hna_local_entry = kmalloc(sizeof(struct hna_local_entry), GFP_ATOMIC); 165 tt_local_entry = kmalloc(sizeof(struct tt_local_entry), GFP_ATOMIC);
165 if (!hna_local_entry) 166 if (!tt_local_entry)
166 return; 167 return;
167 168
168 memcpy(hna_local_entry->addr, addr, ETH_ALEN); 169 memcpy(tt_local_entry->addr, addr, ETH_ALEN);
169 hna_local_entry->last_seen = jiffies; 170 tt_local_entry->last_seen = jiffies;
170 171
171 /* the batman interface mac address should never be purged */ 172 /* the batman interface mac address should never be purged */
172 if (compare_eth(addr, soft_iface->dev_addr)) 173 if (compare_eth(addr, soft_iface->dev_addr))
173 hna_local_entry->never_purge = 1; 174 tt_local_entry->never_purge = 1;
174 else 175 else
175 hna_local_entry->never_purge = 0; 176 tt_local_entry->never_purge = 0;
176 177
177 spin_lock_bh(&bat_priv->hna_lhash_lock); 178 spin_lock_bh(&bat_priv->tt_lhash_lock);
178 179
179 hash_add(bat_priv->hna_local_hash, compare_lhna, choose_orig, 180 hash_add(bat_priv->tt_local_hash, compare_ltt, choose_orig,
180 hna_local_entry, &hna_local_entry->hash_entry); 181 tt_local_entry, &tt_local_entry->hash_entry);
181 bat_priv->num_local_hna++; 182 bat_priv->num_local_tt++;
182 atomic_set(&bat_priv->hna_local_changed, 1); 183 atomic_set(&bat_priv->tt_local_changed, 1);
183 184
184 spin_unlock_bh(&bat_priv->hna_lhash_lock); 185 spin_unlock_bh(&bat_priv->tt_lhash_lock);
185 186
186 /* remove address from global hash if present */ 187 /* remove address from global hash if present */
187 spin_lock_bh(&bat_priv->hna_ghash_lock); 188 spin_lock_bh(&bat_priv->tt_ghash_lock);
188 189
189 hna_global_entry = hna_global_hash_find(bat_priv, addr); 190 tt_global_entry = tt_global_hash_find(bat_priv, addr);
190 191
191 if (hna_global_entry) 192 if (tt_global_entry)
192 _hna_global_del_orig(bat_priv, hna_global_entry, 193 _tt_global_del_orig(bat_priv, tt_global_entry,
193 "local hna received"); 194 "local tt received");
194 195
195 spin_unlock_bh(&bat_priv->hna_ghash_lock); 196 spin_unlock_bh(&bat_priv->tt_ghash_lock);
196} 197}
197 198
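
tt_local_add() above admits a new client only while announcing one more address still fits: the table rides inside every OGM, so it is bounded by the frame (or aggregation) budget and by the 8-bit entry counter. The admission check in isolation, with placeholder values for BAT_PACKET_LEN and MAX_AGGREGATION_BYTES (the real constants are defined elsewhere in the tree):

#include <stdbool.h>
#include <stdio.h>

#define ETH_ALEN              6
#define ETH_DATA_LEN          1500
#define BAT_PACKET_LEN        26  /* placeholder, not the real value */
#define MAX_AGGREGATION_BYTES 512 /* placeholder, not the real value */

/* True if one more announced client still fits the packet budget and the
 * 8-bit num_tt counter. */
static bool tt_entry_fits(int num_local_tt, bool aggregated_ogms)
{
        int required_bytes = (num_local_tt + 1) * ETH_ALEN + BAT_PACKET_LEN;

        if (required_bytes > ETH_DATA_LEN)
                return false;
        if (aggregated_ogms && required_bytes > MAX_AGGREGATION_BYTES)
                return false;
        if (num_local_tt + 1 > 255)
                return false;
        return true;
}

int main(void)
{
        printf("%d %d\n", tt_entry_fits(10, true), tt_entry_fits(100, true));
        return 0; /* prints "1 0" with the placeholder budgets above */
}
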
198int hna_local_fill_buffer(struct bat_priv *bat_priv, 199int tt_local_fill_buffer(struct bat_priv *bat_priv,
199 unsigned char *buff, int buff_len) 200 unsigned char *buff, int buff_len)
200{ 201{
201 struct hashtable_t *hash = bat_priv->hna_local_hash; 202 struct hashtable_t *hash = bat_priv->tt_local_hash;
202 struct hna_local_entry *hna_local_entry; 203 struct tt_local_entry *tt_local_entry;
203 struct hlist_node *node; 204 struct hlist_node *node;
204 struct hlist_head *head; 205 struct hlist_head *head;
205 int i, count = 0; 206 int i, count = 0;
206 207
207 spin_lock_bh(&bat_priv->hna_lhash_lock); 208 spin_lock_bh(&bat_priv->tt_lhash_lock);
208 209
209 for (i = 0; i < hash->size; i++) { 210 for (i = 0; i < hash->size; i++) {
210 head = &hash->table[i]; 211 head = &hash->table[i];
211 212
212 rcu_read_lock(); 213 rcu_read_lock();
213 hlist_for_each_entry_rcu(hna_local_entry, node, 214 hlist_for_each_entry_rcu(tt_local_entry, node,
214 head, hash_entry) { 215 head, hash_entry) {
215 if (buff_len < (count + 1) * ETH_ALEN) 216 if (buff_len < (count + 1) * ETH_ALEN)
216 break; 217 break;
217 218
218 memcpy(buff + (count * ETH_ALEN), hna_local_entry->addr, 219 memcpy(buff + (count * ETH_ALEN), tt_local_entry->addr,
219 ETH_ALEN); 220 ETH_ALEN);
220 221
221 count++; 222 count++;
@@ -223,37 +224,47 @@ int hna_local_fill_buffer(struct bat_priv *bat_priv,
223 rcu_read_unlock(); 224 rcu_read_unlock();
224 } 225 }
225 226
 226 /* if we did not get all new local hnas, see you next time ;-) */ 227 /* if we did not get all new local tts, see you next time ;-) */
227 if (count == bat_priv->num_local_hna) 228 if (count == bat_priv->num_local_tt)
228 atomic_set(&bat_priv->hna_local_changed, 0); 229 atomic_set(&bat_priv->tt_local_changed, 0);
229 230
230 spin_unlock_bh(&bat_priv->hna_lhash_lock); 231 spin_unlock_bh(&bat_priv->tt_lhash_lock);
231 return count; 232 return count;
232} 233}
233 234
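
tt_local_fill_buffer() copies as many 6-byte addresses as the caller's buffer holds and reports the count; tt_local_changed is cleared only when the whole table fit. The packing loop in isolation, with a flat array standing in for the hash walk:

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* Pack up to buff_len/ETH_ALEN addresses from 'addrs' into 'buff';
 * returns the number actually copied. */
static int fill_buffer(unsigned char *buff, int buff_len,
                       const unsigned char (*addrs)[ETH_ALEN], int n)
{
        int count = 0;

        /* Stop at the caller's buffer limit, exactly like the hash walk. */
        while (count < n && buff_len >= (count + 1) * ETH_ALEN) {
                memcpy(buff + count * ETH_ALEN, addrs[count], ETH_ALEN);
                count++;
        }
        return count;
}

int main(void)
{
        const unsigned char addrs[2][ETH_ALEN] = {
                { 0x02, 0, 0, 0, 0, 0x01 }, { 0x02, 0, 0, 0, 0, 0x02 },
        };
        unsigned char buff[ETH_ALEN]; /* room for one entry only */

        printf("copied %d of 2\n", fill_buffer(buff, sizeof(buff), addrs, 2));
        return 0;
}
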
234int hna_local_seq_print_text(struct seq_file *seq, void *offset) 235int tt_local_seq_print_text(struct seq_file *seq, void *offset)
235{ 236{
236 struct net_device *net_dev = (struct net_device *)seq->private; 237 struct net_device *net_dev = (struct net_device *)seq->private;
237 struct bat_priv *bat_priv = netdev_priv(net_dev); 238 struct bat_priv *bat_priv = netdev_priv(net_dev);
238 struct hashtable_t *hash = bat_priv->hna_local_hash; 239 struct hashtable_t *hash = bat_priv->tt_local_hash;
239 struct hna_local_entry *hna_local_entry; 240 struct tt_local_entry *tt_local_entry;
241 struct hard_iface *primary_if;
240 struct hlist_node *node; 242 struct hlist_node *node;
241 struct hlist_head *head; 243 struct hlist_head *head;
242 size_t buf_size, pos; 244 size_t buf_size, pos;
243 char *buff; 245 char *buff;
244 int i; 246 int i, ret = 0;
245 247
246 if (!bat_priv->primary_if) { 248 primary_if = primary_if_get_selected(bat_priv);
247 return seq_printf(seq, "BATMAN mesh %s disabled - " 249 if (!primary_if) {
248 "please specify interfaces to enable it\n", 250 ret = seq_printf(seq, "BATMAN mesh %s disabled - "
249 net_dev->name); 251 "please specify interfaces to enable it\n",
252 net_dev->name);
253 goto out;
254 }
255
256 if (primary_if->if_status != IF_ACTIVE) {
257 ret = seq_printf(seq, "BATMAN mesh %s disabled - "
258 "primary interface not active\n",
259 net_dev->name);
260 goto out;
250 } 261 }
251 262
252 seq_printf(seq, "Locally retrieved addresses (from %s) " 263 seq_printf(seq, "Locally retrieved addresses (from %s) "
253 "announced via HNA:\n", 264 "announced via TT:\n",
254 net_dev->name); 265 net_dev->name);
255 266
256 spin_lock_bh(&bat_priv->hna_lhash_lock); 267 spin_lock_bh(&bat_priv->tt_lhash_lock);
257 268
258 buf_size = 1; 269 buf_size = 1;
259 /* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */ 270 /* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */
@@ -268,8 +279,9 @@ int hna_local_seq_print_text(struct seq_file *seq, void *offset)
268 279
269 buff = kmalloc(buf_size, GFP_ATOMIC); 280 buff = kmalloc(buf_size, GFP_ATOMIC);
270 if (!buff) { 281 if (!buff) {
271 spin_unlock_bh(&bat_priv->hna_lhash_lock); 282 spin_unlock_bh(&bat_priv->tt_lhash_lock);
272 return -ENOMEM; 283 ret = -ENOMEM;
284 goto out;
273 } 285 }
274 286
275 buff[0] = '\0'; 287 buff[0] = '\0';
@@ -279,211 +291,225 @@ int hna_local_seq_print_text(struct seq_file *seq, void *offset)
279 head = &hash->table[i]; 291 head = &hash->table[i];
280 292
281 rcu_read_lock(); 293 rcu_read_lock();
282 hlist_for_each_entry_rcu(hna_local_entry, node, 294 hlist_for_each_entry_rcu(tt_local_entry, node,
283 head, hash_entry) { 295 head, hash_entry) {
284 pos += snprintf(buff + pos, 22, " * %pM\n", 296 pos += snprintf(buff + pos, 22, " * %pM\n",
285 hna_local_entry->addr); 297 tt_local_entry->addr);
286 } 298 }
287 rcu_read_unlock(); 299 rcu_read_unlock();
288 } 300 }
289 301
290 spin_unlock_bh(&bat_priv->hna_lhash_lock); 302 spin_unlock_bh(&bat_priv->tt_lhash_lock);
291 303
292 seq_printf(seq, "%s", buff); 304 seq_printf(seq, "%s", buff);
293 kfree(buff); 305 kfree(buff);
294 return 0; 306out:
307 if (primary_if)
308 hardif_free_ref(primary_if);
309 return ret;
295} 310}
296 311
297static void _hna_local_del(struct hlist_node *node, void *arg) 312static void _tt_local_del(struct hlist_node *node, void *arg)
298{ 313{
299 struct bat_priv *bat_priv = (struct bat_priv *)arg; 314 struct bat_priv *bat_priv = (struct bat_priv *)arg;
300 void *data = container_of(node, struct hna_local_entry, hash_entry); 315 void *data = container_of(node, struct tt_local_entry, hash_entry);
301 316
302 kfree(data); 317 kfree(data);
303 bat_priv->num_local_hna--; 318 bat_priv->num_local_tt--;
304 atomic_set(&bat_priv->hna_local_changed, 1); 319 atomic_set(&bat_priv->tt_local_changed, 1);
305} 320}
306 321
307static void hna_local_del(struct bat_priv *bat_priv, 322static void tt_local_del(struct bat_priv *bat_priv,
308 struct hna_local_entry *hna_local_entry, 323 struct tt_local_entry *tt_local_entry,
309 char *message) 324 char *message)
310{ 325{
311 bat_dbg(DBG_ROUTES, bat_priv, "Deleting local hna entry (%pM): %s\n", 326 bat_dbg(DBG_ROUTES, bat_priv, "Deleting local tt entry (%pM): %s\n",
312 hna_local_entry->addr, message); 327 tt_local_entry->addr, message);
313 328
314 hash_remove(bat_priv->hna_local_hash, compare_lhna, choose_orig, 329 hash_remove(bat_priv->tt_local_hash, compare_ltt, choose_orig,
315 hna_local_entry->addr); 330 tt_local_entry->addr);
316 _hna_local_del(&hna_local_entry->hash_entry, bat_priv); 331 _tt_local_del(&tt_local_entry->hash_entry, bat_priv);
317} 332}
318 333
319void hna_local_remove(struct bat_priv *bat_priv, 334void tt_local_remove(struct bat_priv *bat_priv,
320 uint8_t *addr, char *message) 335 uint8_t *addr, char *message)
321{ 336{
322 struct hna_local_entry *hna_local_entry; 337 struct tt_local_entry *tt_local_entry;
323 338
324 spin_lock_bh(&bat_priv->hna_lhash_lock); 339 spin_lock_bh(&bat_priv->tt_lhash_lock);
325 340
326 hna_local_entry = hna_local_hash_find(bat_priv, addr); 341 tt_local_entry = tt_local_hash_find(bat_priv, addr);
327 342
328 if (hna_local_entry) 343 if (tt_local_entry)
329 hna_local_del(bat_priv, hna_local_entry, message); 344 tt_local_del(bat_priv, tt_local_entry, message);
330 345
331 spin_unlock_bh(&bat_priv->hna_lhash_lock); 346 spin_unlock_bh(&bat_priv->tt_lhash_lock);
332} 347}
333 348
334static void hna_local_purge(struct work_struct *work) 349static void tt_local_purge(struct work_struct *work)
335{ 350{
336 struct delayed_work *delayed_work = 351 struct delayed_work *delayed_work =
337 container_of(work, struct delayed_work, work); 352 container_of(work, struct delayed_work, work);
338 struct bat_priv *bat_priv = 353 struct bat_priv *bat_priv =
339 container_of(delayed_work, struct bat_priv, hna_work); 354 container_of(delayed_work, struct bat_priv, tt_work);
340 struct hashtable_t *hash = bat_priv->hna_local_hash; 355 struct hashtable_t *hash = bat_priv->tt_local_hash;
341 struct hna_local_entry *hna_local_entry; 356 struct tt_local_entry *tt_local_entry;
342 struct hlist_node *node, *node_tmp; 357 struct hlist_node *node, *node_tmp;
343 struct hlist_head *head; 358 struct hlist_head *head;
344 unsigned long timeout; 359 unsigned long timeout;
345 int i; 360 int i;
346 361
347 spin_lock_bh(&bat_priv->hna_lhash_lock); 362 spin_lock_bh(&bat_priv->tt_lhash_lock);
348 363
349 for (i = 0; i < hash->size; i++) { 364 for (i = 0; i < hash->size; i++) {
350 head = &hash->table[i]; 365 head = &hash->table[i];
351 366
352 hlist_for_each_entry_safe(hna_local_entry, node, node_tmp, 367 hlist_for_each_entry_safe(tt_local_entry, node, node_tmp,
353 head, hash_entry) { 368 head, hash_entry) {
354 if (hna_local_entry->never_purge) 369 if (tt_local_entry->never_purge)
355 continue; 370 continue;
356 371
357 timeout = hna_local_entry->last_seen; 372 timeout = tt_local_entry->last_seen;
358 timeout += LOCAL_HNA_TIMEOUT * HZ; 373 timeout += TT_LOCAL_TIMEOUT * HZ;
359 374
360 if (time_before(jiffies, timeout)) 375 if (time_before(jiffies, timeout))
361 continue; 376 continue;
362 377
363 hna_local_del(bat_priv, hna_local_entry, 378 tt_local_del(bat_priv, tt_local_entry,
364 "address timed out"); 379 "address timed out");
365 } 380 }
366 } 381 }
367 382
368 spin_unlock_bh(&bat_priv->hna_lhash_lock); 383 spin_unlock_bh(&bat_priv->tt_lhash_lock);
369 hna_local_start_timer(bat_priv); 384 tt_local_start_timer(bat_priv);
370} 385}
371 386
372void hna_local_free(struct bat_priv *bat_priv) 387void tt_local_free(struct bat_priv *bat_priv)
373{ 388{
374 if (!bat_priv->hna_local_hash) 389 if (!bat_priv->tt_local_hash)
375 return; 390 return;
376 391
377 cancel_delayed_work_sync(&bat_priv->hna_work); 392 cancel_delayed_work_sync(&bat_priv->tt_work);
378 hash_delete(bat_priv->hna_local_hash, _hna_local_del, bat_priv); 393 hash_delete(bat_priv->tt_local_hash, _tt_local_del, bat_priv);
379 bat_priv->hna_local_hash = NULL; 394 bat_priv->tt_local_hash = NULL;
380} 395}
381 396
382int hna_global_init(struct bat_priv *bat_priv) 397int tt_global_init(struct bat_priv *bat_priv)
383{ 398{
384 if (bat_priv->hna_global_hash) 399 if (bat_priv->tt_global_hash)
385 return 1; 400 return 1;
386 401
387 bat_priv->hna_global_hash = hash_new(1024); 402 bat_priv->tt_global_hash = hash_new(1024);
388 403
389 if (!bat_priv->hna_global_hash) 404 if (!bat_priv->tt_global_hash)
390 return 0; 405 return 0;
391 406
392 return 1; 407 return 1;
393} 408}
394 409
395void hna_global_add_orig(struct bat_priv *bat_priv, 410void tt_global_add_orig(struct bat_priv *bat_priv,
396 struct orig_node *orig_node, 411 struct orig_node *orig_node,
397 unsigned char *hna_buff, int hna_buff_len) 412 unsigned char *tt_buff, int tt_buff_len)
398{ 413{
399 struct hna_global_entry *hna_global_entry; 414 struct tt_global_entry *tt_global_entry;
400 struct hna_local_entry *hna_local_entry; 415 struct tt_local_entry *tt_local_entry;
401 int hna_buff_count = 0; 416 int tt_buff_count = 0;
402 unsigned char *hna_ptr; 417 unsigned char *tt_ptr;
403 418
404 while ((hna_buff_count + 1) * ETH_ALEN <= hna_buff_len) { 419 while ((tt_buff_count + 1) * ETH_ALEN <= tt_buff_len) {
405 spin_lock_bh(&bat_priv->hna_ghash_lock); 420 spin_lock_bh(&bat_priv->tt_ghash_lock);
406 421
407 hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN); 422 tt_ptr = tt_buff + (tt_buff_count * ETH_ALEN);
408 hna_global_entry = hna_global_hash_find(bat_priv, hna_ptr); 423 tt_global_entry = tt_global_hash_find(bat_priv, tt_ptr);
409 424
410 if (!hna_global_entry) { 425 if (!tt_global_entry) {
411 spin_unlock_bh(&bat_priv->hna_ghash_lock); 426 spin_unlock_bh(&bat_priv->tt_ghash_lock);
412 427
413 hna_global_entry = 428 tt_global_entry =
414 kmalloc(sizeof(struct hna_global_entry), 429 kmalloc(sizeof(struct tt_global_entry),
415 GFP_ATOMIC); 430 GFP_ATOMIC);
416 431
417 if (!hna_global_entry) 432 if (!tt_global_entry)
418 break; 433 break;
419 434
420 memcpy(hna_global_entry->addr, hna_ptr, ETH_ALEN); 435 memcpy(tt_global_entry->addr, tt_ptr, ETH_ALEN);
421 436
422 bat_dbg(DBG_ROUTES, bat_priv, 437 bat_dbg(DBG_ROUTES, bat_priv,
423 "Creating new global hna entry: " 438 "Creating new global tt entry: "
424 "%pM (via %pM)\n", 439 "%pM (via %pM)\n",
425 hna_global_entry->addr, orig_node->orig); 440 tt_global_entry->addr, orig_node->orig);
426 441
427 spin_lock_bh(&bat_priv->hna_ghash_lock); 442 spin_lock_bh(&bat_priv->tt_ghash_lock);
428 hash_add(bat_priv->hna_global_hash, compare_ghna, 443 hash_add(bat_priv->tt_global_hash, compare_gtt,
429 choose_orig, hna_global_entry, 444 choose_orig, tt_global_entry,
430 &hna_global_entry->hash_entry); 445 &tt_global_entry->hash_entry);
431 446
432 } 447 }
433 448
434 hna_global_entry->orig_node = orig_node; 449 tt_global_entry->orig_node = orig_node;
435 spin_unlock_bh(&bat_priv->hna_ghash_lock); 450 spin_unlock_bh(&bat_priv->tt_ghash_lock);
436 451
437 /* remove address from local hash if present */ 452 /* remove address from local hash if present */
438 spin_lock_bh(&bat_priv->hna_lhash_lock); 453 spin_lock_bh(&bat_priv->tt_lhash_lock);
439 454
440 hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN); 455 tt_ptr = tt_buff + (tt_buff_count * ETH_ALEN);
441 hna_local_entry = hna_local_hash_find(bat_priv, hna_ptr); 456 tt_local_entry = tt_local_hash_find(bat_priv, tt_ptr);
442 457
443 if (hna_local_entry) 458 if (tt_local_entry)
444 hna_local_del(bat_priv, hna_local_entry, 459 tt_local_del(bat_priv, tt_local_entry,
445 "global hna received"); 460 "global tt received");
446 461
447 spin_unlock_bh(&bat_priv->hna_lhash_lock); 462 spin_unlock_bh(&bat_priv->tt_lhash_lock);
448 463
449 hna_buff_count++; 464 tt_buff_count++;
450 } 465 }
451 466
452 /* initialize, and overwrite if malloc succeeds */ 467 /* initialize, and overwrite if malloc succeeds */
453 orig_node->hna_buff = NULL; 468 orig_node->tt_buff = NULL;
454 orig_node->hna_buff_len = 0; 469 orig_node->tt_buff_len = 0;
455 470
456 if (hna_buff_len > 0) { 471 if (tt_buff_len > 0) {
457 orig_node->hna_buff = kmalloc(hna_buff_len, GFP_ATOMIC); 472 orig_node->tt_buff = kmalloc(tt_buff_len, GFP_ATOMIC);
458 if (orig_node->hna_buff) { 473 if (orig_node->tt_buff) {
459 memcpy(orig_node->hna_buff, hna_buff, hna_buff_len); 474 memcpy(orig_node->tt_buff, tt_buff, tt_buff_len);
460 orig_node->hna_buff_len = hna_buff_len; 475 orig_node->tt_buff_len = tt_buff_len;
461 } 476 }
462 } 477 }
463} 478}
464 479
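
tt_global_add_orig() finishes by keeping a verbatim snapshot of the announced buffer on the originator so tt_global_del_orig() can later replay it; note the defensive order, fields zeroed first and only overwritten once the allocation succeeds. The same pattern in plain C, with malloc() in place of kmalloc():

#include <stdlib.h>
#include <string.h>

struct orig_sketch {
        unsigned char *tt_buff;
        int tt_buff_len;
};

/* Snapshot 'buff' onto the originator; on allocation failure the
 * originator simply keeps an empty snapshot. */
static void snapshot_tt_buff(struct orig_sketch *orig,
                             const unsigned char *buff, int len)
{
        orig->tt_buff = NULL;   /* initialize... */
        orig->tt_buff_len = 0;

        if (len <= 0)
                return;

        orig->tt_buff = malloc(len);
        if (!orig->tt_buff)
                return;

        memcpy(orig->tt_buff, buff, len); /* ...and overwrite on success */
        orig->tt_buff_len = len;
}

int main(void)
{
        unsigned char announced[12] = { 0 };
        struct orig_sketch orig;

        snapshot_tt_buff(&orig, announced, sizeof(announced));
        free(orig.tt_buff);
        return 0;
}
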
465int hna_global_seq_print_text(struct seq_file *seq, void *offset) 480int tt_global_seq_print_text(struct seq_file *seq, void *offset)
466{ 481{
467 struct net_device *net_dev = (struct net_device *)seq->private; 482 struct net_device *net_dev = (struct net_device *)seq->private;
468 struct bat_priv *bat_priv = netdev_priv(net_dev); 483 struct bat_priv *bat_priv = netdev_priv(net_dev);
469 struct hashtable_t *hash = bat_priv->hna_global_hash; 484 struct hashtable_t *hash = bat_priv->tt_global_hash;
470 struct hna_global_entry *hna_global_entry; 485 struct tt_global_entry *tt_global_entry;
486 struct hard_iface *primary_if;
471 struct hlist_node *node; 487 struct hlist_node *node;
472 struct hlist_head *head; 488 struct hlist_head *head;
473 size_t buf_size, pos; 489 size_t buf_size, pos;
474 char *buff; 490 char *buff;
475 int i; 491 int i, ret = 0;
476 492
477 if (!bat_priv->primary_if) { 493 primary_if = primary_if_get_selected(bat_priv);
478 return seq_printf(seq, "BATMAN mesh %s disabled - " 494 if (!primary_if) {
479 "please specify interfaces to enable it\n", 495 ret = seq_printf(seq, "BATMAN mesh %s disabled - please "
480 net_dev->name); 496 "specify interfaces to enable it\n",
497 net_dev->name);
498 goto out;
481 } 499 }
482 500
483 seq_printf(seq, "Globally announced HNAs received via the mesh %s\n", 501 if (primary_if->if_status != IF_ACTIVE) {
502 ret = seq_printf(seq, "BATMAN mesh %s disabled - "
503 "primary interface not active\n",
504 net_dev->name);
505 goto out;
506 }
507
508 seq_printf(seq,
509 "Globally announced TT entries received via the mesh %s\n",
484 net_dev->name); 510 net_dev->name);
485 511
486 spin_lock_bh(&bat_priv->hna_ghash_lock); 512 spin_lock_bh(&bat_priv->tt_ghash_lock);
487 513
488 buf_size = 1; 514 buf_size = 1;
489 /* Estimate length for: " * xx:xx:xx:xx:xx:xx via xx:xx:xx:xx:xx:xx\n"*/ 515 /* Estimate length for: " * xx:xx:xx:xx:xx:xx via xx:xx:xx:xx:xx:xx\n"*/
@@ -498,8 +524,9 @@ int hna_global_seq_print_text(struct seq_file *seq, void *offset)
498 524
499 buff = kmalloc(buf_size, GFP_ATOMIC); 525 buff = kmalloc(buf_size, GFP_ATOMIC);
500 if (!buff) { 526 if (!buff) {
501 spin_unlock_bh(&bat_priv->hna_ghash_lock); 527 spin_unlock_bh(&bat_priv->tt_ghash_lock);
502 return -ENOMEM; 528 ret = -ENOMEM;
529 goto out;
503 } 530 }
504 buff[0] = '\0'; 531 buff[0] = '\0';
505 pos = 0; 532 pos = 0;
@@ -508,101 +535,104 @@ int hna_global_seq_print_text(struct seq_file *seq, void *offset)
508 head = &hash->table[i]; 535 head = &hash->table[i];
509 536
510 rcu_read_lock(); 537 rcu_read_lock();
511 hlist_for_each_entry_rcu(hna_global_entry, node, 538 hlist_for_each_entry_rcu(tt_global_entry, node,
512 head, hash_entry) { 539 head, hash_entry) {
513 pos += snprintf(buff + pos, 44, 540 pos += snprintf(buff + pos, 44,
514 " * %pM via %pM\n", 541 " * %pM via %pM\n",
515 hna_global_entry->addr, 542 tt_global_entry->addr,
516 hna_global_entry->orig_node->orig); 543 tt_global_entry->orig_node->orig);
517 } 544 }
518 rcu_read_unlock(); 545 rcu_read_unlock();
519 } 546 }
520 547
521 spin_unlock_bh(&bat_priv->hna_ghash_lock); 548 spin_unlock_bh(&bat_priv->tt_ghash_lock);
522 549
523 seq_printf(seq, "%s", buff); 550 seq_printf(seq, "%s", buff);
524 kfree(buff); 551 kfree(buff);
525 return 0; 552out:
553 if (primary_if)
554 hardif_free_ref(primary_if);
555 return ret;
526} 556}
527 557
528static void _hna_global_del_orig(struct bat_priv *bat_priv, 558static void _tt_global_del_orig(struct bat_priv *bat_priv,
529 struct hna_global_entry *hna_global_entry, 559 struct tt_global_entry *tt_global_entry,
530 char *message) 560 char *message)
531{ 561{
532 bat_dbg(DBG_ROUTES, bat_priv, 562 bat_dbg(DBG_ROUTES, bat_priv,
533 "Deleting global hna entry %pM (via %pM): %s\n", 563 "Deleting global tt entry %pM (via %pM): %s\n",
534 hna_global_entry->addr, hna_global_entry->orig_node->orig, 564 tt_global_entry->addr, tt_global_entry->orig_node->orig,
535 message); 565 message);
536 566
537 hash_remove(bat_priv->hna_global_hash, compare_ghna, choose_orig, 567 hash_remove(bat_priv->tt_global_hash, compare_gtt, choose_orig,
538 hna_global_entry->addr); 568 tt_global_entry->addr);
539 kfree(hna_global_entry); 569 kfree(tt_global_entry);
540} 570}
541 571
542void hna_global_del_orig(struct bat_priv *bat_priv, 572void tt_global_del_orig(struct bat_priv *bat_priv,
543 struct orig_node *orig_node, char *message) 573 struct orig_node *orig_node, char *message)
544{ 574{
545 struct hna_global_entry *hna_global_entry; 575 struct tt_global_entry *tt_global_entry;
546 int hna_buff_count = 0; 576 int tt_buff_count = 0;
547 unsigned char *hna_ptr; 577 unsigned char *tt_ptr;
548 578
549 if (orig_node->hna_buff_len == 0) 579 if (orig_node->tt_buff_len == 0)
550 return; 580 return;
551 581
552 spin_lock_bh(&bat_priv->hna_ghash_lock); 582 spin_lock_bh(&bat_priv->tt_ghash_lock);
553 583
554 while ((hna_buff_count + 1) * ETH_ALEN <= orig_node->hna_buff_len) { 584 while ((tt_buff_count + 1) * ETH_ALEN <= orig_node->tt_buff_len) {
555 hna_ptr = orig_node->hna_buff + (hna_buff_count * ETH_ALEN); 585 tt_ptr = orig_node->tt_buff + (tt_buff_count * ETH_ALEN);
556 hna_global_entry = hna_global_hash_find(bat_priv, hna_ptr); 586 tt_global_entry = tt_global_hash_find(bat_priv, tt_ptr);
557 587
558 if ((hna_global_entry) && 588 if ((tt_global_entry) &&
559 (hna_global_entry->orig_node == orig_node)) 589 (tt_global_entry->orig_node == orig_node))
560 _hna_global_del_orig(bat_priv, hna_global_entry, 590 _tt_global_del_orig(bat_priv, tt_global_entry,
561 message); 591 message);
562 592
563 hna_buff_count++; 593 tt_buff_count++;
564 } 594 }
565 595
566 spin_unlock_bh(&bat_priv->hna_ghash_lock); 596 spin_unlock_bh(&bat_priv->tt_ghash_lock);
567 597
568 orig_node->hna_buff_len = 0; 598 orig_node->tt_buff_len = 0;
569 kfree(orig_node->hna_buff); 599 kfree(orig_node->tt_buff);
570 orig_node->hna_buff = NULL; 600 orig_node->tt_buff = NULL;
571} 601}
572 602
573static void hna_global_del(struct hlist_node *node, void *arg) 603static void tt_global_del(struct hlist_node *node, void *arg)
574{ 604{
575 void *data = container_of(node, struct hna_global_entry, hash_entry); 605 void *data = container_of(node, struct tt_global_entry, hash_entry);
576 606
577 kfree(data); 607 kfree(data);
578} 608}
579 609
580void hna_global_free(struct bat_priv *bat_priv) 610void tt_global_free(struct bat_priv *bat_priv)
581{ 611{
582 if (!bat_priv->hna_global_hash) 612 if (!bat_priv->tt_global_hash)
583 return; 613 return;
584 614
585 hash_delete(bat_priv->hna_global_hash, hna_global_del, NULL); 615 hash_delete(bat_priv->tt_global_hash, tt_global_del, NULL);
586 bat_priv->hna_global_hash = NULL; 616 bat_priv->tt_global_hash = NULL;
587} 617}
588 618
589struct orig_node *transtable_search(struct bat_priv *bat_priv, uint8_t *addr) 619struct orig_node *transtable_search(struct bat_priv *bat_priv, uint8_t *addr)
590{ 620{
591 struct hna_global_entry *hna_global_entry; 621 struct tt_global_entry *tt_global_entry;
592 struct orig_node *orig_node = NULL; 622 struct orig_node *orig_node = NULL;
593 623
594 spin_lock_bh(&bat_priv->hna_ghash_lock); 624 spin_lock_bh(&bat_priv->tt_ghash_lock);
595 hna_global_entry = hna_global_hash_find(bat_priv, addr); 625 tt_global_entry = tt_global_hash_find(bat_priv, addr);
596 626
597 if (!hna_global_entry) 627 if (!tt_global_entry)
598 goto out; 628 goto out;
599 629
600 if (!atomic_inc_not_zero(&hna_global_entry->orig_node->refcount)) 630 if (!atomic_inc_not_zero(&tt_global_entry->orig_node->refcount))
601 goto out; 631 goto out;
602 632
603 orig_node = hna_global_entry->orig_node; 633 orig_node = tt_global_entry->orig_node;
604 634
605out: 635out:
606 spin_unlock_bh(&bat_priv->hna_ghash_lock); 636 spin_unlock_bh(&bat_priv->tt_ghash_lock);
607 return orig_node; 637 return orig_node;
608} 638}
diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h
index f19931ca1457..46152c38cc95 100644
--- a/net/batman-adv/translation-table.h
+++ b/net/batman-adv/translation-table.h
@@ -22,22 +22,22 @@
22#ifndef _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ 22#ifndef _NET_BATMAN_ADV_TRANSLATION_TABLE_H_
23#define _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ 23#define _NET_BATMAN_ADV_TRANSLATION_TABLE_H_
24 24
25int hna_local_init(struct bat_priv *bat_priv); 25int tt_local_init(struct bat_priv *bat_priv);
26void hna_local_add(struct net_device *soft_iface, uint8_t *addr); 26void tt_local_add(struct net_device *soft_iface, uint8_t *addr);
27void hna_local_remove(struct bat_priv *bat_priv, 27void tt_local_remove(struct bat_priv *bat_priv,
28 uint8_t *addr, char *message); 28 uint8_t *addr, char *message);
29int hna_local_fill_buffer(struct bat_priv *bat_priv, 29int tt_local_fill_buffer(struct bat_priv *bat_priv,
30 unsigned char *buff, int buff_len); 30 unsigned char *buff, int buff_len);
31int hna_local_seq_print_text(struct seq_file *seq, void *offset); 31int tt_local_seq_print_text(struct seq_file *seq, void *offset);
32void hna_local_free(struct bat_priv *bat_priv); 32void tt_local_free(struct bat_priv *bat_priv);
33int hna_global_init(struct bat_priv *bat_priv); 33int tt_global_init(struct bat_priv *bat_priv);
34void hna_global_add_orig(struct bat_priv *bat_priv, 34void tt_global_add_orig(struct bat_priv *bat_priv,
35 struct orig_node *orig_node, 35 struct orig_node *orig_node,
36 unsigned char *hna_buff, int hna_buff_len); 36 unsigned char *tt_buff, int tt_buff_len);
37int hna_global_seq_print_text(struct seq_file *seq, void *offset); 37int tt_global_seq_print_text(struct seq_file *seq, void *offset);
38void hna_global_del_orig(struct bat_priv *bat_priv, 38void tt_global_del_orig(struct bat_priv *bat_priv,
39 struct orig_node *orig_node, char *message); 39 struct orig_node *orig_node, char *message);
40void hna_global_free(struct bat_priv *bat_priv); 40void tt_global_free(struct bat_priv *bat_priv);
41struct orig_node *transtable_search(struct bat_priv *bat_priv, uint8_t *addr); 41struct orig_node *transtable_search(struct bat_priv *bat_priv, uint8_t *addr);
42 42
43#endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */ 43#endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 83445cf0cc9f..fab70e8b16ee 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -67,7 +67,7 @@ struct hard_iface {
67struct orig_node { 67struct orig_node {
68 uint8_t orig[ETH_ALEN]; 68 uint8_t orig[ETH_ALEN];
69 uint8_t primary_addr[ETH_ALEN]; 69 uint8_t primary_addr[ETH_ALEN];
70 struct neigh_node *router; 70 struct neigh_node __rcu *router; /* rcu protected pointer */
71 unsigned long *bcast_own; 71 unsigned long *bcast_own;
72 uint8_t *bcast_own_sum; 72 uint8_t *bcast_own_sum;
73 unsigned long last_valid; 73 unsigned long last_valid;
@@ -75,25 +75,25 @@ struct orig_node {
75 unsigned long batman_seqno_reset; 75 unsigned long batman_seqno_reset;
76 uint8_t gw_flags; 76 uint8_t gw_flags;
77 uint8_t flags; 77 uint8_t flags;
78 unsigned char *hna_buff; 78 unsigned char *tt_buff;
79 int16_t hna_buff_len; 79 int16_t tt_buff_len;
80 uint32_t last_real_seqno; 80 uint32_t last_real_seqno;
81 uint8_t last_ttl; 81 uint8_t last_ttl;
82 unsigned long bcast_bits[NUM_WORDS]; 82 unsigned long bcast_bits[NUM_WORDS];
83 uint32_t last_bcast_seqno; 83 uint32_t last_bcast_seqno;
84 struct hlist_head neigh_list; 84 struct hlist_head neigh_list;
85 struct list_head frag_list; 85 struct list_head frag_list;
86 spinlock_t neigh_list_lock; /* protects neighbor list */ 86 spinlock_t neigh_list_lock; /* protects neigh_list and router */
87 atomic_t refcount; 87 atomic_t refcount;
88 struct rcu_head rcu; 88 struct rcu_head rcu;
89 struct hlist_node hash_entry; 89 struct hlist_node hash_entry;
90 struct bat_priv *bat_priv; 90 struct bat_priv *bat_priv;
91 unsigned long last_frag_packet; 91 unsigned long last_frag_packet;
92 spinlock_t ogm_cnt_lock; /* protects: bcast_own, bcast_own_sum, 92 /* ogm_cnt_lock protects: bcast_own, bcast_own_sum,
93 * neigh_node->real_bits, 93 * neigh_node->real_bits, neigh_node->real_packet_count */
94 * neigh_node->real_packet_count */ 94 spinlock_t ogm_cnt_lock;
95 spinlock_t bcast_seqno_lock; /* protects bcast_bits, 95 /* bcast_seqno_lock protects bcast_bits, last_bcast_seqno */
96 * last_bcast_seqno */ 96 spinlock_t bcast_seqno_lock;
97 atomic_t bond_candidates; 97 atomic_t bond_candidates;
98 struct list_head bond_list; 98 struct list_head bond_list;
99}; 99};
@@ -125,6 +125,7 @@ struct neigh_node {
125 struct rcu_head rcu; 125 struct rcu_head rcu;
126 struct orig_node *orig_node; 126 struct orig_node *orig_node;
127 struct hard_iface *if_incoming; 127 struct hard_iface *if_incoming;
128 spinlock_t tq_lock; /* protects: tq_recv, tq_index */
128}; 129};
129 130
130 131
@@ -145,34 +146,34 @@ struct bat_priv {
145 atomic_t bcast_queue_left; 146 atomic_t bcast_queue_left;
146 atomic_t batman_queue_left; 147 atomic_t batman_queue_left;
147 char num_ifaces; 148 char num_ifaces;
148 struct hlist_head softif_neigh_list;
149 struct softif_neigh *softif_neigh;
150 struct debug_log *debug_log; 149 struct debug_log *debug_log;
151 struct hard_iface *primary_if;
152 struct kobject *mesh_obj; 150 struct kobject *mesh_obj;
153 struct dentry *debug_dir; 151 struct dentry *debug_dir;
154 struct hlist_head forw_bat_list; 152 struct hlist_head forw_bat_list;
155 struct hlist_head forw_bcast_list; 153 struct hlist_head forw_bcast_list;
156 struct hlist_head gw_list; 154 struct hlist_head gw_list;
155 struct hlist_head softif_neigh_vids;
157 struct list_head vis_send_list; 156 struct list_head vis_send_list;
158 struct hashtable_t *orig_hash; 157 struct hashtable_t *orig_hash;
159 struct hashtable_t *hna_local_hash; 158 struct hashtable_t *tt_local_hash;
160 struct hashtable_t *hna_global_hash; 159 struct hashtable_t *tt_global_hash;
161 struct hashtable_t *vis_hash; 160 struct hashtable_t *vis_hash;
162 spinlock_t forw_bat_list_lock; /* protects forw_bat_list */ 161 spinlock_t forw_bat_list_lock; /* protects forw_bat_list */
 163 spinlock_t forw_bcast_list_lock; /* protects forw_bcast_list */ 162 spinlock_t forw_bcast_list_lock; /* protects forw_bcast_list */
164 spinlock_t hna_lhash_lock; /* protects hna_local_hash */ 163 spinlock_t tt_lhash_lock; /* protects tt_local_hash */
165 spinlock_t hna_ghash_lock; /* protects hna_global_hash */ 164 spinlock_t tt_ghash_lock; /* protects tt_global_hash */
166 spinlock_t gw_list_lock; /* protects gw_list and curr_gw */ 165 spinlock_t gw_list_lock; /* protects gw_list and curr_gw */
167 spinlock_t vis_hash_lock; /* protects vis_hash */ 166 spinlock_t vis_hash_lock; /* protects vis_hash */
168 spinlock_t vis_list_lock; /* protects vis_info::recv_list */ 167 spinlock_t vis_list_lock; /* protects vis_info::recv_list */
169 spinlock_t softif_neigh_lock; /* protects soft-interface neigh list */ 168 spinlock_t softif_neigh_lock; /* protects soft-interface neigh list */
170 int16_t num_local_hna; 169 spinlock_t softif_neigh_vid_lock; /* protects soft-interface vid list */
171 atomic_t hna_local_changed; 170 int16_t num_local_tt;
172 struct delayed_work hna_work; 171 atomic_t tt_local_changed;
172 struct delayed_work tt_work;
173 struct delayed_work orig_work; 173 struct delayed_work orig_work;
174 struct delayed_work vis_work; 174 struct delayed_work vis_work;
175 struct gw_node __rcu *curr_gw; /* rcu protected pointer */ 175 struct gw_node __rcu *curr_gw; /* rcu protected pointer */
176 struct hard_iface __rcu *primary_if; /* rcu protected pointer */
176 struct vis_info *my_vis_info; 177 struct vis_info *my_vis_info;
177}; 178};
178 179
@@ -191,14 +192,14 @@ struct socket_packet {
191 struct icmp_packet_rr icmp_packet; 192 struct icmp_packet_rr icmp_packet;
192}; 193};
193 194
194struct hna_local_entry { 195struct tt_local_entry {
195 uint8_t addr[ETH_ALEN]; 196 uint8_t addr[ETH_ALEN];
196 unsigned long last_seen; 197 unsigned long last_seen;
197 char never_purge; 198 char never_purge;
198 struct hlist_node hash_entry; 199 struct hlist_node hash_entry;
199}; 200};
200 201
201struct hna_global_entry { 202struct tt_global_entry {
202 uint8_t addr[ETH_ALEN]; 203 uint8_t addr[ETH_ALEN];
203 struct orig_node *orig_node; 204 struct orig_node *orig_node;
204 struct hlist_node hash_entry; 205 struct hlist_node hash_entry;
@@ -261,7 +262,7 @@ struct vis_info {
261struct vis_info_entry { 262struct vis_info_entry {
262 uint8_t src[ETH_ALEN]; 263 uint8_t src[ETH_ALEN];
263 uint8_t dest[ETH_ALEN]; 264 uint8_t dest[ETH_ALEN];
264 uint8_t quality; /* quality = 0 means HNA */ 265 uint8_t quality; /* quality = 0 means client */
265} __packed; 266} __packed;
266 267
267struct recvlist_node { 268struct recvlist_node {
@@ -269,11 +270,20 @@ struct recvlist_node {
269 uint8_t mac[ETH_ALEN]; 270 uint8_t mac[ETH_ALEN];
270}; 271};
271 272
273struct softif_neigh_vid {
274 struct hlist_node list;
275 struct bat_priv *bat_priv;
276 short vid;
277 atomic_t refcount;
278 struct softif_neigh __rcu *softif_neigh;
279 struct rcu_head rcu;
280 struct hlist_head softif_neigh_list;
281};
282
272struct softif_neigh { 283struct softif_neigh {
273 struct hlist_node list; 284 struct hlist_node list;
274 uint8_t addr[ETH_ALEN]; 285 uint8_t addr[ETH_ALEN];
275 unsigned long last_seen; 286 unsigned long last_seen;
276 short vid;
277 atomic_t refcount; 287 atomic_t refcount;
278 struct rcu_head rcu; 288 struct rcu_head rcu;
279}; 289};
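
The types.h hunks above turn bat_priv->primary_if into an __rcu-annotated pointer guarded by a per-interface refcount, so readers must go through an accessor instead of dereferencing the field directly. A minimal sketch of what that accessor looks like, assuming the helper and field names used in the hunks below (the real definition lives in the hard-interface code, outside this excerpt):

    static struct hard_iface *primary_if_get_selected(struct bat_priv *bat_priv)
    {
            struct hard_iface *hard_iface;

            rcu_read_lock();
            hard_iface = rcu_dereference(bat_priv->primary_if);
            if (!hard_iface)
                    goto out;

            /* the interface may be mid-teardown; only hand it out if a
             * reference can still be taken */
            if (!atomic_inc_not_zero(&hard_iface->refcount))
                    hard_iface = NULL;
    out:
            rcu_read_unlock();
            return hard_iface;
    }

Every caller that receives a non-NULL pointer owes a matching hardif_free_ref(), which is what the reworked exit paths in the following hunks arrange.
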
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index 19f84bd443af..19c3daf34ac6 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -221,15 +221,17 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
221 struct hard_iface *hard_iface, uint8_t dstaddr[]) 221 struct hard_iface *hard_iface, uint8_t dstaddr[])
222{ 222{
223 struct unicast_packet tmp_uc, *unicast_packet; 223 struct unicast_packet tmp_uc, *unicast_packet;
224 struct hard_iface *primary_if;
224 struct sk_buff *frag_skb; 225 struct sk_buff *frag_skb;
225 struct unicast_frag_packet *frag1, *frag2; 226 struct unicast_frag_packet *frag1, *frag2;
226 int uc_hdr_len = sizeof(struct unicast_packet); 227 int uc_hdr_len = sizeof(struct unicast_packet);
227 int ucf_hdr_len = sizeof(struct unicast_frag_packet); 228 int ucf_hdr_len = sizeof(struct unicast_frag_packet);
228 int data_len = skb->len - uc_hdr_len; 229 int data_len = skb->len - uc_hdr_len;
229 int large_tail = 0; 230 int large_tail = 0, ret = NET_RX_DROP;
230 uint16_t seqno; 231 uint16_t seqno;
231 232
232 if (!bat_priv->primary_if) 233 primary_if = primary_if_get_selected(bat_priv);
234 if (!primary_if)
233 goto dropped; 235 goto dropped;
234 236
235 frag_skb = dev_alloc_skb(data_len - (data_len / 2) + ucf_hdr_len); 237 frag_skb = dev_alloc_skb(data_len - (data_len / 2) + ucf_hdr_len);
@@ -254,7 +256,7 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
254 frag1->version = COMPAT_VERSION; 256 frag1->version = COMPAT_VERSION;
255 frag1->packet_type = BAT_UNICAST_FRAG; 257 frag1->packet_type = BAT_UNICAST_FRAG;
256 258
257 memcpy(frag1->orig, bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN); 259 memcpy(frag1->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
258 memcpy(frag2, frag1, sizeof(struct unicast_frag_packet)); 260 memcpy(frag2, frag1, sizeof(struct unicast_frag_packet));
259 261
260 if (data_len & 1) 262 if (data_len & 1)
@@ -269,13 +271,17 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
269 271
270 send_skb_packet(skb, hard_iface, dstaddr); 272 send_skb_packet(skb, hard_iface, dstaddr);
271 send_skb_packet(frag_skb, hard_iface, dstaddr); 273 send_skb_packet(frag_skb, hard_iface, dstaddr);
272 return NET_RX_SUCCESS; 274 ret = NET_RX_SUCCESS;
275 goto out;
273 276
274drop_frag: 277drop_frag:
275 kfree_skb(frag_skb); 278 kfree_skb(frag_skb);
276dropped: 279dropped:
277 kfree_skb(skb); 280 kfree_skb(skb);
278 return NET_RX_DROP; 281out:
282 if (primary_if)
283 hardif_free_ref(primary_if);
284 return ret;
279} 285}
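
The frag_send_skb() conversion is the standard acquire/release-on-all-paths shape: take the counted reference up front, default ret to the failure value, and funnel every exit through one label that drops the reference NULL-safely. Reduced to a self-contained skeleton (tx_example() is a hypothetical stand-in, with the fragment building elided):

    static int tx_example(struct bat_priv *bat_priv, struct sk_buff *skb)
    {
            struct hard_iface *primary_if;
            int ret = NET_RX_DROP;                  /* pessimistic default */

            primary_if = primary_if_get_selected(bat_priv);
            if (!primary_if)
                    goto dropped;

            /* ... build and transmit the fragments; the skbs are
             * consumed by the send path on success ... */
            ret = NET_RX_SUCCESS;
            goto out;

    dropped:
            kfree_skb(skb);                         /* consume the skb on failure */
    out:
            if (primary_if)                         /* NULL-safe release */
                    hardif_free_ref(primary_if);
            return ret;
    }
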
280 286
281int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv) 287int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv)
@@ -289,12 +295,12 @@ int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv)
289 295
290 /* get routing information */ 296 /* get routing information */
291 if (is_multicast_ether_addr(ethhdr->h_dest)) { 297 if (is_multicast_ether_addr(ethhdr->h_dest)) {
292 orig_node = (struct orig_node *)gw_get_selected(bat_priv); 298 orig_node = (struct orig_node *)gw_get_selected_orig(bat_priv);
293 if (orig_node) 299 if (orig_node)
294 goto find_router; 300 goto find_router;
295 } 301 }
296 302
297 /* check for hna host - increases orig_node refcount */ 303 /* check for tt host - increases orig_node refcount */
298 orig_node = transtable_search(bat_priv, ethhdr->h_dest); 304 orig_node = transtable_search(bat_priv, ethhdr->h_dest);
299 305
300find_router: 306find_router:
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
index f90212f42082..c39f20cc1ba6 100644
--- a/net/batman-adv/vis.c
+++ b/net/batman-adv/vis.c
@@ -194,7 +194,7 @@ static ssize_t vis_data_read_entry(char *buff, struct vis_info_entry *entry,
194{ 194{
195 /* maximal length: max(4+17+2, 3+17+1+3+2) == 26 */ 195 /* maximal length: max(4+17+2, 3+17+1+3+2) == 26 */
196 if (primary && entry->quality == 0) 196 if (primary && entry->quality == 0)
197 return sprintf(buff, "HNA %pM, ", entry->dest); 197 return sprintf(buff, "TT %pM, ", entry->dest);
198 else if (compare_eth(entry->src, src)) 198 else if (compare_eth(entry->src, src))
199 return sprintf(buff, "TQ %pM %d, ", entry->dest, 199 return sprintf(buff, "TQ %pM %d, ", entry->dest,
200 entry->quality); 200 entry->quality);
@@ -204,6 +204,7 @@ static ssize_t vis_data_read_entry(char *buff, struct vis_info_entry *entry,
204 204
205int vis_seq_print_text(struct seq_file *seq, void *offset) 205int vis_seq_print_text(struct seq_file *seq, void *offset)
206{ 206{
207 struct hard_iface *primary_if;
207 struct hlist_node *node; 208 struct hlist_node *node;
208 struct hlist_head *head; 209 struct hlist_head *head;
209 struct vis_info *info; 210 struct vis_info *info;
@@ -215,15 +216,18 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
215 HLIST_HEAD(vis_if_list); 216 HLIST_HEAD(vis_if_list);
216 struct if_list_entry *entry; 217 struct if_list_entry *entry;
217 struct hlist_node *pos, *n; 218 struct hlist_node *pos, *n;
218 int i, j; 219 int i, j, ret = 0;
219 int vis_server = atomic_read(&bat_priv->vis_mode); 220 int vis_server = atomic_read(&bat_priv->vis_mode);
220 size_t buff_pos, buf_size; 221 size_t buff_pos, buf_size;
221 char *buff; 222 char *buff;
222 int compare; 223 int compare;
223 224
224 if ((!bat_priv->primary_if) || 225 primary_if = primary_if_get_selected(bat_priv);
225 (vis_server == VIS_TYPE_CLIENT_UPDATE)) 226 if (!primary_if)
226 return 0; 227 goto out;
228
229 if (vis_server == VIS_TYPE_CLIENT_UPDATE)
230 goto out;
227 231
228 buf_size = 1; 232 buf_size = 1;
229 /* Estimate length */ 233 /* Estimate length */
@@ -270,7 +274,8 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
270 buff = kmalloc(buf_size, GFP_ATOMIC); 274 buff = kmalloc(buf_size, GFP_ATOMIC);
271 if (!buff) { 275 if (!buff) {
272 spin_unlock_bh(&bat_priv->vis_hash_lock); 276 spin_unlock_bh(&bat_priv->vis_hash_lock);
273 return -ENOMEM; 277 ret = -ENOMEM;
278 goto out;
274 } 279 }
275 buff[0] = '\0'; 280 buff[0] = '\0';
276 buff_pos = 0; 281 buff_pos = 0;
@@ -328,7 +333,10 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
328 seq_printf(seq, "%s", buff); 333 seq_printf(seq, "%s", buff);
329 kfree(buff); 334 kfree(buff);
330 335
331 return 0; 336out:
337 if (primary_if)
338 hardif_free_ref(primary_if);
339 return ret;
332} 340}
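
vis_seq_print_text() keeps its estimate-then-fill structure through the conversion: one pass under vis_hash_lock to size the output, a GFP_ATOMIC allocation (no sleeping while a spinlock is held), then a second pass to format. The generic shape, with the walking and formatting steps left as comments:

    static int two_pass_print(struct seq_file *seq, spinlock_t *lock)
    {
            size_t buf_size = 1;                    /* room for '\0' */
            char *buff;

            spin_lock_bh(lock);
            /* pass 1: walk the data, adding each entry's length to buf_size */

            buff = kmalloc(buf_size, GFP_ATOMIC);   /* atomic: lock is held */
            if (!buff) {
                    spin_unlock_bh(lock);
                    return -ENOMEM;
            }
            buff[0] = '\0';

            /* pass 2: walk again, appending formatted entries to buff */
            spin_unlock_bh(lock);

            seq_printf(seq, "%s", buff);
            kfree(buff);
            return 0;
    }
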
333 341
334/* add the info packet to the send list, if it was not 342/* add the info packet to the send list, if it was not
@@ -558,6 +566,7 @@ static int find_best_vis_server(struct bat_priv *bat_priv,
558 struct vis_info *info) 566 struct vis_info *info)
559{ 567{
560 struct hashtable_t *hash = bat_priv->orig_hash; 568 struct hashtable_t *hash = bat_priv->orig_hash;
569 struct neigh_node *router;
561 struct hlist_node *node; 570 struct hlist_node *node;
562 struct hlist_head *head; 571 struct hlist_head *head;
563 struct orig_node *orig_node; 572 struct orig_node *orig_node;
@@ -571,13 +580,17 @@ static int find_best_vis_server(struct bat_priv *bat_priv,
571 580
572 rcu_read_lock(); 581 rcu_read_lock();
573 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { 582 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
574 if ((orig_node) && (orig_node->router) && 583 router = orig_node_get_router(orig_node);
575 (orig_node->flags & VIS_SERVER) && 584 if (!router)
576 (orig_node->router->tq_avg > best_tq)) { 585 continue;
577 best_tq = orig_node->router->tq_avg; 586
587 if ((orig_node->flags & VIS_SERVER) &&
588 (router->tq_avg > best_tq)) {
589 best_tq = router->tq_avg;
578 memcpy(packet->target_orig, orig_node->orig, 590 memcpy(packet->target_orig, orig_node->orig,
579 ETH_ALEN); 591 ETH_ALEN);
580 } 592 }
593 neigh_node_free_ref(router);
581 } 594 }
582 rcu_read_unlock(); 595 rcu_read_unlock();
583 } 596 }
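
All of the vis.c hunks replace raw orig_node->router dereferences with orig_node_get_router(), which returns a counted reference taken under RCU; every non-NULL result is later paid back with neigh_node_free_ref(). A sketch of the helper these hunks assume (its actual definition is added in originator.c, outside this excerpt):

    struct neigh_node *orig_node_get_router(struct orig_node *orig_node)
    {
            struct neigh_node *router;

            rcu_read_lock();
            router = rcu_dereference(orig_node->router);

            /* the router may be going away; return it only if a
             * reference can still be taken */
            if (router && !atomic_inc_not_zero(&router->refcount))
                    router = NULL;

            rcu_read_unlock();
            return router;
    }
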
@@ -605,11 +618,11 @@ static int generate_vis_packet(struct bat_priv *bat_priv)
605 struct hlist_node *node; 618 struct hlist_node *node;
606 struct hlist_head *head; 619 struct hlist_head *head;
607 struct orig_node *orig_node; 620 struct orig_node *orig_node;
608 struct neigh_node *neigh_node; 621 struct neigh_node *router;
609 struct vis_info *info = (struct vis_info *)bat_priv->my_vis_info; 622 struct vis_info *info = (struct vis_info *)bat_priv->my_vis_info;
610 struct vis_packet *packet = (struct vis_packet *)info->skb_packet->data; 623 struct vis_packet *packet = (struct vis_packet *)info->skb_packet->data;
611 struct vis_info_entry *entry; 624 struct vis_info_entry *entry;
612 struct hna_local_entry *hna_local_entry; 625 struct tt_local_entry *tt_local_entry;
613 int best_tq = -1, i; 626 int best_tq = -1, i;
614 627
615 info->first_seen = jiffies; 628 info->first_seen = jiffies;
@@ -633,59 +646,61 @@ static int generate_vis_packet(struct bat_priv *bat_priv)
633 646
634 rcu_read_lock(); 647 rcu_read_lock();
635 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { 648 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
636 neigh_node = orig_node->router; 649 router = orig_node_get_router(orig_node);
637 650 if (!router)
638 if (!neigh_node)
639 continue; 651 continue;
640 652
641 if (!compare_eth(neigh_node->addr, orig_node->orig)) 653 if (!compare_eth(router->addr, orig_node->orig))
642 continue; 654 goto next;
643 655
644 if (neigh_node->if_incoming->if_status != IF_ACTIVE) 656 if (router->if_incoming->if_status != IF_ACTIVE)
645 continue; 657 goto next;
646 658
647 if (neigh_node->tq_avg < 1) 659 if (router->tq_avg < 1)
648 continue; 660 goto next;
649 661
650 /* fill one entry into buffer. */ 662 /* fill one entry into buffer. */
651 entry = (struct vis_info_entry *) 663 entry = (struct vis_info_entry *)
652 skb_put(info->skb_packet, sizeof(*entry)); 664 skb_put(info->skb_packet, sizeof(*entry));
653 memcpy(entry->src, 665 memcpy(entry->src,
654 neigh_node->if_incoming->net_dev->dev_addr, 666 router->if_incoming->net_dev->dev_addr,
655 ETH_ALEN); 667 ETH_ALEN);
656 memcpy(entry->dest, orig_node->orig, ETH_ALEN); 668 memcpy(entry->dest, orig_node->orig, ETH_ALEN);
657 entry->quality = neigh_node->tq_avg; 669 entry->quality = router->tq_avg;
658 packet->entries++; 670 packet->entries++;
659 671
672next:
673 neigh_node_free_ref(router);
674
660 if (vis_packet_full(info)) 675 if (vis_packet_full(info))
661 goto unlock; 676 goto unlock;
662 } 677 }
663 rcu_read_unlock(); 678 rcu_read_unlock();
664 } 679 }
665 680
666 hash = bat_priv->hna_local_hash; 681 hash = bat_priv->tt_local_hash;
667 682
668 spin_lock_bh(&bat_priv->hna_lhash_lock); 683 spin_lock_bh(&bat_priv->tt_lhash_lock);
669 for (i = 0; i < hash->size; i++) { 684 for (i = 0; i < hash->size; i++) {
670 head = &hash->table[i]; 685 head = &hash->table[i];
671 686
672 hlist_for_each_entry(hna_local_entry, node, head, hash_entry) { 687 hlist_for_each_entry(tt_local_entry, node, head, hash_entry) {
673 entry = (struct vis_info_entry *) 688 entry = (struct vis_info_entry *)
674 skb_put(info->skb_packet, 689 skb_put(info->skb_packet,
675 sizeof(*entry)); 690 sizeof(*entry));
676 memset(entry->src, 0, ETH_ALEN); 691 memset(entry->src, 0, ETH_ALEN);
677 memcpy(entry->dest, hna_local_entry->addr, ETH_ALEN); 692 memcpy(entry->dest, tt_local_entry->addr, ETH_ALEN);
678 entry->quality = 0; /* 0 means HNA */ 693 entry->quality = 0; /* 0 means TT */
679 packet->entries++; 694 packet->entries++;
680 695
681 if (vis_packet_full(info)) { 696 if (vis_packet_full(info)) {
682 spin_unlock_bh(&bat_priv->hna_lhash_lock); 697 spin_unlock_bh(&bat_priv->tt_lhash_lock);
683 return 0; 698 return 0;
684 } 699 }
685 } 700 }
686 } 701 }
687 702
688 spin_unlock_bh(&bat_priv->hna_lhash_lock); 703 spin_unlock_bh(&bat_priv->tt_lhash_lock);
689 return 0; 704 return 0;
690 705
691unlock: 706unlock:
@@ -725,6 +740,7 @@ static void purge_vis_packets(struct bat_priv *bat_priv)
725static void broadcast_vis_packet(struct bat_priv *bat_priv, 740static void broadcast_vis_packet(struct bat_priv *bat_priv,
726 struct vis_info *info) 741 struct vis_info *info)
727{ 742{
743 struct neigh_node *router;
728 struct hashtable_t *hash = bat_priv->orig_hash; 744 struct hashtable_t *hash = bat_priv->orig_hash;
729 struct hlist_node *node; 745 struct hlist_node *node;
730 struct hlist_head *head; 746 struct hlist_head *head;
@@ -745,19 +761,26 @@ static void broadcast_vis_packet(struct bat_priv *bat_priv,
745 rcu_read_lock(); 761 rcu_read_lock();
746 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { 762 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
747 /* if it's a vis server and reachable, send it. */ 763 /* if it's a vis server and reachable, send it. */
748 if ((!orig_node) || (!orig_node->router))
749 continue;
750 if (!(orig_node->flags & VIS_SERVER)) 764 if (!(orig_node->flags & VIS_SERVER))
751 continue; 765 continue;
766
767 router = orig_node_get_router(orig_node);
768 if (!router)
769 continue;
770
752 /* don't send it if we already received the packet from 771 /* don't send it if we already received the packet from
753 * this node. */ 772 * this node. */
754 if (recv_list_is_in(bat_priv, &info->recv_list, 773 if (recv_list_is_in(bat_priv, &info->recv_list,
755 orig_node->orig)) 774 orig_node->orig)) {
775 neigh_node_free_ref(router);
756 continue; 776 continue;
777 }
757 778
758 memcpy(packet->target_orig, orig_node->orig, ETH_ALEN); 779 memcpy(packet->target_orig, orig_node->orig, ETH_ALEN);
759 hard_iface = orig_node->router->if_incoming; 780 hard_iface = router->if_incoming;
760 memcpy(dstaddr, orig_node->router->addr, ETH_ALEN); 781 memcpy(dstaddr, router->addr, ETH_ALEN);
782
783 neigh_node_free_ref(router);
761 784
762 skb = skb_clone(info->skb_packet, GFP_ATOMIC); 785 skb = skb_clone(info->skb_packet, GFP_ATOMIC);
763 if (skb) 786 if (skb)
@@ -772,60 +795,48 @@ static void unicast_vis_packet(struct bat_priv *bat_priv,
772 struct vis_info *info) 795 struct vis_info *info)
773{ 796{
774 struct orig_node *orig_node; 797 struct orig_node *orig_node;
775 struct neigh_node *neigh_node = NULL; 798 struct neigh_node *router = NULL;
776 struct sk_buff *skb; 799 struct sk_buff *skb;
777 struct vis_packet *packet; 800 struct vis_packet *packet;
778 801
779 packet = (struct vis_packet *)info->skb_packet->data; 802 packet = (struct vis_packet *)info->skb_packet->data;
780 803
781 rcu_read_lock();
782 orig_node = orig_hash_find(bat_priv, packet->target_orig); 804 orig_node = orig_hash_find(bat_priv, packet->target_orig);
783
784 if (!orig_node) 805 if (!orig_node)
785 goto unlock; 806 goto out;
786
787 neigh_node = orig_node->router;
788 807
789 if (!neigh_node) 808 router = orig_node_get_router(orig_node);
790 goto unlock; 809 if (!router)
791 810 goto out;
792 if (!atomic_inc_not_zero(&neigh_node->refcount)) {
793 neigh_node = NULL;
794 goto unlock;
795 }
796
797 rcu_read_unlock();
798 811
799 skb = skb_clone(info->skb_packet, GFP_ATOMIC); 812 skb = skb_clone(info->skb_packet, GFP_ATOMIC);
800 if (skb) 813 if (skb)
801 send_skb_packet(skb, neigh_node->if_incoming, 814 send_skb_packet(skb, router->if_incoming, router->addr);
802 neigh_node->addr);
803 815
804 goto out;
805
806unlock:
807 rcu_read_unlock();
808out: 816out:
809 if (neigh_node) 817 if (router)
810 neigh_node_free_ref(neigh_node); 818 neigh_node_free_ref(router);
811 if (orig_node) 819 if (orig_node)
812 orig_node_free_ref(orig_node); 820 orig_node_free_ref(orig_node);
813 return;
814} 821}
815 822
816/* only send one vis packet. called from send_vis_packets() */ 823/* only send one vis packet. called from send_vis_packets() */
817static void send_vis_packet(struct bat_priv *bat_priv, struct vis_info *info) 824static void send_vis_packet(struct bat_priv *bat_priv, struct vis_info *info)
818{ 825{
826 struct hard_iface *primary_if;
819 struct vis_packet *packet; 827 struct vis_packet *packet;
820 828
829 primary_if = primary_if_get_selected(bat_priv);
830 if (!primary_if)
831 goto out;
832
821 packet = (struct vis_packet *)info->skb_packet->data; 833 packet = (struct vis_packet *)info->skb_packet->data;
822 if (packet->ttl < 2) { 834 if (packet->ttl < 2) {
823 pr_debug("Error - can't send vis packet: ttl exceeded\n"); 835 pr_debug("Error - can't send vis packet: ttl exceeded\n");
824 return; 836 goto out;
825 } 837 }
826 838
827 memcpy(packet->sender_orig, bat_priv->primary_if->net_dev->dev_addr, 839 memcpy(packet->sender_orig, primary_if->net_dev->dev_addr, ETH_ALEN);
828 ETH_ALEN);
829 packet->ttl--; 840 packet->ttl--;
830 841
831 if (is_broadcast_ether_addr(packet->target_orig)) 842 if (is_broadcast_ether_addr(packet->target_orig))
@@ -833,6 +844,10 @@ static void send_vis_packet(struct bat_priv *bat_priv, struct vis_info *info)
833 else 844 else
834 unicast_vis_packet(bat_priv, info); 845 unicast_vis_packet(bat_priv, info);
835 packet->ttl++; /* restore TTL */ 846 packet->ttl++; /* restore TTL */
847
848out:
849 if (primary_if)
850 hardif_free_ref(primary_if);
836} 851}
837 852
838/* called from timer; send (and maybe generate) vis packet. */ 853/* called from timer; send (and maybe generate) vis packet. */
@@ -859,8 +874,7 @@ static void send_vis_packets(struct work_struct *work)
859 kref_get(&info->refcount); 874 kref_get(&info->refcount);
860 spin_unlock_bh(&bat_priv->vis_hash_lock); 875 spin_unlock_bh(&bat_priv->vis_hash_lock);
861 876
862 if (bat_priv->primary_if) 877 send_vis_packet(bat_priv, info);
863 send_vis_packet(bat_priv, info);
864 878
865 spin_lock_bh(&bat_priv->vis_hash_lock); 879 spin_lock_bh(&bat_priv->vis_hash_lock);
866 send_list_del(info); 880 send_list_del(info);
diff --git a/net/bluetooth/bnep/bnep.h b/net/bluetooth/bnep/bnep.h
index 70672544db86..8e6c06158f8e 100644
--- a/net/bluetooth/bnep/bnep.h
+++ b/net/bluetooth/bnep/bnep.h
@@ -23,88 +23,88 @@
23#include <linux/crc32.h> 23#include <linux/crc32.h>
24#include <net/bluetooth/bluetooth.h> 24#include <net/bluetooth/bluetooth.h>
25 25
26// Limits 26/* Limits */
27#define BNEP_MAX_PROTO_FILTERS 5 27#define BNEP_MAX_PROTO_FILTERS 5
28#define BNEP_MAX_MULTICAST_FILTERS 20 28#define BNEP_MAX_MULTICAST_FILTERS 20
29 29
30// UUIDs 30/* UUIDs */
31#define BNEP_BASE_UUID 0x0000000000001000800000805F9B34FB 31#define BNEP_BASE_UUID 0x0000000000001000800000805F9B34FB
32#define BNEP_UUID16 0x02 32#define BNEP_UUID16 0x02
33#define BNEP_UUID32 0x04 33#define BNEP_UUID32 0x04
34#define BNEP_UUID128 0x16 34#define BNEP_UUID128 0x16
35 35
36#define BNEP_SVC_PANU 0x1115 36#define BNEP_SVC_PANU 0x1115
37#define BNEP_SVC_NAP 0x1116 37#define BNEP_SVC_NAP 0x1116
38#define BNEP_SVC_GN 0x1117 38#define BNEP_SVC_GN 0x1117
39 39
40// Packet types 40/* Packet types */
41#define BNEP_GENERAL 0x00 41#define BNEP_GENERAL 0x00
42#define BNEP_CONTROL 0x01 42#define BNEP_CONTROL 0x01
43#define BNEP_COMPRESSED 0x02 43#define BNEP_COMPRESSED 0x02
44#define BNEP_COMPRESSED_SRC_ONLY 0x03 44#define BNEP_COMPRESSED_SRC_ONLY 0x03
45#define BNEP_COMPRESSED_DST_ONLY 0x04 45#define BNEP_COMPRESSED_DST_ONLY 0x04
46 46
47// Control types 47/* Control types */
48#define BNEP_CMD_NOT_UNDERSTOOD 0x00 48#define BNEP_CMD_NOT_UNDERSTOOD 0x00
49#define BNEP_SETUP_CONN_REQ 0x01 49#define BNEP_SETUP_CONN_REQ 0x01
50#define BNEP_SETUP_CONN_RSP 0x02 50#define BNEP_SETUP_CONN_RSP 0x02
51#define BNEP_FILTER_NET_TYPE_SET 0x03 51#define BNEP_FILTER_NET_TYPE_SET 0x03
52#define BNEP_FILTER_NET_TYPE_RSP 0x04 52#define BNEP_FILTER_NET_TYPE_RSP 0x04
53#define BNEP_FILTER_MULTI_ADDR_SET 0x05 53#define BNEP_FILTER_MULTI_ADDR_SET 0x05
54#define BNEP_FILTER_MULTI_ADDR_RSP 0x06 54#define BNEP_FILTER_MULTI_ADDR_RSP 0x06
55 55
56// Extension types 56/* Extension types */
57#define BNEP_EXT_CONTROL 0x00 57#define BNEP_EXT_CONTROL 0x00
58 58
59// Response messages 59/* Response messages */
60#define BNEP_SUCCESS 0x00 60#define BNEP_SUCCESS 0x00
61 61
62#define BNEP_CONN_INVALID_DST 0x01 62#define BNEP_CONN_INVALID_DST 0x01
63#define BNEP_CONN_INVALID_SRC 0x02 63#define BNEP_CONN_INVALID_SRC 0x02
64#define BNEP_CONN_INVALID_SVC 0x03 64#define BNEP_CONN_INVALID_SVC 0x03
65#define BNEP_CONN_NOT_ALLOWED 0x04 65#define BNEP_CONN_NOT_ALLOWED 0x04
66 66
67#define BNEP_FILTER_UNSUPPORTED_REQ 0x01 67#define BNEP_FILTER_UNSUPPORTED_REQ 0x01
68#define BNEP_FILTER_INVALID_RANGE 0x02 68#define BNEP_FILTER_INVALID_RANGE 0x02
69#define BNEP_FILTER_INVALID_MCADDR 0x02 69#define BNEP_FILTER_INVALID_MCADDR 0x02
70#define BNEP_FILTER_LIMIT_REACHED 0x03 70#define BNEP_FILTER_LIMIT_REACHED 0x03
71#define BNEP_FILTER_DENIED_SECURITY 0x04 71#define BNEP_FILTER_DENIED_SECURITY 0x04
72 72
73// L2CAP settings 73/* L2CAP settings */
74#define BNEP_MTU 1691 74#define BNEP_MTU 1691
75#define BNEP_PSM 0x0f 75#define BNEP_PSM 0x0f
76#define BNEP_FLUSH_TO 0xffff 76#define BNEP_FLUSH_TO 0xffff
77#define BNEP_CONNECT_TO 15 77#define BNEP_CONNECT_TO 15
78#define BNEP_FILTER_TO 15 78#define BNEP_FILTER_TO 15
79 79
80// Headers 80/* Headers */
81#define BNEP_TYPE_MASK 0x7f 81#define BNEP_TYPE_MASK 0x7f
82#define BNEP_EXT_HEADER 0x80 82#define BNEP_EXT_HEADER 0x80
83 83
84struct bnep_setup_conn_req { 84struct bnep_setup_conn_req {
85 __u8 type; 85 __u8 type;
86 __u8 ctrl; 86 __u8 ctrl;
87 __u8 uuid_size; 87 __u8 uuid_size;
88 __u8 service[0]; 88 __u8 service[0];
89} __packed; 89} __packed;
90 90
91struct bnep_set_filter_req { 91struct bnep_set_filter_req {
92 __u8 type; 92 __u8 type;
93 __u8 ctrl; 93 __u8 ctrl;
94 __be16 len; 94 __be16 len;
95 __u8 list[0]; 95 __u8 list[0];
96} __packed; 96} __packed;
97 97
98struct bnep_control_rsp { 98struct bnep_control_rsp {
99 __u8 type; 99 __u8 type;
100 __u8 ctrl; 100 __u8 ctrl;
101 __be16 resp; 101 __be16 resp;
102} __packed; 102} __packed;
103 103
104struct bnep_ext_hdr { 104struct bnep_ext_hdr {
105 __u8 type; 105 __u8 type;
106 __u8 len; 106 __u8 len;
107 __u8 data[0]; 107 __u8 data[0];
108} __packed; 108} __packed;
109 109
110/* BNEP ioctl defines */ 110/* BNEP ioctl defines */
@@ -114,10 +114,10 @@ struct bnep_ext_hdr {
114#define BNEPGETCONNINFO _IOR('B', 211, int) 114#define BNEPGETCONNINFO _IOR('B', 211, int)
115 115
116struct bnep_connadd_req { 116struct bnep_connadd_req {
117 int sock; // Connected socket 117 int sock; /* Connected socket */
118 __u32 flags; 118 __u32 flags;
119 __u16 role; 119 __u16 role;
120 char device[16]; // Name of the Ethernet device 120 char device[16]; /* Name of the Ethernet device */
121}; 121};
122 122
123struct bnep_conndel_req { 123struct bnep_conndel_req {
@@ -148,14 +148,14 @@ int bnep_del_connection(struct bnep_conndel_req *req);
148int bnep_get_connlist(struct bnep_connlist_req *req); 148int bnep_get_connlist(struct bnep_connlist_req *req);
149int bnep_get_conninfo(struct bnep_conninfo *ci); 149int bnep_get_conninfo(struct bnep_conninfo *ci);
150 150
151// BNEP sessions 151/* BNEP sessions */
152struct bnep_session { 152struct bnep_session {
153 struct list_head list; 153 struct list_head list;
154 154
155 unsigned int role; 155 unsigned int role;
156 unsigned long state; 156 unsigned long state;
157 unsigned long flags; 157 unsigned long flags;
158 atomic_t killed; 158 struct task_struct *task;
159 159
160 struct ethhdr eh; 160 struct ethhdr eh;
161 struct msghdr msg; 161 struct msghdr msg;
@@ -173,7 +173,7 @@ void bnep_sock_cleanup(void);
173 173
174static inline int bnep_mc_hash(__u8 *addr) 174static inline int bnep_mc_hash(__u8 *addr)
175{ 175{
176 return (crc32_be(~0, addr, ETH_ALEN) >> 26); 176 return crc32_be(~0, addr, ETH_ALEN) >> 26;
177} 177}
178 178
179#endif 179#endif
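
For context on the bnep_mc_hash() cleanup above: the helper maps a MAC address to one of 64 buckets by keeping the top six bits of a big-endian CRC32, and the session's mc_filter is the matching 64-bit bitmap. An illustrative pair of helpers built on it (mc_filter_add() and mc_filter_match() are hypothetical names, not part of this header):

    #include <linux/crc32.h>
    #include <linux/if_ether.h>

    static inline int bnep_mc_hash(__u8 *addr)
    {
            return crc32_be(~0, addr, ETH_ALEN) >> 26;      /* 0..63 */
    }

    static void mc_filter_add(u64 *filter, __u8 *addr)
    {
            *filter |= 1ULL << bnep_mc_hash(addr);
    }

    static bool mc_filter_match(u64 filter, __u8 *addr)
    {
            return filter & (1ULL << bnep_mc_hash(addr));
    }
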
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 03d4d1245d58..ca39fcf010ce 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -36,6 +36,7 @@
36#include <linux/errno.h> 36#include <linux/errno.h>
37#include <linux/net.h> 37#include <linux/net.h>
38#include <linux/slab.h> 38#include <linux/slab.h>
39#include <linux/kthread.h>
39#include <net/sock.h> 40#include <net/sock.h>
40 41
41#include <linux/socket.h> 42#include <linux/socket.h>
@@ -131,7 +132,8 @@ static int bnep_ctrl_set_netfilter(struct bnep_session *s, __be16 *data, int len
131 return -EILSEQ; 132 return -EILSEQ;
132 133
133 n = get_unaligned_be16(data); 134 n = get_unaligned_be16(data);
134 data++; len -= 2; 135 data++;
136 len -= 2;
135 137
136 if (len < n) 138 if (len < n)
137 return -EILSEQ; 139 return -EILSEQ;
@@ -176,7 +178,8 @@ static int bnep_ctrl_set_mcfilter(struct bnep_session *s, u8 *data, int len)
176 return -EILSEQ; 178 return -EILSEQ;
177 179
178 n = get_unaligned_be16(data); 180 n = get_unaligned_be16(data);
179 data += 2; len -= 2; 181 data += 2;
182 len -= 2;
180 183
181 if (len < n) 184 if (len < n)
182 return -EILSEQ; 185 return -EILSEQ;
@@ -187,6 +190,8 @@ static int bnep_ctrl_set_mcfilter(struct bnep_session *s, u8 *data, int len)
187 n /= (ETH_ALEN * 2); 190 n /= (ETH_ALEN * 2);
188 191
189 if (n > 0) { 192 if (n > 0) {
193 int i;
194
190 s->mc_filter = 0; 195 s->mc_filter = 0;
191 196
192 /* Always send broadcast */ 197 /* Always send broadcast */
@@ -196,18 +201,22 @@ static int bnep_ctrl_set_mcfilter(struct bnep_session *s, u8 *data, int len)
196 for (; n > 0; n--) { 201 for (; n > 0; n--) {
197 u8 a1[6], *a2; 202 u8 a1[6], *a2;
198 203
199 memcpy(a1, data, ETH_ALEN); data += ETH_ALEN; 204 memcpy(a1, data, ETH_ALEN);
200 a2 = data; data += ETH_ALEN; 205 data += ETH_ALEN;
206 a2 = data;
207 data += ETH_ALEN;
201 208
202 BT_DBG("mc filter %s -> %s", 209 BT_DBG("mc filter %s -> %s",
203 batostr((void *) a1), batostr((void *) a2)); 210 batostr((void *) a1), batostr((void *) a2));
204 211
205 #define INCA(a) { int i = 5; while (i >=0 && ++a[i--] == 0); }
206
207 /* Iterate from a1 to a2 */ 212 /* Iterate from a1 to a2 */
208 set_bit(bnep_mc_hash(a1), (ulong *) &s->mc_filter); 213 set_bit(bnep_mc_hash(a1), (ulong *) &s->mc_filter);
209 while (memcmp(a1, a2, 6) < 0 && s->mc_filter != ~0LL) { 214 while (memcmp(a1, a2, 6) < 0 && s->mc_filter != ~0LL) {
210 INCA(a1); 215 /* Increment a1 */
216 i = 5;
217 while (i >= 0 && ++a1[i--] == 0)
218 ;
219
211 set_bit(bnep_mc_hash(a1), (ulong *) &s->mc_filter); 220 set_bit(bnep_mc_hash(a1), (ulong *) &s->mc_filter);
212 } 221 }
213 } 222 }
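
The removed INCA() macro and its open-coded replacement both treat the 6-byte address as a big-endian integer and add one, rippling the carry from the last byte toward the first; that is how the loop walks every address from a1 to a2. The same logic as a standalone helper (mac_addr_inc() is a hypothetical name):

    /* increment a 6-byte MAC address in place, big-endian style */
    static void mac_addr_inc(u8 a[ETH_ALEN])
    {
            int i = ETH_ALEN - 1;

            /* ++a[i] wrapping to 0 means carry into the byte to the left */
            while (i >= 0 && ++a[i--] == 0)
                    ;
    }
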
@@ -227,7 +236,8 @@ static int bnep_rx_control(struct bnep_session *s, void *data, int len)
227 u8 cmd = *(u8 *)data; 236 u8 cmd = *(u8 *)data;
228 int err = 0; 237 int err = 0;
229 238
230 data++; len--; 239 data++;
240 len--;
231 241
232 switch (cmd) { 242 switch (cmd) {
233 case BNEP_CMD_NOT_UNDERSTOOD: 243 case BNEP_CMD_NOT_UNDERSTOOD:
@@ -302,7 +312,6 @@ static u8 __bnep_rx_hlen[] = {
302 ETH_ALEN + 2, /* BNEP_COMPRESSED_SRC_ONLY */ 312 ETH_ALEN + 2, /* BNEP_COMPRESSED_SRC_ONLY */
303 ETH_ALEN + 2 /* BNEP_COMPRESSED_DST_ONLY */ 313 ETH_ALEN + 2 /* BNEP_COMPRESSED_DST_ONLY */
304}; 314};
305#define BNEP_RX_TYPES (sizeof(__bnep_rx_hlen) - 1)
306 315
307static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb) 316static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
308{ 317{
@@ -312,9 +321,10 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
312 321
313 dev->stats.rx_bytes += skb->len; 322 dev->stats.rx_bytes += skb->len;
314 323
315 type = *(u8 *) skb->data; skb_pull(skb, 1); 324 type = *(u8 *) skb->data;
325 skb_pull(skb, 1);
316 326
317 if ((type & BNEP_TYPE_MASK) > BNEP_RX_TYPES) 327 if ((type & BNEP_TYPE_MASK) >= sizeof(__bnep_rx_hlen))
318 goto badframe; 328 goto badframe;
319 329
320 if ((type & BNEP_TYPE_MASK) == BNEP_CONTROL) { 330 if ((type & BNEP_TYPE_MASK) == BNEP_CONTROL) {
@@ -367,14 +377,14 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
367 377
368 case BNEP_COMPRESSED_DST_ONLY: 378 case BNEP_COMPRESSED_DST_ONLY:
369 memcpy(__skb_put(nskb, ETH_ALEN), skb_mac_header(skb), 379 memcpy(__skb_put(nskb, ETH_ALEN), skb_mac_header(skb),
370 ETH_ALEN); 380 ETH_ALEN);
371 memcpy(__skb_put(nskb, ETH_ALEN + 2), s->eh.h_source, 381 memcpy(__skb_put(nskb, ETH_ALEN + 2), s->eh.h_source,
372 ETH_ALEN + 2); 382 ETH_ALEN + 2);
373 break; 383 break;
374 384
375 case BNEP_GENERAL: 385 case BNEP_GENERAL:
376 memcpy(__skb_put(nskb, ETH_ALEN * 2), skb_mac_header(skb), 386 memcpy(__skb_put(nskb, ETH_ALEN * 2), skb_mac_header(skb),
377 ETH_ALEN * 2); 387 ETH_ALEN * 2);
378 put_unaligned(s->eh.h_proto, (__be16 *) __skb_put(nskb, 2)); 388 put_unaligned(s->eh.h_proto, (__be16 *) __skb_put(nskb, 2));
379 break; 389 break;
380 } 390 }
@@ -470,15 +480,14 @@ static int bnep_session(void *arg)
470 480
471 BT_DBG(""); 481 BT_DBG("");
472 482
473 daemonize("kbnepd %s", dev->name);
474 set_user_nice(current, -15); 483 set_user_nice(current, -15);
475 484
476 init_waitqueue_entry(&wait, current); 485 init_waitqueue_entry(&wait, current);
477 add_wait_queue(sk_sleep(sk), &wait); 486 add_wait_queue(sk_sleep(sk), &wait);
478 while (!atomic_read(&s->killed)) { 487 while (!kthread_should_stop()) {
479 set_current_state(TASK_INTERRUPTIBLE); 488 set_current_state(TASK_INTERRUPTIBLE);
480 489
481 // RX 490 /* RX */
482 while ((skb = skb_dequeue(&sk->sk_receive_queue))) { 491 while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
483 skb_orphan(skb); 492 skb_orphan(skb);
484 bnep_rx_frame(s, skb); 493 bnep_rx_frame(s, skb);
@@ -487,7 +496,7 @@ static int bnep_session(void *arg)
487 if (sk->sk_state != BT_CONNECTED) 496 if (sk->sk_state != BT_CONNECTED)
488 break; 497 break;
489 498
490 // TX 499 /* TX */
491 while ((skb = skb_dequeue(&sk->sk_write_queue))) 500 while ((skb = skb_dequeue(&sk->sk_write_queue)))
492 if (bnep_tx_frame(s, skb)) 501 if (bnep_tx_frame(s, skb))
493 break; 502 break;
@@ -555,8 +564,8 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
555 564
556 /* session struct allocated as private part of net_device */ 565 /* session struct allocated as private part of net_device */
557 dev = alloc_netdev(sizeof(struct bnep_session), 566 dev = alloc_netdev(sizeof(struct bnep_session),
558 (*req->device) ? req->device : "bnep%d", 567 (*req->device) ? req->device : "bnep%d",
559 bnep_net_setup); 568 bnep_net_setup);
560 if (!dev) 569 if (!dev)
561 return -ENOMEM; 570 return -ENOMEM;
562 571
@@ -571,7 +580,7 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
571 s = netdev_priv(dev); 580 s = netdev_priv(dev);
572 581
573 /* This is rx header therefore addresses are swapped. 582 /* This is rx header therefore addresses are swapped.
574 * ie eh.h_dest is our local address. */ 583 * ie. eh.h_dest is our local address. */
575 memcpy(s->eh.h_dest, &src, ETH_ALEN); 584 memcpy(s->eh.h_dest, &src, ETH_ALEN);
576 memcpy(s->eh.h_source, &dst, ETH_ALEN); 585 memcpy(s->eh.h_source, &dst, ETH_ALEN);
577 memcpy(dev->dev_addr, s->eh.h_dest, ETH_ALEN); 586 memcpy(dev->dev_addr, s->eh.h_dest, ETH_ALEN);
@@ -597,17 +606,17 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
597 SET_NETDEV_DEVTYPE(dev, &bnep_type); 606 SET_NETDEV_DEVTYPE(dev, &bnep_type);
598 607
599 err = register_netdev(dev); 608 err = register_netdev(dev);
600 if (err) { 609 if (err)
601 goto failed; 610 goto failed;
602 }
603 611
604 __bnep_link_session(s); 612 __bnep_link_session(s);
605 613
606 err = kernel_thread(bnep_session, s, CLONE_KERNEL); 614 s->task = kthread_run(bnep_session, s, "kbnepd %s", dev->name);
607 if (err < 0) { 615 if (IS_ERR(s->task)) {
608 /* Session thread start failed, gotta cleanup. */ 616 /* Session thread start failed, gotta cleanup. */
609 unregister_netdev(dev); 617 unregister_netdev(dev);
610 __bnep_unlink_session(s); 618 __bnep_unlink_session(s);
619 err = PTR_ERR(s->task);
611 goto failed; 620 goto failed;
612 } 621 }
613 622
@@ -631,15 +640,9 @@ int bnep_del_connection(struct bnep_conndel_req *req)
631 down_read(&bnep_session_sem); 640 down_read(&bnep_session_sem);
632 641
633 s = __bnep_get_session(req->dst); 642 s = __bnep_get_session(req->dst);
634 if (s) { 643 if (s)
635 /* Wakeup user-space which is polling for socket errors. 644 kthread_stop(s->task);
636 * This is temporary hack until we have shutdown in L2CAP */ 645 else
637 s->sock->sk->sk_err = EUNATCH;
638
639 /* Kill session thread */
640 atomic_inc(&s->killed);
641 wake_up_interruptible(sk_sleep(s->sock->sk));
642 } else
643 err = -ENOENT; 646 err = -ENOENT;
644 647
645 up_read(&bnep_session_sem); 648 up_read(&bnep_session_sem);
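
The bnep changes (and the cmtp ones below) are one modernization applied twice: daemonize() plus kernel_thread() plus a hand-rolled kill flag become the kthread API, where kthread_stop() both signals the thread and waits for it to exit. The pattern in isolation, assuming a session struct with a task field as in these hunks:

    #include <linux/kthread.h>

    static int session_thread(void *arg)
    {
            while (!kthread_should_stop()) {
                    set_current_state(TASK_INTERRUPTIBLE);
                    /* ... drain rx/tx queues ... */
                    schedule();             /* sleep until woken or stopped */
            }
            __set_current_state(TASK_RUNNING);
            return 0;
    }

    /* start (in the add-connection path); the printf-style thread name
     * replaces the old daemonize() call */
    s->task = kthread_run(session_thread, s, "kbnepd %s", dev->name);
    if (IS_ERR(s->task))
            err = PTR_ERR(s->task);

    /* stop (in the del-connection path); blocks until the thread returns */
    kthread_stop(s->task);
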
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index d935da71ab3b..17800b1d28ea 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -39,10 +39,10 @@
39#include <linux/init.h> 39#include <linux/init.h>
40#include <linux/compat.h> 40#include <linux/compat.h>
41#include <linux/gfp.h> 41#include <linux/gfp.h>
42#include <linux/uaccess.h>
42#include <net/sock.h> 43#include <net/sock.h>
43 44
44#include <asm/system.h> 45#include <asm/system.h>
45#include <asm/uaccess.h>
46 46
47#include "bnep.h" 47#include "bnep.h"
48 48
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c
index 67cff810c77d..744233cba244 100644
--- a/net/bluetooth/cmtp/capi.c
+++ b/net/bluetooth/cmtp/capi.c
@@ -35,6 +35,7 @@
35#include <linux/ioctl.h> 35#include <linux/ioctl.h>
36#include <linux/file.h> 36#include <linux/file.h>
37#include <linux/wait.h> 37#include <linux/wait.h>
38#include <linux/kthread.h>
38#include <net/sock.h> 39#include <net/sock.h>
39 40
40#include <linux/isdn/capilli.h> 41#include <linux/isdn/capilli.h>
@@ -143,7 +144,7 @@ static void cmtp_send_capimsg(struct cmtp_session *session, struct sk_buff *skb)
143 144
144 skb_queue_tail(&session->transmit, skb); 145 skb_queue_tail(&session->transmit, skb);
145 146
146 cmtp_schedule(session); 147 wake_up_interruptible(sk_sleep(session->sock->sk));
147} 148}
148 149
149static void cmtp_send_interopmsg(struct cmtp_session *session, 150static void cmtp_send_interopmsg(struct cmtp_session *session,
@@ -386,8 +387,7 @@ static void cmtp_reset_ctr(struct capi_ctr *ctrl)
386 387
387 capi_ctr_down(ctrl); 388 capi_ctr_down(ctrl);
388 389
389 atomic_inc(&session->terminate); 390 kthread_stop(session->task);
390 cmtp_schedule(session);
391} 391}
392 392
393static void cmtp_register_appl(struct capi_ctr *ctrl, __u16 appl, capi_register_params *rp) 393static void cmtp_register_appl(struct capi_ctr *ctrl, __u16 appl, capi_register_params *rp)
diff --git a/net/bluetooth/cmtp/cmtp.h b/net/bluetooth/cmtp/cmtp.h
index 785e79e953c5..db43b54ac9af 100644
--- a/net/bluetooth/cmtp/cmtp.h
+++ b/net/bluetooth/cmtp/cmtp.h
@@ -37,7 +37,7 @@
37#define CMTP_LOOPBACK 0 37#define CMTP_LOOPBACK 0
38 38
39struct cmtp_connadd_req { 39struct cmtp_connadd_req {
40 int sock; // Connected socket 40 int sock; /* Connected socket */
41 __u32 flags; 41 __u32 flags;
42}; 42};
43 43
@@ -81,7 +81,7 @@ struct cmtp_session {
81 81
82 char name[BTNAMSIZ]; 82 char name[BTNAMSIZ];
83 83
84 atomic_t terminate; 84 struct task_struct *task;
85 85
86 wait_queue_head_t wait; 86 wait_queue_head_t wait;
87 87
@@ -121,13 +121,6 @@ void cmtp_detach_device(struct cmtp_session *session);
121 121
122void cmtp_recv_capimsg(struct cmtp_session *session, struct sk_buff *skb); 122void cmtp_recv_capimsg(struct cmtp_session *session, struct sk_buff *skb);
123 123
124static inline void cmtp_schedule(struct cmtp_session *session)
125{
126 struct sock *sk = session->sock->sk;
127
128 wake_up_interruptible(sk_sleep(sk));
129}
130
131/* CMTP init defines */ 124/* CMTP init defines */
132int cmtp_init_sockets(void); 125int cmtp_init_sockets(void);
133void cmtp_cleanup_sockets(void); 126void cmtp_cleanup_sockets(void);
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 964ea9126f9f..c5b11af908be 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -35,6 +35,7 @@
35#include <linux/ioctl.h> 35#include <linux/ioctl.h>
36#include <linux/file.h> 36#include <linux/file.h>
37#include <linux/init.h> 37#include <linux/init.h>
38#include <linux/kthread.h>
38#include <net/sock.h> 39#include <net/sock.h>
39 40
40#include <linux/isdn/capilli.h> 41#include <linux/isdn/capilli.h>
@@ -235,9 +236,12 @@ static void cmtp_process_transmit(struct cmtp_session *session)
235 236
236 size = min_t(uint, ((tail < 258) ? (tail - 2) : (tail - 3)), skb->len); 237 size = min_t(uint, ((tail < 258) ? (tail - 2) : (tail - 3)), skb->len);
237 238
238 if ((scb->id < 0) && ((scb->id = cmtp_alloc_block_id(session)) < 0)) { 239 if (scb->id < 0) {
239 skb_queue_head(&session->transmit, skb); 240 scb->id = cmtp_alloc_block_id(session);
240 break; 241 if (scb->id < 0) {
242 skb_queue_head(&session->transmit, skb);
243 break;
244 }
241 } 245 }
242 246
243 if (size < 256) { 247 if (size < 256) {
@@ -284,12 +288,11 @@ static int cmtp_session(void *arg)
284 288
285 BT_DBG("session %p", session); 289 BT_DBG("session %p", session);
286 290
287 daemonize("kcmtpd_ctr_%d", session->num);
288 set_user_nice(current, -15); 291 set_user_nice(current, -15);
289 292
290 init_waitqueue_entry(&wait, current); 293 init_waitqueue_entry(&wait, current);
291 add_wait_queue(sk_sleep(sk), &wait); 294 add_wait_queue(sk_sleep(sk), &wait);
292 while (!atomic_read(&session->terminate)) { 295 while (!kthread_should_stop()) {
293 set_current_state(TASK_INTERRUPTIBLE); 296 set_current_state(TASK_INTERRUPTIBLE);
294 297
295 if (sk->sk_state != BT_CONNECTED) 298 if (sk->sk_state != BT_CONNECTED)
@@ -343,7 +346,8 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
343 346
344 bacpy(&session->bdaddr, &bt_sk(sock->sk)->dst); 347 bacpy(&session->bdaddr, &bt_sk(sock->sk)->dst);
345 348
346 session->mtu = min_t(uint, l2cap_pi(sock->sk)->omtu, l2cap_pi(sock->sk)->imtu); 349 session->mtu = min_t(uint, l2cap_pi(sock->sk)->chan->omtu,
350 l2cap_pi(sock->sk)->chan->imtu);
347 351
348 BT_DBG("mtu %d", session->mtu); 352 BT_DBG("mtu %d", session->mtu);
349 353
@@ -367,9 +371,12 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
367 371
368 __cmtp_link_session(session); 372 __cmtp_link_session(session);
369 373
370 err = kernel_thread(cmtp_session, session, CLONE_KERNEL); 374 session->task = kthread_run(cmtp_session, session, "kcmtpd_ctr_%d",
371 if (err < 0) 375 session->num);
376 if (IS_ERR(session->task)) {
377 err = PTR_ERR(session->task);
372 goto unlink; 378 goto unlink;
379 }
373 380
374 if (!(session->flags & (1 << CMTP_LOOPBACK))) { 381 if (!(session->flags & (1 << CMTP_LOOPBACK))) {
375 err = cmtp_attach_device(session); 382 err = cmtp_attach_device(session);
@@ -406,9 +413,8 @@ int cmtp_del_connection(struct cmtp_conndel_req *req)
406 /* Flush the transmit queue */ 413 /* Flush the transmit queue */
407 skb_queue_purge(&session->transmit); 414 skb_queue_purge(&session->transmit);
408 415
409 /* Kill session thread */ 416 /* Stop session thread */
410 atomic_inc(&session->terminate); 417 kthread_stop(session->task);
411 cmtp_schedule(session);
412 } else 418 } else
413 err = -ENOENT; 419 err = -ENOENT;
414 420
diff --git a/net/bluetooth/cmtp/sock.c b/net/bluetooth/cmtp/sock.c
index 7ea1979a8e4f..3f2dd5c25ae5 100644
--- a/net/bluetooth/cmtp/sock.c
+++ b/net/bluetooth/cmtp/sock.c
@@ -34,12 +34,12 @@
34#include <linux/file.h> 34#include <linux/file.h>
35#include <linux/compat.h> 35#include <linux/compat.h>
36#include <linux/gfp.h> 36#include <linux/gfp.h>
37#include <linux/uaccess.h>
37#include <net/sock.h> 38#include <net/sock.h>
38 39
39#include <linux/isdn/capilli.h> 40#include <linux/isdn/capilli.h>
40 41
41#include <asm/system.h> 42#include <asm/system.h>
42#include <asm/uaccess.h>
43 43
44#include "cmtp.h" 44#include "cmtp.h"
45 45
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 7a6f56b2f49d..3163330cd4f1 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -269,6 +269,19 @@ static void hci_conn_idle(unsigned long arg)
269 hci_conn_enter_sniff_mode(conn); 269 hci_conn_enter_sniff_mode(conn);
270} 270}
271 271
272static void hci_conn_auto_accept(unsigned long arg)
273{
274 struct hci_conn *conn = (void *) arg;
275 struct hci_dev *hdev = conn->hdev;
276
277 hci_dev_lock(hdev);
278
279 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
280 &conn->dst);
281
282 hci_dev_unlock(hdev);
283}
284
272struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst) 285struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
273{ 286{
274 struct hci_conn *conn; 287 struct hci_conn *conn;
@@ -287,6 +300,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
287 conn->auth_type = HCI_AT_GENERAL_BONDING; 300 conn->auth_type = HCI_AT_GENERAL_BONDING;
288 conn->io_capability = hdev->io_capability; 301 conn->io_capability = hdev->io_capability;
289 conn->remote_auth = 0xff; 302 conn->remote_auth = 0xff;
303 conn->key_type = 0xff;
290 304
291 conn->power_save = 1; 305 conn->power_save = 1;
292 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 306 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
@@ -311,6 +325,8 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
311 325
312 setup_timer(&conn->disc_timer, hci_conn_timeout, (unsigned long)conn); 326 setup_timer(&conn->disc_timer, hci_conn_timeout, (unsigned long)conn);
313 setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn); 327 setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);
328 setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept,
329 (unsigned long) conn);
314 330
315 atomic_set(&conn->refcnt, 0); 331 atomic_set(&conn->refcnt, 0);
316 332
@@ -341,6 +357,8 @@ int hci_conn_del(struct hci_conn *conn)
341 357
342 del_timer(&conn->disc_timer); 358 del_timer(&conn->disc_timer);
343 359
360 del_timer(&conn->auto_accept_timer);
361
344 if (conn->type == ACL_LINK) { 362 if (conn->type == ACL_LINK) {
345 struct hci_conn *sco = conn->link; 363 struct hci_conn *sco = conn->link;
346 if (sco) 364 if (sco)
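
The auto-accept machinery added above is a standard 2.6-era kernel timer: initialized with setup_timer() in hci_conn_add() and cancelled with del_timer() in hci_conn_del(). The arming site is not part of this excerpt, so the following usage shape (and the timeout value) is only an assumption:

    setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept,
                (unsigned long) conn);              /* init, hci_conn_add() */

    /* arm elsewhere, e.g. when a user-confirmation request arrives */
    mod_timer(&conn->auto_accept_timer, jiffies + msecs_to_jiffies(5000));

    del_timer(&conn->auto_accept_timer);            /* teardown, hci_conn_del() */
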
@@ -535,36 +553,93 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
535 return 0; 553 return 0;
536} 554}
537 555
556/* Encrypt the link */
557static void hci_conn_encrypt(struct hci_conn *conn)
558{
559 BT_DBG("conn %p", conn);
560
561 if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
562 struct hci_cp_set_conn_encrypt cp;
563 cp.handle = cpu_to_le16(conn->handle);
564 cp.encrypt = 0x01;
565 hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
566 &cp);
567 }
568}
569
538/* Enable security */ 570/* Enable security */
539int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type) 571int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
540{ 572{
541 BT_DBG("conn %p", conn); 573 BT_DBG("conn %p", conn);
542 574
575 /* For sdp we don't need the link key. */
543 if (sec_level == BT_SECURITY_SDP) 576 if (sec_level == BT_SECURITY_SDP)
544 return 1; 577 return 1;
545 578
579 /* For non-2.1 devices and a low security level we don't need the link
580 key. */
546 if (sec_level == BT_SECURITY_LOW && 581 if (sec_level == BT_SECURITY_LOW &&
547 (!conn->ssp_mode || !conn->hdev->ssp_mode)) 582 (!conn->ssp_mode || !conn->hdev->ssp_mode))
548 return 1; 583 return 1;
549 584
550 if (conn->link_mode & HCI_LM_ENCRYPT) 585 /* For other security levels we need the link key. */
551 return hci_conn_auth(conn, sec_level, auth_type); 586 if (!(conn->link_mode & HCI_LM_AUTH))
552 587 goto auth;
588
589 /* An authenticated combination key has sufficient security for any
590 security level. */
591 if (conn->key_type == HCI_LK_AUTH_COMBINATION)
592 goto encrypt;
593
594 /* An unauthenticated combination key has sufficient security for
595 security level 1 and 2. */
596 if (conn->key_type == HCI_LK_UNAUTH_COMBINATION &&
597 (sec_level == BT_SECURITY_MEDIUM ||
598 sec_level == BT_SECURITY_LOW))
599 goto encrypt;
600
601 /* A combination key always has sufficient security for security
602 levels 1 or 2. A high security level requires that the combination
603 key was generated using the maximum PIN code length (16).
604 For pre-2.1 units. */
605 if (conn->key_type == HCI_LK_COMBINATION &&
606 (sec_level != BT_SECURITY_HIGH ||
607 conn->pin_length == 16))
608 goto encrypt;
609
610auth:
553 if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) 611 if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
554 return 0; 612 return 0;
555 613
556 if (hci_conn_auth(conn, sec_level, auth_type)) { 614 hci_conn_auth(conn, sec_level, auth_type);
557 struct hci_cp_set_conn_encrypt cp; 615 return 0;
558 cp.handle = cpu_to_le16(conn->handle); 616
559 cp.encrypt = 1; 617encrypt:
560 hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, 618 if (conn->link_mode & HCI_LM_ENCRYPT)
561 sizeof(cp), &cp); 619 return 1;
562 }
563 620
621 hci_conn_encrypt(conn);
564 return 0; 622 return 0;
565} 623}
566EXPORT_SYMBOL(hci_conn_security); 624EXPORT_SYMBOL(hci_conn_security);
567 625
626/* Check secure link requirement */
627int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
628{
629 BT_DBG("conn %p", conn);
630
631 if (sec_level != BT_SECURITY_HIGH)
632 return 1; /* Accept if non-secure is required */
633
634 if (conn->key_type == HCI_LK_AUTH_COMBINATION ||
635 (conn->key_type == HCI_LK_COMBINATION &&
636 conn->pin_length == 16))
637 return 1;
638
639 return 0; /* Reject non-secure link */
640}
641EXPORT_SYMBOL(hci_conn_check_secure);
642
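
The reworked hci_conn_security() encodes a key-type versus security-level table through its auth/encrypt labels. Collapsed into a pure predicate, the rules read as follows (an illustrative restatement, not code from the patch):

    static bool link_key_satisfies(__u8 key_type, __u8 pin_len, __u8 sec_level)
    {
            switch (key_type) {
            case HCI_LK_AUTH_COMBINATION:
                    return true;                    /* good for any level */
            case HCI_LK_UNAUTH_COMBINATION:         /* levels 1 and 2 only */
                    return sec_level == BT_SECURITY_LOW ||
                           sec_level == BT_SECURITY_MEDIUM;
            case HCI_LK_COMBINATION:                /* pre-2.1 units */
                    return sec_level != BT_SECURITY_HIGH || pin_len == 16;
            default:
                    return false;                   /* re-authenticate */
            }
    }
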
568/* Change link key */ 643/* Change link key */
569int hci_conn_change_link_key(struct hci_conn *conn) 644int hci_conn_change_link_key(struct hci_conn *conn)
570{ 645{
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index b5a8afc2be33..815269b07f20 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -56,7 +56,6 @@
56static void hci_cmd_task(unsigned long arg); 56static void hci_cmd_task(unsigned long arg);
57static void hci_rx_task(unsigned long arg); 57static void hci_rx_task(unsigned long arg);
58static void hci_tx_task(unsigned long arg); 58static void hci_tx_task(unsigned long arg);
59static void hci_notify(struct hci_dev *hdev, int event);
60 59
61static DEFINE_RWLOCK(hci_task_lock); 60static DEFINE_RWLOCK(hci_task_lock);
62 61
@@ -1021,18 +1020,54 @@ struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1021 return NULL; 1020 return NULL;
1022} 1021}
1023 1022
1024int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr, 1023static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1025 u8 *val, u8 type, u8 pin_len) 1024 u8 key_type, u8 old_key_type)
1025{
1026 /* Legacy key */
1027 if (key_type < 0x03)
1028 return 1;
1029
1030 /* Debug keys are insecure so don't store them persistently */
1031 if (key_type == HCI_LK_DEBUG_COMBINATION)
1032 return 0;
1033
1034 /* Changed combination key and there's no previous one */
1035 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1036 return 0;
1037
1038 /* Security mode 3 case */
1039 if (!conn)
1040 return 1;
1041
1042 /* Neither local nor remote side had no-bonding as a requirement */
1043 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1044 return 1;
1045
1046 /* Local side had dedicated bonding as requirement */
1047 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1048 return 1;
1049
1050 /* Remote side had dedicated bonding as requirement */
1051 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1052 return 1;
1053
1054 /* If none of the above criteria match, then don't store the key
1055 * persistently */
1056 return 0;
1057}
1058
1059int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1060 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1026{ 1061{
1027 struct link_key *key, *old_key; 1062 struct link_key *key, *old_key;
1028 u8 old_key_type; 1063 u8 old_key_type, persistent;
1029 1064
1030 old_key = hci_find_link_key(hdev, bdaddr); 1065 old_key = hci_find_link_key(hdev, bdaddr);
1031 if (old_key) { 1066 if (old_key) {
1032 old_key_type = old_key->type; 1067 old_key_type = old_key->type;
1033 key = old_key; 1068 key = old_key;
1034 } else { 1069 } else {
1035 old_key_type = 0xff; 1070 old_key_type = conn ? conn->key_type : 0xff;
1036 key = kzalloc(sizeof(*key), GFP_ATOMIC); 1071 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1037 if (!key) 1072 if (!key)
1038 return -ENOMEM; 1073 return -ENOMEM;
@@ -1041,16 +1076,37 @@ int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
1041 1076
1042 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type); 1077 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1043 1078
1079 /* Some buggy controller combinations generate a changed
1080 * combination key for legacy pairing even when there's no
1081 * previous key */
1082 if (type == HCI_LK_CHANGED_COMBINATION &&
1083 (!conn || conn->remote_auth == 0xff) &&
1084 old_key_type == 0xff) {
1085 type = HCI_LK_COMBINATION;
1086 if (conn)
1087 conn->key_type = type;
1088 }
1089
1044 bacpy(&key->bdaddr, bdaddr); 1090 bacpy(&key->bdaddr, bdaddr);
1045 memcpy(key->val, val, 16); 1091 memcpy(key->val, val, 16);
1046 key->type = type;
1047 key->pin_len = pin_len; 1092 key->pin_len = pin_len;
1048 1093
1049 if (new_key) 1094 if (type == HCI_LK_CHANGED_COMBINATION)
1050 mgmt_new_key(hdev->id, key, old_key_type);
1051
1052 if (type == 0x06)
1053 key->type = old_key_type; 1095 key->type = old_key_type;
1096 else
1097 key->type = type;
1098
1099 if (!new_key)
1100 return 0;
1101
1102 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1103
1104 mgmt_new_key(hdev->id, key, persistent);
1105
1106 if (!persistent) {
1107 list_del(&key->list);
1108 kfree(key);
1109 }
1054 1110
1055 return 0; 1111 return 0;
1056} 1112}
@@ -1082,6 +1138,70 @@ static void hci_cmd_timer(unsigned long arg)
1082 tasklet_schedule(&hdev->cmd_task); 1138 tasklet_schedule(&hdev->cmd_task);
1083} 1139}
1084 1140
1141struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1142 bdaddr_t *bdaddr)
1143{
1144 struct oob_data *data;
1145
1146 list_for_each_entry(data, &hdev->remote_oob_data, list)
1147 if (bacmp(bdaddr, &data->bdaddr) == 0)
1148 return data;
1149
1150 return NULL;
1151}
1152
1153int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1154{
1155 struct oob_data *data;
1156
1157 data = hci_find_remote_oob_data(hdev, bdaddr);
1158 if (!data)
1159 return -ENOENT;
1160
1161 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1162
1163 list_del(&data->list);
1164 kfree(data);
1165
1166 return 0;
1167}
1168
1169int hci_remote_oob_data_clear(struct hci_dev *hdev)
1170{
1171 struct oob_data *data, *n;
1172
1173 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1174 list_del(&data->list);
1175 kfree(data);
1176 }
1177
1178 return 0;
1179}
1180
1181int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1182 u8 *randomizer)
1183{
1184 struct oob_data *data;
1185
1186 data = hci_find_remote_oob_data(hdev, bdaddr);
1187
1188 if (!data) {
1189 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1190 if (!data)
1191 return -ENOMEM;
1192
1193 bacpy(&data->bdaddr, bdaddr);
1194 list_add(&data->list, &hdev->remote_oob_data);
1195 }
1196
1197 memcpy(data->hash, hash, sizeof(data->hash));
1198 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1199
1200 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1201
1202 return 0;
1203}
1204
1085/* Register HCI device */ 1205/* Register HCI device */
1086int hci_register_dev(struct hci_dev *hdev) 1206int hci_register_dev(struct hci_dev *hdev)
1087{ 1207{
@@ -1146,6 +1266,8 @@ int hci_register_dev(struct hci_dev *hdev)
1146 1266
1147 INIT_LIST_HEAD(&hdev->link_keys); 1267 INIT_LIST_HEAD(&hdev->link_keys);
1148 1268
1269 INIT_LIST_HEAD(&hdev->remote_oob_data);
1270
1149 INIT_WORK(&hdev->power_on, hci_power_on); 1271 INIT_WORK(&hdev->power_on, hci_power_on);
1150 INIT_WORK(&hdev->power_off, hci_power_off); 1272 INIT_WORK(&hdev->power_off, hci_power_off);
1151 setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev); 1273 setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);
@@ -1225,6 +1347,7 @@ int hci_unregister_dev(struct hci_dev *hdev)
1225 hci_blacklist_clear(hdev); 1347 hci_blacklist_clear(hdev);
1226 hci_uuids_clear(hdev); 1348 hci_uuids_clear(hdev);
1227 hci_link_keys_clear(hdev); 1349 hci_link_keys_clear(hdev);
1350 hci_remote_oob_data_clear(hdev);
1228 hci_dev_unlock_bh(hdev); 1351 hci_dev_unlock_bh(hdev);
1229 1352
1230 __hci_dev_put(hdev); 1353 __hci_dev_put(hdev);
@@ -1274,7 +1397,7 @@ int hci_recv_frame(struct sk_buff *skb)
1274EXPORT_SYMBOL(hci_recv_frame); 1397EXPORT_SYMBOL(hci_recv_frame);
1275 1398
1276static int hci_reassembly(struct hci_dev *hdev, int type, void *data, 1399static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1277 int count, __u8 index, gfp_t gfp_mask) 1400 int count, __u8 index)
1278{ 1401{
1279 int len = 0; 1402 int len = 0;
1280 int hlen = 0; 1403 int hlen = 0;
@@ -1304,7 +1427,7 @@ static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1304 break; 1427 break;
1305 } 1428 }
1306 1429
1307 skb = bt_skb_alloc(len, gfp_mask); 1430 skb = bt_skb_alloc(len, GFP_ATOMIC);
1308 if (!skb) 1431 if (!skb)
1309 return -ENOMEM; 1432 return -ENOMEM;
1310 1433
@@ -1390,8 +1513,7 @@ int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1390 return -EILSEQ; 1513 return -EILSEQ;
1391 1514
1392 while (count) { 1515 while (count) {
1393 rem = hci_reassembly(hdev, type, data, count, 1516 rem = hci_reassembly(hdev, type, data, count, type - 1);
1394 type - 1, GFP_ATOMIC);
1395 if (rem < 0) 1517 if (rem < 0)
1396 return rem; 1518 return rem;
1397 1519
@@ -1425,8 +1547,8 @@ int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1425 } else 1547 } else
1426 type = bt_cb(skb)->pkt_type; 1548 type = bt_cb(skb)->pkt_type;
1427 1549
1428 rem = hci_reassembly(hdev, type, data, 1550 rem = hci_reassembly(hdev, type, data, count,
1429 count, STREAM_REASSEMBLY, GFP_ATOMIC); 1551 STREAM_REASSEMBLY);
1430 if (rem < 0) 1552 if (rem < 0)
1431 return rem; 1553 return rem;
1432 1554
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index b2570159a044..f13ddbf858ba 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -56,7 +56,9 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
56 if (status) 56 if (status)
57 return; 57 return;
58 58
59 clear_bit(HCI_INQUIRY, &hdev->flags); 59 if (test_bit(HCI_MGMT, &hdev->flags) &&
60 test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
61 mgmt_discovering(hdev->id, 0);
60 62
61 hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status); 63 hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
62 64
@@ -72,7 +74,9 @@ static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
72 if (status) 74 if (status)
73 return; 75 return;
74 76
75 clear_bit(HCI_INQUIRY, &hdev->flags); 77 if (test_bit(HCI_MGMT, &hdev->flags) &&
78 test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
79 mgmt_discovering(hdev->id, 0);
76 80
77 hci_conn_check_pending(hdev); 81 hci_conn_check_pending(hdev);
78} 82}
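These inquiry handlers switch from a plain clear_bit() to a test_and_clear_bit() guard, so the "discovery stopped" event reaches user space at most once even if several completions race. A minimal userspace analogue of that idiom using C11 atomics (mgmt_discovering here is a stand-in, not the kernel function):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool inquiring;

static void mgmt_discovering(int discovering)
{
	printf("discovering = %d\n", discovering);
}

static void on_inquiry_complete(bool mgmt_enabled)
{
	/* atomic_exchange() returns the old value, like test_and_clear_bit() */
	if (mgmt_enabled && atomic_exchange(&inquiring, false))
		mgmt_discovering(0);
}

int main(void)
{
	atomic_store(&inquiring, true);
	on_inquiry_complete(true);	/* fires once */
	on_inquiry_complete(true);	/* flag already clear: silent */
	return 0;
}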
@@ -195,14 +199,17 @@ static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
195 199
196 BT_DBG("%s status 0x%x", hdev->name, status); 200 BT_DBG("%s status 0x%x", hdev->name, status);
197 201
198 if (status)
199 return;
200
201 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME); 202 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
202 if (!sent) 203 if (!sent)
203 return; 204 return;
204 205
205 memcpy(hdev->dev_name, sent, 248); 206 if (test_bit(HCI_MGMT, &hdev->flags))
207 mgmt_set_local_name_complete(hdev->id, sent, status);
208
209 if (status)
210 return;
211
212 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
206} 213}
207 214
208static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb) 215static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
@@ -214,7 +221,7 @@ static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
214 if (rp->status) 221 if (rp->status)
215 return; 222 return;
216 223
217 memcpy(hdev->dev_name, rp->name, 248); 224 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
218} 225}
219 226
220static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb) 227static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
@@ -821,16 +828,31 @@ static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
821 rp->status); 828 rp->status);
822} 829}
823 830
831static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
832 struct sk_buff *skb)
833{
834 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
835
836 BT_DBG("%s status 0x%x", hdev->name, rp->status);
837
838 mgmt_read_local_oob_data_reply_complete(hdev->id, rp->hash,
839 rp->randomizer, rp->status);
840}
841
824static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status) 842static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
825{ 843{
826 BT_DBG("%s status 0x%x", hdev->name, status); 844 BT_DBG("%s status 0x%x", hdev->name, status);
827 845
828 if (status) { 846 if (status) {
829 hci_req_complete(hdev, HCI_OP_INQUIRY, status); 847 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
830
831 hci_conn_check_pending(hdev); 848 hci_conn_check_pending(hdev);
832 } else 849 return;
833 set_bit(HCI_INQUIRY, &hdev->flags); 850 }
851
852 if (test_bit(HCI_MGMT, &hdev->flags) &&
853 !test_and_set_bit(HCI_INQUIRY,
854 &hdev->flags))
855 mgmt_discovering(hdev->id, 1);
834} 856}
835 857
836static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status) 858static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
@@ -999,12 +1021,19 @@ static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
999 hci_dev_lock(hdev); 1021 hci_dev_lock(hdev);
1000 1022
1001 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 1023 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1002 if (conn && hci_outgoing_auth_needed(hdev, conn)) { 1024 if (!conn)
1025 goto unlock;
1026
1027 if (!hci_outgoing_auth_needed(hdev, conn))
1028 goto unlock;
1029
1030 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
1003 struct hci_cp_auth_requested cp; 1031 struct hci_cp_auth_requested cp;
1004 cp.handle = __cpu_to_le16(conn->handle); 1032 cp.handle = __cpu_to_le16(conn->handle);
1005 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp); 1033 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1006 } 1034 }
1007 1035
1036unlock:
1008 hci_dev_unlock(hdev); 1037 hci_dev_unlock(hdev);
1009} 1038}
1010 1039
@@ -1194,7 +1223,9 @@ static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff
1194 1223
1195 BT_DBG("%s status %d", hdev->name, status); 1224 BT_DBG("%s status %d", hdev->name, status);
1196 1225
1197 clear_bit(HCI_INQUIRY, &hdev->flags); 1226 if (test_bit(HCI_MGMT, &hdev->flags) &&
1227 test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1228 mgmt_discovering(hdev->id, 0);
1198 1229
1199 hci_req_complete(hdev, HCI_OP_INQUIRY, status); 1230 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1200 1231
@@ -1214,7 +1245,13 @@ static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *
1214 1245
1215 hci_dev_lock(hdev); 1246 hci_dev_lock(hdev);
1216 1247
1217 for (; num_rsp; num_rsp--) { 1248 if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {
1249
1250 if (test_bit(HCI_MGMT, &hdev->flags))
1251 mgmt_discovering(hdev->id, 1);
1252 }
1253
1254 for (; num_rsp; num_rsp--, info++) {
1218 bacpy(&data.bdaddr, &info->bdaddr); 1255 bacpy(&data.bdaddr, &info->bdaddr);
1219 data.pscan_rep_mode = info->pscan_rep_mode; 1256 data.pscan_rep_mode = info->pscan_rep_mode;
1220 data.pscan_period_mode = info->pscan_period_mode; 1257 data.pscan_period_mode = info->pscan_period_mode;
@@ -1223,8 +1260,9 @@ static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *
1223 data.clock_offset = info->clock_offset; 1260 data.clock_offset = info->clock_offset;
1224 data.rssi = 0x00; 1261 data.rssi = 0x00;
1225 data.ssp_mode = 0x00; 1262 data.ssp_mode = 0x00;
1226 info++;
1227 hci_inquiry_cache_update(hdev, &data); 1263 hci_inquiry_cache_update(hdev, &data);
1264 mgmt_device_found(hdev->id, &info->bdaddr, info->dev_class, 0,
1265 NULL);
1228 } 1266 }
1229 1267
1230 hci_dev_unlock(hdev); 1268 hci_dev_unlock(hdev);
@@ -1402,7 +1440,7 @@ static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff
1402 1440
1403 conn->state = BT_CLOSED; 1441 conn->state = BT_CLOSED;
1404 1442
1405 if (conn->type == ACL_LINK) 1443 if (conn->type == ACL_LINK || conn->type == LE_LINK)
1406 mgmt_disconnected(hdev->id, &conn->dst); 1444 mgmt_disconnected(hdev->id, &conn->dst);
1407 1445
1408 hci_proto_disconn_cfm(conn, ev->reason); 1446 hci_proto_disconn_cfm(conn, ev->reason);
@@ -1428,7 +1466,6 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
1428 conn->sec_level = conn->pending_sec_level; 1466 conn->sec_level = conn->pending_sec_level;
1429 } else { 1467 } else {
1430 mgmt_auth_failed(hdev->id, &conn->dst, ev->status); 1468 mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
1431 conn->sec_level = BT_SECURITY_LOW;
1432 } 1469 }
1433 1470
1434 clear_bit(HCI_CONN_AUTH_PEND, &conn->pend); 1471 clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
@@ -1482,13 +1519,23 @@ static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb
1482 1519
1483 hci_dev_lock(hdev); 1520 hci_dev_lock(hdev);
1484 1521
1522 if (ev->status == 0 && test_bit(HCI_MGMT, &hdev->flags))
1523 mgmt_remote_name(hdev->id, &ev->bdaddr, ev->name);
1524
1485 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 1525 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1486 if (conn && hci_outgoing_auth_needed(hdev, conn)) { 1526 if (!conn)
1527 goto unlock;
1528
1529 if (!hci_outgoing_auth_needed(hdev, conn))
1530 goto unlock;
1531
1532 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
1487 struct hci_cp_auth_requested cp; 1533 struct hci_cp_auth_requested cp;
1488 cp.handle = __cpu_to_le16(conn->handle); 1534 cp.handle = __cpu_to_le16(conn->handle);
1489 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp); 1535 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1490 } 1536 }
1491 1537
1538unlock:
1492 hci_dev_unlock(hdev); 1539 hci_dev_unlock(hdev);
1493} 1540}
1494 1541
@@ -1751,6 +1798,10 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
1751 hci_cc_pin_code_neg_reply(hdev, skb); 1798 hci_cc_pin_code_neg_reply(hdev, skb);
1752 break; 1799 break;
1753 1800
1801 case HCI_OP_READ_LOCAL_OOB_DATA:
1802 hci_cc_read_local_oob_data_reply(hdev, skb);
1803 break;
1804
1754 case HCI_OP_LE_READ_BUFFER_SIZE: 1805 case HCI_OP_LE_READ_BUFFER_SIZE:
1755 hci_cc_le_read_buffer_size(hdev, skb); 1806 hci_cc_le_read_buffer_size(hdev, skb);
1756 break; 1807 break;
@@ -1984,9 +2035,16 @@ static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff
1984 if (!test_bit(HCI_PAIRABLE, &hdev->flags)) 2035 if (!test_bit(HCI_PAIRABLE, &hdev->flags))
1985 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, 2036 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
1986 sizeof(ev->bdaddr), &ev->bdaddr); 2037 sizeof(ev->bdaddr), &ev->bdaddr);
2038 else if (test_bit(HCI_MGMT, &hdev->flags)) {
2039 u8 secure;
1987 2040
1988 if (test_bit(HCI_MGMT, &hdev->flags)) 2041 if (conn->pending_sec_level == BT_SECURITY_HIGH)
1989 mgmt_pin_code_request(hdev->id, &ev->bdaddr); 2042 secure = 1;
2043 else
2044 secure = 0;
2045
2046 mgmt_pin_code_request(hdev->id, &ev->bdaddr, secure);
2047 }
1990 2048
1991 hci_dev_unlock(hdev); 2049 hci_dev_unlock(hdev);
1992} 2050}
@@ -2015,17 +2073,30 @@ static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff
2015 BT_DBG("%s found key type %u for %s", hdev->name, key->type, 2073 BT_DBG("%s found key type %u for %s", hdev->name, key->type,
2016 batostr(&ev->bdaddr)); 2074 batostr(&ev->bdaddr));
2017 2075
2018 if (!test_bit(HCI_DEBUG_KEYS, &hdev->flags) && key->type == 0x03) { 2076 if (!test_bit(HCI_DEBUG_KEYS, &hdev->flags) &&
2077 key->type == HCI_LK_DEBUG_COMBINATION) {
2019 BT_DBG("%s ignoring debug key", hdev->name); 2078 BT_DBG("%s ignoring debug key", hdev->name);
2020 goto not_found; 2079 goto not_found;
2021 } 2080 }
2022 2081
2023 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 2082 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2083 if (conn) {
2084 if (key->type == HCI_LK_UNAUTH_COMBINATION &&
2085 conn->auth_type != 0xff &&
2086 (conn->auth_type & 0x01)) {
2087 BT_DBG("%s ignoring unauthenticated key", hdev->name);
2088 goto not_found;
2089 }
2024 2090
2025 if (key->type == 0x04 && conn && conn->auth_type != 0xff && 2091 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
2026 (conn->auth_type & 0x01)) { 2092 conn->pending_sec_level == BT_SECURITY_HIGH) {
2027 BT_DBG("%s ignoring unauthenticated key", hdev->name); 2093 BT_DBG("%s ignoring key unauthenticated for high \
2028 goto not_found; 2094 security", hdev->name);
2095 goto not_found;
2096 }
2097
2098 conn->key_type = key->type;
2099 conn->pin_length = key->pin_len;
2029 } 2100 }
2030 2101
2031 bacpy(&cp.bdaddr, &ev->bdaddr); 2102 bacpy(&cp.bdaddr, &ev->bdaddr);
@@ -2057,11 +2128,15 @@ static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff
2057 hci_conn_hold(conn); 2128 hci_conn_hold(conn);
2058 conn->disc_timeout = HCI_DISCONN_TIMEOUT; 2129 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2059 pin_len = conn->pin_length; 2130 pin_len = conn->pin_length;
2131
2132 if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
2133 conn->key_type = ev->key_type;
2134
2060 hci_conn_put(conn); 2135 hci_conn_put(conn);
2061 } 2136 }
2062 2137
2063 if (test_bit(HCI_LINK_KEYS, &hdev->flags)) 2138 if (test_bit(HCI_LINK_KEYS, &hdev->flags))
2064 hci_add_link_key(hdev, 1, &ev->bdaddr, ev->link_key, 2139 hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
2065 ev->key_type, pin_len); 2140 ev->key_type, pin_len);
2066 2141
2067 hci_dev_unlock(hdev); 2142 hci_dev_unlock(hdev);
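Restated as a standalone predicate, the stricter link-key acceptance rules added above come down to three rejections. The constants mirror the Bluetooth link-key types named in the diff (combination 0x00, debug 0x03, unauthenticated combination 0x04); the function itself is a sketch, not kernel code:

#include <stdbool.h>

enum key_type {
	LK_COMBINATION		= 0x00,
	LK_DEBUG_COMBINATION	= 0x03,
	LK_UNAUTH_COMBINATION	= 0x04,
};

enum sec_level { SEC_LOW, SEC_MEDIUM, SEC_HIGH };

static bool key_acceptable(enum key_type type, int pin_len,
			   bool debug_keys, bool mitm_required,
			   enum sec_level pending)
{
	if (!debug_keys && type == LK_DEBUG_COMBINATION)
		return false;	/* never trust debug keys by default */
	if (type == LK_UNAUTH_COMBINATION && mitm_required)
		return false;	/* key carries no MITM protection */
	if (type == LK_COMBINATION && pin_len < 16 && pending == SEC_HIGH)
		return false;	/* short-PIN key is too weak for high security */
	return true;
}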
@@ -2136,11 +2211,17 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
2136 2211
2137 hci_dev_lock(hdev); 2212 hci_dev_lock(hdev);
2138 2213
2214 if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {
2215
2216 if (test_bit(HCI_MGMT, &hdev->flags))
2217 mgmt_discovering(hdev->id, 1);
2218 }
2219
2139 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) { 2220 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
2140 struct inquiry_info_with_rssi_and_pscan_mode *info; 2221 struct inquiry_info_with_rssi_and_pscan_mode *info;
2141 info = (void *) (skb->data + 1); 2222 info = (void *) (skb->data + 1);
2142 2223
2143 for (; num_rsp; num_rsp--) { 2224 for (; num_rsp; num_rsp--, info++) {
2144 bacpy(&data.bdaddr, &info->bdaddr); 2225 bacpy(&data.bdaddr, &info->bdaddr);
2145 data.pscan_rep_mode = info->pscan_rep_mode; 2226 data.pscan_rep_mode = info->pscan_rep_mode;
2146 data.pscan_period_mode = info->pscan_period_mode; 2227 data.pscan_period_mode = info->pscan_period_mode;
@@ -2149,13 +2230,15 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
2149 data.clock_offset = info->clock_offset; 2230 data.clock_offset = info->clock_offset;
2150 data.rssi = info->rssi; 2231 data.rssi = info->rssi;
2151 data.ssp_mode = 0x00; 2232 data.ssp_mode = 0x00;
2152 info++;
2153 hci_inquiry_cache_update(hdev, &data); 2233 hci_inquiry_cache_update(hdev, &data);
2234 mgmt_device_found(hdev->id, &info->bdaddr,
2235 info->dev_class, info->rssi,
2236 NULL);
2154 } 2237 }
2155 } else { 2238 } else {
2156 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1); 2239 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
2157 2240
2158 for (; num_rsp; num_rsp--) { 2241 for (; num_rsp; num_rsp--, info++) {
2159 bacpy(&data.bdaddr, &info->bdaddr); 2242 bacpy(&data.bdaddr, &info->bdaddr);
2160 data.pscan_rep_mode = info->pscan_rep_mode; 2243 data.pscan_rep_mode = info->pscan_rep_mode;
2161 data.pscan_period_mode = info->pscan_period_mode; 2244 data.pscan_period_mode = info->pscan_period_mode;
@@ -2164,8 +2247,10 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
2164 data.clock_offset = info->clock_offset; 2247 data.clock_offset = info->clock_offset;
2165 data.rssi = info->rssi; 2248 data.rssi = info->rssi;
2166 data.ssp_mode = 0x00; 2249 data.ssp_mode = 0x00;
2167 info++;
2168 hci_inquiry_cache_update(hdev, &data); 2250 hci_inquiry_cache_update(hdev, &data);
2251 mgmt_device_found(hdev->id, &info->bdaddr,
2252 info->dev_class, info->rssi,
2253 NULL);
2169 } 2254 }
2170 } 2255 }
2171 2256
@@ -2294,9 +2379,15 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
2294 if (!num_rsp) 2379 if (!num_rsp)
2295 return; 2380 return;
2296 2381
2382 if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {
2383
2384 if (test_bit(HCI_MGMT, &hdev->flags))
2385 mgmt_discovering(hdev->id, 1);
2386 }
2387
2297 hci_dev_lock(hdev); 2388 hci_dev_lock(hdev);
2298 2389
2299 for (; num_rsp; num_rsp--) { 2390 for (; num_rsp; num_rsp--, info++) {
2300 bacpy(&data.bdaddr, &info->bdaddr); 2391 bacpy(&data.bdaddr, &info->bdaddr);
2301 data.pscan_rep_mode = info->pscan_rep_mode; 2392 data.pscan_rep_mode = info->pscan_rep_mode;
2302 data.pscan_period_mode = info->pscan_period_mode; 2393 data.pscan_period_mode = info->pscan_period_mode;
@@ -2305,8 +2396,9 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
2305 data.clock_offset = info->clock_offset; 2396 data.clock_offset = info->clock_offset;
2306 data.rssi = info->rssi; 2397 data.rssi = info->rssi;
2307 data.ssp_mode = 0x01; 2398 data.ssp_mode = 0x01;
2308 info++;
2309 hci_inquiry_cache_update(hdev, &data); 2399 hci_inquiry_cache_update(hdev, &data);
2400 mgmt_device_found(hdev->id, &info->bdaddr, info->dev_class,
2401 info->rssi, info->data);
2310 } 2402 }
2311 2403
2312 hci_dev_unlock(hdev); 2404 hci_dev_unlock(hdev);
@@ -2326,7 +2418,7 @@ static inline u8 hci_get_auth_req(struct hci_conn *conn)
2326 2418
2327 /* If remote requests no-bonding follow that lead */ 2419 /* If remote requests no-bonding follow that lead */
2328 if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01) 2420 if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
2329 return 0x00; 2421 return conn->remote_auth | (conn->auth_type & 0x01);
2330 2422
2331 return conn->auth_type; 2423 return conn->auth_type;
2332} 2424}
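The hci_get_auth_req() fix hinges on bit 0 of an authentication requirement encoding "MITM protection required": when following the remote side's no-bonding request (0x00/0x01), the local MITM bit must survive the downgrade instead of being zeroed. A sketch of just that arithmetic:

static unsigned char get_auth_req(unsigned char remote_auth,
				  unsigned char local_auth)
{
	/* follow remote no-bonding, but keep our MITM requirement bit */
	if (remote_auth == 0x00 || remote_auth == 0x01)
		return remote_auth | (local_auth & 0x01);

	return local_auth;
}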
@@ -2355,8 +2447,14 @@ static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff
2355 2447
2356 bacpy(&cp.bdaddr, &ev->bdaddr); 2448 bacpy(&cp.bdaddr, &ev->bdaddr);
2357 cp.capability = conn->io_capability; 2449 cp.capability = conn->io_capability;
2358 cp.oob_data = 0; 2450 conn->auth_type = hci_get_auth_req(conn);
2359 cp.authentication = hci_get_auth_req(conn); 2451 cp.authentication = conn->auth_type;
2452
2453 if ((conn->out == 0x01 || conn->remote_oob == 0x01) &&
2454 hci_find_remote_oob_data(hdev, &conn->dst))
2455 cp.oob_data = 0x01;
2456 else
2457 cp.oob_data = 0x00;
2360 2458
2361 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY, 2459 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
2362 sizeof(cp), &cp); 2460 sizeof(cp), &cp);
@@ -2364,7 +2462,7 @@ static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff
2364 struct hci_cp_io_capability_neg_reply cp; 2462 struct hci_cp_io_capability_neg_reply cp;
2365 2463
2366 bacpy(&cp.bdaddr, &ev->bdaddr); 2464 bacpy(&cp.bdaddr, &ev->bdaddr);
2367 cp.reason = 0x16; /* Pairing not allowed */ 2465 cp.reason = 0x18; /* Pairing not allowed */
2368 2466
2369 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY, 2467 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
2370 sizeof(cp), &cp); 2468 sizeof(cp), &cp);
@@ -2399,14 +2497,67 @@ static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
2399 struct sk_buff *skb) 2497 struct sk_buff *skb)
2400{ 2498{
2401 struct hci_ev_user_confirm_req *ev = (void *) skb->data; 2499 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
2500 int loc_mitm, rem_mitm, confirm_hint = 0;
2501 struct hci_conn *conn;
2402 2502
2403 BT_DBG("%s", hdev->name); 2503 BT_DBG("%s", hdev->name);
2404 2504
2405 hci_dev_lock(hdev); 2505 hci_dev_lock(hdev);
2406 2506
2407 if (test_bit(HCI_MGMT, &hdev->flags)) 2507 if (!test_bit(HCI_MGMT, &hdev->flags))
2408 mgmt_user_confirm_request(hdev->id, &ev->bdaddr, ev->passkey); 2508 goto unlock;
2509
2510 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2511 if (!conn)
2512 goto unlock;
2513
2514 loc_mitm = (conn->auth_type & 0x01);
2515 rem_mitm = (conn->remote_auth & 0x01);
2516
2517 /* If we require MITM but the remote device can't provide that
2518 * (it has NoInputNoOutput) then reject the confirmation
2519 * request. The only exception is when we're dedicated bonding
2520 * initiators (connect_cfm_cb set) since then we always have the MITM
2521 * bit set. */
2522 if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
2523 BT_DBG("Rejecting request: remote device can't provide MITM");
2524 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
2525 sizeof(ev->bdaddr), &ev->bdaddr);
2526 goto unlock;
2527 }
2528
2529	/* If no side requires MITM protection, auto-accept */
2530 if ((!loc_mitm || conn->remote_cap == 0x03) &&
2531 (!rem_mitm || conn->io_capability == 0x03)) {
2532
2533	/* If we're not the initiators, request authorization to
2534 * proceed from user space (mgmt_user_confirm with
2535 * confirm_hint set to 1). */
2536 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
2537 BT_DBG("Confirming auto-accept as acceptor");
2538 confirm_hint = 1;
2539 goto confirm;
2540 }
2541
2542 BT_DBG("Auto-accept of user confirmation with %ums delay",
2543 hdev->auto_accept_delay);
2544
2545 if (hdev->auto_accept_delay > 0) {
2546 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
2547 mod_timer(&conn->auto_accept_timer, jiffies + delay);
2548 goto unlock;
2549 }
2550
2551 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
2552 sizeof(ev->bdaddr), &ev->bdaddr);
2553 goto unlock;
2554 }
2555
2556confirm:
2557 mgmt_user_confirm_request(hdev->id, &ev->bdaddr, ev->passkey,
2558 confirm_hint);
2409 2559
2560unlock:
2410 hci_dev_unlock(hdev); 2561 hci_dev_unlock(hdev);
2411} 2562}
2412 2563
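The auto-accept branch above can be read as a plain decision table: a side that demands MITM blocks auto-accept only if the other side is actually capable of providing it. A sketch of that predicate, using 0x03 (NoInputNoOutput) for the IO capability and bit 0 as the MITM flag, as in the handler:

#include <stdbool.h>

#define IO_NO_INPUT_NO_OUTPUT 0x03

static bool can_auto_accept(unsigned char local_auth, unsigned char local_cap,
			    unsigned char remote_auth, unsigned char remote_cap)
{
	bool loc_mitm = local_auth & 0x01;
	bool rem_mitm = remote_auth & 0x01;

	/* auto-accept only when no side both wants MITM protection and
	 * faces a peer that could actually provide it */
	return (!loc_mitm || remote_cap == IO_NO_INPUT_NO_OUTPUT) &&
	       (!rem_mitm || local_cap == IO_NO_INPUT_NO_OUTPUT);
}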
@@ -2453,6 +2604,41 @@ static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_
2453 hci_dev_unlock(hdev); 2604 hci_dev_unlock(hdev);
2454} 2605}
2455 2606
2607static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
2608 struct sk_buff *skb)
2609{
2610 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
2611 struct oob_data *data;
2612
2613 BT_DBG("%s", hdev->name);
2614
2615 hci_dev_lock(hdev);
2616
2617 if (!test_bit(HCI_MGMT, &hdev->flags))
2618 goto unlock;
2619
2620 data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
2621 if (data) {
2622 struct hci_cp_remote_oob_data_reply cp;
2623
2624 bacpy(&cp.bdaddr, &ev->bdaddr);
2625 memcpy(cp.hash, data->hash, sizeof(cp.hash));
2626 memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
2627
2628 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
2629 &cp);
2630 } else {
2631 struct hci_cp_remote_oob_data_neg_reply cp;
2632
2633 bacpy(&cp.bdaddr, &ev->bdaddr);
2634 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
2635 &cp);
2636 }
2637
2638unlock:
2639 hci_dev_unlock(hdev);
2640}
2641
2456static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 2642static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2457{ 2643{
2458 struct hci_ev_le_conn_complete *ev = (void *) skb->data; 2644 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
@@ -2473,12 +2659,15 @@ static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff
2473 } 2659 }
2474 2660
2475 if (ev->status) { 2661 if (ev->status) {
2662 mgmt_connect_failed(hdev->id, &ev->bdaddr, ev->status);
2476 hci_proto_connect_cfm(conn, ev->status); 2663 hci_proto_connect_cfm(conn, ev->status);
2477 conn->state = BT_CLOSED; 2664 conn->state = BT_CLOSED;
2478 hci_conn_del(conn); 2665 hci_conn_del(conn);
2479 goto unlock; 2666 goto unlock;
2480 } 2667 }
2481 2668
2669 mgmt_connected(hdev->id, &ev->bdaddr);
2670
2482 conn->handle = __le16_to_cpu(ev->handle); 2671 conn->handle = __le16_to_cpu(ev->handle);
2483 conn->state = BT_CONNECTED; 2672 conn->state = BT_CONNECTED;
2484 2673
@@ -2655,6 +2844,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
2655 hci_le_meta_evt(hdev, skb); 2844 hci_le_meta_evt(hdev, skb);
2656 break; 2845 break;
2657 2846
2847 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
2848 hci_remote_oob_data_request_evt(hdev, skb);
2849 break;
2850
2658 default: 2851 default:
2659 BT_DBG("%s event 0x%x", hdev->name, event); 2852 BT_DBG("%s event 0x%x", hdev->name, event);
2660 break; 2853 break;
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 3c838a65a75a..a6c3aa8be1f7 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -216,13 +216,13 @@ static ssize_t show_type(struct device *dev, struct device_attribute *attr, char
216static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf) 216static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf)
217{ 217{
218 struct hci_dev *hdev = dev_get_drvdata(dev); 218 struct hci_dev *hdev = dev_get_drvdata(dev);
219 char name[249]; 219 char name[HCI_MAX_NAME_LENGTH + 1];
220 int i; 220 int i;
221 221
222 for (i = 0; i < 248; i++) 222 for (i = 0; i < HCI_MAX_NAME_LENGTH; i++)
223 name[i] = hdev->dev_name[i]; 223 name[i] = hdev->dev_name[i];
224 224
225 name[248] = '\0'; 225 name[HCI_MAX_NAME_LENGTH] = '\0';
226 return sprintf(buf, "%s\n", name); 226 return sprintf(buf, "%s\n", name);
227} 227}
228 228
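With the buffer sized from the named constant, the copy bound and the terminator index can no longer drift apart the way three separate literal 248s could. HCI_MAX_NAME_LENGTH (248) is taken from the diff; the function below is an illustrative shape, not the sysfs hook itself:

#include <stdio.h>
#include <string.h>

#define HCI_MAX_NAME_LENGTH 248

static void show_name(const unsigned char *dev_name, char *buf, size_t bufsz)
{
	char name[HCI_MAX_NAME_LENGTH + 1];

	memcpy(name, dev_name, HCI_MAX_NAME_LENGTH);
	name[HCI_MAX_NAME_LENGTH] = '\0';	/* device may not NUL-terminate */
	snprintf(buf, bufsz, "%s\n", name);
}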
@@ -277,10 +277,12 @@ static ssize_t show_idle_timeout(struct device *dev, struct device_attribute *at
277static ssize_t store_idle_timeout(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 277static ssize_t store_idle_timeout(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
278{ 278{
279 struct hci_dev *hdev = dev_get_drvdata(dev); 279 struct hci_dev *hdev = dev_get_drvdata(dev);
280 unsigned long val; 280 unsigned int val;
281 int rv;
281 282
282 if (strict_strtoul(buf, 0, &val) < 0) 283 rv = kstrtouint(buf, 0, &val);
283 return -EINVAL; 284 if (rv < 0)
285 return rv;
284 286
285 if (val != 0 && (val < 500 || val > 3600000)) 287 if (val != 0 && (val < 500 || val > 3600000))
286 return -EINVAL; 288 return -EINVAL;
@@ -299,15 +301,14 @@ static ssize_t show_sniff_max_interval(struct device *dev, struct device_attribu
299static ssize_t store_sniff_max_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 301static ssize_t store_sniff_max_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
300{ 302{
301 struct hci_dev *hdev = dev_get_drvdata(dev); 303 struct hci_dev *hdev = dev_get_drvdata(dev);
302 unsigned long val; 304 u16 val;
303 305 int rv;
304 if (strict_strtoul(buf, 0, &val) < 0)
305 return -EINVAL;
306 306
307 if (val < 0x0002 || val > 0xFFFE || val % 2) 307 rv = kstrtou16(buf, 0, &val);
308 return -EINVAL; 308 if (rv < 0)
309 return rv;
309 310
310 if (val < hdev->sniff_min_interval) 311 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
311 return -EINVAL; 312 return -EINVAL;
312 313
313 hdev->sniff_max_interval = val; 314 hdev->sniff_max_interval = val;
@@ -324,15 +325,14 @@ static ssize_t show_sniff_min_interval(struct device *dev, struct device_attribu
324static ssize_t store_sniff_min_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) 325static ssize_t store_sniff_min_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
325{ 326{
326 struct hci_dev *hdev = dev_get_drvdata(dev); 327 struct hci_dev *hdev = dev_get_drvdata(dev);
327 unsigned long val; 328 u16 val;
329 int rv;
328 330
329 if (strict_strtoul(buf, 0, &val) < 0) 331 rv = kstrtou16(buf, 0, &val);
330 return -EINVAL; 332 if (rv < 0)
331 333 return rv;
332 if (val < 0x0002 || val > 0xFFFE || val % 2)
333 return -EINVAL;
334 334
335 if (val > hdev->sniff_max_interval) 335 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
336 return -EINVAL; 336 return -EINVAL;
337 337
338 hdev->sniff_min_interval = val; 338 hdev->sniff_min_interval = val;
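The kstrtou16() conversion folds the old 0x0002..0xFFFE range check into the type itself: anything that does not fit a u16 is rejected by the parser, leaving only the zero, parity, and min/max-ordering checks. A userspace approximation of that validation, written with strtoul since kstrtou16() is kernel-only:

#include <errno.h>
#include <stdlib.h>

static int parse_sniff_interval(const char *buf, unsigned int lo,
				unsigned int hi, unsigned int *out)
{
	char *end;
	unsigned long val;

	errno = 0;
	val = strtoul(buf, &end, 0);
	if (errno || end == buf || (*end && *end != '\n'))
		return -EINVAL;		/* junk or no digits at all */
	if (val > 0xffff)
		return -ERANGE;		/* would not fit in a u16 */
	if (val == 0 || val % 2 || val < lo || val > hi)
		return -EINVAL;		/* must be even and inside window */

	*out = (unsigned int)val;
	return 0;
}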
@@ -511,6 +511,35 @@ static const struct file_operations uuids_fops = {
511 .release = single_release, 511 .release = single_release,
512}; 512};
513 513
514static int auto_accept_delay_set(void *data, u64 val)
515{
516 struct hci_dev *hdev = data;
517
518 hci_dev_lock_bh(hdev);
519
520 hdev->auto_accept_delay = val;
521
522 hci_dev_unlock_bh(hdev);
523
524 return 0;
525}
526
527static int auto_accept_delay_get(void *data, u64 *val)
528{
529 struct hci_dev *hdev = data;
530
531 hci_dev_lock_bh(hdev);
532
533 *val = hdev->auto_accept_delay;
534
535 hci_dev_unlock_bh(hdev);
536
537 return 0;
538}
539
540DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
541 auto_accept_delay_set, "%llu\n");
542
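DEFINE_SIMPLE_ATTRIBUTE() wants exactly this accessor shape: int (*get)(void *, u64 *) and int (*set)(void *, u64), each taking the device lock around a single field. A userspace rendition of the pair using a pthread mutex in place of hci_dev_lock_bh() (the struct and names are stand-ins):

#include <pthread.h>
#include <stdint.h>

struct dev {
	pthread_mutex_t lock;
	uint64_t auto_accept_delay;
};

static struct dev dev0 = { PTHREAD_MUTEX_INITIALIZER, 0 };

static int delay_set(void *data, uint64_t val)
{
	struct dev *d = data;

	pthread_mutex_lock(&d->lock);
	d->auto_accept_delay = val;
	pthread_mutex_unlock(&d->lock);
	return 0;
}

static int delay_get(void *data, uint64_t *val)
{
	struct dev *d = data;

	pthread_mutex_lock(&d->lock);
	*val = d->auto_accept_delay;
	pthread_mutex_unlock(&d->lock);
	return 0;
}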
514int hci_register_sysfs(struct hci_dev *hdev) 543int hci_register_sysfs(struct hci_dev *hdev)
515{ 544{
516 struct device *dev = &hdev->dev; 545 struct device *dev = &hdev->dev;
@@ -545,6 +574,8 @@ int hci_register_sysfs(struct hci_dev *hdev)
545 574
546 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops); 575 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
547 576
577 debugfs_create_file("auto_accept_delay", 0444, hdev->debugfs, hdev,
578 &auto_accept_delay_fops);
548 return 0; 579 return 0;
549} 580}
550 581
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 5ec12971af6b..c405a954a603 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -37,6 +37,7 @@
37#include <linux/init.h> 37#include <linux/init.h>
38#include <linux/wait.h> 38#include <linux/wait.h>
39#include <linux/mutex.h> 39#include <linux/mutex.h>
40#include <linux/kthread.h>
40#include <net/sock.h> 41#include <net/sock.h>
41 42
42#include <linux/input.h> 43#include <linux/input.h>
@@ -55,22 +56,24 @@ static DECLARE_RWSEM(hidp_session_sem);
55static LIST_HEAD(hidp_session_list); 56static LIST_HEAD(hidp_session_list);
56 57
57static unsigned char hidp_keycode[256] = { 58static unsigned char hidp_keycode[256] = {
58 0, 0, 0, 0, 30, 48, 46, 32, 18, 33, 34, 35, 23, 36, 37, 38, 59 0, 0, 0, 0, 30, 48, 46, 32, 18, 33, 34, 35, 23, 36,
59 50, 49, 24, 25, 16, 19, 31, 20, 22, 47, 17, 45, 21, 44, 2, 3, 60 37, 38, 50, 49, 24, 25, 16, 19, 31, 20, 22, 47, 17, 45,
60 4, 5, 6, 7, 8, 9, 10, 11, 28, 1, 14, 15, 57, 12, 13, 26, 61 21, 44, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 28, 1,
61 27, 43, 43, 39, 40, 41, 51, 52, 53, 58, 59, 60, 61, 62, 63, 64, 62 14, 15, 57, 12, 13, 26, 27, 43, 43, 39, 40, 41, 51, 52,
62 65, 66, 67, 68, 87, 88, 99, 70,119,110,102,104,111,107,109,106, 63 53, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 87, 88,
63 105,108,103, 69, 98, 55, 74, 78, 96, 79, 80, 81, 75, 76, 77, 71, 64 99, 70, 119, 110, 102, 104, 111, 107, 109, 106, 105, 108, 103, 69,
64 72, 73, 82, 83, 86,127,116,117,183,184,185,186,187,188,189,190, 65 98, 55, 74, 78, 96, 79, 80, 81, 75, 76, 77, 71, 72, 73,
65 191,192,193,194,134,138,130,132,128,129,131,137,133,135,136,113, 66 82, 83, 86, 127, 116, 117, 183, 184, 185, 186, 187, 188, 189, 190,
66 115,114, 0, 0, 0,121, 0, 89, 93,124, 92, 94, 95, 0, 0, 0, 67 191, 192, 193, 194, 134, 138, 130, 132, 128, 129, 131, 137, 133, 135,
67 122,123, 90, 91, 85, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 68 136, 113, 115, 114, 0, 0, 0, 121, 0, 89, 93, 124, 92, 94,
68 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 69 95, 0, 0, 0, 122, 123, 90, 91, 85, 0, 0, 0, 0, 0,
69 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 70 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
70 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 71 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
71 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 72 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
72 29, 42, 56,125, 97, 54,100,126,164,166,165,163,161,115,114,113, 73 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
73 150,158,159,128,136,177,178,176,142,152,173,140 74 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
75 29, 42, 56, 125, 97, 54, 100, 126, 164, 166, 165, 163, 161, 115,
76 114, 113, 150, 158, 159, 128, 136, 177, 178, 176, 142, 152, 173, 140
74}; 77};
75 78
76static unsigned char hidp_mkeyspat[] = { 0x01, 0x01, 0x01, 0x01, 0x01, 0x01 }; 79static unsigned char hidp_mkeyspat[] = { 0x01, 0x01, 0x01, 0x01, 0x01, 0x01 };
@@ -461,8 +464,7 @@ static void hidp_idle_timeout(unsigned long arg)
461{ 464{
462 struct hidp_session *session = (struct hidp_session *) arg; 465 struct hidp_session *session = (struct hidp_session *) arg;
463 466
464 atomic_inc(&session->terminate); 467 kthread_stop(session->task);
465 hidp_schedule(session);
466} 468}
467 469
468static void hidp_set_timer(struct hidp_session *session) 470static void hidp_set_timer(struct hidp_session *session)
@@ -533,9 +535,7 @@ static void hidp_process_hid_control(struct hidp_session *session,
533 skb_queue_purge(&session->ctrl_transmit); 535 skb_queue_purge(&session->ctrl_transmit);
534 skb_queue_purge(&session->intr_transmit); 536 skb_queue_purge(&session->intr_transmit);
535 537
536 /* Kill session thread */ 538 kthread_stop(session->task);
537 atomic_inc(&session->terminate);
538 hidp_schedule(session);
539 } 539 }
540} 540}
541 541
@@ -694,22 +694,10 @@ static int hidp_session(void *arg)
694 struct sock *ctrl_sk = session->ctrl_sock->sk; 694 struct sock *ctrl_sk = session->ctrl_sock->sk;
695 struct sock *intr_sk = session->intr_sock->sk; 695 struct sock *intr_sk = session->intr_sock->sk;
696 struct sk_buff *skb; 696 struct sk_buff *skb;
697 int vendor = 0x0000, product = 0x0000;
698 wait_queue_t ctrl_wait, intr_wait; 697 wait_queue_t ctrl_wait, intr_wait;
699 698
700 BT_DBG("session %p", session); 699 BT_DBG("session %p", session);
701 700
702 if (session->input) {
703 vendor = session->input->id.vendor;
704 product = session->input->id.product;
705 }
706
707 if (session->hid) {
708 vendor = session->hid->vendor;
709 product = session->hid->product;
710 }
711
712 daemonize("khidpd_%04x%04x", vendor, product);
713 set_user_nice(current, -15); 701 set_user_nice(current, -15);
714 702
715 init_waitqueue_entry(&ctrl_wait, current); 703 init_waitqueue_entry(&ctrl_wait, current);
@@ -718,10 +706,11 @@ static int hidp_session(void *arg)
718 add_wait_queue(sk_sleep(intr_sk), &intr_wait); 706 add_wait_queue(sk_sleep(intr_sk), &intr_wait);
719 session->waiting_for_startup = 0; 707 session->waiting_for_startup = 0;
720 wake_up_interruptible(&session->startup_queue); 708 wake_up_interruptible(&session->startup_queue);
721 while (!atomic_read(&session->terminate)) { 709 while (!kthread_should_stop()) {
722 set_current_state(TASK_INTERRUPTIBLE); 710 set_current_state(TASK_INTERRUPTIBLE);
723 711
724 if (ctrl_sk->sk_state != BT_CONNECTED || intr_sk->sk_state != BT_CONNECTED) 712 if (ctrl_sk->sk_state != BT_CONNECTED ||
713 intr_sk->sk_state != BT_CONNECTED)
725 break; 714 break;
726 715
727 while ((skb = skb_dequeue(&ctrl_sk->sk_receive_queue))) { 716 while ((skb = skb_dequeue(&ctrl_sk->sk_receive_queue))) {
@@ -965,6 +954,7 @@ fault:
965int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, struct socket *intr_sock) 954int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, struct socket *intr_sock)
966{ 955{
967 struct hidp_session *session, *s; 956 struct hidp_session *session, *s;
957 int vendor, product;
968 int err; 958 int err;
969 959
970 BT_DBG(""); 960 BT_DBG("");
@@ -989,8 +979,10 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
989 979
990 bacpy(&session->bdaddr, &bt_sk(ctrl_sock->sk)->dst); 980 bacpy(&session->bdaddr, &bt_sk(ctrl_sock->sk)->dst);
991 981
992 session->ctrl_mtu = min_t(uint, l2cap_pi(ctrl_sock->sk)->omtu, l2cap_pi(ctrl_sock->sk)->imtu); 982 session->ctrl_mtu = min_t(uint, l2cap_pi(ctrl_sock->sk)->chan->omtu,
993 session->intr_mtu = min_t(uint, l2cap_pi(intr_sock->sk)->omtu, l2cap_pi(intr_sock->sk)->imtu); 983 l2cap_pi(ctrl_sock->sk)->chan->imtu);
984 session->intr_mtu = min_t(uint, l2cap_pi(intr_sock->sk)->chan->omtu,
985 l2cap_pi(intr_sock->sk)->chan->imtu);
994 986
995 BT_DBG("ctrl mtu %d intr mtu %d", session->ctrl_mtu, session->intr_mtu); 987 BT_DBG("ctrl mtu %d intr mtu %d", session->ctrl_mtu, session->intr_mtu);
996 988
@@ -1026,9 +1018,24 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
1026 1018
1027 hidp_set_timer(session); 1019 hidp_set_timer(session);
1028 1020
1029 err = kernel_thread(hidp_session, session, CLONE_KERNEL); 1021 if (session->hid) {
1030 if (err < 0) 1022 vendor = session->hid->vendor;
1023 product = session->hid->product;
1024 } else if (session->input) {
1025 vendor = session->input->id.vendor;
1026 product = session->input->id.product;
1027 } else {
1028 vendor = 0x0000;
1029 product = 0x0000;
1030 }
1031
1032 session->task = kthread_run(hidp_session, session, "khidpd_%04x%04x",
1033 vendor, product);
1034 if (IS_ERR(session->task)) {
1035 err = PTR_ERR(session->task);
1031 goto unlink; 1036 goto unlink;
1037 }
1038
1032 while (session->waiting_for_startup) { 1039 while (session->waiting_for_startup) {
1033 wait_event_interruptible(session->startup_queue, 1040 wait_event_interruptible(session->startup_queue,
1034 !session->waiting_for_startup); 1041 !session->waiting_for_startup);
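The kthread_run() conversion above relies on the kernel's error-pointer convention: on failure the returned pointer encodes a negative errno in the last page of the address space, tested with IS_ERR() and decoded with PTR_ERR(). A freestanding rendition of that convention (MAX_ERRNO matches the kernel's 4095):

#include <stdio.h>

#define MAX_ERRNO 4095

static inline int is_err(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static inline long ptr_err(const void *ptr)
{
	return (long)ptr;
}

int main(void)
{
	void *task = (void *)(long)-12;	/* pretend the spawn failed: -ENOMEM */

	if (is_err(task))
		printf("thread start failed: %ld\n", ptr_err(task));
	return 0;
}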
@@ -1053,8 +1060,7 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
1053err_add_device: 1060err_add_device:
1054 hid_destroy_device(session->hid); 1061 hid_destroy_device(session->hid);
1055 session->hid = NULL; 1062 session->hid = NULL;
1056 atomic_inc(&session->terminate); 1063 kthread_stop(session->task);
1057 hidp_schedule(session);
1058 1064
1059unlink: 1065unlink:
1060 hidp_del_timer(session); 1066 hidp_del_timer(session);
@@ -1105,13 +1111,7 @@ int hidp_del_connection(struct hidp_conndel_req *req)
1105 skb_queue_purge(&session->ctrl_transmit); 1111 skb_queue_purge(&session->ctrl_transmit);
1106 skb_queue_purge(&session->intr_transmit); 1112 skb_queue_purge(&session->intr_transmit);
1107 1113
1108 /* Wakeup user-space polling for socket errors */ 1114 kthread_stop(session->task);
1109 session->intr_sock->sk->sk_err = EUNATCH;
1110 session->ctrl_sock->sk->sk_err = EUNATCH;
1111
1112 /* Kill session thread */
1113 atomic_inc(&session->terminate);
1114 hidp_schedule(session);
1115 } 1115 }
1116 } else 1116 } else
1117 err = -ENOENT; 1117 err = -ENOENT;
diff --git a/net/bluetooth/hidp/hidp.h b/net/bluetooth/hidp/hidp.h
index 13de5fa03480..12822cde4b49 100644
--- a/net/bluetooth/hidp/hidp.h
+++ b/net/bluetooth/hidp/hidp.h
@@ -84,8 +84,8 @@
84#define HIDP_WAITING_FOR_SEND_ACK 11 84#define HIDP_WAITING_FOR_SEND_ACK 11
85 85
86struct hidp_connadd_req { 86struct hidp_connadd_req {
87 int ctrl_sock; // Connected control socket 87 int ctrl_sock; /* Connected control socket */
88 int intr_sock; // Connteted interrupt socket 88 int intr_sock; /* Connected interrupt socket */
89 __u16 parser; 89 __u16 parser;
90 __u16 rd_size; 90 __u16 rd_size;
91 __u8 __user *rd_data; 91 __u8 __user *rd_data;
@@ -142,7 +142,7 @@ struct hidp_session {
142 uint ctrl_mtu; 142 uint ctrl_mtu;
143 uint intr_mtu; 143 uint intr_mtu;
144 144
145 atomic_t terminate; 145 struct task_struct *task;
146 146
147 unsigned char keys[8]; 147 unsigned char keys[8];
148 unsigned char leds; 148 unsigned char leds;
diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c
index 250dfd46237d..178ac7f127ad 100644
--- a/net/bluetooth/hidp/sock.c
+++ b/net/bluetooth/hidp/sock.c
@@ -85,7 +85,8 @@ static int hidp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
85 return err; 85 return err;
86 } 86 }
87 87
88 if (csock->sk->sk_state != BT_CONNECTED || isock->sk->sk_state != BT_CONNECTED) { 88 if (csock->sk->sk_state != BT_CONNECTED ||
89 isock->sk->sk_state != BT_CONNECTED) {
89 sockfd_put(csock); 90 sockfd_put(csock);
90 sockfd_put(isock); 91 sockfd_put(isock);
91 return -EBADFD; 92 return -EBADFD;
@@ -140,8 +141,8 @@ static int hidp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
140 141
141#ifdef CONFIG_COMPAT 142#ifdef CONFIG_COMPAT
142struct compat_hidp_connadd_req { 143struct compat_hidp_connadd_req {
143 int ctrl_sock; // Connected control socket 144 int ctrl_sock; /* Connected control socket */
144 int intr_sock; // Connteted interrupt socket 145 int intr_sock; /* Connected interrupt socket */
145 __u16 parser; 146 __u16 parser;
146 __u16 rd_size; 147 __u16 rd_size;
147 compat_uptr_t rd_data; 148 compat_uptr_t rd_data;
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 2c8dd4494c63..a86f9ba4f05c 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -62,168 +62,233 @@ static u8 l2cap_fixed_chan[8] = { 0x02, };
62 62
63static struct workqueue_struct *_busy_wq; 63static struct workqueue_struct *_busy_wq;
64 64
65struct bt_sock_list l2cap_sk_list = { 65LIST_HEAD(chan_list);
66 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock) 66DEFINE_RWLOCK(chan_list_lock);
67};
68 67
69static void l2cap_busy_work(struct work_struct *work); 68static void l2cap_busy_work(struct work_struct *work);
70 69
71static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, 70static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
72 u8 code, u8 ident, u16 dlen, void *data); 71 u8 code, u8 ident, u16 dlen, void *data);
72static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 73
74static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb); 74static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
75 75
76/* ---- L2CAP channels ---- */ 76/* ---- L2CAP channels ---- */
77static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid) 77static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
78{ 78{
79 struct sock *s; 79 struct l2cap_chan *c;
80 for (s = l->head; s; s = l2cap_pi(s)->next_c) { 80
81 if (l2cap_pi(s)->dcid == cid) 81 list_for_each_entry(c, &conn->chan_l, list) {
82 break; 82 if (c->dcid == cid)
83 return c;
83 } 84 }
84 return s; 85 return NULL;
86
85} 87}
86 88
87static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid) 89static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
88{ 90{
89 struct sock *s; 91 struct l2cap_chan *c;
90 for (s = l->head; s; s = l2cap_pi(s)->next_c) { 92
91 if (l2cap_pi(s)->scid == cid) 93 list_for_each_entry(c, &conn->chan_l, list) {
92 break; 94 if (c->scid == cid)
95 return c;
93 } 96 }
94 return s; 97 return NULL;
95} 98}
96 99
97/* Find channel with given SCID. 100/* Find channel with given SCID.
98 * Returns locked socket */ 101 * Returns locked socket */
99static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid) 102static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
100{ 103{
101 struct sock *s; 104 struct l2cap_chan *c;
102 read_lock(&l->lock); 105
103 s = __l2cap_get_chan_by_scid(l, cid); 106 read_lock(&conn->chan_lock);
104 if (s) 107 c = __l2cap_get_chan_by_scid(conn, cid);
105 bh_lock_sock(s); 108 if (c)
106 read_unlock(&l->lock); 109 bh_lock_sock(c->sk);
107 return s; 110 read_unlock(&conn->chan_lock);
111 return c;
108} 112}
109 113
110static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident) 114static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
111{ 115{
112 struct sock *s; 116 struct l2cap_chan *c;
113 for (s = l->head; s; s = l2cap_pi(s)->next_c) { 117
114 if (l2cap_pi(s)->ident == ident) 118 list_for_each_entry(c, &conn->chan_l, list) {
115 break; 119 if (c->ident == ident)
120 return c;
121 }
122 return NULL;
123}
124
125static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
126{
127 struct l2cap_chan *c;
128
129 read_lock(&conn->chan_lock);
130 c = __l2cap_get_chan_by_ident(conn, ident);
131 if (c)
132 bh_lock_sock(c->sk);
133 read_unlock(&conn->chan_lock);
134 return c;
135}
136
137static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
138{
139 struct l2cap_chan *c;
140
141 list_for_each_entry(c, &chan_list, global_l) {
142 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
143 goto found;
144 }
145
146 c = NULL;
147found:
148 return c;
149}
150
151int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
152{
153 int err;
154
155 write_lock_bh(&chan_list_lock);
156
157 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
158 err = -EADDRINUSE;
159 goto done;
116 } 160 }
117 return s; 161
162 if (psm) {
163 chan->psm = psm;
164 chan->sport = psm;
165 err = 0;
166 } else {
167 u16 p;
168
169 err = -EINVAL;
170 for (p = 0x1001; p < 0x1100; p += 2)
171 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
172 chan->psm = cpu_to_le16(p);
173 chan->sport = cpu_to_le16(p);
174 err = 0;
175 break;
176 }
177 }
178
179done:
180 write_unlock_bh(&chan_list_lock);
181 return err;
118} 182}
119 183
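The fallback branch of l2cap_add_psm() scans the dynamic PSM space for a free value; valid dynamic PSMs are odd and live in 0x1001..0x10ff, hence the += 2 walk. A sketch of just the scan, with in_use() standing in for __l2cap_global_chan_by_addr():

#include <stdbool.h>
#include <stdint.h>

static bool in_use(uint16_t psm)	/* stand-in lookup; always free here */
{
	(void)psm;
	return false;
}

static int alloc_dynamic_psm(uint16_t *out)
{
	uint16_t p;

	for (p = 0x1001; p < 0x1100; p += 2) {
		if (!in_use(p)) {
			*out = p;
			return 0;
		}
	}
	return -1;	/* -EINVAL in the kernel code */
}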
120static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident) 184int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
121{ 185{
122 struct sock *s; 186 write_lock_bh(&chan_list_lock);
123 read_lock(&l->lock); 187
124 s = __l2cap_get_chan_by_ident(l, ident); 188 chan->scid = scid;
125 if (s) 189
126 bh_lock_sock(s); 190 write_unlock_bh(&chan_list_lock);
127 read_unlock(&l->lock); 191
128 return s; 192 return 0;
129} 193}
130 194
131static u16 l2cap_alloc_cid(struct l2cap_chan_list *l) 195static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
132{ 196{
133 u16 cid = L2CAP_CID_DYN_START; 197 u16 cid = L2CAP_CID_DYN_START;
134 198
135 for (; cid < L2CAP_CID_DYN_END; cid++) { 199 for (; cid < L2CAP_CID_DYN_END; cid++) {
136 if (!__l2cap_get_chan_by_scid(l, cid)) 200 if (!__l2cap_get_chan_by_scid(conn, cid))
137 return cid; 201 return cid;
138 } 202 }
139 203
140 return 0; 204 return 0;
141} 205}
142 206
143static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk) 207struct l2cap_chan *l2cap_chan_create(struct sock *sk)
144{ 208{
145 sock_hold(sk); 209 struct l2cap_chan *chan;
146 210
147 if (l->head) 211 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
148 l2cap_pi(l->head)->prev_c = sk; 212 if (!chan)
213 return NULL;
149 214
150 l2cap_pi(sk)->next_c = l->head; 215 chan->sk = sk;
151 l2cap_pi(sk)->prev_c = NULL;
152 l->head = sk;
153}
154 216
155static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk) 217 write_lock_bh(&chan_list_lock);
156{ 218 list_add(&chan->global_l, &chan_list);
157 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c; 219 write_unlock_bh(&chan_list_lock);
158 220
159 write_lock_bh(&l->lock); 221 return chan;
160 if (sk == l->head) 222}
161 l->head = next;
162 223
163 if (next) 224void l2cap_chan_destroy(struct l2cap_chan *chan)
164 l2cap_pi(next)->prev_c = prev; 225{
165 if (prev) 226 write_lock_bh(&chan_list_lock);
166 l2cap_pi(prev)->next_c = next; 227 list_del(&chan->global_l);
167 write_unlock_bh(&l->lock); 228 write_unlock_bh(&chan_list_lock);
168 229
169 __sock_put(sk); 230 kfree(chan);
170} 231}
171 232
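The conversion above replaces hand-rolled prev_c/next_c socket pointers with a struct list_head embedded in l2cap_chan. The trick that makes intrusive lists work is container_of(): recovering the containing struct from a pointer to the embedded node. A self-contained sketch with a single-linked stand-in for list_head:

#include <stddef.h>

struct list_node { struct list_node *next; };

struct chan {
	unsigned short dcid;
	struct list_node list;	/* embedded node, as chan->list above */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct chan *get_chan_by_dcid(struct list_node *head,
				     unsigned short cid)
{
	struct list_node *n;

	for (n = head; n; n = n->next) {
		struct chan *c = container_of(n, struct chan, list);

		if (c->dcid == cid)
			return c;
	}
	return NULL;
}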
172static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent) 233static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
173{ 234{
174 struct l2cap_chan_list *l = &conn->chan_list; 235 struct sock *sk = chan->sk;
175 236
176 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, 237 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
177 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid); 238 chan->psm, chan->dcid);
178 239
179 conn->disc_reason = 0x13; 240 conn->disc_reason = 0x13;
180 241
181 l2cap_pi(sk)->conn = conn; 242 chan->conn = conn;
182 243
183 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) { 244 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
184 if (conn->hcon->type == LE_LINK) { 245 if (conn->hcon->type == LE_LINK) {
185 /* LE connection */ 246 /* LE connection */
186 l2cap_pi(sk)->omtu = L2CAP_LE_DEFAULT_MTU; 247 chan->omtu = L2CAP_LE_DEFAULT_MTU;
187 l2cap_pi(sk)->scid = L2CAP_CID_LE_DATA; 248 chan->scid = L2CAP_CID_LE_DATA;
188 l2cap_pi(sk)->dcid = L2CAP_CID_LE_DATA; 249 chan->dcid = L2CAP_CID_LE_DATA;
189 } else { 250 } else {
190 /* Alloc CID for connection-oriented socket */ 251 /* Alloc CID for connection-oriented socket */
191 l2cap_pi(sk)->scid = l2cap_alloc_cid(l); 252 chan->scid = l2cap_alloc_cid(conn);
192 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU; 253 chan->omtu = L2CAP_DEFAULT_MTU;
193 } 254 }
194 } else if (sk->sk_type == SOCK_DGRAM) { 255 } else if (sk->sk_type == SOCK_DGRAM) {
195 /* Connectionless socket */ 256 /* Connectionless socket */
196 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS; 257 chan->scid = L2CAP_CID_CONN_LESS;
197 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS; 258 chan->dcid = L2CAP_CID_CONN_LESS;
198 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU; 259 chan->omtu = L2CAP_DEFAULT_MTU;
199 } else { 260 } else {
200 /* Raw socket can send/recv signalling messages only */ 261 /* Raw socket can send/recv signalling messages only */
201 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING; 262 chan->scid = L2CAP_CID_SIGNALING;
202 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING; 263 chan->dcid = L2CAP_CID_SIGNALING;
203 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU; 264 chan->omtu = L2CAP_DEFAULT_MTU;
204 } 265 }
205 266
206 __l2cap_chan_link(l, sk); 267 sock_hold(sk);
207 268
208 if (parent) 269 list_add(&chan->list, &conn->chan_l);
209 bt_accept_enqueue(parent, sk);
210} 270}
211 271
212/* Delete channel. 272/* Delete channel.
213 * Must be called on the locked socket. */ 273 * Must be called on the locked socket. */
214void l2cap_chan_del(struct sock *sk, int err) 274void l2cap_chan_del(struct l2cap_chan *chan, int err)
215{ 275{
216 struct l2cap_conn *conn = l2cap_pi(sk)->conn; 276 struct sock *sk = chan->sk;
277 struct l2cap_conn *conn = chan->conn;
217 struct sock *parent = bt_sk(sk)->parent; 278 struct sock *parent = bt_sk(sk)->parent;
218 279
219 l2cap_sock_clear_timer(sk); 280 l2cap_sock_clear_timer(sk);
220 281
221 BT_DBG("sk %p, conn %p, err %d", sk, conn, err); 282 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
222 283
223 if (conn) { 284 if (conn) {
224 /* Unlink from channel list */ 285 /* Delete from channel list */
225 l2cap_chan_unlink(&conn->chan_list, sk); 286 write_lock_bh(&conn->chan_lock);
226 l2cap_pi(sk)->conn = NULL; 287 list_del(&chan->list);
288 write_unlock_bh(&conn->chan_lock);
289 __sock_put(sk);
290
291 chan->conn = NULL;
227 hci_conn_put(conn->hcon); 292 hci_conn_put(conn->hcon);
228 } 293 }
229 294
@@ -239,29 +304,35 @@ void l2cap_chan_del(struct sock *sk, int err)
239 } else 304 } else
240 sk->sk_state_change(sk); 305 sk->sk_state_change(sk);
241 306
242 skb_queue_purge(TX_QUEUE(sk)); 307 if (!(chan->conf_state & L2CAP_CONF_OUTPUT_DONE &&
308 chan->conf_state & L2CAP_CONF_INPUT_DONE))
309 return;
310
311 skb_queue_purge(&chan->tx_q);
243 312
244 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) { 313 if (chan->mode == L2CAP_MODE_ERTM) {
245 struct srej_list *l, *tmp; 314 struct srej_list *l, *tmp;
246 315
247 del_timer(&l2cap_pi(sk)->retrans_timer); 316 del_timer(&chan->retrans_timer);
248 del_timer(&l2cap_pi(sk)->monitor_timer); 317 del_timer(&chan->monitor_timer);
249 del_timer(&l2cap_pi(sk)->ack_timer); 318 del_timer(&chan->ack_timer);
250 319
251 skb_queue_purge(SREJ_QUEUE(sk)); 320 skb_queue_purge(&chan->srej_q);
252 skb_queue_purge(BUSY_QUEUE(sk)); 321 skb_queue_purge(&chan->busy_q);
253 322
254 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) { 323 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
255 list_del(&l->list); 324 list_del(&l->list);
256 kfree(l); 325 kfree(l);
257 } 326 }
258 } 327 }
259} 328}
260 329
261static inline u8 l2cap_get_auth_type(struct sock *sk) 330static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
262{ 331{
332 struct sock *sk = chan->sk;
333
263 if (sk->sk_type == SOCK_RAW) { 334 if (sk->sk_type == SOCK_RAW) {
264 switch (l2cap_pi(sk)->sec_level) { 335 switch (chan->sec_level) {
265 case BT_SECURITY_HIGH: 336 case BT_SECURITY_HIGH:
266 return HCI_AT_DEDICATED_BONDING_MITM; 337 return HCI_AT_DEDICATED_BONDING_MITM;
267 case BT_SECURITY_MEDIUM: 338 case BT_SECURITY_MEDIUM:
@@ -269,16 +340,16 @@ static inline u8 l2cap_get_auth_type(struct sock *sk)
269 default: 340 default:
270 return HCI_AT_NO_BONDING; 341 return HCI_AT_NO_BONDING;
271 } 342 }
272 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) { 343 } else if (chan->psm == cpu_to_le16(0x0001)) {
273 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW) 344 if (chan->sec_level == BT_SECURITY_LOW)
274 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP; 345 chan->sec_level = BT_SECURITY_SDP;
275 346
276 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH) 347 if (chan->sec_level == BT_SECURITY_HIGH)
277 return HCI_AT_NO_BONDING_MITM; 348 return HCI_AT_NO_BONDING_MITM;
278 else 349 else
279 return HCI_AT_NO_BONDING; 350 return HCI_AT_NO_BONDING;
280 } else { 351 } else {
281 switch (l2cap_pi(sk)->sec_level) { 352 switch (chan->sec_level) {
282 case BT_SECURITY_HIGH: 353 case BT_SECURITY_HIGH:
283 return HCI_AT_GENERAL_BONDING_MITM; 354 return HCI_AT_GENERAL_BONDING_MITM;
284 case BT_SECURITY_MEDIUM: 355 case BT_SECURITY_MEDIUM:
@@ -290,15 +361,14 @@ static inline u8 l2cap_get_auth_type(struct sock *sk)
290} 361}
291 362
292/* Service level security */ 363/* Service level security */
293static inline int l2cap_check_security(struct sock *sk) 364static inline int l2cap_check_security(struct l2cap_chan *chan)
294{ 365{
295 struct l2cap_conn *conn = l2cap_pi(sk)->conn; 366 struct l2cap_conn *conn = chan->conn;
296 __u8 auth_type; 367 __u8 auth_type;
297 368
298 auth_type = l2cap_get_auth_type(sk); 369 auth_type = l2cap_get_auth_type(chan);
299 370
300 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level, 371 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
301 auth_type);
302} 372}
303 373
304u8 l2cap_get_ident(struct l2cap_conn *conn) 374u8 l2cap_get_ident(struct l2cap_conn *conn)
@@ -341,11 +411,12 @@ void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *d
341 hci_send_acl(conn->hcon, skb, flags); 411 hci_send_acl(conn->hcon, skb, flags);
342} 412}
343 413
344static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control) 414static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
345{ 415{
346 struct sk_buff *skb; 416 struct sk_buff *skb;
347 struct l2cap_hdr *lh; 417 struct l2cap_hdr *lh;
348 struct l2cap_conn *conn = pi->conn; 418 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
419 struct l2cap_conn *conn = chan->conn;
349 struct sock *sk = (struct sock *)pi; 420 struct sock *sk = (struct sock *)pi;
350 int count, hlen = L2CAP_HDR_SIZE + 2; 421 int count, hlen = L2CAP_HDR_SIZE + 2;
351 u8 flags; 422 u8 flags;
@@ -353,22 +424,22 @@ static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
353 if (sk->sk_state != BT_CONNECTED) 424 if (sk->sk_state != BT_CONNECTED)
354 return; 425 return;
355 426
356 if (pi->fcs == L2CAP_FCS_CRC16) 427 if (chan->fcs == L2CAP_FCS_CRC16)
357 hlen += 2; 428 hlen += 2;
358 429
359 BT_DBG("pi %p, control 0x%2.2x", pi, control); 430 BT_DBG("chan %p, control 0x%2.2x", chan, control);
360 431
361 count = min_t(unsigned int, conn->mtu, hlen); 432 count = min_t(unsigned int, conn->mtu, hlen);
362 control |= L2CAP_CTRL_FRAME_TYPE; 433 control |= L2CAP_CTRL_FRAME_TYPE;
363 434
364 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) { 435 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
365 control |= L2CAP_CTRL_FINAL; 436 control |= L2CAP_CTRL_FINAL;
366 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT; 437 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
367 } 438 }
368 439
369 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) { 440 if (chan->conn_state & L2CAP_CONN_SEND_PBIT) {
370 control |= L2CAP_CTRL_POLL; 441 control |= L2CAP_CTRL_POLL;
371 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT; 442 chan->conn_state &= ~L2CAP_CONN_SEND_PBIT;
372 } 443 }
373 444
374 skb = bt_skb_alloc(count, GFP_ATOMIC); 445 skb = bt_skb_alloc(count, GFP_ATOMIC);
@@ -377,10 +448,10 @@ static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
377 448
378 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); 449 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
379 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE); 450 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
380 lh->cid = cpu_to_le16(pi->dcid); 451 lh->cid = cpu_to_le16(chan->dcid);
381 put_unaligned_le16(control, skb_put(skb, 2)); 452 put_unaligned_le16(control, skb_put(skb, 2));
382 453
383 if (pi->fcs == L2CAP_FCS_CRC16) { 454 if (chan->fcs == L2CAP_FCS_CRC16) {
384 u16 fcs = crc16(0, (u8 *)lh, count - 2); 455 u16 fcs = crc16(0, (u8 *)lh, count - 2);
385 put_unaligned_le16(fcs, skb_put(skb, 2)); 456 put_unaligned_le16(fcs, skb_put(skb, 2));
386 } 457 }
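When the channel negotiated L2CAP_FCS_CRC16, l2cap_send_sframe() grows the header by two bytes and appends a CRC over everything before the trailer. The kernel's crc16() is, to my understanding, the reflected CRC-16/ARC (polynomial 0xA001, initial value 0); a bitwise userspace version for reference:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t crc16_arc(uint16_t crc, const uint8_t *buf, size_t len)
{
	while (len--) {
		crc ^= *buf++;
		for (int i = 0; i < 8; i++)
			crc = (crc & 1) ? (crc >> 1) ^ 0xA001 : crc >> 1;
	}
	return crc;
}

int main(void)
{
	/* illustrative S-frame bytes: length, CID, control */
	const uint8_t frame[] = { 0x02, 0x00, 0x40, 0x00, 0x01, 0x00 };
	uint16_t fcs = crc16_arc(0, frame, sizeof(frame));

	printf("FCS to append (little endian): 0x%04x\n", fcs);
	return 0;
}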
@@ -390,45 +461,46 @@ static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
390 else 461 else
391 flags = ACL_START; 462 flags = ACL_START;
392 463
393 hci_send_acl(pi->conn->hcon, skb, flags); 464 hci_send_acl(chan->conn->hcon, skb, flags);
394} 465}
395 466
396static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control) 467static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
397{ 468{
398 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) { 469 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
399 control |= L2CAP_SUPER_RCV_NOT_READY; 470 control |= L2CAP_SUPER_RCV_NOT_READY;
400 pi->conn_state |= L2CAP_CONN_RNR_SENT; 471 chan->conn_state |= L2CAP_CONN_RNR_SENT;
401 } else 472 } else
402 control |= L2CAP_SUPER_RCV_READY; 473 control |= L2CAP_SUPER_RCV_READY;
403 474
404 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT; 475 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
405 476
406 l2cap_send_sframe(pi, control); 477 l2cap_send_sframe(chan, control);
407} 478}
408 479
409static inline int __l2cap_no_conn_pending(struct sock *sk) 480static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
410{ 481{
411 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND); 482 return !(chan->conf_state & L2CAP_CONF_CONNECT_PEND);
412} 483}
413 484
414static void l2cap_do_start(struct sock *sk) 485static void l2cap_do_start(struct l2cap_chan *chan)
415{ 486{
416 struct l2cap_conn *conn = l2cap_pi(sk)->conn; 487 struct l2cap_conn *conn = chan->conn;
417 488
418 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) { 489 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
419 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)) 490 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
420 return; 491 return;
421 492
422 if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) { 493 if (l2cap_check_security(chan) &&
494 __l2cap_no_conn_pending(chan)) {
423 struct l2cap_conn_req req; 495 struct l2cap_conn_req req;
424 req.scid = cpu_to_le16(l2cap_pi(sk)->scid); 496 req.scid = cpu_to_le16(chan->scid);
425 req.psm = l2cap_pi(sk)->psm; 497 req.psm = chan->psm;
426 498
427 l2cap_pi(sk)->ident = l2cap_get_ident(conn); 499 chan->ident = l2cap_get_ident(conn);
428 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND; 500 chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
429 501
430 l2cap_send_cmd(conn, l2cap_pi(sk)->ident, 502 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
431 L2CAP_CONN_REQ, sizeof(req), &req); 503 sizeof(req), &req);
432 } 504 }
433 } else { 505 } else {
434 struct l2cap_info_req req; 506 struct l2cap_info_req req;
@@ -461,23 +533,24 @@ static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
461 } 533 }
462} 534}
463 535
464void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err) 536void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
465{ 537{
538 struct sock *sk;
466 struct l2cap_disconn_req req; 539 struct l2cap_disconn_req req;
467 540
468 if (!conn) 541 if (!conn)
469 return; 542 return;
470 543
471 skb_queue_purge(TX_QUEUE(sk)); 544 sk = chan->sk;
472 545
473 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) { 546 if (chan->mode == L2CAP_MODE_ERTM) {
474 del_timer(&l2cap_pi(sk)->retrans_timer); 547 del_timer(&chan->retrans_timer);
475 del_timer(&l2cap_pi(sk)->monitor_timer); 548 del_timer(&chan->monitor_timer);
476 del_timer(&l2cap_pi(sk)->ack_timer); 549 del_timer(&chan->ack_timer);
477 } 550 }
478 551
479 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid); 552 req.dcid = cpu_to_le16(chan->dcid);
480 req.scid = cpu_to_le16(l2cap_pi(sk)->scid); 553 req.scid = cpu_to_le16(chan->scid);
481 l2cap_send_cmd(conn, l2cap_get_ident(conn), 554 l2cap_send_cmd(conn, l2cap_get_ident(conn),
482 L2CAP_DISCONN_REQ, sizeof(req), &req); 555 L2CAP_DISCONN_REQ, sizeof(req), &req);
483 556
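
Two details of the l2cap_send_disconn_req() conversion are worth noting. The struct sock parameter is gone from the signature; the socket is now fetched from chan->sk, presumably for the state update in the elided tail of the function, and the old skb_queue_purge(TX_QUEUE(sk)) disappears from this spot, presumably having moved to the common channel-teardown path. More importantly, for ERTM channels all three timers are cancelled before the disconnect request goes out, so no retransmission or acknowledgement fires against a channel that is being torn down. The pattern in isolation (a sketch; the helper name is invented):

    #include <linux/timer.h>

    /* Sketch: quiesce ERTM timers before tearing a channel down.  Note that
     * del_timer() does not wait for a handler already running on another
     * CPU -- del_timer_sync() would -- so this relies on the callers'
     * locking to keep the callbacks out. */
    static void ertm_stop_timers(struct l2cap_chan *chan)
    {
            if (chan->mode != L2CAP_MODE_ERTM)
                    return;

            del_timer(&chan->retrans_timer);
            del_timer(&chan->monitor_timer);
            del_timer(&chan->ack_timer);
    }
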
@@ -488,17 +561,15 @@ void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
488/* ---- L2CAP connections ---- */ 561/* ---- L2CAP connections ---- */
489static void l2cap_conn_start(struct l2cap_conn *conn) 562static void l2cap_conn_start(struct l2cap_conn *conn)
490{ 563{
491 struct l2cap_chan_list *l = &conn->chan_list; 564 struct l2cap_chan *chan, *tmp;
492 struct sock_del_list del, *tmp1, *tmp2;
493 struct sock *sk;
494 565
495 BT_DBG("conn %p", conn); 566 BT_DBG("conn %p", conn);
496 567
497 INIT_LIST_HEAD(&del.list); 568 read_lock(&conn->chan_lock);
498 569
499 read_lock(&l->lock); 570 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
571 struct sock *sk = chan->sk;
500 572
501 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
502 bh_lock_sock(sk); 573 bh_lock_sock(sk);
503 574
504 if (sk->sk_type != SOCK_SEQPACKET && 575 if (sk->sk_type != SOCK_SEQPACKET &&
@@ -510,40 +581,41 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
510 if (sk->sk_state == BT_CONNECT) { 581 if (sk->sk_state == BT_CONNECT) {
511 struct l2cap_conn_req req; 582 struct l2cap_conn_req req;
512 583
513 if (!l2cap_check_security(sk) || 584 if (!l2cap_check_security(chan) ||
514 !__l2cap_no_conn_pending(sk)) { 585 !__l2cap_no_conn_pending(chan)) {
515 bh_unlock_sock(sk); 586 bh_unlock_sock(sk);
516 continue; 587 continue;
517 } 588 }
518 589
519 if (!l2cap_mode_supported(l2cap_pi(sk)->mode, 590 if (!l2cap_mode_supported(chan->mode,
520 conn->feat_mask) 591 conn->feat_mask)
521 && l2cap_pi(sk)->conf_state & 592 && chan->conf_state &
522 L2CAP_CONF_STATE2_DEVICE) { 593 L2CAP_CONF_STATE2_DEVICE) {
523 tmp1 = kzalloc(sizeof(struct sock_del_list), 594 /* __l2cap_sock_close() calls list_del(chan)
524 GFP_ATOMIC); 595 * so release the lock */
525 tmp1->sk = sk; 596 read_unlock_bh(&conn->chan_lock);
526 list_add_tail(&tmp1->list, &del.list); 597 __l2cap_sock_close(sk, ECONNRESET);
598 read_lock_bh(&conn->chan_lock);
527 bh_unlock_sock(sk); 599 bh_unlock_sock(sk);
528 continue; 600 continue;
529 } 601 }
530 602
531 req.scid = cpu_to_le16(l2cap_pi(sk)->scid); 603 req.scid = cpu_to_le16(chan->scid);
532 req.psm = l2cap_pi(sk)->psm; 604 req.psm = chan->psm;
533 605
534 l2cap_pi(sk)->ident = l2cap_get_ident(conn); 606 chan->ident = l2cap_get_ident(conn);
535 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND; 607 chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
536 608
537 l2cap_send_cmd(conn, l2cap_pi(sk)->ident, 609 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
538 L2CAP_CONN_REQ, sizeof(req), &req); 610 sizeof(req), &req);
539 611
540 } else if (sk->sk_state == BT_CONNECT2) { 612 } else if (sk->sk_state == BT_CONNECT2) {
541 struct l2cap_conn_rsp rsp; 613 struct l2cap_conn_rsp rsp;
542 char buf[128]; 614 char buf[128];
543 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid); 615 rsp.scid = cpu_to_le16(chan->dcid);
544 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid); 616 rsp.dcid = cpu_to_le16(chan->scid);
545 617
546 if (l2cap_check_security(sk)) { 618 if (l2cap_check_security(chan)) {
547 if (bt_sk(sk)->defer_setup) { 619 if (bt_sk(sk)->defer_setup) {
548 struct sock *parent = bt_sk(sk)->parent; 620 struct sock *parent = bt_sk(sk)->parent;
549 rsp.result = cpu_to_le16(L2CAP_CR_PEND); 621 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
@@ -560,80 +632,77 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
560 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND); 632 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
561 } 633 }
562 634
563 l2cap_send_cmd(conn, l2cap_pi(sk)->ident, 635 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
564 L2CAP_CONN_RSP, sizeof(rsp), &rsp); 636 sizeof(rsp), &rsp);
565 637
566 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT || 638 if (chan->conf_state & L2CAP_CONF_REQ_SENT ||
567 rsp.result != L2CAP_CR_SUCCESS) { 639 rsp.result != L2CAP_CR_SUCCESS) {
568 bh_unlock_sock(sk); 640 bh_unlock_sock(sk);
569 continue; 641 continue;
570 } 642 }
571 643
572 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT; 644 chan->conf_state |= L2CAP_CONF_REQ_SENT;
573 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, 645 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
574 l2cap_build_conf_req(sk, buf), buf); 646 l2cap_build_conf_req(chan, buf), buf);
575 l2cap_pi(sk)->num_conf_req++; 647 chan->num_conf_req++;
576 } 648 }
577 649
578 bh_unlock_sock(sk); 650 bh_unlock_sock(sk);
579 } 651 }
580 652
581 read_unlock(&l->lock); 653 read_unlock(&conn->chan_lock);
582
583 list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
584 bh_lock_sock(tmp1->sk);
585 __l2cap_sock_close(tmp1->sk, ECONNRESET);
586 bh_unlock_sock(tmp1->sk);
587 list_del(&tmp1->list);
588 kfree(tmp1);
589 }
590} 654}
591 655
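
The conversion of l2cap_conn_start() replaces the old two-pass scheme, which collected doomed sockets on a temporary sock_del_list and closed them after the walk, with an inline unlock/close/relock. The diff's own comment explains why: __l2cap_sock_close() ends up doing list_del() on the channel, which must not happen under the read lock. list_for_each_entry_safe() makes the walk survivable because it caches the next node before the body runs, though that cached pointer is only safe as long as nothing else unlinks entries while the lock is dropped. One oddity worth flagging: the lock is taken with read_lock() at the top of the function but dropped and re-taken here with the _bh variants, which looks unbalanced unless bottom halves are already disabled on every path in. The shape of the pattern (must_close() is an invented placeholder for the mode/feature check in the hunk):

    struct l2cap_chan *chan, *tmp;

    read_lock(&conn->chan_lock);
    list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
            if (must_close(chan)) {         /* placeholder predicate */
                    /* close path unlinks chan, so drop the lock first */
                    read_unlock(&conn->chan_lock);
                    __l2cap_sock_close(chan->sk, ECONNRESET);
                    read_lock(&conn->chan_lock);
                    continue;
            }
            /* ... normal connect/configure handling ... */
    }
    read_unlock(&conn->chan_lock);
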
592/* Find socket with cid and source bdaddr. 656/* Find socket with cid and source bdaddr.
593 * Returns closest match, locked. 657 * Returns closest match, locked.
594 */ 658 */
595static struct sock *l2cap_get_sock_by_scid(int state, __le16 cid, bdaddr_t *src) 659static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
596{ 660{
597 struct sock *s, *sk = NULL, *sk1 = NULL; 661 struct l2cap_chan *c, *c1 = NULL;
598 struct hlist_node *node; 662
663 read_lock(&chan_list_lock);
599 664
600 read_lock(&l2cap_sk_list.lock); 665 list_for_each_entry(c, &chan_list, global_l) {
666 struct sock *sk = c->sk;
601 667
602 sk_for_each(sk, node, &l2cap_sk_list.head) {
603 if (state && sk->sk_state != state) 668 if (state && sk->sk_state != state)
604 continue; 669 continue;
605 670
606 if (l2cap_pi(sk)->scid == cid) { 671 if (c->scid == cid) {
607 /* Exact match. */ 672 /* Exact match. */
608 if (!bacmp(&bt_sk(sk)->src, src)) 673 if (!bacmp(&bt_sk(sk)->src, src)) {
609 break; 674 read_unlock(&chan_list_lock);
675 return c;
676 }
610 677
611 /* Closest match */ 678 /* Closest match */
612 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) 679 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
613 sk1 = sk; 680 c1 = c;
614 } 681 }
615 } 682 }
616 s = node ? sk : sk1;
617 if (s)
618 bh_lock_sock(s);
619 read_unlock(&l2cap_sk_list.lock);
620 683
621 return s; 684 read_unlock(&chan_list_lock);
685
686 return c1;
622} 687}
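
l2cap_global_chan_by_scid() keeps the old lookup policy while swapping data structures: an entry bound to exactly 'src' wins immediately; failing that, the last entry bound to the wildcard BDADDR_ANY is returned as the closest match. Note that the unchanged comment above it still promises the result comes back "locked" -- that was true of the socket version, which bh-locked the hit before returning, but the channel version returns with nothing held once chan_list_lock is released. The decision logic, as a sketch that assumes the caller holds chan_list_lock for the duration:

    struct l2cap_chan *c, *best = NULL;

    list_for_each_entry(c, &chan_list, global_l) {
            struct sock *sk = c->sk;

            if (state && sk->sk_state != state)
                    continue;
            if (c->scid != cid)
                    continue;
            if (!bacmp(&bt_sk(sk)->src, src))
                    return c;               /* exact source match */
            if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
                    best = c;               /* wildcard: closest so far */
    }
    return best;
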
623 688
624static void l2cap_le_conn_ready(struct l2cap_conn *conn) 689static void l2cap_le_conn_ready(struct l2cap_conn *conn)
625{ 690{
626 struct l2cap_chan_list *list = &conn->chan_list; 691 struct sock *parent, *sk;
627 struct sock *parent, *uninitialized_var(sk); 692 struct l2cap_chan *chan, *pchan;
628 693
629 BT_DBG(""); 694 BT_DBG("");
630 695
631 /* Check if we have socket listening on cid */ 696 /* Check if we have socket listening on cid */
632 parent = l2cap_get_sock_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA, 697 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
633 conn->src); 698 conn->src);
634 if (!parent) 699 if (!pchan)
635 return; 700 return;
636 701
702 parent = pchan->sk;
703
704 bh_lock_sock(parent);
705
637 /* Check for backlog size */ 706 /* Check for backlog size */
638 if (sk_acceptq_is_full(parent)) { 707 if (sk_acceptq_is_full(parent)) {
639 BT_DBG("backlog full %d", parent->sk_ack_backlog); 708 BT_DBG("backlog full %d", parent->sk_ack_backlog);
@@ -644,22 +713,33 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
644 if (!sk) 713 if (!sk)
645 goto clean; 714 goto clean;
646 715
647 write_lock_bh(&list->lock); 716 chan = l2cap_chan_create(sk);
717 if (!chan) {
718 l2cap_sock_kill(sk);
719 goto clean;
720 }
721
722 l2cap_pi(sk)->chan = chan;
723
724 write_lock_bh(&conn->chan_lock);
648 725
649 hci_conn_hold(conn->hcon); 726 hci_conn_hold(conn->hcon);
650 727
651 l2cap_sock_init(sk, parent); 728 l2cap_sock_init(sk, parent);
729
652 bacpy(&bt_sk(sk)->src, conn->src); 730 bacpy(&bt_sk(sk)->src, conn->src);
653 bacpy(&bt_sk(sk)->dst, conn->dst); 731 bacpy(&bt_sk(sk)->dst, conn->dst);
654 732
655 __l2cap_chan_add(conn, sk, parent); 733 bt_accept_enqueue(parent, sk);
734
735 __l2cap_chan_add(conn, chan);
656 736
657 l2cap_sock_set_timer(sk, sk->sk_sndtimeo); 737 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
658 738
659 sk->sk_state = BT_CONNECTED; 739 sk->sk_state = BT_CONNECTED;
660 parent->sk_data_ready(parent, 0); 740 parent->sk_data_ready(parent, 0);
661 741
662 write_unlock_bh(&list->lock); 742 write_unlock_bh(&conn->chan_lock);
663 743
664clean: 744clean:
665 bh_unlock_sock(parent); 745 bh_unlock_sock(parent);
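
The LE accept path gains the bookkeeping that used to be implicit in the socket list: the child socket and its l2cap_chan are now separate objects that must be created and cross-linked by hand, and the child is queued on the listener explicitly via bt_accept_enqueue() (the old three-argument __l2cap_chan_add() took the parent and presumably did this internally). The create-and-link idiom, with error handling as in the hunk:

    /* Sketch: pair a freshly accepted socket with its channel.  If the
     * channel allocation fails, the half-initialised socket must be
     * killed, exactly as the hunk does. */
    chan = l2cap_chan_create(sk);
    if (!chan) {
            l2cap_sock_kill(sk);
            goto clean;
    }
    l2cap_pi(sk)->chan = chan;
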
@@ -667,17 +747,18 @@ clean:
667 747
668static void l2cap_conn_ready(struct l2cap_conn *conn) 748static void l2cap_conn_ready(struct l2cap_conn *conn)
669{ 749{
670 struct l2cap_chan_list *l = &conn->chan_list; 750 struct l2cap_chan *chan;
671 struct sock *sk;
672 751
673 BT_DBG("conn %p", conn); 752 BT_DBG("conn %p", conn);
674 753
675 if (!conn->hcon->out && conn->hcon->type == LE_LINK) 754 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
676 l2cap_le_conn_ready(conn); 755 l2cap_le_conn_ready(conn);
677 756
678 read_lock(&l->lock); 757 read_lock(&conn->chan_lock);
758
759 list_for_each_entry(chan, &conn->chan_l, list) {
760 struct sock *sk = chan->sk;
679 761
680 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
681 bh_lock_sock(sk); 762 bh_lock_sock(sk);
682 763
683 if (conn->hcon->type == LE_LINK) { 764 if (conn->hcon->type == LE_LINK) {
@@ -692,30 +773,31 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
692 sk->sk_state = BT_CONNECTED; 773 sk->sk_state = BT_CONNECTED;
693 sk->sk_state_change(sk); 774 sk->sk_state_change(sk);
694 } else if (sk->sk_state == BT_CONNECT) 775 } else if (sk->sk_state == BT_CONNECT)
695 l2cap_do_start(sk); 776 l2cap_do_start(chan);
696 777
697 bh_unlock_sock(sk); 778 bh_unlock_sock(sk);
698 } 779 }
699 780
700 read_unlock(&l->lock); 781 read_unlock(&conn->chan_lock);
701} 782}
702 783
703/* Notify sockets that we cannot guarantee reliability anymore */ 784

704static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err) 785static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
705{ 786{
706 struct l2cap_chan_list *l = &conn->chan_list; 787 struct l2cap_chan *chan;
707 struct sock *sk;
708 788
709 BT_DBG("conn %p", conn); 789 BT_DBG("conn %p", conn);
710 790
711 read_lock(&l->lock); 791 read_lock(&conn->chan_lock);
712 792
713 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) { 793 list_for_each_entry(chan, &conn->chan_l, list) {
714 if (l2cap_pi(sk)->force_reliable) 794 struct sock *sk = chan->sk;
795
796 if (chan->force_reliable)
715 sk->sk_err = err; 797 sk->sk_err = err;
716 } 798 }
717 799
718 read_unlock(&l->lock); 800 read_unlock(&conn->chan_lock);
719} 801}
720 802
721static void l2cap_info_timeout(unsigned long arg) 803static void l2cap_info_timeout(unsigned long arg)
@@ -755,7 +837,9 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
755 conn->feat_mask = 0; 837 conn->feat_mask = 0;
756 838
757 spin_lock_init(&conn->lock); 839 spin_lock_init(&conn->lock);
758 rwlock_init(&conn->chan_list.lock); 840 rwlock_init(&conn->chan_lock);
841
842 INIT_LIST_HEAD(&conn->chan_l);
759 843
760 if (hcon->type != LE_LINK) 844 if (hcon->type != LE_LINK)
761 setup_timer(&conn->info_timer, l2cap_info_timeout, 845 setup_timer(&conn->info_timer, l2cap_info_timeout,
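
l2cap_conn_add() now initialises a flat channel list plus its own rwlock in place of the embedded struct l2cap_chan_list. Inferred from the initialisation above (a sketch, not a quote of l2cap.h), the connection ends up carrying roughly:

    struct l2cap_conn {
            /* ... */
            spinlock_t              lock;
            rwlock_t                chan_lock;      /* guards chan_l */
            struct list_head        chan_l;         /* of l2cap_chan.list */
            /* ... */
    };

Every iteration in this file (conn_start, conn_ready, conn_unreliable, raw_recv, conn_del) now walks chan_l under chan_lock instead of chasing l2cap_pi(sk)->next_c.
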
@@ -769,6 +853,7 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
769static void l2cap_conn_del(struct hci_conn *hcon, int err) 853static void l2cap_conn_del(struct hci_conn *hcon, int err)
770{ 854{
771 struct l2cap_conn *conn = hcon->l2cap_data; 855 struct l2cap_conn *conn = hcon->l2cap_data;
856 struct l2cap_chan *chan, *l;
772 struct sock *sk; 857 struct sock *sk;
773 858
774 if (!conn) 859 if (!conn)
@@ -779,9 +864,10 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
779 kfree_skb(conn->rx_skb); 864 kfree_skb(conn->rx_skb);
780 865
781 /* Kill channels */ 866 /* Kill channels */
782 while ((sk = conn->chan_list.head)) { 867 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
868 sk = chan->sk;
783 bh_lock_sock(sk); 869 bh_lock_sock(sk);
784 l2cap_chan_del(sk, err); 870 l2cap_chan_del(chan, err);
785 bh_unlock_sock(sk); 871 bh_unlock_sock(sk);
786 l2cap_sock_kill(sk); 872 l2cap_sock_kill(sk);
787 } 873 }
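
The teardown loop switches from repeatedly taking the list head to list_for_each_entry_safe(), and the _safe variant is load-bearing here: l2cap_chan_del() unlinks, and the subsequent l2cap_sock_kill() may free, the entry being visited, so a plain list_for_each_entry() would chase a dangling ->next. Minimal form of the same walk:

    struct l2cap_chan *chan, *l;

    /* _safe caches the next node before the body can unlink/free 'chan' */
    list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
            struct sock *sk = chan->sk;

            bh_lock_sock(sk);
            l2cap_chan_del(chan, err);
            bh_unlock_sock(sk);
            l2cap_sock_kill(sk);
    }
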
@@ -793,12 +879,11 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
793 kfree(conn); 879 kfree(conn);
794} 880}
795 881
796static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent) 882static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
797{ 883{
798 struct l2cap_chan_list *l = &conn->chan_list; 884 write_lock_bh(&conn->chan_lock);
799 write_lock_bh(&l->lock); 885 __l2cap_chan_add(conn, chan);
800 __l2cap_chan_add(conn, sk, parent); 886 write_unlock_bh(&conn->chan_lock);
801 write_unlock_bh(&l->lock);
802} 887}
803 888
804/* ---- Socket interface ---- */ 889/* ---- Socket interface ---- */
@@ -806,35 +891,39 @@ static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, stru
806/* Find socket with psm and source bdaddr. 891/* Find socket with psm and source bdaddr.
807 * Returns closest match. 892 * Returns closest match.
808 */ 893 */
809static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src) 894static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
810{ 895{
811 struct sock *sk = NULL, *sk1 = NULL; 896 struct l2cap_chan *c, *c1 = NULL;
812 struct hlist_node *node;
813 897
814 read_lock(&l2cap_sk_list.lock); 898 read_lock(&chan_list_lock);
899
900 list_for_each_entry(c, &chan_list, global_l) {
901 struct sock *sk = c->sk;
815 902
816 sk_for_each(sk, node, &l2cap_sk_list.head) {
817 if (state && sk->sk_state != state) 903 if (state && sk->sk_state != state)
818 continue; 904 continue;
819 905
820 if (l2cap_pi(sk)->psm == psm) { 906 if (c->psm == psm) {
821 /* Exact match. */ 907 /* Exact match. */
822 if (!bacmp(&bt_sk(sk)->src, src)) 908 if (!bacmp(&bt_sk(sk)->src, src)) {
823 break; 909 read_unlock_bh(&chan_list_lock);
910 return c;
911 }
824 912
825 /* Closest match */ 913 /* Closest match */
826 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) 914 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
827 sk1 = sk; 915 c1 = c;
828 } 916 }
829 } 917 }
830 918
831 read_unlock(&l2cap_sk_list.lock); 919 read_unlock(&chan_list_lock);
832 920
833 return node ? sk : sk1; 921 return c1;
834} 922}
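
One imbalance worth flagging in l2cap_global_chan_by_psm(): the list is entered with read_lock(&chan_list_lock), but the exact-match early return leaves through read_unlock_bh(), whose implicit local_bh_enable() has no matching disable on this path (the scid variant above unlocks consistently with read_unlock()). Unless every caller is guaranteed to run with bottom halves already off, the balanced form would be:

    if (!bacmp(&bt_sk(sk)->src, src)) {
            read_unlock(&chan_list_lock);   /* pair the plain read_lock() */
            return c;
    }
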
835 923
836int l2cap_do_connect(struct sock *sk) 924int l2cap_chan_connect(struct l2cap_chan *chan)
837{ 925{
926 struct sock *sk = chan->sk;
838 bdaddr_t *src = &bt_sk(sk)->src; 927 bdaddr_t *src = &bt_sk(sk)->src;
839 bdaddr_t *dst = &bt_sk(sk)->dst; 928 bdaddr_t *dst = &bt_sk(sk)->dst;
840 struct l2cap_conn *conn; 929 struct l2cap_conn *conn;
@@ -844,7 +933,7 @@ int l2cap_do_connect(struct sock *sk)
844 int err; 933 int err;
845 934
846 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), 935 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
847 l2cap_pi(sk)->psm); 936 chan->psm);
848 937
849 hdev = hci_get_route(dst, src); 938 hdev = hci_get_route(dst, src);
850 if (!hdev) 939 if (!hdev)
@@ -852,14 +941,14 @@ int l2cap_do_connect(struct sock *sk)
852 941
853 hci_dev_lock_bh(hdev); 942 hci_dev_lock_bh(hdev);
854 943
855 auth_type = l2cap_get_auth_type(sk); 944 auth_type = l2cap_get_auth_type(chan);
856 945
857 if (l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA) 946 if (chan->dcid == L2CAP_CID_LE_DATA)
858 hcon = hci_connect(hdev, LE_LINK, dst, 947 hcon = hci_connect(hdev, LE_LINK, dst,
859 l2cap_pi(sk)->sec_level, auth_type); 948 chan->sec_level, auth_type);
860 else 949 else
861 hcon = hci_connect(hdev, ACL_LINK, dst, 950 hcon = hci_connect(hdev, ACL_LINK, dst,
862 l2cap_pi(sk)->sec_level, auth_type); 951 chan->sec_level, auth_type);
863 952
864 if (IS_ERR(hcon)) { 953 if (IS_ERR(hcon)) {
865 err = PTR_ERR(hcon); 954 err = PTR_ERR(hcon);
@@ -876,7 +965,7 @@ int l2cap_do_connect(struct sock *sk)
876 /* Update source addr of the socket */ 965 /* Update source addr of the socket */
877 bacpy(src, conn->src); 966 bacpy(src, conn->src);
878 967
879 l2cap_chan_add(conn, sk, NULL); 968 l2cap_chan_add(conn, chan);
880 969
881 sk->sk_state = BT_CONNECT; 970 sk->sk_state = BT_CONNECT;
882 l2cap_sock_set_timer(sk, sk->sk_sndtimeo); 971 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
@@ -885,10 +974,10 @@ int l2cap_do_connect(struct sock *sk)
885 if (sk->sk_type != SOCK_SEQPACKET && 974 if (sk->sk_type != SOCK_SEQPACKET &&
886 sk->sk_type != SOCK_STREAM) { 975 sk->sk_type != SOCK_STREAM) {
887 l2cap_sock_clear_timer(sk); 976 l2cap_sock_clear_timer(sk);
888 if (l2cap_check_security(sk)) 977 if (l2cap_check_security(chan))
889 sk->sk_state = BT_CONNECTED; 978 sk->sk_state = BT_CONNECTED;
890 } else 979 } else
891 l2cap_do_start(sk); 980 l2cap_do_start(chan);
892 } 981 }
893 982
894 err = 0; 983 err = 0;
@@ -901,12 +990,13 @@ done:
901 990
902int __l2cap_wait_ack(struct sock *sk) 991int __l2cap_wait_ack(struct sock *sk)
903{ 992{
993 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
904 DECLARE_WAITQUEUE(wait, current); 994 DECLARE_WAITQUEUE(wait, current);
905 int err = 0; 995 int err = 0;
906 int timeo = HZ/5; 996 int timeo = HZ/5;
907 997
908 add_wait_queue(sk_sleep(sk), &wait); 998 add_wait_queue(sk_sleep(sk), &wait);
909 while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) { 999 while ((chan->unacked_frames > 0 && chan->conn)) {
910 set_current_state(TASK_INTERRUPTIBLE); 1000 set_current_state(TASK_INTERRUPTIBLE);
911 1001
912 if (!timeo) 1002 if (!timeo)
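
__l2cap_wait_ack() now watches chan->unacked_frames, but the waiting machinery is unchanged: it is the classic wait-queue loop -- register on the socket's wait queue, set TASK_INTERRUPTIBLE, re-check the condition, sleep with a timeout, and bail out on signals or socket errors. The hunk cuts off mid-loop; the sketch below shows the standard shape of such a loop and is not a quote of the elided lines:

    DECLARE_WAITQUEUE(wait, current);
    long timeo = HZ / 5;
    int err = 0;

    add_wait_queue(sk_sleep(sk), &wait);
    while (chan->unacked_frames > 0 && chan->conn) {
            set_current_state(TASK_INTERRUPTIBLE);

            if (!timeo)
                    break;                          /* budget used up */

            if (signal_pending(current)) {
                    err = sock_intr_errno(timeo);
                    break;
            }

            release_sock(sk);                       /* let acks arrive */
            timeo = schedule_timeout(timeo);
            lock_sock(sk);

            err = sock_error(sk);
            if (err)
                    break;
    }
    set_current_state(TASK_RUNNING);
    remove_wait_queue(sk_sleep(sk), &wait);
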
@@ -932,68 +1022,69 @@ int __l2cap_wait_ack(struct sock *sk)
932 1022
933static void l2cap_monitor_timeout(unsigned long arg) 1023static void l2cap_monitor_timeout(unsigned long arg)
934{ 1024{
935 struct sock *sk = (void *) arg; 1025 struct l2cap_chan *chan = (void *) arg;
1026 struct sock *sk = chan->sk;
936 1027
937 BT_DBG("sk %p", sk); 1028 BT_DBG("chan %p", chan);
938 1029
939 bh_lock_sock(sk); 1030 bh_lock_sock(sk);
940 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) { 1031 if (chan->retry_count >= chan->remote_max_tx) {
941 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED); 1032 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
942 bh_unlock_sock(sk); 1033 bh_unlock_sock(sk);
943 return; 1034 return;
944 } 1035 }
945 1036
946 l2cap_pi(sk)->retry_count++; 1037 chan->retry_count++;
947 __mod_monitor_timer(); 1038 __mod_monitor_timer();
948 1039
949 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL); 1040 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
950 bh_unlock_sock(sk); 1041 bh_unlock_sock(sk);
951} 1042}
952 1043
953static void l2cap_retrans_timeout(unsigned long arg) 1044static void l2cap_retrans_timeout(unsigned long arg)
954{ 1045{
955 struct sock *sk = (void *) arg; 1046 struct l2cap_chan *chan = (void *) arg;
1047 struct sock *sk = chan->sk;
956 1048
957 BT_DBG("sk %p", sk); 1049 BT_DBG("chan %p", chan);
958 1050
959 bh_lock_sock(sk); 1051 bh_lock_sock(sk);
960 l2cap_pi(sk)->retry_count = 1; 1052 chan->retry_count = 1;
961 __mod_monitor_timer(); 1053 __mod_monitor_timer();
962 1054
963 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F; 1055 chan->conn_state |= L2CAP_CONN_WAIT_F;
964 1056
965 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL); 1057 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
966 bh_unlock_sock(sk); 1058 bh_unlock_sock(sk);
967} 1059}
968 1060
969static void l2cap_drop_acked_frames(struct sock *sk) 1061static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
970{ 1062{
971 struct sk_buff *skb; 1063 struct sk_buff *skb;
972 1064
973 while ((skb = skb_peek(TX_QUEUE(sk))) && 1065 while ((skb = skb_peek(&chan->tx_q)) &&
974 l2cap_pi(sk)->unacked_frames) { 1066 chan->unacked_frames) {
975 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq) 1067 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
976 break; 1068 break;
977 1069
978 skb = skb_dequeue(TX_QUEUE(sk)); 1070 skb = skb_dequeue(&chan->tx_q);
979 kfree_skb(skb); 1071 kfree_skb(skb);
980 1072
981 l2cap_pi(sk)->unacked_frames--; 1073 chan->unacked_frames--;
982 } 1074 }
983 1075
984 if (!l2cap_pi(sk)->unacked_frames) 1076 if (!chan->unacked_frames)
985 del_timer(&l2cap_pi(sk)->retrans_timer); 1077 del_timer(&chan->retrans_timer);
986} 1078}
987 1079
988void l2cap_do_send(struct sock *sk, struct sk_buff *skb) 1080void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
989{ 1081{
990 struct l2cap_pinfo *pi = l2cap_pi(sk); 1082 struct hci_conn *hcon = chan->conn->hcon;
991 struct hci_conn *hcon = pi->conn->hcon;
992 u16 flags; 1083 u16 flags;
993 1084
994 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len); 1085 BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len);
995 1086
996 if (!pi->flushable && lmp_no_flush_capable(hcon->hdev)) 1087 if (!chan->flushable && lmp_no_flush_capable(hcon->hdev))
997 flags = ACL_START_NO_FLUSH; 1088 flags = ACL_START_NO_FLUSH;
998 else 1089 else
999 flags = ACL_START; 1090 flags = ACL_START;
@@ -1001,35 +1092,33 @@ void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1001 hci_send_acl(hcon, skb, flags); 1092 hci_send_acl(hcon, skb, flags);
1002} 1093}
1003 1094
1004void l2cap_streaming_send(struct sock *sk) 1095void l2cap_streaming_send(struct l2cap_chan *chan)
1005{ 1096{
1006 struct sk_buff *skb; 1097 struct sk_buff *skb;
1007 struct l2cap_pinfo *pi = l2cap_pi(sk);
1008 u16 control, fcs; 1098 u16 control, fcs;
1009 1099
1010 while ((skb = skb_dequeue(TX_QUEUE(sk)))) { 1100 while ((skb = skb_dequeue(&chan->tx_q))) {
1011 control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE); 1101 control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1012 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT; 1102 control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1013 put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE); 1103 put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
1014 1104
1015 if (pi->fcs == L2CAP_FCS_CRC16) { 1105 if (chan->fcs == L2CAP_FCS_CRC16) {
1016 fcs = crc16(0, (u8 *)skb->data, skb->len - 2); 1106 fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1017 put_unaligned_le16(fcs, skb->data + skb->len - 2); 1107 put_unaligned_le16(fcs, skb->data + skb->len - 2);
1018 } 1108 }
1019 1109
1020 l2cap_do_send(sk, skb); 1110 l2cap_do_send(chan, skb);
1021 1111
1022 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64; 1112 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
1023 } 1113 }
1024} 1114}
1025 1115
1026static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq) 1116static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
1027{ 1117{
1028 struct l2cap_pinfo *pi = l2cap_pi(sk);
1029 struct sk_buff *skb, *tx_skb; 1118 struct sk_buff *skb, *tx_skb;
1030 u16 control, fcs; 1119 u16 control, fcs;
1031 1120
1032 skb = skb_peek(TX_QUEUE(sk)); 1121 skb = skb_peek(&chan->tx_q);
1033 if (!skb) 1122 if (!skb)
1034 return; 1123 return;
1035 1124
@@ -1037,14 +1126,14 @@ static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1037 if (bt_cb(skb)->tx_seq == tx_seq) 1126 if (bt_cb(skb)->tx_seq == tx_seq)
1038 break; 1127 break;
1039 1128
1040 if (skb_queue_is_last(TX_QUEUE(sk), skb)) 1129 if (skb_queue_is_last(&chan->tx_q, skb))
1041 return; 1130 return;
1042 1131
1043 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb))); 1132 } while ((skb = skb_queue_next(&chan->tx_q, skb)));
1044 1133
1045 if (pi->remote_max_tx && 1134 if (chan->remote_max_tx &&
1046 bt_cb(skb)->retries == pi->remote_max_tx) { 1135 bt_cb(skb)->retries == chan->remote_max_tx) {
1047 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED); 1136 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1048 return; 1137 return;
1049 } 1138 }
1050 1139
@@ -1053,39 +1142,39 @@ static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1053 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE); 1142 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1054 control &= L2CAP_CTRL_SAR; 1143 control &= L2CAP_CTRL_SAR;
1055 1144
1056 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) { 1145 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
1057 control |= L2CAP_CTRL_FINAL; 1146 control |= L2CAP_CTRL_FINAL;
1058 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT; 1147 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1059 } 1148 }
1060 1149
1061 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT) 1150 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1062 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT); 1151 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1063 1152
1064 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE); 1153 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1065 1154
1066 if (pi->fcs == L2CAP_FCS_CRC16) { 1155 if (chan->fcs == L2CAP_FCS_CRC16) {
1067 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2); 1156 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1068 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2); 1157 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1069 } 1158 }
1070 1159
1071 l2cap_do_send(sk, tx_skb); 1160 l2cap_do_send(chan, tx_skb);
1072} 1161}
1073 1162
1074int l2cap_ertm_send(struct sock *sk) 1163int l2cap_ertm_send(struct l2cap_chan *chan)
1075{ 1164{
1076 struct sk_buff *skb, *tx_skb; 1165 struct sk_buff *skb, *tx_skb;
1077 struct l2cap_pinfo *pi = l2cap_pi(sk); 1166 struct sock *sk = chan->sk;
1078 u16 control, fcs; 1167 u16 control, fcs;
1079 int nsent = 0; 1168 int nsent = 0;
1080 1169
1081 if (sk->sk_state != BT_CONNECTED) 1170 if (sk->sk_state != BT_CONNECTED)
1082 return -ENOTCONN; 1171 return -ENOTCONN;
1083 1172
1084 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) { 1173 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1085 1174
1086 if (pi->remote_max_tx && 1175 if (chan->remote_max_tx &&
1087 bt_cb(skb)->retries == pi->remote_max_tx) { 1176 bt_cb(skb)->retries == chan->remote_max_tx) {
1088 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED); 1177 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1089 break; 1178 break;
1090 } 1179 }
1091 1180
@@ -1096,36 +1185,36 @@ int l2cap_ertm_send(struct sock *sk)
1096 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE); 1185 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1097 control &= L2CAP_CTRL_SAR; 1186 control &= L2CAP_CTRL_SAR;
1098 1187
1099 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) { 1188 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
1100 control |= L2CAP_CTRL_FINAL; 1189 control |= L2CAP_CTRL_FINAL;
1101 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT; 1190 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1102 } 1191 }
1103 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT) 1192 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1104 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT); 1193 | (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1105 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE); 1194 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1106 1195
1107 1196
1108 if (pi->fcs == L2CAP_FCS_CRC16) { 1197 if (chan->fcs == L2CAP_FCS_CRC16) {
1109 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2); 1198 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1110 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2); 1199 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1111 } 1200 }
1112 1201
1113 l2cap_do_send(sk, tx_skb); 1202 l2cap_do_send(chan, tx_skb);
1114 1203
1115 __mod_retrans_timer(); 1204 __mod_retrans_timer();
1116 1205
1117 bt_cb(skb)->tx_seq = pi->next_tx_seq; 1206 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1118 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64; 1207 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
1119 1208
1120 if (bt_cb(skb)->retries == 1) 1209 if (bt_cb(skb)->retries == 1)
1121 pi->unacked_frames++; 1210 chan->unacked_frames++;
1122 1211
1123 pi->frames_sent++; 1212 chan->frames_sent++;
1124 1213
1125 if (skb_queue_is_last(TX_QUEUE(sk), skb)) 1214 if (skb_queue_is_last(&chan->tx_q, skb))
1126 sk->sk_send_head = NULL; 1215 chan->tx_send_head = NULL;
1127 else 1216 else
1128 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb); 1217 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1129 1218
1130 nsent++; 1219 nsent++;
1131 } 1220 }
@@ -1133,41 +1222,39 @@ int l2cap_ertm_send(struct sock *sk)
1133 return nsent; 1222 return nsent;
1134} 1223}
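
A subtlety in l2cap_ertm_send() that survives the conversion untouched: the FCS block computes the checksum over skb->data but sizes it with tx_skb->len and writes the result back through skb->data, while the frame actually handed to l2cap_do_send() is tx_skb. This only hangs together because tx_skb comes from skb_clone() in the elided lines just above the hunk, and a clone shares the payload buffer with its original, so tx_skb->data and skb->data point at the same bytes. "Fixing" one pointer without the other would change nothing, while replacing the clone with a copy would silently break the FCS. In sketch form:

    /* skb_clone() shares the payload buffer, so after this: */
    tx_skb = skb_clone(skb, GFP_ATOMIC);

    /* ...writing through skb->data is visible through tx_skb->data too. */
    fcs = crc16(0, (u8 *) skb->data, tx_skb->len - 2);
    put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);

    l2cap_do_send(chan, tx_skb);        /* the clone carries the FCS */
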
1135 1224
1136static int l2cap_retransmit_frames(struct sock *sk) 1225static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1137{ 1226{
1138 struct l2cap_pinfo *pi = l2cap_pi(sk);
1139 int ret; 1227 int ret;
1140 1228
1141 if (!skb_queue_empty(TX_QUEUE(sk))) 1229 if (!skb_queue_empty(&chan->tx_q))
1142 sk->sk_send_head = TX_QUEUE(sk)->next; 1230 chan->tx_send_head = chan->tx_q.next;
1143 1231
1144 pi->next_tx_seq = pi->expected_ack_seq; 1232 chan->next_tx_seq = chan->expected_ack_seq;
1145 ret = l2cap_ertm_send(sk); 1233 ret = l2cap_ertm_send(chan);
1146 return ret; 1234 return ret;
1147} 1235}
1148 1236
1149static void l2cap_send_ack(struct l2cap_pinfo *pi) 1237static void l2cap_send_ack(struct l2cap_chan *chan)
1150{ 1238{
1151 struct sock *sk = (struct sock *)pi;
1152 u16 control = 0; 1239 u16 control = 0;
1153 1240
1154 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT; 1241 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1155 1242
1156 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) { 1243 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1157 control |= L2CAP_SUPER_RCV_NOT_READY; 1244 control |= L2CAP_SUPER_RCV_NOT_READY;
1158 pi->conn_state |= L2CAP_CONN_RNR_SENT; 1245 chan->conn_state |= L2CAP_CONN_RNR_SENT;
1159 l2cap_send_sframe(pi, control); 1246 l2cap_send_sframe(chan, control);
1160 return; 1247 return;
1161 } 1248 }
1162 1249
1163 if (l2cap_ertm_send(sk) > 0) 1250 if (l2cap_ertm_send(chan) > 0)
1164 return; 1251 return;
1165 1252
1166 control |= L2CAP_SUPER_RCV_READY; 1253 control |= L2CAP_SUPER_RCV_READY;
1167 l2cap_send_sframe(pi, control); 1254 l2cap_send_sframe(chan, control);
1168} 1255}
1169 1256
1170static void l2cap_send_srejtail(struct sock *sk) 1257static void l2cap_send_srejtail(struct l2cap_chan *chan)
1171{ 1258{
1172 struct srej_list *tail; 1259 struct srej_list *tail;
1173 u16 control; 1260 u16 control;
@@ -1175,15 +1262,15 @@ static void l2cap_send_srejtail(struct sock *sk)
1175 control = L2CAP_SUPER_SELECT_REJECT; 1262 control = L2CAP_SUPER_SELECT_REJECT;
1176 control |= L2CAP_CTRL_FINAL; 1263 control |= L2CAP_CTRL_FINAL;
1177 1264
1178 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list); 1265 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1179 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT; 1266 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1180 1267
1181 l2cap_send_sframe(l2cap_pi(sk), control); 1268 l2cap_send_sframe(chan, control);
1182} 1269}
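
A small style nit introduced by the mechanical conversion in l2cap_send_srejtail(): (&chan->srej_l)->prev is a literal expansion of the old SREJ_LIST(sk) macro. The parenthesised address-of is redundant; the idiomatic spelling is simply:

    tail = list_entry(chan->srej_l.prev, struct srej_list, list);
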
1183 1270
1184static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb) 1271static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1185{ 1272{
1186 struct l2cap_conn *conn = l2cap_pi(sk)->conn; 1273 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
1187 struct sk_buff **frag; 1274 struct sk_buff **frag;
1188 int err, sent = 0; 1275 int err, sent = 0;
1189 1276
@@ -1213,9 +1300,10 @@ static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, in
1213 return sent; 1300 return sent;
1214} 1301}
1215 1302
1216struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len) 1303struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1217{ 1304{
1218 struct l2cap_conn *conn = l2cap_pi(sk)->conn; 1305 struct sock *sk = chan->sk;
1306 struct l2cap_conn *conn = chan->conn;
1219 struct sk_buff *skb; 1307 struct sk_buff *skb;
1220 int err, count, hlen = L2CAP_HDR_SIZE + 2; 1308 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1221 struct l2cap_hdr *lh; 1309 struct l2cap_hdr *lh;
@@ -1230,9 +1318,9 @@ struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, s
1230 1318
1231 /* Create L2CAP header */ 1319 /* Create L2CAP header */
1232 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); 1320 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1233 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid); 1321 lh->cid = cpu_to_le16(chan->dcid);
1234 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); 1322 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1235 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2)); 1323 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1236 1324
1237 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb); 1325 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1238 if (unlikely(err < 0)) { 1326 if (unlikely(err < 0)) {
@@ -1242,9 +1330,10 @@ struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, s
1242 return skb; 1330 return skb;
1243} 1331}
1244 1332
1245struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len) 1333struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1246{ 1334{
1247 struct l2cap_conn *conn = l2cap_pi(sk)->conn; 1335 struct sock *sk = chan->sk;
1336 struct l2cap_conn *conn = chan->conn;
1248 struct sk_buff *skb; 1337 struct sk_buff *skb;
1249 int err, count, hlen = L2CAP_HDR_SIZE; 1338 int err, count, hlen = L2CAP_HDR_SIZE;
1250 struct l2cap_hdr *lh; 1339 struct l2cap_hdr *lh;
@@ -1259,7 +1348,7 @@ struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size
1259 1348
1260 /* Create L2CAP header */ 1349 /* Create L2CAP header */
1261 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); 1350 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1262 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid); 1351 lh->cid = cpu_to_le16(chan->dcid);
1263 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); 1352 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1264 1353
1265 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb); 1354 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
@@ -1270,9 +1359,10 @@ struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size
1270 return skb; 1359 return skb;
1271} 1360}
1272 1361
1273struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen) 1362struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1274{ 1363{
1275 struct l2cap_conn *conn = l2cap_pi(sk)->conn; 1364 struct sock *sk = chan->sk;
1365 struct l2cap_conn *conn = chan->conn;
1276 struct sk_buff *skb; 1366 struct sk_buff *skb;
1277 int err, count, hlen = L2CAP_HDR_SIZE + 2; 1367 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1278 struct l2cap_hdr *lh; 1368 struct l2cap_hdr *lh;
@@ -1285,7 +1375,7 @@ struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, siz
1285 if (sdulen) 1375 if (sdulen)
1286 hlen += 2; 1376 hlen += 2;
1287 1377
1288 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) 1378 if (chan->fcs == L2CAP_FCS_CRC16)
1289 hlen += 2; 1379 hlen += 2;
1290 1380
1291 count = min_t(unsigned int, (conn->mtu - hlen), len); 1381 count = min_t(unsigned int, (conn->mtu - hlen), len);
@@ -1296,7 +1386,7 @@ struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, siz
1296 1386
1297 /* Create L2CAP header */ 1387 /* Create L2CAP header */
1298 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); 1388 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1299 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid); 1389 lh->cid = cpu_to_le16(chan->dcid);
1300 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); 1390 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1301 put_unaligned_le16(control, skb_put(skb, 2)); 1391 put_unaligned_le16(control, skb_put(skb, 2));
1302 if (sdulen) 1392 if (sdulen)
@@ -1308,16 +1398,15 @@ struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, siz
1308 return ERR_PTR(err); 1398 return ERR_PTR(err);
1309 } 1399 }
1310 1400
1311 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) 1401 if (chan->fcs == L2CAP_FCS_CRC16)
1312 put_unaligned_le16(0, skb_put(skb, 2)); 1402 put_unaligned_le16(0, skb_put(skb, 2));
1313 1403
1314 bt_cb(skb)->retries = 0; 1404 bt_cb(skb)->retries = 0;
1315 return skb; 1405 return skb;
1316} 1406}
1317 1407
1318int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len) 1408int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1319{ 1409{
1320 struct l2cap_pinfo *pi = l2cap_pi(sk);
1321 struct sk_buff *skb; 1410 struct sk_buff *skb;
1322 struct sk_buff_head sar_queue; 1411 struct sk_buff_head sar_queue;
1323 u16 control; 1412 u16 control;
@@ -1325,26 +1414,26 @@ int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1325 1414
1326 skb_queue_head_init(&sar_queue); 1415 skb_queue_head_init(&sar_queue);
1327 control = L2CAP_SDU_START; 1416 control = L2CAP_SDU_START;
1328 skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len); 1417 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1329 if (IS_ERR(skb)) 1418 if (IS_ERR(skb))
1330 return PTR_ERR(skb); 1419 return PTR_ERR(skb);
1331 1420
1332 __skb_queue_tail(&sar_queue, skb); 1421 __skb_queue_tail(&sar_queue, skb);
1333 len -= pi->remote_mps; 1422 len -= chan->remote_mps;
1334 size += pi->remote_mps; 1423 size += chan->remote_mps;
1335 1424
1336 while (len > 0) { 1425 while (len > 0) {
1337 size_t buflen; 1426 size_t buflen;
1338 1427
1339 if (len > pi->remote_mps) { 1428 if (len > chan->remote_mps) {
1340 control = L2CAP_SDU_CONTINUE; 1429 control = L2CAP_SDU_CONTINUE;
1341 buflen = pi->remote_mps; 1430 buflen = chan->remote_mps;
1342 } else { 1431 } else {
1343 control = L2CAP_SDU_END; 1432 control = L2CAP_SDU_END;
1344 buflen = len; 1433 buflen = len;
1345 } 1434 }
1346 1435
1347 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0); 1436 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
1348 if (IS_ERR(skb)) { 1437 if (IS_ERR(skb)) {
1349 skb_queue_purge(&sar_queue); 1438 skb_queue_purge(&sar_queue);
1350 return PTR_ERR(skb); 1439 return PTR_ERR(skb);
@@ -1354,9 +1443,9 @@ int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1354 len -= buflen; 1443 len -= buflen;
1355 size += buflen; 1444 size += buflen;
1356 } 1445 }
1357 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk)); 1446 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1358 if (sk->sk_send_head == NULL) 1447 if (chan->tx_send_head == NULL)
1359 sk->sk_send_head = sar_queue.next; 1448 chan->tx_send_head = sar_queue.next;
1360 1449
1361 return size; 1450 return size;
1362} 1451}
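
l2cap_sar_segment_sdu() maps one SDU onto a train of I-frames: the first fragment carries L2CAP_SDU_START plus the total SDU length, middle fragments carry L2CAP_SDU_CONTINUE, and the last carries L2CAP_SDU_END, each capped at the peer's MPS (chan->remote_mps). Staging the whole train on a private sar_queue means a mid-stream allocation failure can purge cleanly without leaving half an SDU on chan->tx_q; only a complete train is spliced over. The control-value selection, with build_pdu() as an invented stand-in for the l2cap_create_iframe_pdu()-and-queue step:

    u16 control = L2CAP_SDU_START;      /* first PDU also carries SDU length */

    build_pdu(chan, msg, chan->remote_mps, control, len);
    len -= chan->remote_mps;

    while (len > 0) {
            size_t buflen = min_t(size_t, len, chan->remote_mps);

            control = (len > chan->remote_mps) ? L2CAP_SDU_CONTINUE
                                               : L2CAP_SDU_END;
            build_pdu(chan, msg, buflen, control, 0);
            len -= buflen;
    }
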
@@ -1364,10 +1453,11 @@ int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1364static void l2cap_chan_ready(struct sock *sk) 1453static void l2cap_chan_ready(struct sock *sk)
1365{ 1454{
1366 struct sock *parent = bt_sk(sk)->parent; 1455 struct sock *parent = bt_sk(sk)->parent;
1456 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1367 1457
1368 BT_DBG("sk %p, parent %p", sk, parent); 1458 BT_DBG("sk %p, parent %p", sk, parent);
1369 1459
1370 l2cap_pi(sk)->conf_state = 0; 1460 chan->conf_state = 0;
1371 l2cap_sock_clear_timer(sk); 1461 l2cap_sock_clear_timer(sk);
1372 1462
1373 if (!parent) { 1463 if (!parent) {
@@ -1387,14 +1477,14 @@ static void l2cap_chan_ready(struct sock *sk)
1387/* Copy frame to all raw sockets on that connection */ 1477/* Copy frame to all raw sockets on that connection */
1388static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb) 1478static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1389{ 1479{
1390 struct l2cap_chan_list *l = &conn->chan_list;
1391 struct sk_buff *nskb; 1480 struct sk_buff *nskb;
1392 struct sock *sk; 1481 struct l2cap_chan *chan;
1393 1482
1394 BT_DBG("conn %p", conn); 1483 BT_DBG("conn %p", conn);
1395 1484
1396 read_lock(&l->lock); 1485 read_lock(&conn->chan_lock);
1397 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) { 1486 list_for_each_entry(chan, &conn->chan_l, list) {
1487 struct sock *sk = chan->sk;
1398 if (sk->sk_type != SOCK_RAW) 1488 if (sk->sk_type != SOCK_RAW)
1399 continue; 1489 continue;
1400 1490
@@ -1408,7 +1498,7 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1408 if (sock_queue_rcv_skb(sk, nskb)) 1498 if (sock_queue_rcv_skb(sk, nskb))
1409 kfree_skb(nskb); 1499 kfree_skb(nskb);
1410 } 1500 }
1411 read_unlock(&l->lock); 1501 read_unlock(&conn->chan_lock);
1412} 1502}
1413 1503
1414/* ---- L2CAP signalling commands ---- */ 1504/* ---- L2CAP signalling commands ---- */
@@ -1540,32 +1630,35 @@ static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1540 1630
1541static void l2cap_ack_timeout(unsigned long arg) 1631static void l2cap_ack_timeout(unsigned long arg)
1542{ 1632{
1543 struct sock *sk = (void *) arg; 1633 struct l2cap_chan *chan = (void *) arg;
1544 1634
1545 bh_lock_sock(sk); 1635 bh_lock_sock(chan->sk);
1546 l2cap_send_ack(l2cap_pi(sk)); 1636 l2cap_send_ack(chan);
1547 bh_unlock_sock(sk); 1637 bh_unlock_sock(chan->sk);
1548} 1638}
1549 1639
1550static inline void l2cap_ertm_init(struct sock *sk) 1640static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1551{ 1641{
1552 l2cap_pi(sk)->expected_ack_seq = 0; 1642 struct sock *sk = chan->sk;
1553 l2cap_pi(sk)->unacked_frames = 0; 1643
1554 l2cap_pi(sk)->buffer_seq = 0; 1644 chan->expected_ack_seq = 0;
1555 l2cap_pi(sk)->num_acked = 0; 1645 chan->unacked_frames = 0;
1556 l2cap_pi(sk)->frames_sent = 0; 1646 chan->buffer_seq = 0;
1647 chan->num_acked = 0;
1648 chan->frames_sent = 0;
1557 1649
1558 setup_timer(&l2cap_pi(sk)->retrans_timer, 1650 setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
1559 l2cap_retrans_timeout, (unsigned long) sk); 1651 (unsigned long) chan);
1560 setup_timer(&l2cap_pi(sk)->monitor_timer, 1652 setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
1561 l2cap_monitor_timeout, (unsigned long) sk); 1653 (unsigned long) chan);
1562 setup_timer(&l2cap_pi(sk)->ack_timer, 1654 setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
1563 l2cap_ack_timeout, (unsigned long) sk);
1564 1655
1565 __skb_queue_head_init(SREJ_QUEUE(sk)); 1656 skb_queue_head_init(&chan->srej_q);
1566 __skb_queue_head_init(BUSY_QUEUE(sk)); 1657 skb_queue_head_init(&chan->busy_q);
1567 1658
1568 INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work); 1659 INIT_LIST_HEAD(&chan->srej_l);
1660
1661 INIT_WORK(&chan->busy_work, l2cap_busy_work);
1569 1662
1570 sk->sk_backlog_rcv = l2cap_ertm_data_rcv; 1663 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
1571} 1664}
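
l2cap_ertm_init() shows the timer side of the chan conversion: all three ERTM timers now pass the channel, not the socket, as the callback argument, and every handler above (l2cap_retrans_timeout, l2cap_monitor_timeout, l2cap_ack_timeout) starts by casting the unsigned long back. Two smaller changes ride along: the SREJ/busy queues switch from __skb_queue_head_init() to skb_queue_head_init(), which also initialises the queue's internal spinlock, and the srej_l list head gains an explicit INIT_LIST_HEAD(). The timer idiom of this era (setup_timer() was later superseded by timer_setup()) in isolation:

    #include <linux/timer.h>

    static void ack_timeout(unsigned long arg)
    {
            /* the argument round-trips through an unsigned long */
            struct l2cap_chan *chan = (struct l2cap_chan *) arg;

            bh_lock_sock(chan->sk);
            /* ... emit the pending acknowledgement ... */
            bh_unlock_sock(chan->sk);
    }

    setup_timer(&chan->ack_timer, ack_timeout, (unsigned long) chan);
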
@@ -1583,38 +1676,37 @@ static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1583 } 1676 }
1584} 1677}
1585 1678
1586int l2cap_build_conf_req(struct sock *sk, void *data) 1679static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
1587{ 1680{
1588 struct l2cap_pinfo *pi = l2cap_pi(sk);
1589 struct l2cap_conf_req *req = data; 1681 struct l2cap_conf_req *req = data;
1590 struct l2cap_conf_rfc rfc = { .mode = pi->mode }; 1682 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
1591 void *ptr = req->data; 1683 void *ptr = req->data;
1592 1684
1593 BT_DBG("sk %p", sk); 1685 BT_DBG("chan %p", chan);
1594 1686
1595 if (pi->num_conf_req || pi->num_conf_rsp) 1687 if (chan->num_conf_req || chan->num_conf_rsp)
1596 goto done; 1688 goto done;
1597 1689
1598 switch (pi->mode) { 1690 switch (chan->mode) {
1599 case L2CAP_MODE_STREAMING: 1691 case L2CAP_MODE_STREAMING:
1600 case L2CAP_MODE_ERTM: 1692 case L2CAP_MODE_ERTM:
1601 if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE) 1693 if (chan->conf_state & L2CAP_CONF_STATE2_DEVICE)
1602 break; 1694 break;
1603 1695
1604 /* fall through */ 1696 /* fall through */
1605 default: 1697 default:
1606 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask); 1698 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
1607 break; 1699 break;
1608 } 1700 }
1609 1701
1610done: 1702done:
1611 if (pi->imtu != L2CAP_DEFAULT_MTU) 1703 if (chan->imtu != L2CAP_DEFAULT_MTU)
1612 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu); 1704 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
1613 1705
1614 switch (pi->mode) { 1706 switch (chan->mode) {
1615 case L2CAP_MODE_BASIC: 1707 case L2CAP_MODE_BASIC:
1616 if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) && 1708 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
1617 !(pi->conn->feat_mask & L2CAP_FEAT_STREAMING)) 1709 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
1618 break; 1710 break;
1619 1711
1620 rfc.mode = L2CAP_MODE_BASIC; 1712 rfc.mode = L2CAP_MODE_BASIC;
@@ -1630,24 +1722,24 @@ done:
1630 1722
1631 case L2CAP_MODE_ERTM: 1723 case L2CAP_MODE_ERTM:
1632 rfc.mode = L2CAP_MODE_ERTM; 1724 rfc.mode = L2CAP_MODE_ERTM;
1633 rfc.txwin_size = pi->tx_win; 1725 rfc.txwin_size = chan->tx_win;
1634 rfc.max_transmit = pi->max_tx; 1726 rfc.max_transmit = chan->max_tx;
1635 rfc.retrans_timeout = 0; 1727 rfc.retrans_timeout = 0;
1636 rfc.monitor_timeout = 0; 1728 rfc.monitor_timeout = 0;
1637 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE); 1729 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
1638 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10) 1730 if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
1639 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10); 1731 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1640 1732
1641 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), 1733 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1642 (unsigned long) &rfc); 1734 (unsigned long) &rfc);
1643 1735
1644 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS)) 1736 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
1645 break; 1737 break;
1646 1738
1647 if (pi->fcs == L2CAP_FCS_NONE || 1739 if (chan->fcs == L2CAP_FCS_NONE ||
1648 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) { 1740 chan->conf_state & L2CAP_CONF_NO_FCS_RECV) {
1649 pi->fcs = L2CAP_FCS_NONE; 1741 chan->fcs = L2CAP_FCS_NONE;
1650 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs); 1742 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
1651 } 1743 }
1652 break; 1744 break;
1653 1745
@@ -1658,43 +1750,42 @@ done:
1658 rfc.retrans_timeout = 0; 1750 rfc.retrans_timeout = 0;
1659 rfc.monitor_timeout = 0; 1751 rfc.monitor_timeout = 0;
1660 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE); 1752 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
1661 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10) 1753 if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
1662 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10); 1754 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1663 1755
1664 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), 1756 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1665 (unsigned long) &rfc); 1757 (unsigned long) &rfc);
1666 1758
1667 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS)) 1759 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
1668 break; 1760 break;
1669 1761
1670 if (pi->fcs == L2CAP_FCS_NONE || 1762 if (chan->fcs == L2CAP_FCS_NONE ||
1671 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) { 1763 chan->conf_state & L2CAP_CONF_NO_FCS_RECV) {
1672 pi->fcs = L2CAP_FCS_NONE; 1764 chan->fcs = L2CAP_FCS_NONE;
1673 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs); 1765 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
1674 } 1766 }
1675 break; 1767 break;
1676 } 1768 }
1677 1769
1678 req->dcid = cpu_to_le16(pi->dcid); 1770 req->dcid = cpu_to_le16(chan->dcid);
1679 req->flags = cpu_to_le16(0); 1771 req->flags = cpu_to_le16(0);
1680 1772
1681 return ptr - data; 1773 return ptr - data;
1682} 1774}
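
l2cap_build_conf_req(), now static, serialises options through l2cap_add_conf_opt() in two shapes: small scalars travel in the unsigned long value itself, while structured options such as struct l2cap_conf_rfc are passed as a pointer cast to unsigned long, with the len argument telling the encoder which case it is handling. Since ptr advances through the buffer as options are appended, wire order follows call order:

    /* The two call shapes used above: a 2-byte scalar and a by-pointer
     * struct (the cast is how both fit through one API). */
    l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
    l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
                                            (unsigned long) &rfc);
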
1683 1775
1684static int l2cap_parse_conf_req(struct sock *sk, void *data) 1776static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
1685{ 1777{
1686 struct l2cap_pinfo *pi = l2cap_pi(sk);
1687 struct l2cap_conf_rsp *rsp = data; 1778 struct l2cap_conf_rsp *rsp = data;
1688 void *ptr = rsp->data; 1779 void *ptr = rsp->data;
1689 void *req = pi->conf_req; 1780 void *req = chan->conf_req;
1690 int len = pi->conf_len; 1781 int len = chan->conf_len;
1691 int type, hint, olen; 1782 int type, hint, olen;
1692 unsigned long val; 1783 unsigned long val;
1693 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC }; 1784 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1694 u16 mtu = L2CAP_DEFAULT_MTU; 1785 u16 mtu = L2CAP_DEFAULT_MTU;
1695 u16 result = L2CAP_CONF_SUCCESS; 1786 u16 result = L2CAP_CONF_SUCCESS;
1696 1787
1697 BT_DBG("sk %p", sk); 1788 BT_DBG("chan %p", chan);
1698 1789
1699 while (len >= L2CAP_CONF_OPT_SIZE) { 1790 while (len >= L2CAP_CONF_OPT_SIZE) {
1700 len -= l2cap_get_conf_opt(&req, &type, &olen, &val); 1791 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
@@ -1708,7 +1799,7 @@ static int l2cap_parse_conf_req(struct sock *sk, void *data)
1708 break; 1799 break;
1709 1800
1710 case L2CAP_CONF_FLUSH_TO: 1801 case L2CAP_CONF_FLUSH_TO:
1711 pi->flush_to = val; 1802 chan->flush_to = val;
1712 break; 1803 break;
1713 1804
1714 case L2CAP_CONF_QOS: 1805 case L2CAP_CONF_QOS:
@@ -1721,7 +1812,7 @@ static int l2cap_parse_conf_req(struct sock *sk, void *data)
1721 1812
1722 case L2CAP_CONF_FCS: 1813 case L2CAP_CONF_FCS:
1723 if (val == L2CAP_FCS_NONE) 1814 if (val == L2CAP_FCS_NONE)
1724 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV; 1815 chan->conf_state |= L2CAP_CONF_NO_FCS_RECV;
1725 1816
1726 break; 1817 break;
1727 1818
@@ -1735,30 +1826,30 @@ static int l2cap_parse_conf_req(struct sock *sk, void *data)
1735 } 1826 }
1736 } 1827 }
1737 1828
1738 if (pi->num_conf_rsp || pi->num_conf_req > 1) 1829 if (chan->num_conf_rsp || chan->num_conf_req > 1)
1739 goto done; 1830 goto done;
1740 1831
1741 switch (pi->mode) { 1832 switch (chan->mode) {
1742 case L2CAP_MODE_STREAMING: 1833 case L2CAP_MODE_STREAMING:
1743 case L2CAP_MODE_ERTM: 1834 case L2CAP_MODE_ERTM:
1744 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) { 1835 if (!(chan->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
1745 pi->mode = l2cap_select_mode(rfc.mode, 1836 chan->mode = l2cap_select_mode(rfc.mode,
1746 pi->conn->feat_mask); 1837 chan->conn->feat_mask);
1747 break; 1838 break;
1748 } 1839 }
1749 1840
1750 if (pi->mode != rfc.mode) 1841 if (chan->mode != rfc.mode)
1751 return -ECONNREFUSED; 1842 return -ECONNREFUSED;
1752 1843
1753 break; 1844 break;
1754 } 1845 }
1755 1846
1756done: 1847done:
1757 if (pi->mode != rfc.mode) { 1848 if (chan->mode != rfc.mode) {
1758 result = L2CAP_CONF_UNACCEPT; 1849 result = L2CAP_CONF_UNACCEPT;
1759 rfc.mode = pi->mode; 1850 rfc.mode = chan->mode;
1760 1851
1761 if (pi->num_conf_rsp == 1) 1852 if (chan->num_conf_rsp == 1)
1762 return -ECONNREFUSED; 1853 return -ECONNREFUSED;
1763 1854
1764 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, 1855 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
@@ -1773,32 +1864,32 @@ done:
1773 if (mtu < L2CAP_DEFAULT_MIN_MTU) 1864 if (mtu < L2CAP_DEFAULT_MIN_MTU)
1774 result = L2CAP_CONF_UNACCEPT; 1865 result = L2CAP_CONF_UNACCEPT;
1775 else { 1866 else {
1776 pi->omtu = mtu; 1867 chan->omtu = mtu;
1777 pi->conf_state |= L2CAP_CONF_MTU_DONE; 1868 chan->conf_state |= L2CAP_CONF_MTU_DONE;
1778 } 1869 }
1779 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu); 1870 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
1780 1871
1781 switch (rfc.mode) { 1872 switch (rfc.mode) {
1782 case L2CAP_MODE_BASIC: 1873 case L2CAP_MODE_BASIC:
1783 pi->fcs = L2CAP_FCS_NONE; 1874 chan->fcs = L2CAP_FCS_NONE;
1784 pi->conf_state |= L2CAP_CONF_MODE_DONE; 1875 chan->conf_state |= L2CAP_CONF_MODE_DONE;
1785 break; 1876 break;
1786 1877
1787 case L2CAP_MODE_ERTM: 1878 case L2CAP_MODE_ERTM:
1788 pi->remote_tx_win = rfc.txwin_size; 1879 chan->remote_tx_win = rfc.txwin_size;
1789 pi->remote_max_tx = rfc.max_transmit; 1880 chan->remote_max_tx = rfc.max_transmit;
1790 1881
1791 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10) 1882 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
1792 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10); 1883 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1793 1884
1794 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size); 1885 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1795 1886
1796 rfc.retrans_timeout = 1887 rfc.retrans_timeout =
1797 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO); 1888 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
1798 rfc.monitor_timeout = 1889 rfc.monitor_timeout =
1799 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO); 1890 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
1800 1891
1801 pi->conf_state |= L2CAP_CONF_MODE_DONE; 1892 chan->conf_state |= L2CAP_CONF_MODE_DONE;
1802 1893
1803 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, 1894 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1804 sizeof(rfc), (unsigned long) &rfc); 1895 sizeof(rfc), (unsigned long) &rfc);
@@ -1806,12 +1897,12 @@ done:
1806 break; 1897 break;
1807 1898
1808 case L2CAP_MODE_STREAMING: 1899 case L2CAP_MODE_STREAMING:
1809 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10) 1900 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
1810 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10); 1901 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1811 1902
1812 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size); 1903 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1813 1904
1814 pi->conf_state |= L2CAP_CONF_MODE_DONE; 1905 chan->conf_state |= L2CAP_CONF_MODE_DONE;
1815 1906
1816 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, 1907 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1817 sizeof(rfc), (unsigned long) &rfc); 1908 sizeof(rfc), (unsigned long) &rfc);
@@ -1822,29 +1913,28 @@ done:
 			result = L2CAP_CONF_UNACCEPT;
 
 			memset(&rfc, 0, sizeof(rfc));
-			rfc.mode = pi->mode;
+			rfc.mode = chan->mode;
 		}
 
 		if (result == L2CAP_CONF_SUCCESS)
-			pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
+			chan->conf_state |= L2CAP_CONF_OUTPUT_DONE;
 	}
-	rsp->scid = cpu_to_le16(pi->dcid);
+	rsp->scid = cpu_to_le16(chan->dcid);
 	rsp->result = cpu_to_le16(result);
 	rsp->flags = cpu_to_le16(0x0000);
 
 	return ptr - data;
 }
 
-static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
+static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
 {
-	struct l2cap_pinfo *pi = l2cap_pi(sk);
 	struct l2cap_conf_req *req = data;
 	void *ptr = req->data;
 	int type, olen;
 	unsigned long val;
 	struct l2cap_conf_rfc rfc;
 
-	BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
+	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
 
 	while (len >= L2CAP_CONF_OPT_SIZE) {
 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
@@ -1853,27 +1943,27 @@ static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data,
 		case L2CAP_CONF_MTU:
 			if (val < L2CAP_DEFAULT_MIN_MTU) {
 				*result = L2CAP_CONF_UNACCEPT;
-				pi->imtu = L2CAP_DEFAULT_MIN_MTU;
+				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
 			} else
-				pi->imtu = val;
-			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
+				chan->imtu = val;
+			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
 			break;
 
 		case L2CAP_CONF_FLUSH_TO:
-			pi->flush_to = val;
+			chan->flush_to = val;
 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
-							2, pi->flush_to);
+							2, chan->flush_to);
 			break;
 
 		case L2CAP_CONF_RFC:
 			if (olen == sizeof(rfc))
 				memcpy(&rfc, (void *)val, olen);
 
-			if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
-							rfc.mode != pi->mode)
+			if ((chan->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
+							rfc.mode != chan->mode)
 				return -ECONNREFUSED;
 
-			pi->fcs = 0;
+			chan->fcs = 0;
 
 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
 					sizeof(rfc), (unsigned long) &rfc);
@@ -1881,53 +1971,74 @@ static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data,
 		}
 	}
 
-	if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
+	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
 		return -ECONNREFUSED;
 
-	pi->mode = rfc.mode;
+	chan->mode = rfc.mode;
 
 	if (*result == L2CAP_CONF_SUCCESS) {
 		switch (rfc.mode) {
 		case L2CAP_MODE_ERTM:
-			pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
-			pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
-			pi->mps = le16_to_cpu(rfc.max_pdu_size);
+			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
+			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
+			chan->mps = le16_to_cpu(rfc.max_pdu_size);
 			break;
 		case L2CAP_MODE_STREAMING:
-			pi->mps = le16_to_cpu(rfc.max_pdu_size);
+			chan->mps = le16_to_cpu(rfc.max_pdu_size);
 		}
 	}
 
-	req->dcid = cpu_to_le16(pi->dcid);
+	req->dcid = cpu_to_le16(chan->dcid);
 	req->flags = cpu_to_le16(0x0000);
 
 	return ptr - data;
 }
 
-static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
+static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
 {
 	struct l2cap_conf_rsp *rsp = data;
 	void *ptr = rsp->data;
 
-	BT_DBG("sk %p", sk);
+	BT_DBG("chan %p", chan);
 
-	rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
+	rsp->scid = cpu_to_le16(chan->dcid);
 	rsp->result = cpu_to_le16(result);
 	rsp->flags = cpu_to_le16(flags);
 
 	return ptr - data;
 }
 
-static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
+void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
+{
+	struct l2cap_conn_rsp rsp;
+	struct l2cap_conn *conn = chan->conn;
+	u8 buf[128];
+
+	rsp.scid = cpu_to_le16(chan->dcid);
+	rsp.dcid = cpu_to_le16(chan->scid);
+	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
+	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
+	l2cap_send_cmd(conn, chan->ident,
+				L2CAP_CONN_RSP, sizeof(rsp), &rsp);
+
+	if (chan->conf_state & L2CAP_CONF_REQ_SENT)
+		return;
+
+	chan->conf_state |= L2CAP_CONF_REQ_SENT;
+	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
+			l2cap_build_conf_req(chan, buf), buf);
+	chan->num_conf_req++;
+}
+
+static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
 {
-	struct l2cap_pinfo *pi = l2cap_pi(sk);
 	int type, olen;
 	unsigned long val;
 	struct l2cap_conf_rfc rfc;
 
-	BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
+	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
 
-	if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
+	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
 		return;
 
 	while (len >= L2CAP_CONF_OPT_SIZE) {
@@ -1944,12 +2055,12 @@ static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
 done:
 	switch (rfc.mode) {
 	case L2CAP_MODE_ERTM:
-		pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
-		pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
-		pi->mps = le16_to_cpu(rfc.max_pdu_size);
+		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
+		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
+		chan->mps = le16_to_cpu(rfc.max_pdu_size);
 		break;
 	case L2CAP_MODE_STREAMING:
-		pi->mps = le16_to_cpu(rfc.max_pdu_size);
+		chan->mps = le16_to_cpu(rfc.max_pdu_size);
 	}
 }
 
@@ -1975,9 +2086,9 @@ static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hd
 
 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
 {
-	struct l2cap_chan_list *list = &conn->chan_list;
 	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
 	struct l2cap_conn_rsp rsp;
+	struct l2cap_chan *chan = NULL, *pchan;
 	struct sock *parent, *sk = NULL;
 	int result, status = L2CAP_CS_NO_INFO;
 
@@ -1987,12 +2098,14 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
 	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
 
 	/* Check if we have socket listening on psm */
-	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
-	if (!parent) {
+	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
+	if (!pchan) {
 		result = L2CAP_CR_BAD_PSM;
 		goto sendresp;
 	}
 
+	parent = pchan->sk;
+
 	bh_lock_sock(parent);
 
 	/* Check if the ACL is secure enough (if not SDP) */
@@ -2015,11 +2128,19 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
 	if (!sk)
 		goto response;
 
-	write_lock_bh(&list->lock);
+	chan = l2cap_chan_create(sk);
+	if (!chan) {
+		l2cap_sock_kill(sk);
+		goto response;
+	}
+
+	l2cap_pi(sk)->chan = chan;
+
+	write_lock_bh(&conn->chan_lock);
 
 	/* Check if we already have channel with that dcid */
-	if (__l2cap_get_chan_by_dcid(list, scid)) {
-		write_unlock_bh(&list->lock);
+	if (__l2cap_get_chan_by_dcid(conn, scid)) {
+		write_unlock_bh(&conn->chan_lock);
 		sock_set_flag(sk, SOCK_ZAPPED);
 		l2cap_sock_kill(sk);
 		goto response;
@@ -2030,18 +2151,21 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
 	l2cap_sock_init(sk, parent);
 	bacpy(&bt_sk(sk)->src, conn->src);
 	bacpy(&bt_sk(sk)->dst, conn->dst);
-	l2cap_pi(sk)->psm = psm;
-	l2cap_pi(sk)->dcid = scid;
+	chan->psm = psm;
+	chan->dcid = scid;
+
+	bt_accept_enqueue(parent, sk);
+
+	__l2cap_chan_add(conn, chan);
 
-	__l2cap_chan_add(conn, sk, parent);
-	dcid = l2cap_pi(sk)->scid;
+	dcid = chan->scid;
 
 	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
 
-	l2cap_pi(sk)->ident = cmd->ident;
+	chan->ident = cmd->ident;
 
 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
-		if (l2cap_check_security(sk)) {
+		if (l2cap_check_security(chan)) {
 			if (bt_sk(sk)->defer_setup) {
 				sk->sk_state = BT_CONNECT2;
 				result = L2CAP_CR_PEND;
@@ -2063,7 +2187,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
 		status = L2CAP_CS_NO_INFO;
 	}
 
-	write_unlock_bh(&list->lock);
+	write_unlock_bh(&conn->chan_lock);
 
 response:
 	bh_unlock_sock(parent);
@@ -2089,13 +2213,13 @@ sendresp:
 				L2CAP_INFO_REQ, sizeof(info), &info);
 	}
 
-	if (sk && !(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
+	if (chan && !(chan->conf_state & L2CAP_CONF_REQ_SENT) &&
 				result == L2CAP_CR_SUCCESS) {
 		u8 buf[128];
-		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
+		chan->conf_state |= L2CAP_CONF_REQ_SENT;
 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
-					l2cap_build_conf_req(sk, buf), buf);
-		l2cap_pi(sk)->num_conf_req++;
+					l2cap_build_conf_req(chan, buf), buf);
+		chan->num_conf_req++;
 	}
 
 	return 0;
@@ -2105,6 +2229,7 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
 {
 	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
 	u16 scid, dcid, result, status;
+	struct l2cap_chan *chan;
 	struct sock *sk;
 	u8 req[128];
 
@@ -2116,34 +2241,36 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
 
 	if (scid) {
-		sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
-		if (!sk)
+		chan = l2cap_get_chan_by_scid(conn, scid);
+		if (!chan)
 			return -EFAULT;
 	} else {
-		sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
-		if (!sk)
+		chan = l2cap_get_chan_by_ident(conn, cmd->ident);
+		if (!chan)
 			return -EFAULT;
 	}
 
+	sk = chan->sk;
+
 	switch (result) {
 	case L2CAP_CR_SUCCESS:
 		sk->sk_state = BT_CONFIG;
-		l2cap_pi(sk)->ident = 0;
-		l2cap_pi(sk)->dcid = dcid;
-		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
+		chan->ident = 0;
+		chan->dcid = dcid;
+		chan->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
 
-		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
+		if (chan->conf_state & L2CAP_CONF_REQ_SENT)
 			break;
 
-		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
+		chan->conf_state |= L2CAP_CONF_REQ_SENT;
 
 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
-					l2cap_build_conf_req(sk, req), req);
-		l2cap_pi(sk)->num_conf_req++;
+					l2cap_build_conf_req(chan, req), req);
+		chan->num_conf_req++;
 		break;
 
 	case L2CAP_CR_PEND:
-		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
+		chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
 		break;
 
 	default:
@@ -2155,7 +2282,7 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
 			break;
 		}
 
-		l2cap_chan_del(sk, ECONNREFUSED);
+		l2cap_chan_del(chan, ECONNREFUSED);
 		break;
 	}
 
@@ -2163,15 +2290,17 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
 	return 0;
 }
 
-static inline void set_default_fcs(struct l2cap_pinfo *pi)
+static inline void set_default_fcs(struct l2cap_chan *chan)
 {
+	struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
+
 	/* FCS is enabled only in ERTM or streaming mode, if one or both
 	 * sides request it.
 	 */
-	if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
-		pi->fcs = L2CAP_FCS_NONE;
-	else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
-		pi->fcs = L2CAP_FCS_CRC16;
+	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
+		chan->fcs = L2CAP_FCS_NONE;
+	else if (!(pi->chan->conf_state & L2CAP_CONF_NO_FCS_RECV))
+		chan->fcs = L2CAP_FCS_CRC16;
 }
 
 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
@@ -2179,6 +2308,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
 	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
 	u16 dcid, flags;
 	u8 rsp[64];
+	struct l2cap_chan *chan;
 	struct sock *sk;
 	int len;
 
@@ -2187,10 +2317,12 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
 
 	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
 
-	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
-	if (!sk)
+	chan = l2cap_get_chan_by_scid(conn, dcid);
+	if (!chan)
 		return -ENOENT;
 
+	sk = chan->sk;
+
 	if (sk->sk_state != BT_CONFIG) {
 		struct l2cap_cmd_rej rej;
 
@@ -2202,62 +2334,62 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
 
 	/* Reject if config buffer is too small. */
 	len = cmd_len - sizeof(*req);
-	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
+	if (chan->conf_len + len > sizeof(chan->conf_req)) {
 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
-				l2cap_build_conf_rsp(sk, rsp,
+				l2cap_build_conf_rsp(chan, rsp,
 					L2CAP_CONF_REJECT, flags), rsp);
 		goto unlock;
 	}
 
 	/* Store config. */
-	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
-	l2cap_pi(sk)->conf_len += len;
+	memcpy(chan->conf_req + chan->conf_len, req->data, len);
+	chan->conf_len += len;
 
 	if (flags & 0x0001) {
 		/* Incomplete config. Send empty response. */
 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
-				l2cap_build_conf_rsp(sk, rsp,
+				l2cap_build_conf_rsp(chan, rsp,
 					L2CAP_CONF_SUCCESS, 0x0001), rsp);
 		goto unlock;
 	}
 
 	/* Complete config. */
-	len = l2cap_parse_conf_req(sk, rsp);
+	len = l2cap_parse_conf_req(chan, rsp);
 	if (len < 0) {
-		l2cap_send_disconn_req(conn, sk, ECONNRESET);
+		l2cap_send_disconn_req(conn, chan, ECONNRESET);
 		goto unlock;
 	}
 
 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
-	l2cap_pi(sk)->num_conf_rsp++;
+	chan->num_conf_rsp++;
 
 	/* Reset config buffer. */
-	l2cap_pi(sk)->conf_len = 0;
+	chan->conf_len = 0;
 
-	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
+	if (!(chan->conf_state & L2CAP_CONF_OUTPUT_DONE))
 		goto unlock;
 
-	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
-		set_default_fcs(l2cap_pi(sk));
+	if (chan->conf_state & L2CAP_CONF_INPUT_DONE) {
+		set_default_fcs(chan);
 
 		sk->sk_state = BT_CONNECTED;
 
-		l2cap_pi(sk)->next_tx_seq = 0;
-		l2cap_pi(sk)->expected_tx_seq = 0;
-		__skb_queue_head_init(TX_QUEUE(sk));
-		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
-			l2cap_ertm_init(sk);
+		chan->next_tx_seq = 0;
+		chan->expected_tx_seq = 0;
+		skb_queue_head_init(&chan->tx_q);
+		if (chan->mode == L2CAP_MODE_ERTM)
+			l2cap_ertm_init(chan);
 
 		l2cap_chan_ready(sk);
 		goto unlock;
 	}
 
-	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
+	if (!(chan->conf_state & L2CAP_CONF_REQ_SENT)) {
 		u8 buf[64];
-		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
+		chan->conf_state |= L2CAP_CONF_REQ_SENT;
 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
-					l2cap_build_conf_req(sk, buf), buf);
-		l2cap_pi(sk)->num_conf_req++;
+					l2cap_build_conf_req(chan, buf), buf);
+		chan->num_conf_req++;
 	}
 
 unlock:
@@ -2269,6 +2401,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
 {
 	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
 	u16 scid, flags, result;
+	struct l2cap_chan *chan;
 	struct sock *sk;
 	int len = cmd->len - sizeof(*rsp);
 
@@ -2279,36 +2412,38 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
 	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
 			scid, flags, result);
 
-	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
-	if (!sk)
+	chan = l2cap_get_chan_by_scid(conn, scid);
+	if (!chan)
 		return 0;
 
+	sk = chan->sk;
+
 	switch (result) {
 	case L2CAP_CONF_SUCCESS:
-		l2cap_conf_rfc_get(sk, rsp->data, len);
+		l2cap_conf_rfc_get(chan, rsp->data, len);
 		break;
 
 	case L2CAP_CONF_UNACCEPT:
-		if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
+		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
 			char req[64];
 
 			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
-				l2cap_send_disconn_req(conn, sk, ECONNRESET);
+				l2cap_send_disconn_req(conn, chan, ECONNRESET);
 				goto done;
 			}
 
 			/* throw out any old stored conf requests */
 			result = L2CAP_CONF_SUCCESS;
-			len = l2cap_parse_conf_rsp(sk, rsp->data,
-							len, req, &result);
+			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
+							req, &result);
 			if (len < 0) {
-				l2cap_send_disconn_req(conn, sk, ECONNRESET);
+				l2cap_send_disconn_req(conn, chan, ECONNRESET);
 				goto done;
 			}
 
 			l2cap_send_cmd(conn, l2cap_get_ident(conn),
 						L2CAP_CONF_REQ, len, req);
-			l2cap_pi(sk)->num_conf_req++;
+			chan->num_conf_req++;
 			if (result != L2CAP_CONF_SUCCESS)
 				goto done;
 			break;
@@ -2317,24 +2452,24 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
 	default:
 		sk->sk_err = ECONNRESET;
 		l2cap_sock_set_timer(sk, HZ * 5);
-		l2cap_send_disconn_req(conn, sk, ECONNRESET);
+		l2cap_send_disconn_req(conn, chan, ECONNRESET);
 		goto done;
 	}
 
 	if (flags & 0x01)
 		goto done;
 
-	l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
+	chan->conf_state |= L2CAP_CONF_INPUT_DONE;
 
-	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
-		set_default_fcs(l2cap_pi(sk));
+	if (chan->conf_state & L2CAP_CONF_OUTPUT_DONE) {
+		set_default_fcs(chan);
 
 		sk->sk_state = BT_CONNECTED;
-		l2cap_pi(sk)->next_tx_seq = 0;
-		l2cap_pi(sk)->expected_tx_seq = 0;
-		__skb_queue_head_init(TX_QUEUE(sk));
-		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
-			l2cap_ertm_init(sk);
+		chan->next_tx_seq = 0;
+		chan->expected_tx_seq = 0;
+		skb_queue_head_init(&chan->tx_q);
+		if (chan->mode == L2CAP_MODE_ERTM)
+			l2cap_ertm_init(chan);
 
 		l2cap_chan_ready(sk);
 	}
@@ -2349,6 +2484,7 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd
 	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
 	struct l2cap_disconn_rsp rsp;
 	u16 dcid, scid;
+	struct l2cap_chan *chan;
 	struct sock *sk;
 
 	scid = __le16_to_cpu(req->scid);
@@ -2356,12 +2492,14 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd
 
 	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
 
-	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
-	if (!sk)
+	chan = l2cap_get_chan_by_scid(conn, dcid);
+	if (!chan)
 		return 0;
 
-	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
-	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
+	sk = chan->sk;
+
+	rsp.dcid = cpu_to_le16(chan->scid);
+	rsp.scid = cpu_to_le16(chan->dcid);
 	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
 
 	sk->sk_shutdown = SHUTDOWN_MASK;
@@ -2375,7 +2513,7 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd
 		return 0;
 	}
 
-	l2cap_chan_del(sk, ECONNRESET);
+	l2cap_chan_del(chan, ECONNRESET);
 	bh_unlock_sock(sk);
 
 	l2cap_sock_kill(sk);
@@ -2386,6 +2524,7 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd
 {
 	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
 	u16 dcid, scid;
+	struct l2cap_chan *chan;
 	struct sock *sk;
 
 	scid = __le16_to_cpu(rsp->scid);
@@ -2393,10 +2532,12 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd
 
 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
 
-	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
-	if (!sk)
+	chan = l2cap_get_chan_by_scid(conn, scid);
+	if (!chan)
 		return 0;
 
+	sk = chan->sk;
+
 	/* don't delete l2cap channel if sk is owned by user */
 	if (sock_owned_by_user(sk)) {
 		sk->sk_state = BT_DISCONN;
@@ -2406,7 +2547,7 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd
 		return 0;
 	}
 
-	l2cap_chan_del(sk, 0);
+	l2cap_chan_del(chan, 0);
 	bh_unlock_sock(sk);
 
 	l2cap_sock_kill(sk);
@@ -2463,6 +2604,11 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cm
 
 	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
 
+	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
+	if (cmd->ident != conn->info_ident ||
+			conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
+		return 0;
+
 	del_timer(&conn->info_timer);
 
 	if (result != L2CAP_IR_SUCCESS) {
@@ -2673,7 +2819,8 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn,
 
 	if (err) {
 		struct l2cap_cmd_rej rej;
-		BT_DBG("error %d", err);
+
+		BT_ERR("Wrong link type (%d)", err);
 
 		/* FIXME: Map err to a valid reason */
 		rej.reason = cpu_to_le16(0);
@@ -2687,12 +2834,12 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn,
 	kfree_skb(skb);
 }
 
-static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
+static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
 {
 	u16 our_fcs, rcv_fcs;
 	int hdr_size = L2CAP_HDR_SIZE + 2;
 
-	if (pi->fcs == L2CAP_FCS_CRC16) {
+	if (chan->fcs == L2CAP_FCS_CRC16) {
 		skb_trim(skb, skb->len - 2);
 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
@@ -2703,49 +2850,47 @@ static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
 	return 0;
 }
 
-static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
+static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
 {
-	struct l2cap_pinfo *pi = l2cap_pi(sk);
 	u16 control = 0;
 
-	pi->frames_sent = 0;
+	chan->frames_sent = 0;
 
-	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
 
-	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
+	if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
 		control |= L2CAP_SUPER_RCV_NOT_READY;
-		l2cap_send_sframe(pi, control);
-		pi->conn_state |= L2CAP_CONN_RNR_SENT;
+		l2cap_send_sframe(chan, control);
+		chan->conn_state |= L2CAP_CONN_RNR_SENT;
 	}
 
-	if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
-		l2cap_retransmit_frames(sk);
+	if (chan->conn_state & L2CAP_CONN_REMOTE_BUSY)
+		l2cap_retransmit_frames(chan);
 
-	l2cap_ertm_send(sk);
+	l2cap_ertm_send(chan);
 
-	if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
-			pi->frames_sent == 0) {
+	if (!(chan->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
+			chan->frames_sent == 0) {
 		control |= L2CAP_SUPER_RCV_READY;
-		l2cap_send_sframe(pi, control);
+		l2cap_send_sframe(chan, control);
 	}
 }
 
-static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
+static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar)
 {
 	struct sk_buff *next_skb;
-	struct l2cap_pinfo *pi = l2cap_pi(sk);
 	int tx_seq_offset, next_tx_seq_offset;
 
 	bt_cb(skb)->tx_seq = tx_seq;
 	bt_cb(skb)->sar = sar;
 
-	next_skb = skb_peek(SREJ_QUEUE(sk));
+	next_skb = skb_peek(&chan->srej_q);
 	if (!next_skb) {
-		__skb_queue_tail(SREJ_QUEUE(sk), skb);
+		__skb_queue_tail(&chan->srej_q, skb);
 		return 0;
 	}
 
-	tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
+	tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
 	if (tx_seq_offset < 0)
 		tx_seq_offset += 64;
 
@@ -2754,53 +2899,52 @@ static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_s
 			return -EINVAL;
 
 		next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
-				pi->buffer_seq) % 64;
+				chan->buffer_seq) % 64;
 		if (next_tx_seq_offset < 0)
 			next_tx_seq_offset += 64;
 
 		if (next_tx_seq_offset > tx_seq_offset) {
-			__skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
+			__skb_queue_before(&chan->srej_q, next_skb, skb);
 			return 0;
 		}
 
-		if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
+		if (skb_queue_is_last(&chan->srej_q, next_skb))
 			break;
 
-	} while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
+	} while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));
 
-	__skb_queue_tail(SREJ_QUEUE(sk), skb);
+	__skb_queue_tail(&chan->srej_q, skb);
 
 	return 0;
 }
 
-static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
+static int l2cap_ertm_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
 {
-	struct l2cap_pinfo *pi = l2cap_pi(sk);
 	struct sk_buff *_skb;
 	int err;
 
 	switch (control & L2CAP_CTRL_SAR) {
 	case L2CAP_SDU_UNSEGMENTED:
-		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
+		if (chan->conn_state & L2CAP_CONN_SAR_SDU)
 			goto drop;
 
-		err = sock_queue_rcv_skb(sk, skb);
+		err = sock_queue_rcv_skb(chan->sk, skb);
 		if (!err)
 			return err;
 
 		break;
 
 	case L2CAP_SDU_START:
-		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
+		if (chan->conn_state & L2CAP_CONN_SAR_SDU)
 			goto drop;
 
-		pi->sdu_len = get_unaligned_le16(skb->data);
+		chan->sdu_len = get_unaligned_le16(skb->data);
 
-		if (pi->sdu_len > pi->imtu)
+		if (chan->sdu_len > chan->imtu)
 			goto disconnect;
 
-		pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
-		if (!pi->sdu)
+		chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
+		if (!chan->sdu)
 			return -ENOMEM;
 
 		/* pull sdu_len bytes only after alloc, because of Local Busy
@@ -2808,63 +2952,63 @@ static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 c
 		 * only once, i.e., when alloc does not fail */
 		skb_pull(skb, 2);
 
-		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
+		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
 
-		pi->conn_state |= L2CAP_CONN_SAR_SDU;
-		pi->partial_sdu_len = skb->len;
+		chan->conn_state |= L2CAP_CONN_SAR_SDU;
+		chan->partial_sdu_len = skb->len;
 		break;
 
 	case L2CAP_SDU_CONTINUE:
-		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
+		if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
 			goto disconnect;
 
-		if (!pi->sdu)
+		if (!chan->sdu)
 			goto disconnect;
 
-		pi->partial_sdu_len += skb->len;
-		if (pi->partial_sdu_len > pi->sdu_len)
+		chan->partial_sdu_len += skb->len;
+		if (chan->partial_sdu_len > chan->sdu_len)
 			goto drop;
 
-		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
+		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
 
 		break;
 
 	case L2CAP_SDU_END:
-		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
+		if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
 			goto disconnect;
 
-		if (!pi->sdu)
+		if (!chan->sdu)
 			goto disconnect;
 
-		if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
-			pi->partial_sdu_len += skb->len;
+		if (!(chan->conn_state & L2CAP_CONN_SAR_RETRY)) {
+			chan->partial_sdu_len += skb->len;
 
-			if (pi->partial_sdu_len > pi->imtu)
+			if (chan->partial_sdu_len > chan->imtu)
 				goto drop;
 
-			if (pi->partial_sdu_len != pi->sdu_len)
+			if (chan->partial_sdu_len != chan->sdu_len)
 				goto drop;
 
-			memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
+			memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
 		}
 
-		_skb = skb_clone(pi->sdu, GFP_ATOMIC);
+		_skb = skb_clone(chan->sdu, GFP_ATOMIC);
 		if (!_skb) {
-			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
+			chan->conn_state |= L2CAP_CONN_SAR_RETRY;
 			return -ENOMEM;
 		}
 
-		err = sock_queue_rcv_skb(sk, _skb);
+		err = sock_queue_rcv_skb(chan->sk, _skb);
 		if (err < 0) {
 			kfree_skb(_skb);
-			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
+			chan->conn_state |= L2CAP_CONN_SAR_RETRY;
 			return err;
 		}
 
-		pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
-		pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
+		chan->conn_state &= ~L2CAP_CONN_SAR_RETRY;
+		chan->conn_state &= ~L2CAP_CONN_SAR_SDU;
 
-		kfree_skb(pi->sdu);
+		kfree_skb(chan->sdu);
 		break;
 	}
 
@@ -2872,51 +3016,50 @@ static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 c
 	return 0;
 
 drop:
-	kfree_skb(pi->sdu);
-	pi->sdu = NULL;
+	kfree_skb(chan->sdu);
+	chan->sdu = NULL;
 
 disconnect:
-	l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
+	l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
 	kfree_skb(skb);
 	return 0;
 }
 
-static int l2cap_try_push_rx_skb(struct sock *sk)
+static int l2cap_try_push_rx_skb(struct l2cap_chan *chan)
 {
-	struct l2cap_pinfo *pi = l2cap_pi(sk);
 	struct sk_buff *skb;
 	u16 control;
 	int err;
 
-	while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
+	while ((skb = skb_dequeue(&chan->busy_q))) {
 		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
-		err = l2cap_ertm_reassembly_sdu(sk, skb, control);
+		err = l2cap_ertm_reassembly_sdu(chan, skb, control);
 		if (err < 0) {
-			skb_queue_head(BUSY_QUEUE(sk), skb);
+			skb_queue_head(&chan->busy_q, skb);
 			return -EBUSY;
 		}
 
-		pi->buffer_seq = (pi->buffer_seq + 1) % 64;
+		chan->buffer_seq = (chan->buffer_seq + 1) % 64;
 	}
 
-	if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
+	if (!(chan->conn_state & L2CAP_CONN_RNR_SENT))
 		goto done;
 
-	control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+	control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
 	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
-	l2cap_send_sframe(pi, control);
-	l2cap_pi(sk)->retry_count = 1;
+	l2cap_send_sframe(chan, control);
+	chan->retry_count = 1;
 
-	del_timer(&pi->retrans_timer);
+	del_timer(&chan->retrans_timer);
 	__mod_monitor_timer();
 
-	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
+	chan->conn_state |= L2CAP_CONN_WAIT_F;
 
 done:
-	pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
-	pi->conn_state &= ~L2CAP_CONN_RNR_SENT;
+	chan->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
+	chan->conn_state &= ~L2CAP_CONN_RNR_SENT;
 
-	BT_DBG("sk %p, Exit local busy", sk);
+	BT_DBG("chan %p, Exit local busy", chan);
 
 	return 0;
 }
@@ -2924,21 +3067,21 @@ done:
 static void l2cap_busy_work(struct work_struct *work)
 {
 	DECLARE_WAITQUEUE(wait, current);
-	struct l2cap_pinfo *pi =
-		container_of(work, struct l2cap_pinfo, busy_work);
-	struct sock *sk = (struct sock *)pi;
+	struct l2cap_chan *chan =
+		container_of(work, struct l2cap_chan, busy_work);
+	struct sock *sk = chan->sk;
 	int n_tries = 0, timeo = HZ/5, err;
 	struct sk_buff *skb;
 
 	lock_sock(sk);
 
 	add_wait_queue(sk_sleep(sk), &wait);
-	while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
+	while ((skb = skb_peek(&chan->busy_q))) {
 		set_current_state(TASK_INTERRUPTIBLE);
 
 		if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
 			err = -EBUSY;
-			l2cap_send_disconn_req(pi->conn, sk, EBUSY);
+			l2cap_send_disconn_req(chan->conn, chan, EBUSY);
 			break;
 		}
 
@@ -2958,7 +3101,7 @@ static void l2cap_busy_work(struct work_struct *work)
 		if (err)
 			break;
 
-		if (l2cap_try_push_rx_skb(sk) == 0)
+		if (l2cap_try_push_rx_skb(chan) == 0)
 			break;
 	}
 
@@ -2968,48 +3111,46 @@ static void l2cap_busy_work(struct work_struct *work)
 	release_sock(sk);
 }
 
-static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
+static int l2cap_push_rx_skb(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
 {
-	struct l2cap_pinfo *pi = l2cap_pi(sk);
 	int sctrl, err;
 
-	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
+	if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
 		bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
-		__skb_queue_tail(BUSY_QUEUE(sk), skb);
-		return l2cap_try_push_rx_skb(sk);
+		__skb_queue_tail(&chan->busy_q, skb);
+		return l2cap_try_push_rx_skb(chan);
 
 
 	}
 
-	err = l2cap_ertm_reassembly_sdu(sk, skb, control);
+	err = l2cap_ertm_reassembly_sdu(chan, skb, control);
 	if (err >= 0) {
-		pi->buffer_seq = (pi->buffer_seq + 1) % 64;
+		chan->buffer_seq = (chan->buffer_seq + 1) % 64;
 		return err;
 	}
 
 	/* Busy Condition */
-	BT_DBG("sk %p, Enter local busy", sk);
+	BT_DBG("chan %p, Enter local busy", chan);
 
-	pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
+	chan->conn_state |= L2CAP_CONN_LOCAL_BUSY;
 	bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
-	__skb_queue_tail(BUSY_QUEUE(sk), skb);
+	__skb_queue_tail(&chan->busy_q, skb);
 
-	sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+	sctrl = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
 	sctrl |= L2CAP_SUPER_RCV_NOT_READY;
-	l2cap_send_sframe(pi, sctrl);
+	l2cap_send_sframe(chan, sctrl);
 
-	pi->conn_state |= L2CAP_CONN_RNR_SENT;
+	chan->conn_state |= L2CAP_CONN_RNR_SENT;
 
-	del_timer(&pi->ack_timer);
+	del_timer(&chan->ack_timer);
 
-	queue_work(_busy_wq, &pi->busy_work);
+	queue_work(_busy_wq, &chan->busy_work);
 
 	return err;
 }
 
-static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
+static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
 {
-	struct l2cap_pinfo *pi = l2cap_pi(sk);
 	struct sk_buff *_skb;
 	int err = -EINVAL;
 
@@ -3020,80 +3161,80 @@ static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb,
 
 	switch (control & L2CAP_CTRL_SAR) {
 	case L2CAP_SDU_UNSEGMENTED:
-		if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
-			kfree_skb(pi->sdu);
+		if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
+			kfree_skb(chan->sdu);
 			break;
 		}
 
-		err = sock_queue_rcv_skb(sk, skb);
+		err = sock_queue_rcv_skb(chan->sk, skb);
 		if (!err)
 			return 0;
 
 		break;
 
 	case L2CAP_SDU_START:
-		if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
-			kfree_skb(pi->sdu);
+		if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
+			kfree_skb(chan->sdu);
 			break;
 		}
 
-		pi->sdu_len = get_unaligned_le16(skb->data);
+		chan->sdu_len = get_unaligned_le16(skb->data);
 		skb_pull(skb, 2);
 
-		if (pi->sdu_len > pi->imtu) {
+		if (chan->sdu_len > chan->imtu) {
 			err = -EMSGSIZE;
 			break;
 		}
 
-		pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
-		if (!pi->sdu) {
+		chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
+		if (!chan->sdu) {
 			err = -ENOMEM;
 			break;
 		}
 
-		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
+		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
 
-		pi->conn_state |= L2CAP_CONN_SAR_SDU;
-		pi->partial_sdu_len = skb->len;
+		chan->conn_state |= L2CAP_CONN_SAR_SDU;
+		chan->partial_sdu_len = skb->len;
 		err = 0;
 		break;
 
 	case L2CAP_SDU_CONTINUE:
-		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
+		if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
 			break;
 
-		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
+		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
 
-		pi->partial_sdu_len += skb->len;
-		if (pi->partial_sdu_len > pi->sdu_len)
-			kfree_skb(pi->sdu);
+		chan->partial_sdu_len += skb->len;
+		if (chan->partial_sdu_len > chan->sdu_len)
+			kfree_skb(chan->sdu);
 		else
 			err = 0;
 
 		break;
 
 	case L2CAP_SDU_END:
-		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
+		if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
 			break;
 
-		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
+		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
 
-		pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
-		pi->partial_sdu_len += skb->len;
+		chan->conn_state &= ~L2CAP_CONN_SAR_SDU;
+		chan->partial_sdu_len += skb->len;
 
-		if (pi->partial_sdu_len > pi->imtu)
+		if (chan->partial_sdu_len > chan->imtu)
 			goto drop;
 
-		if (pi->partial_sdu_len == pi->sdu_len) {
-			_skb = skb_clone(pi->sdu, GFP_ATOMIC);
-			err = sock_queue_rcv_skb(sk, _skb);
+		if (chan->partial_sdu_len == chan->sdu_len) {
+			_skb = skb_clone(chan->sdu, GFP_ATOMIC);
+			err = sock_queue_rcv_skb(chan->sk, _skb);
 			if (err < 0)
 				kfree_skb(_skb);
 		}
 		err = 0;
 
 drop:
-		kfree_skb(pi->sdu);
+		kfree_skb(chan->sdu);
 		break;
 	}
 
@@ -3101,31 +3242,30 @@ drop:
 	return err;
 }
 
-static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
+static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
 {
 	struct sk_buff *skb;
 	u16 control;
 
-	while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
+	while ((skb = skb_peek(&chan->srej_q))) {
 		if (bt_cb(skb)->tx_seq != tx_seq)
 			break;
 
-		skb = skb_dequeue(SREJ_QUEUE(sk));
+		skb = skb_dequeue(&chan->srej_q);
 		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
-		l2cap_ertm_reassembly_sdu(sk, skb, control);
-		l2cap_pi(sk)->buffer_seq_srej =
-			(l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
+		l2cap_ertm_reassembly_sdu(chan, skb, control);
+		chan->buffer_seq_srej =
+			(chan->buffer_seq_srej + 1) % 64;
 		tx_seq = (tx_seq + 1) % 64;
 	}
 }
 
-static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
+static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
 {
-	struct l2cap_pinfo *pi = l2cap_pi(sk);
 	struct srej_list *l, *tmp;
 	u16 control;
 
-	list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
+	list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
 		if (l->tx_seq == tx_seq) {
 			list_del(&l->list);
 			kfree(l);
@@ -3133,107 +3273,105 @@ static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
 		}
 		control = L2CAP_SUPER_SELECT_REJECT;
 		control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
-		l2cap_send_sframe(pi, control);
+		l2cap_send_sframe(chan, control);
 		list_del(&l->list);
-		list_add_tail(&l->list, SREJ_LIST(sk));
+		list_add_tail(&l->list, &chan->srej_l);
 	}
 }
 
-static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
+static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq)
 {
-	struct l2cap_pinfo *pi = l2cap_pi(sk);
 	struct srej_list *new;
 	u16 control;
 
-	while (tx_seq != pi->expected_tx_seq) {
+	while (tx_seq != chan->expected_tx_seq) {
 		control = L2CAP_SUPER_SELECT_REJECT;
-		control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
-		l2cap_send_sframe(pi, control);
+		control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+		l2cap_send_sframe(chan, control);
 
 		new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
-		new->tx_seq = pi->expected_tx_seq;
-		pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
-		list_add_tail(&new->list, SREJ_LIST(sk));
+		new->tx_seq = chan->expected_tx_seq;
+		chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
+		list_add_tail(&new->list, &chan->srej_l);
 	}
-	pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
+	chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
 }
 
-static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
+static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
 {
-	struct l2cap_pinfo *pi = l2cap_pi(sk);
 	u8 tx_seq = __get_txseq(rx_control);
 	u8 req_seq = __get_reqseq(rx_control);
 	u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
 	int tx_seq_offset, expected_tx_seq_offset;
-	int num_to_ack = (pi->tx_win/6) + 1;
+	int num_to_ack = (chan->tx_win/6) + 1;
 	int err = 0;
 
-	BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,
-								rx_control);
+	BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
+							tx_seq, rx_control);
 
 	if (L2CAP_CTRL_FINAL & rx_control &&
-			l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
-		del_timer(&pi->monitor_timer);
-		if (pi->unacked_frames > 0)
+			chan->conn_state & L2CAP_CONN_WAIT_F) {
+		del_timer(&chan->monitor_timer);
+		if (chan->unacked_frames > 0)
 			__mod_retrans_timer();
-		pi->conn_state &= ~L2CAP_CONN_WAIT_F;
+		chan->conn_state &= ~L2CAP_CONN_WAIT_F;
 	}
 
-	pi->expected_ack_seq = req_seq;
-	l2cap_drop_acked_frames(sk);
+	chan->expected_ack_seq = req_seq;
+	l2cap_drop_acked_frames(chan);
 
-	if (tx_seq == pi->expected_tx_seq)
+	if (tx_seq == chan->expected_tx_seq)
 		goto expected;
 
-	tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
+	tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
 	if (tx_seq_offset < 0)
 		tx_seq_offset += 64;
 
 	/* invalid tx_seq */
-	if (tx_seq_offset >= pi->tx_win) {
-		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
+	if (tx_seq_offset >= chan->tx_win) {
+		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
 		goto drop;
 	}
 
-	if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
+	if (chan->conn_state == L2CAP_CONN_LOCAL_BUSY)
 		goto drop;
 
-	if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
+	if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
 		struct srej_list *first;
 
-		first = list_first_entry(SREJ_LIST(sk),
+		first = list_first_entry(&chan->srej_l,
 				struct srej_list, list);
 		if (tx_seq == first->tx_seq) {
-			l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
-			l2cap_check_srej_gap(sk, tx_seq);
+			l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
+			l2cap_check_srej_gap(chan, tx_seq);
 
 			list_del(&first->list);
 			kfree(first);
 
-			if (list_empty(SREJ_LIST(sk))) {
-				pi->buffer_seq = pi->buffer_seq_srej;
-				pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
-				l2cap_send_ack(pi);
-				BT_DBG("sk %p, Exit SREJ_SENT", sk);
+			if (list_empty(&chan->srej_l)) {
+				chan->buffer_seq = chan->buffer_seq_srej;
+				chan->conn_state &= ~L2CAP_CONN_SREJ_SENT;
+				l2cap_send_ack(chan);
+				BT_DBG("chan %p, Exit SREJ_SENT", chan);
 			}
 		} else {
 			struct srej_list *l;
 
 			/* duplicated tx_seq */
-			if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
+			if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
 				goto drop;
 
-			list_for_each_entry(l, SREJ_LIST(sk), list) {
+			list_for_each_entry(l, &chan->srej_l, list) {
 				if (l->tx_seq == tx_seq) {
-					l2cap_resend_srejframe(sk, tx_seq);
+					l2cap_resend_srejframe(chan, tx_seq);
 					return 0;
 				}
 			}
-			l2cap_send_srejframe(sk, tx_seq);
+			l2cap_send_srejframe(chan, tx_seq);
 		}
 	} else {
 		expected_tx_seq_offset =
-			(pi->expected_tx_seq - pi->buffer_seq) % 64;
+			(chan->expected_tx_seq - chan->buffer_seq) % 64;
 		if (expected_tx_seq_offset < 0)
 			expected_tx_seq_offset += 64;
 
@@ -3241,51 +3379,51 @@ static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, str
 		if (tx_seq_offset < expected_tx_seq_offset)
 			goto drop;
 
-		pi->conn_state |= L2CAP_CONN_SREJ_SENT;
+		chan->conn_state |= L2CAP_CONN_SREJ_SENT;
 
-		BT_DBG("sk %p, Enter SREJ", sk);
+		BT_DBG("chan %p, Enter SREJ", chan);
 
-		INIT_LIST_HEAD(SREJ_LIST(sk));
-		pi->buffer_seq_srej = pi->buffer_seq;
+		INIT_LIST_HEAD(&chan->srej_l);
+		chan->buffer_seq_srej = chan->buffer_seq;
 
-		__skb_queue_head_init(SREJ_QUEUE(sk));
-		__skb_queue_head_init(BUSY_QUEUE(sk));
-		l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
+		__skb_queue_head_init(&chan->srej_q);
+		__skb_queue_head_init(&chan->busy_q);
+		l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
 
-		pi->conn_state |= L2CAP_CONN_SEND_PBIT;
+		chan->conn_state |= L2CAP_CONN_SEND_PBIT;
 
-		l2cap_send_srejframe(sk, tx_seq);
+		l2cap_send_srejframe(chan, tx_seq);
 
-		del_timer(&pi->ack_timer);
+		del_timer(&chan->ack_timer);
 	}
 	return 0;
 
 expected:
-	pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
+	chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
 
-	if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
+	if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
 		bt_cb(skb)->tx_seq = tx_seq;
 		bt_cb(skb)->sar = sar;
-		__skb_queue_tail(SREJ_QUEUE(sk), skb);
+		__skb_queue_tail(&chan->srej_q, skb);
 		return 0;
 	}
 
-	err = l2cap_push_rx_skb(sk, skb, rx_control);
+	err = l2cap_push_rx_skb(chan, skb, rx_control);
 	if (err < 0)
 		return 0;
 
 	if (rx_control & L2CAP_CTRL_FINAL) {
-		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
-			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
+		if (chan->conn_state & L2CAP_CONN_REJ_ACT)
+			chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
 		else
-			l2cap_retransmit_frames(sk);
+			l2cap_retransmit_frames(chan);
 	}
 
 	__mod_ack_timer();
 
-	pi->num_acked = (pi->num_acked + 1) % num_to_ack;
-	if (pi->num_acked == num_to_ack - 1)
-		l2cap_send_ack(pi);
+	chan->num_acked = (chan->num_acked + 1) % num_to_ack;
+	if (chan->num_acked == num_to_ack - 1)
+		l2cap_send_ack(chan);
 
 	return 0;
 
@@ -3294,165 +3432,160 @@ drop:
3294 return 0; 3432 return 0;
3295} 3433}
3296 3434
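The offsets computed in this handler live in ERTM's 6-bit sequence space: distances between TxSeq values are taken modulo 64, and a negative C remainder is wrapped back into range, exactly as the "+= 64" fixups above do. A minimal standalone sketch of that arithmetic; the function and variable names are mine, not the patch's:

	#include <stdio.h>
	#include <stdint.h>

	/* Distance from 'from' to 'to' in a 6-bit (mod-64) sequence space,
	 * mirroring the (a - b) % 64 plus wrap-around fixup used above. */
	static int seq_offset(uint8_t to, uint8_t from)
	{
		int off = (to - from) % 64;

		if (off < 0)
			off += 64;	/* C's % can yield a negative remainder */
		return off;
	}

	int main(void)
	{
		/* 2 is three frames ahead of 63 once the counter wraps at 64 */
		printf("%d\n", seq_offset(2, 63));	/* prints 3 */
		printf("%d\n", seq_offset(63, 2));	/* prints 61 */
		return 0;
	}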
3297static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control) 3435static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
3298{ 3436{
3299 struct l2cap_pinfo *pi = l2cap_pi(sk); 3437 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, __get_reqseq(rx_control),
3300
3301 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
3302 rx_control); 3438 rx_control);
3303 3439
3304 pi->expected_ack_seq = __get_reqseq(rx_control); 3440 chan->expected_ack_seq = __get_reqseq(rx_control);
3305 l2cap_drop_acked_frames(sk); 3441 l2cap_drop_acked_frames(chan);
3306 3442
3307 if (rx_control & L2CAP_CTRL_POLL) { 3443 if (rx_control & L2CAP_CTRL_POLL) {
3308 pi->conn_state |= L2CAP_CONN_SEND_FBIT; 3444 chan->conn_state |= L2CAP_CONN_SEND_FBIT;
3309 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) { 3445 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
3310 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) && 3446 if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3311 (pi->unacked_frames > 0)) 3447 (chan->unacked_frames > 0))
3312 __mod_retrans_timer(); 3448 __mod_retrans_timer();
3313 3449
3314 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY; 3450 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3315 l2cap_send_srejtail(sk); 3451 l2cap_send_srejtail(chan);
3316 } else { 3452 } else {
3317 l2cap_send_i_or_rr_or_rnr(sk); 3453 l2cap_send_i_or_rr_or_rnr(chan);
3318 } 3454 }
3319 3455
3320 } else if (rx_control & L2CAP_CTRL_FINAL) { 3456 } else if (rx_control & L2CAP_CTRL_FINAL) {
3321 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY; 3457 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3322 3458
3323 if (pi->conn_state & L2CAP_CONN_REJ_ACT) 3459 if (chan->conn_state & L2CAP_CONN_REJ_ACT)
3324 pi->conn_state &= ~L2CAP_CONN_REJ_ACT; 3460 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3325 else 3461 else
3326 l2cap_retransmit_frames(sk); 3462 l2cap_retransmit_frames(chan);
3327 3463
3328 } else { 3464 } else {
3329 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) && 3465 if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3330 (pi->unacked_frames > 0)) 3466 (chan->unacked_frames > 0))
3331 __mod_retrans_timer(); 3467 __mod_retrans_timer();
3332 3468
3333 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY; 3469 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3334 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) 3470 if (chan->conn_state & L2CAP_CONN_SREJ_SENT)
3335 l2cap_send_ack(pi); 3471 l2cap_send_ack(chan);
3336 else 3472 else
3337 l2cap_ertm_send(sk); 3473 l2cap_ertm_send(chan);
3338 } 3474 }
3339} 3475}
3340 3476
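The RR handler drives the ERTM state machine purely through single-bit flags in chan->conn_state. A toy rendition of that set/clear/test idiom follows; the flag values here are invented for illustration, the real L2CAP_CONN_* masks live in the kernel's l2cap headers:

	#include <stdio.h>

	#define CONN_REMOTE_BUSY	0x0004	/* assumed values, not the kernel's */
	#define CONN_SREJ_SENT		0x0010
	#define CONN_SEND_FBIT		0x0040

	int main(void)
	{
		unsigned int conn_state = 0;

		conn_state |= CONN_SEND_FBIT;		/* peer polled: answer with F=1 */
		conn_state &= ~CONN_REMOTE_BUSY;	/* RR means the peer can receive again */

		if (conn_state & CONN_SREJ_SENT)
			printf("still recovering missing I-frames\n");
		else
			printf("normal receive state\n");

		return 0;
	}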
3341static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control) 3477static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
3342{ 3478{
3343 struct l2cap_pinfo *pi = l2cap_pi(sk);
3344 u8 tx_seq = __get_reqseq(rx_control); 3479 u8 tx_seq = __get_reqseq(rx_control);
3345 3480
3346 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control); 3481 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3347 3482
3348 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY; 3483 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3349 3484
3350 pi->expected_ack_seq = tx_seq; 3485 chan->expected_ack_seq = tx_seq;
3351 l2cap_drop_acked_frames(sk); 3486 l2cap_drop_acked_frames(chan);
3352 3487
3353 if (rx_control & L2CAP_CTRL_FINAL) { 3488 if (rx_control & L2CAP_CTRL_FINAL) {
3354 if (pi->conn_state & L2CAP_CONN_REJ_ACT) 3489 if (chan->conn_state & L2CAP_CONN_REJ_ACT)
3355 pi->conn_state &= ~L2CAP_CONN_REJ_ACT; 3490 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3356 else 3491 else
3357 l2cap_retransmit_frames(sk); 3492 l2cap_retransmit_frames(chan);
3358 } else { 3493 } else {
3359 l2cap_retransmit_frames(sk); 3494 l2cap_retransmit_frames(chan);
3360 3495
3361 if (pi->conn_state & L2CAP_CONN_WAIT_F) 3496 if (chan->conn_state & L2CAP_CONN_WAIT_F)
3362 pi->conn_state |= L2CAP_CONN_REJ_ACT; 3497 chan->conn_state |= L2CAP_CONN_REJ_ACT;
3363 } 3498 }
3364} 3499}
3365static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control) 3500static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
3366{ 3501{
3367 struct l2cap_pinfo *pi = l2cap_pi(sk);
3368 u8 tx_seq = __get_reqseq(rx_control); 3502 u8 tx_seq = __get_reqseq(rx_control);
3369 3503
3370 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control); 3504 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3371 3505
3372 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY; 3506 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3373 3507
3374 if (rx_control & L2CAP_CTRL_POLL) { 3508 if (rx_control & L2CAP_CTRL_POLL) {
3375 pi->expected_ack_seq = tx_seq; 3509 chan->expected_ack_seq = tx_seq;
3376 l2cap_drop_acked_frames(sk); 3510 l2cap_drop_acked_frames(chan);
3377 3511
3378 pi->conn_state |= L2CAP_CONN_SEND_FBIT; 3512 chan->conn_state |= L2CAP_CONN_SEND_FBIT;
3379 l2cap_retransmit_one_frame(sk, tx_seq); 3513 l2cap_retransmit_one_frame(chan, tx_seq);
3380 3514
3381 l2cap_ertm_send(sk); 3515 l2cap_ertm_send(chan);
3382 3516
3383 if (pi->conn_state & L2CAP_CONN_WAIT_F) { 3517 if (chan->conn_state & L2CAP_CONN_WAIT_F) {
3384 pi->srej_save_reqseq = tx_seq; 3518 chan->srej_save_reqseq = tx_seq;
3385 pi->conn_state |= L2CAP_CONN_SREJ_ACT; 3519 chan->conn_state |= L2CAP_CONN_SREJ_ACT;
3386 } 3520 }
3387 } else if (rx_control & L2CAP_CTRL_FINAL) { 3521 } else if (rx_control & L2CAP_CTRL_FINAL) {
3388 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) && 3522 if ((chan->conn_state & L2CAP_CONN_SREJ_ACT) &&
3389 pi->srej_save_reqseq == tx_seq) 3523 chan->srej_save_reqseq == tx_seq)
3390 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT; 3524 chan->conn_state &= ~L2CAP_CONN_SREJ_ACT;
3391 else 3525 else
3392 l2cap_retransmit_one_frame(sk, tx_seq); 3526 l2cap_retransmit_one_frame(chan, tx_seq);
3393 } else { 3527 } else {
3394 l2cap_retransmit_one_frame(sk, tx_seq); 3528 l2cap_retransmit_one_frame(chan, tx_seq);
3395 if (pi->conn_state & L2CAP_CONN_WAIT_F) { 3529 if (chan->conn_state & L2CAP_CONN_WAIT_F) {
3396 pi->srej_save_reqseq = tx_seq; 3530 chan->srej_save_reqseq = tx_seq;
3397 pi->conn_state |= L2CAP_CONN_SREJ_ACT; 3531 chan->conn_state |= L2CAP_CONN_SREJ_ACT;
3398 } 3532 }
3399 } 3533 }
3400} 3534}
3401 3535
3402static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control) 3536static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
3403{ 3537{
3404 struct l2cap_pinfo *pi = l2cap_pi(sk);
3405 u8 tx_seq = __get_reqseq(rx_control); 3538 u8 tx_seq = __get_reqseq(rx_control);
3406 3539
3407 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control); 3540 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3408 3541
3409 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY; 3542 chan->conn_state |= L2CAP_CONN_REMOTE_BUSY;
3410 pi->expected_ack_seq = tx_seq; 3543 chan->expected_ack_seq = tx_seq;
3411 l2cap_drop_acked_frames(sk); 3544 l2cap_drop_acked_frames(chan);
3412 3545
3413 if (rx_control & L2CAP_CTRL_POLL) 3546 if (rx_control & L2CAP_CTRL_POLL)
3414 pi->conn_state |= L2CAP_CONN_SEND_FBIT; 3547 chan->conn_state |= L2CAP_CONN_SEND_FBIT;
3415 3548
3416 if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) { 3549 if (!(chan->conn_state & L2CAP_CONN_SREJ_SENT)) {
3417 del_timer(&pi->retrans_timer); 3550 del_timer(&chan->retrans_timer);
3418 if (rx_control & L2CAP_CTRL_POLL) 3551 if (rx_control & L2CAP_CTRL_POLL)
3419 l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL); 3552 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
3420 return; 3553 return;
3421 } 3554 }
3422 3555
3423 if (rx_control & L2CAP_CTRL_POLL) 3556 if (rx_control & L2CAP_CTRL_POLL)
3424 l2cap_send_srejtail(sk); 3557 l2cap_send_srejtail(chan);
3425 else 3558 else
3426 l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY); 3559 l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY);
3427} 3560}
3428 3561
3429static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb) 3562static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3430{ 3563{
3431 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len); 3564 BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);
3432 3565
3433 if (L2CAP_CTRL_FINAL & rx_control && 3566 if (L2CAP_CTRL_FINAL & rx_control &&
3434 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) { 3567 chan->conn_state & L2CAP_CONN_WAIT_F) {
3435 del_timer(&l2cap_pi(sk)->monitor_timer); 3568 del_timer(&chan->monitor_timer);
3436 if (l2cap_pi(sk)->unacked_frames > 0) 3569 if (chan->unacked_frames > 0)
3437 __mod_retrans_timer(); 3570 __mod_retrans_timer();
3438 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F; 3571 chan->conn_state &= ~L2CAP_CONN_WAIT_F;
3439 } 3572 }
3440 3573
3441 switch (rx_control & L2CAP_CTRL_SUPERVISE) { 3574 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3442 case L2CAP_SUPER_RCV_READY: 3575 case L2CAP_SUPER_RCV_READY:
3443 l2cap_data_channel_rrframe(sk, rx_control); 3576 l2cap_data_channel_rrframe(chan, rx_control);
3444 break; 3577 break;
3445 3578
3446 case L2CAP_SUPER_REJECT: 3579 case L2CAP_SUPER_REJECT:
3447 l2cap_data_channel_rejframe(sk, rx_control); 3580 l2cap_data_channel_rejframe(chan, rx_control);
3448 break; 3581 break;
3449 3582
3450 case L2CAP_SUPER_SELECT_REJECT: 3583 case L2CAP_SUPER_SELECT_REJECT:
3451 l2cap_data_channel_srejframe(sk, rx_control); 3584 l2cap_data_channel_srejframe(chan, rx_control);
3452 break; 3585 break;
3453 3586
3454 case L2CAP_SUPER_RCV_NOT_READY: 3587 case L2CAP_SUPER_RCV_NOT_READY:
3455 l2cap_data_channel_rnrframe(sk, rx_control); 3588 l2cap_data_channel_rnrframe(chan, rx_control);
3456 break; 3589 break;
3457 } 3590 }
3458 3591
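The dispatch above keys on the two supervisory bits of the 16-bit control word. A hedged sketch of that decode; the mask and values follow the L2CAP specification layout (S field in bits 2 and 3) and are assumed here rather than copied from the kernel headers:

	#include <stdio.h>
	#include <stdint.h>

	#define CTRL_SUPERVISE		0x000c	/* spec layout, assumed */
	#define SUPER_RCV_READY		0x0000	/* RR   */
	#define SUPER_REJECT		0x0004	/* REJ  */
	#define SUPER_RCV_NOT_READY	0x0008	/* RNR  */
	#define SUPER_SELECT_REJECT	0x000c	/* SREJ */

	static const char *super_name(uint16_t ctrl)
	{
		switch (ctrl & CTRL_SUPERVISE) {
		case SUPER_RCV_READY:		return "RR";
		case SUPER_REJECT:		return "REJ";
		case SUPER_RCV_NOT_READY:	return "RNR";
		case SUPER_SELECT_REJECT:	return "SREJ";
		}
		return "?";
	}

	int main(void)
	{
		printf("%s\n", super_name(0x0005));	/* prints REJ */
		return 0;
	}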

@@ -3462,7 +3595,7 @@ static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, str
3462 3595
3463static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb) 3596static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
3464{ 3597{
3465 struct l2cap_pinfo *pi = l2cap_pi(sk); 3598 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
3466 u16 control; 3599 u16 control;
3467 u8 req_seq; 3600 u8 req_seq;
3468 int len, next_tx_seq_offset, req_seq_offset; 3601 int len, next_tx_seq_offset, req_seq_offset;
@@ -3476,51 +3609,51 @@ static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
3476 * Receiver will miss it and start proper recovery 3609 * Receiver will miss it and start proper recovery
3477 * procedures and ask for retransmission. 3610 * procedures and ask for retransmission.
3478 */ 3611 */
3479 if (l2cap_check_fcs(pi, skb)) 3612 if (l2cap_check_fcs(chan, skb))
3480 goto drop; 3613 goto drop;
3481 3614
3482 if (__is_sar_start(control) && __is_iframe(control)) 3615 if (__is_sar_start(control) && __is_iframe(control))
3483 len -= 2; 3616 len -= 2;
3484 3617
3485 if (pi->fcs == L2CAP_FCS_CRC16) 3618 if (chan->fcs == L2CAP_FCS_CRC16)
3486 len -= 2; 3619 len -= 2;
3487 3620
3488 if (len > pi->mps) { 3621 if (len > chan->mps) {
3489 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET); 3622 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3490 goto drop; 3623 goto drop;
3491 } 3624 }
3492 3625
3493 req_seq = __get_reqseq(control); 3626 req_seq = __get_reqseq(control);
3494 req_seq_offset = (req_seq - pi->expected_ack_seq) % 64; 3627 req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
3495 if (req_seq_offset < 0) 3628 if (req_seq_offset < 0)
3496 req_seq_offset += 64; 3629 req_seq_offset += 64;
3497 3630
3498 next_tx_seq_offset = 3631 next_tx_seq_offset =
3499 (pi->next_tx_seq - pi->expected_ack_seq) % 64; 3632 (chan->next_tx_seq - chan->expected_ack_seq) % 64;
3500 if (next_tx_seq_offset < 0) 3633 if (next_tx_seq_offset < 0)
3501 next_tx_seq_offset += 64; 3634 next_tx_seq_offset += 64;
3502 3635
3503 /* check for invalid req-seq */ 3636 /* check for invalid req-seq */
3504 if (req_seq_offset > next_tx_seq_offset) { 3637 if (req_seq_offset > next_tx_seq_offset) {
3505 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET); 3638 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3506 goto drop; 3639 goto drop;
3507 } 3640 }
3508 3641
3509 if (__is_iframe(control)) { 3642 if (__is_iframe(control)) {
3510 if (len < 0) { 3643 if (len < 0) {
3511 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET); 3644 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3512 goto drop; 3645 goto drop;
3513 } 3646 }
3514 3647
3515 l2cap_data_channel_iframe(sk, control, skb); 3648 l2cap_data_channel_iframe(chan, control, skb);
3516 } else { 3649 } else {
3517 if (len != 0) { 3650 if (len != 0) {
3518 BT_ERR("%d", len); 3651 BT_ERR("%d", len);
3519 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET); 3652 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3520 goto drop; 3653 goto drop;
3521 } 3654 }
3522 3655
3523 l2cap_data_channel_sframe(sk, control, skb); 3656 l2cap_data_channel_sframe(chan, control, skb);
3524 } 3657 }
3525 3658
3526 return 0; 3659 return 0;
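The len bookkeeping in this function peels fixed-size pieces off the received payload, and a negative remainder marks a runt frame that the patch answers with a disconnect request. A standalone sketch of the same accounting:

	#include <stdio.h>

	/* 'len' starts after the 2-byte control field, then loses the
	 * optional SDU-length word (first SAR segment only) and the
	 * optional CRC16 FCS, mirroring the len -= 2 steps above. */
	static int iframe_data_len(int len, int sar_start, int fcs_crc16)
	{
		if (sar_start)
			len -= 2;
		if (fcs_crc16)
			len -= 2;
		return len;
	}

	int main(void)
	{
		printf("%d\n", iframe_data_len(6, 1, 1));	/* prints 2 */
		printf("%d\n", iframe_data_len(1, 1, 1));	/* prints -3: drop */
		return 0;
	}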
@@ -3532,33 +3665,35 @@ drop:
3532 3665
3533static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb) 3666static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3534{ 3667{
3535 struct sock *sk; 3668 struct l2cap_chan *chan;
3669 struct sock *sk = NULL;
3536 struct l2cap_pinfo *pi; 3670 struct l2cap_pinfo *pi;
3537 u16 control; 3671 u16 control;
3538 u8 tx_seq; 3672 u8 tx_seq;
3539 int len; 3673 int len;
3540 3674
3541 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid); 3675 chan = l2cap_get_chan_by_scid(conn, cid);
3542 if (!sk) { 3676 if (!chan) {
3543 BT_DBG("unknown cid 0x%4.4x", cid); 3677 BT_DBG("unknown cid 0x%4.4x", cid);
3544 goto drop; 3678 goto drop;
3545 } 3679 }
3546 3680
3681 sk = chan->sk;
3547 pi = l2cap_pi(sk); 3682 pi = l2cap_pi(sk);
3548 3683
3549 BT_DBG("sk %p, len %d", sk, skb->len); 3684 BT_DBG("chan %p, len %d", chan, skb->len);
3550 3685
3551 if (sk->sk_state != BT_CONNECTED) 3686 if (sk->sk_state != BT_CONNECTED)
3552 goto drop; 3687 goto drop;
3553 3688
3554 switch (pi->mode) { 3689 switch (chan->mode) {
3555 case L2CAP_MODE_BASIC: 3690 case L2CAP_MODE_BASIC:
3556 /* If the socket receive buffer overflows we drop data here, 3691 /* If the socket receive buffer overflows we drop data here,
3557 * which is *bad* because L2CAP has to be reliable. 3692 * which is *bad* because L2CAP has to be reliable.
3558 * But we don't have any other choice: L2CAP doesn't 3693 * But we don't have any other choice: L2CAP doesn't
3559 * provide a flow control mechanism. */ 3694 * provide a flow control mechanism. */
3560 3695
3561 if (pi->imtu < skb->len) 3696 if (chan->imtu < skb->len)
3562 goto drop; 3697 goto drop;
3563 3698
3564 if (!sock_queue_rcv_skb(sk, skb)) 3699 if (!sock_queue_rcv_skb(sk, skb))
@@ -3580,31 +3715,31 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
3580 skb_pull(skb, 2); 3715 skb_pull(skb, 2);
3581 len = skb->len; 3716 len = skb->len;
3582 3717
3583 if (l2cap_check_fcs(pi, skb)) 3718 if (l2cap_check_fcs(chan, skb))
3584 goto drop; 3719 goto drop;
3585 3720
3586 if (__is_sar_start(control)) 3721 if (__is_sar_start(control))
3587 len -= 2; 3722 len -= 2;
3588 3723
3589 if (pi->fcs == L2CAP_FCS_CRC16) 3724 if (chan->fcs == L2CAP_FCS_CRC16)
3590 len -= 2; 3725 len -= 2;
3591 3726
3592 if (len > pi->mps || len < 0 || __is_sframe(control)) 3727 if (len > chan->mps || len < 0 || __is_sframe(control))
3593 goto drop; 3728 goto drop;
3594 3729
3595 tx_seq = __get_txseq(control); 3730 tx_seq = __get_txseq(control);
3596 3731
3597 if (pi->expected_tx_seq == tx_seq) 3732 if (chan->expected_tx_seq == tx_seq)
3598 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64; 3733 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3599 else 3734 else
3600 pi->expected_tx_seq = (tx_seq + 1) % 64; 3735 chan->expected_tx_seq = (tx_seq + 1) % 64;
3601 3736
3602 l2cap_streaming_reassembly_sdu(sk, skb, control); 3737 l2cap_streaming_reassembly_sdu(chan, skb, control);
3603 3738
3604 goto done; 3739 goto done;
3605 3740
3606 default: 3741 default:
3607 BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode); 3742 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
3608 break; 3743 break;
3609 } 3744 }
3610 3745
@@ -3620,12 +3755,48 @@ done:
3620 3755
3621static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb) 3756static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3622{ 3757{
3623 struct sock *sk; 3758 struct sock *sk = NULL;
3759 struct l2cap_chan *chan;
3624 3760
3625 sk = l2cap_get_sock_by_psm(0, psm, conn->src); 3761 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
3626 if (!sk) 3762 if (!chan)
3763 goto drop;
3764
3765 sk = chan->sk;
3766
3767 bh_lock_sock(sk);
3768
3769 BT_DBG("sk %p, len %d", sk, skb->len);
3770
3771 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3772 goto drop;
3773
3774 if (l2cap_pi(sk)->chan->imtu < skb->len)
3775 goto drop;
3776
3777 if (!sock_queue_rcv_skb(sk, skb))
3778 goto done;
3779
3780drop:
3781 kfree_skb(skb);
3782
3783done:
3784 if (sk)
3785 bh_unlock_sock(sk);
3786 return 0;
3787}
3788
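Both connectionless receive paths here, and the new ATT one below, share one shape: look up the channel, lock the socket, validate state and MTU, queue or drop, and unlock through the shared exits. A userspace caricature of that goto discipline, with every name invented for the sketch:

	#include <stdio.h>
	#include <stdlib.h>

	struct pkt { size_t len; };

	static int deliver(struct pkt *p, size_t imtu, int connected)
	{
		if (!connected)
			goto drop;
		if (p->len > imtu)
			goto drop;

		printf("queued %zu bytes\n", p->len);
		free(p);	/* the socket owns the skb in the kernel version */
		return 0;

	drop:
		free(p);	/* kfree_skb() in the kernel version */
		return 0;
	}

	int main(void)
	{
		struct pkt *p = malloc(sizeof(*p));

		if (!p)
			return 1;
		p->len = 23;
		return deliver(p, 672, 1);	/* 672 = default L2CAP MTU */
	}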
3789static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
3790{
3791 struct sock *sk = NULL;
3792 struct l2cap_chan *chan;
3793
3794 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
3795 if (!chan)
3627 goto drop; 3796 goto drop;
3628 3797
3798 sk = chan->sk;
3799
3629 bh_lock_sock(sk); 3800 bh_lock_sock(sk);
3630 3801
3631 BT_DBG("sk %p, len %d", sk, skb->len); 3802 BT_DBG("sk %p, len %d", sk, skb->len);
@@ -3633,7 +3804,7 @@ static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, str
3633 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED) 3804 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3634 goto drop; 3805 goto drop;
3635 3806
3636 if (l2cap_pi(sk)->imtu < skb->len) 3807 if (l2cap_pi(sk)->chan->imtu < skb->len)
3637 goto drop; 3808 goto drop;
3638 3809
3639 if (!sock_queue_rcv_skb(sk, skb)) 3810 if (!sock_queue_rcv_skb(sk, skb))
@@ -3677,6 +3848,10 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3677 l2cap_conless_channel(conn, psm, skb); 3848 l2cap_conless_channel(conn, psm, skb);
3678 break; 3849 break;
3679 3850
3851 case L2CAP_CID_LE_DATA:
3852 l2cap_att_channel(conn, cid, skb);
3853 break;
3854
3680 default: 3855 default:
3681 l2cap_data_channel(conn, cid, skb); 3856 l2cap_data_channel(conn, cid, skb);
3682 break; 3857 break;
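The new case plugs LE attribute-protocol traffic into the existing fixed-CID demultiplexer. The CID values below are the specification-defined fixed channels; the print statements merely stand in for the per-channel handlers:

	#include <stdio.h>
	#include <stdint.h>

	#define CID_SIGNALING	0x0001
	#define CID_CONN_LESS	0x0002
	#define CID_LE_DATA	0x0004	/* ATT, routed to l2cap_att_channel() above */

	static void demux(uint16_t cid)
	{
		switch (cid) {
		case CID_SIGNALING:
			printf("signalling command\n");
			break;
		case CID_CONN_LESS:
			printf("connectionless data\n");
			break;
		case CID_LE_DATA:
			printf("ATT over LE\n");
			break;
		default:
			printf("connection-oriented data on CID 0x%04x\n",
			       (unsigned) cid);
			break;
		}
	}

	int main(void)
	{
		demux(CID_LE_DATA);
		demux(0x0040);	/* first dynamically allocated CID */
		return 0;
	}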
@@ -3688,8 +3863,7 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3688static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type) 3863static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3689{ 3864{
3690 int exact = 0, lm1 = 0, lm2 = 0; 3865 int exact = 0, lm1 = 0, lm2 = 0;
3691 register struct sock *sk; 3866 struct l2cap_chan *c;
3692 struct hlist_node *node;
3693 3867
3694 if (type != ACL_LINK) 3868 if (type != ACL_LINK)
3695 return -EINVAL; 3869 return -EINVAL;
@@ -3697,23 +3871,25 @@ static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3697 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr)); 3871 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3698 3872
3699 /* Find listening sockets and check their link_mode */ 3873 /* Find listening sockets and check their link_mode */
3700 read_lock(&l2cap_sk_list.lock); 3874 read_lock(&chan_list_lock);
3701 sk_for_each(sk, node, &l2cap_sk_list.head) { 3875 list_for_each_entry(c, &chan_list, global_l) {
3876 struct sock *sk = c->sk;
3877
3702 if (sk->sk_state != BT_LISTEN) 3878 if (sk->sk_state != BT_LISTEN)
3703 continue; 3879 continue;
3704 3880
3705 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) { 3881 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3706 lm1 |= HCI_LM_ACCEPT; 3882 lm1 |= HCI_LM_ACCEPT;
3707 if (l2cap_pi(sk)->role_switch) 3883 if (c->role_switch)
3708 lm1 |= HCI_LM_MASTER; 3884 lm1 |= HCI_LM_MASTER;
3709 exact++; 3885 exact++;
3710 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) { 3886 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3711 lm2 |= HCI_LM_ACCEPT; 3887 lm2 |= HCI_LM_ACCEPT;
3712 if (l2cap_pi(sk)->role_switch) 3888 if (c->role_switch)
3713 lm2 |= HCI_LM_MASTER; 3889 lm2 |= HCI_LM_MASTER;
3714 } 3890 }
3715 } 3891 }
3716 read_unlock(&l2cap_sk_list.lock); 3892 read_unlock(&chan_list_lock);
3717 3893
3718 return exact ? lm1 : lm2; 3894 return exact ? lm1 : lm2;
3719} 3895}
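l2cap_connect_ind() now walks a global channel list under a read lock instead of the old socket hash. A pthread rendition of that read-side pattern, with all names invented for the sketch:

	#include <pthread.h>
	#include <stdio.h>

	struct chan {
		int listening;
		struct chan *next;
	};

	static struct chan *chan_list;
	static pthread_rwlock_t chan_list_lock = PTHREAD_RWLOCK_INITIALIZER;

	static int count_listeners(void)
	{
		struct chan *c;
		int n = 0;

		pthread_rwlock_rdlock(&chan_list_lock);	/* readers may run concurrently */
		for (c = chan_list; c; c = c->next)
			if (c->listening)
				n++;
		pthread_rwlock_unlock(&chan_list_lock);
		return n;
	}

	int main(void)
	{
		struct chan a = { 1, NULL }, b = { 0, &a };

		chan_list = &b;
		printf("%d\n", count_listeners());	/* prints 1 */
		return 0;
	}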
@@ -3761,49 +3937,50 @@ static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3761 return 0; 3937 return 0;
3762} 3938}
3763 3939
3764static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt) 3940static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
3765{ 3941{
3942 struct sock *sk = chan->sk;
3943
3766 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM) 3944 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
3767 return; 3945 return;
3768 3946
3769 if (encrypt == 0x00) { 3947 if (encrypt == 0x00) {
3770 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) { 3948 if (chan->sec_level == BT_SECURITY_MEDIUM) {
3771 l2cap_sock_clear_timer(sk); 3949 l2cap_sock_clear_timer(sk);
3772 l2cap_sock_set_timer(sk, HZ * 5); 3950 l2cap_sock_set_timer(sk, HZ * 5);
3773 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH) 3951 } else if (chan->sec_level == BT_SECURITY_HIGH)
3774 __l2cap_sock_close(sk, ECONNREFUSED); 3952 __l2cap_sock_close(sk, ECONNREFUSED);
3775 } else { 3953 } else {
3776 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) 3954 if (chan->sec_level == BT_SECURITY_MEDIUM)
3777 l2cap_sock_clear_timer(sk); 3955 l2cap_sock_clear_timer(sk);
3778 } 3956 }
3779} 3957}
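l2cap_check_encryption() encodes a small policy table: losing encryption gives a MEDIUM-security channel a 5-second grace timer, closes a HIGH-security channel outright, and regaining encryption clears the timer. A sketch of that decision with invented enum values:

	#include <stdio.h>

	enum sec { SEC_SDP, SEC_LOW, SEC_MEDIUM, SEC_HIGH };	/* illustrative */

	static const char *on_encrypt_change(enum sec level, int encrypt)
	{
		if (!encrypt) {
			if (level == SEC_MEDIUM)
				return "arm 5s timer";
			if (level == SEC_HIGH)
				return "close with ECONNREFUSED";
			return "ignore";
		}
		if (level == SEC_MEDIUM)
			return "clear timer";
		return "ignore";
	}

	int main(void)
	{
		printf("%s\n", on_encrypt_change(SEC_HIGH, 0));
		return 0;
	}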
3780 3958
3781static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) 3959static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
3782{ 3960{
3783 struct l2cap_chan_list *l;
3784 struct l2cap_conn *conn = hcon->l2cap_data; 3961 struct l2cap_conn *conn = hcon->l2cap_data;
3785 struct sock *sk; 3962 struct l2cap_chan *chan;
3786 3963
3787 if (!conn) 3964 if (!conn)
3788 return 0; 3965 return 0;
3789 3966
3790 l = &conn->chan_list;
3791
3792 BT_DBG("conn %p", conn); 3967 BT_DBG("conn %p", conn);
3793 3968
3794 read_lock(&l->lock); 3969 read_lock(&conn->chan_lock);
3970
3971 list_for_each_entry(chan, &conn->chan_l, list) {
3972 struct sock *sk = chan->sk;
3795 3973
3796 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
3797 bh_lock_sock(sk); 3974 bh_lock_sock(sk);
3798 3975
3799 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) { 3976 if (chan->conf_state & L2CAP_CONF_CONNECT_PEND) {
3800 bh_unlock_sock(sk); 3977 bh_unlock_sock(sk);
3801 continue; 3978 continue;
3802 } 3979 }
3803 3980
3804 if (!status && (sk->sk_state == BT_CONNECTED || 3981 if (!status && (sk->sk_state == BT_CONNECTED ||
3805 sk->sk_state == BT_CONFIG)) { 3982 sk->sk_state == BT_CONFIG)) {
3806 l2cap_check_encryption(sk, encrypt); 3983 l2cap_check_encryption(chan, encrypt);
3807 bh_unlock_sock(sk); 3984 bh_unlock_sock(sk);
3808 continue; 3985 continue;
3809 } 3986 }
@@ -3811,13 +3988,13 @@ static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
3811 if (sk->sk_state == BT_CONNECT) { 3988 if (sk->sk_state == BT_CONNECT) {
3812 if (!status) { 3989 if (!status) {
3813 struct l2cap_conn_req req; 3990 struct l2cap_conn_req req;
3814 req.scid = cpu_to_le16(l2cap_pi(sk)->scid); 3991 req.scid = cpu_to_le16(chan->scid);
3815 req.psm = l2cap_pi(sk)->psm; 3992 req.psm = chan->psm;
3816 3993
3817 l2cap_pi(sk)->ident = l2cap_get_ident(conn); 3994 chan->ident = l2cap_get_ident(conn);
3818 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND; 3995 chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
3819 3996
3820 l2cap_send_cmd(conn, l2cap_pi(sk)->ident, 3997 l2cap_send_cmd(conn, chan->ident,
3821 L2CAP_CONN_REQ, sizeof(req), &req); 3998 L2CAP_CONN_REQ, sizeof(req), &req);
3822 } else { 3999 } else {
3823 l2cap_sock_clear_timer(sk); 4000 l2cap_sock_clear_timer(sk);
@@ -3836,18 +4013,18 @@ static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
3836 result = L2CAP_CR_SEC_BLOCK; 4013 result = L2CAP_CR_SEC_BLOCK;
3837 } 4014 }
3838 4015
3839 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid); 4016 rsp.scid = cpu_to_le16(chan->dcid);
3840 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid); 4017 rsp.dcid = cpu_to_le16(chan->scid);
3841 rsp.result = cpu_to_le16(result); 4018 rsp.result = cpu_to_le16(result);
3842 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); 4019 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3843 l2cap_send_cmd(conn, l2cap_pi(sk)->ident, 4020 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
3844 L2CAP_CONN_RSP, sizeof(rsp), &rsp); 4021 sizeof(rsp), &rsp);
3845 } 4022 }
3846 4023
3847 bh_unlock_sock(sk); 4024 bh_unlock_sock(sk);
3848 } 4025 }
3849 4026
3850 read_unlock(&l->lock); 4027 read_unlock(&conn->chan_lock);
3851 4028
3852 return 0; 4029 return 0;
3853} 4030}
@@ -3866,7 +4043,7 @@ static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 fl
3866 4043
3867 if (!(flags & ACL_CONT)) { 4044 if (!(flags & ACL_CONT)) {
3868 struct l2cap_hdr *hdr; 4045 struct l2cap_hdr *hdr;
3869 struct sock *sk; 4046 struct l2cap_chan *chan;
3870 u16 cid; 4047 u16 cid;
3871 int len; 4048 int len;
3872 4049
@@ -3904,18 +4081,21 @@ static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 fl
3904 goto drop; 4081 goto drop;
3905 } 4082 }
3906 4083
3907 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid); 4084 chan = l2cap_get_chan_by_scid(conn, cid);
3908 4085
3909 if (sk && l2cap_pi(sk)->imtu < len - L2CAP_HDR_SIZE) { 4086 if (chan && chan->sk) {
3910 BT_ERR("Frame exceeding recv MTU (len %d, MTU %d)", 4087 struct sock *sk = chan->sk;
3911 len, l2cap_pi(sk)->imtu);
3912 bh_unlock_sock(sk);
3913 l2cap_conn_unreliable(conn, ECOMM);
3914 goto drop;
3915 }
3916 4088
3917 if (sk) 4089 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4090 BT_ERR("Frame exceeding recv MTU (len %d, "
4091 "MTU %d)", len,
4092 chan->imtu);
4093 bh_unlock_sock(sk);
4094 l2cap_conn_unreliable(conn, ECOMM);
4095 goto drop;
4096 }
3918 bh_unlock_sock(sk); 4097 bh_unlock_sock(sk);
4098 }
3919 4099
3920 /* Allocate skb for the complete frame (with header) */ 4100 /* Allocate skb for the complete frame (with header) */
3921 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC); 4101 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
@@ -3962,24 +4142,22 @@ drop:
3962 4142
3963static int l2cap_debugfs_show(struct seq_file *f, void *p) 4143static int l2cap_debugfs_show(struct seq_file *f, void *p)
3964{ 4144{
3965 struct sock *sk; 4145 struct l2cap_chan *c;
3966 struct hlist_node *node;
3967 4146
3968 read_lock_bh(&l2cap_sk_list.lock); 4147 read_lock_bh(&chan_list_lock);
3969 4148
3970 sk_for_each(sk, node, &l2cap_sk_list.head) { 4149 list_for_each_entry(c, &chan_list, global_l) {
3971 struct l2cap_pinfo *pi = l2cap_pi(sk); 4150 struct sock *sk = c->sk;
3972 4151
3973 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n", 4152 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
3974 batostr(&bt_sk(sk)->src), 4153 batostr(&bt_sk(sk)->src),
3975 batostr(&bt_sk(sk)->dst), 4154 batostr(&bt_sk(sk)->dst),
3976 sk->sk_state, __le16_to_cpu(pi->psm), 4155 sk->sk_state, __le16_to_cpu(c->psm),
3977 pi->scid, pi->dcid, 4156 c->scid, c->dcid, c->imtu, c->omtu,
3978 pi->imtu, pi->omtu, pi->sec_level, 4157 c->sec_level, c->mode);
3979 pi->mode);
3980 } 4158 }
3981 4159
3982 read_unlock_bh(&l2cap_sk_list.lock); 4160 read_unlock_bh(&chan_list_lock);
3983 4161
3984 return 0; 4162 return 0;
3985} 4163}
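The debugfs dump closes out the conversion in this file: every per-channel field now hangs off struct l2cap_chan instead of l2cap_pinfo, and the socket keeps only a pointer to its channel. The shape of the move, as an abridged before/after sketch with the field set and names trimmed for illustration:

	#include <stdint.h>
	#include <stdio.h>

	/* Before: protocol state embedded in the socket's pinfo. */
	struct l2cap_pinfo_old {
		uint16_t psm, scid, dcid, imtu, omtu;
		/* ... */
	};

	/* After: state lives in a channel object the socket points at,
	 * so lookups and the ERTM machinery never need a struct sock. */
	struct l2cap_chan_new {
		struct sock *sk;	/* back-pointer for the socket layer */
		uint16_t psm, scid, dcid, imtu, omtu;
	};

	struct l2cap_pinfo_new {
		struct l2cap_chan_new *chan;
	};

	int main(void)
	{
		struct l2cap_chan_new ch = { 0 };
		struct l2cap_pinfo_new pi = { &ch };

		ch.imtu = 672;
		printf("imtu via chan: %u\n", (unsigned) pi.chan->imtu);
		return 0;
	}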
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 299fe56a9668..18dc9888d8c2 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -30,6 +30,8 @@
30#include <net/bluetooth/hci_core.h> 30#include <net/bluetooth/hci_core.h>
31#include <net/bluetooth/l2cap.h> 31#include <net/bluetooth/l2cap.h>
32 32
33static const struct proto_ops l2cap_sock_ops;
34
33/* ---- L2CAP timers ---- */ 35/* ---- L2CAP timers ---- */
34static void l2cap_sock_timeout(unsigned long arg) 36static void l2cap_sock_timeout(unsigned long arg)
35{ 37{
@@ -51,7 +53,7 @@ static void l2cap_sock_timeout(unsigned long arg)
51 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG) 53 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
52 reason = ECONNREFUSED; 54 reason = ECONNREFUSED;
53 else if (sk->sk_state == BT_CONNECT && 55 else if (sk->sk_state == BT_CONNECT &&
54 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP) 56 l2cap_pi(sk)->chan->sec_level != BT_SECURITY_SDP)
55 reason = ECONNREFUSED; 57 reason = ECONNREFUSED;
56 else 58 else
57 reason = ETIMEDOUT; 59 reason = ETIMEDOUT;
@@ -76,21 +78,10 @@ void l2cap_sock_clear_timer(struct sock *sk)
76 sk_stop_timer(sk, &sk->sk_timer); 78 sk_stop_timer(sk, &sk->sk_timer);
77} 79}
78 80
79static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
80{
81 struct sock *sk;
82 struct hlist_node *node;
83 sk_for_each(sk, node, &l2cap_sk_list.head)
84 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
85 goto found;
86 sk = NULL;
87found:
88 return sk;
89}
90
91static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen) 81static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
92{ 82{
93 struct sock *sk = sock->sk; 83 struct sock *sk = sock->sk;
84 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
94 struct sockaddr_l2 la; 85 struct sockaddr_l2 la;
95 int len, err = 0; 86 int len, err = 0;
96 87
@@ -129,26 +120,20 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
129 } 120 }
130 } 121 }
131 122
132 write_lock_bh(&l2cap_sk_list.lock); 123 if (la.l2_cid)
124 err = l2cap_add_scid(chan, la.l2_cid);
125 else
126 err = l2cap_add_psm(chan, &la.l2_bdaddr, la.l2_psm);
133 127
134 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) { 128 if (err < 0)
135 err = -EADDRINUSE; 129 goto done;
136 } else {
137 /* Save source address */
138 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
139 l2cap_pi(sk)->psm = la.l2_psm;
140 l2cap_pi(sk)->sport = la.l2_psm;
141 sk->sk_state = BT_BOUND;
142
143 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
144 __le16_to_cpu(la.l2_psm) == 0x0003)
145 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
146 }
147 130
148 if (la.l2_cid) 131 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
149 l2cap_pi(sk)->scid = la.l2_cid; 132 __le16_to_cpu(la.l2_psm) == 0x0003)
133 chan->sec_level = BT_SECURITY_SDP;
150 134
151 write_unlock_bh(&l2cap_sk_list.lock); 135 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
136 sk->sk_state = BT_BOUND;
152 137
153done: 138done:
154 release_sock(sk); 139 release_sock(sk);
@@ -158,6 +143,7 @@ done:
158static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags) 143static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
159{ 144{
160 struct sock *sk = sock->sk; 145 struct sock *sk = sock->sk;
146 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
161 struct sockaddr_l2 la; 147 struct sockaddr_l2 la;
162 int len, err = 0; 148 int len, err = 0;
163 149
@@ -182,7 +168,7 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al
182 goto done; 168 goto done;
183 } 169 }
184 170
185 switch (l2cap_pi(sk)->mode) { 171 switch (chan->mode) {
186 case L2CAP_MODE_BASIC: 172 case L2CAP_MODE_BASIC:
187 break; 173 break;
188 case L2CAP_MODE_ERTM: 174 case L2CAP_MODE_ERTM:
@@ -226,10 +212,10 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al
226 212
227 /* Set destination address and psm */ 213 /* Set destination address and psm */
228 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr); 214 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
229 l2cap_pi(sk)->psm = la.l2_psm; 215 chan->psm = la.l2_psm;
230 l2cap_pi(sk)->dcid = la.l2_cid; 216 chan->dcid = la.l2_cid;
231 217
232 err = l2cap_do_connect(sk); 218 err = l2cap_chan_connect(l2cap_pi(sk)->chan);
233 if (err) 219 if (err)
234 goto done; 220 goto done;
235 221
@@ -244,6 +230,7 @@ done:
244static int l2cap_sock_listen(struct socket *sock, int backlog) 230static int l2cap_sock_listen(struct socket *sock, int backlog)
245{ 231{
246 struct sock *sk = sock->sk; 232 struct sock *sk = sock->sk;
233 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
247 int err = 0; 234 int err = 0;
248 235
249 BT_DBG("sk %p backlog %d", sk, backlog); 236 BT_DBG("sk %p backlog %d", sk, backlog);
@@ -256,7 +243,7 @@ static int l2cap_sock_listen(struct socket *sock, int backlog)
256 goto done; 243 goto done;
257 } 244 }
258 245
259 switch (l2cap_pi(sk)->mode) { 246 switch (chan->mode) {
260 case L2CAP_MODE_BASIC: 247 case L2CAP_MODE_BASIC:
261 break; 248 break;
262 case L2CAP_MODE_ERTM: 249 case L2CAP_MODE_ERTM:
@@ -269,28 +256,6 @@ static int l2cap_sock_listen(struct socket *sock, int backlog)
269 goto done; 256 goto done;
270 } 257 }
271 258
272 if (!l2cap_pi(sk)->psm && !l2cap_pi(sk)->dcid) {
273 bdaddr_t *src = &bt_sk(sk)->src;
274 u16 psm;
275
276 err = -EINVAL;
277
278 write_lock_bh(&l2cap_sk_list.lock);
279
280 for (psm = 0x1001; psm < 0x1100; psm += 2)
281 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
282 l2cap_pi(sk)->psm = cpu_to_le16(psm);
283 l2cap_pi(sk)->sport = cpu_to_le16(psm);
284 err = 0;
285 break;
286 }
287
288 write_unlock_bh(&l2cap_sk_list.lock);
289
290 if (err < 0)
291 goto done;
292 }
293
294 sk->sk_max_ack_backlog = backlog; 259 sk->sk_max_ack_backlog = backlog;
295 sk->sk_ack_backlog = 0; 260 sk->sk_ack_backlog = 0;
296 sk->sk_state = BT_LISTEN; 261 sk->sk_state = BT_LISTEN;
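The block deleted above probed odd PSMs in the dynamic range 0x1001..0x10ff until it found a free one; that allocation now happens in l2cap_add_psm() at bind time. A standalone rendition of the removed loop, with a toy in-use predicate standing in for the real address lookup:

	#include <stdio.h>
	#include <stdint.h>

	static int psm_in_use(uint16_t psm)
	{
		return psm < 0x1005;	/* pretend the first two odd PSMs are taken */
	}

	static int alloc_dynamic_psm(uint16_t *out)
	{
		uint16_t psm;

		/* valid PSMs are odd, hence the step of 2 */
		for (psm = 0x1001; psm < 0x1100; psm += 2)
			if (!psm_in_use(psm)) {
				*out = psm;
				return 0;
			}
		return -1;	/* -EINVAL in the original */
	}

	int main(void)
	{
		uint16_t psm;

		if (!alloc_dynamic_psm(&psm))
			printf("got PSM 0x%04x\n", (unsigned) psm);	/* 0x1005 */
		return 0;
	}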
@@ -360,6 +325,7 @@ static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *l
360{ 325{
361 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr; 326 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
362 struct sock *sk = sock->sk; 327 struct sock *sk = sock->sk;
328 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
363 329
364 BT_DBG("sock %p, sk %p", sock, sk); 330 BT_DBG("sock %p, sk %p", sock, sk);
365 331
@@ -367,13 +333,13 @@ static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *l
367 *len = sizeof(struct sockaddr_l2); 333 *len = sizeof(struct sockaddr_l2);
368 334
369 if (peer) { 335 if (peer) {
370 la->l2_psm = l2cap_pi(sk)->psm; 336 la->l2_psm = chan->psm;
371 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst); 337 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
372 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid); 338 la->l2_cid = cpu_to_le16(chan->dcid);
373 } else { 339 } else {
374 la->l2_psm = l2cap_pi(sk)->sport; 340 la->l2_psm = chan->sport;
375 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src); 341 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
376 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid); 342 la->l2_cid = cpu_to_le16(chan->scid);
377 } 343 }
378 344
379 return 0; 345 return 0;
@@ -382,6 +348,7 @@ static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *l
382static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen) 348static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
383{ 349{
384 struct sock *sk = sock->sk; 350 struct sock *sk = sock->sk;
351 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
385 struct l2cap_options opts; 352 struct l2cap_options opts;
386 struct l2cap_conninfo cinfo; 353 struct l2cap_conninfo cinfo;
387 int len, err = 0; 354 int len, err = 0;
@@ -397,13 +364,13 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us
397 switch (optname) { 364 switch (optname) {
398 case L2CAP_OPTIONS: 365 case L2CAP_OPTIONS:
399 memset(&opts, 0, sizeof(opts)); 366 memset(&opts, 0, sizeof(opts));
400 opts.imtu = l2cap_pi(sk)->imtu; 367 opts.imtu = chan->imtu;
401 opts.omtu = l2cap_pi(sk)->omtu; 368 opts.omtu = chan->omtu;
402 opts.flush_to = l2cap_pi(sk)->flush_to; 369 opts.flush_to = chan->flush_to;
403 opts.mode = l2cap_pi(sk)->mode; 370 opts.mode = chan->mode;
404 opts.fcs = l2cap_pi(sk)->fcs; 371 opts.fcs = chan->fcs;
405 opts.max_tx = l2cap_pi(sk)->max_tx; 372 opts.max_tx = chan->max_tx;
406 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win; 373 opts.txwin_size = (__u16)chan->tx_win;
407 374
408 len = min_t(unsigned int, len, sizeof(opts)); 375 len = min_t(unsigned int, len, sizeof(opts));
409 if (copy_to_user(optval, (char *) &opts, len)) 376 if (copy_to_user(optval, (char *) &opts, len))
@@ -412,7 +379,7 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us
412 break; 379 break;
413 380
414 case L2CAP_LM: 381 case L2CAP_LM:
415 switch (l2cap_pi(sk)->sec_level) { 382 switch (chan->sec_level) {
416 case BT_SECURITY_LOW: 383 case BT_SECURITY_LOW:
417 opt = L2CAP_LM_AUTH; 384 opt = L2CAP_LM_AUTH;
418 break; 385 break;
@@ -428,10 +395,10 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us
428 break; 395 break;
429 } 396 }
430 397
431 if (l2cap_pi(sk)->role_switch) 398 if (chan->role_switch)
432 opt |= L2CAP_LM_MASTER; 399 opt |= L2CAP_LM_MASTER;
433 400
434 if (l2cap_pi(sk)->force_reliable) 401 if (chan->force_reliable)
435 opt |= L2CAP_LM_RELIABLE; 402 opt |= L2CAP_LM_RELIABLE;
436 403
437 if (put_user(opt, (u32 __user *) optval)) 404 if (put_user(opt, (u32 __user *) optval))
@@ -446,8 +413,8 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us
446 break; 413 break;
447 } 414 }
448 415
449 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle; 416 cinfo.hci_handle = chan->conn->hcon->handle;
450 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3); 417 memcpy(cinfo.dev_class, chan->conn->hcon->dev_class, 3);
451 418
452 len = min_t(unsigned int, len, sizeof(cinfo)); 419 len = min_t(unsigned int, len, sizeof(cinfo));
453 if (copy_to_user(optval, (char *) &cinfo, len)) 420 if (copy_to_user(optval, (char *) &cinfo, len))
@@ -467,6 +434,7 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us
467static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) 434static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
468{ 435{
469 struct sock *sk = sock->sk; 436 struct sock *sk = sock->sk;
437 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
470 struct bt_security sec; 438 struct bt_security sec;
471 int len, err = 0; 439 int len, err = 0;
472 440
@@ -491,7 +459,7 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
491 break; 459 break;
492 } 460 }
493 461
494 sec.level = l2cap_pi(sk)->sec_level; 462 sec.level = chan->sec_level;
495 463
496 len = min_t(unsigned int, len, sizeof(sec)); 464 len = min_t(unsigned int, len, sizeof(sec));
497 if (copy_to_user(optval, (char *) &sec, len)) 465 if (copy_to_user(optval, (char *) &sec, len))
@@ -511,7 +479,7 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
511 break; 479 break;
512 480
513 case BT_FLUSHABLE: 481 case BT_FLUSHABLE:
514 if (put_user(l2cap_pi(sk)->flushable, (u32 __user *) optval)) 482 if (put_user(chan->flushable, (u32 __user *) optval))
515 err = -EFAULT; 483 err = -EFAULT;
516 484
517 break; 485 break;
@@ -528,6 +496,7 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
528static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen) 496static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
529{ 497{
530 struct sock *sk = sock->sk; 498 struct sock *sk = sock->sk;
499 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
531 struct l2cap_options opts; 500 struct l2cap_options opts;
532 int len, err = 0; 501 int len, err = 0;
533 u32 opt; 502 u32 opt;
@@ -543,13 +512,13 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
543 break; 512 break;
544 } 513 }
545 514
546 opts.imtu = l2cap_pi(sk)->imtu; 515 opts.imtu = chan->imtu;
547 opts.omtu = l2cap_pi(sk)->omtu; 516 opts.omtu = chan->omtu;
548 opts.flush_to = l2cap_pi(sk)->flush_to; 517 opts.flush_to = chan->flush_to;
549 opts.mode = l2cap_pi(sk)->mode; 518 opts.mode = chan->mode;
550 opts.fcs = l2cap_pi(sk)->fcs; 519 opts.fcs = chan->fcs;
551 opts.max_tx = l2cap_pi(sk)->max_tx; 520 opts.max_tx = chan->max_tx;
552 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win; 521 opts.txwin_size = (__u16)chan->tx_win;
553 522
554 len = min_t(unsigned int, sizeof(opts), optlen); 523 len = min_t(unsigned int, sizeof(opts), optlen);
555 if (copy_from_user((char *) &opts, optval, len)) { 524 if (copy_from_user((char *) &opts, optval, len)) {
@@ -562,10 +531,10 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
562 break; 531 break;
563 } 532 }
564 533
565 l2cap_pi(sk)->mode = opts.mode; 534 chan->mode = opts.mode;
566 switch (l2cap_pi(sk)->mode) { 535 switch (chan->mode) {
567 case L2CAP_MODE_BASIC: 536 case L2CAP_MODE_BASIC:
568 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE; 537 chan->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
569 break; 538 break;
570 case L2CAP_MODE_ERTM: 539 case L2CAP_MODE_ERTM:
571 case L2CAP_MODE_STREAMING: 540 case L2CAP_MODE_STREAMING:
@@ -577,11 +546,11 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
577 break; 546 break;
578 } 547 }
579 548
580 l2cap_pi(sk)->imtu = opts.imtu; 549 chan->imtu = opts.imtu;
581 l2cap_pi(sk)->omtu = opts.omtu; 550 chan->omtu = opts.omtu;
582 l2cap_pi(sk)->fcs = opts.fcs; 551 chan->fcs = opts.fcs;
583 l2cap_pi(sk)->max_tx = opts.max_tx; 552 chan->max_tx = opts.max_tx;
584 l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size; 553 chan->tx_win = (__u8)opts.txwin_size;
585 break; 554 break;
586 555
587 case L2CAP_LM: 556 case L2CAP_LM:
@@ -591,14 +560,14 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
591 } 560 }
592 561
593 if (opt & L2CAP_LM_AUTH) 562 if (opt & L2CAP_LM_AUTH)
594 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW; 563 chan->sec_level = BT_SECURITY_LOW;
595 if (opt & L2CAP_LM_ENCRYPT) 564 if (opt & L2CAP_LM_ENCRYPT)
596 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM; 565 chan->sec_level = BT_SECURITY_MEDIUM;
597 if (opt & L2CAP_LM_SECURE) 566 if (opt & L2CAP_LM_SECURE)
598 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH; 567 chan->sec_level = BT_SECURITY_HIGH;
599 568
600 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER); 569 chan->role_switch = (opt & L2CAP_LM_MASTER);
601 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE); 570 chan->force_reliable = (opt & L2CAP_LM_RELIABLE);
602 break; 571 break;
603 572
604 default: 573 default:
@@ -613,6 +582,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
613static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) 582static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
614{ 583{
615 struct sock *sk = sock->sk; 584 struct sock *sk = sock->sk;
585 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
616 struct bt_security sec; 586 struct bt_security sec;
617 int len, err = 0; 587 int len, err = 0;
618 u32 opt; 588 u32 opt;
@@ -649,7 +619,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
649 break; 619 break;
650 } 620 }
651 621
652 l2cap_pi(sk)->sec_level = sec.level; 622 chan->sec_level = sec.level;
653 break; 623 break;
654 624
655 case BT_DEFER_SETUP: 625 case BT_DEFER_SETUP:
@@ -678,7 +648,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
678 } 648 }
679 649
680 if (opt == BT_FLUSHABLE_OFF) { 650 if (opt == BT_FLUSHABLE_OFF) {
681 struct l2cap_conn *conn = l2cap_pi(sk)->conn; 651 struct l2cap_conn *conn = chan->conn;
682 /* proceed further only when we have l2cap_conn and 652 /* proceed further only when we have l2cap_conn and
683 No Flush support in the LM */ 653 No Flush support in the LM */
684 if (!conn || !lmp_no_flush_capable(conn->hcon->hdev)) { 654 if (!conn || !lmp_no_flush_capable(conn->hcon->hdev)) {
@@ -687,7 +657,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
687 } 657 }
688 } 658 }
689 659
690 l2cap_pi(sk)->flushable = opt; 660 chan->flushable = opt;
691 break; 661 break;
692 662
693 default: 663 default:
@@ -702,7 +672,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
702static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len) 672static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
703{ 673{
704 struct sock *sk = sock->sk; 674 struct sock *sk = sock->sk;
705 struct l2cap_pinfo *pi = l2cap_pi(sk); 675 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
706 struct sk_buff *skb; 676 struct sk_buff *skb;
707 u16 control; 677 u16 control;
708 int err; 678 int err;
@@ -725,74 +695,77 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
725 695
726 /* Connectionless channel */ 696 /* Connectionless channel */
727 if (sk->sk_type == SOCK_DGRAM) { 697 if (sk->sk_type == SOCK_DGRAM) {
728 skb = l2cap_create_connless_pdu(sk, msg, len); 698 skb = l2cap_create_connless_pdu(chan, msg, len);
729 if (IS_ERR(skb)) { 699 if (IS_ERR(skb)) {
730 err = PTR_ERR(skb); 700 err = PTR_ERR(skb);
731 } else { 701 } else {
732 l2cap_do_send(sk, skb); 702 l2cap_do_send(chan, skb);
733 err = len; 703 err = len;
734 } 704 }
735 goto done; 705 goto done;
736 } 706 }
737 707
738 switch (pi->mode) { 708 switch (chan->mode) {
739 case L2CAP_MODE_BASIC: 709 case L2CAP_MODE_BASIC:
740 /* Check outgoing MTU */ 710 /* Check outgoing MTU */
741 if (len > pi->omtu) { 711 if (len > chan->omtu) {
742 err = -EMSGSIZE; 712 err = -EMSGSIZE;
743 goto done; 713 goto done;
744 } 714 }
745 715
746 /* Create a basic PDU */ 716 /* Create a basic PDU */
747 skb = l2cap_create_basic_pdu(sk, msg, len); 717 skb = l2cap_create_basic_pdu(chan, msg, len);
748 if (IS_ERR(skb)) { 718 if (IS_ERR(skb)) {
749 err = PTR_ERR(skb); 719 err = PTR_ERR(skb);
750 goto done; 720 goto done;
751 } 721 }
752 722
753 l2cap_do_send(sk, skb); 723 l2cap_do_send(chan, skb);
754 err = len; 724 err = len;
755 break; 725 break;
756 726
757 case L2CAP_MODE_ERTM: 727 case L2CAP_MODE_ERTM:
758 case L2CAP_MODE_STREAMING: 728 case L2CAP_MODE_STREAMING:
759 /* Entire SDU fits into one PDU */ 729 /* Entire SDU fits into one PDU */
760 if (len <= pi->remote_mps) { 730 if (len <= chan->remote_mps) {
761 control = L2CAP_SDU_UNSEGMENTED; 731 control = L2CAP_SDU_UNSEGMENTED;
762 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0); 732 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
733 0);
763 if (IS_ERR(skb)) { 734 if (IS_ERR(skb)) {
764 err = PTR_ERR(skb); 735 err = PTR_ERR(skb);
765 goto done; 736 goto done;
766 } 737 }
767 __skb_queue_tail(TX_QUEUE(sk), skb); 738 __skb_queue_tail(&chan->tx_q, skb);
768 739
769 if (sk->sk_send_head == NULL) 740 if (chan->tx_send_head == NULL)
770 sk->sk_send_head = skb; 741 chan->tx_send_head = skb;
771 742
772 } else { 743 } else {
773 /* Segment SDU into multiple PDUs */ 744 /* Segment SDU into multiple PDUs */
774 err = l2cap_sar_segment_sdu(sk, msg, len); 745 err = l2cap_sar_segment_sdu(chan, msg, len);
775 if (err < 0) 746 if (err < 0)
776 goto done; 747 goto done;
777 } 748 }
778 749
779 if (pi->mode == L2CAP_MODE_STREAMING) { 750 if (chan->mode == L2CAP_MODE_STREAMING) {
780 l2cap_streaming_send(sk); 751 l2cap_streaming_send(chan);
781 } else { 752 err = len;
782 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) && 753 break;
783 (pi->conn_state & L2CAP_CONN_WAIT_F)) { 754 }
784 err = len; 755
785 break; 756 if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
786 } 757 (chan->conn_state & L2CAP_CONN_WAIT_F)) {
787 err = l2cap_ertm_send(sk); 758 err = len;
759 break;
788 } 760 }
761 err = l2cap_ertm_send(chan);
789 762
790 if (err >= 0) 763 if (err >= 0)
791 err = len; 764 err = len;
792 break; 765 break;
793 766
794 default: 767 default:
795 BT_DBG("bad state %1.1x", pi->mode); 768 BT_DBG("bad state %1.1x", chan->mode);
796 err = -EBADFD; 769 err = -EBADFD;
797 } 770 }
798 771
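In ERTM mode, sendmsg() appends the freshly built PDU to the channel's tx queue and, when nothing is pending, marks it as the next frame to transmit; that is the tx_send_head bookkeeping above. A linked-list sketch of the invariant, with all structures invented for the example:

	#include <stdio.h>

	struct pdu {
		int seq;
		struct pdu *next;
	};

	struct txq {
		struct pdu *head, *tail;
		struct pdu *send_head;	/* next unsent PDU, NULL when caught up */
	};

	static void queue_pdu(struct txq *q, struct pdu *p)
	{
		p->next = NULL;
		if (q->tail)
			q->tail->next = p;
		else
			q->head = p;
		q->tail = p;

		if (!q->send_head)	/* queue was fully sent: restart here */
			q->send_head = p;
	}

	int main(void)
	{
		struct txq q = { 0 };
		struct pdu a = { 1 }, b = { 2 };

		queue_pdu(&q, &a);
		queue_pdu(&q, &b);
		printf("next to send: %d\n", q.send_head->seq);	/* prints 1 */
		return 0;
	}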
@@ -808,29 +781,9 @@ static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct ms
808 lock_sock(sk); 781 lock_sock(sk);
809 782
810 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) { 783 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
811 struct l2cap_conn_rsp rsp;
812 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
813 u8 buf[128];
814
815 sk->sk_state = BT_CONFIG; 784 sk->sk_state = BT_CONFIG;
816 785
817 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid); 786 __l2cap_connect_rsp_defer(l2cap_pi(sk)->chan);
818 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
819 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
820 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
821 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
822 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
823
824 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) {
825 release_sock(sk);
826 return 0;
827 }
828
829 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
830 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
831 l2cap_build_conf_req(sk, buf), buf);
832 l2cap_pi(sk)->num_conf_req++;
833
834 release_sock(sk); 787 release_sock(sk);
835 return 0; 788 return 0;
836 } 789 }
@@ -854,7 +807,8 @@ void l2cap_sock_kill(struct sock *sk)
854 BT_DBG("sk %p state %d", sk, sk->sk_state); 807 BT_DBG("sk %p state %d", sk, sk->sk_state);
855 808
856 /* Kill poor orphan */ 809 /* Kill poor orphan */
857 bt_sock_unlink(&l2cap_sk_list, sk); 810
811 l2cap_chan_destroy(l2cap_pi(sk)->chan);
858 sock_set_flag(sk, SOCK_DEAD); 812 sock_set_flag(sk, SOCK_DEAD);
859 sock_put(sk); 813 sock_put(sk);
860} 814}
@@ -885,7 +839,8 @@ static void l2cap_sock_cleanup_listen(struct sock *parent)
885 839
886void __l2cap_sock_close(struct sock *sk, int reason) 840void __l2cap_sock_close(struct sock *sk, int reason)
887{ 841{
888 struct l2cap_conn *conn = l2cap_pi(sk)->conn; 842 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
843 struct l2cap_conn *conn = chan->conn;
889 844
890 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket); 845 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
891 846
@@ -900,9 +855,9 @@ void __l2cap_sock_close(struct sock *sk, int reason)
900 sk->sk_type == SOCK_STREAM) && 855 sk->sk_type == SOCK_STREAM) &&
901 conn->hcon->type == ACL_LINK) { 856 conn->hcon->type == ACL_LINK) {
902 l2cap_sock_set_timer(sk, sk->sk_sndtimeo); 857 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
903 l2cap_send_disconn_req(conn, sk, reason); 858 l2cap_send_disconn_req(conn, chan, reason);
904 } else 859 } else
905 l2cap_chan_del(sk, reason); 860 l2cap_chan_del(chan, reason);
906 break; 861 break;
907 862
908 case BT_CONNECT2: 863 case BT_CONNECT2:
@@ -917,20 +872,20 @@ void __l2cap_sock_close(struct sock *sk, int reason)
917 else 872 else
918 result = L2CAP_CR_BAD_PSM; 873 result = L2CAP_CR_BAD_PSM;
919 874
920 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid); 875 rsp.scid = cpu_to_le16(chan->dcid);
921 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid); 876 rsp.dcid = cpu_to_le16(chan->scid);
922 rsp.result = cpu_to_le16(result); 877 rsp.result = cpu_to_le16(result);
923 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); 878 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
924 l2cap_send_cmd(conn, l2cap_pi(sk)->ident, 879 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
925 L2CAP_CONN_RSP, sizeof(rsp), &rsp); 880 sizeof(rsp), &rsp);
926 } 881 }
927 882
928 l2cap_chan_del(sk, reason); 883 l2cap_chan_del(chan, reason);
929 break; 884 break;
930 885
931 case BT_CONNECT: 886 case BT_CONNECT:
932 case BT_DISCONN: 887 case BT_DISCONN:
933 l2cap_chan_del(sk, reason); 888 l2cap_chan_del(chan, reason);
934 break; 889 break;
935 890
936 default: 891 default:
@@ -942,6 +897,7 @@ void __l2cap_sock_close(struct sock *sk, int reason)
942static int l2cap_sock_shutdown(struct socket *sock, int how) 897static int l2cap_sock_shutdown(struct socket *sock, int how)
943{ 898{
 	struct sock *sk = sock->sk;
+	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
 	int err = 0;

 	BT_DBG("sock %p, sk %p", sock, sk);
@@ -951,7 +907,7 @@ static int l2cap_sock_shutdown(struct socket *sock, int how)

 	lock_sock(sk);
 	if (!sk->sk_shutdown) {
-		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
+		if (chan->mode == L2CAP_MODE_ERTM)
 			err = __l2cap_wait_ack(sk);

 		sk->sk_shutdown = SHUTDOWN_MASK;
@@ -998,49 +954,47 @@ static void l2cap_sock_destruct(struct sock *sk)
 void l2cap_sock_init(struct sock *sk, struct sock *parent)
 {
 	struct l2cap_pinfo *pi = l2cap_pi(sk);
+	struct l2cap_chan *chan = pi->chan;

 	BT_DBG("sk %p", sk);

 	if (parent) {
+		struct l2cap_chan *pchan = l2cap_pi(parent)->chan;
+
 		sk->sk_type = parent->sk_type;
 		bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;

-		pi->imtu = l2cap_pi(parent)->imtu;
-		pi->omtu = l2cap_pi(parent)->omtu;
-		pi->conf_state = l2cap_pi(parent)->conf_state;
-		pi->mode = l2cap_pi(parent)->mode;
-		pi->fcs = l2cap_pi(parent)->fcs;
-		pi->max_tx = l2cap_pi(parent)->max_tx;
-		pi->tx_win = l2cap_pi(parent)->tx_win;
-		pi->sec_level = l2cap_pi(parent)->sec_level;
-		pi->role_switch = l2cap_pi(parent)->role_switch;
-		pi->force_reliable = l2cap_pi(parent)->force_reliable;
-		pi->flushable = l2cap_pi(parent)->flushable;
+		chan->imtu = pchan->imtu;
+		chan->omtu = pchan->omtu;
+		chan->conf_state = pchan->conf_state;
+		chan->mode = pchan->mode;
+		chan->fcs = pchan->fcs;
+		chan->max_tx = pchan->max_tx;
+		chan->tx_win = pchan->tx_win;
+		chan->sec_level = pchan->sec_level;
+		chan->role_switch = pchan->role_switch;
+		chan->force_reliable = pchan->force_reliable;
+		chan->flushable = pchan->flushable;
 	} else {
-		pi->imtu = L2CAP_DEFAULT_MTU;
-		pi->omtu = 0;
+		chan->imtu = L2CAP_DEFAULT_MTU;
+		chan->omtu = 0;
 		if (!disable_ertm && sk->sk_type == SOCK_STREAM) {
-			pi->mode = L2CAP_MODE_ERTM;
-			pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
+			chan->mode = L2CAP_MODE_ERTM;
+			chan->conf_state |= L2CAP_CONF_STATE2_DEVICE;
 		} else {
-			pi->mode = L2CAP_MODE_BASIC;
+			chan->mode = L2CAP_MODE_BASIC;
 		}
-		pi->max_tx = L2CAP_DEFAULT_MAX_TX;
-		pi->fcs = L2CAP_FCS_CRC16;
-		pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
-		pi->sec_level = BT_SECURITY_LOW;
-		pi->role_switch = 0;
-		pi->force_reliable = 0;
-		pi->flushable = BT_FLUSHABLE_OFF;
+		chan->max_tx = L2CAP_DEFAULT_MAX_TX;
+		chan->fcs = L2CAP_FCS_CRC16;
+		chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
+		chan->sec_level = BT_SECURITY_LOW;
+		chan->role_switch = 0;
+		chan->force_reliable = 0;
+		chan->flushable = BT_FLUSHABLE_OFF;
 	}

 	/* Default config options */
-	pi->conf_len = 0;
-	pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
-	skb_queue_head_init(TX_QUEUE(sk));
-	skb_queue_head_init(SREJ_QUEUE(sk));
-	skb_queue_head_init(BUSY_QUEUE(sk));
-	INIT_LIST_HEAD(SREJ_LIST(sk));
+	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
 }

 static struct proto l2cap_proto = {
@@ -1070,7 +1024,6 @@ struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, g

 	setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);

-	bt_sock_link(&l2cap_sk_list, sk);
 	return sk;
 }

@@ -1078,6 +1031,7 @@ static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
 			     int kern)
 {
 	struct sock *sk;
+	struct l2cap_chan *chan;

 	BT_DBG("sock %p", sock);

@@ -1096,11 +1050,19 @@ static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
 	if (!sk)
 		return -ENOMEM;

+	chan = l2cap_chan_create(sk);
+	if (!chan) {
+		l2cap_sock_kill(sk);
+		return -ENOMEM;
+	}
+
+	l2cap_pi(sk)->chan = chan;
+
 	l2cap_sock_init(sk, NULL);
 	return 0;
 }

-const struct proto_ops l2cap_sock_ops = {
+static const struct proto_ops l2cap_sock_ops = {
 	.family		= PF_BLUETOOTH,
 	.owner		= THIS_MODULE,
 	.release	= l2cap_sock_release,
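The l2cap_sock.c hunks above are one piece of the series splitting channel state out of the L2CAP socket: per-channel parameters (MTUs, mode, FCS, security level, and so on) move into struct l2cap_chan, which the socket now reaches through l2cap_pi(sk)->chan. A minimal sketch of the resulting layout, with struct contents abridged and any member not visible in the diff treated as an assumption:

/* Sketch only: fields beyond those touched in the diff are assumed. */
struct l2cap_chan {
	struct sock	*sk;		/* owning socket */
	struct l2cap_conn *conn;	/* underlying ACL connection */
	__u16		imtu;		/* incoming MTU */
	__u16		omtu;		/* outgoing MTU */
	__u8		mode;		/* L2CAP_MODE_BASIC or L2CAP_MODE_ERTM */
	__u8		sec_level;	/* BT_SECURITY_* */
};

struct l2cap_pinfo {
	struct bt_sock	bt;		/* must stay first */
	struct l2cap_chan *chan;	/* channel owned by this socket */
};

Callers that used to dereference l2cap_pi(sk)->imtu now go through the extra indirection, which is why the RFCOMM hunks further down change in lockstep.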
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 4476d8e3c0f2..dae382ce7020 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -36,7 +36,7 @@ struct pending_cmd {
 	struct list_head list;
 	__u16 opcode;
 	int index;
-	void *cmd;
+	void *param;
 	struct sock *sk;
 	void *user_data;
 };
@@ -179,10 +179,12 @@ static int read_controller_info(struct sock *sk, u16 index)

 	hci_del_off_timer(hdev);

-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);

 	set_bit(HCI_MGMT, &hdev->flags);

+	memset(&rp, 0, sizeof(rp));
+
 	rp.type = hdev->dev_type;

 	rp.powered = test_bit(HCI_UP, &hdev->flags);
@@ -204,7 +206,9 @@ static int read_controller_info(struct sock *sk, u16 index)
 	rp.hci_ver = hdev->hci_ver;
 	put_unaligned_le16(hdev->hci_rev, &rp.hci_rev);

-	hci_dev_unlock_bh(hdev);
+	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
+
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);

 	return cmd_complete(sk, index, MGMT_OP_READ_INFO, &rp, sizeof(rp));
@@ -213,7 +217,7 @@ static int read_controller_info(struct sock *sk, u16 index)
 static void mgmt_pending_free(struct pending_cmd *cmd)
 {
 	sock_put(cmd->sk);
-	kfree(cmd->cmd);
+	kfree(cmd->param);
 	kfree(cmd);
 }

@@ -229,13 +233,14 @@ static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
 	cmd->opcode = opcode;
 	cmd->index = index;

-	cmd->cmd = kmalloc(len, GFP_ATOMIC);
-	if (!cmd->cmd) {
+	cmd->param = kmalloc(len, GFP_ATOMIC);
+	if (!cmd->param) {
 		kfree(cmd);
 		return NULL;
 	}

-	memcpy(cmd->cmd, data, len);
+	if (data)
+		memcpy(cmd->param, data, len);

 	cmd->sk = sk;
 	sock_hold(sk);
@@ -311,7 +316,7 @@ static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len)
 	if (!hdev)
 		return cmd_status(sk, index, MGMT_OP_SET_POWERED, ENODEV);

-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);

 	up = test_bit(HCI_UP, &hdev->flags);
 	if ((cp->val && up) || (!cp->val && !up)) {
@@ -338,7 +343,7 @@ static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len)
 	err = 0;

 failed:
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);
 	return err;
 }
@@ -363,7 +368,7 @@ static int set_discoverable(struct sock *sk, u16 index, unsigned char *data,
 	if (!hdev)
 		return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENODEV);

-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);

 	if (!test_bit(HCI_UP, &hdev->flags)) {
 		err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENETDOWN);
@@ -398,7 +403,7 @@ static int set_discoverable(struct sock *sk, u16 index, unsigned char *data,
 	mgmt_pending_remove(cmd);

 failed:
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);

 	return err;
@@ -424,7 +429,7 @@ static int set_connectable(struct sock *sk, u16 index, unsigned char *data,
 	if (!hdev)
 		return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENODEV);

-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);

 	if (!test_bit(HCI_UP, &hdev->flags)) {
 		err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENETDOWN);
@@ -458,7 +463,7 @@ static int set_connectable(struct sock *sk, u16 index, unsigned char *data,
 	mgmt_pending_remove(cmd);

 failed:
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);

 	return err;
@@ -517,7 +522,7 @@ static int set_pairable(struct sock *sk, u16 index, unsigned char *data,
 	if (!hdev)
 		return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, ENODEV);

-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);

 	if (cp->val)
 		set_bit(HCI_PAIRABLE, &hdev->flags);
@@ -533,12 +538,156 @@ static int set_pairable(struct sock *sk, u16 index, unsigned char *data,
 	err = mgmt_event(MGMT_EV_PAIRABLE, index, &ev, sizeof(ev), sk);

 failed:
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);

 	return err;
 }

+#define EIR_FLAGS		0x01 /* flags */
+#define EIR_UUID16_SOME		0x02 /* 16-bit UUID, more available */
+#define EIR_UUID16_ALL		0x03 /* 16-bit UUID, all listed */
+#define EIR_UUID32_SOME		0x04 /* 32-bit UUID, more available */
+#define EIR_UUID32_ALL		0x05 /* 32-bit UUID, all listed */
+#define EIR_UUID128_SOME	0x06 /* 128-bit UUID, more available */
+#define EIR_UUID128_ALL		0x07 /* 128-bit UUID, all listed */
+#define EIR_NAME_SHORT		0x08 /* shortened local name */
+#define EIR_NAME_COMPLETE	0x09 /* complete local name */
+#define EIR_TX_POWER		0x0A /* transmit power level */
+#define EIR_DEVICE_ID		0x10 /* device ID */
+
+#define PNP_INFO_SVCLASS_ID	0x1200
+
+static u8 bluetooth_base_uuid[] = {
+			0xFB, 0x34, 0x9B, 0x5F, 0x80, 0x00, 0x00, 0x80,
+			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static u16 get_uuid16(u8 *uuid128)
+{
+	u32 val;
+	int i;
+
+	for (i = 0; i < 12; i++) {
+		if (bluetooth_base_uuid[i] != uuid128[i])
+			return 0;
+	}
+
+	memcpy(&val, &uuid128[12], 4);
+
+	val = le32_to_cpu(val);
+	if (val > 0xffff)
+		return 0;
+
+	return (u16) val;
+}
+
+static void create_eir(struct hci_dev *hdev, u8 *data)
+{
+	u8 *ptr = data;
+	u16 eir_len = 0;
+	u16 uuid16_list[HCI_MAX_EIR_LENGTH / sizeof(u16)];
+	int i, truncated = 0;
+	struct list_head *p;
+	size_t name_len;
+
+	name_len = strlen(hdev->dev_name);
+
+	if (name_len > 0) {
+		/* EIR Data type */
+		if (name_len > 48) {
+			name_len = 48;
+			ptr[1] = EIR_NAME_SHORT;
+		} else
+			ptr[1] = EIR_NAME_COMPLETE;
+
+		/* EIR Data length */
+		ptr[0] = name_len + 1;
+
+		memcpy(ptr + 2, hdev->dev_name, name_len);
+
+		eir_len += (name_len + 2);
+		ptr += (name_len + 2);
+	}
+
+	memset(uuid16_list, 0, sizeof(uuid16_list));
+
+	/* Group all UUID16 types */
+	list_for_each(p, &hdev->uuids) {
+		struct bt_uuid *uuid = list_entry(p, struct bt_uuid, list);
+		u16 uuid16;
+
+		uuid16 = get_uuid16(uuid->uuid);
+		if (uuid16 == 0)
+			return;
+
+		if (uuid16 < 0x1100)
+			continue;
+
+		if (uuid16 == PNP_INFO_SVCLASS_ID)
+			continue;
+
+		/* Stop if not enough space to put next UUID */
+		if (eir_len + 2 + sizeof(u16) > HCI_MAX_EIR_LENGTH) {
+			truncated = 1;
+			break;
+		}
+
+		/* Check for duplicates */
+		for (i = 0; uuid16_list[i] != 0; i++)
+			if (uuid16_list[i] == uuid16)
+				break;
+
+		if (uuid16_list[i] == 0) {
+			uuid16_list[i] = uuid16;
+			eir_len += sizeof(u16);
+		}
+	}
+
+	if (uuid16_list[0] != 0) {
+		u8 *length = ptr;
+
+		/* EIR Data type */
+		ptr[1] = truncated ? EIR_UUID16_SOME : EIR_UUID16_ALL;
+
+		ptr += 2;
+		eir_len += 2;
+
+		for (i = 0; uuid16_list[i] != 0; i++) {
+			*ptr++ = (uuid16_list[i] & 0x00ff);
+			*ptr++ = (uuid16_list[i] & 0xff00) >> 8;
+		}
+
+		/* EIR Data length */
+		*length = (i * sizeof(u16)) + 1;
+	}
+}
+
+static int update_eir(struct hci_dev *hdev)
+{
+	struct hci_cp_write_eir cp;
+
+	if (!(hdev->features[6] & LMP_EXT_INQ))
+		return 0;
+
+	if (hdev->ssp_mode == 0)
+		return 0;
+
+	if (test_bit(HCI_SERVICE_CACHE, &hdev->flags))
+		return 0;
+
+	memset(&cp, 0, sizeof(cp));
+
+	create_eir(hdev, cp.data);
+
+	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
+		return 0;
+
+	memcpy(hdev->eir, cp.data, sizeof(cp.data));
+
+	return hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
+}
+
 static u8 get_service_classes(struct hci_dev *hdev)
 {
 	struct list_head *p;
@@ -590,7 +739,7 @@ static int add_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
 	if (!hdev)
 		return cmd_status(sk, index, MGMT_OP_ADD_UUID, ENODEV);

-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);

 	uuid = kmalloc(sizeof(*uuid), GFP_ATOMIC);
 	if (!uuid) {
@@ -607,10 +756,14 @@ static int add_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
 	if (err < 0)
 		goto failed;

+	err = update_eir(hdev);
+	if (err < 0)
+		goto failed;
+
 	err = cmd_complete(sk, index, MGMT_OP_ADD_UUID, NULL, 0);

 failed:
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);

 	return err;
@@ -635,7 +788,7 @@ static int remove_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
 	if (!hdev)
 		return cmd_status(sk, index, MGMT_OP_REMOVE_UUID, ENODEV);

-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);

 	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
 		err = hci_uuids_clear(hdev);
@@ -663,10 +816,14 @@ static int remove_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
 	if (err < 0)
 		goto unlock;

+	err = update_eir(hdev);
+	if (err < 0)
+		goto unlock;
+
 	err = cmd_complete(sk, index, MGMT_OP_REMOVE_UUID, NULL, 0);

 unlock:
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);

 	return err;
@@ -690,7 +847,7 @@ static int set_dev_class(struct sock *sk, u16 index, unsigned char *data,
 	if (!hdev)
 		return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS, ENODEV);

-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);

 	hdev->major_class = cp->major;
 	hdev->minor_class = cp->minor;
@@ -700,7 +857,7 @@ static int set_dev_class(struct sock *sk, u16 index, unsigned char *data,
 	if (err == 0)
 		err = cmd_complete(sk, index, MGMT_OP_SET_DEV_CLASS, NULL, 0);

-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);

 	return err;
@@ -722,7 +879,7 @@ static int set_service_cache(struct sock *sk, u16 index, unsigned char *data,
 	if (!hdev)
 		return cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE, ENODEV);

-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);

 	BT_DBG("hci%u enable %d", index, cp->enable);

@@ -732,13 +889,15 @@ static int set_service_cache(struct sock *sk, u16 index, unsigned char *data,
 	} else {
 		clear_bit(HCI_SERVICE_CACHE, &hdev->flags);
 		err = update_class(hdev);
+		if (err == 0)
+			err = update_eir(hdev);
 	}

 	if (err == 0)
 		err = cmd_complete(sk, index, MGMT_OP_SET_SERVICE_CACHE, NULL,
 									0);

-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);

 	return err;
@@ -772,7 +931,7 @@ static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len)
 	BT_DBG("hci%u debug_keys %u key_count %u", index, cp->debug_keys,
 								key_count);

-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);

 	hci_link_keys_clear(hdev);

@@ -786,11 +945,11 @@ static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len)
 	for (i = 0; i < key_count; i++) {
 		struct mgmt_key_info *key = &cp->keys[i];

-		hci_add_link_key(hdev, 0, &key->bdaddr, key->val, key->type,
+		hci_add_link_key(hdev, NULL, 0, &key->bdaddr, key->val, key->type,
 								key->pin_len);
 	}

-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);

 	return 0;
@@ -812,7 +971,7 @@ static int remove_key(struct sock *sk, u16 index, unsigned char *data, u16 len)
 	if (!hdev)
 		return cmd_status(sk, index, MGMT_OP_REMOVE_KEY, ENODEV);

-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);

 	err = hci_remove_link_key(hdev, &cp->bdaddr);
 	if (err < 0) {
@@ -835,7 +994,7 @@ static int remove_key(struct sock *sk, u16 index, unsigned char *data, u16 len)
 	}

 unlock:
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);

 	return err;
@@ -861,7 +1020,7 @@ static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len)
 	if (!hdev)
 		return cmd_status(sk, index, MGMT_OP_DISCONNECT, ENODEV);

-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);

 	if (!test_bit(HCI_UP, &hdev->flags)) {
 		err = cmd_status(sk, index, MGMT_OP_DISCONNECT, ENETDOWN);
@@ -874,6 +1033,9 @@ static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len)
 	}

 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
+	if (!conn)
+		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr);
+
 	if (!conn) {
 		err = cmd_status(sk, index, MGMT_OP_DISCONNECT, ENOTCONN);
 		goto failed;
@@ -893,7 +1055,7 @@ static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len)
 	mgmt_pending_remove(cmd);

 failed:
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);

 	return err;
@@ -914,7 +1076,7 @@ static int get_connections(struct sock *sk, u16 index)
 	if (!hdev)
 		return cmd_status(sk, index, MGMT_OP_GET_CONNECTIONS, ENODEV);

-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);

 	count = 0;
 	list_for_each(p, &hdev->conn_hash.list) {
@@ -945,7 +1107,7 @@ static int get_connections(struct sock *sk, u16 index)

 unlock:
 	kfree(rp);
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);
 	return err;
 }
@@ -970,7 +1132,7 @@ static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data,
 	if (!hdev)
 		return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENODEV);

-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);

 	if (!test_bit(HCI_UP, &hdev->flags)) {
 		err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENETDOWN);
@@ -992,7 +1154,7 @@ static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data,
 	mgmt_pending_remove(cmd);

 failed:
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);

 	return err;
@@ -1019,7 +1181,7 @@ static int pin_code_neg_reply(struct sock *sk, u16 index, unsigned char *data,
 		return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
 								ENODEV);

-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);

 	if (!test_bit(HCI_UP, &hdev->flags)) {
 		err = cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
@@ -1040,7 +1202,7 @@ static int pin_code_neg_reply(struct sock *sk, u16 index, unsigned char *data,
 	mgmt_pending_remove(cmd);

 failed:
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);

 	return err;
@@ -1063,14 +1225,14 @@ static int set_io_capability(struct sock *sk, u16 index, unsigned char *data,
 	if (!hdev)
 		return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY, ENODEV);

-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);

 	hdev->io_capability = cp->io_capability;

 	BT_DBG("%s IO capability set to 0x%02x", hdev->name,
 							hdev->io_capability);

-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);

 	return cmd_complete(sk, index, MGMT_OP_SET_IO_CAPABILITY, NULL, 0);
@@ -1156,7 +1318,7 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
 	if (!hdev)
 		return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, ENODEV);

-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);

 	if (cp->io_cap == 0x03) {
 		sec_level = BT_SECURITY_MEDIUM;
@@ -1198,7 +1360,7 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
 	err = 0;

 unlock:
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);

 	return err;
@@ -1230,7 +1392,7 @@ static int user_confirm_reply(struct sock *sk, u16 index, unsigned char *data,
 	if (!hdev)
 		return cmd_status(sk, index, mgmt_op, ENODEV);

-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);

 	if (!test_bit(HCI_UP, &hdev->flags)) {
 		err = cmd_status(sk, index, mgmt_op, ENETDOWN);
@@ -1248,6 +1410,231 @@ static int user_confirm_reply(struct sock *sk, u16 index, unsigned char *data,
 	mgmt_pending_remove(cmd);

 failed:
+	hci_dev_unlock(hdev);
+	hci_dev_put(hdev);
+
+	return err;
+}
+
+static int set_local_name(struct sock *sk, u16 index, unsigned char *data,
+								u16 len)
+{
+	struct mgmt_cp_set_local_name *mgmt_cp = (void *) data;
+	struct hci_cp_write_local_name hci_cp;
+	struct hci_dev *hdev;
+	struct pending_cmd *cmd;
+	int err;
+
+	BT_DBG("");
+
+	if (len != sizeof(*mgmt_cp))
+		return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME, EINVAL);
+
+	hdev = hci_dev_get(index);
+	if (!hdev)
+		return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME, ENODEV);
+
+	hci_dev_lock(hdev);
+
+	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, index, data, len);
+	if (!cmd) {
+		err = -ENOMEM;
+		goto failed;
+	}
+
+	memcpy(hci_cp.name, mgmt_cp->name, sizeof(hci_cp.name));
+	err = hci_send_cmd(hdev, HCI_OP_WRITE_LOCAL_NAME, sizeof(hci_cp),
+								&hci_cp);
+	if (err < 0)
+		mgmt_pending_remove(cmd);
+
+failed:
+	hci_dev_unlock(hdev);
+	hci_dev_put(hdev);
+
+	return err;
+}
+
+static int read_local_oob_data(struct sock *sk, u16 index)
+{
+	struct hci_dev *hdev;
+	struct pending_cmd *cmd;
+	int err;
+
+	BT_DBG("hci%u", index);
+
+	hdev = hci_dev_get(index);
+	if (!hdev)
+		return cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
+									ENODEV);
+
+	hci_dev_lock(hdev);
+
+	if (!test_bit(HCI_UP, &hdev->flags)) {
+		err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
+								ENETDOWN);
+		goto unlock;
+	}
+
+	if (!(hdev->features[6] & LMP_SIMPLE_PAIR)) {
+		err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
+								EOPNOTSUPP);
+		goto unlock;
+	}
+
+	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, index)) {
+		err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, EBUSY);
+		goto unlock;
+	}
+
+	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, index, NULL, 0);
+	if (!cmd) {
+		err = -ENOMEM;
+		goto unlock;
+	}
+
+	err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
+	if (err < 0)
+		mgmt_pending_remove(cmd);
+
+unlock:
+	hci_dev_unlock(hdev);
+	hci_dev_put(hdev);
+
+	return err;
+}
+
+static int add_remote_oob_data(struct sock *sk, u16 index, unsigned char *data,
+								u16 len)
+{
+	struct hci_dev *hdev;
+	struct mgmt_cp_add_remote_oob_data *cp = (void *) data;
+	int err;
+
+	BT_DBG("hci%u ", index);
+
+	if (len != sizeof(*cp))
+		return cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA,
+									EINVAL);
+
+	hdev = hci_dev_get(index);
+	if (!hdev)
+		return cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA,
+									ENODEV);
+
+	hci_dev_lock(hdev);
+
+	err = hci_add_remote_oob_data(hdev, &cp->bdaddr, cp->hash,
+								cp->randomizer);
+	if (err < 0)
+		err = cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, -err);
+	else
+		err = cmd_complete(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, NULL,
+									0);
+
+	hci_dev_unlock(hdev);
+	hci_dev_put(hdev);
+
+	return err;
+}
+
+static int remove_remote_oob_data(struct sock *sk, u16 index,
+						unsigned char *data, u16 len)
+{
+	struct hci_dev *hdev;
+	struct mgmt_cp_remove_remote_oob_data *cp = (void *) data;
+	int err;
+
+	BT_DBG("hci%u ", index);
+
+	if (len != sizeof(*cp))
+		return cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
+									EINVAL);
+
+	hdev = hci_dev_get(index);
+	if (!hdev)
+		return cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
+									ENODEV);
+
+	hci_dev_lock(hdev);
+
+	err = hci_remove_remote_oob_data(hdev, &cp->bdaddr);
+	if (err < 0)
+		err = cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
+									-err);
+	else
+		err = cmd_complete(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
+								NULL, 0);
+
+	hci_dev_unlock(hdev);
+	hci_dev_put(hdev);
+
+	return err;
+}
+
+static int start_discovery(struct sock *sk, u16 index)
+{
+	u8 lap[3] = { 0x33, 0x8b, 0x9e };
+	struct hci_cp_inquiry cp;
+	struct pending_cmd *cmd;
+	struct hci_dev *hdev;
+	int err;
+
+	BT_DBG("hci%u", index);
+
+	hdev = hci_dev_get(index);
+	if (!hdev)
+		return cmd_status(sk, index, MGMT_OP_START_DISCOVERY, ENODEV);
+
+	hci_dev_lock_bh(hdev);
+
+	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, index, NULL, 0);
+	if (!cmd) {
+		err = -ENOMEM;
+		goto failed;
+	}
+
+	memset(&cp, 0, sizeof(cp));
+	memcpy(&cp.lap, lap, 3);
+	cp.length = 0x08;
+	cp.num_rsp = 0x00;
+
+	err = hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
+	if (err < 0)
+		mgmt_pending_remove(cmd);
+
+failed:
+	hci_dev_unlock_bh(hdev);
+	hci_dev_put(hdev);
+
+	return err;
+}
+
+static int stop_discovery(struct sock *sk, u16 index)
+{
+	struct hci_dev *hdev;
+	struct pending_cmd *cmd;
+	int err;
+
+	BT_DBG("hci%u", index);
+
+	hdev = hci_dev_get(index);
+	if (!hdev)
+		return cmd_status(sk, index, MGMT_OP_STOP_DISCOVERY, ENODEV);
+
+	hci_dev_lock_bh(hdev);
+
+	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, index, NULL, 0);
+	if (!cmd) {
+		err = -ENOMEM;
+		goto failed;
+	}
+
+	err = hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
+	if (err < 0)
+		mgmt_pending_remove(cmd);
+
+failed:
 	hci_dev_unlock_bh(hdev);
 	hci_dev_put(hdev);

@@ -1266,7 +1653,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
 	if (msglen < sizeof(*hdr))
 		return -EINVAL;

-	buf = kmalloc(msglen, GFP_ATOMIC);
+	buf = kmalloc(msglen, GFP_KERNEL);
 	if (!buf)
 		return -ENOMEM;

@@ -1349,6 +1736,25 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
 	case MGMT_OP_USER_CONFIRM_NEG_REPLY:
 		err = user_confirm_reply(sk, index, buf + sizeof(*hdr), len, 0);
 		break;
+	case MGMT_OP_SET_LOCAL_NAME:
+		err = set_local_name(sk, index, buf + sizeof(*hdr), len);
+		break;
+	case MGMT_OP_READ_LOCAL_OOB_DATA:
+		err = read_local_oob_data(sk, index);
+		break;
+	case MGMT_OP_ADD_REMOTE_OOB_DATA:
+		err = add_remote_oob_data(sk, index, buf + sizeof(*hdr), len);
+		break;
+	case MGMT_OP_REMOVE_REMOTE_OOB_DATA:
+		err = remove_remote_oob_data(sk, index, buf + sizeof(*hdr),
+									len);
+		break;
+	case MGMT_OP_START_DISCOVERY:
+		err = start_discovery(sk, index);
+		break;
+	case MGMT_OP_STOP_DISCOVERY:
+		err = stop_discovery(sk, index);
+		break;
 	default:
 		BT_DBG("Unknown op %u", opcode);
 		err = cmd_status(sk, index, opcode, 0x01);
@@ -1382,7 +1788,7 @@ struct cmd_lookup {

 static void mode_rsp(struct pending_cmd *cmd, void *data)
 {
-	struct mgmt_mode *cp = cmd->cmd;
+	struct mgmt_mode *cp = cmd->param;
 	struct cmd_lookup *match = data;

 	if (cp->val != match->val)
@@ -1455,17 +1861,17 @@ int mgmt_connectable(u16 index, u8 connectable)
 	return ret;
 }

-int mgmt_new_key(u16 index, struct link_key *key, u8 old_key_type)
+int mgmt_new_key(u16 index, struct link_key *key, u8 persistent)
 {
 	struct mgmt_ev_new_key ev;

 	memset(&ev, 0, sizeof(ev));

+	ev.store_hint = persistent;
 	bacpy(&ev.key.bdaddr, &key->bdaddr);
 	ev.key.type = key->type;
 	memcpy(ev.key.val, key->val, 16);
 	ev.key.pin_len = key->pin_len;
-	ev.old_key_type = old_key_type;

 	return mgmt_event(MGMT_EV_NEW_KEY, index, &ev, sizeof(ev), NULL);
 }
@@ -1481,7 +1887,7 @@ int mgmt_connected(u16 index, bdaddr_t *bdaddr)

 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
 {
-	struct mgmt_cp_disconnect *cp = cmd->cmd;
+	struct mgmt_cp_disconnect *cp = cmd->param;
 	struct sock **sk = data;
 	struct mgmt_rp_disconnect rp;

@@ -1539,11 +1945,12 @@ int mgmt_connect_failed(u16 index, bdaddr_t *bdaddr, u8 status)
 	return mgmt_event(MGMT_EV_CONNECT_FAILED, index, &ev, sizeof(ev), NULL);
 }

-int mgmt_pin_code_request(u16 index, bdaddr_t *bdaddr)
+int mgmt_pin_code_request(u16 index, bdaddr_t *bdaddr, u8 secure)
 {
 	struct mgmt_ev_pin_code_request ev;

 	bacpy(&ev.bdaddr, bdaddr);
+	ev.secure = secure;

 	return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, index, &ev, sizeof(ev),
 									NULL);
@@ -1591,13 +1998,15 @@ int mgmt_pin_code_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
 	return err;
 }

-int mgmt_user_confirm_request(u16 index, bdaddr_t *bdaddr, __le32 value)
+int mgmt_user_confirm_request(u16 index, bdaddr_t *bdaddr, __le32 value,
+							u8 confirm_hint)
 {
 	struct mgmt_ev_user_confirm_request ev;

 	BT_DBG("hci%u", index);

 	bacpy(&ev.bdaddr, bdaddr);
+	ev.confirm_hint = confirm_hint;
 	put_unaligned_le32(value, &ev.value);

 	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, index, &ev, sizeof(ev),
@@ -1645,3 +2054,110 @@ int mgmt_auth_failed(u16 index, bdaddr_t *bdaddr, u8 status)

 	return mgmt_event(MGMT_EV_AUTH_FAILED, index, &ev, sizeof(ev), NULL);
 }
+
+int mgmt_set_local_name_complete(u16 index, u8 *name, u8 status)
+{
+	struct pending_cmd *cmd;
+	struct hci_dev *hdev;
+	struct mgmt_cp_set_local_name ev;
+	int err;
+
+	memset(&ev, 0, sizeof(ev));
+	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
+
+	cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, index);
+	if (!cmd)
+		goto send_event;
+
+	if (status) {
+		err = cmd_status(cmd->sk, index, MGMT_OP_SET_LOCAL_NAME, EIO);
+		goto failed;
+	}
+
+	hdev = hci_dev_get(index);
+	if (hdev) {
+		hci_dev_lock_bh(hdev);
+		update_eir(hdev);
+		hci_dev_unlock_bh(hdev);
+		hci_dev_put(hdev);
+	}
+
+	err = cmd_complete(cmd->sk, index, MGMT_OP_SET_LOCAL_NAME, &ev,
+								sizeof(ev));
+	if (err < 0)
+		goto failed;
+
+send_event:
+	err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, index, &ev, sizeof(ev),
+							cmd ? cmd->sk : NULL);
+
+failed:
+	if (cmd)
+		mgmt_pending_remove(cmd);
+	return err;
+}
+
+int mgmt_read_local_oob_data_reply_complete(u16 index, u8 *hash, u8 *randomizer,
+								u8 status)
+{
+	struct pending_cmd *cmd;
+	int err;
+
+	BT_DBG("hci%u status %u", index, status);
+
+	cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, index);
+	if (!cmd)
+		return -ENOENT;
+
+	if (status) {
+		err = cmd_status(cmd->sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
+									EIO);
+	} else {
+		struct mgmt_rp_read_local_oob_data rp;
+
+		memcpy(rp.hash, hash, sizeof(rp.hash));
+		memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));
+
+		err = cmd_complete(cmd->sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
+							&rp, sizeof(rp));
+	}
+
+	mgmt_pending_remove(cmd);
+
+	return err;
+}
+
+int mgmt_device_found(u16 index, bdaddr_t *bdaddr, u8 *dev_class, s8 rssi,
+								u8 *eir)
+{
+	struct mgmt_ev_device_found ev;
+
+	memset(&ev, 0, sizeof(ev));
+
+	bacpy(&ev.bdaddr, bdaddr);
+	memcpy(ev.dev_class, dev_class, sizeof(ev.dev_class));
+	ev.rssi = rssi;
+
+	if (eir)
+		memcpy(ev.eir, eir, sizeof(ev.eir));
+
+	return mgmt_event(MGMT_EV_DEVICE_FOUND, index, &ev, sizeof(ev), NULL);
+}
+
+int mgmt_remote_name(u16 index, bdaddr_t *bdaddr, u8 *name)
+{
+	struct mgmt_ev_remote_name ev;
+
+	memset(&ev, 0, sizeof(ev));
+
+	bacpy(&ev.bdaddr, bdaddr);
+	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
+
+	return mgmt_event(MGMT_EV_REMOTE_NAME, index, &ev, sizeof(ev), NULL);
+}
+
+int mgmt_discovering(u16 index, u8 discovering)
+{
+	return mgmt_event(MGMT_EV_DISCOVERING, index, &discovering,
+						sizeof(discovering), NULL);
+}
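The new create_eir()/update_eir() pair above writes standard Extended Inquiry Response structures: each structure is one length octet (counting the type octet plus the payload), one type octet, then the payload; 16-bit service UUIDs are recovered from 128-bit ones by matching against the Bluetooth Base UUID and taking bytes 12..15 little-endian. A standalone sketch of that framing, with buffer handling simplified (the helper name and return convention here are illustrative, not kernel API):

#include <stdint.h>
#include <string.h>

/* Append one EIR structure: |len|type|data...|, where len counts the
 * type octet plus the data.  Returns the new write offset. */
static size_t eir_append(uint8_t *buf, size_t off, uint8_t type,
			 const void *data, uint8_t len)
{
	buf[off++] = len + 1;	/* length octet covers type + data */
	buf[off++] = type;	/* e.g. 0x09 = complete local name */
	memcpy(buf + off, data, len);
	return off + len;
}

For example, a device name "kb" encodes as 03 09 6b 62, and a lone 16-bit UUID 0x1101 under type EIR_UUID16_ALL as 03 03 01 11, matching the little-endian byte order create_eir() emits.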
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index c9973932456f..5759bb7054f7 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -232,6 +232,8 @@ static int rfcomm_l2sock_create(struct socket **sock)
 static inline int rfcomm_check_security(struct rfcomm_dlc *d)
 {
 	struct sock *sk = d->session->sock->sk;
+	struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
+
 	__u8 auth_type;

 	switch (d->sec_level) {
@@ -246,8 +248,7 @@ static inline int rfcomm_check_security(struct rfcomm_dlc *d)
 		break;
 	}

-	return hci_conn_security(l2cap_pi(sk)->conn->hcon, d->sec_level,
-								auth_type);
+	return hci_conn_security(conn->hcon, d->sec_level, auth_type);
 }

 static void rfcomm_session_timeout(unsigned long arg)
@@ -710,10 +711,10 @@ static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src,
 	/* Set L2CAP options */
 	sk = sock->sk;
 	lock_sock(sk);
-	l2cap_pi(sk)->imtu = l2cap_mtu;
-	l2cap_pi(sk)->sec_level = sec_level;
+	l2cap_pi(sk)->chan->imtu = l2cap_mtu;
+	l2cap_pi(sk)->chan->sec_level = sec_level;
 	if (l2cap_ertm)
-		l2cap_pi(sk)->mode = L2CAP_MODE_ERTM;
+		l2cap_pi(sk)->chan->mode = L2CAP_MODE_ERTM;
 	release_sock(sk);

 	s = rfcomm_session_add(sock, BT_BOUND);
@@ -1241,6 +1242,7 @@ static int rfcomm_recv_disc(struct rfcomm_session *s, u8 dlci)
 void rfcomm_dlc_accept(struct rfcomm_dlc *d)
 {
 	struct sock *sk = d->session->sock->sk;
+	struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;

 	BT_DBG("dlc %p", d);

@@ -1254,7 +1256,7 @@ void rfcomm_dlc_accept(struct rfcomm_dlc *d)
 	rfcomm_dlc_unlock(d);

 	if (d->role_switch)
-		hci_conn_switch_role(l2cap_pi(sk)->conn->hcon, 0x00);
+		hci_conn_switch_role(conn->hcon, 0x00);

 	rfcomm_send_msc(d->session, 1, d->dlci, d->v24_sig);
 }
@@ -1890,7 +1892,8 @@ static inline void rfcomm_accept_connection(struct rfcomm_session *s)

 		/* We should adjust MTU on incoming sessions.
 		 * L2CAP MTU minus UIH header and FCS. */
-		s->mtu = min(l2cap_pi(nsock->sk)->omtu, l2cap_pi(nsock->sk)->imtu) - 5;
+		s->mtu = min(l2cap_pi(nsock->sk)->chan->omtu,
+				l2cap_pi(nsock->sk)->chan->imtu) - 5;

 		rfcomm_schedule();
 	} else
@@ -1909,7 +1912,7 @@ static inline void rfcomm_check_connection(struct rfcomm_session *s)

 		/* We can adjust MTU on outgoing sessions.
 		 * L2CAP MTU minus UIH header and FCS. */
-		s->mtu = min(l2cap_pi(sk)->omtu, l2cap_pi(sk)->imtu) - 5;
+		s->mtu = min(l2cap_pi(sk)->chan->omtu, l2cap_pi(sk)->chan->imtu) - 5;

 		rfcomm_send_sabm(s, 0);
 		break;
@@ -1992,7 +1995,7 @@ static int rfcomm_add_listener(bdaddr_t *ba)
 	/* Set L2CAP options */
 	sk = sock->sk;
 	lock_sock(sk);
-	l2cap_pi(sk)->imtu = l2cap_mtu;
+	l2cap_pi(sk)->chan->imtu = l2cap_mtu;
 	release_sock(sk);

 	/* Start listening on the socket */
@@ -2093,7 +2096,7 @@ static void rfcomm_security_cfm(struct hci_conn *conn, u8 status, u8 encrypt)
 		if (!test_and_clear_bit(RFCOMM_AUTH_PENDING, &d->flags))
 			continue;

-		if (!status)
+		if (!status && hci_conn_check_secure(conn, d->sec_level))
 			set_bit(RFCOMM_AUTH_ACCEPT, &d->flags);
 		else
 			set_bit(RFCOMM_AUTH_REJECT, &d->flags);
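The two `s->mtu = min(...) - 5` sites above now read both L2CAP MTUs through the channel. The constant 5 reserves room for RFCOMM's own framing inside the L2CAP payload; reading it as address + control + a two-octet length field + FCS (the worst-case UIH header) is our interpretation, since the code only records the total. A trivial restatement:

/* Illustrative only: the byte-by-byte breakdown of the 5 reserved
 * octets is an assumption; the kernel code just subtracts 5. */
static unsigned int rfcomm_payload_mtu(unsigned int omtu, unsigned int imtu)
{
	unsigned int l2cap_mtu = omtu < imtu ? omtu : imtu;

	return l2cap_mtu - 5;	/* UIH addr + ctrl + 2-byte len + FCS */
}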
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 66cc1f0c3df8..386cfaffd4b7 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -743,6 +743,7 @@ static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __u
 	struct sock *sk = sock->sk;
 	struct sock *l2cap_sk;
 	struct rfcomm_conninfo cinfo;
+	struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
 	int len, err = 0;
 	u32 opt;

@@ -787,8 +788,8 @@ static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __u

 	l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;

-	cinfo.hci_handle = l2cap_pi(l2cap_sk)->conn->hcon->handle;
-	memcpy(cinfo.dev_class, l2cap_pi(l2cap_sk)->conn->hcon->dev_class, 3);
+	cinfo.hci_handle = conn->hcon->handle;
+	memcpy(cinfo.dev_class, conn->hcon->dev_class, 3);

 	len = min_t(unsigned int, len, sizeof(cinfo));
 	if (copy_to_user(optval, (char *) &cinfo, len))
diff --git a/net/bridge/br.c b/net/bridge/br.c
index 84bbb82599b2..f20c4fd915a8 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -104,3 +104,4 @@ module_init(br_init)
 module_exit(br_deinit)
 MODULE_LICENSE("GPL");
 MODULE_VERSION(BR_VERSION);
+MODULE_ALIAS_RTNL_LINK("bridge");
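The one-line br.c change matters for userspace: MODULE_ALIAS_RTNL_LINK("bridge") registers the module alias that rtnetlink requests when a link of kind "bridge" is created, so the module can be autoloaded on demand. The macro (from include/net/rtnetlink.h) is just:

#define MODULE_ALIAS_RTNL_LINK(kind) MODULE_ALIAS("rtnl-link-" kind)

With it in place, `ip link add name br0 type bridge` triggers request_module("rtnl-link-bridge") instead of failing when bridge support is built as a module.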
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 21e5901186ea..a6b2f86378c7 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -74,13 +74,23 @@ out:
 	return NETDEV_TX_OK;
 }

+static int br_dev_init(struct net_device *dev)
+{
+	struct net_bridge *br = netdev_priv(dev);
+
+	br->stats = alloc_percpu(struct br_cpu_netstats);
+	if (!br->stats)
+		return -ENOMEM;
+
+	return 0;
+}
+
 static int br_dev_open(struct net_device *dev)
 {
 	struct net_bridge *br = netdev_priv(dev);

 	netif_carrier_off(dev);
-
-	br_features_recompute(br);
+	netdev_update_features(dev);
 	netif_start_queue(dev);
 	br_stp_enable_bridge(br);
 	br_multicast_open(br);
@@ -177,48 +187,11 @@ static void br_getinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 	strcpy(info->bus_info, "N/A");
 }

-static int br_set_sg(struct net_device *dev, u32 data)
-{
-	struct net_bridge *br = netdev_priv(dev);
-
-	if (data)
-		br->feature_mask |= NETIF_F_SG;
-	else
-		br->feature_mask &= ~NETIF_F_SG;
-
-	br_features_recompute(br);
-	return 0;
-}
-
-static int br_set_tso(struct net_device *dev, u32 data)
-{
-	struct net_bridge *br = netdev_priv(dev);
-
-	if (data)
-		br->feature_mask |= NETIF_F_TSO;
-	else
-		br->feature_mask &= ~NETIF_F_TSO;
-
-	br_features_recompute(br);
-	return 0;
-}
-
-static int br_set_tx_csum(struct net_device *dev, u32 data)
+static u32 br_fix_features(struct net_device *dev, u32 features)
 {
 	struct net_bridge *br = netdev_priv(dev);

-	if (data)
-		br->feature_mask |= NETIF_F_NO_CSUM;
-	else
-		br->feature_mask &= ~NETIF_F_ALL_CSUM;
-
-	br_features_recompute(br);
-	return 0;
-}
-
-static int br_set_flags(struct net_device *netdev, u32 data)
-{
-	return ethtool_op_set_flags(netdev, data, ETH_FLAG_TXVLAN);
+	return br_features_recompute(br, features);
 }

 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -319,21 +292,12 @@ static int br_del_slave(struct net_device *dev, struct net_device *slave_dev)
 static const struct ethtool_ops br_ethtool_ops = {
 	.get_drvinfo		= br_getinfo,
 	.get_link		= ethtool_op_get_link,
-	.get_tx_csum		= ethtool_op_get_tx_csum,
-	.set_tx_csum		= br_set_tx_csum,
-	.get_sg			= ethtool_op_get_sg,
-	.set_sg			= br_set_sg,
-	.get_tso		= ethtool_op_get_tso,
-	.set_tso		= br_set_tso,
-	.get_ufo		= ethtool_op_get_ufo,
-	.set_ufo		= ethtool_op_set_ufo,
-	.get_flags		= ethtool_op_get_flags,
-	.set_flags		= br_set_flags,
 };

 static const struct net_device_ops br_netdev_ops = {
 	.ndo_open		= br_dev_open,
 	.ndo_stop		= br_dev_stop,
+	.ndo_init		= br_dev_init,
 	.ndo_start_xmit		= br_dev_xmit,
 	.ndo_get_stats64	= br_get_stats64,
 	.ndo_set_mac_address	= br_set_mac_address,
@@ -347,6 +311,7 @@ static const struct net_device_ops br_netdev_ops = {
 #endif
 	.ndo_add_slave		= br_add_slave,
 	.ndo_del_slave		= br_del_slave,
+	.ndo_fix_features	= br_fix_features,
 };

 static void br_dev_free(struct net_device *dev)
@@ -357,18 +322,49 @@ static void br_dev_free(struct net_device *dev)
 	free_netdev(dev);
 }

+static struct device_type br_type = {
+	.name	= "bridge",
+};
+
 void br_dev_setup(struct net_device *dev)
 {
+	struct net_bridge *br = netdev_priv(dev);
+
 	random_ether_addr(dev->dev_addr);
 	ether_setup(dev);

 	dev->netdev_ops = &br_netdev_ops;
 	dev->destructor = br_dev_free;
 	SET_ETHTOOL_OPS(dev, &br_ethtool_ops);
+	SET_NETDEV_DEVTYPE(dev, &br_type);
 	dev->tx_queue_len = 0;
 	dev->priv_flags = IFF_EBRIDGE;

 	dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
 			NETIF_F_GSO_MASK | NETIF_F_NO_CSUM | NETIF_F_LLTX |
-			NETIF_F_NETNS_LOCAL | NETIF_F_GSO | NETIF_F_HW_VLAN_TX;
+			NETIF_F_NETNS_LOCAL | NETIF_F_HW_VLAN_TX;
+	dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
+			   NETIF_F_GSO_MASK | NETIF_F_NO_CSUM |
+			   NETIF_F_HW_VLAN_TX;
+
+	br->dev = dev;
+	spin_lock_init(&br->lock);
+	INIT_LIST_HEAD(&br->port_list);
+	spin_lock_init(&br->hash_lock);
+
+	br->bridge_id.prio[0] = 0x80;
+	br->bridge_id.prio[1] = 0x00;
+
+	memcpy(br->group_addr, br_group_address, ETH_ALEN);
+
+	br->stp_enabled = BR_NO_STP;
+	br->designated_root = br->bridge_id;
+	br->bridge_max_age = br->max_age = 20 * HZ;
+	br->bridge_hello_time = br->hello_time = 2 * HZ;
+	br->bridge_forward_delay = br->forward_delay = 15 * HZ;
+	br->ageing_time = 300 * HZ;
+
+	br_netfilter_rtable_init(br);
+	br_stp_timer_init(br);
+	br_multicast_init(br);
 }
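br_device.c drops the per-flag ethtool setters in favour of the ndo_fix_features hook: the core now calls br_fix_features() from netdev_update_features(), and the bridge answers with the feature set its ports can collectively support. A sketch of the recompute pattern assumed by the new two-argument br_features_recompute() (the real implementation lives in br_if.c and may mask additional feature groups):

/* Sketch: fold each port's features into the bridge's advertised set.
 * netdev_increment_features() is the core helper for master devices. */
static u32 example_features_recompute(struct net_bridge *br, u32 features)
{
	struct net_bridge_port *p;
	u32 mask = features;

	if (list_empty(&br->port_list))
		return features;

	features &= ~NETIF_F_ONE_FOR_ALL;	/* start from nothing */

	list_for_each_entry(p, &br->port_list, list)
		features = netdev_increment_features(features,
						     p->dev->features, mask);

	return features;
}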
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index cc4d3c5ab1c6..e0dfbc151dd7 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -28,6 +28,7 @@
 static struct kmem_cache *br_fdb_cache __read_mostly;
 static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
 		      const unsigned char *addr);
+static void fdb_notify(const struct net_bridge_fdb_entry *, int);

 static u32 fdb_salt __read_mostly;

@@ -62,7 +63,7 @@ static inline int has_expired(const struct net_bridge *br,
 			      const struct net_bridge_fdb_entry *fdb)
 {
 	return !fdb->is_static &&
-		time_before_eq(fdb->ageing_timer + hold_time(br), jiffies);
+		time_before_eq(fdb->updated + hold_time(br), jiffies);
 }

 static inline int br_mac_hash(const unsigned char *mac)
@@ -81,6 +82,7 @@ static void fdb_rcu_free(struct rcu_head *head)

 static inline void fdb_delete(struct net_bridge_fdb_entry *f)
 {
+	fdb_notify(f, RTM_DELNEIGH);
 	hlist_del_rcu(&f->hlist);
 	call_rcu(&f->rcu, fdb_rcu_free);
 }
@@ -140,7 +142,7 @@ void br_fdb_cleanup(unsigned long _data)
 			unsigned long this_timer;
 			if (f->is_static)
 				continue;
-			this_timer = f->ageing_timer + delay;
+			this_timer = f->updated + delay;
 			if (time_before_eq(this_timer, jiffies))
 				fdb_delete(f);
 			else if (time_before(this_timer, next_timer))
@@ -293,7 +295,7 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf,

 		fe->is_local = f->is_local;
 		if (!f->is_static)
-			fe->ageing_timer_value = jiffies_to_clock_t(jiffies - f->ageing_timer);
+			fe->ageing_timer_value = jiffies_to_clock_t(jiffies - f->updated);
 		++fe;
 		++num;
 	}
@@ -305,8 +307,21 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf,
 	return num;
 }

-static inline struct net_bridge_fdb_entry *fdb_find(struct hlist_head *head,
-						    const unsigned char *addr)
+static struct net_bridge_fdb_entry *fdb_find(struct hlist_head *head,
+					     const unsigned char *addr)
+{
+	struct hlist_node *h;
+	struct net_bridge_fdb_entry *fdb;
+
+	hlist_for_each_entry(fdb, h, head, hlist) {
+		if (!compare_ether_addr(fdb->addr.addr, addr))
+			return fdb;
+	}
+	return NULL;
+}
+
+static struct net_bridge_fdb_entry *fdb_find_rcu(struct hlist_head *head,
+						 const unsigned char *addr)
 {
 	struct hlist_node *h;
 	struct net_bridge_fdb_entry *fdb;
@@ -320,8 +335,7 @@ static inline struct net_bridge_fdb_entry *fdb_find(struct hlist_head *head,

 static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head,
 					       struct net_bridge_port *source,
-					       const unsigned char *addr,
-					       int is_local)
+					       const unsigned char *addr)
 {
 	struct net_bridge_fdb_entry *fdb;

@@ -329,11 +343,11 @@ static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head,
 	if (fdb) {
 		memcpy(fdb->addr.addr, addr, ETH_ALEN);
 		fdb->dst = source;
-		fdb->is_local = is_local;
-		fdb->is_static = is_local;
-		fdb->ageing_timer = jiffies;
-
+		fdb->is_local = 0;
+		fdb->is_static = 0;
+		fdb->updated = fdb->used = jiffies;
 		hlist_add_head_rcu(&fdb->hlist, head);
+		fdb_notify(fdb, RTM_NEWNEIGH);
 	}
 	return fdb;
 }
@@ -360,12 +374,15 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
 		fdb_delete(fdb);
 	}

-	if (!fdb_create(head, source, addr, 1))
+	fdb = fdb_create(head, source, addr);
+	if (!fdb)
 		return -ENOMEM;

+	fdb->is_local = fdb->is_static = 1;
 	return 0;
 }

+/* Add entry for local address of interface */
 int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
 		  const unsigned char *addr)
 {
@@ -392,7 +409,7 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
 			source->state == BR_STATE_FORWARDING))
 		return;

-	fdb = fdb_find(head, addr);
+	fdb = fdb_find_rcu(head, addr);
 	if (likely(fdb)) {
 		/* attempt to update an entry for a local interface */
 		if (unlikely(fdb->is_local)) {
@@ -403,15 +420,277 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
 		} else {
 			/* fastpath: update of existing entry */
 			fdb->dst = source;
-			fdb->ageing_timer = jiffies;
+			fdb->updated = jiffies;
 		}
 	} else {
 		spin_lock(&br->hash_lock);
-		if (!fdb_find(head, addr))
-			fdb_create(head, source, addr, 0);
+		if (likely(!fdb_find(head, addr)))
+			fdb_create(head, source, addr);
+
 		/* else  we lose race and someone else inserts
 		 * it first, don't bother updating
 		 */
 		spin_unlock(&br->hash_lock);
 	}
 }
+
+static int fdb_to_nud(const struct net_bridge_fdb_entry *fdb)
+{
+	if (fdb->is_local)
+		return NUD_PERMANENT;
+	else if (fdb->is_static)
+		return NUD_NOARP;
+	else if (has_expired(fdb->dst->br, fdb))
+		return NUD_STALE;
+	else
+		return NUD_REACHABLE;
+}
+
+static int fdb_fill_info(struct sk_buff *skb,
+			 const struct net_bridge_fdb_entry *fdb,
+			 u32 pid, u32 seq, int type, unsigned int flags)
+{
+	unsigned long now = jiffies;
+	struct nda_cacheinfo ci;
+	struct nlmsghdr *nlh;
+	struct ndmsg *ndm;
+
+	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
+	if (nlh == NULL)
+		return -EMSGSIZE;
+
+
+	ndm = nlmsg_data(nlh);
+	ndm->ndm_family	 = AF_BRIDGE;
+	ndm->ndm_pad1    = 0;
+	ndm->ndm_pad2    = 0;
+	ndm->ndm_flags	 = 0;
+	ndm->ndm_type	 = 0;
+	ndm->ndm_ifindex = fdb->dst->dev->ifindex;
+	ndm->ndm_state   = fdb_to_nud(fdb);
+
+	NLA_PUT(skb, NDA_LLADDR, ETH_ALEN, &fdb->addr);
+
+	ci.ndm_used	 = jiffies_to_clock_t(now - fdb->used);
+	ci.ndm_confirmed = 0;
+	ci.ndm_updated	 = jiffies_to_clock_t(now - fdb->updated);
+	ci.ndm_refcnt	 = 0;
+	NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
+
+	return nlmsg_end(skb, nlh);
+
+nla_put_failure:
+	nlmsg_cancel(skb, nlh);
+	return -EMSGSIZE;
+}
+
+static inline size_t fdb_nlmsg_size(void)
+{
+	return NLMSG_ALIGN(sizeof(struct ndmsg))
+		+ nla_total_size(ETH_ALEN) /* NDA_LLADDR */
+		+ nla_total_size(sizeof(struct nda_cacheinfo));
+}
+
+static void fdb_notify(const struct net_bridge_fdb_entry *fdb, int type)
+{
+	struct net *net = dev_net(fdb->dst->dev);
+	struct sk_buff *skb;
+	int err = -ENOBUFS;
+
+	skb = nlmsg_new(fdb_nlmsg_size(), GFP_ATOMIC);
+	if (skb == NULL)
+		goto errout;
+
+	err = fdb_fill_info(skb, fdb, 0, 0, type, 0);
+	if (err < 0) {
+		/* -EMSGSIZE implies BUG in fdb_nlmsg_size() */
+		WARN_ON(err == -EMSGSIZE);
+		kfree_skb(skb);
+		goto errout;
+	}
+	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
+	return;
+errout:
+	if (err < 0)
+		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
+}
+
+/* Dump information about entries, in response to GETNEIGH */
+int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	struct net *net = sock_net(skb->sk);
+	struct net_device *dev;
+	int idx = 0;
+
+	rcu_read_lock();
+	for_each_netdev_rcu(net, dev) {
+		struct net_bridge *br = netdev_priv(dev);
+		int i;
+
+		if (!(dev->priv_flags & IFF_EBRIDGE))
+			continue;
+
+		for (i = 0; i < BR_HASH_SIZE; i++) {
+			struct hlist_node *h;
+			struct net_bridge_fdb_entry *f;
+
+			hlist_for_each_entry_rcu(f, h, &br->hash[i], hlist) {
+				if (idx < cb->args[0])
+					goto skip;
+
+				if (fdb_fill_info(skb, f,
+						  NETLINK_CB(cb->skb).pid,
+						  cb->nlh->nlmsg_seq,
+						  RTM_NEWNEIGH,
+						  NLM_F_MULTI) < 0)
+					break;
+skip:
+				++idx;
+			}
+		}
+	}
+	rcu_read_unlock();
+
+	cb->args[0] = idx;
+
+	return skb->len;
+}
+
+/* Create new static fdb entry */
+static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
+			 __u16 state)
+{
+	struct net_bridge *br = source->br;
+	struct hlist_head *head = &br->hash[br_mac_hash(addr)];
+	struct net_bridge_fdb_entry *fdb;
+
+	fdb = fdb_find(head, addr);
+	if (fdb)
+		return -EEXIST;
+
+	fdb = fdb_create(head, source, addr);
+	if (!fdb)
+		return -ENOMEM;
+
+	if (state & NUD_PERMANENT)
+		fdb->is_local = fdb->is_static = 1;
+	else if (state & NUD_NOARP)
+		fdb->is_static = 1;
+	return 0;
+}
+
+/* Add new permanent fdb entry with RTM_NEWNEIGH */
+int br_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
+{
+	struct net *net = sock_net(skb->sk);
+	struct ndmsg *ndm;
+	struct nlattr *tb[NDA_MAX+1];
+	struct net_device *dev;
+	struct net_bridge_port *p;
+	const __u8 *addr;
+	int err;
+
+	ASSERT_RTNL();
+	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
+	if (err < 0)
+		return err;
+
+	ndm = nlmsg_data(nlh);
+	if (ndm->ndm_ifindex == 0) {
+		pr_info("bridge: RTM_NEWNEIGH with invalid ifindex\n");
+		return -EINVAL;
+	}
+
+	dev = __dev_get_by_index(net, ndm->ndm_ifindex);
+	if (dev == NULL) {
+		pr_info("bridge: RTM_NEWNEIGH with unknown ifindex\n");
+		return -ENODEV;
+	}
+
+	if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
+		pr_info("bridge: RTM_NEWNEIGH with invalid address\n");
+		return -EINVAL;
+	}
+
+	addr = nla_data(tb[NDA_LLADDR]);
+	if (!is_valid_ether_addr(addr)) {
+		pr_info("bridge: RTM_NEWNEIGH with invalid ether address\n");
+		return -EINVAL;
+	}
+
+	p = br_port_get_rtnl(dev);
+	if (p == NULL) {
+		pr_info("bridge: RTM_NEWNEIGH %s not a bridge port\n",
+			dev->name);
+		return -EINVAL;
+	}
+
+	spin_lock_bh(&p->br->hash_lock);
+	err = fdb_add_entry(p, addr, ndm->ndm_state);
+	spin_unlock_bh(&p->br->hash_lock);
+
+	return err;
+}
+
+static int fdb_delete_by_addr(struct net_bridge_port *p, const u8 *addr)
+{
+	struct net_bridge *br = p->br;
+	struct hlist_head *head = &br->hash[br_mac_hash(addr)];
+	struct net_bridge_fdb_entry *fdb;
+
+	fdb = fdb_find(head, addr);
+	if (!fdb)
+		return -ENOENT;
+
+	fdb_delete(fdb);
646 return 0;
647}
648
649/* Remove neighbor entry with RTM_DELNEIGH */
650int br_fdb_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
651{
652 struct net *net = sock_net(skb->sk);
653 struct ndmsg *ndm;
654 struct net_bridge_port *p;
655 struct nlattr *llattr;
656 const __u8 *addr;
657 struct net_device *dev;
658 int err;
659
660 ASSERT_RTNL();
661 if (nlmsg_len(nlh) < sizeof(*ndm))
662 return -EINVAL;
663
664 ndm = nlmsg_data(nlh);
665 if (ndm->ndm_ifindex == 0) {
666 pr_info("bridge: RTM_DELNEIGH with invalid ifindex\n");
667 return -EINVAL;
668 }
669
670 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
671 if (dev == NULL) {
672 pr_info("bridge: RTM_DELNEIGH with unknown ifindex\n");
673 return -ENODEV;
674 }
675
676 llattr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_LLADDR);
677 if (llattr == NULL || nla_len(llattr) != ETH_ALEN) {
678 pr_info("bridge: RTM_DELNEIGH with invalid address\n");
679 return -EINVAL;
680 }
681
682 addr = nla_data(llattr);
683
684 p = br_port_get_rtnl(dev);
685 if (p == NULL) {
686 pr_info("bridge: RTM_DELNEIGH %s not a bridge port\n",
687 dev->name);
688 return -EINVAL;
689 }
690
691 spin_lock_bh(&p->br->hash_lock);
692 err = fdb_delete_by_addr(p, addr);
693 spin_unlock_bh(&p->br->hash_lock);
694
695 return err;
696}
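
The RTM_NEWNEIGH/RTM_DELNEIGH handlers above give the forwarding database a regular rtnetlink interface: an AF_BRIDGE ndmsg addressed at a bridge port's ifindex, with the MAC carried in an NDA_LLADDR attribute and ndm_state selecting NUD_PERMANENT (local and static), NUD_NOARP (static) or a plain learned entry. A hypothetical userspace sketch exercising br_fdb_add(); the ifindex and MAC are placeholders and error handling is omitted:

    #include <string.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <linux/netlink.h>
    #include <linux/rtnetlink.h>
    #include <linux/neighbour.h>

    int main(void)
    {
            int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
            struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };
            unsigned char mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
            struct {
                    struct nlmsghdr nlh;
                    struct ndmsg ndm;
                    char attrs[64];
            } req;
            struct rtattr *rta;

            memset(&req, 0, sizeof(req));
            req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ndmsg));
            req.nlh.nlmsg_type = RTM_NEWNEIGH;
            req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
            req.ndm.ndm_family = AF_BRIDGE;
            req.ndm.ndm_ifindex = 2;        /* a bridge port, not the bridge itself */
            req.ndm.ndm_state = NUD_NOARP;  /* static; NUD_PERMANENT would mean local */

            /* Append the NDA_LLADDR attribute right behind the ndmsg */
            rta = (struct rtattr *)((char *)&req + NLMSG_ALIGN(req.nlh.nlmsg_len));
            rta->rta_type = NDA_LLADDR;
            rta->rta_len = RTA_LENGTH(6);
            memcpy(RTA_DATA(rta), mac, 6);
            req.nlh.nlmsg_len = NLMSG_ALIGN(req.nlh.nlmsg_len) + RTA_LENGTH(6);

            sendto(fd, &req, req.nlh.nlmsg_len, 0,
                   (struct sockaddr *)&kernel, sizeof(kernel));
            close(fd);
            return 0;
    }
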
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 718b60366dfe..5dbdfdfc3a34 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -36,8 +36,8 @@ static int port_cost(struct net_device *dev)
36 if (dev->ethtool_ops && dev->ethtool_ops->get_settings) { 36 if (dev->ethtool_ops && dev->ethtool_ops->get_settings) {
37 struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET, }; 37 struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET, };
38 38
39 if (!dev->ethtool_ops->get_settings(dev, &ecmd)) { 39 if (!dev_ethtool_get_settings(dev, &ecmd)) {
40 switch(ecmd.speed) { 40 switch (ethtool_cmd_speed(&ecmd)) {
41 case SPEED_10000: 41 case SPEED_10000:
42 return 2; 42 return 2;
43 case SPEED_1000: 43 case SPEED_1000:
@@ -175,56 +175,6 @@ static void del_br(struct net_bridge *br, struct list_head *head)
175 unregister_netdevice_queue(br->dev, head); 175 unregister_netdevice_queue(br->dev, head);
176} 176}
177 177
178static struct net_device *new_bridge_dev(struct net *net, const char *name)
179{
180 struct net_bridge *br;
181 struct net_device *dev;
182
183 dev = alloc_netdev(sizeof(struct net_bridge), name,
184 br_dev_setup);
185
186 if (!dev)
187 return NULL;
188 dev_net_set(dev, net);
189
190 br = netdev_priv(dev);
191 br->dev = dev;
192
193 br->stats = alloc_percpu(struct br_cpu_netstats);
194 if (!br->stats) {
195 free_netdev(dev);
196 return NULL;
197 }
198
199 spin_lock_init(&br->lock);
200 INIT_LIST_HEAD(&br->port_list);
201 spin_lock_init(&br->hash_lock);
202
203 br->bridge_id.prio[0] = 0x80;
204 br->bridge_id.prio[1] = 0x00;
205
206 memcpy(br->group_addr, br_group_address, ETH_ALEN);
207
208 br->feature_mask = dev->features;
209 br->stp_enabled = BR_NO_STP;
210 br->designated_root = br->bridge_id;
211 br->root_path_cost = 0;
212 br->root_port = 0;
213 br->bridge_max_age = br->max_age = 20 * HZ;
214 br->bridge_hello_time = br->hello_time = 2 * HZ;
215 br->bridge_forward_delay = br->forward_delay = 15 * HZ;
216 br->topology_change = 0;
217 br->topology_change_detected = 0;
218 br->ageing_time = 300 * HZ;
219
220 br_netfilter_rtable_init(br);
221
222 br_stp_timer_init(br);
223 br_multicast_init(br);
224
225 return dev;
226}
227
228/* find an available port number */ 178/* find an available port number */
229static int find_portno(struct net_bridge *br) 179static int find_portno(struct net_bridge *br)
230{ 180{
@@ -277,42 +227,19 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br,
277 return p; 227 return p;
278} 228}
279 229
280static struct device_type br_type = {
281 .name = "bridge",
282};
283
284int br_add_bridge(struct net *net, const char *name) 230int br_add_bridge(struct net *net, const char *name)
285{ 231{
286 struct net_device *dev; 232 struct net_device *dev;
287 int ret;
288 233
289 dev = new_bridge_dev(net, name); 234 dev = alloc_netdev(sizeof(struct net_bridge), name,
235 br_dev_setup);
236
290 if (!dev) 237 if (!dev)
291 return -ENOMEM; 238 return -ENOMEM;
292 239
293 rtnl_lock(); 240 dev_net_set(dev, net);
294 if (strchr(dev->name, '%')) {
295 ret = dev_alloc_name(dev, dev->name);
296 if (ret < 0)
297 goto out_free;
298 }
299
300 SET_NETDEV_DEVTYPE(dev, &br_type);
301
302 ret = register_netdevice(dev);
303 if (ret)
304 goto out_free;
305
306 ret = br_sysfs_addbr(dev);
307 if (ret)
308 unregister_netdevice(dev);
309 out:
310 rtnl_unlock();
311 return ret;
312 241
313out_free: 242 return register_netdev(dev);
314 free_netdev(dev);
315 goto out;
316} 243}
317 244
318int br_del_bridge(struct net *net, const char *name) 245int br_del_bridge(struct net *net, const char *name)
@@ -364,15 +291,15 @@ int br_min_mtu(const struct net_bridge *br)
364/* 291/*
365 * Recomputes features using slave's features 292 * Recomputes features using slave's features
366 */ 293 */
367void br_features_recompute(struct net_bridge *br) 294u32 br_features_recompute(struct net_bridge *br, u32 features)
368{ 295{
369 struct net_bridge_port *p; 296 struct net_bridge_port *p;
370 u32 features, mask; 297 u32 mask;
371 298
372 features = mask = br->feature_mask;
373 if (list_empty(&br->port_list)) 299 if (list_empty(&br->port_list))
374 goto done; 300 return features;
375 301
302 mask = features;
376 features &= ~NETIF_F_ONE_FOR_ALL; 303 features &= ~NETIF_F_ONE_FOR_ALL;
377 304
378 list_for_each_entry(p, &br->port_list, list) { 305 list_for_each_entry(p, &br->port_list, list) {
@@ -380,8 +307,7 @@ void br_features_recompute(struct net_bridge *br)
380 p->dev->features, mask); 307 p->dev->features, mask);
381 } 308 }
382 309
383done: 310 return features;
384 br->dev->features = netdev_fix_features(br->dev, features);
385} 311}
386 312
387/* called with RTNL */ 313/* called with RTNL */
@@ -446,9 +372,10 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
446 372
447 list_add_rcu(&p->list, &br->port_list); 373 list_add_rcu(&p->list, &br->port_list);
448 374
375 netdev_update_features(br->dev);
376
449 spin_lock_bh(&br->lock); 377 spin_lock_bh(&br->lock);
450 changed_addr = br_stp_recalculate_bridge_id(br); 378 changed_addr = br_stp_recalculate_bridge_id(br);
451 br_features_recompute(br);
452 379
453 if ((dev->flags & IFF_UP) && netif_carrier_ok(dev) && 380 if ((dev->flags & IFF_UP) && netif_carrier_ok(dev) &&
454 (br->dev->flags & IFF_UP)) 381 (br->dev->flags & IFF_UP))
@@ -496,9 +423,10 @@ int br_del_if(struct net_bridge *br, struct net_device *dev)
496 423
497 spin_lock_bh(&br->lock); 424 spin_lock_bh(&br->lock);
498 br_stp_recalculate_bridge_id(br); 425 br_stp_recalculate_bridge_id(br);
499 br_features_recompute(br);
500 spin_unlock_bh(&br->lock); 426 spin_unlock_bh(&br->lock);
501 427
428 netdev_update_features(br->dev);
429
502 return 0; 430 return 0;
503} 431}
504 432
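
br_features_recompute() changing from a void function that wrote br->dev->features into a pure mask transformation, together with the netdev_update_features() calls now placed in br_add_if()/br_del_if(), hands feature recomputation over to the core. A plausible sketch of the bridge-side hook this enables; the callback name and its home in br_device.c are assumptions, since that file is outside these hunks:

    /* Hedged sketch: plugging the mask-in/mask-out helper into the
     * ndo_fix_features path (u32 feature masks, as in this kernel). */
    static u32 br_fix_features(struct net_device *dev, u32 features)
    {
            struct net_bridge *br = netdev_priv(dev);

            return br_features_recompute(br, features);
    }
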
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 0c7badad62af..f3ac1e858ee1 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -98,9 +98,10 @@ int br_handle_frame_finish(struct sk_buff *skb)
98 } 98 }
99 99
100 if (skb) { 100 if (skb) {
101 if (dst) 101 if (dst) {
102 dst->used = jiffies;
102 br_forward(dst->dst, skb, skb2); 103 br_forward(dst->dst, skb, skb2);
103 else 104 } else
104 br_flood_forward(br, skb, skb2); 105 br_flood_forward(br, skb, skb2);
105 } 106 }
106 107
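
Stamping dst->used only on this forwarding fast path, while br_fdb_update() keeps writing fdb->updated on learning, is what lets fdb_fill_info() report ndm_used and ndm_updated independently. Ageing decisions key off updated alone; a sketch of the expiry test consistent with the new fields, assuming hold_time() is the existing br_fdb.c helper returning the ageing interval in jiffies:

    /* Sketch of the expiry check against the renamed 'updated' stamp. */
    static inline int has_expired(const struct net_bridge *br,
                                  const struct net_bridge_fdb_entry *fdb)
    {
            return !fdb->is_static &&
                   time_before_eq(fdb->updated + hold_time(br), jiffies);
    }
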
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
index 3d9fca0e3370..7222fe1d5460 100644
--- a/net/bridge/br_ioctl.c
+++ b/net/bridge/br_ioctl.c
@@ -181,40 +181,19 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
181 if (!capable(CAP_NET_ADMIN)) 181 if (!capable(CAP_NET_ADMIN))
182 return -EPERM; 182 return -EPERM;
183 183
184 spin_lock_bh(&br->lock); 184 return br_set_forward_delay(br, args[1]);
185 br->bridge_forward_delay = clock_t_to_jiffies(args[1]);
186 if (br_is_root_bridge(br))
187 br->forward_delay = br->bridge_forward_delay;
188 spin_unlock_bh(&br->lock);
189 return 0;
190 185
191 case BRCTL_SET_BRIDGE_HELLO_TIME: 186 case BRCTL_SET_BRIDGE_HELLO_TIME:
192 {
193 unsigned long t = clock_t_to_jiffies(args[1]);
194 if (!capable(CAP_NET_ADMIN)) 187 if (!capable(CAP_NET_ADMIN))
195 return -EPERM; 188 return -EPERM;
196 189
197 if (t < HZ) 190 return br_set_hello_time(br, args[1]);
198 return -EINVAL;
199
200 spin_lock_bh(&br->lock);
201 br->bridge_hello_time = t;
202 if (br_is_root_bridge(br))
203 br->hello_time = br->bridge_hello_time;
204 spin_unlock_bh(&br->lock);
205 return 0;
206 }
207 191
208 case BRCTL_SET_BRIDGE_MAX_AGE: 192 case BRCTL_SET_BRIDGE_MAX_AGE:
209 if (!capable(CAP_NET_ADMIN)) 193 if (!capable(CAP_NET_ADMIN))
210 return -EPERM; 194 return -EPERM;
211 195
212 spin_lock_bh(&br->lock); 196 return br_set_max_age(br, args[1]);
213 br->bridge_max_age = clock_t_to_jiffies(args[1]);
214 if (br_is_root_bridge(br))
215 br->max_age = br->bridge_max_age;
216 spin_unlock_bh(&br->lock);
217 return 0;
218 197
219 case BRCTL_SET_AGEING_TIME: 198 case BRCTL_SET_AGEING_TIME:
220 if (!capable(CAP_NET_ADMIN)) 199 if (!capable(CAP_NET_ADMIN))
@@ -275,19 +254,16 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
275 case BRCTL_SET_PORT_PRIORITY: 254 case BRCTL_SET_PORT_PRIORITY:
276 { 255 {
277 struct net_bridge_port *p; 256 struct net_bridge_port *p;
278 int ret = 0; 257 int ret;
279 258
280 if (!capable(CAP_NET_ADMIN)) 259 if (!capable(CAP_NET_ADMIN))
281 return -EPERM; 260 return -EPERM;
282 261
283 if (args[2] >= (1<<(16-BR_PORT_BITS)))
284 return -ERANGE;
285
286 spin_lock_bh(&br->lock); 262 spin_lock_bh(&br->lock);
287 if ((p = br_get_port(br, args[1])) == NULL) 263 if ((p = br_get_port(br, args[1])) == NULL)
288 ret = -EINVAL; 264 ret = -EINVAL;
289 else 265 else
290 br_stp_set_port_priority(p, args[2]); 266 ret = br_stp_set_port_priority(p, args[2]);
291 spin_unlock_bh(&br->lock); 267 spin_unlock_bh(&br->lock);
292 return ret; 268 return ret;
293 } 269 }
@@ -295,15 +271,17 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
295 case BRCTL_SET_PATH_COST: 271 case BRCTL_SET_PATH_COST:
296 { 272 {
297 struct net_bridge_port *p; 273 struct net_bridge_port *p;
298 int ret = 0; 274 int ret;
299 275
300 if (!capable(CAP_NET_ADMIN)) 276 if (!capable(CAP_NET_ADMIN))
301 return -EPERM; 277 return -EPERM;
302 278
279 spin_lock_bh(&br->lock);
303 if ((p = br_get_port(br, args[1])) == NULL) 280 if ((p = br_get_port(br, args[1])) == NULL)
304 ret = -EINVAL; 281 ret = -EINVAL;
305 else 282 else
306 br_stp_set_path_cost(p, args[2]); 283 ret = br_stp_set_path_cost(p, args[2]);
284 spin_unlock_bh(&br->lock);
307 285
308 return ret; 286 return ret;
309 } 287 }
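
Folding the open-coded bodies into br_set_forward_delay(), br_set_hello_time() and br_set_max_age() gives the legacy brctl path the same range validation as sysfs, so out-of-range values now fail with -ERANGE instead of being accepted silently. The ABI itself is untouched: an args[4] array passed through SIOCDEVPRIVATE. A hypothetical caller, with values in USER_HZ clock ticks:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <net/if.h>
    #include <linux/if_bridge.h>
    #include <linux/sockios.h>

    /* Set hello_time on 'brname' to 2 s (200 ticks at USER_HZ == 100).
     * 'fd' is any socket, e.g. socket(AF_INET, SOCK_DGRAM, 0). */
    static int set_hello_time(int fd, const char *brname)
    {
            unsigned long args[4] = { BRCTL_SET_BRIDGE_HELLO_TIME, 200, 0, 0 };
            struct ifreq ifr;

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, brname, IFNAMSIZ - 1);
            ifr.ifr_data = (char *)args;

            return ioctl(fd, SIOCDEVPRIVATE, &ifr); /* -ERANGE when out of bounds */
    }
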
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 59660c909a7c..2f14eafdeeab 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -413,7 +413,7 @@ out:
413 413
414#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 414#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
415static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br, 415static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
416 struct in6_addr *group) 416 const struct in6_addr *group)
417{ 417{
418 struct sk_buff *skb; 418 struct sk_buff *skb;
419 struct ipv6hdr *ip6h; 419 struct ipv6hdr *ip6h;
@@ -1115,7 +1115,7 @@ static int br_ip4_multicast_query(struct net_bridge *br,
1115 struct net_bridge_port *port, 1115 struct net_bridge_port *port,
1116 struct sk_buff *skb) 1116 struct sk_buff *skb)
1117{ 1117{
1118 struct iphdr *iph = ip_hdr(skb); 1118 const struct iphdr *iph = ip_hdr(skb);
1119 struct igmphdr *ih = igmp_hdr(skb); 1119 struct igmphdr *ih = igmp_hdr(skb);
1120 struct net_bridge_mdb_entry *mp; 1120 struct net_bridge_mdb_entry *mp;
1121 struct igmpv3_query *ih3; 1121 struct igmpv3_query *ih3;
@@ -1190,7 +1190,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
1190 struct net_bridge_port *port, 1190 struct net_bridge_port *port,
1191 struct sk_buff *skb) 1191 struct sk_buff *skb)
1192{ 1192{
1193 struct ipv6hdr *ip6h = ipv6_hdr(skb); 1193 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
1194 struct mld_msg *mld = (struct mld_msg *) icmp6_hdr(skb); 1194 struct mld_msg *mld = (struct mld_msg *) icmp6_hdr(skb);
1195 struct net_bridge_mdb_entry *mp; 1195 struct net_bridge_mdb_entry *mp;
1196 struct mld2_query *mld2q; 1196 struct mld2_query *mld2q;
@@ -1198,7 +1198,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
1198 struct net_bridge_port_group __rcu **pp; 1198 struct net_bridge_port_group __rcu **pp;
1199 unsigned long max_delay; 1199 unsigned long max_delay;
1200 unsigned long now = jiffies; 1200 unsigned long now = jiffies;
1201 struct in6_addr *group = NULL; 1201 const struct in6_addr *group = NULL;
1202 int err = 0; 1202 int err = 0;
1203 1203
1204 spin_lock(&br->multicast_lock); 1204 spin_lock(&br->multicast_lock);
@@ -1356,7 +1356,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
1356 struct sk_buff *skb) 1356 struct sk_buff *skb)
1357{ 1357{
1358 struct sk_buff *skb2 = skb; 1358 struct sk_buff *skb2 = skb;
1359 struct iphdr *iph; 1359 const struct iphdr *iph;
1360 struct igmphdr *ih; 1360 struct igmphdr *ih;
1361 unsigned len; 1361 unsigned len;
1362 unsigned offset; 1362 unsigned offset;
@@ -1452,7 +1452,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
1452 struct sk_buff *skb) 1452 struct sk_buff *skb)
1453{ 1453{
1454 struct sk_buff *skb2; 1454 struct sk_buff *skb2;
1455 struct ipv6hdr *ip6h; 1455 const struct ipv6hdr *ip6h;
1456 struct icmp6hdr *icmp6h; 1456 struct icmp6hdr *icmp6h;
1457 u8 nexthdr; 1457 u8 nexthdr;
1458 unsigned len; 1458 unsigned len;
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 74ef4d4846a4..e1f5ec75e91c 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -219,7 +219,7 @@ static inline void nf_bridge_update_protocol(struct sk_buff *skb)
219static int br_parse_ip_options(struct sk_buff *skb) 219static int br_parse_ip_options(struct sk_buff *skb)
220{ 220{
221 struct ip_options *opt; 221 struct ip_options *opt;
222 struct iphdr *iph; 222 const struct iphdr *iph;
223 struct net_device *dev = skb->dev; 223 struct net_device *dev = skb->dev;
224 u32 len; 224 u32 len;
225 225
@@ -554,7 +554,7 @@ static unsigned int br_nf_pre_routing_ipv6(unsigned int hook,
554 const struct net_device *out, 554 const struct net_device *out,
555 int (*okfn)(struct sk_buff *)) 555 int (*okfn)(struct sk_buff *))
556{ 556{
557 struct ipv6hdr *hdr; 557 const struct ipv6hdr *hdr;
558 u32 pkt_len; 558 u32 pkt_len;
559 559
560 if (skb->len < sizeof(struct ipv6hdr)) 560 if (skb->len < sizeof(struct ipv6hdr))
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index f8bf4c7f842c..ffb0dc4cc0e8 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -12,9 +12,11 @@
12 12
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/slab.h> 14#include <linux/slab.h>
15#include <linux/etherdevice.h>
15#include <net/rtnetlink.h> 16#include <net/rtnetlink.h>
16#include <net/net_namespace.h> 17#include <net/net_namespace.h>
17#include <net/sock.h> 18#include <net/sock.h>
19
18#include "br_private.h" 20#include "br_private.h"
19 21
20static inline size_t br_nlmsg_size(void) 22static inline size_t br_nlmsg_size(void)
@@ -118,8 +120,9 @@ static int br_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
118 int idx; 120 int idx;
119 121
120 idx = 0; 122 idx = 0;
121 for_each_netdev(net, dev) { 123 rcu_read_lock();
122 struct net_bridge_port *port = br_port_get_rtnl(dev); 124 for_each_netdev_rcu(net, dev) {
125 struct net_bridge_port *port = br_port_get_rcu(dev);
123 126
124 /* not a bridge port */ 127 /* not a bridge port */
125 if (!port || idx < cb->args[0]) 128 if (!port || idx < cb->args[0])
@@ -133,7 +136,7 @@ static int br_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
133skip: 136skip:
134 ++idx; 137 ++idx;
135 } 138 }
136 139 rcu_read_unlock();
137 cb->args[0] = idx; 140 cb->args[0] = idx;
138 141
139 return skb->len; 142 return skb->len;
@@ -188,20 +191,61 @@ static int br_rtm_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
188 return 0; 191 return 0;
189} 192}
190 193
194static int br_validate(struct nlattr *tb[], struct nlattr *data[])
195{
196 if (tb[IFLA_ADDRESS]) {
197 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
198 return -EINVAL;
199 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
200 return -EADDRNOTAVAIL;
201 }
202
203 return 0;
204}
205
206static struct rtnl_link_ops br_link_ops __read_mostly = {
207 .kind = "bridge",
208 .priv_size = sizeof(struct net_bridge),
209 .setup = br_dev_setup,
210 .validate = br_validate,
211};
191 212
192int __init br_netlink_init(void) 213int __init br_netlink_init(void)
193{ 214{
194 if (__rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, br_dump_ifinfo)) 215 int err;
195 return -ENOBUFS;
196 216
197 /* Only the first call to __rtnl_register can fail */ 217 err = rtnl_link_register(&br_link_ops);
198 __rtnl_register(PF_BRIDGE, RTM_SETLINK, br_rtm_setlink, NULL); 218 if (err < 0)
219 goto err1;
220
221 err = __rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, br_dump_ifinfo);
222 if (err)
223 goto err2;
224 err = __rtnl_register(PF_BRIDGE, RTM_SETLINK, br_rtm_setlink, NULL);
225 if (err)
226 goto err3;
227 err = __rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, br_fdb_add, NULL);
228 if (err)
229 goto err3;
230 err = __rtnl_register(PF_BRIDGE, RTM_DELNEIGH, br_fdb_delete, NULL);
231 if (err)
232 goto err3;
233 err = __rtnl_register(PF_BRIDGE, RTM_GETNEIGH, NULL, br_fdb_dump);
234 if (err)
235 goto err3;
199 236
200 return 0; 237 return 0;
238
239err3:
240 rtnl_unregister_all(PF_BRIDGE);
241err2:
242 rtnl_link_unregister(&br_link_ops);
243err1:
244 return err;
201} 245}
202 246
203void __exit br_netlink_fini(void) 247void __exit br_netlink_fini(void)
204{ 248{
249 rtnl_link_unregister(&br_link_ops);
205 rtnl_unregister_all(PF_BRIDGE); 250 rtnl_unregister_all(PF_BRIDGE);
206} 251}
207
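
Registering rtnl_link_ops with .kind = "bridge" is what makes bridges creatable through the generic RTM_NEWLINK path (iproute2's "ip link add name br0 type bridge") rather than only via the bridge ioctls, with br_validate() vetting any IFLA_ADDRESS supplied at creation time. The message is a plain ifinfomsg plus IFLA_IFNAME and a nested IFLA_LINKINFO holding IFLA_INFO_KIND; a hypothetical raw-netlink sketch, error handling omitted:

    #include <string.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <linux/netlink.h>
    #include <linux/rtnetlink.h>

    static struct rtattr *add_attr(struct nlmsghdr *n, int type,
                                   const void *data, int len)
    {
            struct rtattr *rta =
                    (struct rtattr *)((char *)n + NLMSG_ALIGN(n->nlmsg_len));

            rta->rta_type = type;
            rta->rta_len = RTA_LENGTH(len);
            if (len)
                    memcpy(RTA_DATA(rta), data, len);
            n->nlmsg_len = NLMSG_ALIGN(n->nlmsg_len) + RTA_ALIGN(rta->rta_len);
            return rta;
    }

    int main(void)
    {
            struct {
                    struct nlmsghdr nlh;
                    struct ifinfomsg ifi;
                    char attrs[192];
            } req;
            struct nlmsghdr *n = &req.nlh;
            struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };
            struct rtattr *linkinfo;
            int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

            memset(&req, 0, sizeof(req));
            n->nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
            n->nlmsg_type = RTM_NEWLINK;
            n->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL;

            add_attr(n, IFLA_IFNAME, "br0", 4);             /* include the NUL */
            linkinfo = add_attr(n, IFLA_LINKINFO, NULL, 0); /* open the nest */
            add_attr(n, IFLA_INFO_KIND, "bridge", 6);
            /* close the nest: nested length runs to the end of the message */
            linkinfo->rta_len = (char *)n + n->nlmsg_len - (char *)linkinfo;

            sendto(fd, n, n->nlmsg_len, 0,
                   (struct sockaddr *)&kernel, sizeof(kernel));
            close(fd);
            return 0;
    }
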
diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c
index 7d337c9b6082..6545ee9591d1 100644
--- a/net/bridge/br_notify.c
+++ b/net/bridge/br_notify.c
@@ -36,6 +36,12 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
36 struct net_bridge *br; 36 struct net_bridge *br;
37 int err; 37 int err;
38 38
 39	/* registration of the bridge completed, add sysfs entries */
40 if ((dev->priv_flags & IFF_EBRIDGE) && event == NETDEV_REGISTER) {
41 br_sysfs_addbr(dev);
42 return NOTIFY_DONE;
43 }
44
39 /* not a port of a bridge */ 45 /* not a port of a bridge */
40 p = br_port_get_rtnl(dev); 46 p = br_port_get_rtnl(dev);
41 if (!p) 47 if (!p)
@@ -60,10 +66,7 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
60 break; 66 break;
61 67
62 case NETDEV_FEAT_CHANGE: 68 case NETDEV_FEAT_CHANGE:
63 spin_lock_bh(&br->lock); 69 netdev_update_features(br->dev);
64 if (netif_running(br->dev))
65 br_features_recompute(br);
66 spin_unlock_bh(&br->lock);
67 break; 70 break;
68 71
69 case NETDEV_DOWN: 72 case NETDEV_DOWN:
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 387013d33745..54578f274d85 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -64,7 +64,8 @@ struct net_bridge_fdb_entry
64 struct net_bridge_port *dst; 64 struct net_bridge_port *dst;
65 65
66 struct rcu_head rcu; 66 struct rcu_head rcu;
67 unsigned long ageing_timer; 67 unsigned long updated;
68 unsigned long used;
68 mac_addr addr; 69 mac_addr addr;
69 unsigned char is_local; 70 unsigned char is_local;
70 unsigned char is_static; 71 unsigned char is_static;
@@ -182,7 +183,6 @@ struct net_bridge
182 struct br_cpu_netstats __percpu *stats; 183 struct br_cpu_netstats __percpu *stats;
183 spinlock_t hash_lock; 184 spinlock_t hash_lock;
184 struct hlist_head hash[BR_HASH_SIZE]; 185 struct hlist_head hash[BR_HASH_SIZE];
185 u32 feature_mask;
186#ifdef CONFIG_BRIDGE_NETFILTER 186#ifdef CONFIG_BRIDGE_NETFILTER
187 struct rtable fake_rtable; 187 struct rtable fake_rtable;
188 bool nf_call_iptables; 188 bool nf_call_iptables;
@@ -353,6 +353,9 @@ extern int br_fdb_insert(struct net_bridge *br,
353extern void br_fdb_update(struct net_bridge *br, 353extern void br_fdb_update(struct net_bridge *br,
354 struct net_bridge_port *source, 354 struct net_bridge_port *source,
355 const unsigned char *addr); 355 const unsigned char *addr);
356extern int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb);
357extern int br_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
358extern int br_fdb_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
356 359
357/* br_forward.c */ 360/* br_forward.c */
358extern void br_deliver(const struct net_bridge_port *to, 361extern void br_deliver(const struct net_bridge_port *to,
@@ -375,7 +378,7 @@ extern int br_add_if(struct net_bridge *br,
375extern int br_del_if(struct net_bridge *br, 378extern int br_del_if(struct net_bridge *br,
376 struct net_device *dev); 379 struct net_device *dev);
377extern int br_min_mtu(const struct net_bridge *br); 380extern int br_min_mtu(const struct net_bridge *br);
378extern void br_features_recompute(struct net_bridge *br); 381extern u32 br_features_recompute(struct net_bridge *br, u32 features);
379 382
380/* br_input.c */ 383/* br_input.c */
381extern int br_handle_frame_finish(struct sk_buff *skb); 384extern int br_handle_frame_finish(struct sk_buff *skb);
@@ -491,6 +494,11 @@ extern struct net_bridge_port *br_get_port(struct net_bridge *br,
491extern void br_init_port(struct net_bridge_port *p); 494extern void br_init_port(struct net_bridge_port *p);
492extern void br_become_designated_port(struct net_bridge_port *p); 495extern void br_become_designated_port(struct net_bridge_port *p);
493 496
497extern int br_set_forward_delay(struct net_bridge *br, unsigned long x);
498extern int br_set_hello_time(struct net_bridge *br, unsigned long x);
499extern int br_set_max_age(struct net_bridge *br, unsigned long x);
500
501
494/* br_stp_if.c */ 502/* br_stp_if.c */
495extern void br_stp_enable_bridge(struct net_bridge *br); 503extern void br_stp_enable_bridge(struct net_bridge *br);
496extern void br_stp_disable_bridge(struct net_bridge *br); 504extern void br_stp_disable_bridge(struct net_bridge *br);
@@ -501,10 +509,10 @@ extern bool br_stp_recalculate_bridge_id(struct net_bridge *br);
501extern void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *a); 509extern void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *a);
502extern void br_stp_set_bridge_priority(struct net_bridge *br, 510extern void br_stp_set_bridge_priority(struct net_bridge *br,
503 u16 newprio); 511 u16 newprio);
504extern void br_stp_set_port_priority(struct net_bridge_port *p, 512extern int br_stp_set_port_priority(struct net_bridge_port *p,
505 u8 newprio); 513 unsigned long newprio);
506extern void br_stp_set_path_cost(struct net_bridge_port *p, 514extern int br_stp_set_path_cost(struct net_bridge_port *p,
507 u32 path_cost); 515 unsigned long path_cost);
508extern ssize_t br_show_bridge_id(char *buf, const struct bridge_id *id); 516extern ssize_t br_show_bridge_id(char *buf, const struct bridge_id *id);
509 517
510/* br_stp_bpdu.c */ 518/* br_stp_bpdu.c */
diff --git a/net/bridge/br_private_stp.h b/net/bridge/br_private_stp.h
index 8b650f7fbfa0..642ef47a867e 100644
--- a/net/bridge/br_private_stp.h
+++ b/net/bridge/br_private_stp.h
@@ -16,6 +16,19 @@
16#define BPDU_TYPE_CONFIG 0 16#define BPDU_TYPE_CONFIG 0
17#define BPDU_TYPE_TCN 0x80 17#define BPDU_TYPE_TCN 0x80
18 18
19/* IEEE 802.1D-1998 timer values */
20#define BR_MIN_HELLO_TIME (1*HZ)
21#define BR_MAX_HELLO_TIME (10*HZ)
22
23#define BR_MIN_FORWARD_DELAY (2*HZ)
24#define BR_MAX_FORWARD_DELAY (30*HZ)
25
26#define BR_MIN_MAX_AGE (6*HZ)
27#define BR_MAX_MAX_AGE (40*HZ)
28
29#define BR_MIN_PATH_COST 1
30#define BR_MAX_PATH_COST 65535
31
19struct br_config_bpdu 32struct br_config_bpdu
20{ 33{
21 unsigned topology_change:1; 34 unsigned topology_change:1;
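
Because the constants are multiples of HZ, the accepted windows are fixed in wall-clock terms whatever the tick rate; in seconds they correspond to the 802.1D-1998 recommended ranges:

    /* hello_time:     1 .. 10 s
     * forward_delay:  2 .. 30 s
     * max_age:        6 .. 40 s
     * path_cost:      1 .. 65535 (dimensionless)
     */
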
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index 7370d14f634d..bb4383e84de9 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -484,3 +484,51 @@ void br_received_tcn_bpdu(struct net_bridge_port *p)
484 br_topology_change_acknowledge(p); 484 br_topology_change_acknowledge(p);
485 } 485 }
486} 486}
487
488/* Change bridge STP parameter */
489int br_set_hello_time(struct net_bridge *br, unsigned long val)
490{
491 unsigned long t = clock_t_to_jiffies(val);
492
493 if (t < BR_MIN_HELLO_TIME || t > BR_MAX_HELLO_TIME)
494 return -ERANGE;
495
496 spin_lock_bh(&br->lock);
497 br->bridge_hello_time = t;
498 if (br_is_root_bridge(br))
499 br->hello_time = br->bridge_hello_time;
500 spin_unlock_bh(&br->lock);
501 return 0;
502}
503
504int br_set_max_age(struct net_bridge *br, unsigned long val)
505{
506 unsigned long t = clock_t_to_jiffies(val);
507
508 if (t < BR_MIN_MAX_AGE || t > BR_MAX_MAX_AGE)
509 return -ERANGE;
510
511 spin_lock_bh(&br->lock);
512 br->bridge_max_age = t;
513 if (br_is_root_bridge(br))
514 br->max_age = br->bridge_max_age;
515 spin_unlock_bh(&br->lock);
516 return 0;
517
518}
519
520int br_set_forward_delay(struct net_bridge *br, unsigned long val)
521{
522 unsigned long t = clock_t_to_jiffies(val);
523
524 if (br->stp_enabled != BR_NO_STP &&
525 (t < BR_MIN_FORWARD_DELAY || t > BR_MAX_FORWARD_DELAY))
526 return -ERANGE;
527
528 spin_lock_bh(&br->lock);
529 br->bridge_forward_delay = t;
530 if (br_is_root_bridge(br))
531 br->forward_delay = br->bridge_forward_delay;
532 spin_unlock_bh(&br->lock);
533 return 0;
534}
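
Each setter converts the userspace clock_t value to jiffies before range-checking, so the bounds hold exactly regardless of HZ; note also that br_set_forward_delay() only enforces its window while STP is enabled, preserving the old behaviour for non-STP bridges. A worked example, assuming the usual USER_HZ of 100:

    /* Assuming USER_HZ == 100:
     * br_set_hello_time(br, 200): t = clock_t_to_jiffies(200) = 2*HZ,
     *   inside [BR_MIN_HELLO_TIME, BR_MAX_HELLO_TIME] = [1*HZ, 10*HZ] -> 0
     * br_set_hello_time(br, 50):  t = HZ/2 < BR_MIN_HELLO_TIME        -> -ERANGE
     */
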
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 9b61d09de9b9..6f615b8192f4 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -20,7 +20,7 @@
20 20
21 21
22/* Port id is composed of priority and port number. 22/* Port id is composed of priority and port number.
23 * NB: least significant bits of priority are dropped to 23 * NB: some bits of priority are dropped to
24 * make room for more ports. 24 * make room for more ports.
25 */ 25 */
26static inline port_id br_make_port_id(__u8 priority, __u16 port_no) 26static inline port_id br_make_port_id(__u8 priority, __u16 port_no)
@@ -29,6 +29,8 @@ static inline port_id br_make_port_id(__u8 priority, __u16 port_no)
29 | (port_no & ((1<<BR_PORT_BITS)-1)); 29 | (port_no & ((1<<BR_PORT_BITS)-1));
30} 30}
31 31
32#define BR_MAX_PORT_PRIORITY ((u16)~0 >> BR_PORT_BITS)
33
32/* called under bridge lock */ 34/* called under bridge lock */
33void br_init_port(struct net_bridge_port *p) 35void br_init_port(struct net_bridge_port *p)
34{ 36{
@@ -255,10 +257,14 @@ void br_stp_set_bridge_priority(struct net_bridge *br, u16 newprio)
255} 257}
256 258
257/* called under bridge lock */ 259/* called under bridge lock */
258void br_stp_set_port_priority(struct net_bridge_port *p, u8 newprio) 260int br_stp_set_port_priority(struct net_bridge_port *p, unsigned long newprio)
259{ 261{
260 port_id new_port_id = br_make_port_id(newprio, p->port_no); 262 port_id new_port_id;
263
264 if (newprio > BR_MAX_PORT_PRIORITY)
265 return -ERANGE;
261 266
267 new_port_id = br_make_port_id(newprio, p->port_no);
262 if (br_is_designated_port(p)) 268 if (br_is_designated_port(p))
263 p->designated_port = new_port_id; 269 p->designated_port = new_port_id;
264 270
@@ -269,14 +275,21 @@ void br_stp_set_port_priority(struct net_bridge_port *p, u8 newprio)
269 br_become_designated_port(p); 275 br_become_designated_port(p);
270 br_port_state_selection(p->br); 276 br_port_state_selection(p->br);
271 } 277 }
278
279 return 0;
272} 280}
273 281
274/* called under bridge lock */ 282/* called under bridge lock */
275void br_stp_set_path_cost(struct net_bridge_port *p, u32 path_cost) 283int br_stp_set_path_cost(struct net_bridge_port *p, unsigned long path_cost)
276{ 284{
285 if (path_cost < BR_MIN_PATH_COST ||
286 path_cost > BR_MAX_PATH_COST)
287 return -ERANGE;
288
277 p->path_cost = path_cost; 289 p->path_cost = path_cost;
278 br_configuration_update(p->br); 290 br_configuration_update(p->br);
279 br_port_state_selection(p->br); 291 br_port_state_selection(p->br);
292 return 0;
280} 293}
281 294
282ssize_t br_show_bridge_id(char *buf, const struct bridge_id *id) 295ssize_t br_show_bridge_id(char *buf, const struct bridge_id *id)
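
BR_MAX_PORT_PRIORITY falls straight out of how br_make_port_id() packs the 16-bit port id: the priority gets whatever is left after BR_PORT_BITS are reserved for the port number. Assuming BR_PORT_BITS is 10, as defined in br_private.h:

    /* Assuming BR_PORT_BITS == 10 (br_private.h):
     * BR_MAX_PORT_PRIORITY = (u16)~0 >> 10 = 0xffff >> 10 = 63,
     * so priorities 0..63 pass and anything larger fails with -ERANGE,
     * the same window the removed v >= 1 << (16 - BR_PORT_BITS) checks gave.
     */
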
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index 5c1e5559ebba..68b893ea8c3a 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -43,9 +43,7 @@ static ssize_t store_bridge_parm(struct device *d,
43 if (endp == buf) 43 if (endp == buf)
44 return -EINVAL; 44 return -EINVAL;
45 45
46 spin_lock_bh(&br->lock);
47 err = (*set)(br, val); 46 err = (*set)(br, val);
48 spin_unlock_bh(&br->lock);
49 return err ? err : len; 47 return err ? err : len;
50} 48}
51 49
@@ -57,20 +55,11 @@ static ssize_t show_forward_delay(struct device *d,
57 return sprintf(buf, "%lu\n", jiffies_to_clock_t(br->forward_delay)); 55 return sprintf(buf, "%lu\n", jiffies_to_clock_t(br->forward_delay));
58} 56}
59 57
60static int set_forward_delay(struct net_bridge *br, unsigned long val)
61{
62 unsigned long delay = clock_t_to_jiffies(val);
63 br->forward_delay = delay;
64 if (br_is_root_bridge(br))
65 br->bridge_forward_delay = delay;
66 return 0;
67}
68
69static ssize_t store_forward_delay(struct device *d, 58static ssize_t store_forward_delay(struct device *d,
70 struct device_attribute *attr, 59 struct device_attribute *attr,
71 const char *buf, size_t len) 60 const char *buf, size_t len)
72{ 61{
73 return store_bridge_parm(d, buf, len, set_forward_delay); 62 return store_bridge_parm(d, buf, len, br_set_forward_delay);
74} 63}
75static DEVICE_ATTR(forward_delay, S_IRUGO | S_IWUSR, 64static DEVICE_ATTR(forward_delay, S_IRUGO | S_IWUSR,
76 show_forward_delay, store_forward_delay); 65 show_forward_delay, store_forward_delay);
@@ -82,24 +71,11 @@ static ssize_t show_hello_time(struct device *d, struct device_attribute *attr,
82 jiffies_to_clock_t(to_bridge(d)->hello_time)); 71 jiffies_to_clock_t(to_bridge(d)->hello_time));
83} 72}
84 73
85static int set_hello_time(struct net_bridge *br, unsigned long val)
86{
87 unsigned long t = clock_t_to_jiffies(val);
88
89 if (t < HZ)
90 return -EINVAL;
91
92 br->hello_time = t;
93 if (br_is_root_bridge(br))
94 br->bridge_hello_time = t;
95 return 0;
96}
97
98static ssize_t store_hello_time(struct device *d, 74static ssize_t store_hello_time(struct device *d,
99 struct device_attribute *attr, const char *buf, 75 struct device_attribute *attr, const char *buf,
100 size_t len) 76 size_t len)
101{ 77{
102 return store_bridge_parm(d, buf, len, set_hello_time); 78 return store_bridge_parm(d, buf, len, br_set_hello_time);
103} 79}
104static DEVICE_ATTR(hello_time, S_IRUGO | S_IWUSR, show_hello_time, 80static DEVICE_ATTR(hello_time, S_IRUGO | S_IWUSR, show_hello_time,
105 store_hello_time); 81 store_hello_time);
@@ -111,19 +87,10 @@ static ssize_t show_max_age(struct device *d, struct device_attribute *attr,
111 jiffies_to_clock_t(to_bridge(d)->max_age)); 87 jiffies_to_clock_t(to_bridge(d)->max_age));
112} 88}
113 89
114static int set_max_age(struct net_bridge *br, unsigned long val)
115{
116 unsigned long t = clock_t_to_jiffies(val);
117 br->max_age = t;
118 if (br_is_root_bridge(br))
119 br->bridge_max_age = t;
120 return 0;
121}
122
123static ssize_t store_max_age(struct device *d, struct device_attribute *attr, 90static ssize_t store_max_age(struct device *d, struct device_attribute *attr,
124 const char *buf, size_t len) 91 const char *buf, size_t len)
125{ 92{
126 return store_bridge_parm(d, buf, len, set_max_age); 93 return store_bridge_parm(d, buf, len, br_set_max_age);
127} 94}
128static DEVICE_ATTR(max_age, S_IRUGO | S_IWUSR, show_max_age, store_max_age); 95static DEVICE_ATTR(max_age, S_IRUGO | S_IWUSR, show_max_age, store_max_age);
129 96
diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
index fd5799c9bc8d..6229b62749e8 100644
--- a/net/bridge/br_sysfs_if.c
+++ b/net/bridge/br_sysfs_if.c
@@ -23,7 +23,7 @@
23struct brport_attribute { 23struct brport_attribute {
24 struct attribute attr; 24 struct attribute attr;
25 ssize_t (*show)(struct net_bridge_port *, char *); 25 ssize_t (*show)(struct net_bridge_port *, char *);
26 ssize_t (*store)(struct net_bridge_port *, unsigned long); 26 int (*store)(struct net_bridge_port *, unsigned long);
27}; 27};
28 28
29#define BRPORT_ATTR(_name,_mode,_show,_store) \ 29#define BRPORT_ATTR(_name,_mode,_show,_store) \
@@ -38,27 +38,17 @@ static ssize_t show_path_cost(struct net_bridge_port *p, char *buf)
38{ 38{
39 return sprintf(buf, "%d\n", p->path_cost); 39 return sprintf(buf, "%d\n", p->path_cost);
40} 40}
41static ssize_t store_path_cost(struct net_bridge_port *p, unsigned long v) 41
42{
43 br_stp_set_path_cost(p, v);
44 return 0;
45}
46static BRPORT_ATTR(path_cost, S_IRUGO | S_IWUSR, 42static BRPORT_ATTR(path_cost, S_IRUGO | S_IWUSR,
47 show_path_cost, store_path_cost); 43 show_path_cost, br_stp_set_path_cost);
48 44
49static ssize_t show_priority(struct net_bridge_port *p, char *buf) 45static ssize_t show_priority(struct net_bridge_port *p, char *buf)
50{ 46{
51 return sprintf(buf, "%d\n", p->priority); 47 return sprintf(buf, "%d\n", p->priority);
52} 48}
53static ssize_t store_priority(struct net_bridge_port *p, unsigned long v) 49
54{
55 if (v >= (1<<(16-BR_PORT_BITS)))
56 return -ERANGE;
57 br_stp_set_port_priority(p, v);
58 return 0;
59}
60static BRPORT_ATTR(priority, S_IRUGO | S_IWUSR, 50static BRPORT_ATTR(priority, S_IRUGO | S_IWUSR,
61 show_priority, store_priority); 51 show_priority, br_stp_set_port_priority);
62 52
63static ssize_t show_designated_root(struct net_bridge_port *p, char *buf) 53static ssize_t show_designated_root(struct net_bridge_port *p, char *buf)
64{ 54{
@@ -136,7 +126,7 @@ static ssize_t show_hold_timer(struct net_bridge_port *p,
136} 126}
137static BRPORT_ATTR(hold_timer, S_IRUGO, show_hold_timer, NULL); 127static BRPORT_ATTR(hold_timer, S_IRUGO, show_hold_timer, NULL);
138 128
139static ssize_t store_flush(struct net_bridge_port *p, unsigned long v) 129static int store_flush(struct net_bridge_port *p, unsigned long v)
140{ 130{
141 br_fdb_delete_by_port(p->br, p, 0); // Don't delete local entry 131 br_fdb_delete_by_port(p->br, p, 0); // Don't delete local entry
142 return 0; 132 return 0;
@@ -148,7 +138,7 @@ static ssize_t show_hairpin_mode(struct net_bridge_port *p, char *buf)
148 int hairpin_mode = (p->flags & BR_HAIRPIN_MODE) ? 1 : 0; 138 int hairpin_mode = (p->flags & BR_HAIRPIN_MODE) ? 1 : 0;
149 return sprintf(buf, "%d\n", hairpin_mode); 139 return sprintf(buf, "%d\n", hairpin_mode);
150} 140}
151static ssize_t store_hairpin_mode(struct net_bridge_port *p, unsigned long v) 141static int store_hairpin_mode(struct net_bridge_port *p, unsigned long v)
152{ 142{
153 if (v) 143 if (v)
154 p->flags |= BR_HAIRPIN_MODE; 144 p->flags |= BR_HAIRPIN_MODE;
@@ -165,7 +155,7 @@ static ssize_t show_multicast_router(struct net_bridge_port *p, char *buf)
165 return sprintf(buf, "%d\n", p->multicast_router); 155 return sprintf(buf, "%d\n", p->multicast_router);
166} 156}
167 157
168static ssize_t store_multicast_router(struct net_bridge_port *p, 158static int store_multicast_router(struct net_bridge_port *p,
169 unsigned long v) 159 unsigned long v)
170{ 160{
171 return br_multicast_set_port_router(p, v); 161 return br_multicast_set_port_router(p, v);
diff --git a/net/caif/Makefile b/net/caif/Makefile
index 9d38e406e4a4..ebcd4e7e6f47 100644
--- a/net/caif/Makefile
+++ b/net/caif/Makefile
@@ -5,7 +5,7 @@ caif-y := caif_dev.o \
5 cffrml.o cfveil.o cfdbgl.o\ 5 cffrml.o cfveil.o cfdbgl.o\
6 cfserl.o cfdgml.o \ 6 cfserl.o cfdgml.o \
7 cfrfml.o cfvidl.o cfutill.o \ 7 cfrfml.o cfvidl.o cfutill.o \
8 cfsrvl.o cfpkt_skbuff.o caif_config_util.o 8 cfsrvl.o cfpkt_skbuff.o
9 9
10obj-$(CONFIG_CAIF) += caif.o 10obj-$(CONFIG_CAIF) += caif.o
11obj-$(CONFIG_CAIF_NETDEV) += chnl_net.o 11obj-$(CONFIG_CAIF_NETDEV) += chnl_net.o
diff --git a/net/caif/caif_config_util.c b/net/caif/caif_config_util.c
deleted file mode 100644
index d522d8c1703e..000000000000
--- a/net/caif/caif_config_util.c
+++ /dev/null
@@ -1,99 +0,0 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Author: Sjur Brendeland sjur.brandeland@stericsson.com
4 * License terms: GNU General Public License (GPL) version 2
5 */
6
7#include <linux/module.h>
8#include <linux/spinlock.h>
9#include <net/caif/cfctrl.h>
10#include <net/caif/cfcnfg.h>
11#include <net/caif/caif_dev.h>
12
13int connect_req_to_link_param(struct cfcnfg *cnfg,
14 struct caif_connect_request *s,
15 struct cfctrl_link_param *l)
16{
17 struct dev_info *dev_info;
18 enum cfcnfg_phy_preference pref;
19 int res;
20
21 memset(l, 0, sizeof(*l));
22 /* In caif protocol low value is high priority */
23 l->priority = CAIF_PRIO_MAX - s->priority + 1;
24
25 if (s->ifindex != 0){
26 res = cfcnfg_get_id_from_ifi(cnfg, s->ifindex);
27 if (res < 0)
28 return res;
29 l->phyid = res;
30 }
31 else {
32 switch (s->link_selector) {
33 case CAIF_LINK_HIGH_BANDW:
34 pref = CFPHYPREF_HIGH_BW;
35 break;
36 case CAIF_LINK_LOW_LATENCY:
37 pref = CFPHYPREF_LOW_LAT;
38 break;
39 default:
40 return -EINVAL;
41 }
42 dev_info = cfcnfg_get_phyid(cnfg, pref);
43 if (dev_info == NULL)
44 return -ENODEV;
45 l->phyid = dev_info->id;
46 }
47 switch (s->protocol) {
48 case CAIFPROTO_AT:
49 l->linktype = CFCTRL_SRV_VEI;
50 if (s->sockaddr.u.at.type == CAIF_ATTYPE_PLAIN)
51 l->chtype = 0x02;
52 else
53 l->chtype = s->sockaddr.u.at.type;
54 l->endpoint = 0x00;
55 break;
56 case CAIFPROTO_DATAGRAM:
57 l->linktype = CFCTRL_SRV_DATAGRAM;
58 l->chtype = 0x00;
59 l->u.datagram.connid = s->sockaddr.u.dgm.connection_id;
60 break;
61 case CAIFPROTO_DATAGRAM_LOOP:
62 l->linktype = CFCTRL_SRV_DATAGRAM;
63 l->chtype = 0x03;
64 l->endpoint = 0x00;
65 l->u.datagram.connid = s->sockaddr.u.dgm.connection_id;
66 break;
67 case CAIFPROTO_RFM:
68 l->linktype = CFCTRL_SRV_RFM;
69 l->u.datagram.connid = s->sockaddr.u.rfm.connection_id;
70 strncpy(l->u.rfm.volume, s->sockaddr.u.rfm.volume,
71 sizeof(l->u.rfm.volume)-1);
72 l->u.rfm.volume[sizeof(l->u.rfm.volume)-1] = 0;
73 break;
74 case CAIFPROTO_UTIL:
75 l->linktype = CFCTRL_SRV_UTIL;
76 l->endpoint = 0x00;
77 l->chtype = 0x00;
78 strncpy(l->u.utility.name, s->sockaddr.u.util.service,
79 sizeof(l->u.utility.name)-1);
80 l->u.utility.name[sizeof(l->u.utility.name)-1] = 0;
81 caif_assert(sizeof(l->u.utility.name) > 10);
82 l->u.utility.paramlen = s->param.size;
83 if (l->u.utility.paramlen > sizeof(l->u.utility.params))
84 l->u.utility.paramlen = sizeof(l->u.utility.params);
85
86 memcpy(l->u.utility.params, s->param.data,
87 l->u.utility.paramlen);
88
89 break;
90 case CAIFPROTO_DEBUG:
91 l->linktype = CFCTRL_SRV_DBG;
92 l->endpoint = s->sockaddr.u.dbg.service;
93 l->chtype = s->sockaddr.u.dbg.type;
94 break;
95 default:
96 return -EINVAL;
97 }
98 return 0;
99}
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index a42a408306e4..366ca0fb7a29 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -12,49 +12,51 @@
12#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__ 12#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
13 13
14#include <linux/version.h> 14#include <linux/version.h>
15#include <linux/module.h>
16#include <linux/kernel.h> 15#include <linux/kernel.h>
17#include <linux/if_arp.h> 16#include <linux/if_arp.h>
18#include <linux/net.h> 17#include <linux/net.h>
19#include <linux/netdevice.h> 18#include <linux/netdevice.h>
20#include <linux/skbuff.h> 19#include <linux/mutex.h>
21#include <linux/sched.h>
22#include <linux/wait.h>
23#include <net/netns/generic.h> 20#include <net/netns/generic.h>
24#include <net/net_namespace.h> 21#include <net/net_namespace.h>
25#include <net/pkt_sched.h> 22#include <net/pkt_sched.h>
26#include <net/caif/caif_device.h> 23#include <net/caif/caif_device.h>
27#include <net/caif/caif_dev.h>
28#include <net/caif/caif_layer.h> 24#include <net/caif/caif_layer.h>
29#include <net/caif/cfpkt.h> 25#include <net/caif/cfpkt.h>
30#include <net/caif/cfcnfg.h> 26#include <net/caif/cfcnfg.h>
31 27
32MODULE_LICENSE("GPL"); 28MODULE_LICENSE("GPL");
33#define TIMEOUT (HZ*5)
34 29
35/* Used for local tracking of the CAIF net devices */ 30/* Used for local tracking of the CAIF net devices */
36struct caif_device_entry { 31struct caif_device_entry {
37 struct cflayer layer; 32 struct cflayer layer;
38 struct list_head list; 33 struct list_head list;
39 atomic_t in_use;
40 atomic_t state;
41 u16 phyid;
42 struct net_device *netdev; 34 struct net_device *netdev;
43 wait_queue_head_t event; 35 int __percpu *pcpu_refcnt;
44}; 36};
45 37
46struct caif_device_entry_list { 38struct caif_device_entry_list {
47 struct list_head list; 39 struct list_head list;
 48	/* Protects simultaneous deletes in list */ 40	/* Protects simultaneous deletes in list */
49 spinlock_t lock; 41 struct mutex lock;
50}; 42};
51 43
52struct caif_net { 44struct caif_net {
45 struct cfcnfg *cfg;
53 struct caif_device_entry_list caifdevs; 46 struct caif_device_entry_list caifdevs;
54}; 47};
55 48
56static int caif_net_id; 49static int caif_net_id;
57static struct cfcnfg *cfg; 50
51struct cfcnfg *get_cfcnfg(struct net *net)
52{
53 struct caif_net *caifn;
54 BUG_ON(!net);
55 caifn = net_generic(net, caif_net_id);
56 BUG_ON(!caifn);
57 return caifn->cfg;
58}
59EXPORT_SYMBOL(get_cfcnfg);
58 60
59static struct caif_device_entry_list *caif_device_list(struct net *net) 61static struct caif_device_entry_list *caif_device_list(struct net *net)
60{ 62{
@@ -65,19 +67,39 @@ static struct caif_device_entry_list *caif_device_list(struct net *net)
65 return &caifn->caifdevs; 67 return &caifn->caifdevs;
66} 68}
67 69
70static void caifd_put(struct caif_device_entry *e)
71{
72 irqsafe_cpu_dec(*e->pcpu_refcnt);
73}
74
75static void caifd_hold(struct caif_device_entry *e)
76{
77 irqsafe_cpu_inc(*e->pcpu_refcnt);
78}
79
80static int caifd_refcnt_read(struct caif_device_entry *e)
81{
82 int i, refcnt = 0;
83 for_each_possible_cpu(i)
84 refcnt += *per_cpu_ptr(e->pcpu_refcnt, i);
85 return refcnt;
86}
87
68/* Allocate new CAIF device. */ 88/* Allocate new CAIF device. */
69static struct caif_device_entry *caif_device_alloc(struct net_device *dev) 89static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
70{ 90{
71 struct caif_device_entry_list *caifdevs; 91 struct caif_device_entry_list *caifdevs;
72 struct caif_device_entry *caifd; 92 struct caif_device_entry *caifd;
93
73 caifdevs = caif_device_list(dev_net(dev)); 94 caifdevs = caif_device_list(dev_net(dev));
74 BUG_ON(!caifdevs); 95 BUG_ON(!caifdevs);
96
75 caifd = kzalloc(sizeof(*caifd), GFP_ATOMIC); 97 caifd = kzalloc(sizeof(*caifd), GFP_ATOMIC);
76 if (!caifd) 98 if (!caifd)
77 return NULL; 99 return NULL;
100 caifd->pcpu_refcnt = alloc_percpu(int);
78 caifd->netdev = dev; 101 caifd->netdev = dev;
79 list_add(&caifd->list, &caifdevs->list); 102 dev_hold(dev);
80 init_waitqueue_head(&caifd->event);
81 return caifd; 103 return caifd;
82} 104}
83 105
@@ -87,98 +109,60 @@ static struct caif_device_entry *caif_get(struct net_device *dev)
87 caif_device_list(dev_net(dev)); 109 caif_device_list(dev_net(dev));
88 struct caif_device_entry *caifd; 110 struct caif_device_entry *caifd;
89 BUG_ON(!caifdevs); 111 BUG_ON(!caifdevs);
90 list_for_each_entry(caifd, &caifdevs->list, list) { 112 list_for_each_entry_rcu(caifd, &caifdevs->list, list) {
91 if (caifd->netdev == dev) 113 if (caifd->netdev == dev)
92 return caifd; 114 return caifd;
93 } 115 }
94 return NULL; 116 return NULL;
95} 117}
96 118
97static void caif_device_destroy(struct net_device *dev)
98{
99 struct caif_device_entry_list *caifdevs =
100 caif_device_list(dev_net(dev));
101 struct caif_device_entry *caifd;
102 ASSERT_RTNL();
103 if (dev->type != ARPHRD_CAIF)
104 return;
105
106 spin_lock_bh(&caifdevs->lock);
107 caifd = caif_get(dev);
108 if (caifd == NULL) {
109 spin_unlock_bh(&caifdevs->lock);
110 return;
111 }
112
113 list_del(&caifd->list);
114 spin_unlock_bh(&caifdevs->lock);
115
116 kfree(caifd);
117}
118
119static int transmit(struct cflayer *layer, struct cfpkt *pkt) 119static int transmit(struct cflayer *layer, struct cfpkt *pkt)
120{ 120{
121 int err;
121 struct caif_device_entry *caifd = 122 struct caif_device_entry *caifd =
122 container_of(layer, struct caif_device_entry, layer); 123 container_of(layer, struct caif_device_entry, layer);
123 struct sk_buff *skb, *skb2; 124 struct sk_buff *skb;
124 int ret = -EINVAL; 125
125 skb = cfpkt_tonative(pkt); 126 skb = cfpkt_tonative(pkt);
126 skb->dev = caifd->netdev; 127 skb->dev = caifd->netdev;
127 /*
128 * Don't allow SKB to be destroyed upon error, but signal resend
129 * notification to clients. We can't rely on the return value as
130 * congestion (NET_XMIT_CN) sometimes drops the packet, sometimes don't.
131 */
132 if (netif_queue_stopped(caifd->netdev))
133 return -EAGAIN;
134 skb2 = skb_get(skb);
135
136 ret = dev_queue_xmit(skb2);
137
138 if (!ret)
139 kfree_skb(skb);
140 else
141 return -EAGAIN;
142 128
143 return 0; 129 err = dev_queue_xmit(skb);
144} 130 if (err > 0)
131 err = -EIO;
145 132
146static int modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl) 133 return err;
147{
148 struct caif_device_entry *caifd;
149 struct caif_dev_common *caifdev;
150 caifd = container_of(layr, struct caif_device_entry, layer);
151 caifdev = netdev_priv(caifd->netdev);
152 if (ctrl == _CAIF_MODEMCMD_PHYIF_USEFULL) {
153 atomic_set(&caifd->in_use, 1);
154 wake_up_interruptible(&caifd->event);
155
156 } else if (ctrl == _CAIF_MODEMCMD_PHYIF_USELESS) {
157 atomic_set(&caifd->in_use, 0);
158 wake_up_interruptible(&caifd->event);
159 }
160 return 0;
161} 134}
162 135
163/* 136/*
164 * Stuff received packets to associated sockets. 137 * Stuff received packets into the CAIF stack.
165 * On error, returns non-zero and releases the skb. 138 * On error, returns non-zero and releases the skb.
166 */ 139 */
167static int receive(struct sk_buff *skb, struct net_device *dev, 140static int receive(struct sk_buff *skb, struct net_device *dev,
168 struct packet_type *pkttype, struct net_device *orig_dev) 141 struct packet_type *pkttype, struct net_device *orig_dev)
169{ 142{
170 struct net *net;
171 struct cfpkt *pkt; 143 struct cfpkt *pkt;
172 struct caif_device_entry *caifd; 144 struct caif_device_entry *caifd;
173 net = dev_net(dev); 145
174 pkt = cfpkt_fromnative(CAIF_DIR_IN, skb); 146 pkt = cfpkt_fromnative(CAIF_DIR_IN, skb);
147
148 rcu_read_lock();
175 caifd = caif_get(dev); 149 caifd = caif_get(dev);
176 if (!caifd || !caifd->layer.up || !caifd->layer.up->receive)
177 return NET_RX_DROP;
178 150
179 if (caifd->layer.up->receive(caifd->layer.up, pkt)) 151 if (!caifd || !caifd->layer.up || !caifd->layer.up->receive ||
152 !netif_oper_up(caifd->netdev)) {
153 rcu_read_unlock();
154 kfree_skb(skb);
180 return NET_RX_DROP; 155 return NET_RX_DROP;
156 }
157
158 /* Hold reference to netdevice while using CAIF stack */
159 caifd_hold(caifd);
160 rcu_read_unlock();
161
162 caifd->layer.up->receive(caifd->layer.up, pkt);
181 163
164 /* Release reference to stack upwards */
165 caifd_put(caifd);
182 return 0; 166 return 0;
183} 167}
184 168
@@ -189,15 +173,25 @@ static struct packet_type caif_packet_type __read_mostly = {
189 173
190static void dev_flowctrl(struct net_device *dev, int on) 174static void dev_flowctrl(struct net_device *dev, int on)
191{ 175{
192 struct caif_device_entry *caifd = caif_get(dev); 176 struct caif_device_entry *caifd;
193 if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) 177
178 rcu_read_lock();
179
180 caifd = caif_get(dev);
181 if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
182 rcu_read_unlock();
194 return; 183 return;
184 }
185
186 caifd_hold(caifd);
187 rcu_read_unlock();
195 188
196 caifd->layer.up->ctrlcmd(caifd->layer.up, 189 caifd->layer.up->ctrlcmd(caifd->layer.up,
197 on ? 190 on ?
198 _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND : 191 _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND :
199 _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND, 192 _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
200 caifd->layer.id); 193 caifd->layer.id);
194 caifd_put(caifd);
201} 195}
202 196
203/* notify Caif of device events */ 197/* notify Caif of device events */
@@ -208,37 +202,28 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
208 struct caif_device_entry *caifd = NULL; 202 struct caif_device_entry *caifd = NULL;
209 struct caif_dev_common *caifdev; 203 struct caif_dev_common *caifdev;
210 enum cfcnfg_phy_preference pref; 204 enum cfcnfg_phy_preference pref;
211 int res = -EINVAL;
212 enum cfcnfg_phy_type phy_type; 205 enum cfcnfg_phy_type phy_type;
206 struct cfcnfg *cfg;
207 struct caif_device_entry_list *caifdevs =
208 caif_device_list(dev_net(dev));
213 209
214 if (dev->type != ARPHRD_CAIF) 210 if (dev->type != ARPHRD_CAIF)
215 return 0; 211 return 0;
216 212
213 cfg = get_cfcnfg(dev_net(dev));
214 if (cfg == NULL)
215 return 0;
216
217 switch (what) { 217 switch (what) {
218 case NETDEV_REGISTER: 218 case NETDEV_REGISTER:
219 netdev_info(dev, "register\n");
220 caifd = caif_device_alloc(dev); 219 caifd = caif_device_alloc(dev);
221 if (caifd == NULL) 220 if (!caifd)
222 break; 221 return 0;
222
223 caifdev = netdev_priv(dev); 223 caifdev = netdev_priv(dev);
224 caifdev->flowctrl = dev_flowctrl; 224 caifdev->flowctrl = dev_flowctrl;
225 atomic_set(&caifd->state, what);
226 res = 0;
227 break;
228 225
229 case NETDEV_UP:
230 netdev_info(dev, "up\n");
231 caifd = caif_get(dev);
232 if (caifd == NULL)
233 break;
234 caifdev = netdev_priv(dev);
235 if (atomic_read(&caifd->state) == NETDEV_UP) {
236 netdev_info(dev, "already up\n");
237 break;
238 }
239 atomic_set(&caifd->state, what);
240 caifd->layer.transmit = transmit; 226 caifd->layer.transmit = transmit;
241 caifd->layer.modemcmd = modemcmd;
242 227
243 if (caifdev->use_frag) 228 if (caifdev->use_frag)
244 phy_type = CFPHYTYPE_FRAG; 229 phy_type = CFPHYTYPE_FRAG;
@@ -256,62 +241,94 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
256 pref = CFPHYPREF_HIGH_BW; 241 pref = CFPHYPREF_HIGH_BW;
257 break; 242 break;
258 } 243 }
259 dev_hold(dev); 244 strncpy(caifd->layer.name, dev->name,
260 cfcnfg_add_phy_layer(get_caif_conf(), 245 sizeof(caifd->layer.name) - 1);
246 caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;
247
248 mutex_lock(&caifdevs->lock);
249 list_add_rcu(&caifd->list, &caifdevs->list);
250
251 cfcnfg_add_phy_layer(cfg,
261 phy_type, 252 phy_type,
262 dev, 253 dev,
263 &caifd->layer, 254 &caifd->layer,
264 &caifd->phyid,
265 pref, 255 pref,
266 caifdev->use_fcs, 256 caifdev->use_fcs,
267 caifdev->use_stx); 257 caifdev->use_stx);
268 strncpy(caifd->layer.name, dev->name, 258 mutex_unlock(&caifdevs->lock);
269 sizeof(caifd->layer.name) - 1);
270 caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;
271 break; 259 break;
272 260
273 case NETDEV_GOING_DOWN: 261 case NETDEV_UP:
262 rcu_read_lock();
263
274 caifd = caif_get(dev); 264 caifd = caif_get(dev);
275 if (caifd == NULL) 265 if (caifd == NULL) {
266 rcu_read_unlock();
276 break; 267 break;
277 netdev_info(dev, "going down\n"); 268 }
278 269
279 if (atomic_read(&caifd->state) == NETDEV_GOING_DOWN || 270 cfcnfg_set_phy_state(cfg, &caifd->layer, true);
280 atomic_read(&caifd->state) == NETDEV_DOWN) 271 rcu_read_unlock();
281 break;
282 272
283 atomic_set(&caifd->state, what);
284 if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd)
285 return -EINVAL;
286 caifd->layer.up->ctrlcmd(caifd->layer.up,
287 _CAIF_CTRLCMD_PHYIF_DOWN_IND,
288 caifd->layer.id);
289 might_sleep();
290 res = wait_event_interruptible_timeout(caifd->event,
291 atomic_read(&caifd->in_use) == 0,
292 TIMEOUT);
293 break; 273 break;
294 274
295 case NETDEV_DOWN: 275 case NETDEV_DOWN:
276 rcu_read_lock();
277
296 caifd = caif_get(dev); 278 caifd = caif_get(dev);
297 if (caifd == NULL) 279 if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
298 break; 280 rcu_read_unlock();
299 netdev_info(dev, "down\n"); 281 return -EINVAL;
300 if (atomic_read(&caifd->in_use)) 282 }
301 netdev_warn(dev, 283
302 "Unregistering an active CAIF device\n"); 284 cfcnfg_set_phy_state(cfg, &caifd->layer, false);
303 cfcnfg_del_phy_layer(get_caif_conf(), &caifd->layer); 285 caifd_hold(caifd);
304 dev_put(dev); 286 rcu_read_unlock();
305 atomic_set(&caifd->state, what); 287
288 caifd->layer.up->ctrlcmd(caifd->layer.up,
289 _CAIF_CTRLCMD_PHYIF_DOWN_IND,
290 caifd->layer.id);
291 caifd_put(caifd);
306 break; 292 break;
307 293
308 case NETDEV_UNREGISTER: 294 case NETDEV_UNREGISTER:
295 mutex_lock(&caifdevs->lock);
296
309 caifd = caif_get(dev); 297 caifd = caif_get(dev);
310 if (caifd == NULL) 298 if (caifd == NULL) {
299 mutex_unlock(&caifdevs->lock);
300 break;
301 }
302 list_del_rcu(&caifd->list);
303
304 /*
305 * NETDEV_UNREGISTER is called repeatedly until all reference
306 * counts for the net-device are released. If references to
 307	 * caifd are still held, simply ignore NETDEV_UNREGISTER and wait for
308 * the next call to NETDEV_UNREGISTER.
309 *
310 * If any packets are in flight down the CAIF Stack,
311 * cfcnfg_del_phy_layer will return nonzero.
312 * If no packets are in flight, the CAIF Stack associated
313 * with the net-device un-registering is freed.
314 */
315
316 if (caifd_refcnt_read(caifd) != 0 ||
317 cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0) {
318
319 pr_info("Wait for device inuse\n");
320 /* Enrole device if CAIF Stack is still in use */
321 list_add_rcu(&caifd->list, &caifdevs->list);
322 mutex_unlock(&caifdevs->lock);
311 break; 323 break;
312 netdev_info(dev, "unregister\n"); 324 }
313 atomic_set(&caifd->state, what); 325
314 caif_device_destroy(dev); 326 synchronize_rcu();
327 dev_put(caifd->netdev);
328 free_percpu(caifd->pcpu_refcnt);
329 kfree(caifd);
330
331 mutex_unlock(&caifdevs->lock);
315 break; 332 break;
316 } 333 }
317 return 0; 334 return 0;
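
The unregister path above leans on three helpers that live earlier in caif_dev.c, outside this excerpt: caifd_hold(), caifd_put() and caifd_refcnt_read(). A minimal per-CPU sketch of that pattern, assuming an `int __percpu *pcpu_refcnt` member (the names come from the hunk; the bodies are an assumption, not the verbatim source):

/* Sketch only: per-CPU refcount trio assumed by the hunk above. */
static void caifd_hold(struct caif_device_entry *e)
{
	this_cpu_inc(*e->pcpu_refcnt);	/* cheap, no shared cacheline */
}

static void caifd_put(struct caif_device_entry *e)
{
	this_cpu_dec(*e->pcpu_refcnt);
}

static int caifd_refcnt_read(struct caif_device_entry *e)
{
	int i, refcnt = 0;

	/* Only a stable answer after list_del_rcu() has unpublished e. */
	for_each_possible_cpu(i)
		refcnt += *per_cpu_ptr(e->pcpu_refcnt, i);
	return refcnt;
}
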
@@ -322,61 +339,60 @@ static struct notifier_block caif_device_notifier = {
 	.priority = 0,
 };
 
-
-struct cfcnfg *get_caif_conf(void)
-{
-	return cfg;
-}
-EXPORT_SYMBOL(get_caif_conf);
-
-int caif_connect_client(struct caif_connect_request *conn_req,
-			struct cflayer *client_layer, int *ifindex,
-			int *headroom, int *tailroom)
-{
-	struct cfctrl_link_param param;
-	int ret;
-	ret = connect_req_to_link_param(get_caif_conf(), conn_req, &param);
-	if (ret)
-		return ret;
-	/* Hook up the adaptation layer. */
-	return cfcnfg_add_adaptation_layer(get_caif_conf(), &param,
-					   client_layer, ifindex,
-					   headroom, tailroom);
-}
-EXPORT_SYMBOL(caif_connect_client);
-
-int caif_disconnect_client(struct cflayer *adap_layer)
-{
-	return cfcnfg_disconn_adapt_layer(get_caif_conf(), adap_layer);
-}
-EXPORT_SYMBOL(caif_disconnect_client);
-
-void caif_release_client(struct cflayer *adap_layer)
-{
-	cfcnfg_release_adap_layer(adap_layer);
-}
-EXPORT_SYMBOL(caif_release_client);
-
 /* Per-namespace Caif devices handling */
 static int caif_init_net(struct net *net)
 {
 	struct caif_net *caifn = net_generic(net, caif_net_id);
+	BUG_ON(!caifn);
 	INIT_LIST_HEAD(&caifn->caifdevs.list);
-	spin_lock_init(&caifn->caifdevs.lock);
+	mutex_init(&caifn->caifdevs.lock);
+
+	caifn->cfg = cfcnfg_create();
+	if (!caifn->cfg) {
+		pr_warn("can't create cfcnfg\n");
+		return -ENOMEM;
+	}
+
 	return 0;
 }
 
 static void caif_exit_net(struct net *net)
 {
-	struct net_device *dev;
-	int res;
+	struct caif_device_entry *caifd, *tmp;
+	struct caif_device_entry_list *caifdevs =
+	    caif_device_list(net);
+	struct cfcnfg *cfg;
+
 	rtnl_lock();
-	for_each_netdev(net, dev) {
-		if (dev->type != ARPHRD_CAIF)
-			continue;
-		res = dev_close(dev);
-		caif_device_destroy(dev);
+	mutex_lock(&caifdevs->lock);
+
+	cfg = get_cfcnfg(net);
+	if (cfg == NULL) {
+		mutex_unlock(&caifdevs->lock);
+		return;
 	}
+
+	list_for_each_entry_safe(caifd, tmp, &caifdevs->list, list) {
+		int i = 0;
+		list_del_rcu(&caifd->list);
+		cfcnfg_set_phy_state(cfg, &caifd->layer, false);
+
+		while (i < 10 &&
+			(caifd_refcnt_read(caifd) != 0 ||
+			cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0)) {
+
+			pr_info("Wait for device inuse\n");
+			msleep(250);
+			i++;
+		}
+		synchronize_rcu();
+		dev_put(caifd->netdev);
+		free_percpu(caifd->pcpu_refcnt);
+		kfree(caifd);
+	}
+	cfcnfg_remove(cfg);
+
+	mutex_unlock(&caifdevs->lock);
 	rtnl_unlock();
 }
 
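caif_exit_net() above is the classic unpublish-wait-free sequence with a bounded poll bolted on for in-flight users. Stripped of the CAIF specifics, the core RCU retire order it follows is (generic sketch with a hypothetical `struct item`, not CAIF API):

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct item {
	struct list_head list;
	/* payload */
};

static void retire_item(struct item *obj)
{
	list_del_rcu(&obj->list);	/* new readers can no longer find it */
	synchronize_rcu();		/* wait out readers already inside   */
	kfree(obj);			/* nothing can reference it; free    */
}
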
@@ -391,32 +407,23 @@ static struct pernet_operations caif_net_ops = {
 static int __init caif_device_init(void)
 {
 	int result;
-	cfg = cfcnfg_create();
-	if (!cfg) {
-		pr_warn("can't create cfcnfg\n");
-		goto err_cfcnfg_create_failed;
-	}
+
 	result = register_pernet_device(&caif_net_ops);
 
-	if (result) {
-		kfree(cfg);
-		cfg = NULL;
+	if (result)
 		return result;
-	}
-	dev_add_pack(&caif_packet_type);
+
 	register_netdevice_notifier(&caif_device_notifier);
+	dev_add_pack(&caif_packet_type);
 
 	return result;
-err_cfcnfg_create_failed:
-	return -ENODEV;
 }
 
 static void __exit caif_device_exit(void)
 {
-	dev_remove_pack(&caif_packet_type);
 	unregister_pernet_device(&caif_net_ops);
 	unregister_netdevice_notifier(&caif_device_notifier);
-	cfcnfg_remove(cfg);
+	dev_remove_pack(&caif_packet_type);
 }
 
 module_init(caif_device_init);
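
With the global `cfg` pointer gone, every lookup goes through the per-namespace state created in caif_init_net(). get_cfcnfg() itself is not shown in this excerpt; a plausible shape, assuming it simply dereferences the pernet area (an assumption consistent with caif_exit_net() tolerating a NULL return, not the verbatim source):

/* Assumed shape of get_cfcnfg(); the real one lives outside this diff. */
static struct cfcnfg *get_cfcnfg(struct net *net)
{
	struct caif_net *caifn = net_generic(net, caif_net_id);

	return caifn ? caifn->cfg : NULL;
}
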
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 37a4034dfc29..b840395ced1d 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -48,6 +48,7 @@ static struct dentry *debugfsdir;
 #ifdef CONFIG_DEBUG_FS
 struct debug_fs_counter {
 	atomic_t caif_nr_socks;
+	atomic_t caif_sock_create;
 	atomic_t num_connect_req;
 	atomic_t num_connect_resp;
 	atomic_t num_connect_fail_resp;
@@ -59,11 +60,11 @@ struct debug_fs_counter {
 	atomic_t num_rx_flow_on;
 };
 static struct debug_fs_counter cnt;
-#define dbfs_atomic_inc(v) atomic_inc(v)
-#define dbfs_atomic_dec(v) atomic_dec(v)
+#define dbfs_atomic_inc(v) atomic_inc_return(v)
+#define dbfs_atomic_dec(v) atomic_dec_return(v)
 #else
-#define dbfs_atomic_inc(v)
-#define dbfs_atomic_dec(v)
+#define dbfs_atomic_inc(v) 0
+#define dbfs_atomic_dec(v) 0
 #endif
 
 struct caifsock {
@@ -155,9 +156,10 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 
 	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
 		(unsigned)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) {
-		pr_debug("sending flow OFF (queue len = %d %d)\n",
-			atomic_read(&cf_sk->sk.sk_rmem_alloc),
-			sk_rcvbuf_lowwater(cf_sk));
+		if (net_ratelimit())
+			pr_debug("sending flow OFF (queue len = %d %d)\n",
+				atomic_read(&cf_sk->sk.sk_rmem_alloc),
+				sk_rcvbuf_lowwater(cf_sk));
 		set_rx_flow_off(cf_sk);
 		dbfs_atomic_inc(&cnt.num_rx_flow_off);
 		caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
@@ -168,7 +170,8 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 		return err;
 	if (!sk_rmem_schedule(sk, skb->truesize) && rx_flow_is_on(cf_sk)) {
 		set_rx_flow_off(cf_sk);
-		pr_debug("sending flow OFF due to rmem_schedule\n");
+		if (net_ratelimit())
+			pr_debug("sending flow OFF due to rmem_schedule\n");
 		dbfs_atomic_inc(&cnt.num_rx_flow_off);
 		caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
 	}
@@ -202,13 +205,25 @@ static int caif_sktrecv_cb(struct cflayer *layr, struct cfpkt *pkt)
 	skb = cfpkt_tonative(pkt);
 
 	if (unlikely(cf_sk->sk.sk_state != CAIF_CONNECTED)) {
-		cfpkt_destroy(pkt);
+		kfree_skb(skb);
 		return 0;
 	}
 	caif_queue_rcv_skb(&cf_sk->sk, skb);
 	return 0;
 }
 
+static void cfsk_hold(struct cflayer *layr)
+{
+	struct caifsock *cf_sk = container_of(layr, struct caifsock, layer);
+	sock_hold(&cf_sk->sk);
+}
+
+static void cfsk_put(struct cflayer *layr)
+{
+	struct caifsock *cf_sk = container_of(layr, struct caifsock, layer);
+	sock_put(&cf_sk->sk);
+}
+
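cfsk_hold()/cfsk_put() adapt socket reference counting to the generic cflayer interface: container_of() recovers the enclosing caifsock from the embedded layer, so the CAIF stack can pin the socket without knowing its type. The pair is handed to the stack once the connect completes (see the INIT_RSP hunk below); a hypothetical call site for illustration:

/* Hypothetical helper showing how the pair above gets registered. */
static void example_register_sock_refcnt(struct caifsock *cf_sk)
{
	/* From here on, layers queuing work against cf_sk->layer keep
	 * the socket alive via sock_hold()/sock_put(). */
	caif_client_register_refcnt(&cf_sk->layer, cfsk_hold, cfsk_put);
}
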
 /* Packet Control Callback function called from CAIF */
 static void caif_ctrl_cb(struct cflayer *layr,
 			 enum caif_ctrlcmd flow,
@@ -232,6 +247,8 @@ static void caif_ctrl_cb(struct cflayer *layr,
 
 	case CAIF_CTRLCMD_INIT_RSP:
 		/* We're now connected */
+		caif_client_register_refcnt(&cf_sk->layer,
+						cfsk_hold, cfsk_put);
 		dbfs_atomic_inc(&cnt.num_connect_resp);
 		cf_sk->sk.sk_state = CAIF_CONNECTED;
 		set_tx_flow_on(cf_sk);
@@ -242,7 +259,6 @@ static void caif_ctrl_cb(struct cflayer *layr,
 		/* We're now disconnected */
 		cf_sk->sk.sk_state = CAIF_DISCONNECTED;
 		cf_sk->sk.sk_state_change(&cf_sk->sk);
-		cfcnfg_release_adap_layer(&cf_sk->layer);
 		break;
 
 	case CAIF_CTRLCMD_INIT_FAIL_RSP:
@@ -519,43 +535,14 @@ static int transmit_skb(struct sk_buff *skb, struct caifsock *cf_sk,
 			int noblock, long timeo)
 {
 	struct cfpkt *pkt;
-	int ret, loopcnt = 0;
 
 	pkt = cfpkt_fromnative(CAIF_DIR_OUT, skb);
-	memset(cfpkt_info(pkt), 0, sizeof(struct caif_payload_info));
-	do {
+	memset(skb->cb, 0, sizeof(struct caif_payload_info));
 
-		ret = -ETIMEDOUT;
+	if (cf_sk->layer.dn == NULL)
+		return -EINVAL;
 
-		/* Slight paranoia, probably not needed. */
-		if (unlikely(loopcnt++ > 1000)) {
-			pr_warn("transmit retries failed, error = %d\n", ret);
-			break;
-		}
-
-		if (cf_sk->layer.dn != NULL)
-			ret = cf_sk->layer.dn->transmit(cf_sk->layer.dn, pkt);
-		if (likely(ret >= 0))
-			break;
-		/* if transmit return -EAGAIN, then retry */
-		if (noblock && ret == -EAGAIN)
-			break;
-		timeo = caif_wait_for_flow_on(cf_sk, 0, timeo, &ret);
-		if (signal_pending(current)) {
-			ret = sock_intr_errno(timeo);
-			break;
-		}
-		if (ret)
-			break;
-		if (cf_sk->sk.sk_state != CAIF_CONNECTED ||
-			sock_flag(&cf_sk->sk, SOCK_DEAD) ||
-			(cf_sk->sk.sk_shutdown & RCV_SHUTDOWN)) {
-			ret = -EPIPE;
-			cf_sk->sk.sk_err = EPIPE;
-			break;
-		}
-	} while (ret == -EAGAIN);
-	return ret;
+	return cf_sk->layer.dn->transmit(cf_sk->layer.dn, pkt);
 }
 
@@ -620,7 +607,9 @@ static int caif_seqpkt_sendmsg(struct kiocb *kiocb, struct socket *sock,
 		goto err;
 	ret = transmit_skb(skb, cf_sk, noblock, timeo);
 	if (ret < 0)
-		goto err;
+		/* skb is already freed */
+		return ret;
+
 	return len;
 err:
 	kfree_skb(skb);
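
The caller change above encodes an ownership rule: once transmit_skb() is entered, the skb belongs to the CAIF stack and is freed there even on failure, so the error path must return instead of falling through to kfree_skb(). As an illustration (hypothetical wrapper, not part of the commit):

/* Ownership sketch: free only on paths where we still own the skb. */
static int example_send(struct sk_buff *skb, struct caifsock *cf_sk,
			int noblock, long timeo, int len)
{
	int ret = transmit_skb(skb, cf_sk, noblock, timeo);

	if (ret < 0)
		return ret;	/* skb already consumed by lower layers */
	return len;		/* success: also no kfree_skb() here    */
}
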
@@ -826,7 +815,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
 			sk->sk_state == CAIF_DISCONNECTED);
 		if (sk->sk_shutdown & SHUTDOWN_MASK) {
 			/* Allow re-connect after SHUTDOWN_IND */
-			caif_disconnect_client(&cf_sk->layer);
+			caif_disconnect_client(sock_net(sk), &cf_sk->layer);
 			break;
 		}
 		/* No reconnect on a seqpacket socket */
@@ -852,7 +841,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
 	sock->state = SS_CONNECTING;
 	sk->sk_state = CAIF_CONNECTING;
 
-	/* Check priority value coming from socket */
+	/* Check priority value comming from socket */
 	/* if priority value is out of range it will be ajusted */
 	if (cf_sk->sk.sk_priority > CAIF_PRIO_MAX)
 		cf_sk->conn_req.priority = CAIF_PRIO_MAX;
@@ -866,8 +855,10 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
 
 	dbfs_atomic_inc(&cnt.num_connect_req);
 	cf_sk->layer.receive = caif_sktrecv_cb;
-	err = caif_connect_client(&cf_sk->conn_req,
+
+	err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
 				&cf_sk->layer, &ifindex, &headroom, &tailroom);
+
 	if (err < 0) {
 		cf_sk->sk.sk_socket->state = SS_UNCONNECTED;
 		cf_sk->sk.sk_state = CAIF_DISCONNECTED;
@@ -947,13 +938,14 @@ static int caif_release(struct socket *sock)
 	 * caif_queue_rcv_skb checks SOCK_DEAD holding the queue lock,
 	 * this ensures no packets when sock is dead.
 	 */
-	spin_lock(&sk->sk_receive_queue.lock);
+	spin_lock_bh(&sk->sk_receive_queue.lock);
 	sock_set_flag(sk, SOCK_DEAD);
-	spin_unlock(&sk->sk_receive_queue.lock);
+	spin_unlock_bh(&sk->sk_receive_queue.lock);
 	sock->sk = NULL;
 
 	dbfs_atomic_inc(&cnt.num_disconnect);
 
+	WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
 	if (cf_sk->debugfs_socket_dir != NULL)
 		debugfs_remove_recursive(cf_sk->debugfs_socket_dir);
 
@@ -963,13 +955,12 @@ static int caif_release(struct socket *sock)
 
 	if (cf_sk->sk.sk_socket->state == SS_CONNECTED ||
 		cf_sk->sk.sk_socket->state == SS_CONNECTING)
-		res = caif_disconnect_client(&cf_sk->layer);
+		res = caif_disconnect_client(sock_net(sk), &cf_sk->layer);
 
 	cf_sk->sk.sk_socket->state = SS_DISCONNECTING;
 	wake_up_interruptible_poll(sk_sleep(sk), POLLERR|POLLHUP);
 
 	sock_orphan(sk);
-	cf_sk->layer.dn = NULL;
 	sk_stream_kill_queues(&cf_sk->sk);
 	release_sock(sk);
 	sock_put(sk);
@@ -1060,16 +1051,18 @@ static void caif_sock_destructor(struct sock *sk)
 	caif_assert(sk_unhashed(sk));
 	caif_assert(!sk->sk_socket);
 	if (!sock_flag(sk, SOCK_DEAD)) {
-		pr_info("Attempt to release alive CAIF socket: %p\n", sk);
+		pr_debug("Attempt to release alive CAIF socket: %p\n", sk);
 		return;
 	}
 	sk_stream_kill_queues(&cf_sk->sk);
 	dbfs_atomic_dec(&cnt.caif_nr_socks);
+	caif_free_client(&cf_sk->layer);
 }
 
 static int caif_create(struct net *net, struct socket *sock, int protocol,
 		       int kern)
 {
+	int num;
 	struct sock *sk = NULL;
 	struct caifsock *cf_sk = NULL;
 	static struct proto prot = {.name = "PF_CAIF",
@@ -1132,14 +1125,16 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
 	cf_sk->conn_req.protocol = protocol;
 	/* Increase the number of sockets created. */
 	dbfs_atomic_inc(&cnt.caif_nr_socks);
+	num = dbfs_atomic_inc(&cnt.caif_sock_create);
 #ifdef CONFIG_DEBUG_FS
 	if (!IS_ERR(debugfsdir)) {
+
 		/* Fill in some information concerning the misc socket. */
-		snprintf(cf_sk->name, sizeof(cf_sk->name), "cfsk%d",
-				atomic_read(&cnt.caif_nr_socks));
+		snprintf(cf_sk->name, sizeof(cf_sk->name), "cfsk%d", num);
 
 		cf_sk->debugfs_socket_dir =
 			debugfs_create_dir(cf_sk->name, debugfsdir);
+
 		debugfs_create_u32("sk_state", S_IRUSR | S_IWUSR,
 				cf_sk->debugfs_socket_dir,
 				(u32 *) &cf_sk->sk.sk_state);
@@ -1183,6 +1178,9 @@ static int __init caif_sktinit_module(void)
 	debugfs_create_u32("num_sockets", S_IRUSR | S_IWUSR,
 			debugfsdir,
 			(u32 *) &cnt.caif_nr_socks);
+	debugfs_create_u32("num_create", S_IRUSR | S_IWUSR,
+			debugfsdir,
+			(u32 *) &cnt.caif_sock_create);
 	debugfs_create_u32("num_connect_req", S_IRUSR | S_IWUSR,
 			debugfsdir,
 			(u32 *) &cnt.num_connect_req);
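
The switch from atomic_read(&cnt.caif_nr_socks) to a dedicated caif_sock_create counter fixes a naming hazard: caif_nr_socks decreases when sockets close, so two live sockets could have received the same "cfsk%d" debugfs directory name. atomic_inc_return() on a counter that only grows yields a unique value per socket (this also explains why the !CONFIG_DEBUG_FS stubs now expand to 0, so assignments still compile). Reduced illustration with hypothetical names:

static atomic_t sock_seq = ATOMIC_INIT(0);	/* stand-in for cnt.caif_sock_create */

static void example_sock_name(char *buf, size_t len)
{
	/* monotonic counter: names never repeat within module lifetime */
	snprintf(buf, len, "cfsk%d", atomic_inc_return(&sock_seq));
}
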
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index f1f98d967d8a..351c2ca7e7b9 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
@@ -10,6 +10,7 @@
 #include <linux/stddef.h>
 #include <linux/slab.h>
 #include <linux/netdevice.h>
+#include <linux/module.h>
 #include <net/caif/caif_layer.h>
 #include <net/caif/cfpkt.h>
 #include <net/caif/cfcnfg.h>
@@ -18,11 +19,7 @@
 #include <net/caif/cffrml.h>
 #include <net/caif/cfserl.h>
 #include <net/caif/cfsrvl.h>
-
-#include <linux/module.h>
-#include <asm/atomic.h>
-
-#define MAX_PHY_LAYERS 7
+#include <net/caif/caif_dev.h>
 
 #define container_obj(layr) container_of(layr, struct cfcnfg, layer)
 
@@ -30,6 +27,9 @@
  * to manage physical interfaces
  */
 struct cfcnfg_phyinfo {
+	struct list_head node;
+	bool up;
+
 	/* Pointer to the layer below the MUX (framing layer) */
 	struct cflayer *frm_layer;
 	/* Pointer to the lowest actual physical layer */
@@ -39,9 +39,6 @@ struct cfcnfg_phyinfo {
 	/* Preference of the physical in interface */
 	enum cfcnfg_phy_preference pref;
 
-	/* Reference count, number of channels using the device */
-	int phy_ref_count;
-
 	/* Information about the physical device */
 	struct dev_info dev_info;
 
@@ -59,8 +56,8 @@ struct cfcnfg {
 	struct cflayer layer;
 	struct cflayer *ctrl;
 	struct cflayer *mux;
-	u8 last_phyid;
-	struct cfcnfg_phyinfo phy_layers[MAX_PHY_LAYERS];
+	struct list_head phys;
+	struct mutex lock;
 };
 
 static void cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id,
@@ -76,6 +73,9 @@ struct cfcnfg *cfcnfg_create(void)
 {
 	struct cfcnfg *this;
 	struct cfctrl_rsp *resp;
+
+	might_sleep();
+
 	/* Initiate this layer */
 	this = kzalloc(sizeof(struct cfcnfg), GFP_ATOMIC);
 	if (!this) {
@@ -99,27 +99,33 @@ struct cfcnfg *cfcnfg_create(void)
 	resp->radioset_rsp = cfctrl_resp_func;
 	resp->linksetup_rsp = cfcnfg_linkup_rsp;
 	resp->reject_rsp = cfcnfg_reject_rsp;
-
-	this->last_phyid = 1;
+	INIT_LIST_HEAD(&this->phys);
 
 	cfmuxl_set_uplayer(this->mux, this->ctrl, 0);
 	layer_set_dn(this->ctrl, this->mux);
 	layer_set_up(this->ctrl, this);
+	mutex_init(&this->lock);
+
 	return this;
 out_of_mem:
 	pr_warn("Out of memory\n");
+
+	synchronize_rcu();
+
 	kfree(this->mux);
 	kfree(this->ctrl);
 	kfree(this);
 	return NULL;
 }
-EXPORT_SYMBOL(cfcnfg_create);
 
 void cfcnfg_remove(struct cfcnfg *cfg)
 {
+	might_sleep();
 	if (cfg) {
+		synchronize_rcu();
+
 		kfree(cfg->mux);
-		kfree(cfg->ctrl);
+		cfctrl_remove(cfg->ctrl);
 		kfree(cfg);
 	}
 }
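
might_sleep() is added because both functions can now block: cfcnfg_remove() calls synchronize_rcu(), which waits out a full grace period. The annotation makes a wrong (atomic-context) caller complain immediately under CONFIG_DEBUG_ATOMIC_SLEEP instead of deadlocking rarely in the field. Generic sketch of the rule, with a hypothetical config type:

/* Sketch: any teardown that ends in synchronize_rcu() must be allowed
 * to sleep, and should say so up front. */
static void example_remove(struct some_cfg *cfg)	/* hypothetical type */
{
	might_sleep();		/* barks early if the caller is atomic */
	synchronize_rcu();	/* may block for a grace period        */
	kfree(cfg);
}
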
@@ -128,132 +134,96 @@ static void cfctrl_resp_func(void)
 {
 }
 
+static struct cfcnfg_phyinfo *cfcnfg_get_phyinfo_rcu(struct cfcnfg *cnfg,
+						     u8 phyid)
+{
+	struct cfcnfg_phyinfo *phy;
+
+	list_for_each_entry_rcu(phy, &cnfg->phys, node)
+		if (phy->id == phyid)
+			return phy;
+	return NULL;
+}
+
 static void cfctrl_enum_resp(void)
 {
 }
 
-struct dev_info *cfcnfg_get_phyid(struct cfcnfg *cnfg,
-				  enum cfcnfg_phy_preference phy_pref)
+static struct dev_info *cfcnfg_get_phyid(struct cfcnfg *cnfg,
+					 enum cfcnfg_phy_preference phy_pref)
 {
-	u16 i;
-
 	/* Try to match with specified preference */
-	for (i = 1; i < MAX_PHY_LAYERS; i++) {
-		if (cnfg->phy_layers[i].id == i &&
-		    cnfg->phy_layers[i].pref == phy_pref &&
-		    cnfg->phy_layers[i].frm_layer != NULL) {
-			caif_assert(cnfg->phy_layers != NULL);
-			caif_assert(cnfg->phy_layers[i].id == i);
-			return &cnfg->phy_layers[i].dev_info;
-		}
+	struct cfcnfg_phyinfo *phy;
+
+	list_for_each_entry_rcu(phy, &cnfg->phys, node) {
+		if (phy->up && phy->pref == phy_pref &&
+				phy->frm_layer != NULL)
+
+			return &phy->dev_info;
 	}
+
 	/* Otherwise just return something */
-	for (i = 1; i < MAX_PHY_LAYERS; i++) {
-		if (cnfg->phy_layers[i].id == i) {
-			caif_assert(cnfg->phy_layers != NULL);
-			caif_assert(cnfg->phy_layers[i].id == i);
-			return &cnfg->phy_layers[i].dev_info;
-		}
-	}
+	list_for_each_entry_rcu(phy, &cnfg->phys, node)
+		if (phy->up)
+			return &phy->dev_info;
 
 	return NULL;
 }
 
-static struct cfcnfg_phyinfo *cfcnfg_get_phyinfo(struct cfcnfg *cnfg,
-						 u8 phyid)
+static int cfcnfg_get_id_from_ifi(struct cfcnfg *cnfg, int ifi)
 {
-	int i;
-	/* Try to match with specified preference */
-	for (i = 0; i < MAX_PHY_LAYERS; i++)
-		if (cnfg->phy_layers[i].frm_layer != NULL &&
-		    cnfg->phy_layers[i].id == phyid)
-			return &cnfg->phy_layers[i];
-	return NULL;
-}
-
+	struct cfcnfg_phyinfo *phy;
 
-int cfcnfg_get_id_from_ifi(struct cfcnfg *cnfg, int ifi)
-{
-	int i;
-	for (i = 0; i < MAX_PHY_LAYERS; i++)
-		if (cnfg->phy_layers[i].frm_layer != NULL &&
-		    cnfg->phy_layers[i].ifindex == ifi)
-			return i;
+	list_for_each_entry_rcu(phy, &cnfg->phys, node)
+		if (phy->ifindex == ifi && phy->up)
+			return phy->id;
 	return -ENODEV;
 }
 
-int cfcnfg_disconn_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer)
+int caif_disconnect_client(struct net *net, struct cflayer *adap_layer)
 {
 	u8 channel_id = 0;
 	int ret = 0;
 	struct cflayer *servl = NULL;
-	struct cfcnfg_phyinfo *phyinfo = NULL;
-	u8 phyid = 0;
+	struct cfcnfg *cfg = get_cfcnfg(net);
 
 	caif_assert(adap_layer != NULL);
+
 	channel_id = adap_layer->id;
 	if (adap_layer->dn == NULL || channel_id == 0) {
 		pr_err("adap_layer->dn == NULL or adap_layer->id is 0\n");
 		ret = -ENOTCONN;
 		goto end;
 	}
-	servl = cfmuxl_remove_uplayer(cnfg->mux, channel_id);
+
+	servl = cfmuxl_remove_uplayer(cfg->mux, channel_id);
 	if (servl == NULL) {
-		pr_err("PROTOCOL ERROR - Error removing service_layer Channel_Id(%d)",
-		       channel_id);
+		pr_err("PROTOCOL ERROR - "
+				"Error removing service_layer Channel_Id(%d)",
+				channel_id);
 		ret = -EINVAL;
 		goto end;
 	}
-	layer_set_up(servl, NULL);
-	ret = cfctrl_linkdown_req(cnfg->ctrl, channel_id, adap_layer);
-	if (ret)
-		goto end;
-	caif_assert(channel_id == servl->id);
-	if (adap_layer->dn != NULL) {
-		phyid = cfsrvl_getphyid(adap_layer->dn);
-
-		phyinfo = cfcnfg_get_phyinfo(cnfg, phyid);
-		if (phyinfo == NULL) {
-			pr_warn("No interface to send disconnect to\n");
-			ret = -ENODEV;
-			goto end;
-		}
-		if (phyinfo->id != phyid ||
-			phyinfo->phy_layer->id != phyid ||
-			phyinfo->frm_layer->id != phyid) {
-			pr_err("Inconsistency in phy registration\n");
-			ret = -EINVAL;
-			goto end;
-		}
-	}
-	if (phyinfo != NULL && --phyinfo->phy_ref_count == 0 &&
-		phyinfo->phy_layer != NULL &&
-		phyinfo->phy_layer->modemcmd != NULL) {
-		phyinfo->phy_layer->modemcmd(phyinfo->phy_layer,
-					     _CAIF_MODEMCMD_PHYIF_USELESS);
-	}
+
+	ret = cfctrl_linkdown_req(cfg->ctrl, channel_id, adap_layer);
+
 end:
-	cfsrvl_put(servl);
-	cfctrl_cancel_req(cnfg->ctrl, adap_layer);
+	cfctrl_cancel_req(cfg->ctrl, adap_layer);
+
+	/* Do RCU sync before initiating cleanup */
+	synchronize_rcu();
 	if (adap_layer->ctrlcmd != NULL)
 		adap_layer->ctrlcmd(adap_layer, CAIF_CTRLCMD_DEINIT_RSP, 0);
 	return ret;
 
 }
-EXPORT_SYMBOL(cfcnfg_disconn_adapt_layer);
-
-void cfcnfg_release_adap_layer(struct cflayer *adap_layer)
-{
-	if (adap_layer->dn)
-		cfsrvl_put(adap_layer->dn);
-}
-EXPORT_SYMBOL(cfcnfg_release_adap_layer);
+EXPORT_SYMBOL(caif_disconnect_client);
 
 static void cfcnfg_linkdestroy_rsp(struct cflayer *layer, u8 channel_id)
 {
 }
 
-int protohead[CFCTRL_SRV_MASK] = {
+static const int protohead[CFCTRL_SRV_MASK] = {
 	[CFCTRL_SRV_VEI] = 4,
 	[CFCTRL_SRV_DATAGRAM] = 7,
 	[CFCTRL_SRV_UTIL] = 4,
@@ -261,49 +231,157 @@ int protohead[CFCTRL_SRV_MASK] = {
 	[CFCTRL_SRV_DBG] = 3,
 };
 
-int cfcnfg_add_adaptation_layer(struct cfcnfg *cnfg,
-				struct cfctrl_link_param *param,
-				struct cflayer *adap_layer,
-				int *ifindex,
+
+static int caif_connect_req_to_link_param(struct cfcnfg *cnfg,
+					  struct caif_connect_request *s,
+					  struct cfctrl_link_param *l)
+{
+	struct dev_info *dev_info;
+	enum cfcnfg_phy_preference pref;
+	int res;
+
+	memset(l, 0, sizeof(*l));
+	/* In caif protocol low value is high priority */
+	l->priority = CAIF_PRIO_MAX - s->priority + 1;
+
+	if (s->ifindex != 0) {
+		res = cfcnfg_get_id_from_ifi(cnfg, s->ifindex);
+		if (res < 0)
+			return res;
+		l->phyid = res;
+	} else {
+		switch (s->link_selector) {
+		case CAIF_LINK_HIGH_BANDW:
+			pref = CFPHYPREF_HIGH_BW;
+			break;
+		case CAIF_LINK_LOW_LATENCY:
+			pref = CFPHYPREF_LOW_LAT;
+			break;
+		default:
+			return -EINVAL;
+		}
+		dev_info = cfcnfg_get_phyid(cnfg, pref);
+		if (dev_info == NULL)
+			return -ENODEV;
+		l->phyid = dev_info->id;
+	}
+	switch (s->protocol) {
+	case CAIFPROTO_AT:
+		l->linktype = CFCTRL_SRV_VEI;
+		l->endpoint = (s->sockaddr.u.at.type >> 2) & 0x3;
+		l->chtype = s->sockaddr.u.at.type & 0x3;
+		break;
+	case CAIFPROTO_DATAGRAM:
+		l->linktype = CFCTRL_SRV_DATAGRAM;
+		l->chtype = 0x00;
+		l->u.datagram.connid = s->sockaddr.u.dgm.connection_id;
+		break;
+	case CAIFPROTO_DATAGRAM_LOOP:
+		l->linktype = CFCTRL_SRV_DATAGRAM;
+		l->chtype = 0x03;
+		l->endpoint = 0x00;
+		l->u.datagram.connid = s->sockaddr.u.dgm.connection_id;
+		break;
+	case CAIFPROTO_RFM:
+		l->linktype = CFCTRL_SRV_RFM;
+		l->u.datagram.connid = s->sockaddr.u.rfm.connection_id;
+		strncpy(l->u.rfm.volume, s->sockaddr.u.rfm.volume,
+			sizeof(l->u.rfm.volume)-1);
+		l->u.rfm.volume[sizeof(l->u.rfm.volume)-1] = 0;
+		break;
+	case CAIFPROTO_UTIL:
+		l->linktype = CFCTRL_SRV_UTIL;
+		l->endpoint = 0x00;
+		l->chtype = 0x00;
+		strncpy(l->u.utility.name, s->sockaddr.u.util.service,
+			sizeof(l->u.utility.name)-1);
+		l->u.utility.name[sizeof(l->u.utility.name)-1] = 0;
+		caif_assert(sizeof(l->u.utility.name) > 10);
+		l->u.utility.paramlen = s->param.size;
+		if (l->u.utility.paramlen > sizeof(l->u.utility.params))
+			l->u.utility.paramlen = sizeof(l->u.utility.params);
+
+		memcpy(l->u.utility.params, s->param.data,
+			l->u.utility.paramlen);
+
+		break;
+	case CAIFPROTO_DEBUG:
+		l->linktype = CFCTRL_SRV_DBG;
+		l->endpoint = s->sockaddr.u.dbg.service;
+		l->chtype = s->sockaddr.u.dbg.type;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int caif_connect_client(struct net *net, struct caif_connect_request *conn_req,
+			struct cflayer *adap_layer, int *ifindex,
 			int *proto_head,
 			int *proto_tail)
 {
 	struct cflayer *frml;
+	struct cfcnfg_phyinfo *phy;
+	int err;
+	struct cfctrl_link_param param;
+	struct cfcnfg *cfg = get_cfcnfg(net);
+	caif_assert(cfg != NULL);
+
+	rcu_read_lock();
+	err = caif_connect_req_to_link_param(cfg, conn_req, &param);
+	if (err)
+		goto unlock;
+
+	phy = cfcnfg_get_phyinfo_rcu(cfg, param.phyid);
+	if (!phy) {
+		err = -ENODEV;
+		goto unlock;
+	}
+	err = -EINVAL;
+
 	if (adap_layer == NULL) {
 		pr_err("adap_layer is zero\n");
-		return -EINVAL;
+		goto unlock;
 	}
 	if (adap_layer->receive == NULL) {
 		pr_err("adap_layer->receive is NULL\n");
-		return -EINVAL;
+		goto unlock;
 	}
 	if (adap_layer->ctrlcmd == NULL) {
 		pr_err("adap_layer->ctrlcmd == NULL\n");
-		return -EINVAL;
+		goto unlock;
 	}
-	frml = cnfg->phy_layers[param->phyid].frm_layer;
+
+	err = -ENODEV;
+	frml = phy->frm_layer;
 	if (frml == NULL) {
 		pr_err("Specified PHY type does not exist!\n");
-		return -ENODEV;
+		goto unlock;
 	}
-	caif_assert(param->phyid == cnfg->phy_layers[param->phyid].id);
-	caif_assert(cnfg->phy_layers[param->phyid].frm_layer->id ==
-		     param->phyid);
-	caif_assert(cnfg->phy_layers[param->phyid].phy_layer->id ==
-		     param->phyid);
+	caif_assert(param.phyid == phy->id);
+	caif_assert(phy->frm_layer->id ==
+		     param.phyid);
+	caif_assert(phy->phy_layer->id ==
+		     param.phyid);
 
-	*ifindex = cnfg->phy_layers[param->phyid].ifindex;
+	*ifindex = phy->ifindex;
+	*proto_tail = 2;
 	*proto_head =
-		protohead[param->linktype]+
-		(cnfg->phy_layers[param->phyid].use_stx ? 1 : 0);
 
-	*proto_tail = 2;
+		protohead[param.linktype] + (phy->use_stx ? 1 : 0);
+
+	rcu_read_unlock();
 
 	/* FIXME: ENUMERATE INITIALLY WHEN ACTIVATING PHYSICAL INTERFACE */
-	cfctrl_enum_req(cnfg->ctrl, param->phyid);
-	return cfctrl_linkup_request(cnfg->ctrl, param, adap_layer);
+	cfctrl_enum_req(cfg->ctrl, param.phyid);
+	return cfctrl_linkup_request(cfg->ctrl, &param, adap_layer);
+
+unlock:
+	rcu_read_unlock();
+	return err;
 }
-EXPORT_SYMBOL(cfcnfg_add_adaptation_layer);
+EXPORT_SYMBOL(caif_connect_client);
 
 static void cfcnfg_reject_rsp(struct cflayer *layer, u8 channel_id,
 			      struct cflayer *adapt_layer)
@@ -315,32 +393,37 @@ static void cfcnfg_reject_rsp(struct cflayer *layer, u8 channel_id,
 
 static void
 cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv,
 		  u8 phyid, struct cflayer *adapt_layer)
 {
 	struct cfcnfg *cnfg = container_obj(layer);
 	struct cflayer *servicel = NULL;
 	struct cfcnfg_phyinfo *phyinfo;
 	struct net_device *netdev;
 
+	rcu_read_lock();
+
 	if (adapt_layer == NULL) {
-		pr_debug("link setup response but no client exist, send linkdown back\n");
+		pr_debug("link setup response but no client exist,"
+				"send linkdown back\n");
 		cfctrl_linkdown_req(cnfg->ctrl, channel_id, NULL);
-		return;
+		goto unlock;
 	}
 
 	caif_assert(cnfg != NULL);
 	caif_assert(phyid != 0);
-	phyinfo = &cnfg->phy_layers[phyid];
+
+	phyinfo = cfcnfg_get_phyinfo_rcu(cnfg, phyid);
+	if (phyinfo == NULL) {
+		pr_err("ERROR: Link Layer Device dissapeared"
+				"while connecting\n");
+		goto unlock;
+	}
+
+	caif_assert(phyinfo != NULL);
 	caif_assert(phyinfo->id == phyid);
 	caif_assert(phyinfo->phy_layer != NULL);
 	caif_assert(phyinfo->phy_layer->id == phyid);
 
-	phyinfo->phy_ref_count++;
-	if (phyinfo->phy_ref_count == 1 &&
-	    phyinfo->phy_layer->modemcmd != NULL) {
-		phyinfo->phy_layer->modemcmd(phyinfo->phy_layer,
-					     _CAIF_MODEMCMD_PHYIF_USEFULL);
-	}
 	adapt_layer->id = channel_id;
 
 	switch (serv) {
@@ -348,7 +431,8 @@ cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv,
 		servicel = cfvei_create(channel_id, &phyinfo->dev_info);
 		break;
 	case CFCTRL_SRV_DATAGRAM:
-		servicel = cfdgml_create(channel_id, &phyinfo->dev_info);
+		servicel = cfdgml_create(channel_id,
+					 &phyinfo->dev_info);
 		break;
 	case CFCTRL_SRV_RFM:
 		netdev = phyinfo->dev_info.dev;
@@ -365,94 +449,93 @@ cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv,
 		servicel = cfdbgl_create(channel_id, &phyinfo->dev_info);
 		break;
 	default:
-		pr_err("Protocol error. Link setup response - unknown channel type\n");
-		return;
+		pr_err("Protocol error. Link setup response "
+			"- unknown channel type\n");
+		goto unlock;
 	}
 	if (!servicel) {
 		pr_warn("Out of memory\n");
-		return;
+		goto unlock;
 	}
 	layer_set_dn(servicel, cnfg->mux);
 	cfmuxl_set_uplayer(cnfg->mux, servicel, channel_id);
 	layer_set_up(servicel, adapt_layer);
 	layer_set_dn(adapt_layer, servicel);
-	cfsrvl_get(servicel);
+
+	rcu_read_unlock();
+
 	servicel->ctrlcmd(servicel, CAIF_CTRLCMD_INIT_RSP, 0);
+	return;
+unlock:
+	rcu_read_unlock();
 }
 
 void
 cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
 		     struct net_device *dev, struct cflayer *phy_layer,
-		     u16 *phyid, enum cfcnfg_phy_preference pref,
+		     enum cfcnfg_phy_preference pref,
 		     bool fcs, bool stx)
 {
 	struct cflayer *frml;
 	struct cflayer *phy_driver = NULL;
+	struct cfcnfg_phyinfo *phyinfo;
 	int i;
+	u8 phyid;
 
+	mutex_lock(&cnfg->lock);
 
-	if (cnfg->phy_layers[cnfg->last_phyid].frm_layer == NULL) {
-		*phyid = cnfg->last_phyid;
-
-		/* range: * 1..(MAX_PHY_LAYERS-1) */
-		cnfg->last_phyid =
-		    (cnfg->last_phyid % (MAX_PHY_LAYERS - 1)) + 1;
-	} else {
-		*phyid = 0;
-		for (i = 1; i < MAX_PHY_LAYERS; i++) {
-			if (cnfg->phy_layers[i].frm_layer == NULL) {
-				*phyid = i;
-				break;
-			}
-		}
-	}
-	if (*phyid == 0) {
-		pr_err("No Available PHY ID\n");
-		return;
+	/* CAIF protocol allow maximum 6 link-layers */
+	for (i = 0; i < 7; i++) {
+		phyid = (dev->ifindex + i) & 0x7;
+		if (phyid == 0)
+			continue;
+		if (cfcnfg_get_phyinfo_rcu(cnfg, phyid) == NULL)
+			goto got_phyid;
 	}
+	pr_warn("Too many CAIF Link Layers (max 6)\n");
+	goto out;
+
+got_phyid:
+	phyinfo = kzalloc(sizeof(struct cfcnfg_phyinfo), GFP_ATOMIC);
 
 	switch (phy_type) {
 	case CFPHYTYPE_FRAG:
 		phy_driver =
-		    cfserl_create(CFPHYTYPE_FRAG, *phyid, stx);
+		    cfserl_create(CFPHYTYPE_FRAG, phyid, stx);
 		if (!phy_driver) {
 			pr_warn("Out of memory\n");
-			return;
+			goto out;
 		}
-
 		break;
 	case CFPHYTYPE_CAIF:
 		phy_driver = NULL;
 		break;
 	default:
-		pr_err("%d\n", phy_type);
-		return;
-		break;
+		goto out;
 	}
-
-	phy_layer->id = *phyid;
-	cnfg->phy_layers[*phyid].pref = pref;
-	cnfg->phy_layers[*phyid].id = *phyid;
-	cnfg->phy_layers[*phyid].dev_info.id = *phyid;
-	cnfg->phy_layers[*phyid].dev_info.dev = dev;
-	cnfg->phy_layers[*phyid].phy_layer = phy_layer;
-	cnfg->phy_layers[*phyid].phy_ref_count = 0;
-	cnfg->phy_layers[*phyid].ifindex = dev->ifindex;
-	cnfg->phy_layers[*phyid].use_stx = stx;
-	cnfg->phy_layers[*phyid].use_fcs = fcs;
+	phy_layer->id = phyid;
+	phyinfo->pref = pref;
+	phyinfo->id = phyid;
+	phyinfo->dev_info.id = phyid;
+	phyinfo->dev_info.dev = dev;
+	phyinfo->phy_layer = phy_layer;
+	phyinfo->ifindex = dev->ifindex;
+	phyinfo->use_stx = stx;
+	phyinfo->use_fcs = fcs;
 
 	phy_layer->type = phy_type;
-	frml = cffrml_create(*phyid, fcs);
+	frml = cffrml_create(phyid, fcs);
+
 	if (!frml) {
 		pr_warn("Out of memory\n");
-		return;
+		kfree(phyinfo);
+		goto out;
 	}
-	cnfg->phy_layers[*phyid].frm_layer = frml;
-	cfmuxl_set_dnlayer(cnfg->mux, frml, *phyid);
+	phyinfo->frm_layer = frml;
 	layer_set_up(frml, cnfg->mux);
 
 	if (phy_driver != NULL) {
-		phy_driver->id = *phyid;
+		phy_driver->id = phyid;
 		layer_set_dn(frml, phy_driver);
 		layer_set_up(phy_driver, frml);
 		layer_set_dn(phy_driver, phy_layer);
@@ -461,33 +544,95 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
 		layer_set_dn(frml, phy_layer);
 		layer_set_up(phy_layer, frml);
 	}
+
+	list_add_rcu(&phyinfo->node, &cnfg->phys);
+out:
+	mutex_unlock(&cnfg->lock);
 }
 EXPORT_SYMBOL(cfcnfg_add_phy_layer);
 
+int cfcnfg_set_phy_state(struct cfcnfg *cnfg, struct cflayer *phy_layer,
+			 bool up)
+{
+	struct cfcnfg_phyinfo *phyinfo;
+
+	rcu_read_lock();
+	phyinfo = cfcnfg_get_phyinfo_rcu(cnfg, phy_layer->id);
+	if (phyinfo == NULL) {
+		rcu_read_unlock();
+		return -ENODEV;
+	}
+
+	if (phyinfo->up == up) {
+		rcu_read_unlock();
+		return 0;
+	}
+	phyinfo->up = up;
+
+	if (up) {
+		cffrml_hold(phyinfo->frm_layer);
+		cfmuxl_set_dnlayer(cnfg->mux, phyinfo->frm_layer,
+					phy_layer->id);
+	} else {
+		cfmuxl_remove_dnlayer(cnfg->mux, phy_layer->id);
+		cffrml_put(phyinfo->frm_layer);
+	}
+
+	rcu_read_unlock();
+	return 0;
+}
+EXPORT_SYMBOL(cfcnfg_set_phy_state);
+
 int cfcnfg_del_phy_layer(struct cfcnfg *cnfg, struct cflayer *phy_layer)
 {
 	struct cflayer *frml, *frml_dn;
 	u16 phyid;
+	struct cfcnfg_phyinfo *phyinfo;
+
+	might_sleep();
+
+	mutex_lock(&cnfg->lock);
+
 	phyid = phy_layer->id;
-	caif_assert(phyid == cnfg->phy_layers[phyid].id);
-	caif_assert(phy_layer == cnfg->phy_layers[phyid].phy_layer);
+	phyinfo = cfcnfg_get_phyinfo_rcu(cnfg, phyid);
+
+	if (phyinfo == NULL) {
+		mutex_unlock(&cnfg->lock);
+		return 0;
+	}
+	caif_assert(phyid == phyinfo->id);
+	caif_assert(phy_layer == phyinfo->phy_layer);
 	caif_assert(phy_layer->id == phyid);
-	caif_assert(cnfg->phy_layers[phyid].frm_layer->id == phyid);
+	caif_assert(phyinfo->frm_layer->id == phyid);
 
-	memset(&cnfg->phy_layers[phy_layer->id], 0,
-	       sizeof(struct cfcnfg_phyinfo));
-	frml = cfmuxl_remove_dnlayer(cnfg->mux, phy_layer->id);
+	list_del_rcu(&phyinfo->node);
+	synchronize_rcu();
+
+	/* Fail if reference count is not zero */
+	if (cffrml_refcnt_read(phyinfo->frm_layer) != 0) {
+		pr_info("Wait for device inuse\n");
+		list_add_rcu(&phyinfo->node, &cnfg->phys);
+		mutex_unlock(&cnfg->lock);
+		return -EAGAIN;
+	}
+
+	frml = phyinfo->frm_layer;
 	frml_dn = frml->dn;
 	cffrml_set_uplayer(frml, NULL);
 	cffrml_set_dnlayer(frml, NULL);
-	kfree(frml);
-
 	if (phy_layer != frml_dn) {
 		layer_set_up(frml_dn, NULL);
 		layer_set_dn(frml_dn, NULL);
-		kfree(frml_dn);
 	}
 	layer_set_up(phy_layer, NULL);
+
+	if (phyinfo->phy_layer != frml_dn)
+		kfree(frml_dn);
+
+	cffrml_free(frml);
+	kfree(phyinfo);
+	mutex_unlock(&cnfg->lock);
+
 	return 0;
 }
 EXPORT_SYMBOL(cfcnfg_del_phy_layer);
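
The allocation loop in cfcnfg_add_phy_layer() reflects the wire format: the phyid travels in a 3-bit field, id 0 is reserved, so only ids 1..7 are usable, and seeding the probe at dev->ifindex tends to give an interface the same id across up/down cycles. The probe isolated as a sketch (example_alloc_phyid is a hypothetical name, not a real symbol; caller holds cnfg->lock):

static u8 example_alloc_phyid(struct cfcnfg *cnfg, struct net_device *dev)
{
	int i;

	for (i = 0; i < 7; i++) {
		u8 phyid = (dev->ifindex + i) & 0x7;	/* 3-bit space  */

		if (phyid == 0)
			continue;			/* 0 is reserved */
		if (cfcnfg_get_phyinfo_rcu(cnfg, phyid) == NULL)
			return phyid;			/* unused slot   */
	}
	return 0;	/* id space exhausted */
}
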
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index 3cd8f978e309..0c00a6015dda 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -17,7 +17,6 @@
 #define UTILITY_NAME_LENGTH 16
 #define CFPKT_CTRL_PKT_LEN 20
 
-
 #ifdef CAIF_NO_LOOP
 static int handle_loop(struct cfctrl *ctrl,
 			int cmd, struct cfpkt *pkt){
@@ -51,14 +50,31 @@ struct cflayer *cfctrl_create(void)
 	this->serv.layer.receive = cfctrl_recv;
 	sprintf(this->serv.layer.name, "ctrl");
 	this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
+#ifndef CAIF_NO_LOOP
 	spin_lock_init(&this->loop_linkid_lock);
+	this->loop_linkid = 1;
+#endif
 	spin_lock_init(&this->info_list_lock);
 	INIT_LIST_HEAD(&this->list);
-	this->loop_linkid = 1;
 	return &this->serv.layer;
 }
 
-static bool param_eq(struct cfctrl_link_param *p1, struct cfctrl_link_param *p2)
+void cfctrl_remove(struct cflayer *layer)
+{
+	struct cfctrl_request_info *p, *tmp;
+	struct cfctrl *ctrl = container_obj(layer);
+
+	spin_lock_bh(&ctrl->info_list_lock);
+	list_for_each_entry_safe(p, tmp, &ctrl->list, list) {
+		list_del(&p->list);
+		kfree(p);
+	}
+	spin_unlock_bh(&ctrl->info_list_lock);
+	kfree(layer);
+}
+
+static bool param_eq(const struct cfctrl_link_param *p1,
+			const struct cfctrl_link_param *p2)
 {
 	bool eq =
 	    p1->linktype == p2->linktype &&
@@ -100,8 +116,8 @@ static bool param_eq(struct cfctrl_link_param *p1, struct cfctrl_link_param *p2)
 	return false;
 }
 
-bool cfctrl_req_eq(struct cfctrl_request_info *r1,
-		   struct cfctrl_request_info *r2)
+static bool cfctrl_req_eq(const struct cfctrl_request_info *r1,
+			  const struct cfctrl_request_info *r2)
 {
 	if (r1->cmd != r2->cmd)
 		return false;
@@ -112,23 +128,22 @@ bool cfctrl_req_eq(struct cfctrl_request_info *r1,
 }
 
 /* Insert request at the end */
-void cfctrl_insert_req(struct cfctrl *ctrl,
-		       struct cfctrl_request_info *req)
+static void cfctrl_insert_req(struct cfctrl *ctrl,
+			      struct cfctrl_request_info *req)
 {
-	spin_lock(&ctrl->info_list_lock);
+	spin_lock_bh(&ctrl->info_list_lock);
 	atomic_inc(&ctrl->req_seq_no);
 	req->sequence_no = atomic_read(&ctrl->req_seq_no);
 	list_add_tail(&req->list, &ctrl->list);
-	spin_unlock(&ctrl->info_list_lock);
+	spin_unlock_bh(&ctrl->info_list_lock);
 }
 
 /* Compare and remove request */
-struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
-					      struct cfctrl_request_info *req)
+static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
+						     struct cfctrl_request_info *req)
 {
 	struct cfctrl_request_info *p, *tmp, *first;
 
-	spin_lock(&ctrl->info_list_lock);
 	first = list_first_entry(&ctrl->list, struct cfctrl_request_info, list);
 
 	list_for_each_entry_safe(p, tmp, &ctrl->list, list) {
@@ -144,7 +159,6 @@ struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
 	}
 	p = NULL;
 out:
-	spin_unlock(&ctrl->info_list_lock);
 	return p;
 }
 
@@ -154,16 +168,6 @@ struct cfctrl_rsp *cfctrl_get_respfuncs(struct cflayer *layer)
 	return &this->res;
 }
 
-void cfctrl_set_dnlayer(struct cflayer *this, struct cflayer *dn)
-{
-	this->dn = dn;
-}
-
-void cfctrl_set_uplayer(struct cflayer *this, struct cflayer *up)
-{
-	this->up = up;
-}
-
 static void init_info(struct caif_payload_info *info, struct cfctrl *cfctrl)
 {
 	info->hdr_len = 0;
@@ -188,10 +192,6 @@ void cfctrl_enum_req(struct cflayer *layer, u8 physlinkid)
 	cfpkt_addbdy(pkt, physlinkid);
 	ret =
 	    cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
-	if (ret < 0) {
-		pr_err("Could not transmit enum message\n");
-		cfpkt_destroy(pkt);
-	}
 }
 
 int cfctrl_linkup_request(struct cflayer *layer,
@@ -205,14 +205,23 @@ int cfctrl_linkup_request(struct cflayer *layer,
 	struct cfctrl_request_info *req;
 	int ret;
 	char utility_name[16];
-	struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
+	struct cfpkt *pkt;
+
+	if (cfctrl_cancel_req(layer, user_layer) > 0) {
+		/* Slight Paranoia, check if already connecting */
+		pr_err("Duplicate connect request for same client\n");
+		WARN_ON(1);
+		return -EALREADY;
+	}
+
+	pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
 	if (!pkt) {
 		pr_warn("Out of memory\n");
 		return -ENOMEM;
 	}
 	cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_SETUP);
-	cfpkt_addbdy(pkt, (param->chtype << 4) + param->linktype);
-	cfpkt_addbdy(pkt, (param->priority << 3) + param->phyid);
+	cfpkt_addbdy(pkt, (param->chtype << 4) | param->linktype);
+	cfpkt_addbdy(pkt, (param->priority << 3) | param->phyid);
 	cfpkt_addbdy(pkt, param->endpoint & 0x03);
 
 	switch (param->linktype) {
@@ -275,9 +284,13 @@ int cfctrl_linkup_request(struct cflayer *layer,
 	ret =
 	    cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
 	if (ret < 0) {
-		pr_err("Could not transmit linksetup request\n");
-		cfpkt_destroy(pkt);
-		return -ENODEV;
+		int count;
+
+		count = cfctrl_cancel_req(&cfctrl->serv.layer,
+						user_layer);
+		if (count != 1)
+			pr_err("Could not remove request (%d)", count);
+		return -ENODEV;
 	}
 	return 0;
 }
@@ -297,80 +310,29 @@ int cfctrl_linkdown_req(struct cflayer *layer, u8 channelid,
 	init_info(cfpkt_info(pkt), cfctrl);
 	ret =
 	    cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
-	if (ret < 0) {
-		pr_err("Could not transmit link-down request\n");
-		cfpkt_destroy(pkt);
-	}
+#ifndef CAIF_NO_LOOP
+	cfctrl->loop_linkused[channelid] = 0;
+#endif
 	return ret;
 }
 
-void cfctrl_sleep_req(struct cflayer *layer)
-{
-	int ret;
-	struct cfctrl *cfctrl = container_obj(layer);
-	struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
-	if (!pkt) {
-		pr_warn("Out of memory\n");
-		return;
-	}
-	cfpkt_addbdy(pkt, CFCTRL_CMD_SLEEP);
-	init_info(cfpkt_info(pkt), cfctrl);
-	ret =
-	    cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
-	if (ret < 0)
-		cfpkt_destroy(pkt);
-}
-
-void cfctrl_wake_req(struct cflayer *layer)
-{
-	int ret;
-	struct cfctrl *cfctrl = container_obj(layer);
-	struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
-	if (!pkt) {
-		pr_warn("Out of memory\n");
-		return;
-	}
-	cfpkt_addbdy(pkt, CFCTRL_CMD_WAKE);
-	init_info(cfpkt_info(pkt), cfctrl);
-	ret =
-	    cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
-	if (ret < 0)
-		cfpkt_destroy(pkt);
-}
-
-void cfctrl_getstartreason_req(struct cflayer *layer)
-{
-	int ret;
-	struct cfctrl *cfctrl = container_obj(layer);
-	struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
-	if (!pkt) {
-		pr_warn("Out of memory\n");
-		return;
-	}
-	cfpkt_addbdy(pkt, CFCTRL_CMD_START_REASON);
-	init_info(cfpkt_info(pkt), cfctrl);
-	ret =
-	    cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
-	if (ret < 0)
-		cfpkt_destroy(pkt);
-}
-
-
-void cfctrl_cancel_req(struct cflayer *layr, struct cflayer *adap_layer)
+int cfctrl_cancel_req(struct cflayer *layr, struct cflayer *adap_layer)
 {
 	struct cfctrl_request_info *p, *tmp;
 	struct cfctrl *ctrl = container_obj(layr);
-	spin_lock(&ctrl->info_list_lock);
+	int found = 0;
+	spin_lock_bh(&ctrl->info_list_lock);
 
 	list_for_each_entry_safe(p, tmp, &ctrl->list, list) {
 		if (p->client_layer == adap_layer) {
-			pr_debug("cancel req :%d\n", p->sequence_no);
 			list_del(&p->list);
 			kfree(p);
+			found++;
 		}
 	}
 
-	spin_unlock(&ctrl->info_list_lock);
+	spin_unlock_bh(&ctrl->info_list_lock);
+	return found;
 }
 
 static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
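
The wholesale move from spin_lock() to spin_lock_bh() on info_list_lock is not cosmetic: the request list is now also manipulated from the receive path (cfctrl_recv() and the new _CAIF_CTRLCMD_PHYIF_DOWN_IND handler below), which can run in softirq context. Process-context users must therefore disable bottom halves, or a softirq interrupting the lock holder on the same CPU would spin forever on a lock that can never be released. Minimal sketch of the rule:

/* Data shared with softirq context: process context takes the _bh form. */
static void example_queue_req(struct cfctrl *ctrl,
			      struct cfctrl_request_info *req)
{
	spin_lock_bh(&ctrl->info_list_lock);	/* BHs off on this CPU */
	list_add_tail(&req->list, &ctrl->list);
	spin_unlock_bh(&ctrl->info_list_lock);	/* BHs re-enabled      */
}
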
@@ -522,6 +484,7 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
522 484
523 rsp.cmd = cmd; 485 rsp.cmd = cmd;
524 rsp.param = linkparam; 486 rsp.param = linkparam;
487 spin_lock_bh(&cfctrl->info_list_lock);
525 req = cfctrl_remove_req(cfctrl, &rsp); 488 req = cfctrl_remove_req(cfctrl, &rsp);
526 489
527 if (CFCTRL_ERR_BIT == (CFCTRL_ERR_BIT & cmdrsp) || 490 if (CFCTRL_ERR_BIT == (CFCTRL_ERR_BIT & cmdrsp) ||
@@ -541,6 +504,8 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
541 504
542 if (req != NULL) 505 if (req != NULL)
543 kfree(req); 506 kfree(req);
507
508 spin_unlock_bh(&cfctrl->info_list_lock);
544 } 509 }
545 break; 510 break;
546 case CFCTRL_CMD_LINK_DESTROY: 511 case CFCTRL_CMD_LINK_DESTROY:
@@ -584,12 +549,29 @@ static void cfctrl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
584 switch (ctrl) { 549 switch (ctrl) {
585 case _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND: 550 case _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND:
586 case CAIF_CTRLCMD_FLOW_OFF_IND: 551 case CAIF_CTRLCMD_FLOW_OFF_IND:
587 spin_lock(&this->info_list_lock); 552 spin_lock_bh(&this->info_list_lock);
588 if (!list_empty(&this->list)) { 553 if (!list_empty(&this->list)) {
589 pr_debug("Received flow off in control layer\n"); 554 pr_debug("Received flow off in control layer\n");
590 } 555 }
591 spin_unlock(&this->info_list_lock); 556 spin_unlock_bh(&this->info_list_lock);
592 break; 557 break;
558 case _CAIF_CTRLCMD_PHYIF_DOWN_IND: {
559 struct cfctrl_request_info *p, *tmp;
560
561 /* Find all connect request and report failure */
562 spin_lock_bh(&this->info_list_lock);
563 list_for_each_entry_safe(p, tmp, &this->list, list) {
564 if (p->param.phyid == phyid) {
565 list_del(&p->list);
566 p->client_layer->ctrlcmd(p->client_layer,
567 CAIF_CTRLCMD_INIT_FAIL_RSP,
568 phyid);
569 kfree(p);
570 }
571 }
572 spin_unlock_bh(&this->info_list_lock);
573 break;
574 }
593 default: 575 default:
594 break; 576 break;
595 } 577 }
@@ -599,27 +581,33 @@ static void cfctrl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
599static int handle_loop(struct cfctrl *ctrl, int cmd, struct cfpkt *pkt) 581static int handle_loop(struct cfctrl *ctrl, int cmd, struct cfpkt *pkt)
600{ 582{
601 static int last_linkid; 583 static int last_linkid;
584 static int dec;
602 u8 linkid, linktype, tmp; 585 u8 linkid, linktype, tmp;
603 switch (cmd) { 586 switch (cmd) {
604 case CFCTRL_CMD_LINK_SETUP: 587 case CFCTRL_CMD_LINK_SETUP:
605 spin_lock(&ctrl->loop_linkid_lock); 588 spin_lock_bh(&ctrl->loop_linkid_lock);
606 for (linkid = last_linkid + 1; linkid < 255; linkid++) 589 if (!dec) {
607 if (!ctrl->loop_linkused[linkid]) 590 for (linkid = last_linkid + 1; linkid < 255; linkid++)
608 goto found; 591 if (!ctrl->loop_linkused[linkid])
592 goto found;
593 }
594 dec = 1;
609 for (linkid = last_linkid - 1; linkid > 0; linkid--) 595 for (linkid = last_linkid - 1; linkid > 0; linkid--)
610 if (!ctrl->loop_linkused[linkid]) 596 if (!ctrl->loop_linkused[linkid])
611 goto found; 597 goto found;
612 spin_unlock(&ctrl->loop_linkid_lock); 598 spin_unlock_bh(&ctrl->loop_linkid_lock);
613 pr_err("Out of link-ids\n"); 599
614 return -EINVAL;
615found: 600found:
601 if (linkid < 10)
602 dec = 0;
603
616 if (!ctrl->loop_linkused[linkid]) 604 if (!ctrl->loop_linkused[linkid])
617 ctrl->loop_linkused[linkid] = 1; 605 ctrl->loop_linkused[linkid] = 1;
618 606
619 last_linkid = linkid; 607 last_linkid = linkid;
620 608
621 cfpkt_add_trail(pkt, &linkid, 1); 609 cfpkt_add_trail(pkt, &linkid, 1);
622 spin_unlock(&ctrl->loop_linkid_lock); 610 spin_unlock_bh(&ctrl->loop_linkid_lock);
623 cfpkt_peek_head(pkt, &linktype, 1); 611 cfpkt_peek_head(pkt, &linktype, 1);
624 if (linktype == CFCTRL_SRV_UTIL) { 612 if (linktype == CFCTRL_SRV_UTIL) {
625 tmp = 0x01; 613 tmp = 0x01;
@@ -629,10 +617,10 @@ found:
629 break; 617 break;
630 618
631 case CFCTRL_CMD_LINK_DESTROY: 619 case CFCTRL_CMD_LINK_DESTROY:
632 spin_lock(&ctrl->loop_linkid_lock); 620 spin_lock_bh(&ctrl->loop_linkid_lock);
633 cfpkt_peek_head(pkt, &linkid, 1); 621 cfpkt_peek_head(pkt, &linkid, 1);
634 ctrl->loop_linkused[linkid] = 0; 622 ctrl->loop_linkused[linkid] = 0;
635 spin_unlock(&ctrl->loop_linkid_lock); 623 spin_unlock_bh(&ctrl->loop_linkid_lock);
636 break; 624 break;
637 default: 625 default:
638 break; 626 break;
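
The cfctrl changes above convert every spin_lock()/spin_unlock() on the request list and the loop-linkid state to the _bh variants: these structures are now reached both from process context and from the softirq receive path, so the holder must keep bottom halves off its own CPU to avoid a self-deadlock. A minimal sketch of the idiom (illustrative only, not part of the patch):

	#include <linux/spinlock.h>
	#include <linux/list.h>

	static DEFINE_SPINLOCK(req_lock);
	static LIST_HEAD(req_list);

	/* Process context: must block softirqs while holding the lock,
	 * otherwise the handler below could interrupt us on this CPU
	 * and spin forever on req_lock. */
	static void add_request(struct list_head *node)
	{
		spin_lock_bh(&req_lock);
		list_add(node, &req_list);
		spin_unlock_bh(&req_lock);
	}

	/* Softirq context (e.g. packet receive): BHs are already
	 * disabled, so the plain lock suffices here. */
	static void complete_request(struct list_head *node)
	{
		spin_lock(&req_lock);
		list_del(node);
		spin_unlock(&req_lock);
	}
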
diff --git a/net/caif/cfdgml.c b/net/caif/cfdgml.c
index 054fdb5aeb88..0382dec84fdc 100644
--- a/net/caif/cfdgml.c
+++ b/net/caif/cfdgml.c
@@ -108,10 +108,5 @@ static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt)
108 */ 108 */
109 info->hdr_len = 4; 109 info->hdr_len = 4;
110 info->dev_info = &service->dev_info; 110 info->dev_info = &service->dev_info;
111 ret = layr->dn->transmit(layr->dn, pkt); 111 return layr->dn->transmit(layr->dn, pkt);
112 if (ret < 0) {
113 u32 tmp32;
114 cfpkt_extr_head(pkt, &tmp32, 4);
115 }
116 return ret;
117} 112}
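
This hunk, and the matching ones in cfserl.c, cfutill.c, cfveil.c and cfvidl.c further down, change the transmit error contract: a layer no longer peels its own header back off a packet when the lower layer's transmit() fails. Ownership now passes downward unconditionally and the failing callee frees the packet, so callers simply propagate the return value. A hedged sketch of the resulting pattern (example_transmit and the header byte are made up):

	static int example_transmit(struct cflayer *layr, struct cfpkt *pkt)
	{
		u8 hdr = 0x42;			/* hypothetical header byte */

		if (cfpkt_add_head(pkt, &hdr, 1) < 0) {
			cfpkt_destroy(pkt);	/* still ours: free on error */
			return -EPROTO;
		}
		/* After this call the packet belongs to the layer below,
		 * which must destroy it if transmission fails. */
		return layr->dn->transmit(layr->dn, pkt);
	}
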
diff --git a/net/caif/cffrml.c b/net/caif/cffrml.c
index a445043931ae..04204b202718 100644
--- a/net/caif/cffrml.c
+++ b/net/caif/cffrml.c
@@ -12,6 +12,7 @@
12#include <linux/spinlock.h> 12#include <linux/spinlock.h>
13#include <linux/slab.h> 13#include <linux/slab.h>
14#include <linux/crc-ccitt.h> 14#include <linux/crc-ccitt.h>
15#include <linux/netdevice.h>
15#include <net/caif/caif_layer.h> 16#include <net/caif/caif_layer.h>
16#include <net/caif/cfpkt.h> 17#include <net/caif/cfpkt.h>
17#include <net/caif/cffrml.h> 18#include <net/caif/cffrml.h>
@@ -21,6 +22,7 @@
21struct cffrml { 22struct cffrml {
22 struct cflayer layer; 23 struct cflayer layer;
23 bool dofcs; /* !< FCS active */ 24 bool dofcs; /* !< FCS active */
25 int __percpu *pcpu_refcnt;
24}; 26};
25 27
26static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt); 28static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt);
@@ -37,6 +39,12 @@ struct cflayer *cffrml_create(u16 phyid, bool use_fcs)
37 pr_warn("Out of memory\n"); 39 pr_warn("Out of memory\n");
38 return NULL; 40 return NULL;
39 } 41 }
42 this->pcpu_refcnt = alloc_percpu(int);
43 if (this->pcpu_refcnt == NULL) {
44 kfree(this);
45 return NULL;
46 }
47
40 caif_assert(offsetof(struct cffrml, layer) == 0); 48 caif_assert(offsetof(struct cffrml, layer) == 0);
41 49
42 memset(this, 0, sizeof(struct cflayer)); 50 memset(this, 0, sizeof(struct cflayer));
@@ -49,6 +57,13 @@ struct cflayer *cffrml_create(u16 phyid, bool use_fcs)
49 return (struct cflayer *) this; 57 return (struct cflayer *) this;
50} 58}
51 59
60void cffrml_free(struct cflayer *layer)
61{
62 struct cffrml *this = container_obj(layer);
63 free_percpu(this->pcpu_refcnt);
64 kfree(layer);
65}
66
52void cffrml_set_uplayer(struct cflayer *this, struct cflayer *up) 67void cffrml_set_uplayer(struct cflayer *this, struct cflayer *up)
53{ 68{
54 this->up = up; 69 this->up = up;
@@ -112,6 +127,13 @@ static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt)
112 cfpkt_destroy(pkt); 127 cfpkt_destroy(pkt);
113 return -EPROTO; 128 return -EPROTO;
114 } 129 }
130
131 if (layr->up == NULL) {
132 pr_err("Layr up is missing!\n");
133 cfpkt_destroy(pkt);
134 return -EINVAL;
135 }
136
115 return layr->up->receive(layr->up, pkt); 137 return layr->up->receive(layr->up, pkt);
116} 138}
117 139
@@ -120,7 +142,6 @@ static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt)
120 int tmp; 142 int tmp;
121 u16 chks; 143 u16 chks;
122 u16 len; 144 u16 len;
123 int ret;
124 struct cffrml *this = container_obj(layr); 145 struct cffrml *this = container_obj(layr);
125 if (this->dofcs) { 146 if (this->dofcs) {
126 chks = cfpkt_iterate(pkt, cffrml_checksum, 0xffff); 147 chks = cfpkt_iterate(pkt, cffrml_checksum, 0xffff);
@@ -135,19 +156,44 @@ static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt)
135 cfpkt_info(pkt)->hdr_len += 2; 156 cfpkt_info(pkt)->hdr_len += 2;
136 if (cfpkt_erroneous(pkt)) { 157 if (cfpkt_erroneous(pkt)) {
137 pr_err("Packet is erroneous!\n"); 158 pr_err("Packet is erroneous!\n");
159 cfpkt_destroy(pkt);
138 return -EPROTO; 160 return -EPROTO;
139 } 161 }
140 ret = layr->dn->transmit(layr->dn, pkt); 162
141 if (ret < 0) { 163 if (layr->dn == NULL) {
142 /* Remove header on faulty packet. */ 164 cfpkt_destroy(pkt);
143 cfpkt_extr_head(pkt, &tmp, 2); 165 return -ENODEV;
166
144 } 167 }
145 return ret; 168 return layr->dn->transmit(layr->dn, pkt);
146} 169}
147 170
148static void cffrml_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, 171static void cffrml_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
149 int phyid) 172 int phyid)
150{ 173{
151 if (layr->up->ctrlcmd) 174 if (layr->up && layr->up->ctrlcmd)
152 layr->up->ctrlcmd(layr->up, ctrl, layr->id); 175 layr->up->ctrlcmd(layr->up, ctrl, layr->id);
153} 176}
177
178void cffrml_put(struct cflayer *layr)
179{
180 struct cffrml *this = container_obj(layr);
181 if (layr != NULL && this->pcpu_refcnt != NULL)
182 irqsafe_cpu_dec(*this->pcpu_refcnt);
183}
184
185void cffrml_hold(struct cflayer *layr)
186{
187 struct cffrml *this = container_obj(layr);
188 if (layr != NULL && this->pcpu_refcnt != NULL)
189 irqsafe_cpu_inc(*this->pcpu_refcnt);
190}
191
192int cffrml_refcnt_read(struct cflayer *layr)
193{
194 int i, refcnt = 0;
195 struct cffrml *this = container_obj(layr);
196 for_each_possible_cpu(i)
197 refcnt += *per_cpu_ptr(this->pcpu_refcnt, i);
198 return refcnt;
199}
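
The framing layer gains a per-CPU reference counter: cffrml_hold()/cffrml_put() touch only the local CPU's counter via the irqsafe_cpu_* primitives (cheap, no shared cache line bouncing), and an exact value exists only when cffrml_refcnt_read() sums every CPU's delta. A self-contained sketch of the same pattern, with invented names (struct obj and the obj_* helpers):

	#include <linux/percpu.h>

	struct obj {
		int __percpu *pcpu_refcnt;
	};

	static int obj_init(struct obj *o)
	{
		o->pcpu_refcnt = alloc_percpu(int);	/* zeroed on all CPUs */
		return o->pcpu_refcnt ? 0 : -ENOMEM;
	}

	static void obj_hold(struct obj *o)
	{
		irqsafe_cpu_inc(*o->pcpu_refcnt);	/* local CPU only */
	}

	static void obj_put(struct obj *o)
	{
		irqsafe_cpu_dec(*o->pcpu_refcnt);
	}

	/* O(nr_cpus); only meaningful once hold/put activity quiesces. */
	static int obj_refcnt_read(struct obj *o)
	{
		int cpu, sum = 0;

		for_each_possible_cpu(cpu)
			sum += *per_cpu_ptr(o->pcpu_refcnt, cpu);
		return sum;
	}
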
diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c
index 24f1ffa74b06..2a56df7e0a4b 100644
--- a/net/caif/cfmuxl.c
+++ b/net/caif/cfmuxl.c
@@ -9,6 +9,7 @@
9#include <linux/stddef.h> 9#include <linux/stddef.h>
10#include <linux/spinlock.h> 10#include <linux/spinlock.h>
11#include <linux/slab.h> 11#include <linux/slab.h>
12#include <linux/rculist.h>
12#include <net/caif/cfpkt.h> 13#include <net/caif/cfpkt.h>
13#include <net/caif/cfmuxl.h> 14#include <net/caif/cfmuxl.h>
14#include <net/caif/cfsrvl.h> 15#include <net/caif/cfsrvl.h>
@@ -64,66 +65,31 @@ struct cflayer *cfmuxl_create(void)
64int cfmuxl_set_uplayer(struct cflayer *layr, struct cflayer *up, u8 linkid) 65int cfmuxl_set_uplayer(struct cflayer *layr, struct cflayer *up, u8 linkid)
65{ 66{
66 struct cfmuxl *muxl = container_obj(layr); 67 struct cfmuxl *muxl = container_obj(layr);
67 spin_lock(&muxl->receive_lock);
68 cfsrvl_get(up);
69 list_add(&up->node, &muxl->srvl_list);
70 spin_unlock(&muxl->receive_lock);
71 return 0;
72}
73
74bool cfmuxl_is_phy_inuse(struct cflayer *layr, u8 phyid)
75{
76 struct list_head *node;
77 struct cflayer *layer;
78 struct cfmuxl *muxl = container_obj(layr);
79 bool match = false;
80 spin_lock(&muxl->receive_lock);
81
82 list_for_each(node, &muxl->srvl_list) {
83 layer = list_entry(node, struct cflayer, node);
84 if (cfsrvl_phyid_match(layer, phyid)) {
85 match = true;
86 break;
87 }
88
89 }
90 spin_unlock(&muxl->receive_lock);
91 return match;
92}
93 68
94u8 cfmuxl_get_phyid(struct cflayer *layr, u8 channel_id) 69 spin_lock_bh(&muxl->receive_lock);
95{ 70 list_add_rcu(&up->node, &muxl->srvl_list);
96 struct cflayer *up; 71 spin_unlock_bh(&muxl->receive_lock);
97 int phyid; 72 return 0;
98 struct cfmuxl *muxl = container_obj(layr);
99 spin_lock(&muxl->receive_lock);
100 up = get_up(muxl, channel_id);
101 if (up != NULL)
102 phyid = cfsrvl_getphyid(up);
103 else
104 phyid = 0;
105 spin_unlock(&muxl->receive_lock);
106 return phyid;
107} 73}
108 74
109int cfmuxl_set_dnlayer(struct cflayer *layr, struct cflayer *dn, u8 phyid) 75int cfmuxl_set_dnlayer(struct cflayer *layr, struct cflayer *dn, u8 phyid)
110{ 76{
111 struct cfmuxl *muxl = (struct cfmuxl *) layr; 77 struct cfmuxl *muxl = (struct cfmuxl *) layr;
112 spin_lock(&muxl->transmit_lock); 78
113 list_add(&dn->node, &muxl->frml_list); 79 spin_lock_bh(&muxl->transmit_lock);
114 spin_unlock(&muxl->transmit_lock); 80 list_add_rcu(&dn->node, &muxl->frml_list);
81 spin_unlock_bh(&muxl->transmit_lock);
115 return 0; 82 return 0;
116} 83}
117 84
118static struct cflayer *get_from_id(struct list_head *list, u16 id) 85static struct cflayer *get_from_id(struct list_head *list, u16 id)
119{ 86{
120 struct list_head *node; 87 struct cflayer *lyr;
121 struct cflayer *layer; 88 list_for_each_entry_rcu(lyr, list, node) {
122 list_for_each(node, list) { 89 if (lyr->id == id)
123 layer = list_entry(node, struct cflayer, node); 90 return lyr;
124 if (layer->id == id)
125 return layer;
126 } 91 }
92
127 return NULL; 93 return NULL;
128} 94}
129 95
@@ -131,41 +97,45 @@ struct cflayer *cfmuxl_remove_dnlayer(struct cflayer *layr, u8 phyid)
131{ 97{
132 struct cfmuxl *muxl = container_obj(layr); 98 struct cfmuxl *muxl = container_obj(layr);
133 struct cflayer *dn; 99 struct cflayer *dn;
134 spin_lock(&muxl->transmit_lock); 100 int idx = phyid % DN_CACHE_SIZE;
135 memset(muxl->dn_cache, 0, sizeof(muxl->dn_cache)); 101
102 spin_lock_bh(&muxl->transmit_lock);
103 rcu_assign_pointer(muxl->dn_cache[idx], NULL);
136 dn = get_from_id(&muxl->frml_list, phyid); 104 dn = get_from_id(&muxl->frml_list, phyid);
137 if (dn == NULL) { 105 if (dn == NULL)
138 spin_unlock(&muxl->transmit_lock); 106 goto out;
139 return NULL; 107
140 } 108 list_del_rcu(&dn->node);
141 list_del(&dn->node);
142 caif_assert(dn != NULL); 109 caif_assert(dn != NULL);
143 spin_unlock(&muxl->transmit_lock); 110out:
111 spin_unlock_bh(&muxl->transmit_lock);
144 return dn; 112 return dn;
145} 113}
146 114
147/* Invariant: lock is taken */
148static struct cflayer *get_up(struct cfmuxl *muxl, u16 id) 115static struct cflayer *get_up(struct cfmuxl *muxl, u16 id)
149{ 116{
150 struct cflayer *up; 117 struct cflayer *up;
151 int idx = id % UP_CACHE_SIZE; 118 int idx = id % UP_CACHE_SIZE;
152 up = muxl->up_cache[idx]; 119 up = rcu_dereference(muxl->up_cache[idx]);
153 if (up == NULL || up->id != id) { 120 if (up == NULL || up->id != id) {
121 spin_lock_bh(&muxl->receive_lock);
154 up = get_from_id(&muxl->srvl_list, id); 122 up = get_from_id(&muxl->srvl_list, id);
155 muxl->up_cache[idx] = up; 123 rcu_assign_pointer(muxl->up_cache[idx], up);
124 spin_unlock_bh(&muxl->receive_lock);
156 } 125 }
157 return up; 126 return up;
158} 127}
159 128
160/* Invariant: lock is taken */
161static struct cflayer *get_dn(struct cfmuxl *muxl, struct dev_info *dev_info) 129static struct cflayer *get_dn(struct cfmuxl *muxl, struct dev_info *dev_info)
162{ 130{
163 struct cflayer *dn; 131 struct cflayer *dn;
164 int idx = dev_info->id % DN_CACHE_SIZE; 132 int idx = dev_info->id % DN_CACHE_SIZE;
165 dn = muxl->dn_cache[idx]; 133 dn = rcu_dereference(muxl->dn_cache[idx]);
166 if (dn == NULL || dn->id != dev_info->id) { 134 if (dn == NULL || dn->id != dev_info->id) {
135 spin_lock_bh(&muxl->transmit_lock);
167 dn = get_from_id(&muxl->frml_list, dev_info->id); 136 dn = get_from_id(&muxl->frml_list, dev_info->id);
168 muxl->dn_cache[idx] = dn; 137 rcu_assign_pointer(muxl->dn_cache[idx], dn);
138 spin_unlock_bh(&muxl->transmit_lock);
169 } 139 }
170 return dn; 140 return dn;
171} 141}
@@ -174,15 +144,17 @@ struct cflayer *cfmuxl_remove_uplayer(struct cflayer *layr, u8 id)
174{ 144{
175 struct cflayer *up; 145 struct cflayer *up;
176 struct cfmuxl *muxl = container_obj(layr); 146 struct cfmuxl *muxl = container_obj(layr);
177 spin_lock(&muxl->receive_lock); 147 int idx = id % UP_CACHE_SIZE;
178 up = get_up(muxl, id); 148
149 spin_lock_bh(&muxl->receive_lock);
150 up = get_from_id(&muxl->srvl_list, id);
179 if (up == NULL) 151 if (up == NULL)
180 goto out; 152 goto out;
181 memset(muxl->up_cache, 0, sizeof(muxl->up_cache)); 153
182 list_del(&up->node); 154 rcu_assign_pointer(muxl->up_cache[idx], NULL);
183 cfsrvl_put(up); 155 list_del_rcu(&up->node);
184out: 156out:
185 spin_unlock(&muxl->receive_lock); 157 spin_unlock_bh(&muxl->receive_lock);
186 return up; 158 return up;
187} 159}
188 160
@@ -197,58 +169,78 @@ static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt)
197 cfpkt_destroy(pkt); 169 cfpkt_destroy(pkt);
198 return -EPROTO; 170 return -EPROTO;
199 } 171 }
200 172 rcu_read_lock();
201 spin_lock(&muxl->receive_lock);
202 up = get_up(muxl, id); 173 up = get_up(muxl, id);
203 spin_unlock(&muxl->receive_lock); 174
204 if (up == NULL) { 175 if (up == NULL) {
205 pr_info("Received data on unknown link ID = %d (0x%x) up == NULL", 176 pr_debug("Received data on unknown link ID = %d (0x%x)"
206 id, id); 177 " up == NULL", id, id);
207 cfpkt_destroy(pkt); 178 cfpkt_destroy(pkt);
208 /* 179 /*
209 * Don't return ERROR, since modem misbehaves and sends out 180 * Don't return ERROR, since modem misbehaves and sends out
210 * flow on before linksetup response. 181 * flow on before linksetup response.
211 */ 182 */
183
184 rcu_read_unlock();
212 return /* CFGLU_EPROT; */ 0; 185 return /* CFGLU_EPROT; */ 0;
213 } 186 }
187
188 /* We can't hold rcu_lock during receive, so take a ref count instead */
214 cfsrvl_get(up); 189 cfsrvl_get(up);
190 rcu_read_unlock();
191
215 ret = up->receive(up, pkt); 192 ret = up->receive(up, pkt);
193
216 cfsrvl_put(up); 194 cfsrvl_put(up);
217 return ret; 195 return ret;
218} 196}
219 197
220static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt) 198static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt)
221{ 199{
222 int ret;
223 struct cfmuxl *muxl = container_obj(layr); 200 struct cfmuxl *muxl = container_obj(layr);
201 int err;
224 u8 linkid; 202 u8 linkid;
225 struct cflayer *dn; 203 struct cflayer *dn;
226 struct caif_payload_info *info = cfpkt_info(pkt); 204 struct caif_payload_info *info = cfpkt_info(pkt);
227 dn = get_dn(muxl, cfpkt_info(pkt)->dev_info); 205 BUG_ON(!info);
206
207 rcu_read_lock();
208
209 dn = get_dn(muxl, info->dev_info);
228 if (dn == NULL) { 210 if (dn == NULL) {
229 pr_warn("Send data on unknown phy ID = %d (0x%x)\n", 211 pr_debug("Send data on unknown phy ID = %d (0x%x)\n",
230 info->dev_info->id, info->dev_info->id); 212 info->dev_info->id, info->dev_info->id);
213 rcu_read_unlock();
214 cfpkt_destroy(pkt);
231 return -ENOTCONN; 215 return -ENOTCONN;
232 } 216 }
217
233 info->hdr_len += 1; 218 info->hdr_len += 1;
234 linkid = info->channel_id; 219 linkid = info->channel_id;
235 cfpkt_add_head(pkt, &linkid, 1); 220 cfpkt_add_head(pkt, &linkid, 1);
236 ret = dn->transmit(dn, pkt); 221
237 /* Remove MUX protocol header upon error. */ 222 /* We can't hold rcu_lock during transmit, so take a ref count instead */
238 if (ret < 0) 223 cffrml_hold(dn);
239 cfpkt_extr_head(pkt, &linkid, 1); 224
240 return ret; 225 rcu_read_unlock();
226
227 err = dn->transmit(dn, pkt);
228
229 cffrml_put(dn);
230 return err;
241} 231}
242 232
243static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, 233static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
244 int phyid) 234 int phyid)
245{ 235{
246 struct cfmuxl *muxl = container_obj(layr); 236 struct cfmuxl *muxl = container_obj(layr);
247 struct list_head *node, *next;
248 struct cflayer *layer; 237 struct cflayer *layer;
249 list_for_each_safe(node, next, &muxl->srvl_list) { 238
250 layer = list_entry(node, struct cflayer, node); 239 rcu_read_lock();
251 if (cfsrvl_phyid_match(layer, phyid)) 240 list_for_each_entry_rcu(layer, &muxl->srvl_list, node) {
241 if (cfsrvl_phyid_match(layer, phyid) && layer->ctrlcmd)
242 /* NOTE: ctrlcmd is not allowed to block */
252 layer->ctrlcmd(layer, ctrl, phyid); 243 layer->ctrlcmd(layer, ctrl, phyid);
253 } 244 }
245 rcu_read_unlock();
254} 246}
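
The mux layer's service and framing lists move from lock-protected traversal to RCU: readers walk them under rcu_read_lock() with list_for_each_entry_rcu() and the per-id caches are published with rcu_assign_pointer(), while writers still serialize on the old spinlocks and use the list_*_rcu() mutators. Since ->receive() and ->transmit() cannot run inside a read-side critical section, the code pins its target with a reference first, as in this sketch (lookup_and_hold is invented; cfsrvl_get/cfsrvl_put are the real helpers):

	static struct cflayer *lookup_and_hold(struct list_head *list, u8 id)
	{
		struct cflayer *lyr, *found = NULL;

		rcu_read_lock();
		list_for_each_entry_rcu(lyr, list, node) {
			if (lyr->id == id) {
				cfsrvl_get(lyr);	/* pin before unlock */
				found = lyr;
				break;
			}
		}
		rcu_read_unlock();
		return found;	/* caller must cfsrvl_put() when done */
	}
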
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index d7e865e2ff65..75d4bfae1a78 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -42,22 +42,22 @@ struct cfpkt_priv_data {
42 bool erronous; 42 bool erronous;
43}; 43};
44 44
45inline struct cfpkt_priv_data *cfpkt_priv(struct cfpkt *pkt) 45static inline struct cfpkt_priv_data *cfpkt_priv(struct cfpkt *pkt)
46{ 46{
47 return (struct cfpkt_priv_data *) pkt->skb.cb; 47 return (struct cfpkt_priv_data *) pkt->skb.cb;
48} 48}
49 49
50inline bool is_erronous(struct cfpkt *pkt) 50static inline bool is_erronous(struct cfpkt *pkt)
51{ 51{
52 return cfpkt_priv(pkt)->erronous; 52 return cfpkt_priv(pkt)->erronous;
53} 53}
54 54
55inline struct sk_buff *pkt_to_skb(struct cfpkt *pkt) 55static inline struct sk_buff *pkt_to_skb(struct cfpkt *pkt)
56{ 56{
57 return &pkt->skb; 57 return &pkt->skb;
58} 58}
59 59
60inline struct cfpkt *skb_to_pkt(struct sk_buff *skb) 60static inline struct cfpkt *skb_to_pkt(struct sk_buff *skb)
61{ 61{
62 return (struct cfpkt *) skb; 62 return (struct cfpkt *) skb;
63} 63}
@@ -97,21 +97,20 @@ inline struct cfpkt *cfpkt_create(u16 len)
97{ 97{
98 return cfpkt_create_pfx(len + PKT_POSTFIX, PKT_PREFIX); 98 return cfpkt_create_pfx(len + PKT_POSTFIX, PKT_PREFIX);
99} 99}
100EXPORT_SYMBOL(cfpkt_create);
101 100
102void cfpkt_destroy(struct cfpkt *pkt) 101void cfpkt_destroy(struct cfpkt *pkt)
103{ 102{
104 struct sk_buff *skb = pkt_to_skb(pkt); 103 struct sk_buff *skb = pkt_to_skb(pkt);
105 kfree_skb(skb); 104 kfree_skb(skb);
106} 105}
107EXPORT_SYMBOL(cfpkt_destroy); 106
108 107
109inline bool cfpkt_more(struct cfpkt *pkt) 108inline bool cfpkt_more(struct cfpkt *pkt)
110{ 109{
111 struct sk_buff *skb = pkt_to_skb(pkt); 110 struct sk_buff *skb = pkt_to_skb(pkt);
112 return skb->len > 0; 111 return skb->len > 0;
113} 112}
114EXPORT_SYMBOL(cfpkt_more); 113
115 114
116int cfpkt_peek_head(struct cfpkt *pkt, void *data, u16 len) 115int cfpkt_peek_head(struct cfpkt *pkt, void *data, u16 len)
117{ 116{
@@ -123,7 +122,6 @@ int cfpkt_peek_head(struct cfpkt *pkt, void *data, u16 len)
123 return !cfpkt_extr_head(pkt, data, len) && 122 return !cfpkt_extr_head(pkt, data, len) &&
124 !cfpkt_add_head(pkt, data, len); 123 !cfpkt_add_head(pkt, data, len);
125} 124}
126EXPORT_SYMBOL(cfpkt_peek_head);
127 125
128int cfpkt_extr_head(struct cfpkt *pkt, void *data, u16 len) 126int cfpkt_extr_head(struct cfpkt *pkt, void *data, u16 len)
129{ 127{
@@ -148,7 +146,6 @@ int cfpkt_extr_head(struct cfpkt *pkt, void *data, u16 len)
148 memcpy(data, from, len); 146 memcpy(data, from, len);
149 return 0; 147 return 0;
150} 148}
151EXPORT_SYMBOL(cfpkt_extr_head);
152 149
153int cfpkt_extr_trail(struct cfpkt *pkt, void *dta, u16 len) 150int cfpkt_extr_trail(struct cfpkt *pkt, void *dta, u16 len)
154{ 151{
@@ -171,13 +168,13 @@ int cfpkt_extr_trail(struct cfpkt *pkt, void *dta, u16 len)
171 memcpy(data, from, len); 168 memcpy(data, from, len);
172 return 0; 169 return 0;
173} 170}
174EXPORT_SYMBOL(cfpkt_extr_trail); 171
175 172
176int cfpkt_pad_trail(struct cfpkt *pkt, u16 len) 173int cfpkt_pad_trail(struct cfpkt *pkt, u16 len)
177{ 174{
178 return cfpkt_add_body(pkt, NULL, len); 175 return cfpkt_add_body(pkt, NULL, len);
179} 176}
180EXPORT_SYMBOL(cfpkt_pad_trail); 177
181 178
182int cfpkt_add_body(struct cfpkt *pkt, const void *data, u16 len) 179int cfpkt_add_body(struct cfpkt *pkt, const void *data, u16 len)
183{ 180{
@@ -226,13 +223,11 @@ int cfpkt_add_body(struct cfpkt *pkt, const void *data, u16 len)
226 memcpy(to, data, len); 223 memcpy(to, data, len);
227 return 0; 224 return 0;
228} 225}
229EXPORT_SYMBOL(cfpkt_add_body);
230 226
231inline int cfpkt_addbdy(struct cfpkt *pkt, u8 data) 227inline int cfpkt_addbdy(struct cfpkt *pkt, u8 data)
232{ 228{
233 return cfpkt_add_body(pkt, &data, 1); 229 return cfpkt_add_body(pkt, &data, 1);
234} 230}
235EXPORT_SYMBOL(cfpkt_addbdy);
236 231
237int cfpkt_add_head(struct cfpkt *pkt, const void *data2, u16 len) 232int cfpkt_add_head(struct cfpkt *pkt, const void *data2, u16 len)
238{ 233{
@@ -259,20 +254,20 @@ int cfpkt_add_head(struct cfpkt *pkt, const void *data2, u16 len)
259 memcpy(to, data, len); 254 memcpy(to, data, len);
260 return 0; 255 return 0;
261} 256}
262EXPORT_SYMBOL(cfpkt_add_head); 257
263 258
264inline int cfpkt_add_trail(struct cfpkt *pkt, const void *data, u16 len) 259inline int cfpkt_add_trail(struct cfpkt *pkt, const void *data, u16 len)
265{ 260{
266 return cfpkt_add_body(pkt, data, len); 261 return cfpkt_add_body(pkt, data, len);
267} 262}
268EXPORT_SYMBOL(cfpkt_add_trail); 263
269 264
270inline u16 cfpkt_getlen(struct cfpkt *pkt) 265inline u16 cfpkt_getlen(struct cfpkt *pkt)
271{ 266{
272 struct sk_buff *skb = pkt_to_skb(pkt); 267 struct sk_buff *skb = pkt_to_skb(pkt);
273 return skb->len; 268 return skb->len;
274} 269}
275EXPORT_SYMBOL(cfpkt_getlen); 270
276 271
277inline u16 cfpkt_iterate(struct cfpkt *pkt, 272inline u16 cfpkt_iterate(struct cfpkt *pkt,
278 u16 (*iter_func)(u16, void *, u16), 273 u16 (*iter_func)(u16, void *, u16),
@@ -290,7 +285,7 @@ inline u16 cfpkt_iterate(struct cfpkt *pkt,
290 } 285 }
291 return iter_func(data, pkt->skb.data, cfpkt_getlen(pkt)); 286 return iter_func(data, pkt->skb.data, cfpkt_getlen(pkt));
292} 287}
293EXPORT_SYMBOL(cfpkt_iterate); 288
294 289
295int cfpkt_setlen(struct cfpkt *pkt, u16 len) 290int cfpkt_setlen(struct cfpkt *pkt, u16 len)
296{ 291{
@@ -315,18 +310,6 @@ int cfpkt_setlen(struct cfpkt *pkt, u16 len)
315 310
316 return cfpkt_getlen(pkt); 311 return cfpkt_getlen(pkt);
317} 312}
318EXPORT_SYMBOL(cfpkt_setlen);
319
320struct cfpkt *cfpkt_create_uplink(const unsigned char *data, unsigned int len)
321{
322 struct cfpkt *pkt = cfpkt_create_pfx(len + PKT_POSTFIX, PKT_PREFIX);
323 if (!pkt)
324 return NULL;
325 if (unlikely(data != NULL))
326 cfpkt_add_body(pkt, data, len);
327 return pkt;
328}
329EXPORT_SYMBOL(cfpkt_create_uplink);
330 313
331struct cfpkt *cfpkt_append(struct cfpkt *dstpkt, 314struct cfpkt *cfpkt_append(struct cfpkt *dstpkt,
332 struct cfpkt *addpkt, 315 struct cfpkt *addpkt,
@@ -368,7 +351,6 @@ struct cfpkt *cfpkt_append(struct cfpkt *dstpkt,
368 dst->len += addlen; 351 dst->len += addlen;
369 return skb_to_pkt(dst); 352 return skb_to_pkt(dst);
370} 353}
371EXPORT_SYMBOL(cfpkt_append);
372 354
373struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos) 355struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos)
374{ 356{
@@ -406,174 +388,13 @@ struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos)
406 skb2->len += len2nd; 388 skb2->len += len2nd;
407 return skb_to_pkt(skb2); 389 return skb_to_pkt(skb2);
408} 390}
409EXPORT_SYMBOL(cfpkt_split);
410
411char *cfpkt_log_pkt(struct cfpkt *pkt, char *buf, int buflen)
412{
413 struct sk_buff *skb = pkt_to_skb(pkt);
414 char *p = buf;
415 int i;
416
417 /*
418 * Sanity check buffer length, it needs to be at least as large as
419 * the header info: ~=50+ bytes
420 */
421 if (buflen < 50)
422 return NULL;
423
424 snprintf(buf, buflen, "%s: pkt:%p len:%ld(%ld+%ld) {%ld,%ld} data: [",
425 is_erronous(pkt) ? "ERRONOUS-SKB" :
426 (skb->data_len != 0 ? "COMPLEX-SKB" : "SKB"),
427 skb,
428 (long) skb->len,
429 (long) (skb_tail_pointer(skb) - skb->data),
430 (long) skb->data_len,
431 (long) (skb->data - skb->head),
432 (long) (skb_tail_pointer(skb) - skb->head));
433 p = buf + strlen(buf);
434
435 for (i = 0; i < skb_tail_pointer(skb) - skb->data && i < 300; i++) {
436 if (p > buf + buflen - 10) {
437 sprintf(p, "...");
438 p = buf + strlen(buf);
439 break;
440 }
441 sprintf(p, "%02x,", skb->data[i]);
442 p = buf + strlen(buf);
443 }
444 sprintf(p, "]\n");
445 return buf;
446}
447EXPORT_SYMBOL(cfpkt_log_pkt);
448
449int cfpkt_raw_append(struct cfpkt *pkt, void **buf, unsigned int buflen)
450{
451 struct sk_buff *skb = pkt_to_skb(pkt);
452 struct sk_buff *lastskb;
453
454 caif_assert(buf != NULL);
455 if (unlikely(is_erronous(pkt)))
456 return -EPROTO;
457 /* Make sure SKB is writable */
458 if (unlikely(skb_cow_data(skb, 0, &lastskb) < 0)) {
459 PKT_ERROR(pkt, "skb_cow_data failed\n");
460 return -EPROTO;
461 }
462
463 if (unlikely(skb_linearize(skb) != 0)) {
464 PKT_ERROR(pkt, "linearize failed\n");
465 return -EPROTO;
466 }
467
468 if (unlikely(skb_tailroom(skb) < buflen)) {
469 PKT_ERROR(pkt, "buffer too short - failed\n");
470 return -EPROTO;
471 }
472
473 *buf = skb_put(skb, buflen);
474 return 1;
475}
476EXPORT_SYMBOL(cfpkt_raw_append);
477 391
478int cfpkt_raw_extract(struct cfpkt *pkt, void **buf, unsigned int buflen) 392bool cfpkt_erroneous(struct cfpkt *pkt)
479{
480 struct sk_buff *skb = pkt_to_skb(pkt);
481
482 caif_assert(buf != NULL);
483 if (unlikely(is_erronous(pkt)))
484 return -EPROTO;
485
486 if (unlikely(buflen > skb->len)) {
487 PKT_ERROR(pkt, "buflen too large - failed\n");
488 return -EPROTO;
489 }
490
491 if (unlikely(buflen > skb_headlen(skb))) {
492 if (unlikely(skb_linearize(skb) != 0)) {
493 PKT_ERROR(pkt, "linearize failed\n");
494 return -EPROTO;
495 }
496 }
497
498 *buf = skb->data;
499 skb_pull(skb, buflen);
500
501 return 1;
502}
503EXPORT_SYMBOL(cfpkt_raw_extract);
504
505inline bool cfpkt_erroneous(struct cfpkt *pkt)
506{ 393{
507 return cfpkt_priv(pkt)->erronous; 394 return cfpkt_priv(pkt)->erronous;
508} 395}
509EXPORT_SYMBOL(cfpkt_erroneous);
510
511struct cfpktq *cfpktq_create(void)
512{
513 struct cfpktq *q = kmalloc(sizeof(struct cfpktq), GFP_ATOMIC);
514 if (!q)
515 return NULL;
516 skb_queue_head_init(&q->head);
517 atomic_set(&q->count, 0);
518 spin_lock_init(&q->lock);
519 return q;
520}
521EXPORT_SYMBOL(cfpktq_create);
522
523void cfpkt_queue(struct cfpktq *pktq, struct cfpkt *pkt, unsigned short prio)
524{
525 atomic_inc(&pktq->count);
526 spin_lock(&pktq->lock);
527 skb_queue_tail(&pktq->head, pkt_to_skb(pkt));
528 spin_unlock(&pktq->lock);
529
530}
531EXPORT_SYMBOL(cfpkt_queue);
532
533struct cfpkt *cfpkt_qpeek(struct cfpktq *pktq)
534{
535 struct cfpkt *tmp;
536 spin_lock(&pktq->lock);
537 tmp = skb_to_pkt(skb_peek(&pktq->head));
538 spin_unlock(&pktq->lock);
539 return tmp;
540}
541EXPORT_SYMBOL(cfpkt_qpeek);
542
543struct cfpkt *cfpkt_dequeue(struct cfpktq *pktq)
544{
545 struct cfpkt *pkt;
546 spin_lock(&pktq->lock);
547 pkt = skb_to_pkt(skb_dequeue(&pktq->head));
548 if (pkt) {
549 atomic_dec(&pktq->count);
550 caif_assert(atomic_read(&pktq->count) >= 0);
551 }
552 spin_unlock(&pktq->lock);
553 return pkt;
554}
555EXPORT_SYMBOL(cfpkt_dequeue);
556
557int cfpkt_qcount(struct cfpktq *pktq)
558{
559 return atomic_read(&pktq->count);
560}
561EXPORT_SYMBOL(cfpkt_qcount);
562
563struct cfpkt *cfpkt_clone_release(struct cfpkt *pkt)
564{
565 struct cfpkt *clone;
566 clone = skb_to_pkt(skb_clone(pkt_to_skb(pkt), GFP_ATOMIC));
567 /* Free original packet. */
568 cfpkt_destroy(pkt);
569 if (!clone)
570 return NULL;
571 return clone;
572}
573EXPORT_SYMBOL(cfpkt_clone_release);
574 396
575struct caif_payload_info *cfpkt_info(struct cfpkt *pkt) 397struct caif_payload_info *cfpkt_info(struct cfpkt *pkt)
576{ 398{
577 return (struct caif_payload_info *)&pkt_to_skb(pkt)->cb; 399 return (struct caif_payload_info *)&pkt_to_skb(pkt)->cb;
578} 400}
579EXPORT_SYMBOL(cfpkt_info);
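
Most of this file's churn is API surface removal: the logging, raw-append/extract and packet-queue helpers had no remaining users, and with all CAIF callers in-tree the EXPORT_SYMBOLs and the non-static inlines could go. The representation the survivors rely on is worth noting: a cfpkt is a plain cast of an sk_buff, and cfpkt_info() stores CAIF's per-packet metadata in skb->cb[]. A generic sketch of that control-buffer pattern (struct my_meta is hypothetical):

	#include <linux/skbuff.h>

	struct my_meta {		/* must fit in skb->cb (48 bytes) */
		u16 hdr_len;
		u8  channel_id;
	};

	static inline struct my_meta *my_meta(struct sk_buff *skb)
	{
		BUILD_BUG_ON(sizeof(struct my_meta) > sizeof(skb->cb));
		return (struct my_meta *)skb->cb;
	}
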
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index e2fb5fa75795..0deabb440051 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -31,9 +31,9 @@ struct cfrfml {
31 spinlock_t sync; 31 spinlock_t sync;
32}; 32};
33 33
34static void cfrfml_release(struct kref *kref) 34static void cfrfml_release(struct cflayer *layer)
35{ 35{
36 struct cfsrvl *srvl = container_of(kref, struct cfsrvl, ref); 36 struct cfsrvl *srvl = container_of(layer, struct cfsrvl, layer);
37 struct cfrfml *rfml = container_obj(&srvl->layer); 37 struct cfrfml *rfml = container_obj(&srvl->layer);
38 38
39 if (rfml->incomplete_frm) 39 if (rfml->incomplete_frm)
diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c
index 8303fe3ebf89..2715c84cfa87 100644
--- a/net/caif/cfserl.c
+++ b/net/caif/cfserl.c
@@ -179,15 +179,10 @@ static int cfserl_receive(struct cflayer *l, struct cfpkt *newpkt)
179static int cfserl_transmit(struct cflayer *layer, struct cfpkt *newpkt) 179static int cfserl_transmit(struct cflayer *layer, struct cfpkt *newpkt)
180{ 180{
181 struct cfserl *layr = container_obj(layer); 181 struct cfserl *layr = container_obj(layer);
182 int ret;
183 u8 tmp8 = CFSERL_STX; 182 u8 tmp8 = CFSERL_STX;
184 if (layr->usestx) 183 if (layr->usestx)
185 cfpkt_add_head(newpkt, &tmp8, 1); 184 cfpkt_add_head(newpkt, &tmp8, 1);
186 ret = layer->dn->transmit(layer->dn, newpkt); 185 return layer->dn->transmit(layer->dn, newpkt);
187 if (ret < 0)
188 cfpkt_extr_head(newpkt, &tmp8, 1);
189
190 return ret;
191} 186}
192 187
193static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, 188static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c
index ab5e542526bf..535a1e72b366 100644
--- a/net/caif/cfsrvl.c
+++ b/net/caif/cfsrvl.c
@@ -10,6 +10,7 @@
10#include <linux/types.h> 10#include <linux/types.h>
11#include <linux/errno.h> 11#include <linux/errno.h>
12#include <linux/slab.h> 12#include <linux/slab.h>
13#include <linux/module.h>
13#include <net/caif/caif_layer.h> 14#include <net/caif/caif_layer.h>
14#include <net/caif/cfsrvl.h> 15#include <net/caif/cfsrvl.h>
15#include <net/caif/cfpkt.h> 16#include <net/caif/cfpkt.h>
@@ -27,8 +28,8 @@ static void cfservl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
27{ 28{
28 struct cfsrvl *service = container_obj(layr); 29 struct cfsrvl *service = container_obj(layr);
29 30
30 caif_assert(layr->up != NULL); 31 if (layr->up == NULL || layr->up->ctrlcmd == NULL)
31 caif_assert(layr->up->ctrlcmd != NULL); 32 return;
32 33
33 switch (ctrl) { 34 switch (ctrl) {
34 case CAIF_CTRLCMD_INIT_RSP: 35 case CAIF_CTRLCMD_INIT_RSP:
@@ -151,14 +152,9 @@ static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
151 return -EINVAL; 152 return -EINVAL;
152} 153}
153 154
154void cfservl_destroy(struct cflayer *layer) 155static void cfsrvl_release(struct cflayer *layer)
155{ 156{
156 kfree(layer); 157 struct cfsrvl *service = container_of(layer, struct cfsrvl, layer);
157}
158
159void cfsrvl_release(struct kref *kref)
160{
161 struct cfsrvl *service = container_of(kref, struct cfsrvl, ref);
162 kfree(service); 158 kfree(service);
163} 159}
164 160
@@ -178,10 +174,8 @@ void cfsrvl_init(struct cfsrvl *service,
178 service->dev_info = *dev_info; 174 service->dev_info = *dev_info;
179 service->supports_flowctrl = supports_flowctrl; 175 service->supports_flowctrl = supports_flowctrl;
180 service->release = cfsrvl_release; 176 service->release = cfsrvl_release;
181 kref_init(&service->ref);
182} 177}
183 178
184
185bool cfsrvl_ready(struct cfsrvl *service, int *err) 179bool cfsrvl_ready(struct cfsrvl *service, int *err)
186{ 180{
187 if (service->open && service->modem_flow_on && service->phy_flow_on) 181 if (service->open && service->modem_flow_on && service->phy_flow_on)
@@ -194,6 +188,7 @@ bool cfsrvl_ready(struct cfsrvl *service, int *err)
194 *err = -EAGAIN; 188 *err = -EAGAIN;
195 return false; 189 return false;
196} 190}
191
197u8 cfsrvl_getphyid(struct cflayer *layer) 192u8 cfsrvl_getphyid(struct cflayer *layer)
198{ 193{
199 struct cfsrvl *servl = container_obj(layer); 194 struct cfsrvl *servl = container_obj(layer);
@@ -205,3 +200,26 @@ bool cfsrvl_phyid_match(struct cflayer *layer, int phyid)
205 struct cfsrvl *servl = container_obj(layer); 200 struct cfsrvl *servl = container_obj(layer);
206 return servl->dev_info.id == phyid; 201 return servl->dev_info.id == phyid;
207} 202}
203
204void caif_free_client(struct cflayer *adap_layer)
205{
206 struct cfsrvl *servl;
207 if (adap_layer == NULL || adap_layer->dn == NULL)
208 return;
209 servl = container_obj(adap_layer->dn);
210 servl->release(&servl->layer);
211}
212EXPORT_SYMBOL(caif_free_client);
213
214void caif_client_register_refcnt(struct cflayer *adapt_layer,
215 void (*hold)(struct cflayer *lyr),
216 void (*put)(struct cflayer *lyr))
217{
218 struct cfsrvl *service;
219 service = container_of(adapt_layer->dn, struct cfsrvl, layer);
220
221 WARN_ON(adapt_layer == NULL || adapt_layer->dn == NULL);
222 service->hold = hold;
223 service->put = put;
224}
225EXPORT_SYMBOL(caif_client_register_refcnt);
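
cfsrvl drops its private kref; lifetime is now delegated upward through caif_client_register_refcnt(), so in-flight packets pin whatever object really owns the channel, typically a socket or a net_device. The chnl_net.c hunk below wires this to dev_hold()/dev_put(); a condensed sketch of a client doing the same (struct my_client and the my_* names are made up):

	struct my_client {
		struct cflayer chnl;
		struct net_device *netdev;
	};

	static void my_hold(struct cflayer *lyr)
	{
		struct my_client *c = container_of(lyr, struct my_client, chnl);
		dev_hold(c->netdev);	/* a queued packet pins the device */
	}

	static void my_put(struct cflayer *lyr)
	{
		struct my_client *c = container_of(lyr, struct my_client, chnl);
		dev_put(c->netdev);
	}

	/* Called once the channel reports CAIF_CTRLCMD_INIT_RSP: */
	static void my_connected(struct my_client *c)
	{
		caif_client_register_refcnt(&c->chnl, my_hold, my_put);
	}
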
diff --git a/net/caif/cfutill.c b/net/caif/cfutill.c
index 315c0d601368..98e027db18ed 100644
--- a/net/caif/cfutill.c
+++ b/net/caif/cfutill.c
@@ -100,10 +100,5 @@ static int cfutill_transmit(struct cflayer *layr, struct cfpkt *pkt)
100 */ 100 */
101 info->hdr_len = 1; 101 info->hdr_len = 1;
102 info->dev_info = &service->dev_info; 102 info->dev_info = &service->dev_info;
103 ret = layr->dn->transmit(layr->dn, pkt); 103 return layr->dn->transmit(layr->dn, pkt);
104 if (ret < 0) {
105 u32 tmp32;
106 cfpkt_extr_head(pkt, &tmp32, 4);
107 }
108 return ret;
109} 104}
diff --git a/net/caif/cfveil.c b/net/caif/cfveil.c
index c3b1dec4acf6..3ec83fbc2887 100644
--- a/net/caif/cfveil.c
+++ b/net/caif/cfveil.c
@@ -82,13 +82,14 @@ static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt)
82 int ret; 82 int ret;
83 struct cfsrvl *service = container_obj(layr); 83 struct cfsrvl *service = container_obj(layr);
84 if (!cfsrvl_ready(service, &ret)) 84 if (!cfsrvl_ready(service, &ret))
85 return ret; 85 goto err;
86 caif_assert(layr->dn != NULL); 86 caif_assert(layr->dn != NULL);
87 caif_assert(layr->dn->transmit != NULL); 87 caif_assert(layr->dn->transmit != NULL);
88 88
89 if (cfpkt_add_head(pkt, &tmp, 1) < 0) { 89 if (cfpkt_add_head(pkt, &tmp, 1) < 0) {
90 pr_err("Packet is erroneous!\n"); 90 pr_err("Packet is erroneous!\n");
91 return -EPROTO; 91 ret = -EPROTO;
92 goto err;
92 } 93 }
93 94
94 /* Add info-> for MUX-layer to route the packet out. */ 95 /* Add info-> for MUX-layer to route the packet out. */
@@ -96,8 +97,8 @@ static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt)
96 info->channel_id = service->layer.id; 97 info->channel_id = service->layer.id;
97 info->hdr_len = 1; 98 info->hdr_len = 1;
98 info->dev_info = &service->dev_info; 99 info->dev_info = &service->dev_info;
99 ret = layr->dn->transmit(layr->dn, pkt); 100 return layr->dn->transmit(layr->dn, pkt);
100 if (ret < 0) 101err:
101 cfpkt_extr_head(pkt, &tmp, 1); 102 cfpkt_destroy(pkt);
102 return ret; 103 return ret;
103} 104}
diff --git a/net/caif/cfvidl.c b/net/caif/cfvidl.c
index bf6fef2a0eff..b2f5989ad455 100644
--- a/net/caif/cfvidl.c
+++ b/net/caif/cfvidl.c
@@ -60,8 +60,5 @@ static int cfvidl_transmit(struct cflayer *layr, struct cfpkt *pkt)
60 info = cfpkt_info(pkt); 60 info = cfpkt_info(pkt);
61 info->channel_id = service->layer.id; 61 info->channel_id = service->layer.id;
62 info->dev_info = &service->dev_info; 62 info->dev_info = &service->dev_info;
63 ret = layr->dn->transmit(layr->dn, pkt); 63 return layr->dn->transmit(layr->dn, pkt);
64 if (ret < 0)
65 cfpkt_extr_head(pkt, &videoheader, 4);
66 return ret;
67} 64}
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 6008d6dc18a0..649ebacaf6bc 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -20,7 +20,6 @@
20#include <linux/caif/if_caif.h> 20#include <linux/caif/if_caif.h>
21#include <net/rtnetlink.h> 21#include <net/rtnetlink.h>
22#include <net/caif/caif_layer.h> 22#include <net/caif/caif_layer.h>
23#include <net/caif/cfcnfg.h>
24#include <net/caif/cfpkt.h> 23#include <net/caif/cfpkt.h>
25#include <net/caif/caif_dev.h> 24#include <net/caif/caif_dev.h>
26 25
@@ -84,10 +83,11 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
84 if (!priv) 83 if (!priv)
85 return -EINVAL; 84 return -EINVAL;
86 85
86 skb = (struct sk_buff *) cfpkt_tonative(pkt);
87
87 /* Get length of CAIF packet. */ 88 /* Get length of CAIF packet. */
88 pktlen = cfpkt_getlen(pkt); 89 pktlen = skb->len;
89 90
90 skb = (struct sk_buff *) cfpkt_tonative(pkt);
91 /* Pass some minimum information and 91 /* Pass some minimum information and
92 * send the packet to the net stack. 92 * send the packet to the net stack.
93 */ 93 */
@@ -153,6 +153,18 @@ static void close_work(struct work_struct *work)
153} 153}
154static DECLARE_WORK(close_worker, close_work); 154static DECLARE_WORK(close_worker, close_work);
155 155
156static void chnl_hold(struct cflayer *lyr)
157{
158 struct chnl_net *priv = container_of(lyr, struct chnl_net, chnl);
159 dev_hold(priv->netdev);
160}
161
162static void chnl_put(struct cflayer *lyr)
163{
164 struct chnl_net *priv = container_of(lyr, struct chnl_net, chnl);
165 dev_put(priv->netdev);
166}
167
156static void chnl_flowctrl_cb(struct cflayer *layr, enum caif_ctrlcmd flow, 168static void chnl_flowctrl_cb(struct cflayer *layr, enum caif_ctrlcmd flow,
157 int phyid) 169 int phyid)
158{ 170{
@@ -190,6 +202,7 @@ static void chnl_flowctrl_cb(struct cflayer *layr, enum caif_ctrlcmd flow,
190 netif_wake_queue(priv->netdev); 202 netif_wake_queue(priv->netdev);
191 break; 203 break;
192 case CAIF_CTRLCMD_INIT_RSP: 204 case CAIF_CTRLCMD_INIT_RSP:
205 caif_client_register_refcnt(&priv->chnl, chnl_hold, chnl_put);
193 priv->state = CAIF_CONNECTED; 206 priv->state = CAIF_CONNECTED;
194 priv->flowenabled = true; 207 priv->flowenabled = true;
195 netif_wake_queue(priv->netdev); 208 netif_wake_queue(priv->netdev);
@@ -257,8 +270,9 @@ static int chnl_net_open(struct net_device *dev)
257 270
258 if (priv->state != CAIF_CONNECTING) { 271 if (priv->state != CAIF_CONNECTING) {
259 priv->state = CAIF_CONNECTING; 272 priv->state = CAIF_CONNECTING;
260 result = caif_connect_client(&priv->conn_req, &priv->chnl, 273 result = caif_connect_client(dev_net(dev), &priv->conn_req,
261 &llifindex, &headroom, &tailroom); 274 &priv->chnl, &llifindex,
275 &headroom, &tailroom);
262 if (result != 0) { 276 if (result != 0) {
263 pr_debug("err: " 277 pr_debug("err: "
264 "Unable to register and open device," 278 "Unable to register and open device,"
@@ -314,7 +328,7 @@ static int chnl_net_open(struct net_device *dev)
314 328
315 if (result == 0) { 329 if (result == 0) {
316 pr_debug("connect timeout\n"); 330 pr_debug("connect timeout\n");
317 caif_disconnect_client(&priv->chnl); 331 caif_disconnect_client(dev_net(dev), &priv->chnl);
318 priv->state = CAIF_DISCONNECTED; 332 priv->state = CAIF_DISCONNECTED;
319 pr_debug("state disconnected\n"); 333 pr_debug("state disconnected\n");
320 result = -ETIMEDOUT; 334 result = -ETIMEDOUT;
@@ -330,7 +344,7 @@ static int chnl_net_open(struct net_device *dev)
330 return 0; 344 return 0;
331 345
332error: 346error:
333 caif_disconnect_client(&priv->chnl); 347 caif_disconnect_client(dev_net(dev), &priv->chnl);
334 priv->state = CAIF_DISCONNECTED; 348 priv->state = CAIF_DISCONNECTED;
335 pr_debug("state disconnected\n"); 349 pr_debug("state disconnected\n");
336 return result; 350 return result;
@@ -344,7 +358,7 @@ static int chnl_net_stop(struct net_device *dev)
344 ASSERT_RTNL(); 358 ASSERT_RTNL();
345 priv = netdev_priv(dev); 359 priv = netdev_priv(dev);
346 priv->state = CAIF_DISCONNECTED; 360 priv->state = CAIF_DISCONNECTED;
347 caif_disconnect_client(&priv->chnl); 361 caif_disconnect_client(dev_net(dev), &priv->chnl);
348 return 0; 362 return 0;
349} 363}
350 364
@@ -373,11 +387,18 @@ static const struct net_device_ops netdev_ops = {
373 .ndo_start_xmit = chnl_net_start_xmit, 387 .ndo_start_xmit = chnl_net_start_xmit,
374}; 388};
375 389
390static void chnl_net_destructor(struct net_device *dev)
391{
392 struct chnl_net *priv = netdev_priv(dev);
393 caif_free_client(&priv->chnl);
394 free_netdev(dev);
395}
396
376static void ipcaif_net_setup(struct net_device *dev) 397static void ipcaif_net_setup(struct net_device *dev)
377{ 398{
378 struct chnl_net *priv; 399 struct chnl_net *priv;
379 dev->netdev_ops = &netdev_ops; 400 dev->netdev_ops = &netdev_ops;
380 dev->destructor = free_netdev; 401 dev->destructor = chnl_net_destructor;
381 dev->flags |= IFF_NOARP; 402 dev->flags |= IFF_NOARP;
382 dev->flags |= IFF_POINTOPOINT; 403 dev->flags |= IFF_POINTOPOINT;
383 dev->mtu = GPRS_PDP_MTU; 404 dev->mtu = GPRS_PDP_MTU;
@@ -391,7 +412,7 @@ static void ipcaif_net_setup(struct net_device *dev)
391 priv->conn_req.link_selector = CAIF_LINK_HIGH_BANDW; 412 priv->conn_req.link_selector = CAIF_LINK_HIGH_BANDW;
392 priv->conn_req.priority = CAIF_PRIO_LOW; 413 priv->conn_req.priority = CAIF_PRIO_LOW;
393 /* Insert illegal value */ 414 /* Insert illegal value */
394 priv->conn_req.sockaddr.u.dgm.connection_id = -1; 415 priv->conn_req.sockaddr.u.dgm.connection_id = 0;
395 priv->flowenabled = false; 416 priv->flowenabled = false;
396 417
397 init_waitqueue_head(&priv->netmgmt_wq); 418 init_waitqueue_head(&priv->netmgmt_wq);
@@ -453,6 +474,10 @@ static int ipcaif_newlink(struct net *src_net, struct net_device *dev,
453 pr_warn("device rtml registration failed\n"); 474 pr_warn("device rtml registration failed\n");
454 else 475 else
455 list_add(&caifdev->list_field, &chnl_net_list); 476 list_add(&caifdev->list_field, &chnl_net_list);
477
478 /* Take ifindex as connection-id if null */
479 if (caifdev->conn_req.sockaddr.u.dgm.connection_id == 0)
480 caifdev->conn_req.sockaddr.u.dgm.connection_id = dev->ifindex;
456 return ret; 481 return ret;
457} 482}
458 483
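
Two lifetime details in this hunk are easy to miss. First, dev->destructor switches from free_netdev to chnl_net_destructor so the CAIF client state is released exactly when the last dev_put() (possibly one taken by chnl_hold() above) drops the device. Second, the sentinel for an unset datagram connection id changes from -1 to 0, and ipcaif_newlink() now fills in a sane default, roughly:

	/* Paraphrased from the hunk above: 0 means "unset", and the
	 * ifindex is a convenient nonzero, per-device unique fallback. */
	if (caifdev->conn_req.sockaddr.u.dgm.connection_id == 0)
		caifdev->conn_req.sockaddr.u.dgm.connection_id = dev->ifindex;
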
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 733d66f1b05a..094fc5332d42 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -84,8 +84,8 @@ static DEFINE_SPINLOCK(can_rcvlists_lock);
84static struct kmem_cache *rcv_cache __read_mostly; 84static struct kmem_cache *rcv_cache __read_mostly;
85 85
86/* table of registered CAN protocols */ 86/* table of registered CAN protocols */
87static struct can_proto *proto_tab[CAN_NPROTO] __read_mostly; 87static const struct can_proto *proto_tab[CAN_NPROTO] __read_mostly;
88static DEFINE_SPINLOCK(proto_tab_lock); 88static DEFINE_MUTEX(proto_tab_lock);
89 89
90struct timer_list can_stattimer; /* timer for statistics update */ 90struct timer_list can_stattimer; /* timer for statistics update */
91struct s_stats can_stats; /* packet statistics */ 91struct s_stats can_stats; /* packet statistics */
@@ -115,11 +115,29 @@ static void can_sock_destruct(struct sock *sk)
115 skb_queue_purge(&sk->sk_receive_queue); 115 skb_queue_purge(&sk->sk_receive_queue);
116} 116}
117 117
118static const struct can_proto *can_get_proto(int protocol)
119{
120 const struct can_proto *cp;
121
122 rcu_read_lock();
123 cp = rcu_dereference(proto_tab[protocol]);
124 if (cp && !try_module_get(cp->prot->owner))
125 cp = NULL;
126 rcu_read_unlock();
127
128 return cp;
129}
130
131static inline void can_put_proto(const struct can_proto *cp)
132{
133 module_put(cp->prot->owner);
134}
135
118static int can_create(struct net *net, struct socket *sock, int protocol, 136static int can_create(struct net *net, struct socket *sock, int protocol,
119 int kern) 137 int kern)
120{ 138{
121 struct sock *sk; 139 struct sock *sk;
122 struct can_proto *cp; 140 const struct can_proto *cp;
123 int err = 0; 141 int err = 0;
124 142
125 sock->state = SS_UNCONNECTED; 143 sock->state = SS_UNCONNECTED;
@@ -130,9 +148,12 @@ static int can_create(struct net *net, struct socket *sock, int protocol,
130 if (!net_eq(net, &init_net)) 148 if (!net_eq(net, &init_net))
131 return -EAFNOSUPPORT; 149 return -EAFNOSUPPORT;
132 150
151 cp = can_get_proto(protocol);
152
133#ifdef CONFIG_MODULES 153#ifdef CONFIG_MODULES
134 /* try to load protocol module kernel is modular */ 154 if (!cp) {
135 if (!proto_tab[protocol]) { 155 /* try to load protocol module if kernel is modular */
156
136 err = request_module("can-proto-%d", protocol); 157 err = request_module("can-proto-%d", protocol);
137 158
138 /* 159 /*
@@ -143,22 +164,18 @@ static int can_create(struct net *net, struct socket *sock, int protocol,
143 if (err && printk_ratelimit()) 164 if (err && printk_ratelimit())
144 printk(KERN_ERR "can: request_module " 165 printk(KERN_ERR "can: request_module "
145 "(can-proto-%d) failed.\n", protocol); 166 "(can-proto-%d) failed.\n", protocol);
167
168 cp = can_get_proto(protocol);
146 } 169 }
147#endif 170#endif
148 171
149 spin_lock(&proto_tab_lock);
150 cp = proto_tab[protocol];
151 if (cp && !try_module_get(cp->prot->owner))
152 cp = NULL;
153 spin_unlock(&proto_tab_lock);
154
155 /* check for available protocol and correct usage */ 172 /* check for available protocol and correct usage */
156 173
157 if (!cp) 174 if (!cp)
158 return -EPROTONOSUPPORT; 175 return -EPROTONOSUPPORT;
159 176
160 if (cp->type != sock->type) { 177 if (cp->type != sock->type) {
161 err = -EPROTONOSUPPORT; 178 err = -EPROTOTYPE;
162 goto errout; 179 goto errout;
163 } 180 }
164 181
@@ -183,7 +200,7 @@ static int can_create(struct net *net, struct socket *sock, int protocol,
183 } 200 }
184 201
185 errout: 202 errout:
186 module_put(cp->prot->owner); 203 can_put_proto(cp);
187 return err; 204 return err;
188} 205}
189 206
@@ -679,7 +696,7 @@ drop:
679 * -EBUSY protocol already in use 696 * -EBUSY protocol already in use
680 * -ENOBUF if proto_register() fails 697 * -ENOBUF if proto_register() fails
681 */ 698 */
682int can_proto_register(struct can_proto *cp) 699int can_proto_register(const struct can_proto *cp)
683{ 700{
684 int proto = cp->protocol; 701 int proto = cp->protocol;
685 int err = 0; 702 int err = 0;
@@ -694,15 +711,16 @@ int can_proto_register(struct can_proto *cp)
694 if (err < 0) 711 if (err < 0)
695 return err; 712 return err;
696 713
697 spin_lock(&proto_tab_lock); 714 mutex_lock(&proto_tab_lock);
715
698 if (proto_tab[proto]) { 716 if (proto_tab[proto]) {
699 printk(KERN_ERR "can: protocol %d already registered\n", 717 printk(KERN_ERR "can: protocol %d already registered\n",
700 proto); 718 proto);
701 err = -EBUSY; 719 err = -EBUSY;
702 } else 720 } else
703 proto_tab[proto] = cp; 721 rcu_assign_pointer(proto_tab[proto], cp);
704 722
705 spin_unlock(&proto_tab_lock); 723 mutex_unlock(&proto_tab_lock);
706 724
707 if (err < 0) 725 if (err < 0)
708 proto_unregister(cp->prot); 726 proto_unregister(cp->prot);
@@ -715,17 +733,16 @@ EXPORT_SYMBOL(can_proto_register);
715 * can_proto_unregister - unregister CAN transport protocol 733 * can_proto_unregister - unregister CAN transport protocol
716 * @cp: pointer to CAN protocol structure 734 * @cp: pointer to CAN protocol structure
717 */ 735 */
718void can_proto_unregister(struct can_proto *cp) 736void can_proto_unregister(const struct can_proto *cp)
719{ 737{
720 int proto = cp->protocol; 738 int proto = cp->protocol;
721 739
722 spin_lock(&proto_tab_lock); 740 mutex_lock(&proto_tab_lock);
723 if (!proto_tab[proto]) { 741 BUG_ON(proto_tab[proto] != cp);
724 printk(KERN_ERR "BUG: can: protocol %d is not registered\n", 742 rcu_assign_pointer(proto_tab[proto], NULL);
725 proto); 743 mutex_unlock(&proto_tab_lock);
726 } 744
727 proto_tab[proto] = NULL; 745 synchronize_rcu();
728 spin_unlock(&proto_tab_lock);
729 746
730 proto_unregister(cp->prot); 747 proto_unregister(cp->prot);
731} 748}
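
proto_tab becomes an RCU-published array: lookups run lock-free, updates serialize on a mutex (they may now sleep, which the old spinlock forbade), and can_get_proto() pairs rcu_dereference() with try_module_get() so a protocol module cannot unload between lookup and use. Unregistration inverts this: clear the slot, synchronize_rcu(), and only then tear the proto down. A condensed sketch of both sides (simplified from the code above):

	/* Reader: the pointer is protected by RCU, the code behind it by
	 * the module refcount; both must be taken before the entry is
	 * usable. */
	rcu_read_lock();
	cp = rcu_dereference(proto_tab[protocol]);
	if (cp && !try_module_get(cp->prot->owner))
		cp = NULL;			/* module mid-unload */
	rcu_read_unlock();
	/* ... use cp, then can_put_proto(cp) -> module_put() ... */

	/* Writer (unregister): unpublish, then wait out all readers. */
	mutex_lock(&proto_tab_lock);
	rcu_assign_pointer(proto_tab[proto], NULL);
	mutex_unlock(&proto_tab_lock);
	synchronize_rcu();		/* no reader still sees cp */
	proto_unregister(cp->prot);
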
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 8a6a05e7c3c8..cced806098a9 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -1601,7 +1601,7 @@ static struct proto bcm_proto __read_mostly = {
1601 .init = bcm_init, 1601 .init = bcm_init,
1602}; 1602};
1603 1603
1604static struct can_proto bcm_can_proto __read_mostly = { 1604static const struct can_proto bcm_can_proto = {
1605 .type = SOCK_DGRAM, 1605 .type = SOCK_DGRAM,
1606 .protocol = CAN_BCM, 1606 .protocol = CAN_BCM,
1607 .ops = &bcm_ops, 1607 .ops = &bcm_ops,
diff --git a/net/can/raw.c b/net/can/raw.c
index 0eb39a7fdf64..dea99a6e596c 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -774,7 +774,7 @@ static struct proto raw_proto __read_mostly = {
774 .init = raw_init, 774 .init = raw_init,
775}; 775};
776 776
777static struct can_proto raw_can_proto __read_mostly = { 777static const struct can_proto raw_can_proto = {
778 .type = SOCK_RAW, 778 .type = SOCK_RAW,
779 .protocol = CAN_RAW, 779 .protocol = CAN_RAW,
780 .ops = &raw_ops, 780 .ops = &raw_ops,
diff --git a/net/compat.c b/net/compat.c
index 3649d5895361..c578d9382e19 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -722,11 +722,11 @@ EXPORT_SYMBOL(compat_mc_getsockopt);
722 722
723/* Argument list sizes for compat_sys_socketcall */ 723/* Argument list sizes for compat_sys_socketcall */
724#define AL(x) ((x) * sizeof(u32)) 724#define AL(x) ((x) * sizeof(u32))
725static unsigned char nas[20] = { 725static unsigned char nas[21] = {
726 AL(0), AL(3), AL(3), AL(3), AL(2), AL(3), 726 AL(0), AL(3), AL(3), AL(3), AL(2), AL(3),
727 AL(3), AL(3), AL(4), AL(4), AL(4), AL(6), 727 AL(3), AL(3), AL(4), AL(4), AL(4), AL(6),
728 AL(6), AL(2), AL(5), AL(5), AL(3), AL(3), 728 AL(6), AL(2), AL(5), AL(5), AL(3), AL(3),
729 AL(4), AL(5) 729 AL(4), AL(5), AL(4)
730}; 730};
731#undef AL 731#undef AL
732 732
@@ -735,6 +735,13 @@ asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, uns
735 return sys_sendmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT); 735 return sys_sendmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
736} 736}
737 737
738asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg,
739 unsigned vlen, unsigned int flags)
740{
741 return __sys_sendmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
742 flags | MSG_CMSG_COMPAT);
743}
744
738asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg, unsigned int flags) 745asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg, unsigned int flags)
739{ 746{
740 return sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT); 747 return sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
@@ -780,7 +787,7 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
780 u32 a[6]; 787 u32 a[6];
781 u32 a0, a1; 788 u32 a0, a1;
782 789
783 if (call < SYS_SOCKET || call > SYS_RECVMMSG) 790 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
784 return -EINVAL; 791 return -EINVAL;
785 if (copy_from_user(a, args, nas[call])) 792 if (copy_from_user(a, args, nas[call]))
786 return -EFAULT; 793 return -EFAULT;
@@ -839,6 +846,9 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
839 case SYS_SENDMSG: 846 case SYS_SENDMSG:
840 ret = compat_sys_sendmsg(a0, compat_ptr(a1), a[2]); 847 ret = compat_sys_sendmsg(a0, compat_ptr(a1), a[2]);
841 break; 848 break;
849 case SYS_SENDMMSG:
850 ret = compat_sys_sendmmsg(a0, compat_ptr(a1), a[2], a[3]);
851 break;
842 case SYS_RECVMSG: 852 case SYS_RECVMSG:
843 ret = compat_sys_recvmsg(a0, compat_ptr(a1), a[2]); 853 ret = compat_sys_recvmsg(a0, compat_ptr(a1), a[2]);
844 break; 854 break;
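
Wiring sendmmsg() into the compat socketcall multiplexer is pure bookkeeping: nas[call] tells the dispatcher how many bytes of packed 32-bit arguments to copy from userspace, and sendmmsg(fd, mmsg, vlen, flags) carries four of them. With SYS_SENDMMSG defined as 20, the table needs 21 entries and the new slot works out as:

	/* nas[SYS_SENDMMSG] == AL(4) == 4 * sizeof(u32) == 16 bytes */
	u32 a[6];
	if (copy_from_user(a, args, nas[SYS_SENDMMSG]))
		return -EFAULT;
	ret = compat_sys_sendmmsg(a[0], compat_ptr(a[1]), a[2], a[3]);
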
diff --git a/net/core/dev.c b/net/core/dev.c
index b624fe4d9bd7..d94537914a71 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -948,7 +948,7 @@ int dev_alloc_name(struct net_device *dev, const char *name)
948} 948}
949EXPORT_SYMBOL(dev_alloc_name); 949EXPORT_SYMBOL(dev_alloc_name);
950 950
951static int dev_get_valid_name(struct net_device *dev, const char *name, bool fmt) 951static int dev_get_valid_name(struct net_device *dev, const char *name)
952{ 952{
953 struct net *net; 953 struct net *net;
954 954
@@ -958,7 +958,7 @@ static int dev_get_valid_name(struct net_device *dev, const char *name, bool fmt
958 if (!dev_valid_name(name)) 958 if (!dev_valid_name(name))
959 return -EINVAL; 959 return -EINVAL;
960 960
961 if (fmt && strchr(name, '%')) 961 if (strchr(name, '%'))
962 return dev_alloc_name(dev, name); 962 return dev_alloc_name(dev, name);
963 else if (__dev_get_by_name(net, name)) 963 else if (__dev_get_by_name(net, name))
964 return -EEXIST; 964 return -EEXIST;
@@ -995,7 +995,7 @@ int dev_change_name(struct net_device *dev, const char *newname)
995 995
996 memcpy(oldname, dev->name, IFNAMSIZ); 996 memcpy(oldname, dev->name, IFNAMSIZ);
997 997
998 err = dev_get_valid_name(dev, newname, 1); 998 err = dev_get_valid_name(dev, newname);
999 if (err < 0) 999 if (err < 0)
1000 return err; 1000 return err;
1001 1001
@@ -1007,7 +1007,7 @@ rollback:
1007 } 1007 }
1008 1008
1009 write_lock_bh(&dev_base_lock); 1009 write_lock_bh(&dev_base_lock);
1010 hlist_del(&dev->name_hlist); 1010 hlist_del_rcu(&dev->name_hlist);
1011 write_unlock_bh(&dev_base_lock); 1011 write_unlock_bh(&dev_base_lock);
1012 1012
1013 synchronize_rcu(); 1013 synchronize_rcu();
@@ -1317,7 +1317,8 @@ void dev_disable_lro(struct net_device *dev)
1317 return; 1317 return;
1318 1318
1319 __ethtool_set_flags(dev, flags & ~ETH_FLAG_LRO); 1319 __ethtool_set_flags(dev, flags & ~ETH_FLAG_LRO);
1320 WARN_ON(dev->features & NETIF_F_LRO); 1320 if (unlikely(dev->features & NETIF_F_LRO))
1321 netdev_WARN(dev, "failed to disable LRO!\n");
1321} 1322}
1322EXPORT_SYMBOL(dev_disable_lro); 1323EXPORT_SYMBOL(dev_disable_lro);
1323 1324
@@ -2504,8 +2505,8 @@ static inline void ____napi_schedule(struct softnet_data *sd,
2504__u32 __skb_get_rxhash(struct sk_buff *skb) 2505__u32 __skb_get_rxhash(struct sk_buff *skb)
2505{ 2506{
2506 int nhoff, hash = 0, poff; 2507 int nhoff, hash = 0, poff;
2507 struct ipv6hdr *ip6; 2508 const struct ipv6hdr *ip6;
2508 struct iphdr *ip; 2509 const struct iphdr *ip;
2509 u8 ip_proto; 2510 u8 ip_proto;
2510 u32 addr1, addr2, ihl; 2511 u32 addr1, addr2, ihl;
2511 union { 2512 union {
@@ -2520,7 +2521,7 @@ __u32 __skb_get_rxhash(struct sk_buff *skb)
2520 if (!pskb_may_pull(skb, sizeof(*ip) + nhoff)) 2521 if (!pskb_may_pull(skb, sizeof(*ip) + nhoff))
2521 goto done; 2522 goto done;
2522 2523
2523 ip = (struct iphdr *) (skb->data + nhoff); 2524 ip = (const struct iphdr *) (skb->data + nhoff);
2524 if (ip->frag_off & htons(IP_MF | IP_OFFSET)) 2525 if (ip->frag_off & htons(IP_MF | IP_OFFSET))
2525 ip_proto = 0; 2526 ip_proto = 0;
2526 else 2527 else
@@ -2533,7 +2534,7 @@ __u32 __skb_get_rxhash(struct sk_buff *skb)
2533 if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff)) 2534 if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff))
2534 goto done; 2535 goto done;
2535 2536
2536 ip6 = (struct ipv6hdr *) (skb->data + nhoff); 2537 ip6 = (const struct ipv6hdr *) (skb->data + nhoff);
2537 ip_proto = ip6->nexthdr; 2538 ip_proto = ip6->nexthdr;
2538 addr1 = (__force u32) ip6->saddr.s6_addr32[3]; 2539 addr1 = (__force u32) ip6->saddr.s6_addr32[3];
2539 addr2 = (__force u32) ip6->daddr.s6_addr32[3]; 2540 addr2 = (__force u32) ip6->daddr.s6_addr32[3];
@@ -3078,25 +3079,6 @@ void netdev_rx_handler_unregister(struct net_device *dev)
3078} 3079}
3079EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister); 3080EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
3080 3081
3081static void vlan_on_bond_hook(struct sk_buff *skb)
3082{
3083 /*
3084 * Make sure ARP frames received on VLAN interfaces stacked on
3085 * bonding interfaces still make their way to any base bonding
3086 * device that may have registered for a specific ptype.
3087 */
3088 if (skb->dev->priv_flags & IFF_802_1Q_VLAN &&
3089 vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING &&
3090 skb->protocol == htons(ETH_P_ARP)) {
3091 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
3092
3093 if (!skb2)
3094 return;
3095 skb2->dev = vlan_dev_real_dev(skb->dev);
3096 netif_rx(skb2);
3097 }
3098}
3099
3100static int __netif_receive_skb(struct sk_buff *skb) 3082static int __netif_receive_skb(struct sk_buff *skb)
3101{ 3083{
3102 struct packet_type *ptype, *pt_prev; 3084 struct packet_type *ptype, *pt_prev;
@@ -3132,6 +3114,12 @@ another_round:
3132 3114
3133 __this_cpu_inc(softnet_data.processed); 3115 __this_cpu_inc(softnet_data.processed);
3134 3116
3117 if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
3118 skb = vlan_untag(skb);
3119 if (unlikely(!skb))
3120 goto out;
3121 }
3122
3135#ifdef CONFIG_NET_CLS_ACT 3123#ifdef CONFIG_NET_CLS_ACT
3136 if (skb->tc_verd & TC_NCLS) { 3124 if (skb->tc_verd & TC_NCLS) {
3137 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd); 3125 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
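
With this hunk, software-path 802.1Q frames are untagged centrally before taps and classifiers run: vlan_untag() moves the tag out of the packet data and into skb->vlan_tci, after which the frame is handled exactly like a hardware-accelerated one (this is also what lets the ARP-on-bond special case in the next hunk go away). A conceptual sketch of the untagging step, assuming the usual post-eth_type_trans layout; the real helper in net/8021q/vlan_core.c additionally fixes up the MAC header and handles shared skbs:

	static struct sk_buff *vlan_untag_sketch(struct sk_buff *skb)
	{
		struct vlan_hdr *vhdr;

		if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
			return NULL;			/* malformed frame: caller drops */
		vhdr = (struct vlan_hdr *)skb->data;
		skb->vlan_tci = ntohs(vhdr->h_vlan_TCI);	/* tag now lives out of band */
		skb->protocol = vhdr->h_vlan_encapsulated_proto;
		skb_pull(skb, VLAN_HLEN);
		return skb;
	}
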
@@ -3179,15 +3167,13 @@ ncls:
3179 ret = deliver_skb(skb, pt_prev, orig_dev); 3167 ret = deliver_skb(skb, pt_prev, orig_dev);
3180 pt_prev = NULL; 3168 pt_prev = NULL;
3181 } 3169 }
3182 if (vlan_hwaccel_do_receive(&skb)) { 3170 if (vlan_do_receive(&skb)) {
3183 ret = __netif_receive_skb(skb); 3171 ret = __netif_receive_skb(skb);
3184 goto out; 3172 goto out;
3185 } else if (unlikely(!skb)) 3173 } else if (unlikely(!skb))
3186 goto out; 3174 goto out;
3187 } 3175 }
3188 3176
3189 vlan_on_bond_hook(skb);
3190
3191 /* deliver only exact match when indicated */ 3177 /* deliver only exact match when indicated */
3192 null_or_dev = deliver_exact ? skb->dev : NULL; 3178 null_or_dev = deliver_exact ? skb->dev : NULL;
3193 3179
@@ -4512,6 +4498,30 @@ void dev_set_rx_mode(struct net_device *dev)
4512} 4498}
4513 4499
4514/** 4500/**
4501 * dev_ethtool_get_settings - call device's ethtool_ops::get_settings()
4502 * @dev: device
4503 * @cmd: memory area for ethtool_ops::get_settings() result
4504 *
4505 * The cmd arg is initialized properly (cleared and
4506 * ethtool_cmd::cmd field set to ETHTOOL_GSET).
4507 *
4508 * Return device's ethtool_ops::get_settings() result value or
4509 * -EOPNOTSUPP when device doesn't expose
4510 * ethtool_ops::get_settings() operation.
4511 */
4512int dev_ethtool_get_settings(struct net_device *dev,
4513 struct ethtool_cmd *cmd)
4514{
4515 if (!dev->ethtool_ops || !dev->ethtool_ops->get_settings)
4516 return -EOPNOTSUPP;
4517
4518 memset(cmd, 0, sizeof(struct ethtool_cmd));
4519 cmd->cmd = ETHTOOL_GSET;
4520 return dev->ethtool_ops->get_settings(dev, cmd);
4521}
4522EXPORT_SYMBOL(dev_ethtool_get_settings);
4523
4524/**
4515 * dev_get_flags - get flags reported to userspace 4525 * dev_get_flags - get flags reported to userspace
4516 * @dev: device 4526 * @dev: device
4517 * 4527 *
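
The new wrapper saves every caller the NULL checks and the ETHTOOL_GSET boilerplate. A usage sketch, mirroring the net-sysfs callers converted later in this diff:

	struct ethtool_cmd cmd;

	if (!dev_ethtool_get_settings(netdev, &cmd))
		pr_info("%s: %u Mb/s, %s duplex\n", netdev->name,
			ethtool_cmd_speed(&cmd),
			cmd.duplex == DUPLEX_FULL ? "full" : "half");
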
@@ -5116,7 +5126,7 @@ static void rollback_registered_many(struct list_head *head)
5116 list_del(&dev->unreg_list); 5126 list_del(&dev->unreg_list);
5117 continue; 5127 continue;
5118 } 5128 }
5119 5129 dev->dismantle = true;
5120 BUG_ON(dev->reg_state != NETREG_REGISTERED); 5130 BUG_ON(dev->reg_state != NETREG_REGISTERED);
5121 } 5131 }
5122 5132
@@ -5242,11 +5252,13 @@ u32 netdev_fix_features(struct net_device *dev, u32 features)
5242} 5252}
5243EXPORT_SYMBOL(netdev_fix_features); 5253EXPORT_SYMBOL(netdev_fix_features);
5244 5254
5245void netdev_update_features(struct net_device *dev) 5255int __netdev_update_features(struct net_device *dev)
5246{ 5256{
5247 u32 features; 5257 u32 features;
5248 int err = 0; 5258 int err = 0;
5249 5259
5260 ASSERT_RTNL();
5261
5250 features = netdev_get_wanted_features(dev); 5262 features = netdev_get_wanted_features(dev);
5251 5263
5252 if (dev->netdev_ops->ndo_fix_features) 5264 if (dev->netdev_ops->ndo_fix_features)
@@ -5256,24 +5268,60 @@ void netdev_update_features(struct net_device *dev)
5256 features = netdev_fix_features(dev, features); 5268 features = netdev_fix_features(dev, features);
5257 5269
5258 if (dev->features == features) 5270 if (dev->features == features)
5259 return; 5271 return 0;
5260 5272
5261 netdev_info(dev, "Features changed: 0x%08x -> 0x%08x\n", 5273 netdev_dbg(dev, "Features changed: 0x%08x -> 0x%08x\n",
5262 dev->features, features); 5274 dev->features, features);
5263 5275
5264 if (dev->netdev_ops->ndo_set_features) 5276 if (dev->netdev_ops->ndo_set_features)
5265 err = dev->netdev_ops->ndo_set_features(dev, features); 5277 err = dev->netdev_ops->ndo_set_features(dev, features);
5266 5278
5267 if (!err) 5279 if (unlikely(err < 0)) {
5268 dev->features = features;
5269 else if (err < 0)
5270 netdev_err(dev, 5280 netdev_err(dev,
5271 "set_features() failed (%d); wanted 0x%08x, left 0x%08x\n", 5281 "set_features() failed (%d); wanted 0x%08x, left 0x%08x\n",
5272 err, features, dev->features); 5282 err, features, dev->features);
5283 return -1;
5284 }
5285
5286 if (!err)
5287 dev->features = features;
5288
5289 return 1;
5290}
5291
5292/**
5293 * netdev_update_features - recalculate device features
5294 * @dev: the device to check
5295 *
5296 * Recalculate dev->features set and send notifications if it
5297 * has changed. Should be called after driver or hardware dependent
5298 * conditions might have changed that influence the features.
5299 */
5300void netdev_update_features(struct net_device *dev)
5301{
5302 if (__netdev_update_features(dev))
5303 netdev_features_change(dev);
5273} 5304}
5274EXPORT_SYMBOL(netdev_update_features); 5305EXPORT_SYMBOL(netdev_update_features);
5275 5306
5276/** 5307/**
5308 * netdev_change_features - recalculate device features
5309 * @dev: the device to check
5310 *
5311 * Recalculate dev->features set and send notifications even
5312 * if they have not changed. Should be called instead of
5313 * netdev_update_features() if also dev->vlan_features might
5314 * have changed to allow the changes to be propagated to stacked
5315 * VLAN devices.
5316 */
5317void netdev_change_features(struct net_device *dev)
5318{
5319 __netdev_update_features(dev);
5320 netdev_features_change(dev);
5321}
5322EXPORT_SYMBOL(netdev_change_features);
5323
5324/**
5277 * netif_stacked_transfer_operstate - transfer operstate 5325 * netif_stacked_transfer_operstate - transfer operstate
5278 * @rootdev: the root or lower level device to transfer state from 5326 * @rootdev: the root or lower level device to transfer state from
5279 * @dev: the device to transfer operstate to 5327 * @dev: the device to transfer operstate to
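
The split above yields three entry points: __netdev_update_features() recomputes dev->features under RTNL and returns nonzero when the set needed changing, netdev_update_features() notifies userspace only on an actual change, and netdev_change_features() always notifies, for callers that may have changed dev->vlan_features as well. A hedged driver-side sketch (the lost-RXCSUM condition is hypothetical):

	rtnl_lock();				/* __netdev_update_features() asserts RTNL */
	dev->hw_features &= ~NETIF_F_RXCSUM;	/* e.g. firmware revoked rx checksumming */
	netdev_update_features(dev);		/* recompute dev->features, notify if changed */
	rtnl_unlock();
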
@@ -5389,6 +5437,10 @@ int register_netdevice(struct net_device *dev)
5389 5437
5390 dev->iflink = -1; 5438 dev->iflink = -1;
5391 5439
5440 ret = dev_get_valid_name(dev, dev->name);
5441 if (ret < 0)
5442 goto out;
5443
5392 /* Init, if this function is available */ 5444 /* Init, if this function is available */
5393 if (dev->netdev_ops->ndo_init) { 5445 if (dev->netdev_ops->ndo_init) {
5394 ret = dev->netdev_ops->ndo_init(dev); 5446 ret = dev->netdev_ops->ndo_init(dev);
@@ -5399,10 +5451,6 @@ int register_netdevice(struct net_device *dev)
5399 } 5451 }
5400 } 5452 }
5401 5453
5402 ret = dev_get_valid_name(dev, dev->name, 0);
5403 if (ret)
5404 goto err_uninit;
5405
5406 dev->ifindex = dev_new_index(net); 5454 dev->ifindex = dev_new_index(net);
5407 if (dev->iflink == -1) 5455 if (dev->iflink == -1)
5408 dev->iflink = dev->ifindex; 5456 dev->iflink = dev->ifindex;
@@ -5414,6 +5462,14 @@ int register_netdevice(struct net_device *dev)
5414 dev->features |= NETIF_F_SOFT_FEATURES; 5462 dev->features |= NETIF_F_SOFT_FEATURES;
5415 dev->wanted_features = dev->features & dev->hw_features; 5463 dev->wanted_features = dev->features & dev->hw_features;
5416 5464
5465 /* Turn on no cache copy if HW is doing checksum */
5466 dev->hw_features |= NETIF_F_NOCACHE_COPY;
5467 if ((dev->features & NETIF_F_ALL_CSUM) &&
5468 !(dev->features & NETIF_F_NO_CSUM)) {
5469 dev->wanted_features |= NETIF_F_NOCACHE_COPY;
5470 dev->features |= NETIF_F_NOCACHE_COPY;
5471 }
5472
5417 /* Enable GRO and NETIF_F_HIGHDMA for vlans by default, 5473 /* Enable GRO and NETIF_F_HIGHDMA for vlans by default,
5418 * vlan_dev_init() will do the dev->features check, so these features 5474 * vlan_dev_init() will do the dev->features check, so these features
5419 * are enabled only if supported by underlying device. 5475 * are enabled only if supported by underlying device.
@@ -5430,7 +5486,7 @@ int register_netdevice(struct net_device *dev)
5430 goto err_uninit; 5486 goto err_uninit;
5431 dev->reg_state = NETREG_REGISTERED; 5487 dev->reg_state = NETREG_REGISTERED;
5432 5488
5433 netdev_update_features(dev); 5489 __netdev_update_features(dev);
5434 5490
5435 /* 5491 /*
5436 * Default initial state at registry is that the 5492 * Default initial state at registry is that the
@@ -5527,19 +5583,7 @@ int register_netdev(struct net_device *dev)
5527 int err; 5583 int err;
5528 5584
5529 rtnl_lock(); 5585 rtnl_lock();
5530
5531 /*
5532 * If the name is a format string the caller wants us to do a
5533 * name allocation.
5534 */
5535 if (strchr(dev->name, '%')) {
5536 err = dev_alloc_name(dev, dev->name);
5537 if (err < 0)
5538 goto out;
5539 }
5540
5541 err = register_netdevice(dev); 5586 err = register_netdevice(dev);
5542out:
5543 rtnl_unlock(); 5587 rtnl_unlock();
5544 return err; 5588 return err;
5545} 5589}
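
register_netdev() can drop the strchr(dev->name, '%') dance because register_netdevice() now resolves format names itself through dev_get_valid_name() (see the earlier hunk). A hedged sketch of the common caller pattern (struct my_priv is hypothetical):

	struct net_device *dev;
	int err;

	dev = alloc_etherdev(sizeof(struct my_priv));
	if (!dev)
		return -ENOMEM;
	/* alloc_etherdev() leaves dev->name as "eth%d"; the '%d' is filled in
	 * during register_netdevice(), no explicit dev_alloc_name() needed */
	err = register_netdev(dev);
	if (err)
		free_netdev(dev);
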
@@ -6021,7 +6065,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
6021 /* We get here if we can't use the current device name */ 6065 /* We get here if we can't use the current device name */
6022 if (!pat) 6066 if (!pat)
6023 goto out; 6067 goto out;
6024 if (dev_get_valid_name(dev, pat, 1)) 6068 if (dev_get_valid_name(dev, pat) < 0)
6025 goto out; 6069 goto out;
6026 } 6070 }
6027 6071
@@ -6153,29 +6197,20 @@ static int dev_cpu_callback(struct notifier_block *nfb,
  */
 u32 netdev_increment_features(u32 all, u32 one, u32 mask)
 {
-	/* If device needs checksumming, downgrade to it. */
-	if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
-		all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
-	else if (mask & NETIF_F_ALL_CSUM) {
-		/* If one device supports v4/v6 checksumming, set for all. */
-		if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
-		    !(all & NETIF_F_GEN_CSUM)) {
-			all &= ~NETIF_F_ALL_CSUM;
-			all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
-		}
+	if (mask & NETIF_F_GEN_CSUM)
+		mask |= NETIF_F_ALL_CSUM;
+	mask |= NETIF_F_VLAN_CHALLENGED;
 
-		/* If one device supports hw checksumming, set for all. */
-		if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) {
-			all &= ~NETIF_F_ALL_CSUM;
-			all |= NETIF_F_HW_CSUM;
-		}
-	}
+	all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
+	all &= one | ~NETIF_F_ALL_FOR_ALL;
 
-	one |= NETIF_F_ALL_CSUM;
+	/* If device needs checksumming, downgrade to it. */
+	if (all & (NETIF_F_ALL_CSUM & ~NETIF_F_NO_CSUM))
+		all &= ~NETIF_F_NO_CSUM;
 
-	one |= all & NETIF_F_ONE_FOR_ALL;
-	all &= one | NETIF_F_LLTX | NETIF_F_GSO | NETIF_F_UFO;
-	all |= one & mask & NETIF_F_ONE_FOR_ALL;
+	/* If one device supports hw checksumming, set for all. */
+	if (all & NETIF_F_GEN_CSUM)
+		all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
 
 	return all;
 }
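
The rewrite turns the feature-merging rules into pure bitmask arithmetic: bits in NETIF_F_ONE_FOR_ALL (plus any checksum bits covered by mask) are OR-ed in from each device, bits in NETIF_F_ALL_FOR_ALL are AND-ed, NETIF_F_VLAN_CHALLENGED always propagates, and a generic-checksum capability subsumes the protocol-specific ones. A hedged sketch of a bonding-style caller (the slave list, starting set and mask are hypothetical):

	u32 features = NETIF_F_ALL_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA;

	list_for_each_entry(slave, &bond->slave_list, list)
		features = netdev_increment_features(features,
						     slave->dev->features,
						     NETIF_F_ONE_FOR_ALL);
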
diff --git a/net/core/dst.c b/net/core/dst.c
index 0a3920bf3613..81a4fa1c95ed 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -34,9 +34,6 @@
34 * 3) This list is guarded by a mutex, 34 * 3) This list is guarded by a mutex,
35 * so that the gc_task and dst_dev_event() can be synchronized. 35 * so that the gc_task and dst_dev_event() can be synchronized.
36 */ 36 */
37#if RT_CACHE_DEBUG >= 2
38static atomic_t dst_total = ATOMIC_INIT(0);
39#endif
40 37
41/* 38/*
42 * We want to keep lock & list close together 39 * We want to keep lock & list close together
@@ -70,10 +67,6 @@ static void dst_gc_task(struct work_struct *work)
70 unsigned long expires = ~0L; 67 unsigned long expires = ~0L;
71 struct dst_entry *dst, *next, head; 68 struct dst_entry *dst, *next, head;
72 struct dst_entry *last = &head; 69 struct dst_entry *last = &head;
73#if RT_CACHE_DEBUG >= 2
74 ktime_t time_start = ktime_get();
75 struct timespec elapsed;
76#endif
77 70
78 mutex_lock(&dst_gc_mutex); 71 mutex_lock(&dst_gc_mutex);
79 next = dst_busy_list; 72 next = dst_busy_list;
@@ -147,15 +140,6 @@ loop:
147 140
148 spin_unlock_bh(&dst_garbage.lock); 141 spin_unlock_bh(&dst_garbage.lock);
149 mutex_unlock(&dst_gc_mutex); 142 mutex_unlock(&dst_gc_mutex);
150#if RT_CACHE_DEBUG >= 2
151 elapsed = ktime_to_timespec(ktime_sub(ktime_get(), time_start));
152 printk(KERN_DEBUG "dst_total: %d delayed: %d work_perf: %d"
153 " expires: %lu elapsed: %lu us\n",
154 atomic_read(&dst_total), delayed, work_performed,
155 expires,
156 elapsed.tv_sec * USEC_PER_SEC +
157 elapsed.tv_nsec / NSEC_PER_USEC);
158#endif
159} 143}
160 144
161int dst_discard(struct sk_buff *skb) 145int dst_discard(struct sk_buff *skb)
@@ -167,7 +151,8 @@ EXPORT_SYMBOL(dst_discard);
 
 const u32 dst_default_metrics[RTAX_MAX];
 
-void *dst_alloc(struct dst_ops *ops, int initial_ref)
+void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
+		int initial_ref, int initial_obsolete, int flags)
 {
 	struct dst_entry *dst;
 
@@ -175,18 +160,36 @@ void *dst_alloc(struct dst_ops *ops, int initial_ref)
 		if (ops->gc(ops))
 			return NULL;
 	}
-	dst = kmem_cache_zalloc(ops->kmem_cachep, GFP_ATOMIC);
+	dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
 	if (!dst)
 		return NULL;
-	atomic_set(&dst->__refcnt, initial_ref);
+	dst->child = NULL;
+	dst->dev = dev;
+	if (dev)
+		dev_hold(dev);
 	dst->ops = ops;
-	dst->lastuse = jiffies;
-	dst->path = dst;
-	dst->input = dst->output = dst_discard;
 	dst_init_metrics(dst, dst_default_metrics, true);
-#if RT_CACHE_DEBUG >= 2
-	atomic_inc(&dst_total);
+	dst->expires = 0UL;
+	dst->path = dst;
+	dst->neighbour = NULL;
+	dst->hh = NULL;
+#ifdef CONFIG_XFRM
+	dst->xfrm = NULL;
+#endif
+	dst->input = dst_discard;
+	dst->output = dst_discard;
+	dst->error = 0;
+	dst->obsolete = initial_obsolete;
+	dst->header_len = 0;
+	dst->trailer_len = 0;
+#ifdef CONFIG_IP_ROUTE_CLASSID
+	dst->tclassid = 0;
 #endif
+	atomic_set(&dst->__refcnt, initial_ref);
+	dst->__use = 0;
+	dst->lastuse = jiffies;
+	dst->flags = flags;
+	dst->next = NULL;
 	dst_entries_add(ops, 1);
 	return dst;
 }
@@ -246,9 +249,6 @@ again:
246 dst->ops->destroy(dst); 249 dst->ops->destroy(dst);
247 if (dst->dev) 250 if (dst->dev)
248 dev_put(dst->dev); 251 dev_put(dst->dev);
249#if RT_CACHE_DEBUG >= 2
250 atomic_dec(&dst_total);
251#endif
252 kmem_cache_free(dst->ops->kmem_cachep, dst); 252 kmem_cache_free(dst->ops->kmem_cachep, dst);
253 253
254 dst = child; 254 dst = child;
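
dst_alloc() now takes the device, the initial obsolete value and flags, and initializes every field explicitly instead of relying on kmem_cache_zalloc(), which avoids a full memset of the entry. A hedged caller sketch in the style of the IPv4 routing code (exact arguments vary per caller):

	struct rtable *rt;

	rt = dst_alloc(&ipv4_dst_ops, dev, 1 /* initial_ref */,
		       -1 /* initial_obsolete */, DST_HOST);
	if (!rt)
		return NULL;
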
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 74ead9eca126..84e7304532e6 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -21,6 +21,8 @@
21#include <linux/uaccess.h> 21#include <linux/uaccess.h>
22#include <linux/vmalloc.h> 22#include <linux/vmalloc.h>
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/rtnetlink.h>
25#include <linux/sched.h>
24 26
25/* 27/*
26 * Some useful ethtool_ops methods that're device independent. 28 * Some useful ethtool_ops methods that're device independent.
@@ -317,7 +319,7 @@ static int ethtool_set_features(struct net_device *dev, void __user *useraddr)
317 319
318 dev->wanted_features &= ~features[0].valid; 320 dev->wanted_features &= ~features[0].valid;
319 dev->wanted_features |= features[0].valid & features[0].requested; 321 dev->wanted_features |= features[0].valid & features[0].requested;
320 netdev_update_features(dev); 322 __netdev_update_features(dev);
321 323
322 if ((dev->wanted_features ^ dev->features) & features[0].valid) 324 if ((dev->wanted_features ^ dev->features) & features[0].valid)
323 ret |= ETHTOOL_F_WISH; 325 ret |= ETHTOOL_F_WISH;
@@ -330,7 +332,7 @@ static const char netdev_features_strings[ETHTOOL_DEV_FEATURE_WORDS * 32][ETH_GS
330 /* NETIF_F_IP_CSUM */ "tx-checksum-ipv4", 332 /* NETIF_F_IP_CSUM */ "tx-checksum-ipv4",
331 /* NETIF_F_NO_CSUM */ "tx-checksum-unneeded", 333 /* NETIF_F_NO_CSUM */ "tx-checksum-unneeded",
332 /* NETIF_F_HW_CSUM */ "tx-checksum-ip-generic", 334 /* NETIF_F_HW_CSUM */ "tx-checksum-ip-generic",
333 /* NETIF_F_IPV6_CSUM */ "tx_checksum-ipv6", 335 /* NETIF_F_IPV6_CSUM */ "tx-checksum-ipv6",
334 /* NETIF_F_HIGHDMA */ "highdma", 336 /* NETIF_F_HIGHDMA */ "highdma",
335 /* NETIF_F_FRAGLIST */ "tx-scatter-gather-fraglist", 337 /* NETIF_F_FRAGLIST */ "tx-scatter-gather-fraglist",
336 /* NETIF_F_HW_VLAN_TX */ "tx-vlan-hw-insert", 338 /* NETIF_F_HW_VLAN_TX */ "tx-vlan-hw-insert",
@@ -359,8 +361,8 @@ static const char netdev_features_strings[ETHTOOL_DEV_FEATURE_WORDS * 32][ETH_GS
359 /* NETIF_F_NTUPLE */ "rx-ntuple-filter", 361 /* NETIF_F_NTUPLE */ "rx-ntuple-filter",
360 /* NETIF_F_RXHASH */ "rx-hashing", 362 /* NETIF_F_RXHASH */ "rx-hashing",
361 /* NETIF_F_RXCSUM */ "rx-checksum", 363 /* NETIF_F_RXCSUM */ "rx-checksum",
362 "", 364 /* NETIF_F_NOCACHE_COPY */ "tx-nocache-copy",
363 "", 365 /* NETIF_F_LOOPBACK */ "loopback",
364}; 366};
365 367
366static int __ethtool_get_sset_count(struct net_device *dev, int sset) 368static int __ethtool_get_sset_count(struct net_device *dev, int sset)
@@ -499,7 +501,7 @@ static int ethtool_set_one_feature(struct net_device *dev,
499 else 501 else
500 dev->wanted_features &= ~mask; 502 dev->wanted_features &= ~mask;
501 503
502 netdev_update_features(dev); 504 __netdev_update_features(dev);
503 return 0; 505 return 0;
504 } 506 }
505 507
@@ -544,14 +546,14 @@ int __ethtool_set_flags(struct net_device *dev, u32 data)
544 } 546 }
545 547
546 /* allow changing only bits set in hw_features */ 548 /* allow changing only bits set in hw_features */
547 changed = (data ^ dev->wanted_features) & flags_dup_features; 549 changed = (data ^ dev->features) & flags_dup_features;
548 if (changed & ~dev->hw_features) 550 if (changed & ~dev->hw_features)
549 return (changed & dev->hw_features) ? -EINVAL : -EOPNOTSUPP; 551 return (changed & dev->hw_features) ? -EINVAL : -EOPNOTSUPP;
550 552
551 dev->wanted_features = 553 dev->wanted_features =
552 (dev->wanted_features & ~changed) | data; 554 (dev->wanted_features & ~changed) | (data & dev->hw_features);
553 555
554 netdev_update_features(dev); 556 __netdev_update_features(dev);
555 557
556 return 0; 558 return 0;
557} 559}
@@ -908,6 +910,9 @@ static noinline_for_stack int ethtool_set_rx_ntuple(struct net_device *dev,
908 struct ethtool_rx_ntuple_flow_spec_container *fsc = NULL; 910 struct ethtool_rx_ntuple_flow_spec_container *fsc = NULL;
909 int ret; 911 int ret;
910 912
913 if (!ops->set_rx_ntuple)
914 return -EOPNOTSUPP;
915
911 if (!(dev->features & NETIF_F_NTUPLE)) 916 if (!(dev->features & NETIF_F_NTUPLE))
912 return -EINVAL; 917 return -EINVAL;
913 918
@@ -1441,6 +1446,35 @@ static int ethtool_set_ringparam(struct net_device *dev, void __user *useraddr)
1441 return dev->ethtool_ops->set_ringparam(dev, &ringparam); 1446 return dev->ethtool_ops->set_ringparam(dev, &ringparam);
1442} 1447}
1443 1448
1449static noinline_for_stack int ethtool_get_channels(struct net_device *dev,
1450 void __user *useraddr)
1451{
1452 struct ethtool_channels channels = { .cmd = ETHTOOL_GCHANNELS };
1453
1454 if (!dev->ethtool_ops->get_channels)
1455 return -EOPNOTSUPP;
1456
1457 dev->ethtool_ops->get_channels(dev, &channels);
1458
1459 if (copy_to_user(useraddr, &channels, sizeof(channels)))
1460 return -EFAULT;
1461 return 0;
1462}
1463
1464static noinline_for_stack int ethtool_set_channels(struct net_device *dev,
1465 void __user *useraddr)
1466{
1467 struct ethtool_channels channels;
1468
1469 if (!dev->ethtool_ops->set_channels)
1470 return -EOPNOTSUPP;
1471
1472 if (copy_from_user(&channels, useraddr, sizeof(channels)))
1473 return -EFAULT;
1474
1475 return dev->ethtool_ops->set_channels(dev, &channels);
1476}
1477
1444static int ethtool_get_pauseparam(struct net_device *dev, void __user *useraddr) 1478static int ethtool_get_pauseparam(struct net_device *dev, void __user *useraddr)
1445{ 1479{
1446 struct ethtool_pauseparam pauseparam = { ETHTOOL_GPAUSEPARAM }; 1480 struct ethtool_pauseparam pauseparam = { ETHTOOL_GPAUSEPARAM };
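
ETHTOOL_GCHANNELS/ETHTOOL_SCHANNELS expose per-device queue ("channel") counts. A userspace usage sketch, assuming an AF_INET socket fd and the standard uapi headers:

	#include <string.h>
	#include <sys/ioctl.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	static int get_channels(int fd, const char *ifname, struct ethtool_channels *ch)
	{
		struct ifreq ifr;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		memset(ch, 0, sizeof(*ch));
		ch->cmd = ETHTOOL_GCHANNELS;
		ifr.ifr_data = (void *)ch;
		return ioctl(fd, SIOCETHTOOL, &ifr);	/* 0 on success, -1/errno otherwise */
	}
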
@@ -1618,14 +1652,60 @@ out:
1618static int ethtool_phys_id(struct net_device *dev, void __user *useraddr) 1652static int ethtool_phys_id(struct net_device *dev, void __user *useraddr)
1619{ 1653{
1620 struct ethtool_value id; 1654 struct ethtool_value id;
1655 static bool busy;
1656 int rc;
1621 1657
1622 if (!dev->ethtool_ops->phys_id) 1658 if (!dev->ethtool_ops->set_phys_id)
1623 return -EOPNOTSUPP; 1659 return -EOPNOTSUPP;
1624 1660
1661 if (busy)
1662 return -EBUSY;
1663
1625 if (copy_from_user(&id, useraddr, sizeof(id))) 1664 if (copy_from_user(&id, useraddr, sizeof(id)))
1626 return -EFAULT; 1665 return -EFAULT;
1627 1666
1628 return dev->ethtool_ops->phys_id(dev, id.data); 1667 rc = dev->ethtool_ops->set_phys_id(dev, ETHTOOL_ID_ACTIVE);
1668 if (rc < 0)
1669 return rc;
1670
1671 /* Drop the RTNL lock while waiting, but prevent reentry or
1672 * removal of the device.
1673 */
1674 busy = true;
1675 dev_hold(dev);
1676 rtnl_unlock();
1677
1678 if (rc == 0) {
1679 /* Driver will handle this itself */
1680 schedule_timeout_interruptible(
1681 id.data ? (id.data * HZ) : MAX_SCHEDULE_TIMEOUT);
1682 } else {
1683 /* Driver expects to be called at twice the frequency in rc */
1684 int n = rc * 2, i, interval = HZ / n;
1685
1686 /* Count down seconds */
1687 do {
1688 /* Count down iterations per second */
1689 i = n;
1690 do {
1691 rtnl_lock();
1692 rc = dev->ethtool_ops->set_phys_id(dev,
1693 (i & 1) ? ETHTOOL_ID_OFF : ETHTOOL_ID_ON);
1694 rtnl_unlock();
1695 if (rc)
1696 break;
1697 schedule_timeout_interruptible(interval);
1698 } while (!signal_pending(current) && --i != 0);
1699 } while (!signal_pending(current) &&
1700 (id.data == 0 || --id.data != 0));
1701 }
1702
1703 rtnl_lock();
1704 dev_put(dev);
1705 busy = false;
1706
1707 (void)dev->ethtool_ops->set_phys_id(dev, ETHTOOL_ID_INACTIVE);
1708 return rc;
1629} 1709}
1630 1710
1631static int ethtool_get_stats(struct net_device *dev, void __user *useraddr) 1711static int ethtool_get_stats(struct net_device *dev, void __user *useraddr)
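
ethtool_phys_id() now drives the new set_phys_id() callback: the driver is told ETHTOOL_ID_ACTIVE once, then (if it returned a frequency) toggled ON/OFF from the core with RTNL dropped during the wait, and finally told ETHTOOL_ID_INACTIVE. A hedged driver-side sketch (the LED helpers are hypothetical):

	static int my_set_phys_id(struct net_device *dev,
				  enum ethtool_phys_id_state state)
	{
		switch (state) {
		case ETHTOOL_ID_ACTIVE:
			return 2;	/* blink at 2 Hz: core calls ON/OFF 4 times/sec */
		case ETHTOOL_ID_ON:
			my_led_set(dev, true);
			break;
		case ETHTOOL_ID_OFF:
			my_led_set(dev, false);
			break;
		case ETHTOOL_ID_INACTIVE:
			my_led_restore(dev);	/* back to normal operation */
			break;
		}
		return 0;
	}
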
@@ -1743,6 +1823,87 @@ static noinline_for_stack int ethtool_flash_device(struct net_device *dev,
1743 return dev->ethtool_ops->flash_device(dev, &efl); 1823 return dev->ethtool_ops->flash_device(dev, &efl);
1744} 1824}
1745 1825
1826static int ethtool_set_dump(struct net_device *dev,
1827 void __user *useraddr)
1828{
1829 struct ethtool_dump dump;
1830
1831 if (!dev->ethtool_ops->set_dump)
1832 return -EOPNOTSUPP;
1833
1834 if (copy_from_user(&dump, useraddr, sizeof(dump)))
1835 return -EFAULT;
1836
1837 return dev->ethtool_ops->set_dump(dev, &dump);
1838}
1839
1840static int ethtool_get_dump_flag(struct net_device *dev,
1841 void __user *useraddr)
1842{
1843 int ret;
1844 struct ethtool_dump dump;
1845 const struct ethtool_ops *ops = dev->ethtool_ops;
1846
1847 if (!dev->ethtool_ops->get_dump_flag)
1848 return -EOPNOTSUPP;
1849
1850 if (copy_from_user(&dump, useraddr, sizeof(dump)))
1851 return -EFAULT;
1852
1853 ret = ops->get_dump_flag(dev, &dump);
1854 if (ret)
1855 return ret;
1856
1857 if (copy_to_user(useraddr, &dump, sizeof(dump)))
1858 return -EFAULT;
1859 return 0;
1860}
1861
1862static int ethtool_get_dump_data(struct net_device *dev,
1863 void __user *useraddr)
1864{
1865 int ret;
1866 __u32 len;
1867 struct ethtool_dump dump, tmp;
1868 const struct ethtool_ops *ops = dev->ethtool_ops;
1869 void *data = NULL;
1870
1871 if (!dev->ethtool_ops->get_dump_data ||
1872 !dev->ethtool_ops->get_dump_flag)
1873 return -EOPNOTSUPP;
1874
1875 if (copy_from_user(&dump, useraddr, sizeof(dump)))
1876 return -EFAULT;
1877
1878 memset(&tmp, 0, sizeof(tmp));
1879 tmp.cmd = ETHTOOL_GET_DUMP_FLAG;
1880 ret = ops->get_dump_flag(dev, &tmp);
1881 if (ret)
1882 return ret;
1883
1884 len = (tmp.len > dump.len) ? dump.len : tmp.len;
1885 if (!len)
1886 return -EFAULT;
1887
1888 data = vzalloc(tmp.len);
1889 if (!data)
1890 return -ENOMEM;
1891 ret = ops->get_dump_data(dev, &dump, data);
1892 if (ret)
1893 goto out;
1894
1895 if (copy_to_user(useraddr, &dump, sizeof(dump))) {
1896 ret = -EFAULT;
1897 goto out;
1898 }
1899 useraddr += offsetof(struct ethtool_dump, data);
1900 if (copy_to_user(useraddr, data, len))
1901 ret = -EFAULT;
1902out:
1903 vfree(data);
1904 return ret;
1905}
1906
1746/* The main entry point in this file. Called from net/core/dev.c */ 1907/* The main entry point in this file. Called from net/core/dev.c */
1747 1908
1748int dev_ethtool(struct net *net, struct ifreq *ifr) 1909int dev_ethtool(struct net *net, struct ifreq *ifr)
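
The firmware-dump interface added above is a two-step protocol: ETHTOOL_GET_DUMP_FLAG reports the active flag and required length, then ETHTOOL_GET_DUMP_DATA copies up to that many bytes after the header. A hedged userspace sketch (ethtool_ioctl() is a hypothetical SIOCETHTOOL wrapper, error handling trimmed):

	struct ethtool_dump flag = { .cmd = ETHTOOL_GET_DUMP_FLAG };
	struct ethtool_dump *dump;

	ethtool_ioctl(fd, ifname, &flag);
	dump = calloc(1, sizeof(*dump) + flag.len);
	dump->cmd = ETHTOOL_GET_DUMP_DATA;
	dump->len = flag.len;
	ethtool_ioctl(fd, ifname, dump);	/* dump->data now holds the device dump */
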
@@ -1953,6 +2114,21 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
1953 case ETHTOOL_SGRO: 2114 case ETHTOOL_SGRO:
1954 rc = ethtool_set_one_feature(dev, useraddr, ethcmd); 2115 rc = ethtool_set_one_feature(dev, useraddr, ethcmd);
1955 break; 2116 break;
2117 case ETHTOOL_GCHANNELS:
2118 rc = ethtool_get_channels(dev, useraddr);
2119 break;
2120 case ETHTOOL_SCHANNELS:
2121 rc = ethtool_set_channels(dev, useraddr);
2122 break;
2123 case ETHTOOL_SET_DUMP:
2124 rc = ethtool_set_dump(dev, useraddr);
2125 break;
2126 case ETHTOOL_GET_DUMP_FLAG:
2127 rc = ethtool_get_dump_flag(dev, useraddr);
2128 break;
2129 case ETHTOOL_GET_DUMP_DATA:
2130 rc = ethtool_get_dump_data(dev, useraddr);
2131 break;
1956 default: 2132 default:
1957 rc = -EOPNOTSUPP; 2133 rc = -EOPNOTSUPP;
1958 } 2134 }
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 8248ebb5891d..3911586e12e4 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -590,7 +590,8 @@ static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
590 int idx = 0; 590 int idx = 0;
591 struct fib_rule *rule; 591 struct fib_rule *rule;
592 592
593 list_for_each_entry(rule, &ops->rules_list, list) { 593 rcu_read_lock();
594 list_for_each_entry_rcu(rule, &ops->rules_list, list) {
594 if (idx < cb->args[1]) 595 if (idx < cb->args[1])
595 goto skip; 596 goto skip;
596 597
diff --git a/net/core/filter.c b/net/core/filter.c
index afb8afb066bb..0eb8c4466eaa 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -39,65 +39,6 @@
39#include <linux/filter.h> 39#include <linux/filter.h>
40#include <linux/reciprocal_div.h> 40#include <linux/reciprocal_div.h>
41 41
42enum {
43 BPF_S_RET_K = 1,
44 BPF_S_RET_A,
45 BPF_S_ALU_ADD_K,
46 BPF_S_ALU_ADD_X,
47 BPF_S_ALU_SUB_K,
48 BPF_S_ALU_SUB_X,
49 BPF_S_ALU_MUL_K,
50 BPF_S_ALU_MUL_X,
51 BPF_S_ALU_DIV_X,
52 BPF_S_ALU_AND_K,
53 BPF_S_ALU_AND_X,
54 BPF_S_ALU_OR_K,
55 BPF_S_ALU_OR_X,
56 BPF_S_ALU_LSH_K,
57 BPF_S_ALU_LSH_X,
58 BPF_S_ALU_RSH_K,
59 BPF_S_ALU_RSH_X,
60 BPF_S_ALU_NEG,
61 BPF_S_LD_W_ABS,
62 BPF_S_LD_H_ABS,
63 BPF_S_LD_B_ABS,
64 BPF_S_LD_W_LEN,
65 BPF_S_LD_W_IND,
66 BPF_S_LD_H_IND,
67 BPF_S_LD_B_IND,
68 BPF_S_LD_IMM,
69 BPF_S_LDX_W_LEN,
70 BPF_S_LDX_B_MSH,
71 BPF_S_LDX_IMM,
72 BPF_S_MISC_TAX,
73 BPF_S_MISC_TXA,
74 BPF_S_ALU_DIV_K,
75 BPF_S_LD_MEM,
76 BPF_S_LDX_MEM,
77 BPF_S_ST,
78 BPF_S_STX,
79 BPF_S_JMP_JA,
80 BPF_S_JMP_JEQ_K,
81 BPF_S_JMP_JEQ_X,
82 BPF_S_JMP_JGE_K,
83 BPF_S_JMP_JGE_X,
84 BPF_S_JMP_JGT_K,
85 BPF_S_JMP_JGT_X,
86 BPF_S_JMP_JSET_K,
87 BPF_S_JMP_JSET_X,
88 /* Ancillary data */
89 BPF_S_ANC_PROTOCOL,
90 BPF_S_ANC_PKTTYPE,
91 BPF_S_ANC_IFINDEX,
92 BPF_S_ANC_NLATTR,
93 BPF_S_ANC_NLATTR_NEST,
94 BPF_S_ANC_MARK,
95 BPF_S_ANC_QUEUE,
96 BPF_S_ANC_HATYPE,
97 BPF_S_ANC_RXHASH,
98 BPF_S_ANC_CPU,
99};
100
101/* No hurry in this branch */ 42/* No hurry in this branch */
102static void *__load_pointer(const struct sk_buff *skb, int k, unsigned int size) 43static void *__load_pointer(const struct sk_buff *skb, int k, unsigned int size)
103{ 44{
@@ -145,7 +86,7 @@ int sk_filter(struct sock *sk, struct sk_buff *skb)
145 rcu_read_lock(); 86 rcu_read_lock();
146 filter = rcu_dereference(sk->sk_filter); 87 filter = rcu_dereference(sk->sk_filter);
147 if (filter) { 88 if (filter) {
148 unsigned int pkt_len = sk_run_filter(skb, filter->insns); 89 unsigned int pkt_len = SK_RUN_FILTER(filter, skb);
149 90
150 err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM; 91 err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
151 } 92 }
@@ -638,6 +579,7 @@ void sk_filter_release_rcu(struct rcu_head *rcu)
638{ 579{
639 struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu); 580 struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
640 581
582 bpf_jit_free(fp);
641 kfree(fp); 583 kfree(fp);
642} 584}
643EXPORT_SYMBOL(sk_filter_release_rcu); 585EXPORT_SYMBOL(sk_filter_release_rcu);
@@ -672,6 +614,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
672 614
673 atomic_set(&fp->refcnt, 1); 615 atomic_set(&fp->refcnt, 1);
674 fp->len = fprog->len; 616 fp->len = fprog->len;
617 fp->bpf_func = sk_run_filter;
675 618
676 err = sk_chk_filter(fp->insns, fp->len); 619 err = sk_chk_filter(fp->insns, fp->len);
677 if (err) { 620 if (err) {
@@ -679,6 +622,8 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
679 return err; 622 return err;
680 } 623 }
681 624
625 bpf_jit_compile(fp);
626
682 old_fp = rcu_dereference_protected(sk->sk_filter, 627 old_fp = rcu_dereference_protected(sk->sk_filter,
683 sock_owned_by_user(sk)); 628 sock_owned_by_user(sk));
684 rcu_assign_pointer(sk->sk_filter, fp); 629 rcu_assign_pointer(sk->sk_filter, fp);
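
This is the arch-independent half of BPF JIT support: the opcode remapping enum moves to <linux/filter.h> where arch JITs can see it, each attached filter gets a bpf_func pointer (defaulting to the sk_run_filter interpreter), and bpf_jit_compile()/bpf_jit_free() are no-op stubs unless an architecture provides them. Roughly how the dispatch macro reads with CONFIG_BPF_JIT enabled (paraphrased from <linux/filter.h>):

	#define SK_RUN_FILTER(filter, skb) (*(filter)->bpf_func)(skb, (filter)->insns)

The companion sysctl hunk in net/core/sysctl_net_core.c below exposes the runtime switch as /proc/sys/net/core/bpf_jit_enable.
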
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 80b2aad3b73d..11b98bc2aa8f 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -28,6 +28,7 @@
28static const char fmt_hex[] = "%#x\n"; 28static const char fmt_hex[] = "%#x\n";
29static const char fmt_long_hex[] = "%#lx\n"; 29static const char fmt_long_hex[] = "%#lx\n";
30static const char fmt_dec[] = "%d\n"; 30static const char fmt_dec[] = "%d\n";
31static const char fmt_udec[] = "%u\n";
31static const char fmt_ulong[] = "%lu\n"; 32static const char fmt_ulong[] = "%lu\n";
32static const char fmt_u64[] = "%llu\n"; 33static const char fmt_u64[] = "%llu\n";
33 34
@@ -145,13 +146,10 @@ static ssize_t show_speed(struct device *dev,
 	if (!rtnl_trylock())
 		return restart_syscall();
 
-	if (netif_running(netdev) &&
-	    netdev->ethtool_ops &&
-	    netdev->ethtool_ops->get_settings) {
-		struct ethtool_cmd cmd = { ETHTOOL_GSET };
-
-		if (!netdev->ethtool_ops->get_settings(netdev, &cmd))
-			ret = sprintf(buf, fmt_dec, ethtool_cmd_speed(&cmd));
+	if (netif_running(netdev)) {
+		struct ethtool_cmd cmd;
+		if (!dev_ethtool_get_settings(netdev, &cmd))
+			ret = sprintf(buf, fmt_udec, ethtool_cmd_speed(&cmd));
 	}
 	rtnl_unlock();
 	return ret;
@@ -166,13 +164,11 @@ static ssize_t show_duplex(struct device *dev,
 	if (!rtnl_trylock())
 		return restart_syscall();
 
-	if (netif_running(netdev) &&
-	    netdev->ethtool_ops &&
-	    netdev->ethtool_ops->get_settings) {
-		struct ethtool_cmd cmd = { ETHTOOL_GSET };
-
-		if (!netdev->ethtool_ops->get_settings(netdev, &cmd))
-			ret = sprintf(buf, "%s\n", cmd.duplex ? "full" : "half");
+	if (netif_running(netdev)) {
+		struct ethtool_cmd cmd;
+		if (!dev_ethtool_get_settings(netdev, &cmd))
+			ret = sprintf(buf, "%s\n",
+				      cmd.duplex ? "full" : "half");
 	}
 	rtnl_unlock();
 	return ret;
@@ -946,7 +942,7 @@ static ssize_t store_xps_map(struct netdev_queue *queue,
946 } else 942 } else
947 pos = map_len = alloc_len = 0; 943 pos = map_len = alloc_len = 0;
948 944
949 need_set = cpu_isset(cpu, *mask) && cpu_online(cpu); 945 need_set = cpumask_test_cpu(cpu, mask) && cpu_online(cpu);
950#ifdef CONFIG_NUMA 946#ifdef CONFIG_NUMA
951 if (need_set) { 947 if (need_set) {
952 if (numa_node == -2) 948 if (numa_node == -2)
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 297bb9272240..2e2dce6583e1 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -208,11 +208,14 @@ static void net_free(struct net *net)
208 kmem_cache_free(net_cachep, net); 208 kmem_cache_free(net_cachep, net);
209} 209}
210 210
211static struct net *net_create(void) 211struct net *copy_net_ns(unsigned long flags, struct net *old_net)
212{ 212{
213 struct net *net; 213 struct net *net;
214 int rv; 214 int rv;
215 215
216 if (!(flags & CLONE_NEWNET))
217 return get_net(old_net);
218
216 net = net_alloc(); 219 net = net_alloc();
217 if (!net) 220 if (!net)
218 return ERR_PTR(-ENOMEM); 221 return ERR_PTR(-ENOMEM);
@@ -231,13 +234,6 @@ static struct net *net_create(void)
231 return net; 234 return net;
232} 235}
233 236
234struct net *copy_net_ns(unsigned long flags, struct net *old_net)
235{
236 if (!(flags & CLONE_NEWNET))
237 return get_net(old_net);
238 return net_create();
239}
240
241static DEFINE_SPINLOCK(cleanup_list_lock); 237static DEFINE_SPINLOCK(cleanup_list_lock);
242static LIST_HEAD(cleanup_list); /* Must hold cleanup_list_lock to touch */ 238static LIST_HEAD(cleanup_list); /* Must hold cleanup_list_lock to touch */
243 239
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 06be2431753e..2d7d6d473781 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -539,7 +539,7 @@ int __netpoll_rx(struct sk_buff *skb)
539{ 539{
540 int proto, len, ulen; 540 int proto, len, ulen;
541 int hits = 0; 541 int hits = 0;
542 struct iphdr *iph; 542 const struct iphdr *iph;
543 struct udphdr *uh; 543 struct udphdr *uh;
544 struct netpoll_info *npinfo = skb->dev->npinfo; 544 struct netpoll_info *npinfo = skb->dev->npinfo;
545 struct netpoll *np, *tmp; 545 struct netpoll *np, *tmp;
@@ -698,32 +698,8 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
698 698
699 if (*cur != 0) { 699 if (*cur != 0) {
700 /* MAC address */ 700 /* MAC address */
701 if ((delim = strchr(cur, ':')) == NULL) 701 if (!mac_pton(cur, np->remote_mac))
702 goto parse_failed;
703 *delim = 0;
704 np->remote_mac[0] = simple_strtol(cur, NULL, 16);
705 cur = delim + 1;
706 if ((delim = strchr(cur, ':')) == NULL)
707 goto parse_failed;
708 *delim = 0;
709 np->remote_mac[1] = simple_strtol(cur, NULL, 16);
710 cur = delim + 1;
711 if ((delim = strchr(cur, ':')) == NULL)
712 goto parse_failed; 702 goto parse_failed;
713 *delim = 0;
714 np->remote_mac[2] = simple_strtol(cur, NULL, 16);
715 cur = delim + 1;
716 if ((delim = strchr(cur, ':')) == NULL)
717 goto parse_failed;
718 *delim = 0;
719 np->remote_mac[3] = simple_strtol(cur, NULL, 16);
720 cur = delim + 1;
721 if ((delim = strchr(cur, ':')) == NULL)
722 goto parse_failed;
723 *delim = 0;
724 np->remote_mac[4] = simple_strtol(cur, NULL, 16);
725 cur = delim + 1;
726 np->remote_mac[5] = simple_strtol(cur, NULL, 16);
727 } 703 }
728 704
729 netpoll_print_options(np); 705 netpoll_print_options(np);
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 6ed9e27d8202..67870e9fd097 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -450,7 +450,6 @@ static void pktgen_stop(struct pktgen_thread *t);
450static void pktgen_clear_counters(struct pktgen_dev *pkt_dev); 450static void pktgen_clear_counters(struct pktgen_dev *pkt_dev);
451 451
452static unsigned int scan_ip6(const char *s, char ip[16]); 452static unsigned int scan_ip6(const char *s, char ip[16]);
453static unsigned int fmt_ip6(char *s, const char ip[16]);
454 453
455/* Module parameters, defaults. */ 454/* Module parameters, defaults. */
456static int pg_count_d __read_mostly = 1000; 455static int pg_count_d __read_mostly = 1000;
@@ -557,21 +556,13 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
557 pkt_dev->skb_priority); 556 pkt_dev->skb_priority);
558 557
559 if (pkt_dev->flags & F_IPV6) { 558 if (pkt_dev->flags & F_IPV6) {
560 char b1[128], b2[128], b3[128];
561 fmt_ip6(b1, pkt_dev->in6_saddr.s6_addr);
562 fmt_ip6(b2, pkt_dev->min_in6_saddr.s6_addr);
563 fmt_ip6(b3, pkt_dev->max_in6_saddr.s6_addr);
564 seq_printf(seq, 559 seq_printf(seq,
565 " saddr: %s min_saddr: %s max_saddr: %s\n", b1, 560 " saddr: %pI6c min_saddr: %pI6c max_saddr: %pI6c\n"
566 b2, b3); 561 " daddr: %pI6c min_daddr: %pI6c max_daddr: %pI6c\n",
567 562 &pkt_dev->in6_saddr,
568 fmt_ip6(b1, pkt_dev->in6_daddr.s6_addr); 563 &pkt_dev->min_in6_saddr, &pkt_dev->max_in6_saddr,
569 fmt_ip6(b2, pkt_dev->min_in6_daddr.s6_addr); 564 &pkt_dev->in6_daddr,
570 fmt_ip6(b3, pkt_dev->max_in6_daddr.s6_addr); 565 &pkt_dev->min_in6_daddr, &pkt_dev->max_in6_daddr);
571 seq_printf(seq,
572 " daddr: %s min_daddr: %s max_daddr: %s\n", b1,
573 b2, b3);
574
575 } else { 566 } else {
576 seq_printf(seq, 567 seq_printf(seq,
577 " dst_min: %s dst_max: %s\n", 568 " dst_min: %s dst_max: %s\n",
@@ -707,10 +698,9 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
707 pkt_dev->cur_src_mac_offset); 698 pkt_dev->cur_src_mac_offset);
708 699
709 if (pkt_dev->flags & F_IPV6) { 700 if (pkt_dev->flags & F_IPV6) {
710 char b1[128], b2[128]; 701 seq_printf(seq, " cur_saddr: %pI6c cur_daddr: %pI6c\n",
711 fmt_ip6(b1, pkt_dev->cur_in6_daddr.s6_addr); 702 &pkt_dev->cur_in6_saddr,
712 fmt_ip6(b2, pkt_dev->cur_in6_saddr.s6_addr); 703 &pkt_dev->cur_in6_daddr);
713 seq_printf(seq, " cur_saddr: %s cur_daddr: %s\n", b2, b1);
714 } else 704 } else
715 seq_printf(seq, " cur_saddr: 0x%x cur_daddr: 0x%x\n", 705 seq_printf(seq, " cur_saddr: 0x%x cur_daddr: 0x%x\n",
716 pkt_dev->cur_saddr, pkt_dev->cur_daddr); 706 pkt_dev->cur_saddr, pkt_dev->cur_daddr);
@@ -1310,7 +1300,7 @@ static ssize_t pktgen_if_write(struct file *file,
1310 buf[len] = 0; 1300 buf[len] = 0;
1311 1301
1312 scan_ip6(buf, pkt_dev->in6_daddr.s6_addr); 1302 scan_ip6(buf, pkt_dev->in6_daddr.s6_addr);
1313 fmt_ip6(buf, pkt_dev->in6_daddr.s6_addr); 1303 snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->in6_daddr);
1314 1304
1315 ipv6_addr_copy(&pkt_dev->cur_in6_daddr, &pkt_dev->in6_daddr); 1305 ipv6_addr_copy(&pkt_dev->cur_in6_daddr, &pkt_dev->in6_daddr);
1316 1306
@@ -1333,7 +1323,7 @@ static ssize_t pktgen_if_write(struct file *file,
1333 buf[len] = 0; 1323 buf[len] = 0;
1334 1324
1335 scan_ip6(buf, pkt_dev->min_in6_daddr.s6_addr); 1325 scan_ip6(buf, pkt_dev->min_in6_daddr.s6_addr);
1336 fmt_ip6(buf, pkt_dev->min_in6_daddr.s6_addr); 1326 snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->min_in6_daddr);
1337 1327
1338 ipv6_addr_copy(&pkt_dev->cur_in6_daddr, 1328 ipv6_addr_copy(&pkt_dev->cur_in6_daddr,
1339 &pkt_dev->min_in6_daddr); 1329 &pkt_dev->min_in6_daddr);
@@ -1356,7 +1346,7 @@ static ssize_t pktgen_if_write(struct file *file,
1356 buf[len] = 0; 1346 buf[len] = 0;
1357 1347
1358 scan_ip6(buf, pkt_dev->max_in6_daddr.s6_addr); 1348 scan_ip6(buf, pkt_dev->max_in6_daddr.s6_addr);
1359 fmt_ip6(buf, pkt_dev->max_in6_daddr.s6_addr); 1349 snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->max_in6_daddr);
1360 1350
1361 if (debug) 1351 if (debug)
1362 printk(KERN_DEBUG "pktgen: dst6_max set to: %s\n", buf); 1352 printk(KERN_DEBUG "pktgen: dst6_max set to: %s\n", buf);
@@ -1377,7 +1367,7 @@ static ssize_t pktgen_if_write(struct file *file,
1377 buf[len] = 0; 1367 buf[len] = 0;
1378 1368
1379 scan_ip6(buf, pkt_dev->in6_saddr.s6_addr); 1369 scan_ip6(buf, pkt_dev->in6_saddr.s6_addr);
1380 fmt_ip6(buf, pkt_dev->in6_saddr.s6_addr); 1370 snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->in6_saddr);
1381 1371
1382 ipv6_addr_copy(&pkt_dev->cur_in6_saddr, &pkt_dev->in6_saddr); 1372 ipv6_addr_copy(&pkt_dev->cur_in6_saddr, &pkt_dev->in6_saddr);
1383 1373
@@ -1431,11 +1421,6 @@ static ssize_t pktgen_if_write(struct file *file,
1431 return count; 1421 return count;
1432 } 1422 }
1433 if (!strcmp(name, "dst_mac")) { 1423 if (!strcmp(name, "dst_mac")) {
1434 char *v = valstr;
1435 unsigned char old_dmac[ETH_ALEN];
1436 unsigned char *m = pkt_dev->dst_mac;
1437 memcpy(old_dmac, pkt_dev->dst_mac, ETH_ALEN);
1438
1439 len = strn_len(&user_buffer[i], sizeof(valstr) - 1); 1424 len = strn_len(&user_buffer[i], sizeof(valstr) - 1);
1440 if (len < 0) 1425 if (len < 0)
1441 return len; 1426 return len;
@@ -1443,35 +1428,16 @@ static ssize_t pktgen_if_write(struct file *file,
1443 memset(valstr, 0, sizeof(valstr)); 1428 memset(valstr, 0, sizeof(valstr));
1444 if (copy_from_user(valstr, &user_buffer[i], len)) 1429 if (copy_from_user(valstr, &user_buffer[i], len))
1445 return -EFAULT; 1430 return -EFAULT;
1446 i += len;
1447
1448 for (*m = 0; *v && m < pkt_dev->dst_mac + 6; v++) {
1449 int value;
1450
1451 value = hex_to_bin(*v);
1452 if (value >= 0)
1453 *m = *m * 16 + value;
1454
1455 if (*v == ':') {
1456 m++;
1457 *m = 0;
1458 }
1459 }
1460 1431
1432 if (!mac_pton(valstr, pkt_dev->dst_mac))
1433 return -EINVAL;
1461 /* Set up Dest MAC */ 1434 /* Set up Dest MAC */
1462 if (compare_ether_addr(old_dmac, pkt_dev->dst_mac)) 1435 memcpy(&pkt_dev->hh[0], pkt_dev->dst_mac, ETH_ALEN);
1463 memcpy(&(pkt_dev->hh[0]), pkt_dev->dst_mac, ETH_ALEN);
1464 1436
1465 sprintf(pg_result, "OK: dstmac"); 1437 sprintf(pg_result, "OK: dstmac %pM", pkt_dev->dst_mac);
1466 return count; 1438 return count;
1467 } 1439 }
1468 if (!strcmp(name, "src_mac")) { 1440 if (!strcmp(name, "src_mac")) {
1469 char *v = valstr;
1470 unsigned char old_smac[ETH_ALEN];
1471 unsigned char *m = pkt_dev->src_mac;
1472
1473 memcpy(old_smac, pkt_dev->src_mac, ETH_ALEN);
1474
1475 len = strn_len(&user_buffer[i], sizeof(valstr) - 1); 1441 len = strn_len(&user_buffer[i], sizeof(valstr) - 1);
1476 if (len < 0) 1442 if (len < 0)
1477 return len; 1443 return len;
@@ -1479,26 +1445,13 @@ static ssize_t pktgen_if_write(struct file *file,
1479 memset(valstr, 0, sizeof(valstr)); 1445 memset(valstr, 0, sizeof(valstr));
1480 if (copy_from_user(valstr, &user_buffer[i], len)) 1446 if (copy_from_user(valstr, &user_buffer[i], len))
1481 return -EFAULT; 1447 return -EFAULT;
1482 i += len;
1483
1484 for (*m = 0; *v && m < pkt_dev->src_mac + 6; v++) {
1485 int value;
1486
1487 value = hex_to_bin(*v);
1488 if (value >= 0)
1489 *m = *m * 16 + value;
1490
1491 if (*v == ':') {
1492 m++;
1493 *m = 0;
1494 }
1495 }
1496 1448
1449 if (!mac_pton(valstr, pkt_dev->src_mac))
1450 return -EINVAL;
1497 /* Set up Src MAC */ 1451 /* Set up Src MAC */
1498 if (compare_ether_addr(old_smac, pkt_dev->src_mac)) 1452 memcpy(&pkt_dev->hh[6], pkt_dev->src_mac, ETH_ALEN);
1499 memcpy(&(pkt_dev->hh[6]), pkt_dev->src_mac, ETH_ALEN);
1500 1453
1501 sprintf(pg_result, "OK: srcmac"); 1454 sprintf(pg_result, "OK: srcmac %pM", pkt_dev->src_mac);
1502 return count; 1455 return count;
1503 } 1456 }
1504 1457
@@ -2515,7 +2468,6 @@ static int pktgen_output_ipsec(struct sk_buff *skb, struct pktgen_dev *pkt_dev)
2515{ 2468{
2516 struct xfrm_state *x = pkt_dev->flows[pkt_dev->curfl].x; 2469 struct xfrm_state *x = pkt_dev->flows[pkt_dev->curfl].x;
2517 int err = 0; 2470 int err = 0;
2518 struct iphdr *iph;
2519 2471
2520 if (!x) 2472 if (!x)
2521 return 0; 2473 return 0;
@@ -2525,7 +2477,6 @@ static int pktgen_output_ipsec(struct sk_buff *skb, struct pktgen_dev *pkt_dev)
2525 return 0; 2477 return 0;
2526 2478
2527 spin_lock(&x->lock); 2479 spin_lock(&x->lock);
2528 iph = ip_hdr(skb);
2529 2480
2530 err = x->outer_mode->output(x, skb); 2481 err = x->outer_mode->output(x, skb);
2531 if (err) 2482 if (err)
@@ -2625,6 +2576,7 @@ static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb,
2625 } else { 2576 } else {
2626 int frags = pkt_dev->nfrags; 2577 int frags = pkt_dev->nfrags;
2627 int i, len; 2578 int i, len;
2579 int frag_len;
2628 2580
2629 2581
2630 if (frags > MAX_SKB_FRAGS) 2582 if (frags > MAX_SKB_FRAGS)
@@ -2636,6 +2588,8 @@ static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb,
2636 } 2588 }
2637 2589
2638 i = 0; 2590 i = 0;
2591 frag_len = (datalen/frags) < PAGE_SIZE ?
2592 (datalen/frags) : PAGE_SIZE;
2639 while (datalen > 0) { 2593 while (datalen > 0) {
2640 if (unlikely(!pkt_dev->page)) { 2594 if (unlikely(!pkt_dev->page)) {
2641 int node = numa_node_id(); 2595 int node = numa_node_id();
@@ -2649,38 +2603,18 @@ static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb,
2649 skb_shinfo(skb)->frags[i].page = pkt_dev->page; 2603 skb_shinfo(skb)->frags[i].page = pkt_dev->page;
2650 get_page(pkt_dev->page); 2604 get_page(pkt_dev->page);
2651 skb_shinfo(skb)->frags[i].page_offset = 0; 2605 skb_shinfo(skb)->frags[i].page_offset = 0;
2652 skb_shinfo(skb)->frags[i].size = 2606 /*last fragment, fill rest of data*/
2653 (datalen < PAGE_SIZE ? datalen : PAGE_SIZE); 2607 if (i == (frags - 1))
2608 skb_shinfo(skb)->frags[i].size =
2609 (datalen < PAGE_SIZE ? datalen : PAGE_SIZE);
2610 else
2611 skb_shinfo(skb)->frags[i].size = frag_len;
2654 datalen -= skb_shinfo(skb)->frags[i].size; 2612 datalen -= skb_shinfo(skb)->frags[i].size;
2655 skb->len += skb_shinfo(skb)->frags[i].size; 2613 skb->len += skb_shinfo(skb)->frags[i].size;
2656 skb->data_len += skb_shinfo(skb)->frags[i].size; 2614 skb->data_len += skb_shinfo(skb)->frags[i].size;
2657 i++; 2615 i++;
2658 skb_shinfo(skb)->nr_frags = i; 2616 skb_shinfo(skb)->nr_frags = i;
2659 } 2617 }
2660
2661 while (i < frags) {
2662 int rem;
2663
2664 if (i == 0)
2665 break;
2666
2667 rem = skb_shinfo(skb)->frags[i - 1].size / 2;
2668 if (rem == 0)
2669 break;
2670
2671 skb_shinfo(skb)->frags[i - 1].size -= rem;
2672
2673 skb_shinfo(skb)->frags[i] =
2674 skb_shinfo(skb)->frags[i - 1];
2675 get_page(skb_shinfo(skb)->frags[i].page);
2676 skb_shinfo(skb)->frags[i].page =
2677 skb_shinfo(skb)->frags[i - 1].page;
2678 skb_shinfo(skb)->frags[i].page_offset +=
2679 skb_shinfo(skb)->frags[i - 1].size;
2680 skb_shinfo(skb)->frags[i].size = rem;
2681 i++;
2682 skb_shinfo(skb)->nr_frags = i;
2683 }
2684 } 2618 }
2685 2619
2686 /* Stamp the time, and sequence number, 2620 /* Stamp the time, and sequence number,
@@ -2918,79 +2852,6 @@ static unsigned int scan_ip6(const char *s, char ip[16])
2918 return len; 2852 return len;
2919} 2853}
2920 2854
2921static char tohex(char hexdigit)
2922{
2923 return hexdigit > 9 ? hexdigit + 'a' - 10 : hexdigit + '0';
2924}
2925
2926static int fmt_xlong(char *s, unsigned int i)
2927{
2928 char *bak = s;
2929 *s = tohex((i >> 12) & 0xf);
2930 if (s != bak || *s != '0')
2931 ++s;
2932 *s = tohex((i >> 8) & 0xf);
2933 if (s != bak || *s != '0')
2934 ++s;
2935 *s = tohex((i >> 4) & 0xf);
2936 if (s != bak || *s != '0')
2937 ++s;
2938 *s = tohex(i & 0xf);
2939 return s - bak + 1;
2940}
2941
2942static unsigned int fmt_ip6(char *s, const char ip[16])
2943{
2944 unsigned int len;
2945 unsigned int i;
2946 unsigned int temp;
2947 unsigned int compressing;
2948 int j;
2949
2950 len = 0;
2951 compressing = 0;
2952 for (j = 0; j < 16; j += 2) {
2953
2954#ifdef V4MAPPEDPREFIX
2955 if (j == 12 && !memcmp(ip, V4mappedprefix, 12)) {
2956 inet_ntoa_r(*(struct in_addr *)(ip + 12), s);
2957 temp = strlen(s);
2958 return len + temp;
2959 }
2960#endif
2961 temp = ((unsigned long)(unsigned char)ip[j] << 8) +
2962 (unsigned long)(unsigned char)ip[j + 1];
2963 if (temp == 0) {
2964 if (!compressing) {
2965 compressing = 1;
2966 if (j == 0) {
2967 *s++ = ':';
2968 ++len;
2969 }
2970 }
2971 } else {
2972 if (compressing) {
2973 compressing = 0;
2974 *s++ = ':';
2975 ++len;
2976 }
2977 i = fmt_xlong(s, temp);
2978 len += i;
2979 s += i;
2980 if (j < 14) {
2981 *s++ = ':';
2982 ++len;
2983 }
2984 }
2985 }
2986 if (compressing) {
2987 *s++ = ':';
2988 ++len;
2989 }
2990 *s = 0;
2991 return len;
2992}
2993
2994static struct sk_buff *fill_packet_ipv6(struct net_device *odev, 2855static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
2995 struct pktgen_dev *pkt_dev) 2856 struct pktgen_dev *pkt_dev)
2996{ 2857{
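
pktgen drops roughly eighty lines of private formatting because vsnprintf's %pI6c extension already prints a struct in6_addr in compressed form. Usage is just:

	seq_printf(seq, " cur_saddr: %pI6c\n", &pkt_dev->cur_in6_saddr);
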
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index d7c4bb4b1820..d2ba2597c75a 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1007,10 +1007,11 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
1007 s_h = cb->args[0]; 1007 s_h = cb->args[0];
1008 s_idx = cb->args[1]; 1008 s_idx = cb->args[1];
1009 1009
1010 rcu_read_lock();
1010 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { 1011 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
1011 idx = 0; 1012 idx = 0;
1012 head = &net->dev_index_head[h]; 1013 head = &net->dev_index_head[h];
1013 hlist_for_each_entry(dev, node, head, index_hlist) { 1014 hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
1014 if (idx < s_idx) 1015 if (idx < s_idx)
1015 goto cont; 1016 goto cont;
1016 if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK, 1017 if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
@@ -1023,6 +1024,7 @@ cont:
1023 } 1024 }
1024 } 1025 }
1025out: 1026out:
1027 rcu_read_unlock();
1026 cb->args[1] = idx; 1028 cb->args[1] = idx;
1027 cb->args[0] = h; 1029 cb->args[0] = h;
1028 1030
@@ -1499,6 +1501,7 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1499 char ifname[IFNAMSIZ]; 1501 char ifname[IFNAMSIZ];
1500 struct nlattr *tb[IFLA_MAX+1]; 1502 struct nlattr *tb[IFLA_MAX+1];
1501 int err; 1503 int err;
1504 LIST_HEAD(list_kill);
1502 1505
1503 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy); 1506 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
1504 if (err < 0) 1507 if (err < 0)
@@ -1522,7 +1525,9 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1522 if (!ops) 1525 if (!ops)
1523 return -EOPNOTSUPP; 1526 return -EOPNOTSUPP;
1524 1527
1525 ops->dellink(dev, NULL); 1528 ops->dellink(dev, &list_kill);
1529 unregister_netdevice_many(&list_kill);
1530 list_del(&list_kill);
1526 return 0; 1531 return 0;
1527} 1532}
1528 1533
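
rtnl_dellink() now uses the batched teardown path, so a dellink that implicitly removes stacked devices (VLANs on top of the target, for instance) pays for a single grace-period wait rather than one per device. The pattern, as introduced here:

	LIST_HEAD(list_kill);

	ops->dellink(dev, &list_kill);		/* queue dev plus any stacked devices */
	unregister_netdevice_many(&list_kill);	/* one batched unregister */
	list_del(&list_kill);
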
@@ -1570,12 +1575,6 @@ struct net_device *rtnl_create_link(struct net *src_net, struct net *net,
1570 dev->rtnl_link_state = RTNL_LINK_INITIALIZING; 1575 dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
1571 dev->real_num_tx_queues = real_num_queues; 1576 dev->real_num_tx_queues = real_num_queues;
1572 1577
1573 if (strchr(dev->name, '%')) {
1574 err = dev_alloc_name(dev, dev->name);
1575 if (err < 0)
1576 goto err_free;
1577 }
1578
1579 if (tb[IFLA_MTU]) 1578 if (tb[IFLA_MTU])
1580 dev->mtu = nla_get_u32(tb[IFLA_MTU]); 1579 dev->mtu = nla_get_u32(tb[IFLA_MTU]);
1581 if (tb[IFLA_ADDRESS]) 1580 if (tb[IFLA_ADDRESS])
@@ -1595,8 +1594,6 @@ struct net_device *rtnl_create_link(struct net *src_net, struct net *net,
1595 1594
1596 return dev; 1595 return dev;
1597 1596
1598err_free:
1599 free_netdev(dev);
1600err: 1597err:
1601 return ERR_PTR(err); 1598 return ERR_PTR(err);
1602} 1599}
@@ -1879,7 +1876,6 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1879 int min_len; 1876 int min_len;
1880 int family; 1877 int family;
1881 int type; 1878 int type;
1882 int err;
1883 1879
1884 type = nlh->nlmsg_type; 1880 type = nlh->nlmsg_type;
1885 if (type > RTM_MAX) 1881 if (type > RTM_MAX)
@@ -1906,11 +1902,8 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1906 if (dumpit == NULL) 1902 if (dumpit == NULL)
1907 return -EOPNOTSUPP; 1903 return -EOPNOTSUPP;
1908 1904
1909 __rtnl_unlock();
1910 rtnl = net->rtnl; 1905 rtnl = net->rtnl;
1911 err = netlink_dump_start(rtnl, skb, nlh, dumpit, NULL); 1906 return netlink_dump_start(rtnl, skb, nlh, dumpit, NULL);
1912 rtnl_lock();
1913 return err;
1914 } 1907 }
1915 1908
1916 memset(rta_buf, 0, (rtattr_max * sizeof(struct rtattr *))); 1909 memset(rta_buf, 0, (rtattr_max * sizeof(struct rtattr *)));
@@ -1980,7 +1973,7 @@ static int __net_init rtnetlink_net_init(struct net *net)
1980{ 1973{
1981 struct sock *sk; 1974 struct sock *sk;
1982 sk = netlink_kernel_create(net, NETLINK_ROUTE, RTNLGRP_MAX, 1975 sk = netlink_kernel_create(net, NETLINK_ROUTE, RTNLGRP_MAX,
1983 rtnetlink_rcv, &rtnl_mutex, THIS_MODULE); 1976 rtnetlink_rcv, NULL, THIS_MODULE);
1984 if (!sk) 1977 if (!sk)
1985 return -ENOMEM; 1978 return -ENOMEM;
1986 net->rtnl = sk; 1979 net->rtnl = sk;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 960ea899c864..46cbd28f40f9 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2994,6 +2994,9 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
2994 skb->destructor = sock_rmem_free; 2994 skb->destructor = sock_rmem_free;
2995 atomic_add(skb->truesize, &sk->sk_rmem_alloc); 2995 atomic_add(skb->truesize, &sk->sk_rmem_alloc);
2996 2996
2997 /* before exiting rcu section, make sure dst is refcounted */
2998 skb_dst_force(skb);
2999
2997 skb_queue_tail(&sk->sk_error_queue, skb); 3000 skb_queue_tail(&sk->sk_error_queue, skb);
2998 if (!sock_flag(sk, SOCK_DEAD)) 3001 if (!sock_flag(sk, SOCK_DEAD))
2999 sk->sk_data_ready(sk, skb->len); 3002 sk->sk_data_ready(sk, skb->len);
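
An skb queued to sk_error_queue can outlive the RCU read side that made a noref dst legal, so the skb must own a real reference before the section ends. skb_dst_force() is roughly (paraphrased from <net/dst.h>):

	static inline void skb_dst_force(struct sk_buff *skb)
	{
		if (skb_dst_is_noref(skb)) {
			skb->_skb_refdst &= ~SKB_DST_NOREF;
			dst_clone(skb_dst(skb));	/* take a real refcount */
		}
	}
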
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 385b6095fdc4..a829e3f60aeb 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -122,6 +122,15 @@ static struct ctl_table net_core_table[] = {
122 .mode = 0644, 122 .mode = 0644,
123 .proc_handler = proc_dointvec 123 .proc_handler = proc_dointvec
124 }, 124 },
125#ifdef CONFIG_BPF_JIT
126 {
127 .procname = "bpf_jit_enable",
128 .data = &bpf_jit_enable,
129 .maxlen = sizeof(int),
130 .mode = 0644,
131 .proc_handler = proc_dointvec
132 },
133#endif
125 { 134 {
126 .procname = "netdev_tstamp_prequeue", 135 .procname = "netdev_tstamp_prequeue",
127 .data = &netdev_tstamp_prequeue, 136 .data = &netdev_tstamp_prequeue,
diff --git a/net/core/utils.c b/net/core/utils.c
index 5fea0ab21902..2012bc797f9c 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -296,3 +296,27 @@ void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
296 csum_unfold(*sum))); 296 csum_unfold(*sum)));
297} 297}
298EXPORT_SYMBOL(inet_proto_csum_replace4); 298EXPORT_SYMBOL(inet_proto_csum_replace4);
299
300int mac_pton(const char *s, u8 *mac)
301{
302 int i;
303
304 /* XX:XX:XX:XX:XX:XX */
305 if (strlen(s) < 3 * ETH_ALEN - 1)
306 return 0;
307
308 /* Don't dirty result unless string is valid MAC. */
309 for (i = 0; i < ETH_ALEN; i++) {
310 if (!strchr("0123456789abcdefABCDEF", s[i * 3]))
311 return 0;
312 if (!strchr("0123456789abcdefABCDEF", s[i * 3 + 1]))
313 return 0;
314 if (i != ETH_ALEN - 1 && s[i * 3 + 2] != ':')
315 return 0;
316 }
317 for (i = 0; i < ETH_ALEN; i++) {
318 mac[i] = (hex_to_bin(s[i * 3]) << 4) | hex_to_bin(s[i * 3 + 1]);
319 }
320 return 1;
321}
322EXPORT_SYMBOL(mac_pton);
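
mac_pton() parses the canonical "xx:xx:xx:xx:xx:xx" form, returns 1 on success and 0 on failure, and, per the comment above, never touches the output buffer unless the whole string validates. Caller sketch, as in the netpoll and pktgen conversions earlier in this diff:

	u8 mac[ETH_ALEN];

	if (!mac_pton(str, mac))	/* 'str' is whatever the caller parsed out */
		return -EINVAL;
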
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index ae451c6d83ba..8c36adfd1919 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -40,13 +40,15 @@
 
 int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 {
+	const struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
 	struct inet_sock *inet = inet_sk(sk);
 	struct dccp_sock *dp = dccp_sk(sk);
-	const struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
 	__be16 orig_sport, orig_dport;
-	struct rtable *rt;
 	__be32 daddr, nexthop;
+	struct flowi4 *fl4;
+	struct rtable *rt;
 	int err;
+	struct ip_options_rcu *inet_opt;
 
 	dp->dccps_role = DCCP_ROLE_CLIENT;
 
@@ -57,15 +59,19 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 		return -EAFNOSUPPORT;
 
 	nexthop = daddr = usin->sin_addr.s_addr;
-	if (inet->opt != NULL && inet->opt->srr) {
+
+	inet_opt = rcu_dereference_protected(inet->inet_opt,
+					     sock_owned_by_user(sk));
+	if (inet_opt != NULL && inet_opt->opt.srr) {
 		if (daddr == 0)
 			return -EINVAL;
-		nexthop = inet->opt->faddr;
+		nexthop = inet_opt->opt.faddr;
 	}
 
 	orig_sport = inet->inet_sport;
 	orig_dport = usin->sin_port;
-	rt = ip_route_connect(nexthop, inet->inet_saddr,
+	fl4 = &inet->cork.fl.u.ip4;
+	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
 			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
 			      IPPROTO_DCCP,
 			      orig_sport, orig_dport, sk, true);
@@ -77,19 +83,19 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 		return -ENETUNREACH;
 	}
 
-	if (inet->opt == NULL || !inet->opt->srr)
-		daddr = rt->rt_dst;
+	if (inet_opt == NULL || !inet_opt->opt.srr)
+		daddr = fl4->daddr;
 
 	if (inet->inet_saddr == 0)
-		inet->inet_saddr = rt->rt_src;
+		inet->inet_saddr = fl4->saddr;
 	inet->inet_rcv_saddr = inet->inet_saddr;
 
 	inet->inet_dport = usin->sin_port;
 	inet->inet_daddr = daddr;
 
 	inet_csk(sk)->icsk_ext_hdr_len = 0;
-	if (inet->opt != NULL)
-		inet_csk(sk)->icsk_ext_hdr_len = inet->opt->optlen;
+	if (inet_opt)
+		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
 	/*
 	 * Socket identity is still unknown (sport may be zero).
 	 * However we set state to DCCP_REQUESTING and not releasing socket
@@ -101,8 +107,7 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 	if (err != 0)
 		goto failure;
 
-	rt = ip_route_newports(rt, IPPROTO_DCCP,
-			       orig_sport, orig_dport,
+	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
 			       inet->inet_sport, inet->inet_dport, sk);
 	if (IS_ERR(rt)) {
 		rt = NULL;
@@ -391,32 +396,30 @@ struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb,
 	if (sk_acceptq_is_full(sk))
 		goto exit_overflow;
 
-	if (dst == NULL && (dst = inet_csk_route_req(sk, req)) == NULL)
-		goto exit;
-
 	newsk = dccp_create_openreq_child(sk, req, skb);
 	if (newsk == NULL)
 		goto exit_nonewsk;
 
-	sk_setup_caps(newsk, dst);
-
 	newinet = inet_sk(newsk);
 	ireq = inet_rsk(req);
 	newinet->inet_daddr = ireq->rmt_addr;
 	newinet->inet_rcv_saddr = ireq->loc_addr;
 	newinet->inet_saddr = ireq->loc_addr;
-	newinet->opt = ireq->opt;
+	newinet->inet_opt = ireq->opt;
 	ireq->opt = NULL;
 	newinet->mc_index = inet_iif(skb);
 	newinet->mc_ttl = ip_hdr(skb)->ttl;
 	newinet->inet_id = jiffies;
 
+	if (dst == NULL && (dst = inet_csk_route_child_sock(sk, newsk, req)) == NULL)
+		goto put_and_exit;
+
+	sk_setup_caps(newsk, dst);
+
 	dccp_sync_mss(newsk, dst_mtu(dst));
 
-	if (__inet_inherit_port(sk, newsk) < 0) {
-		sock_put(newsk);
-		goto exit;
-	}
+	if (__inet_inherit_port(sk, newsk) < 0)
+		goto put_and_exit;
 	__inet_hash_nolisten(newsk, NULL);
 
 	return newsk;
@@ -428,6 +431,9 @@ exit_nonewsk:
 exit:
 	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
 	return NULL;
+put_and_exit:
+	sock_put(newsk);
+	goto exit;
 }
 
 EXPORT_SYMBOL_GPL(dccp_v4_request_recv_sock);
@@ -491,8 +497,9 @@ static int dccp_v4_send_response(struct sock *sk, struct request_sock *req,
 	int err = -1;
 	struct sk_buff *skb;
 	struct dst_entry *dst;
+	struct flowi4 fl4;
 
-	dst = inet_csk_route_req(sk, req);
+	dst = inet_csk_route_req(sk, &fl4, req);
 	if (dst == NULL)
 		goto out;
 
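A pattern repeated across this series is visible in dccp_v4_connect() above: instead of reading the chosen addresses back out of the rtable (rt->rt_src and rt->rt_dst are on their way out), the caller hands ip_route_connect() a struct flowi4 that lives in the socket's own cork, and reads fl4->saddr/fl4->daddr afterwards. A minimal sketch of the new calling convention (kernel-style fragment, error paths trimmed):

	struct inet_sock *inet = inet_sk(sk);
	struct flowi4 *fl4 = &inet->cork.fl.u.ip4;	/* flow stored in the socket */
	struct rtable *rt;

	rt = ip_route_connect(fl4, daddr, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_DCCP, orig_sport, orig_dport, sk, true);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	/* the routed addresses now come from the flow, not the rtable */
	inet->inet_saddr = fl4->saddr;
	inet->inet_daddr = fl4->daddr;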
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index de1b7e37ad5b..8dc4348774a5 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -54,8 +54,8 @@ static void dccp_v6_hash(struct sock *sk)
 
 /* add pseudo-header to DCCP checksum stored in skb->csum */
 static inline __sum16 dccp_v6_csum_finish(struct sk_buff *skb,
-					  struct in6_addr *saddr,
-					  struct in6_addr *daddr)
+					  const struct in6_addr *saddr,
+					  const struct in6_addr *daddr)
 {
 	return csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_DCCP, skb->csum);
 }
@@ -87,7 +87,7 @@ static inline __u32 dccp_v6_init_sequence(struct sk_buff *skb)
 static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 			u8 type, u8 code, int offset, __be32 info)
 {
-	struct ipv6hdr *hdr = (struct ipv6hdr *)skb->data;
+	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
 	const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset);
 	struct dccp_sock *dp;
 	struct ipv6_pinfo *np;
@@ -296,7 +296,7 @@ static void dccp_v6_reqsk_destructor(struct request_sock *req)
 
 static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
 {
-	struct ipv6hdr *rxip6h;
+	const struct ipv6hdr *rxip6h;
 	struct sk_buff *skb;
 	struct flowi6 fl6;
 	struct net *net = dev_net(skb_dst(rxskb)->dev);
@@ -573,7 +573,7 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
 
 	   First: no IPv4 options.
 	 */
-	newinet->opt = NULL;
+	newinet->inet_opt = NULL;
 
 	/* Clone RX bits */
 	newnp->rxopt.all = np->rxopt.all;
diff --git a/net/dccp/output.c b/net/dccp/output.c
index 136d41cbcd02..fab108e51e5a 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -43,7 +43,7 @@ static void dccp_skb_entail(struct sock *sk, struct sk_buff *skb)
 static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
 {
 	if (likely(skb != NULL)) {
-		const struct inet_sock *inet = inet_sk(sk);
+		struct inet_sock *inet = inet_sk(sk);
 		const struct inet_connection_sock *icsk = inet_csk(sk);
 		struct dccp_sock *dp = dccp_sk(sk);
 		struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
@@ -136,7 +136,7 @@ static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
 
 		DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
 
-		err = icsk->icsk_af_ops->queue_xmit(skb);
+		err = icsk->icsk_af_ops->queue_xmit(skb, &inet->cork.fl);
 		return net_xmit_eval(err);
 	}
 	return -ENOBUFS;
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index 4c27615340dc..cf26ac74a188 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -747,7 +747,8 @@ static int dn_nl_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
 	skip_naddr = cb->args[1];
 
 	idx = 0;
-	for_each_netdev(&init_net, dev) {
+	rcu_read_lock();
+	for_each_netdev_rcu(&init_net, dev) {
 		if (idx < skip_ndevs)
 			goto cont;
 		else if (idx > skip_ndevs) {
@@ -756,11 +757,11 @@ static int dn_nl_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
 			skip_naddr = 0;
 		}
 
-		if ((dn_db = rtnl_dereference(dev->dn_ptr)) == NULL)
+		if ((dn_db = rcu_dereference(dev->dn_ptr)) == NULL)
 			goto cont;
 
-		for (ifa = rtnl_dereference(dn_db->ifa_list), dn_idx = 0; ifa;
-		     ifa = rtnl_dereference(ifa->ifa_next), dn_idx++) {
+		for (ifa = rcu_dereference(dn_db->ifa_list), dn_idx = 0; ifa;
+		     ifa = rcu_dereference(ifa->ifa_next), dn_idx++) {
 			if (dn_idx < skip_naddr)
 				continue;
 
@@ -773,6 +774,7 @@ cont:
 		idx++;
 	}
 done:
+	rcu_read_unlock();
 	cb->args[0] = idx;
 	cb->args[1] = dn_idx;
 
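Converting dn_nl_dump_ifaddr() from an RTNL-protected walk to an RCU one means the whole traversal has to sit inside an explicit read-side critical section, and every pointer chase along the way switches from rtnl_dereference() to rcu_dereference(). Reduced to its general shape (kernel-style fragment):

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, dev) {
		/* every link in the chain is re-fetched under RCU */
		struct dn_dev *dn_db = rcu_dereference(dev->dn_ptr);

		if (dn_db == NULL)
			continue;
		/* ... walk dn_db->ifa_list with rcu_dereference(), as above ... */
	}
	rcu_read_unlock();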
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 9f09d4fc2880..74544bc6fdec 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -1125,13 +1125,11 @@ make_route:
 	if (dev_out->flags & IFF_LOOPBACK)
 		flags |= RTCF_LOCAL;
 
-	rt = dst_alloc(&dn_dst_ops, 0);
+	rt = dst_alloc(&dn_dst_ops, dev_out, 1, 0, DST_HOST);
 	if (rt == NULL)
 		goto e_nobufs;
 
-	atomic_set(&rt->dst.__refcnt, 1);
-	rt->dst.flags = DST_HOST;
-
+	memset(&rt->fld, 0, sizeof(rt->fld));
 	rt->fld.saddr        = oldflp->saddr;
 	rt->fld.daddr        = oldflp->daddr;
 	rt->fld.flowidn_oif  = oldflp->flowidn_oif;
@@ -1146,8 +1144,6 @@ make_route:
 	rt->rt_dst_map    = fld.daddr;
 	rt->rt_src_map    = fld.saddr;
 
-	rt->dst.dev = dev_out;
-	dev_hold(dev_out);
 	rt->dst.neighbour = neigh;
 	neigh = NULL;
 
@@ -1399,10 +1395,11 @@ static int dn_route_input_slow(struct sk_buff *skb)
 	}
 
 make_route:
-	rt = dst_alloc(&dn_dst_ops, 0);
+	rt = dst_alloc(&dn_dst_ops, out_dev, 0, 0, DST_HOST);
 	if (rt == NULL)
 		goto e_nobufs;
 
+	memset(&rt->fld, 0, sizeof(rt->fld));
 	rt->rt_saddr      = fld.saddr;
 	rt->rt_daddr      = fld.daddr;
 	rt->rt_gateway    = fld.daddr;
@@ -1419,9 +1416,7 @@ make_route:
 	rt->fld.flowidn_iif  = in_dev->ifindex;
 	rt->fld.flowidn_mark = fld.flowidn_mark;
 
-	rt->dst.flags = DST_HOST;
 	rt->dst.neighbour = neigh;
-	rt->dst.dev = out_dev;
 	rt->dst.lastuse = jiffies;
 	rt->dst.output = dn_rt_bug;
 	switch(res.type) {
@@ -1440,8 +1435,6 @@ make_route:
 		rt->dst.input = dst_discard;
 	}
 	rt->rt_flags = flags;
-	if (rt->dst.dev)
-		dev_hold(rt->dst.dev);
 
 	err = dn_rt_set_next_hop(rt, &res);
 	if (err)
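dst_alloc() changed signature in this series; judging by the two DECnet call sites above, the parameter order is now (ops, dev, initial_ref, initial_obsolete, flags), so the allocator takes the device reference and sets the initial refcount and flags itself, and the hand-rolled atomic_set()/dev_hold()/rt->dst.flags assignments that used to follow every allocation disappear. The before/after, reduced to a sketch:

	/* old: allocate, then fix up refcount, flags and device by hand */
	rt = dst_alloc(&dn_dst_ops, 0);
	atomic_set(&rt->dst.__refcnt, 1);
	rt->dst.flags = DST_HOST;
	rt->dst.dev = dev_out;
	dev_hold(dev_out);

	/* new: the output path is born with one reference held ... */
	rt = dst_alloc(&dn_dst_ops, dev_out, 1, 0, DST_HOST);

	/* ... while the input path starts with none */
	rt = dst_alloc(&dn_dst_ops, out_dev, 0, 0, DST_HOST);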
diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c
index 99d8d3a40998..bd0a52dd1d40 100644
--- a/net/decnet/dn_table.c
+++ b/net/decnet/dn_table.c
@@ -123,11 +123,11 @@ static inline void dn_rebuild_zone(struct dn_zone *dz,
 				   struct dn_fib_node **old_ht,
 				   int old_divisor)
 {
-	int i;
 	struct dn_fib_node *f, **fp, *next;
+	int i;
 
 	for(i = 0; i < old_divisor; i++) {
-		for(f = old_ht[i]; f; f = f->fn_next) {
+		for(f = old_ht[i]; f; f = next) {
 			next = f->fn_next;
 			for(fp = dn_chain_p(f->fn_key, dz);
 			    *fp && dn_key_leq((*fp)->fn_key, f->fn_key);
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 64ca2a6fa0d4..0a47b6c37038 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -288,7 +288,6 @@ static const struct ethtool_ops dsa_slave_ethtool_ops = {
 	.get_drvinfo		= dsa_slave_get_drvinfo,
 	.nway_reset		= dsa_slave_nway_reset,
 	.get_link		= dsa_slave_get_link,
-	.set_sg			= ethtool_op_set_sg,
 	.get_strings		= dsa_slave_get_strings,
 	.get_ethtool_stats	= dsa_slave_get_ethtool_stats,
 	.get_sset_count		= dsa_slave_get_sset_count,
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index 116d3fd3d669..a1d9f3787dd5 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -935,7 +935,6 @@ static void aun_data_available(struct sock *sk, int slen)
 	struct sk_buff *skb;
 	unsigned char *data;
 	struct aunhdr *ah;
-	struct iphdr *ip;
 	size_t len;
 
 	while ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL) {
@@ -949,7 +948,6 @@ static void aun_data_available(struct sock *sk, int slen)
 	data = skb_transport_header(skb) + sizeof(struct udphdr);
 	ah = (struct aunhdr *)data;
 	len = skb->len - sizeof(struct udphdr);
-	ip = ip_hdr(skb);
 
 	switch (ah->code)
 	{
@@ -962,12 +960,6 @@ static void aun_data_available(struct sock *sk, int slen)
 	case 4:
 		aun_tx_ack(ah->handle, ECTYPE_TRANSMIT_NOT_LISTENING);
 		break;
-#if 0
-		/* This isn't quite right yet. */
-	case 5:
-		aun_send_response(ip->saddr, ah->handle, 6, ah->cb);
-		break;
-#endif
 	default:
 		printk(KERN_DEBUG "unknown AUN packet (type %d)\n", data[0]);
 	}
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index 0dc772d0d125..f2dc69cffb57 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -11,7 +11,7 @@ obj-y := route.o inetpeer.o protocol.o \
 	     datagram.o raw.o udp.o udplite.o \
 	     arp.o icmp.o devinet.o af_inet.o igmp.o \
 	     fib_frontend.o fib_semantics.o fib_trie.o \
-	     inet_fragment.o
+	     inet_fragment.o ping.o
 
 obj-$(CONFIG_SYSCTL) += sysctl_net_ipv4.o
 obj-$(CONFIG_PROC_FS) += proc.o
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 807d83c02ef6..cc1463156cd0 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -105,6 +105,7 @@
 #include <net/tcp.h>
 #include <net/udp.h>
 #include <net/udplite.h>
+#include <net/ping.h>
 #include <linux/skbuff.h>
 #include <net/sock.h>
 #include <net/raw.h>
@@ -153,7 +154,7 @@ void inet_sock_destruct(struct sock *sk)
 	WARN_ON(sk->sk_wmem_queued);
 	WARN_ON(sk->sk_forward_alloc);
 
-	kfree(inet->opt);
+	kfree(rcu_dereference_protected(inet->inet_opt, 1));
 	dst_release(rcu_dereference_check(sk->sk_dst_cache, 1));
 	sk_refcnt_debug_dec(sk);
 }
@@ -1008,6 +1009,14 @@ static struct inet_protosw inetsw_array[] =
 		.flags =      INET_PROTOSW_PERMANENT,
 	},
 
+	{
+		.type =       SOCK_DGRAM,
+		.protocol =   IPPROTO_ICMP,
+		.prot =       &ping_prot,
+		.ops =        &inet_dgram_ops,
+		.no_check =   UDP_CSUM_DEFAULT,
+		.flags =      INET_PROTOSW_REUSE,
+	},
 
 	{
 		.type =       SOCK_RAW,
@@ -1103,14 +1112,19 @@ static int inet_sk_reselect_saddr(struct sock *sk)
 	struct inet_sock *inet = inet_sk(sk);
 	__be32 old_saddr = inet->inet_saddr;
 	__be32 daddr = inet->inet_daddr;
+	struct flowi4 *fl4;
 	struct rtable *rt;
 	__be32 new_saddr;
+	struct ip_options_rcu *inet_opt;
 
-	if (inet->opt && inet->opt->srr)
-		daddr = inet->opt->faddr;
+	inet_opt = rcu_dereference_protected(inet->inet_opt,
+					     sock_owned_by_user(sk));
+	if (inet_opt && inet_opt->opt.srr)
+		daddr = inet_opt->opt.faddr;
 
 	/* Query new route. */
-	rt = ip_route_connect(daddr, 0, RT_CONN_FLAGS(sk),
+	fl4 = &inet->cork.fl.u.ip4;
+	rt = ip_route_connect(fl4, daddr, 0, RT_CONN_FLAGS(sk),
 			      sk->sk_bound_dev_if, sk->sk_protocol,
 			      inet->inet_sport, inet->inet_dport, sk, false);
 	if (IS_ERR(rt))
@@ -1118,7 +1132,7 @@ static int inet_sk_reselect_saddr(struct sock *sk)
 
 	sk_setup_caps(sk, &rt->dst);
 
-	new_saddr = rt->rt_src;
+	new_saddr = fl4->saddr;
 
 	if (new_saddr == old_saddr)
 		return 0;
@@ -1147,6 +1161,8 @@ int inet_sk_rebuild_header(struct sock *sk)
 	struct inet_sock *inet = inet_sk(sk);
 	struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0);
 	__be32 daddr;
+	struct ip_options_rcu *inet_opt;
+	struct flowi4 *fl4;
 	int err;
 
 	/* Route is OK, nothing to do. */
@@ -1154,10 +1170,14 @@ int inet_sk_rebuild_header(struct sock *sk)
 		return 0;
 
 	/* Reroute. */
+	rcu_read_lock();
+	inet_opt = rcu_dereference(inet->inet_opt);
 	daddr = inet->inet_daddr;
-	if (inet->opt && inet->opt->srr)
-		daddr = inet->opt->faddr;
-	rt = ip_route_output_ports(sock_net(sk), sk, daddr, inet->inet_saddr,
+	if (inet_opt && inet_opt->opt.srr)
+		daddr = inet_opt->opt.faddr;
+	rcu_read_unlock();
+	fl4 = &inet->cork.fl.u.ip4;
+	rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr, inet->inet_saddr,
 				   inet->inet_dport, inet->inet_sport,
 				   sk->sk_protocol, RT_CONN_FLAGS(sk),
 				   sk->sk_bound_dev_if);
@@ -1186,7 +1206,7 @@ EXPORT_SYMBOL(inet_sk_rebuild_header);
 
 static int inet_gso_send_check(struct sk_buff *skb)
 {
-	struct iphdr *iph;
+	const struct iphdr *iph;
 	const struct net_protocol *ops;
 	int proto;
 	int ihl;
@@ -1293,7 +1313,7 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
 	const struct net_protocol *ops;
 	struct sk_buff **pp = NULL;
 	struct sk_buff *p;
-	struct iphdr *iph;
+	const struct iphdr *iph;
 	unsigned int hlen;
 	unsigned int off;
 	unsigned int id;
@@ -1516,6 +1536,7 @@ static const struct net_protocol udp_protocol = {
 
 static const struct net_protocol icmp_protocol = {
 	.handler =	icmp_rcv,
+	.err_handler =	ping_err,
 	.no_policy =	1,
 	.netns_ok =	1,
 };
@@ -1631,6 +1652,10 @@ static int __init inet_init(void)
 	if (rc)
 		goto out_unregister_udp_proto;
 
+	rc = proto_register(&ping_prot, 1);
+	if (rc)
+		goto out_unregister_raw_proto;
+
 	/*
 	 *	Tell SOCKET that we are alive...
 	 */
@@ -1686,6 +1711,8 @@ static int __init inet_init(void)
 	/* Add UDP-Lite (RFC 3828) */
 	udplite4_register();
 
+	ping_init();
+
 	/*
 	 *	Set the ICMP layer up
 	 */
@@ -1716,6 +1743,8 @@ static int __init inet_init(void)
 	rc = 0;
 out:
 	return rc;
+out_unregister_raw_proto:
+	proto_unregister(&raw_prot);
 out_unregister_udp_proto:
 	proto_unregister(&udp_prot);
 out_unregister_tcp_proto:
@@ -1740,11 +1769,15 @@ static int __init ipv4_proc_init(void)
 		goto out_tcp;
 	if (udp4_proc_init())
 		goto out_udp;
+	if (ping_proc_init())
+		goto out_ping;
 	if (ip_misc_proc_init())
 		goto out_misc;
 out:
 	return rc;
 out_misc:
+	ping_proc_exit();
+out_ping:
 	udp4_proc_exit();
 out_udp:
 	tcp4_proc_exit();
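The ping_prot/ping.o wiring above is the IPv4 half of unprivileged ICMP echo sockets: a SOCK_DGRAM, IPPROTO_ICMP protosw whose sockets need no CAP_NET_RAW, with the kernel owning the ICMP id and checksum. From user space the feature looks roughly like the sketch below; access is gated by a ping_group_range sysctl, so the socket() call can legitimately fail with EACCES on a default configuration:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/ip_icmp.h>
#include <arpa/inet.h>

int main(void)
{
	struct sockaddr_in dst = { .sin_family = AF_INET };
	struct icmphdr icmp = { .type = ICMP_ECHO };
	char buf[192];
	int fd;

	/* SOCK_DGRAM + IPPROTO_ICMP: no raw-socket privilege required */
	fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_ICMP);
	if (fd < 0) {
		perror("socket");	/* EACCES unless ping_group_range allows us */
		return 1;
	}
	inet_pton(AF_INET, "127.0.0.1", &dst.sin_addr);
	icmp.un.echo.sequence = htons(1);

	/* the echo id and the checksum are filled in by the kernel */
	if (sendto(fd, &icmp, sizeof(icmp), 0,
		   (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("sendto");
	else if (recv(fd, buf, sizeof(buf), 0) < 0)
		perror("recv");
	else
		puts("echo reply received");
	close(fd);
	return 0;
}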
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 4286fd3cc0e2..c1f4154552fc 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -73,7 +73,7 @@ static inline struct scatterlist *ah_req_sg(struct crypto_ahash *ahash,
  * into IP header for icv calculation. Options are already checked
  * for validity, so paranoia is not required. */
 
-static int ip_clear_mutable_options(struct iphdr *iph, __be32 *daddr)
+static int ip_clear_mutable_options(const struct iphdr *iph, __be32 *daddr)
 {
 	unsigned char * optptr = (unsigned char*)(iph+1);
 	int  l = iph->ihl*4 - sizeof(struct iphdr);
@@ -396,7 +396,7 @@ out:
 static void ah4_err(struct sk_buff *skb, u32 info)
 {
 	struct net *net = dev_net(skb->dev);
-	struct iphdr *iph = (struct iphdr *)skb->data;
+	const struct iphdr *iph = (const struct iphdr *)skb->data;
 	struct ip_auth_hdr *ah = (struct ip_auth_hdr *)(skb->data+(iph->ihl<<2));
 	struct xfrm_state *x;
 
@@ -404,7 +404,8 @@ static void ah4_err(struct sk_buff *skb, u32 info)
 	    icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
 		return;
 
-	x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, ah->spi, IPPROTO_AH, AF_INET);
+	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
+			      ah->spi, IPPROTO_AH, AF_INET);
 	if (!x)
 		return;
 	printk(KERN_DEBUG "pmtu discovery on SA AH/%08x/%08x\n",
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index a0af7ea87870..2b3c23c287cd 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -1857,6 +1857,11 @@ static int cipso_v4_genopt(unsigned char *buf, u32 buf_len,
 	return CIPSO_V4_HDR_LEN + ret_val;
 }
 
+static void opt_kfree_rcu(struct rcu_head *head)
+{
+	kfree(container_of(head, struct ip_options_rcu, rcu));
+}
+
 /**
  * cipso_v4_sock_setattr - Add a CIPSO option to a socket
  * @sk: the socket
@@ -1879,7 +1884,7 @@ int cipso_v4_sock_setattr(struct sock *sk,
 	unsigned char *buf = NULL;
 	u32 buf_len;
 	u32 opt_len;
-	struct ip_options *opt = NULL;
+	struct ip_options_rcu *old, *opt = NULL;
 	struct inet_sock *sk_inet;
 	struct inet_connection_sock *sk_conn;
 
@@ -1915,22 +1920,25 @@ int cipso_v4_sock_setattr(struct sock *sk,
 		ret_val = -ENOMEM;
 		goto socket_setattr_failure;
 	}
-	memcpy(opt->__data, buf, buf_len);
-	opt->optlen = opt_len;
-	opt->cipso = sizeof(struct iphdr);
+	memcpy(opt->opt.__data, buf, buf_len);
+	opt->opt.optlen = opt_len;
+	opt->opt.cipso = sizeof(struct iphdr);
 	kfree(buf);
 	buf = NULL;
 
 	sk_inet = inet_sk(sk);
+
+	old = rcu_dereference_protected(sk_inet->inet_opt, sock_owned_by_user(sk));
 	if (sk_inet->is_icsk) {
 		sk_conn = inet_csk(sk);
-		if (sk_inet->opt)
-			sk_conn->icsk_ext_hdr_len -= sk_inet->opt->optlen;
-		sk_conn->icsk_ext_hdr_len += opt->optlen;
+		if (old)
+			sk_conn->icsk_ext_hdr_len -= old->opt.optlen;
+		sk_conn->icsk_ext_hdr_len += opt->opt.optlen;
 		sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie);
 	}
-	opt = xchg(&sk_inet->opt, opt);
-	kfree(opt);
+	rcu_assign_pointer(sk_inet->inet_opt, opt);
+	if (old)
+		call_rcu(&old->rcu, opt_kfree_rcu);
 
 	return 0;
 
@@ -1960,7 +1968,7 @@ int cipso_v4_req_setattr(struct request_sock *req,
 	unsigned char *buf = NULL;
 	u32 buf_len;
 	u32 opt_len;
-	struct ip_options *opt = NULL;
+	struct ip_options_rcu *opt = NULL;
 	struct inet_request_sock *req_inet;
 
 	/* We allocate the maximum CIPSO option size here so we are probably
@@ -1988,15 +1996,16 @@ int cipso_v4_req_setattr(struct request_sock *req,
 		ret_val = -ENOMEM;
 		goto req_setattr_failure;
 	}
-	memcpy(opt->__data, buf, buf_len);
-	opt->optlen = opt_len;
-	opt->cipso = sizeof(struct iphdr);
+	memcpy(opt->opt.__data, buf, buf_len);
+	opt->opt.optlen = opt_len;
+	opt->opt.cipso = sizeof(struct iphdr);
 	kfree(buf);
 	buf = NULL;
 
 	req_inet = inet_rsk(req);
 	opt = xchg(&req_inet->opt, opt);
-	kfree(opt);
+	if (opt)
+		call_rcu(&opt->rcu, opt_kfree_rcu);
 
 	return 0;
 
@@ -2016,34 +2025,34 @@ req_setattr_failure:
  * values on failure.
  *
  */
-static int cipso_v4_delopt(struct ip_options **opt_ptr)
+static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr)
 {
 	int hdr_delta = 0;
-	struct ip_options *opt = *opt_ptr;
+	struct ip_options_rcu *opt = *opt_ptr;
 
-	if (opt->srr || opt->rr || opt->ts || opt->router_alert) {
+	if (opt->opt.srr || opt->opt.rr || opt->opt.ts || opt->opt.router_alert) {
 		u8 cipso_len;
 		u8 cipso_off;
 		unsigned char *cipso_ptr;
 		int iter;
 		int optlen_new;
 
-		cipso_off = opt->cipso - sizeof(struct iphdr);
-		cipso_ptr = &opt->__data[cipso_off];
+		cipso_off = opt->opt.cipso - sizeof(struct iphdr);
+		cipso_ptr = &opt->opt.__data[cipso_off];
 		cipso_len = cipso_ptr[1];
 
-		if (opt->srr > opt->cipso)
-			opt->srr -= cipso_len;
-		if (opt->rr > opt->cipso)
-			opt->rr -= cipso_len;
-		if (opt->ts > opt->cipso)
-			opt->ts -= cipso_len;
-		if (opt->router_alert > opt->cipso)
-			opt->router_alert -= cipso_len;
-		opt->cipso = 0;
+		if (opt->opt.srr > opt->opt.cipso)
+			opt->opt.srr -= cipso_len;
+		if (opt->opt.rr > opt->opt.cipso)
+			opt->opt.rr -= cipso_len;
+		if (opt->opt.ts > opt->opt.cipso)
+			opt->opt.ts -= cipso_len;
+		if (opt->opt.router_alert > opt->opt.cipso)
+			opt->opt.router_alert -= cipso_len;
+		opt->opt.cipso = 0;
 
 		memmove(cipso_ptr, cipso_ptr + cipso_len,
-			opt->optlen - cipso_off - cipso_len);
+			opt->opt.optlen - cipso_off - cipso_len);
 
 		/* determining the new total option length is tricky because of
 		 * the padding necessary, the only thing i can think to do at
@@ -2052,21 +2061,21 @@ static int cipso_v4_delopt(struct ip_options **opt_ptr)
 		 * from there we can determine the new total option length */
 		iter = 0;
 		optlen_new = 0;
-		while (iter < opt->optlen)
-			if (opt->__data[iter] != IPOPT_NOP) {
-				iter += opt->__data[iter + 1];
+		while (iter < opt->opt.optlen)
+			if (opt->opt.__data[iter] != IPOPT_NOP) {
+				iter += opt->opt.__data[iter + 1];
 				optlen_new = iter;
 			} else
 				iter++;
-		hdr_delta = opt->optlen;
-		opt->optlen = (optlen_new + 3) & ~3;
-		hdr_delta -= opt->optlen;
+		hdr_delta = opt->opt.optlen;
+		opt->opt.optlen = (optlen_new + 3) & ~3;
+		hdr_delta -= opt->opt.optlen;
 	} else {
 		/* only the cipso option was present on the socket so we can
 		 * remove the entire option struct */
 		*opt_ptr = NULL;
-		hdr_delta = opt->optlen;
-		kfree(opt);
+		hdr_delta = opt->opt.optlen;
+		call_rcu(&opt->rcu, opt_kfree_rcu);
 	}
 
 	return hdr_delta;
@@ -2083,15 +2092,15 @@ static int cipso_v4_delopt(struct ip_options **opt_ptr)
 void cipso_v4_sock_delattr(struct sock *sk)
 {
 	int hdr_delta;
-	struct ip_options *opt;
+	struct ip_options_rcu *opt;
 	struct inet_sock *sk_inet;
 
 	sk_inet = inet_sk(sk);
-	opt = sk_inet->opt;
-	if (opt == NULL || opt->cipso == 0)
+	opt = rcu_dereference_protected(sk_inet->inet_opt, 1);
+	if (opt == NULL || opt->opt.cipso == 0)
 		return;
 
-	hdr_delta = cipso_v4_delopt(&sk_inet->opt);
+	hdr_delta = cipso_v4_delopt(&sk_inet->inet_opt);
 	if (sk_inet->is_icsk && hdr_delta > 0) {
 		struct inet_connection_sock *sk_conn = inet_csk(sk);
 		sk_conn->icsk_ext_hdr_len -= hdr_delta;
@@ -2109,12 +2118,12 @@ void cipso_v4_sock_delattr(struct sock *sk)
  */
 void cipso_v4_req_delattr(struct request_sock *req)
 {
-	struct ip_options *opt;
+	struct ip_options_rcu *opt;
 	struct inet_request_sock *req_inet;
 
 	req_inet = inet_rsk(req);
 	opt = req_inet->opt;
-	if (opt == NULL || opt->cipso == 0)
+	if (opt == NULL || opt->opt.cipso == 0)
 		return;
 
 	cipso_v4_delopt(&req_inet->opt);
@@ -2184,14 +2193,18 @@ getattr_return:
  */
 int cipso_v4_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr)
 {
-	struct ip_options *opt;
+	struct ip_options_rcu *opt;
+	int res = -ENOMSG;
 
-	opt = inet_sk(sk)->opt;
-	if (opt == NULL || opt->cipso == 0)
-		return -ENOMSG;
-
-	return cipso_v4_getattr(opt->__data + opt->cipso - sizeof(struct iphdr),
-				secattr);
+	rcu_read_lock();
+	opt = rcu_dereference(inet_sk(sk)->inet_opt);
+	if (opt && opt->opt.cipso)
+		res = cipso_v4_getattr(opt->opt.__data +
+						opt->opt.cipso -
+						sizeof(struct iphdr),
+				       secattr);
+	rcu_read_unlock();
+	return res;
 }
 
 /**
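The cipso conversion shows the full lifecycle of the new ip_options_rcu objects: a writer that owns the socket reads the old pointer with rcu_dereference_protected(), publishes the replacement with rcu_assign_pointer(), and retires the old block through call_rcu() with a container_of() callback, because lockless readers may still be walking it. The core of the pattern, extracted from the hunks above (kernel-style fragment):

	static void opt_kfree_rcu(struct rcu_head *head)
	{
		kfree(container_of(head, struct ip_options_rcu, rcu));
	}

	/* writer side, with the socket lock held */
	old = rcu_dereference_protected(sk_inet->inet_opt,
					sock_owned_by_user(sk));
	rcu_assign_pointer(sk_inet->inet_opt, opt);	/* publish */
	if (old)
		call_rcu(&old->rcu, opt_kfree_rcu);	/* retire after grace period */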
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index 85bd24ca4f6d..424fafbc8cb0 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -24,6 +24,7 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 {
 	struct inet_sock *inet = inet_sk(sk);
 	struct sockaddr_in *usin = (struct sockaddr_in *) uaddr;
+	struct flowi4 *fl4;
 	struct rtable *rt;
 	__be32 saddr;
 	int oif;
@@ -38,6 +39,8 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 
 	sk_dst_reset(sk);
 
+	lock_sock(sk);
+
 	oif = sk->sk_bound_dev_if;
 	saddr = inet->inet_saddr;
 	if (ipv4_is_multicast(usin->sin_addr.s_addr)) {
@@ -46,7 +49,8 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 		if (!saddr)
 			saddr = inet->mc_addr;
 	}
-	rt = ip_route_connect(usin->sin_addr.s_addr, saddr,
+	fl4 = &inet->cork.fl.u.ip4;
+	rt = ip_route_connect(fl4, usin->sin_addr.s_addr, saddr,
 			      RT_CONN_FLAGS(sk), oif,
 			      sk->sk_protocol,
 			      inet->inet_sport, usin->sin_port, sk, true);
@@ -54,26 +58,30 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 		err = PTR_ERR(rt);
 		if (err == -ENETUNREACH)
 			IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
-		return err;
+		goto out;
 	}
 
 	if ((rt->rt_flags & RTCF_BROADCAST) && !sock_flag(sk, SOCK_BROADCAST)) {
 		ip_rt_put(rt);
-		return -EACCES;
+		err = -EACCES;
+		goto out;
 	}
 	if (!inet->inet_saddr)
-		inet->inet_saddr = rt->rt_src;	/* Update source address */
+		inet->inet_saddr = fl4->saddr;	/* Update source address */
 	if (!inet->inet_rcv_saddr) {
-		inet->inet_rcv_saddr = rt->rt_src;
+		inet->inet_rcv_saddr = fl4->saddr;
 		if (sk->sk_prot->rehash)
 			sk->sk_prot->rehash(sk);
 	}
-	inet->inet_daddr = rt->rt_dst;
+	inet->inet_daddr = fl4->daddr;
 	inet->inet_dport = usin->sin_port;
 	sk->sk_state = TCP_ESTABLISHED;
 	inet->inet_id = jiffies;
 
 	sk_dst_set(sk, &rt->dst);
-	return 0;
+	err = 0;
+out:
+	release_sock(sk);
+	return err;
 }
 EXPORT_SYMBOL(ip4_datagram_connect);
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index cd9ca0811cfa..0d4a184af16f 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1369,7 +1369,7 @@ errout:
 
 static size_t inet_get_link_af_size(const struct net_device *dev)
 {
-	struct in_device *in_dev = __in_dev_get_rtnl(dev);
+	struct in_device *in_dev = rcu_dereference_rtnl(dev->ip_ptr);
 
 	if (!in_dev)
 		return 0;
@@ -1379,7 +1379,7 @@ static size_t inet_get_link_af_size(const struct net_device *dev)
 
 static int inet_fill_link_af(struct sk_buff *skb, const struct net_device *dev)
 {
-	struct in_device *in_dev = __in_dev_get_rtnl(dev);
+	struct in_device *in_dev = rcu_dereference_rtnl(dev->ip_ptr);
 	struct nlattr *nla;
 	int i;
 
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 03f994bcf7de..a5b413416da3 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -276,7 +276,7 @@ error:
 
 static int esp_input_done2(struct sk_buff *skb, int err)
 {
-	struct iphdr *iph;
+	const struct iphdr *iph;
 	struct xfrm_state *x = xfrm_input_state(skb);
 	struct esp_data *esp = x->data;
 	struct crypto_aead *aead = esp->aead;
@@ -484,7 +484,7 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
 static void esp4_err(struct sk_buff *skb, u32 info)
 {
 	struct net *net = dev_net(skb->dev);
-	struct iphdr *iph = (struct iphdr *)skb->data;
+	const struct iphdr *iph = (const struct iphdr *)skb->data;
 	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2));
 	struct xfrm_state *x;
 
@@ -492,7 +492,8 @@ static void esp4_err(struct sk_buff *skb, u32 info)
 	    icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
 		return;
 
-	x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, esph->spi, IPPROTO_ESP, AF_INET);
+	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
+			      esph->spi, IPPROTO_ESP, AF_INET);
 	if (!x)
 		return;
 	NETDEBUG(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%08x\n",
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 451088330bbb..22524716fe70 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -44,6 +44,7 @@
 #include <net/arp.h>
 #include <net/ip_fib.h>
 #include <net/rtnetlink.h>
+#include <net/xfrm.h>
 
 #ifndef CONFIG_IP_MULTIPLE_TABLES
 
@@ -188,9 +189,9 @@ EXPORT_SYMBOL(inet_dev_addr_type);
  * - check, that packet arrived from expected physical interface.
  * called with rcu_read_lock()
  */
-int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
-			struct net_device *dev, __be32 *spec_dst,
-			u32 *itag, u32 mark)
+int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, u8 tos,
+			int oif, struct net_device *dev, __be32 *spec_dst,
+			u32 *itag)
 {
 	struct in_device *in_dev;
 	struct flowi4 fl4;
@@ -202,7 +203,6 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
 
 	fl4.flowi4_oif = 0;
 	fl4.flowi4_iif = oif;
-	fl4.flowi4_mark = mark;
 	fl4.daddr = src;
 	fl4.saddr = dst;
 	fl4.flowi4_tos = tos;
@@ -212,10 +212,12 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
 	in_dev = __in_dev_get_rcu(dev);
 	if (in_dev) {
 		no_addr = in_dev->ifa_list == NULL;
-		rpf = IN_DEV_RPFILTER(in_dev);
+
+		/* Ignore rp_filter for packets protected by IPsec. */
+		rpf = secpath_exists(skb) ? 0 : IN_DEV_RPFILTER(in_dev);
+
 		accept_local = IN_DEV_ACCEPT_LOCAL(in_dev);
-		if (mark && !IN_DEV_SRC_VMARK(in_dev))
-			fl4.flowi4_mark = 0;
+		fl4.flowi4_mark = IN_DEV_SRC_VMARK(in_dev) ? skb->mark : 0;
 	}
 
 	if (in_dev == NULL)
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 11d4d28190bd..c779ce96e5b5 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -126,7 +126,7 @@ struct tnode {
 		struct work_struct work;
 		struct tnode *tnode_free;
 	};
-	struct rt_trie_node *child[0];
+	struct rt_trie_node __rcu *child[0];
 };
 
 #ifdef CONFIG_IP_FIB_TRIE_STATS
@@ -151,7 +151,7 @@
 };
 
 struct trie {
-	struct rt_trie_node *trie;
+	struct rt_trie_node __rcu *trie;
 #ifdef CONFIG_IP_FIB_TRIE_STATS
 	struct trie_use_stats stats;
 #endif
@@ -177,16 +177,29 @@ static const int sync_pages = 128;
 static struct kmem_cache *fn_alias_kmem __read_mostly;
 static struct kmem_cache *trie_leaf_kmem __read_mostly;
 
-static inline struct tnode *node_parent(struct rt_trie_node *node)
+/*
+ * caller must hold RTNL
+ */
+static inline struct tnode *node_parent(const struct rt_trie_node *node)
 {
-	return (struct tnode *)(node->parent & ~NODE_TYPE_MASK);
+	unsigned long parent;
+
+	parent = rcu_dereference_index_check(node->parent, lockdep_rtnl_is_held());
+
+	return (struct tnode *)(parent & ~NODE_TYPE_MASK);
 }
 
-static inline struct tnode *node_parent_rcu(struct rt_trie_node *node)
+/*
+ * caller must hold RCU read lock or RTNL
+ */
+static inline struct tnode *node_parent_rcu(const struct rt_trie_node *node)
 {
-	struct tnode *ret = node_parent(node);
+	unsigned long parent;
+
+	parent = rcu_dereference_index_check(node->parent, rcu_read_lock_held() ||
+							   lockdep_rtnl_is_held());
 
-	return rcu_dereference_rtnl(ret);
+	return (struct tnode *)(parent & ~NODE_TYPE_MASK);
 }
 
 /* Same as rcu_assign_pointer
@@ -198,18 +211,24 @@ static inline void node_set_parent(struct rt_trie_node *node, struct tnode *ptr)
 	node->parent = (unsigned long)ptr | NODE_TYPE(node);
 }
 
-static inline struct rt_trie_node *tnode_get_child(struct tnode *tn, unsigned int i)
+/*
+ * caller must hold RTNL
+ */
+static inline struct rt_trie_node *tnode_get_child(const struct tnode *tn, unsigned int i)
 {
 	BUG_ON(i >= 1U << tn->bits);
 
-	return tn->child[i];
+	return rtnl_dereference(tn->child[i]);
 }
 
-static inline struct rt_trie_node *tnode_get_child_rcu(struct tnode *tn, unsigned int i)
+/*
+ * caller must hold RCU read lock or RTNL
+ */
+static inline struct rt_trie_node *tnode_get_child_rcu(const struct tnode *tn, unsigned int i)
 {
-	struct rt_trie_node *ret = tnode_get_child(tn, i);
+	BUG_ON(i >= 1U << tn->bits);
 
-	return rcu_dereference_rtnl(ret);
+	return rcu_dereference_rtnl(tn->child[i]);
 }
 
 static inline int tnode_child_length(const struct tnode *tn)
@@ -482,7 +501,7 @@ static inline void put_child(struct trie *t, struct tnode *tn, int i,
 static void tnode_put_child_reorg(struct tnode *tn, int i, struct rt_trie_node *n,
 				  int wasfull)
 {
-	struct rt_trie_node *chi = tn->child[i];
+	struct rt_trie_node *chi = rtnl_dereference(tn->child[i]);
 	int isfull;
 
 	BUG_ON(i >= 1<<tn->bits);
@@ -660,7 +679,7 @@ one_child:
 	for (i = 0; i < tnode_child_length(tn); i++) {
 		struct rt_trie_node *n;
 
-		n = tn->child[i];
+		n = rtnl_dereference(tn->child[i]);
 		if (!n)
 			continue;
 
@@ -674,6 +693,20 @@
 	return (struct rt_trie_node *) tn;
 }
 
+
+static void tnode_clean_free(struct tnode *tn)
+{
+	int i;
+	struct tnode *tofree;
+
+	for (i = 0; i < tnode_child_length(tn); i++) {
+		tofree = (struct tnode *)rtnl_dereference(tn->child[i]);
+		if (tofree)
+			tnode_free(tofree);
+	}
+	tnode_free(tn);
+}
+
 static struct tnode *inflate(struct trie *t, struct tnode *tn)
 {
 	struct tnode *oldtnode = tn;
@@ -750,8 +783,8 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn)
 			inode = (struct tnode *) node;
 
 			if (inode->bits == 1) {
-				put_child(t, tn, 2*i, inode->child[0]);
-				put_child(t, tn, 2*i+1, inode->child[1]);
+				put_child(t, tn, 2*i, rtnl_dereference(inode->child[0]));
+				put_child(t, tn, 2*i+1, rtnl_dereference(inode->child[1]));
 
 				tnode_free_safe(inode);
 				continue;
@@ -792,8 +825,8 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn)
 
 			size = tnode_child_length(left);
 			for (j = 0; j < size; j++) {
-				put_child(t, left, j, inode->child[j]);
-				put_child(t, right, j, inode->child[j + size]);
+				put_child(t, left, j, rtnl_dereference(inode->child[j]));
+				put_child(t, right, j, rtnl_dereference(inode->child[j + size]));
 			}
 			put_child(t, tn, 2*i, resize(t, left));
 			put_child(t, tn, 2*i+1, resize(t, right));
@@ -803,18 +836,8 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn)
 	tnode_free_safe(oldtnode);
 	return tn;
 nomem:
-	{
-		int size = tnode_child_length(tn);
-		int j;
-
-		for (j = 0; j < size; j++)
-			if (tn->child[j])
-				tnode_free((struct tnode *)tn->child[j]);
-
-		tnode_free(tn);
-
-		return ERR_PTR(-ENOMEM);
-	}
+	tnode_clean_free(tn);
+	return ERR_PTR(-ENOMEM);
 }
 
 static struct tnode *halve(struct trie *t, struct tnode *tn)
@@ -885,18 +908,8 @@ static struct tnode *halve(struct trie *t, struct tnode *tn)
 	tnode_free_safe(oldtnode);
 	return tn;
 nomem:
-	{
-		int size = tnode_child_length(tn);
-		int j;
-
-		for (j = 0; j < size; j++)
-			if (tn->child[j])
-				tnode_free((struct tnode *)tn->child[j]);
-
-		tnode_free(tn);
-
-		return ERR_PTR(-ENOMEM);
-	}
+	tnode_clean_free(tn);
+	return ERR_PTR(-ENOMEM);
 }
 
 /* readside must use rcu_read_lock currently dump routines
@@ -1028,7 +1041,7 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
 	t_key cindex;
 
 	pos = 0;
-	n = t->trie;
+	n = rtnl_dereference(t->trie);
 
 	/* If we point to NULL, stop. Either the tree is empty and we should
 	 * just put a new leaf in if, or we have reached an empty child slot,
@@ -1314,6 +1327,9 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
 		}
 	}
 
+	if (!plen)
+		tb->tb_num_default++;
+
 	list_add_tail_rcu(&new_fa->fa_list,
 			  (fa ? &fa->fa_list : fa_head));
 
@@ -1679,6 +1695,9 @@ int fib_table_delete(struct fib_table *tb, struct fib_config *cfg)
 
 	list_del_rcu(&fa->fa_list);
 
+	if (!plen)
+		tb->tb_num_default--;
+
 	if (list_empty(fa_head)) {
 		hlist_del_rcu(&li->hlist);
 		free_leaf_info(li);
@@ -1751,7 +1770,7 @@ static struct leaf *leaf_walk_rcu(struct tnode *p, struct rt_trie_node *c)
 			continue;
 
 		if (IS_LEAF(c)) {
-			prefetch(p->child[idx]);
+			prefetch(rcu_dereference_rtnl(p->child[idx]));
 			return (struct leaf *) c;
 		}
 
@@ -1969,6 +1988,7 @@ struct fib_table *fib_trie_table(u32 id)
 
 	tb->tb_id = id;
 	tb->tb_default = -1;
+	tb->tb_num_default = 0;
 
 	t = (struct trie *) tb->tb_data;
 	memset(t, 0, sizeof(*t));
@@ -2264,7 +2284,7 @@ static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 
 	/* walk rest of this hash chain */
 	h = tb->tb_id & (FIB_TABLE_HASHSZ - 1);
-	while ( (tb_node = rcu_dereference(tb->tb_hlist.next)) ) {
+	while ((tb_node = rcu_dereference(hlist_next_rcu(&tb->tb_hlist)))) {
 		tb = hlist_entry(tb_node, struct fib_table, tb_hlist);
 		n = fib_trie_get_first(iter, (struct trie *) tb->tb_data);
 		if (n)
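The payoff of tagging trie and child[] with __rcu is static checking: with CONFIG_SPARSE_RCU_POINTER, sparse (make C=1) flags any dereference of an annotated pointer that does not go through an accessor stating its protection. The three accessors used throughout this file, side by side (kernel-style fragment; t and n as in the code above):

	struct rt_trie_node __rcu *trie;	/* annotated pointer */

	n = rcu_dereference(t->trie);		/* reader: inside rcu_read_lock() */
	n = rtnl_dereference(t->trie);		/* writer: RTNL held, no barrier needed */
	n = rcu_dereference_rtnl(t->trie);	/* either RCU read side or RTNL */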
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index e5f8a71d3a2a..5395e45dcce6 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -83,6 +83,7 @@
83#include <net/tcp.h> 83#include <net/tcp.h>
84#include <net/udp.h> 84#include <net/udp.h>
85#include <net/raw.h> 85#include <net/raw.h>
86#include <net/ping.h>
86#include <linux/skbuff.h> 87#include <linux/skbuff.h>
87#include <net/sock.h> 88#include <net/sock.h>
88#include <linux/errno.h> 89#include <linux/errno.h>
@@ -108,8 +109,7 @@ struct icmp_bxm {
108 __be32 times[3]; 109 __be32 times[3];
109 } data; 110 } data;
110 int head_len; 111 int head_len;
111 struct ip_options replyopts; 112 struct ip_options_data replyopts;
112 unsigned char optbuf[40];
113}; 113};
114 114
115/* An array of errno for error messages from dest unreach. */ 115/* An array of errno for error messages from dest unreach. */
@@ -234,7 +234,7 @@ static inline void icmp_xmit_unlock(struct sock *sk)
234 */ 234 */
235 235
236static inline bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt, 236static inline bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
237 int type, int code) 237 struct flowi4 *fl4, int type, int code)
238{ 238{
239 struct dst_entry *dst = &rt->dst; 239 struct dst_entry *dst = &rt->dst;
240 bool rc = true; 240 bool rc = true;
@@ -253,7 +253,7 @@ static inline bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
253 /* Limit if icmp type is enabled in ratemask. */ 253 /* Limit if icmp type is enabled in ratemask. */
254 if ((1 << type) & net->ipv4.sysctl_icmp_ratemask) { 254 if ((1 << type) & net->ipv4.sysctl_icmp_ratemask) {
255 if (!rt->peer) 255 if (!rt->peer)
256 rt_bind_peer(rt, 1); 256 rt_bind_peer(rt, fl4->daddr, 1);
257 rc = inet_peer_xrlim_allow(rt->peer, 257 rc = inet_peer_xrlim_allow(rt->peer,
258 net->ipv4.sysctl_icmp_ratelimit); 258 net->ipv4.sysctl_icmp_ratelimit);
259 } 259 }
@@ -291,13 +291,14 @@ static int icmp_glue_bits(void *from, char *to, int offset, int len, int odd,
291} 291}
292 292
293static void icmp_push_reply(struct icmp_bxm *icmp_param, 293static void icmp_push_reply(struct icmp_bxm *icmp_param,
294 struct flowi4 *fl4,
294 struct ipcm_cookie *ipc, struct rtable **rt) 295 struct ipcm_cookie *ipc, struct rtable **rt)
295{ 296{
296 struct sock *sk; 297 struct sock *sk;
297 struct sk_buff *skb; 298 struct sk_buff *skb;
298 299
299 sk = icmp_sk(dev_net((*rt)->dst.dev)); 300 sk = icmp_sk(dev_net((*rt)->dst.dev));
300 if (ip_append_data(sk, icmp_glue_bits, icmp_param, 301 if (ip_append_data(sk, fl4, icmp_glue_bits, icmp_param,
301 icmp_param->data_len+icmp_param->head_len, 302 icmp_param->data_len+icmp_param->head_len,
302 icmp_param->head_len, 303 icmp_param->head_len,
303 ipc, rt, MSG_DONTWAIT) < 0) { 304 ipc, rt, MSG_DONTWAIT) < 0) {
@@ -316,7 +317,7 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param,
316 icmp_param->head_len, csum); 317 icmp_param->head_len, csum);
317 icmph->checksum = csum_fold(csum); 318 icmph->checksum = csum_fold(csum);
318 skb->ip_summed = CHECKSUM_NONE; 319 skb->ip_summed = CHECKSUM_NONE;
319 ip_push_pending_frames(sk); 320 ip_push_pending_frames(sk, fl4);
320 } 321 }
321} 322}
322 323
@@ -329,11 +330,12 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
329 struct ipcm_cookie ipc; 330 struct ipcm_cookie ipc;
330 struct rtable *rt = skb_rtable(skb); 331 struct rtable *rt = skb_rtable(skb);
331 struct net *net = dev_net(rt->dst.dev); 332 struct net *net = dev_net(rt->dst.dev);
333 struct flowi4 fl4;
332 struct sock *sk; 334 struct sock *sk;
333 struct inet_sock *inet; 335 struct inet_sock *inet;
334 __be32 daddr; 336 __be32 daddr;
335 337
336 if (ip_options_echo(&icmp_param->replyopts, skb)) 338 if (ip_options_echo(&icmp_param->replyopts.opt.opt, skb))
337 return; 339 return;
338 340
339 sk = icmp_xmit_lock(net); 341 sk = icmp_xmit_lock(net);
@@ -344,65 +346,60 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
 	icmp_param->data.icmph.checksum = 0;
 
 	inet->tos = ip_hdr(skb)->tos;
-	daddr = ipc.addr = rt->rt_src;
+	daddr = ipc.addr = ip_hdr(skb)->saddr;
 	ipc.opt = NULL;
 	ipc.tx_flags = 0;
-	if (icmp_param->replyopts.optlen) {
-		ipc.opt = &icmp_param->replyopts;
-		if (ipc.opt->srr)
-			daddr = icmp_param->replyopts.faddr;
+	if (icmp_param->replyopts.opt.opt.optlen) {
+		ipc.opt = &icmp_param->replyopts.opt;
+		if (ipc.opt->opt.srr)
+			daddr = icmp_param->replyopts.opt.opt.faddr;
 	}
-	{
-		struct flowi4 fl4 = {
-			.daddr = daddr,
-			.saddr = rt->rt_spec_dst,
-			.flowi4_tos = RT_TOS(ip_hdr(skb)->tos),
-			.flowi4_proto = IPPROTO_ICMP,
-		};
-		security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
-		rt = ip_route_output_key(net, &fl4);
-		if (IS_ERR(rt))
-			goto out_unlock;
-	}
-	if (icmpv4_xrlim_allow(net, rt, icmp_param->data.icmph.type,
+	memset(&fl4, 0, sizeof(fl4));
+	fl4.daddr = daddr;
+	fl4.saddr = rt->rt_spec_dst;
+	fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
+	fl4.flowi4_proto = IPPROTO_ICMP;
+	security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
+	rt = ip_route_output_key(net, &fl4);
+	if (IS_ERR(rt))
+		goto out_unlock;
+	if (icmpv4_xrlim_allow(net, rt, &fl4, icmp_param->data.icmph.type,
 			       icmp_param->data.icmph.code))
-		icmp_push_reply(icmp_param, &ipc, &rt);
+		icmp_push_reply(icmp_param, &fl4, &ipc, &rt);
 	ip_rt_put(rt);
 out_unlock:
 	icmp_xmit_unlock(sk);
 }
 
-static struct rtable *icmp_route_lookup(struct net *net, struct sk_buff *skb_in,
-					struct iphdr *iph,
+static struct rtable *icmp_route_lookup(struct net *net,
+					struct flowi4 *fl4,
+					struct sk_buff *skb_in,
+					const struct iphdr *iph,
 					__be32 saddr, u8 tos,
 					int type, int code,
 					struct icmp_bxm *param)
 {
-	struct flowi4 fl4 = {
-		.daddr = (param->replyopts.srr ?
-			  param->replyopts.faddr : iph->saddr),
-		.saddr = saddr,
-		.flowi4_tos = RT_TOS(tos),
-		.flowi4_proto = IPPROTO_ICMP,
-		.fl4_icmp_type = type,
-		.fl4_icmp_code = code,
-	};
 	struct rtable *rt, *rt2;
 	int err;
 
-	security_skb_classify_flow(skb_in, flowi4_to_flowi(&fl4));
-	rt = __ip_route_output_key(net, &fl4);
+	memset(fl4, 0, sizeof(*fl4));
+	fl4->daddr = (param->replyopts.opt.opt.srr ?
+			param->replyopts.opt.opt.faddr : iph->saddr);
+	fl4->saddr = saddr;
+	fl4->flowi4_tos = RT_TOS(tos);
+	fl4->flowi4_proto = IPPROTO_ICMP;
+	fl4->fl4_icmp_type = type;
+	fl4->fl4_icmp_code = code;
+	security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4));
+	rt = __ip_route_output_key(net, fl4);
 	if (IS_ERR(rt))
 		return rt;
 
 	/* No need to clone since we're just using its address. */
 	rt2 = rt;
 
-	if (!fl4.saddr)
-		fl4.saddr = rt->rt_src;
-
 	rt = (struct rtable *) xfrm_lookup(net, &rt->dst,
-					   flowi4_to_flowi(&fl4), NULL, 0);
+					   flowi4_to_flowi(fl4), NULL, 0);
 	if (!IS_ERR(rt)) {
 		if (rt != rt2)
 			return rt;
@@ -411,19 +408,19 @@ static struct rtable *icmp_route_lookup(struct net *net, struct sk_buff *skb_in,
 	} else
 		return rt;
 
-	err = xfrm_decode_session_reverse(skb_in, flowi4_to_flowi(&fl4), AF_INET);
+	err = xfrm_decode_session_reverse(skb_in, flowi4_to_flowi(fl4), AF_INET);
 	if (err)
 		goto relookup_failed;
 
-	if (inet_addr_type(net, fl4.saddr) == RTN_LOCAL) {
-		rt2 = __ip_route_output_key(net, &fl4);
+	if (inet_addr_type(net, fl4->saddr) == RTN_LOCAL) {
+		rt2 = __ip_route_output_key(net, fl4);
 		if (IS_ERR(rt2))
 			err = PTR_ERR(rt2);
 	} else {
 		struct flowi4 fl4_2 = {};
 		unsigned long orefdst;
 
-		fl4_2.daddr = fl4.saddr;
+		fl4_2.daddr = fl4->saddr;
 		rt2 = ip_route_output_key(net, &fl4_2);
 		if (IS_ERR(rt2)) {
 			err = PTR_ERR(rt2);
@@ -431,7 +428,7 @@ static struct rtable *icmp_route_lookup(struct net *net, struct sk_buff *skb_in,
 		}
 		/* Ugh! */
 		orefdst = skb_in->_skb_refdst; /* save old refdst */
-		err = ip_route_input(skb_in, fl4.daddr, fl4.saddr,
+		err = ip_route_input(skb_in, fl4->daddr, fl4->saddr,
 				     RT_TOS(tos), rt2->dst.dev);
 
 		dst_release(&rt2->dst);
@@ -443,7 +440,7 @@ static struct rtable *icmp_route_lookup(struct net *net, struct sk_buff *skb_in,
 		goto relookup_failed;
 
 	rt2 = (struct rtable *) xfrm_lookup(net, &rt2->dst,
-					    flowi4_to_flowi(&fl4), NULL,
+					    flowi4_to_flowi(fl4), NULL,
 					    XFRM_LOOKUP_ICMP);
 	if (!IS_ERR(rt2)) {
 		dst_release(&rt->dst);
@@ -482,6 +479,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
 	struct icmp_bxm icmp_param;
 	struct rtable *rt = skb_rtable(skb_in);
 	struct ipcm_cookie ipc;
+	struct flowi4 fl4;
 	__be32 saddr;
 	u8 tos;
 	struct net *net;
@@ -581,7 +579,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
 					IPTOS_PREC_INTERNETCONTROL) :
 			iph->tos;
 
-	if (ip_options_echo(&icmp_param.replyopts, skb_in))
+	if (ip_options_echo(&icmp_param.replyopts.opt.opt, skb_in))
 		goto out_unlock;
 
 
@@ -597,15 +595,15 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
 	icmp_param.offset = skb_network_offset(skb_in);
 	inet_sk(sk)->tos = tos;
 	ipc.addr = iph->saddr;
-	ipc.opt = &icmp_param.replyopts;
+	ipc.opt = &icmp_param.replyopts.opt;
 	ipc.tx_flags = 0;
 
-	rt = icmp_route_lookup(net, skb_in, iph, saddr, tos,
+	rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos,
 			       type, code, &icmp_param);
 	if (IS_ERR(rt))
 		goto out_unlock;
 
-	if (!icmpv4_xrlim_allow(net, rt, type, code))
+	if (!icmpv4_xrlim_allow(net, rt, &fl4, type, code))
 		goto ende;
 
 	/* RFC says return as much as we can without exceeding 576 bytes. */
@@ -613,7 +611,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
 	room = dst_mtu(&rt->dst);
 	if (room > 576)
 		room = 576;
-	room -= sizeof(struct iphdr) + icmp_param.replyopts.optlen;
+	room -= sizeof(struct iphdr) + icmp_param.replyopts.opt.opt.optlen;
 	room -= sizeof(struct icmphdr);
 
 	icmp_param.data_len = skb_in->len - icmp_param.offset;
@@ -621,7 +619,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
 		icmp_param.data_len = room;
 	icmp_param.head_len = sizeof(struct icmphdr);
 
-	icmp_push_reply(&icmp_param, &ipc, &rt);
+	icmp_push_reply(&icmp_param, &fl4, &ipc, &rt);
 ende:
 	ip_rt_put(rt);
 out_unlock:
@@ -637,7 +635,7 @@ EXPORT_SYMBOL(icmp_send);
 
 static void icmp_unreach(struct sk_buff *skb)
 {
-	struct iphdr *iph;
+	const struct iphdr *iph;
 	struct icmphdr *icmph;
 	int hash, protocol;
 	const struct net_protocol *ipprot;
@@ -656,7 +654,7 @@ static void icmp_unreach(struct sk_buff *skb)
 		goto out_err;
 
 	icmph = icmp_hdr(skb);
-	iph = (struct iphdr *)skb->data;
+	iph = (const struct iphdr *)skb->data;
 
 	if (iph->ihl < 5) /* Mangled header, drop. */
 		goto out_err;
@@ -729,7 +727,7 @@ static void icmp_unreach(struct sk_buff *skb)
 	if (!pskb_may_pull(skb, iph->ihl * 4 + 8))
 		goto out;
 
-	iph = (struct iphdr *)skb->data;
+	iph = (const struct iphdr *)skb->data;
 	protocol = iph->protocol;
 
 	/*
@@ -758,7 +756,7 @@ out_err:
 
 static void icmp_redirect(struct sk_buff *skb)
 {
-	struct iphdr *iph;
+	const struct iphdr *iph;
 
 	if (skb->len < sizeof(struct iphdr))
 		goto out_err;
@@ -769,7 +767,7 @@ static void icmp_redirect(struct sk_buff *skb)
 	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 		goto out;
 
-	iph = (struct iphdr *)skb->data;
+	iph = (const struct iphdr *)skb->data;
 
 	switch (icmp_hdr(skb)->code & 7) {
 	case ICMP_REDIR_NET:
@@ -784,6 +782,15 @@ static void icmp_redirect(struct sk_buff *skb)
 			  iph->saddr, skb->dev);
 		break;
 	}
+
+	/* Ping wants to see redirects.
+	 * Let's pretend they are errors of sorts... */
+	if (iph->protocol == IPPROTO_ICMP &&
+	    iph->ihl >= 5 &&
+	    pskb_may_pull(skb, (iph->ihl<<2)+8)) {
+		ping_err(skb, icmp_hdr(skb)->un.gateway);
+	}
+
 out:
 	return;
 out_err:
@@ -933,12 +940,12 @@ static void icmp_address_reply(struct sk_buff *skb)
 		BUG_ON(mp == NULL);
 		for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
 			if (*mp == ifa->ifa_mask &&
-			    inet_ifa_match(rt->rt_src, ifa))
+			    inet_ifa_match(ip_hdr(skb)->saddr, ifa))
 				break;
 		}
 		if (!ifa && net_ratelimit()) {
 			printk(KERN_INFO "Wrong address mask %pI4 from %s/%pI4\n",
-			       mp, dev->name, &rt->rt_src);
+			       mp, dev->name, &ip_hdr(skb)->saddr);
 		}
 	}
 }
@@ -1044,7 +1051,7 @@ error:
  */
 static const struct icmp_control icmp_pointers[NR_ICMP_TYPES + 1] = {
 	[ICMP_ECHOREPLY] = {
-		.handler = icmp_discard,
+		.handler = ping_rcv,
 	},
 	[1] = {
 		.handler = icmp_discard,
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 8f62d66d0857..672e476c8c8a 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -303,6 +303,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
 	struct iphdr *pip;
 	struct igmpv3_report *pig;
 	struct net *net = dev_net(dev);
+	struct flowi4 fl4;
 
 	while (1) {
 		skb = alloc_skb(size + LL_ALLOCATED_SPACE(dev),
@@ -315,18 +316,13 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
 	}
 	igmp_skb_size(skb) = size;
 
-	rt = ip_route_output_ports(net, NULL, IGMPV3_ALL_MCR, 0,
+	rt = ip_route_output_ports(net, &fl4, NULL, IGMPV3_ALL_MCR, 0,
 				   0, 0,
 				   IPPROTO_IGMP, 0, dev->ifindex);
 	if (IS_ERR(rt)) {
 		kfree_skb(skb);
 		return NULL;
 	}
-	if (rt->rt_src == 0) {
-		kfree_skb(skb);
-		ip_rt_put(rt);
-		return NULL;
-	}
 
 	skb_dst_set(skb, &rt->dst);
 	skb->dev = dev;
@@ -342,8 +338,8 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
 	pip->tos = 0xc0;
 	pip->frag_off = htons(IP_DF);
 	pip->ttl = 1;
-	pip->daddr = rt->rt_dst;
-	pip->saddr = rt->rt_src;
+	pip->daddr = fl4.daddr;
+	pip->saddr = fl4.saddr;
 	pip->protocol = IPPROTO_IGMP;
 	pip->tot_len = 0; /* filled in later */
 	ip_select_ident(pip, &rt->dst, NULL);
@@ -649,6 +645,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
 	struct net_device *dev = in_dev->dev;
 	struct net *net = dev_net(dev);
 	__be32 group = pmc ? pmc->multiaddr : 0;
+	struct flowi4 fl4;
 	__be32 dst;
 
 	if (type == IGMPV3_HOST_MEMBERSHIP_REPORT)
@@ -658,17 +655,12 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
 	else
 		dst = group;
 
-	rt = ip_route_output_ports(net, NULL, dst, 0,
+	rt = ip_route_output_ports(net, &fl4, NULL, dst, 0,
 				   0, 0,
 				   IPPROTO_IGMP, 0, dev->ifindex);
 	if (IS_ERR(rt))
 		return -1;
 
-	if (rt->rt_src == 0) {
-		ip_rt_put(rt);
-		return -1;
-	}
-
 	skb = alloc_skb(IGMP_SIZE+LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
 	if (skb == NULL) {
 		ip_rt_put(rt);
@@ -689,7 +681,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
 	iph->frag_off = htons(IP_DF);
 	iph->ttl = 1;
 	iph->daddr = dst;
-	iph->saddr = rt->rt_src;
+	iph->saddr = fl4.saddr;
 	iph->protocol = IPPROTO_IGMP;
 	ip_select_ident(iph, &rt->dst, NULL);
 	((u8*)&iph[1])[0] = IPOPT_RA;
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 38f23e721b80..61fac4cabc78 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -350,30 +350,24 @@ void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
 EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);
 
 struct dst_entry *inet_csk_route_req(struct sock *sk,
+				     struct flowi4 *fl4,
 				     const struct request_sock *req)
 {
 	struct rtable *rt;
 	const struct inet_request_sock *ireq = inet_rsk(req);
-	struct ip_options *opt = inet_rsk(req)->opt;
-	struct flowi4 fl4 = {
-		.flowi4_oif = sk->sk_bound_dev_if,
-		.flowi4_mark = sk->sk_mark,
-		.daddr = ((opt && opt->srr) ?
-			  opt->faddr : ireq->rmt_addr),
-		.saddr = ireq->loc_addr,
-		.flowi4_tos = RT_CONN_FLAGS(sk),
-		.flowi4_proto = sk->sk_protocol,
-		.flowi4_flags = inet_sk_flowi_flags(sk),
-		.fl4_sport = inet_sk(sk)->inet_sport,
-		.fl4_dport = ireq->rmt_port,
-	};
+	struct ip_options_rcu *opt = inet_rsk(req)->opt;
 	struct net *net = sock_net(sk);
 
-	security_req_classify_flow(req, flowi4_to_flowi(&fl4));
-	rt = ip_route_output_flow(net, &fl4, sk);
+	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
+			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
+			   sk->sk_protocol, inet_sk_flowi_flags(sk),
+			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
+			   ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
+	security_req_classify_flow(req, flowi4_to_flowi(fl4));
+	rt = ip_route_output_flow(net, fl4, sk);
 	if (IS_ERR(rt))
 		goto no_route;
-	if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
+	if (opt && opt->opt.is_strictroute && fl4->daddr != rt->rt_gateway)
 		goto route_err;
 	return &rt->dst;
 
@@ -385,6 +379,39 @@ no_route:
 }
 EXPORT_SYMBOL_GPL(inet_csk_route_req);
 
+struct dst_entry *inet_csk_route_child_sock(struct sock *sk,
+					    struct sock *newsk,
+					    const struct request_sock *req)
+{
+	const struct inet_request_sock *ireq = inet_rsk(req);
+	struct inet_sock *newinet = inet_sk(newsk);
+	struct ip_options_rcu *opt = ireq->opt;
+	struct net *net = sock_net(sk);
+	struct flowi4 *fl4;
+	struct rtable *rt;
+
+	fl4 = &newinet->cork.fl.u.ip4;
+	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
+			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
+			   sk->sk_protocol, inet_sk_flowi_flags(sk),
+			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
+			   ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
+	security_req_classify_flow(req, flowi4_to_flowi(fl4));
+	rt = ip_route_output_flow(net, fl4, sk);
+	if (IS_ERR(rt))
+		goto no_route;
+	if (opt && opt->opt.is_strictroute && fl4->daddr != rt->rt_gateway)
+		goto route_err;
+	return &rt->dst;
+
+route_err:
+	ip_rt_put(rt);
+no_route:
+	IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(inet_csk_route_child_sock);
+
 static inline u32 inet_synq_hash(const __be32 raddr, const __be16 rport,
 				 const u32 rnd, const u32 synq_hsize)
 {
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 2ada17129fce..6ffe94ca5bc9 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -124,7 +124,7 @@ static int inet_csk_diag_fill(struct sock *sk,
 
 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
 	if (r->idiag_family == AF_INET6) {
-		struct ipv6_pinfo *np = inet6_sk(sk);
+		const struct ipv6_pinfo *np = inet6_sk(sk);
 
 		ipv6_addr_copy((struct in6_addr *)r->id.idiag_src,
 			       &np->rcv_saddr);
diff --git a/net/ipv4/inet_lro.c b/net/ipv4/inet_lro.c
index 47038cb6c138..85a0f75dae64 100644
--- a/net/ipv4/inet_lro.c
+++ b/net/ipv4/inet_lro.c
@@ -51,8 +51,8 @@ MODULE_DESCRIPTION("Large Receive Offload (ipv4 / tcp)");
  * Basic tcp checks whether packet is suitable for LRO
  */
 
-static int lro_tcp_ip_check(struct iphdr *iph, struct tcphdr *tcph,
-			    int len, struct net_lro_desc *lro_desc)
+static int lro_tcp_ip_check(const struct iphdr *iph, const struct tcphdr *tcph,
+			    int len, const struct net_lro_desc *lro_desc)
 {
 	/* check ip header: don't aggregate padded frames */
 	if (ntohs(iph->tot_len) != len)
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 99461f09320f..3b34d1c86270 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -84,7 +84,7 @@ int ip_forward(struct sk_buff *skb)
 
 	rt = skb_rtable(skb);
 
-	if (opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
+	if (opt->is_strictroute && ip_hdr(skb)->daddr != rt->rt_gateway)
 		goto sr_failed;
 
 	if (unlikely(skb->len > dst_mtu(&rt->dst) && !skb_is_gso(skb) &&
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index b1d282f11be7..0ad6035f6366 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -77,22 +77,40 @@ struct ipq {
 	struct inet_peer *peer;
 };
 
-#define IPFRAG_ECN_CLEAR  0x01 /* one frag had INET_ECN_NOT_ECT */
-#define IPFRAG_ECN_SET_CE 0x04 /* one frag had INET_ECN_CE */
+/* RFC 3168 support :
+ * We want to check ECN values of all fragments, do detect invalid combinations.
+ * In ipq->ecn, we store the OR value of each ip4_frag_ecn() fragment value.
+ */
+#define	IPFRAG_ECN_NOT_ECT	0x01 /* one frag had ECN_NOT_ECT */
+#define	IPFRAG_ECN_ECT_1	0x02 /* one frag had ECN_ECT_1 */
+#define	IPFRAG_ECN_ECT_0	0x04 /* one frag had ECN_ECT_0 */
+#define	IPFRAG_ECN_CE		0x08 /* one frag had ECN_CE */
 
 static inline u8 ip4_frag_ecn(u8 tos)
 {
-	tos = (tos & INET_ECN_MASK) + 1;
-	/*
-	 * After the last operation we have (in binary):
-	 * INET_ECN_NOT_ECT => 001
-	 * INET_ECN_ECT_1   => 010
-	 * INET_ECN_ECT_0   => 011
-	 * INET_ECN_CE      => 100
-	 */
-	return (tos & 2) ? 0 : tos;
+	return 1 << (tos & INET_ECN_MASK);
 }
 
+/* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
+ * Value : 0xff if frame should be dropped.
+ *         0 or INET_ECN_CE value, to be ORed in to final iph->tos field
+ */
+static const u8 ip4_frag_ecn_table[16] = {
+	/* at least one fragment had CE, and others ECT_0 or ECT_1 */
+	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0]			= INET_ECN_CE,
+	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1]			= INET_ECN_CE,
+	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1]	= INET_ECN_CE,
+
+	/* invalid combinations : drop frame */
+	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
+	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
+	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
+	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
+	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
+	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
+	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
+};
+
 static struct inet_frags ip4_frags;
 
 int ip_frag_nqueues(struct net *net)
@@ -524,9 +542,15 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
 	int len;
 	int ihlen;
 	int err;
+	u8 ecn;
 
 	ipq_kill(qp);
 
+	ecn = ip4_frag_ecn_table[qp->ecn];
+	if (unlikely(ecn == 0xff)) {
+		err = -EINVAL;
+		goto out_fail;
+	}
 	/* Make the one we just received the head. */
 	if (prev) {
 		head = prev->next;
@@ -605,17 +629,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
 	iph = ip_hdr(head);
 	iph->frag_off = 0;
 	iph->tot_len = htons(len);
-	/* RFC3168 5.3 Fragmentation support
-	 * If one fragment had INET_ECN_NOT_ECT,
-	 *   reassembled frame also has INET_ECN_NOT_ECT
-	 * Elif one fragment had INET_ECN_CE
-	 *   reassembled frame also has INET_ECN_CE
-	 */
-	if (qp->ecn & IPFRAG_ECN_CLEAR)
-		iph->tos &= ~INET_ECN_MASK;
-	else if (qp->ecn & IPFRAG_ECN_SET_CE)
-		iph->tos |= INET_ECN_CE;
-
+	iph->tos |= ecn;
 	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
 	qp->q.fragments = NULL;
 	qp->q.fragments_tail = NULL;
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index da5941f18c3c..8871067560db 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -413,11 +413,6 @@ static struct ip_tunnel *ipgre_tunnel_locate(struct net *net,
 
 	dev_net_set(dev, net);
 
-	if (strchr(name, '%')) {
-		if (dev_alloc_name(dev, name) < 0)
-			goto failed_free;
-	}
-
 	nt = netdev_priv(dev);
 	nt->parms = *parms;
 	dev->rtnl_link_ops = &ipgre_link_ops;
@@ -462,7 +457,7 @@ static void ipgre_err(struct sk_buff *skb, u32 info)
    by themself???
  */
 
-	struct iphdr *iph = (struct iphdr *)skb->data;
+	const struct iphdr *iph = (const struct iphdr *)skb->data;
 	__be16 *p = (__be16*)(skb->data+(iph->ihl<<2));
 	int grehlen = (iph->ihl<<2) + 4;
 	const int type = icmp_hdr(skb)->type;
@@ -534,7 +529,7 @@ out:
 	rcu_read_unlock();
 }
 
-static inline void ipgre_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb)
+static inline void ipgre_ecn_decapsulate(const struct iphdr *iph, struct sk_buff *skb)
 {
 	if (INET_ECN_is_ce(iph->tos)) {
 		if (skb->protocol == htons(ETH_P_IP)) {
@@ -546,19 +541,19 @@ static inline void ipgre_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb)
 }
 
 static inline u8
-ipgre_ecn_encapsulate(u8 tos, struct iphdr *old_iph, struct sk_buff *skb)
+ipgre_ecn_encapsulate(u8 tos, const struct iphdr *old_iph, struct sk_buff *skb)
 {
 	u8 inner = 0;
 	if (skb->protocol == htons(ETH_P_IP))
 		inner = old_iph->tos;
 	else if (skb->protocol == htons(ETH_P_IPV6))
-		inner = ipv6_get_dsfield((struct ipv6hdr *)old_iph);
+		inner = ipv6_get_dsfield((const struct ipv6hdr *)old_iph);
 	return INET_ECN_encapsulate(tos, inner);
 }
 
 static int ipgre_rcv(struct sk_buff *skb)
 {
-	struct iphdr *iph;
+	const struct iphdr *iph;
 	u8 *h;
 	__be16 flags;
 	__sum16 csum = 0;
@@ -697,8 +692,9 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 {
 	struct ip_tunnel *tunnel = netdev_priv(dev);
 	struct pcpu_tstats *tstats;
-	struct iphdr *old_iph = ip_hdr(skb);
-	struct iphdr *tiph;
+	const struct iphdr *old_iph = ip_hdr(skb);
+	const struct iphdr *tiph;
+	struct flowi4 fl4;
 	u8 tos;
 	__be16 df;
 	struct rtable *rt; /* Route to the other host */
@@ -714,7 +710,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 
 	if (dev->header_ops && dev->type == ARPHRD_IPGRE) {
 		gre_hlen = 0;
-		tiph = (struct iphdr *)skb->data;
+		tiph = (const struct iphdr *)skb->data;
 	} else {
 		gre_hlen = tunnel->hlen;
 		tiph = &tunnel->parms.iph;
@@ -735,14 +731,14 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 	}
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 	else if (skb->protocol == htons(ETH_P_IPV6)) {
-		struct in6_addr *addr6;
+		const struct in6_addr *addr6;
 		int addr_type;
 		struct neighbour *neigh = skb_dst(skb)->neighbour;
 
 		if (neigh == NULL)
 			goto tx_error;
 
-		addr6 = (struct in6_addr *)&neigh->primary_key;
+		addr6 = (const struct in6_addr *)&neigh->primary_key;
 		addr_type = ipv6_addr_type(addr6);
 
 		if (addr_type == IPV6_ADDR_ANY) {
@@ -766,10 +762,10 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 		if (skb->protocol == htons(ETH_P_IP))
 			tos = old_iph->tos;
 		else if (skb->protocol == htons(ETH_P_IPV6))
-			tos = ipv6_get_dsfield((struct ipv6hdr *)old_iph);
+			tos = ipv6_get_dsfield((const struct ipv6hdr *)old_iph);
 	}
 
-	rt = ip_route_output_gre(dev_net(dev), dst, tiph->saddr,
+	rt = ip_route_output_gre(dev_net(dev), &fl4, dst, tiph->saddr,
 				 tunnel->parms.o_key, RT_TOS(tos),
 				 tunnel->parms.link);
 	if (IS_ERR(rt)) {
@@ -873,15 +869,15 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 	iph->frag_off = df;
 	iph->protocol = IPPROTO_GRE;
 	iph->tos = ipgre_ecn_encapsulate(tos, old_iph, skb);
-	iph->daddr = rt->rt_dst;
-	iph->saddr = rt->rt_src;
+	iph->daddr = fl4.daddr;
+	iph->saddr = fl4.saddr;
 
 	if ((iph->ttl = tiph->ttl) == 0) {
 		if (skb->protocol == htons(ETH_P_IP))
 			iph->ttl = old_iph->ttl;
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 		else if (skb->protocol == htons(ETH_P_IPV6))
-			iph->ttl = ((struct ipv6hdr *)old_iph)->hop_limit;
+			iph->ttl = ((const struct ipv6hdr *)old_iph)->hop_limit;
 #endif
 		else
 			iph->ttl = ip4_dst_hoplimit(&rt->dst);
@@ -927,7 +923,7 @@ static int ipgre_tunnel_bind_dev(struct net_device *dev)
 {
 	struct net_device *tdev = NULL;
 	struct ip_tunnel *tunnel;
-	struct iphdr *iph;
+	const struct iphdr *iph;
 	int hlen = LL_MAX_HEADER;
 	int mtu = ETH_DATA_LEN;
 	int addend = sizeof(struct iphdr) + 4;
@@ -938,12 +934,14 @@ static int ipgre_tunnel_bind_dev(struct net_device *dev)
 	/* Guess output device to choose reasonable mtu and needed_headroom */
 
 	if (iph->daddr) {
-		struct rtable *rt = ip_route_output_gre(dev_net(dev),
-							iph->daddr, iph->saddr,
-							tunnel->parms.o_key,
-							RT_TOS(iph->tos),
-							tunnel->parms.link);
-
+		struct flowi4 fl4;
+		struct rtable *rt;
+
+		rt = ip_route_output_gre(dev_net(dev), &fl4,
+					 iph->daddr, iph->saddr,
+					 tunnel->parms.o_key,
+					 RT_TOS(iph->tos),
+					 tunnel->parms.link);
 		if (!IS_ERR(rt)) {
 			tdev = rt->dst.dev;
 			ip_rt_put(rt);
@@ -1180,7 +1178,7 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
 
 static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
 {
-	struct iphdr *iph = (struct iphdr *) skb_mac_header(skb);
+	const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb);
 	memcpy(haddr, &iph->saddr, 4);
 	return 4;
 }
@@ -1196,13 +1194,15 @@ static int ipgre_open(struct net_device *dev)
 	struct ip_tunnel *t = netdev_priv(dev);
 
 	if (ipv4_is_multicast(t->parms.iph.daddr)) {
-		struct rtable *rt = ip_route_output_gre(dev_net(dev),
-							t->parms.iph.daddr,
-							t->parms.iph.saddr,
-							t->parms.o_key,
-							RT_TOS(t->parms.iph.tos),
-							t->parms.link);
-
+		struct flowi4 fl4;
+		struct rtable *rt;
+
+		rt = ip_route_output_gre(dev_net(dev), &fl4,
+					 t->parms.iph.daddr,
+					 t->parms.iph.saddr,
+					 t->parms.o_key,
+					 RT_TOS(t->parms.iph.tos),
+					 t->parms.link);
 		if (IS_ERR(rt))
 			return -EADDRNOTAVAIL;
 		dev = rt->dst.dev;
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index d7b2b0987a3b..c8f48efc5fd3 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -268,7 +268,7 @@ int ip_local_deliver(struct sk_buff *skb)
 static inline int ip_rcv_options(struct sk_buff *skb)
 {
 	struct ip_options *opt;
-	struct iphdr *iph;
+	const struct iphdr *iph;
 	struct net_device *dev = skb->dev;
 
 	/* It looks as overkill, because not all
@@ -374,7 +374,7 @@ drop:
  */
 int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
 {
-	struct iphdr *iph;
+	const struct iphdr *iph;
 	u32 len;
 
 	/* When the interface is in promisc. mode, drop all the crap
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 2391b24e8251..c3118e1cd3bb 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -36,8 +36,8 @@
  * saddr is address of outgoing interface.
  */
 
-void ip_options_build(struct sk_buff * skb, struct ip_options * opt,
+void ip_options_build(struct sk_buff *skb, struct ip_options *opt,
 		      __be32 daddr, struct rtable *rt, int is_frag)
 {
 	unsigned char *iph = skb_network_header(skb);
 
@@ -50,9 +50,9 @@ void ip_options_build(struct sk_buff * skb, struct ip_options * opt,
 
 	if (!is_frag) {
 		if (opt->rr_needaddr)
-			ip_rt_get_source(iph+opt->rr+iph[opt->rr+2]-5, rt);
+			ip_rt_get_source(iph+opt->rr+iph[opt->rr+2]-5, skb, rt);
 		if (opt->ts_needaddr)
-			ip_rt_get_source(iph+opt->ts+iph[opt->ts+2]-9, rt);
+			ip_rt_get_source(iph+opt->ts+iph[opt->ts+2]-9, skb, rt);
 		if (opt->ts_needtime) {
 			struct timespec tv;
 			__be32 midtime;
@@ -83,9 +83,9 @@ void ip_options_build(struct sk_buff * skb, struct ip_options * opt,
  * NOTE: dopt cannot point to skb.
  */
 
-int ip_options_echo(struct ip_options * dopt, struct sk_buff * skb)
+int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb)
 {
-	struct ip_options *sopt;
+	const struct ip_options *sopt;
 	unsigned char *sptr, *dptr;
 	int soffset, doffset;
 	int optlen;
@@ -95,10 +95,8 @@ int ip_options_echo(struct ip_options * dopt, struct sk_buff * skb)
 
 	sopt = &(IPCB(skb)->opt);
 
-	if (sopt->optlen == 0) {
-		dopt->optlen = 0;
+	if (sopt->optlen == 0)
 		return 0;
-	}
 
 	sptr = skb_network_header(skb);
 	dptr = dopt->__data;
@@ -157,7 +155,7 @@ int ip_options_echo(struct ip_options * dopt, struct sk_buff * skb)
 		dopt->optlen += optlen;
 	}
 	if (sopt->srr) {
-		unsigned char * start = sptr+sopt->srr;
+		unsigned char *start = sptr+sopt->srr;
 		__be32 faddr;
 
 		optlen = start[1];
@@ -499,19 +497,19 @@ void ip_options_undo(struct ip_options * opt)
 	}
 }
 
-static struct ip_options *ip_options_get_alloc(const int optlen)
+static struct ip_options_rcu *ip_options_get_alloc(const int optlen)
 {
-	return kzalloc(sizeof(struct ip_options) + ((optlen + 3) & ~3),
+	return kzalloc(sizeof(struct ip_options_rcu) + ((optlen + 3) & ~3),
 		       GFP_KERNEL);
 }
 
-static int ip_options_get_finish(struct net *net, struct ip_options **optp,
-				 struct ip_options *opt, int optlen)
+static int ip_options_get_finish(struct net *net, struct ip_options_rcu **optp,
+				 struct ip_options_rcu *opt, int optlen)
 {
 	while (optlen & 3)
-		opt->__data[optlen++] = IPOPT_END;
-	opt->optlen = optlen;
-	if (optlen && ip_options_compile(net, opt, NULL)) {
+		opt->opt.__data[optlen++] = IPOPT_END;
+	opt->opt.optlen = optlen;
+	if (optlen && ip_options_compile(net, &opt->opt, NULL)) {
 		kfree(opt);
 		return -EINVAL;
 	}
@@ -520,29 +518,29 @@ static int ip_options_get_finish(struct net *net, struct ip_options **optp,
 	return 0;
 }
 
-int ip_options_get_from_user(struct net *net, struct ip_options **optp,
+int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp,
 			     unsigned char __user *data, int optlen)
 {
-	struct ip_options *opt = ip_options_get_alloc(optlen);
+	struct ip_options_rcu *opt = ip_options_get_alloc(optlen);
 
 	if (!opt)
 		return -ENOMEM;
-	if (optlen && copy_from_user(opt->__data, data, optlen)) {
+	if (optlen && copy_from_user(opt->opt.__data, data, optlen)) {
 		kfree(opt);
 		return -EFAULT;
 	}
 	return ip_options_get_finish(net, optp, opt, optlen);
 }
 
-int ip_options_get(struct net *net, struct ip_options **optp,
+int ip_options_get(struct net *net, struct ip_options_rcu **optp,
 		   unsigned char *data, int optlen)
 {
-	struct ip_options *opt = ip_options_get_alloc(optlen);
+	struct ip_options_rcu *opt = ip_options_get_alloc(optlen);
 
 	if (!opt)
 		return -ENOMEM;
 	if (optlen)
-		memcpy(opt->__data, data, optlen);
+		memcpy(opt->opt.__data, data, optlen);
 	return ip_options_get_finish(net, optp, opt, optlen);
 }
 
@@ -555,7 +553,7 @@ void ip_forward_options(struct sk_buff *skb)
 
 	if (opt->rr_needaddr) {
 		optptr = (unsigned char *)raw + opt->rr;
-		ip_rt_get_source(&optptr[optptr[2]-5], rt);
+		ip_rt_get_source(&optptr[optptr[2]-5], skb, rt);
 		opt->is_changed = 1;
 	}
 	if (opt->srr_is_hit) {
@@ -569,19 +567,18 @@ void ip_forward_options(struct sk_buff *skb)
 		     ) {
 			if (srrptr + 3 > srrspace)
 				break;
-			if (memcmp(&rt->rt_dst, &optptr[srrptr-1], 4) == 0)
+			if (memcmp(&ip_hdr(skb)->daddr, &optptr[srrptr-1], 4) == 0)
 				break;
 		}
 		if (srrptr + 3 <= srrspace) {
 			opt->is_changed = 1;
-			ip_rt_get_source(&optptr[srrptr-1], rt);
-			ip_hdr(skb)->daddr = rt->rt_dst;
+			ip_rt_get_source(&optptr[srrptr-1], skb, rt);
 			optptr[2] = srrptr+4;
 		} else if (net_ratelimit())
 			printk(KERN_CRIT "ip_forward(): Argh! Destination lost!\n");
 		if (opt->ts_needaddr) {
 			optptr = raw + opt->ts;
-			ip_rt_get_source(&optptr[optptr[2]-9], rt);
+			ip_rt_get_source(&optptr[optptr[2]-9], skb, rt);
 			opt->is_changed = 1;
 		}
 	}
@@ -603,7 +600,7 @@ int ip_options_rcv_srr(struct sk_buff *skb)
 	unsigned long orefdst;
 	int err;
 
-	if (!opt->srr || !rt)
+	if (!rt)
 		return 0;
 
 	if (skb->pkt_type != PACKET_HOST)
@@ -637,7 +634,7 @@ int ip_options_rcv_srr(struct sk_buff *skb)
 		if (rt2->rt_type != RTN_LOCAL)
 			break;
 		/* Superfast 8) loopback forward */
-		memcpy(&iph->daddr, &optptr[srrptr-1], 4);
+		iph->daddr = nexthop;
 		opt->is_changed = 1;
 	}
 	if (srrptr <= srrspace) {
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 459c011b1d4a..98af3697c718 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -140,14 +140,14 @@ static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
  *
  */
 int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
-			  __be32 saddr, __be32 daddr, struct ip_options *opt)
+			  __be32 saddr, __be32 daddr, struct ip_options_rcu *opt)
 {
 	struct inet_sock *inet = inet_sk(sk);
 	struct rtable *rt = skb_rtable(skb);
 	struct iphdr *iph;
 
 	/* Build the IP header. */
-	skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
+	skb_push(skb, sizeof(struct iphdr) + (opt ? opt->opt.optlen : 0));
 	skb_reset_network_header(skb);
 	iph = ip_hdr(skb);
 	iph->version = 4;
@@ -158,14 +158,14 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
 	else
 		iph->frag_off = 0;
 	iph->ttl = ip_select_ttl(inet, &rt->dst);
-	iph->daddr = rt->rt_dst;
-	iph->saddr = rt->rt_src;
+	iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
+	iph->saddr = saddr;
 	iph->protocol = sk->sk_protocol;
 	ip_select_ident(iph, &rt->dst, sk);
 
-	if (opt && opt->optlen) {
-		iph->ihl += opt->optlen>>2;
-		ip_options_build(skb, opt, daddr, rt, 0);
+	if (opt && opt->opt.optlen) {
+		iph->ihl += opt->opt.optlen>>2;
+		ip_options_build(skb, &opt->opt, daddr, rt, 0);
 	}
 
 	skb->priority = sk->sk_priority;
@@ -312,11 +312,12 @@ int ip_output(struct sk_buff *skb)
 	       !(IPCB(skb)->flags & IPSKB_REROUTED));
 }
 
-int ip_queue_xmit(struct sk_buff *skb)
+int ip_queue_xmit(struct sk_buff *skb, struct flowi *fl)
 {
 	struct sock *sk = skb->sk;
 	struct inet_sock *inet = inet_sk(sk);
-	struct ip_options *opt = inet->opt;
+	struct ip_options_rcu *inet_opt;
+	struct flowi4 *fl4;
 	struct rtable *rt;
 	struct iphdr *iph;
 	int res;
@@ -325,6 +326,8 @@ int ip_queue_xmit(struct sk_buff *skb)
 	 * f.e. by something like SCTP.
 	 */
 	rcu_read_lock();
+	inet_opt = rcu_dereference(inet->inet_opt);
+	fl4 = &fl->u.ip4;
 	rt = skb_rtable(skb);
 	if (rt != NULL)
 		goto packet_routed;
@@ -336,14 +339,14 @@ int ip_queue_xmit(struct sk_buff *skb)
 
 	/* Use correct destination address if we have options. */
 	daddr = inet->inet_daddr;
-	if(opt && opt->srr)
-		daddr = opt->faddr;
+	if (inet_opt && inet_opt->opt.srr)
+		daddr = inet_opt->opt.faddr;
 
 	/* If this fails, retransmit mechanism of transport layer will
 	 * keep trying until route appears or the connection times
 	 * itself out.
 	 */
-	rt = ip_route_output_ports(sock_net(sk), sk,
+	rt = ip_route_output_ports(sock_net(sk), fl4, sk,
 				   daddr, inet->inet_saddr,
 				   inet->inet_dport,
 				   inet->inet_sport,
@@ -357,11 +360,11 @@ int ip_queue_xmit(struct sk_buff *skb)
 	skb_dst_set_noref(skb, &rt->dst);
 
 packet_routed:
-	if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
+	if (inet_opt && inet_opt->opt.is_strictroute && fl4->daddr != rt->rt_gateway)
 		goto no_route;
 
 	/* OK, we know where to send it, allocate and build IP header. */
-	skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
+	skb_push(skb, sizeof(struct iphdr) + (inet_opt ? inet_opt->opt.optlen : 0));
 	skb_reset_network_header(skb);
 	iph = ip_hdr(skb);
 	*((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
@@ -371,13 +374,13 @@ packet_routed:
 		iph->frag_off = 0;
 	iph->ttl = ip_select_ttl(inet, &rt->dst);
 	iph->protocol = sk->sk_protocol;
-	iph->saddr = rt->rt_src;
-	iph->daddr = rt->rt_dst;
+	iph->saddr = fl4->saddr;
+	iph->daddr = fl4->daddr;
 	/* Transport layer set skb->h.foo itself. */
 
-	if (opt && opt->optlen) {
-		iph->ihl += opt->optlen >> 2;
-		ip_options_build(skb, opt, inet->inet_daddr, rt, 0);
+	if (inet_opt && inet_opt->opt.optlen) {
+		iph->ihl += inet_opt->opt.optlen >> 2;
+		ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0);
 	}
 
 	ip_select_ident_more(iph, &rt->dst, sk,
@@ -773,7 +776,9 @@ static inline int ip_ufo_append_data(struct sock *sk,
 			       (length - transhdrlen));
 }
 
-static int __ip_append_data(struct sock *sk, struct sk_buff_head *queue,
+static int __ip_append_data(struct sock *sk,
+			    struct flowi4 *fl4,
+			    struct sk_buff_head *queue,
 			    struct inet_cork *cork,
 			    int getfrag(void *from, char *to, int offset,
 					int len, int odd, struct sk_buff *skb),
@@ -805,7 +810,7 @@ static int __ip_append_data(struct sock *sk, struct sk_buff_head *queue,
 	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
 
 	if (cork->length + length > 0xFFFF - fragheaderlen) {
-		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport,
+		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
 			       mtu-exthdrlen);
 		return -EMSGSIZE;
 	}
@@ -1033,7 +1038,7 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
 			 struct ipcm_cookie *ipc, struct rtable **rtp)
 {
 	struct inet_sock *inet = inet_sk(sk);
-	struct ip_options *opt;
+	struct ip_options_rcu *opt;
 	struct rtable *rt;
 
 	/*
@@ -1047,7 +1052,7 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
 		if (unlikely(cork->opt == NULL))
 			return -ENOBUFS;
 		}
-		memcpy(cork->opt, opt, sizeof(struct ip_options) + opt->optlen);
+		memcpy(cork->opt, &opt->opt, sizeof(struct ip_options) + opt->opt.optlen);
 		cork->flags |= IPCORK_OPT;
 		cork->addr = ipc->addr;
 	}
@@ -1080,7 +1085,7 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
  *
  *	LATER: length must be adjusted by pad at tail, when it is required.
  */
-int ip_append_data(struct sock *sk,
+int ip_append_data(struct sock *sk, struct flowi4 *fl4,
 		   int getfrag(void *from, char *to, int offset, int len,
 			       int odd, struct sk_buff *skb),
 		   void *from, int length, int transhdrlen,
@@ -1094,24 +1099,25 @@ int ip_append_data(struct sock *sk,
 		return 0;
 
 	if (skb_queue_empty(&sk->sk_write_queue)) {
-		err = ip_setup_cork(sk, &inet->cork, ipc, rtp);
+		err = ip_setup_cork(sk, &inet->cork.base, ipc, rtp);
 		if (err)
 			return err;
 	} else {
 		transhdrlen = 0;
 	}
 
-	return __ip_append_data(sk, &sk->sk_write_queue, &inet->cork, getfrag,
+	return __ip_append_data(sk, fl4, &sk->sk_write_queue, &inet->cork.base, getfrag,
 				from, length, transhdrlen, flags);
 }
 
-ssize_t ip_append_page(struct sock *sk, struct page *page,
+ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
 		       int offset, size_t size, int flags)
 {
 	struct inet_sock *inet = inet_sk(sk);
 	struct sk_buff *skb;
 	struct rtable *rt;
 	struct ip_options *opt = NULL;
+	struct inet_cork *cork;
 	int hh_len;
 	int mtu;
 	int len;
@@ -1127,28 +1133,29 @@ ssize_t ip_append_page(struct sock *sk, struct page *page,
 	if (skb_queue_empty(&sk->sk_write_queue))
 		return -EINVAL;
 
-	rt = (struct rtable *)inet->cork.dst;
-	if (inet->cork.flags & IPCORK_OPT)
-		opt = inet->cork.opt;
+	cork = &inet->cork.base;
+	rt = (struct rtable *)cork->dst;
+	if (cork->flags & IPCORK_OPT)
+		opt = cork->opt;
 
 	if (!(rt->dst.dev->features&NETIF_F_SG))
 		return -EOPNOTSUPP;
 
 	hh_len = LL_RESERVED_SPACE(rt->dst.dev);
-	mtu = inet->cork.fragsize;
+	mtu = cork->fragsize;
 
 	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
 	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
 
-	if (inet->cork.length + size > 0xFFFF - fragheaderlen) {
-		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport, mtu);
+	if (cork->length + size > 0xFFFF - fragheaderlen) {
+		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport, mtu);
 		return -EMSGSIZE;
 	}
 
 	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
 		return -EINVAL;
 
-	inet->cork.length += size;
+	cork->length += size;
 	if ((size + skb->len > mtu) &&
 	    (sk->sk_protocol == IPPROTO_UDP) &&
 	    (rt->dst.dev->features & NETIF_F_UFO)) {
@@ -1243,7 +1250,7 @@ ssize_t ip_append_page(struct sock *sk, struct page *page,
 	return 0;
 
 error:
-	inet->cork.length -= size;
+	cork->length -= size;
 	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
 	return err;
 }
@@ -1262,6 +1269,7 @@ static void ip_cork_release(struct inet_cork *cork)
  *	and push them out.
  */
 struct sk_buff *__ip_make_skb(struct sock *sk,
+			      struct flowi4 *fl4,
 			      struct sk_buff_head *queue,
 			      struct inet_cork *cork)
 {
@@ -1319,17 +1327,18 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
 	iph = (struct iphdr *)skb->data;
 	iph->version = 4;
 	iph->ihl = 5;
-	if (opt) {
-		iph->ihl += opt->optlen>>2;
-		ip_options_build(skb, opt, cork->addr, rt, 0);
-	}
 	iph->tos = inet->tos;
 	iph->frag_off = df;
 	ip_select_ident(iph, &rt->dst, sk);
 	iph->ttl = ttl;
 	iph->protocol = sk->sk_protocol;
-	iph->saddr = rt->rt_src;
-	iph->daddr = rt->rt_dst;
+	iph->saddr = fl4->saddr;
+	iph->daddr = fl4->daddr;
+
+	if (opt) {
+		iph->ihl += opt->optlen>>2;
+		ip_options_build(skb, opt, cork->addr, rt, 0);
+	}
 
 	skb->priority = sk->sk_priority;
 	skb->mark = sk->sk_mark;
@@ -1365,11 +1374,11 @@ int ip_send_skb(struct sk_buff *skb)
 	return err;
 }
 
-int ip_push_pending_frames(struct sock *sk)
+int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4)
 {
 	struct sk_buff *skb;
 
-	skb = ip_finish_skb(sk);
+	skb = ip_finish_skb(sk, fl4);
 	if (!skb)
 		return 0;
 
@@ -1394,17 +1403,18 @@ static void __ip_flush_pending_frames(struct sock *sk,
 
 void ip_flush_pending_frames(struct sock *sk)
 {
-	__ip_flush_pending_frames(sk, &sk->sk_write_queue, &inet_sk(sk)->cork);
+	__ip_flush_pending_frames(sk, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
 }
 
 struct sk_buff *ip_make_skb(struct sock *sk,
+			    struct flowi4 *fl4,
 			    int getfrag(void *from, char *to, int offset,
 					int len, int odd, struct sk_buff *skb),
 			    void *from, int length, int transhdrlen,
 			    struct ipcm_cookie *ipc, struct rtable **rtp,
 			    unsigned int flags)
 {
-	struct inet_cork cork = {};
+	struct inet_cork cork;
 	struct sk_buff_head queue;
 	int err;
 
@@ -1413,18 +1423,21 @@ struct sk_buff *ip_make_skb(struct sock *sk,
 
 	__skb_queue_head_init(&queue);
 
+	cork.flags = 0;
+	cork.addr = 0;
+	cork.opt = NULL;
 	err = ip_setup_cork(sk, &cork, ipc, rtp);
 	if (err)
 		return ERR_PTR(err);
 
-	err = __ip_append_data(sk, &queue, &cork, getfrag,
+	err = __ip_append_data(sk, fl4, &queue, &cork, getfrag,
 			       from, length, transhdrlen, flags);
 	if (err) {
 		__ip_flush_pending_frames(sk, &queue, &cork);
 		return ERR_PTR(err);
 	}
 
-	return __ip_make_skb(sk, &queue, &cork);
+	return __ip_make_skb(sk, fl4, &queue, &cork);
 }
 
 /*
@@ -1447,48 +1460,39 @@ static int ip_reply_glue_bits(void *dptr, char *to, int offset,
1447 * Should run single threaded per socket because it uses the sock 1460 * Should run single threaded per socket because it uses the sock
1448 * structure to pass arguments. 1461 * structure to pass arguments.
1449 */ 1462 */
1450void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg, 1463void ip_send_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr,
1451 unsigned int len) 1464 struct ip_reply_arg *arg, unsigned int len)
1452{ 1465{
1453 struct inet_sock *inet = inet_sk(sk); 1466 struct inet_sock *inet = inet_sk(sk);
1454 struct { 1467 struct ip_options_data replyopts;
1455 struct ip_options opt;
1456 char data[40];
1457 } replyopts;
1458 struct ipcm_cookie ipc; 1468 struct ipcm_cookie ipc;
1459 __be32 daddr; 1469 struct flowi4 fl4;
1460 struct rtable *rt = skb_rtable(skb); 1470 struct rtable *rt = skb_rtable(skb);
1461 1471
1462 if (ip_options_echo(&replyopts.opt, skb)) 1472 if (ip_options_echo(&replyopts.opt.opt, skb))
1463 return; 1473 return;
1464 1474
1465 daddr = ipc.addr = rt->rt_src; 1475 ipc.addr = daddr;
1466 ipc.opt = NULL; 1476 ipc.opt = NULL;
1467 ipc.tx_flags = 0; 1477 ipc.tx_flags = 0;
1468 1478
1469 if (replyopts.opt.optlen) { 1479 if (replyopts.opt.opt.optlen) {
1470 ipc.opt = &replyopts.opt; 1480 ipc.opt = &replyopts.opt;
1471 1481
1472 if (ipc.opt->srr) 1482 if (replyopts.opt.opt.srr)
1473 daddr = replyopts.opt.faddr; 1483 daddr = replyopts.opt.opt.faddr;
1474 } 1484 }
1475 1485
1476 { 1486 flowi4_init_output(&fl4, arg->bound_dev_if, 0,
1477 struct flowi4 fl4 = { 1487 RT_TOS(ip_hdr(skb)->tos),
1478 .flowi4_oif = arg->bound_dev_if, 1488 RT_SCOPE_UNIVERSE, sk->sk_protocol,
1479 .daddr = daddr, 1489 ip_reply_arg_flowi_flags(arg),
1480 .saddr = rt->rt_spec_dst, 1490 daddr, rt->rt_spec_dst,
1481 .flowi4_tos = RT_TOS(ip_hdr(skb)->tos), 1491 tcp_hdr(skb)->source, tcp_hdr(skb)->dest);
1482 .fl4_sport = tcp_hdr(skb)->dest, 1492 security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
1483 .fl4_dport = tcp_hdr(skb)->source, 1493 rt = ip_route_output_key(sock_net(sk), &fl4);
1484 .flowi4_proto = sk->sk_protocol, 1494 if (IS_ERR(rt))
1485 .flowi4_flags = ip_reply_arg_flowi_flags(arg), 1495 return;
1486 };
1487 security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
1488 rt = ip_route_output_key(sock_net(sk), &fl4);
1489 if (IS_ERR(rt))
1490 return;
1491 }
1492 1496
1493 /* And let IP do all the hard work. 1497 /* And let IP do all the hard work.
1494 1498
@@ -1501,7 +1505,7 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *ar
1501 sk->sk_priority = skb->priority; 1505 sk->sk_priority = skb->priority;
1502 sk->sk_protocol = ip_hdr(skb)->protocol; 1506 sk->sk_protocol = ip_hdr(skb)->protocol;
1503 sk->sk_bound_dev_if = arg->bound_dev_if; 1507 sk->sk_bound_dev_if = arg->bound_dev_if;
1504 ip_append_data(sk, ip_reply_glue_bits, arg->iov->iov_base, len, 0, 1508 ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
1505 &ipc, &rt, MSG_DONTWAIT); 1509 &ipc, &rt, MSG_DONTWAIT);
1506 if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) { 1510 if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
1507 if (arg->csumoffset >= 0) 1511 if (arg->csumoffset >= 0)
@@ -1509,7 +1513,7 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *ar
1509 arg->csumoffset) = csum_fold(csum_add(skb->csum, 1513 arg->csumoffset) = csum_fold(csum_add(skb->csum,
1510 arg->csum)); 1514 arg->csum));
1511 skb->ip_summed = CHECKSUM_NONE; 1515 skb->ip_summed = CHECKSUM_NONE;
1512 ip_push_pending_frames(sk); 1516 ip_push_pending_frames(sk, &fl4);
1513 } 1517 }
1514 1518
1515 bh_unlock_sock(sk); 1519 bh_unlock_sock(sk);
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 3948c86e59ca..ab0c9efd1efa 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -131,7 +131,7 @@ static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
131static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb) 131static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
132{ 132{
133 struct sockaddr_in sin; 133 struct sockaddr_in sin;
134 struct iphdr *iph = ip_hdr(skb); 134 const struct iphdr *iph = ip_hdr(skb);
135 __be16 *ports = (__be16 *)skb_transport_header(skb); 135 __be16 *ports = (__be16 *)skb_transport_header(skb);
136 136
137 if (skb_transport_offset(skb) + 4 > skb->len) 137 if (skb_transport_offset(skb) + 4 > skb->len)
@@ -451,6 +451,11 @@ out:
451} 451}
452 452
453 453
454static void opt_kfree_rcu(struct rcu_head *head)
455{
456 kfree(container_of(head, struct ip_options_rcu, rcu));
457}
458
454/* 459/*
455 * Socket option code for IP. This is the end of the line after any 460 * Socket option code for IP. This is the end of the line after any
456 * TCP,UDP etc options on an IP socket. 461 * TCP,UDP etc options on an IP socket.
@@ -497,13 +502,16 @@ static int do_ip_setsockopt(struct sock *sk, int level,
497 switch (optname) { 502 switch (optname) {
498 case IP_OPTIONS: 503 case IP_OPTIONS:
499 { 504 {
500 struct ip_options *opt = NULL; 505 struct ip_options_rcu *old, *opt = NULL;
506
501 if (optlen > 40) 507 if (optlen > 40)
502 goto e_inval; 508 goto e_inval;
503 err = ip_options_get_from_user(sock_net(sk), &opt, 509 err = ip_options_get_from_user(sock_net(sk), &opt,
504 optval, optlen); 510 optval, optlen);
505 if (err) 511 if (err)
506 break; 512 break;
513 old = rcu_dereference_protected(inet->inet_opt,
514 sock_owned_by_user(sk));
507 if (inet->is_icsk) { 515 if (inet->is_icsk) {
508 struct inet_connection_sock *icsk = inet_csk(sk); 516 struct inet_connection_sock *icsk = inet_csk(sk);
509#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 517#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
@@ -512,17 +520,18 @@ static int do_ip_setsockopt(struct sock *sk, int level,
512 (TCPF_LISTEN | TCPF_CLOSE)) && 520 (TCPF_LISTEN | TCPF_CLOSE)) &&
513 inet->inet_daddr != LOOPBACK4_IPV6)) { 521 inet->inet_daddr != LOOPBACK4_IPV6)) {
514#endif 522#endif
515 if (inet->opt) 523 if (old)
516 icsk->icsk_ext_hdr_len -= inet->opt->optlen; 524 icsk->icsk_ext_hdr_len -= old->opt.optlen;
517 if (opt) 525 if (opt)
518 icsk->icsk_ext_hdr_len += opt->optlen; 526 icsk->icsk_ext_hdr_len += opt->opt.optlen;
519 icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie); 527 icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
520#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 528#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
521 } 529 }
522#endif 530#endif
523 } 531 }
524 opt = xchg(&inet->opt, opt); 532 rcu_assign_pointer(inet->inet_opt, opt);
525 kfree(opt); 533 if (old)
534 call_rcu(&old->rcu, opt_kfree_rcu);
526 break; 535 break;
527 } 536 }
528 case IP_PKTINFO: 537 case IP_PKTINFO:
@@ -1081,12 +1090,16 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
1081 case IP_OPTIONS: 1090 case IP_OPTIONS:
1082 { 1091 {
1083 unsigned char optbuf[sizeof(struct ip_options)+40]; 1092 unsigned char optbuf[sizeof(struct ip_options)+40];
1084 struct ip_options * opt = (struct ip_options *)optbuf; 1093 struct ip_options *opt = (struct ip_options *)optbuf;
1094 struct ip_options_rcu *inet_opt;
1095
1096 inet_opt = rcu_dereference_protected(inet->inet_opt,
1097 sock_owned_by_user(sk));
1085 opt->optlen = 0; 1098 opt->optlen = 0;
1086 if (inet->opt) 1099 if (inet_opt)
1087 memcpy(optbuf, inet->opt, 1100 memcpy(optbuf, &inet_opt->opt,
1088 sizeof(struct ip_options)+ 1101 sizeof(struct ip_options) +
1089 inet->opt->optlen); 1102 inet_opt->opt.optlen);
1090 release_sock(sk); 1103 release_sock(sk);
1091 1104
1092 if (opt->optlen == 0) 1105 if (opt->optlen == 0)
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index 629067571f02..c857f6f49b03 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -27,7 +27,7 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
27{ 27{
28 struct net *net = dev_net(skb->dev); 28 struct net *net = dev_net(skb->dev);
29 __be32 spi; 29 __be32 spi;
30 struct iphdr *iph = (struct iphdr *)skb->data; 30 const struct iphdr *iph = (const struct iphdr *)skb->data;
31 struct ip_comp_hdr *ipch = (struct ip_comp_hdr *)(skb->data+(iph->ihl<<2)); 31 struct ip_comp_hdr *ipch = (struct ip_comp_hdr *)(skb->data+(iph->ihl<<2));
32 struct xfrm_state *x; 32 struct xfrm_state *x;
33 33
@@ -36,7 +36,7 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
36 return; 36 return;
37 37
38 spi = htonl(ntohs(ipch->cpi)); 38 spi = htonl(ntohs(ipch->cpi));
39 x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, 39 x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
40 spi, IPPROTO_COMP, AF_INET); 40 spi, IPPROTO_COMP, AF_INET);
41 if (!x) 41 if (!x)
42 return; 42 return;
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index cbff2ecccf3d..ab7e5542c1cf 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -87,8 +87,8 @@
87#endif 87#endif
88 88
89/* Define the friendly delay before and after opening net devices */ 89/* Define the friendly delay before and after opening net devices */
90#define CONF_PRE_OPEN 500 /* Before opening: 1/2 second */ 90#define CONF_POST_OPEN 10 /* After opening: 10 msecs */
91#define CONF_POST_OPEN 1 /* After opening: 1 second */ 91#define CONF_CARRIER_TIMEOUT 120000 /* Wait for carrier timeout */
92 92
93/* Define the timeout for waiting for a DHCP/BOOTP/RARP reply */ 93/* Define the timeout for waiting for a DHCP/BOOTP/RARP reply */
94#define CONF_OPEN_RETRIES 2 /* (Re)open devices twice */ 94#define CONF_OPEN_RETRIES 2 /* (Re)open devices twice */
@@ -188,14 +188,14 @@ struct ic_device {
188static struct ic_device *ic_first_dev __initdata = NULL;/* List of open device */ 188static struct ic_device *ic_first_dev __initdata = NULL;/* List of open device */
189static struct net_device *ic_dev __initdata = NULL; /* Selected device */ 189static struct net_device *ic_dev __initdata = NULL; /* Selected device */
190 190
191static bool __init ic_device_match(struct net_device *dev) 191static bool __init ic_is_init_dev(struct net_device *dev)
192{ 192{
193 if (user_dev_name[0] ? !strcmp(dev->name, user_dev_name) : 193 if (dev->flags & IFF_LOOPBACK)
194 return false;
195 return user_dev_name[0] ? !strcmp(dev->name, user_dev_name) :
194 (!(dev->flags & IFF_LOOPBACK) && 196 (!(dev->flags & IFF_LOOPBACK) &&
195 (dev->flags & (IFF_POINTOPOINT|IFF_BROADCAST)) && 197 (dev->flags & (IFF_POINTOPOINT|IFF_BROADCAST)) &&
196 strncmp(dev->name, "dummy", 5))) 198 strncmp(dev->name, "dummy", 5));
197 return true;
198 return false;
199} 199}
200 200
201static int __init ic_open_devs(void) 201static int __init ic_open_devs(void)
@@ -203,6 +203,7 @@ static int __init ic_open_devs(void)
203 struct ic_device *d, **last; 203 struct ic_device *d, **last;
204 struct net_device *dev; 204 struct net_device *dev;
205 unsigned short oflags; 205 unsigned short oflags;
206 unsigned long start;
206 207
207 last = &ic_first_dev; 208 last = &ic_first_dev;
208 rtnl_lock(); 209 rtnl_lock();
@@ -216,9 +217,7 @@ static int __init ic_open_devs(void)
216 } 217 }
217 218
218 for_each_netdev(&init_net, dev) { 219 for_each_netdev(&init_net, dev) {
219 if (dev->flags & IFF_LOOPBACK) 220 if (ic_is_init_dev(dev)) {
220 continue;
221 if (ic_device_match(dev)) {
222 int able = 0; 221 int able = 0;
223 if (dev->mtu >= 364) 222 if (dev->mtu >= 364)
224 able |= IC_BOOTP; 223 able |= IC_BOOTP;
@@ -252,6 +251,17 @@ static int __init ic_open_devs(void)
252 dev->name, able, d->xid)); 251 dev->name, able, d->xid));
253 } 252 }
254 } 253 }
254
255 /* wait for a carrier on at least one device */
256 start = jiffies;
257 while (jiffies - start < msecs_to_jiffies(CONF_CARRIER_TIMEOUT)) {
258 for_each_netdev(&init_net, dev)
259 if (ic_is_init_dev(dev) && netif_carrier_ok(dev))
260 goto have_carrier;
261
262 msleep(1);
263 }
264have_carrier:
255 rtnl_unlock(); 265 rtnl_unlock();
256 266
257 *last = NULL; 267 *last = NULL;
@@ -1324,14 +1334,13 @@ static int __init wait_for_devices(void)
1324{ 1334{
1325 int i; 1335 int i;
1326 1336
1327 msleep(CONF_PRE_OPEN);
1328 for (i = 0; i < DEVICE_WAIT_MAX; i++) { 1337 for (i = 0; i < DEVICE_WAIT_MAX; i++) {
1329 struct net_device *dev; 1338 struct net_device *dev;
1330 int found = 0; 1339 int found = 0;
1331 1340
1332 rtnl_lock(); 1341 rtnl_lock();
1333 for_each_netdev(&init_net, dev) { 1342 for_each_netdev(&init_net, dev) {
1334 if (ic_device_match(dev)) { 1343 if (ic_is_init_dev(dev)) {
1335 found = 1; 1344 found = 1;
1336 break; 1345 break;
1337 } 1346 }
@@ -1378,7 +1387,7 @@ static int __init ip_auto_config(void)
1378 return err; 1387 return err;
1379 1388
1380 /* Give drivers a chance to settle */ 1389 /* Give drivers a chance to settle */
1381 ssleep(CONF_POST_OPEN); 1390 msleep(CONF_POST_OPEN);
1382 1391
1383 /* 1392 /*
1384 * If the config information is insufficient (e.g., our IP address or 1393 * If the config information is insufficient (e.g., our IP address or
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index bfc17c5914e7..378b20b7ca6e 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -276,11 +276,6 @@ static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
276 276
277 dev_net_set(dev, net); 277 dev_net_set(dev, net);
278 278
279 if (strchr(name, '%')) {
280 if (dev_alloc_name(dev, name) < 0)
281 goto failed_free;
282 }
283
284 nt = netdev_priv(dev); 279 nt = netdev_priv(dev);
285 nt->parms = *parms; 280 nt->parms = *parms;
286 281
@@ -319,7 +314,7 @@ static int ipip_err(struct sk_buff *skb, u32 info)
319 8 bytes of packet payload. It means, that precise relaying of 314 8 bytes of packet payload. It means, that precise relaying of
320 ICMP in the real Internet is absolutely infeasible. 315 ICMP in the real Internet is absolutely infeasible.
321 */ 316 */
322 struct iphdr *iph = (struct iphdr *)skb->data; 317 const struct iphdr *iph = (const struct iphdr *)skb->data;
323 const int type = icmp_hdr(skb)->type; 318 const int type = icmp_hdr(skb)->type;
324 const int code = icmp_hdr(skb)->code; 319 const int code = icmp_hdr(skb)->code;
325 struct ip_tunnel *t; 320 struct ip_tunnel *t;
@@ -433,15 +428,16 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
433{ 428{
434 struct ip_tunnel *tunnel = netdev_priv(dev); 429 struct ip_tunnel *tunnel = netdev_priv(dev);
435 struct pcpu_tstats *tstats; 430 struct pcpu_tstats *tstats;
436 struct iphdr *tiph = &tunnel->parms.iph; 431 const struct iphdr *tiph = &tunnel->parms.iph;
437 u8 tos = tunnel->parms.iph.tos; 432 u8 tos = tunnel->parms.iph.tos;
438 __be16 df = tiph->frag_off; 433 __be16 df = tiph->frag_off;
439 struct rtable *rt; /* Route to the other host */ 434 struct rtable *rt; /* Route to the other host */
440 struct net_device *tdev; /* Device to other host */ 435 struct net_device *tdev; /* Device to other host */
441 struct iphdr *old_iph = ip_hdr(skb); 436 const struct iphdr *old_iph = ip_hdr(skb);
442 struct iphdr *iph; /* Our new IP header */ 437 struct iphdr *iph; /* Our new IP header */
443 unsigned int max_headroom; /* The extra header space needed */ 438 unsigned int max_headroom; /* The extra header space needed */
444 __be32 dst = tiph->daddr; 439 __be32 dst = tiph->daddr;
440 struct flowi4 fl4;
445 int mtu; 441 int mtu;
446 442
447 if (skb->protocol != htons(ETH_P_IP)) 443 if (skb->protocol != htons(ETH_P_IP))
@@ -460,7 +456,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
460 goto tx_error_icmp; 456 goto tx_error_icmp;
461 } 457 }
462 458
463 rt = ip_route_output_ports(dev_net(dev), NULL, 459 rt = ip_route_output_ports(dev_net(dev), &fl4, NULL,
464 dst, tiph->saddr, 460 dst, tiph->saddr,
465 0, 0, 461 0, 0,
466 IPPROTO_IPIP, RT_TOS(tos), 462 IPPROTO_IPIP, RT_TOS(tos),
@@ -549,8 +545,8 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
549 iph->frag_off = df; 545 iph->frag_off = df;
550 iph->protocol = IPPROTO_IPIP; 546 iph->protocol = IPPROTO_IPIP;
551 iph->tos = INET_ECN_encapsulate(tos, old_iph->tos); 547 iph->tos = INET_ECN_encapsulate(tos, old_iph->tos);
552 iph->daddr = rt->rt_dst; 548 iph->daddr = fl4.daddr;
553 iph->saddr = rt->rt_src; 549 iph->saddr = fl4.saddr;
554 550
555 if ((iph->ttl = tiph->ttl) == 0) 551 if ((iph->ttl = tiph->ttl) == 0)
556 iph->ttl = old_iph->ttl; 552 iph->ttl = old_iph->ttl;
@@ -572,19 +568,21 @@ static void ipip_tunnel_bind_dev(struct net_device *dev)
572{ 568{
573 struct net_device *tdev = NULL; 569 struct net_device *tdev = NULL;
574 struct ip_tunnel *tunnel; 570 struct ip_tunnel *tunnel;
575 struct iphdr *iph; 571 const struct iphdr *iph;
576 572
577 tunnel = netdev_priv(dev); 573 tunnel = netdev_priv(dev);
578 iph = &tunnel->parms.iph; 574 iph = &tunnel->parms.iph;
579 575
580 if (iph->daddr) { 576 if (iph->daddr) {
581 struct rtable *rt = ip_route_output_ports(dev_net(dev), NULL, 577 struct rtable *rt;
582 iph->daddr, iph->saddr, 578 struct flowi4 fl4;
583 0, 0, 579
584 IPPROTO_IPIP, 580 rt = ip_route_output_ports(dev_net(dev), &fl4, NULL,
585 RT_TOS(iph->tos), 581 iph->daddr, iph->saddr,
586 tunnel->parms.link); 582 0, 0,
587 583 IPPROTO_IPIP,
584 RT_TOS(iph->tos),
585 tunnel->parms.link);
588 if (!IS_ERR(rt)) { 586 if (!IS_ERR(rt)) {
589 tdev = rt->dst.dev; 587 tdev = rt->dst.dev;
590 ip_rt_put(rt); 588 ip_rt_put(rt);
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 1f62eaeb6de4..30a7763c400e 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1549,7 +1549,7 @@ static struct notifier_block ip_mr_notifier = {
1549static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr) 1549static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
1550{ 1550{
1551 struct iphdr *iph; 1551 struct iphdr *iph;
1552 struct iphdr *old_iph = ip_hdr(skb); 1552 const struct iphdr *old_iph = ip_hdr(skb);
1553 1553
1554 skb_push(skb, sizeof(struct iphdr)); 1554 skb_push(skb, sizeof(struct iphdr));
1555 skb->transport_header = skb->network_header; 1555 skb->transport_header = skb->network_header;
@@ -1595,6 +1595,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
1595 struct vif_device *vif = &mrt->vif_table[vifi]; 1595 struct vif_device *vif = &mrt->vif_table[vifi];
1596 struct net_device *dev; 1596 struct net_device *dev;
1597 struct rtable *rt; 1597 struct rtable *rt;
1598 struct flowi4 fl4;
1598 int encap = 0; 1599 int encap = 0;
1599 1600
1600 if (vif->dev == NULL) 1601 if (vif->dev == NULL)
@@ -1612,7 +1613,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
1612#endif 1613#endif
1613 1614
1614 if (vif->flags & VIFF_TUNNEL) { 1615 if (vif->flags & VIFF_TUNNEL) {
1615 rt = ip_route_output_ports(net, NULL, 1616 rt = ip_route_output_ports(net, &fl4, NULL,
1616 vif->remote, vif->local, 1617 vif->remote, vif->local,
1617 0, 0, 1618 0, 0,
1618 IPPROTO_IPIP, 1619 IPPROTO_IPIP,
@@ -1621,7 +1622,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
1621 goto out_free; 1622 goto out_free;
1622 encap = sizeof(struct iphdr); 1623 encap = sizeof(struct iphdr);
1623 } else { 1624 } else {
1624 rt = ip_route_output_ports(net, NULL, iph->daddr, 0, 1625 rt = ip_route_output_ports(net, &fl4, NULL, iph->daddr, 0,
1625 0, 0, 1626 0, 0,
1626 IPPROTO_IPIP, 1627 IPPROTO_IPIP,
1627 RT_TOS(iph->tos), vif->link); 1628 RT_TOS(iph->tos), vif->link);
@@ -1788,12 +1789,14 @@ dont_forward:
1788 return 0; 1789 return 0;
1789} 1790}
1790 1791
1791static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct rtable *rt) 1792static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
1792{ 1793{
1794 struct rtable *rt = skb_rtable(skb);
1795 struct iphdr *iph = ip_hdr(skb);
1793 struct flowi4 fl4 = { 1796 struct flowi4 fl4 = {
1794 .daddr = rt->rt_key_dst, 1797 .daddr = iph->daddr,
1795 .saddr = rt->rt_key_src, 1798 .saddr = iph->saddr,
1796 .flowi4_tos = rt->rt_tos, 1799 .flowi4_tos = iph->tos,
1797 .flowi4_oif = rt->rt_oif, 1800 .flowi4_oif = rt->rt_oif,
1798 .flowi4_iif = rt->rt_iif, 1801 .flowi4_iif = rt->rt_iif,
1799 .flowi4_mark = rt->rt_mark, 1802 .flowi4_mark = rt->rt_mark,
@@ -1825,7 +1828,7 @@ int ip_mr_input(struct sk_buff *skb)
1825 if (IPCB(skb)->flags & IPSKB_FORWARDED) 1828 if (IPCB(skb)->flags & IPSKB_FORWARDED)
1826 goto dont_forward; 1829 goto dont_forward;
1827 1830
1828 mrt = ipmr_rt_fib_lookup(net, skb_rtable(skb)); 1831 mrt = ipmr_rt_fib_lookup(net, skb);
1829 if (IS_ERR(mrt)) { 1832 if (IS_ERR(mrt)) {
1830 kfree_skb(skb); 1833 kfree_skb(skb);
1831 return PTR_ERR(mrt); 1834 return PTR_ERR(mrt);
@@ -1957,7 +1960,7 @@ int pim_rcv_v1(struct sk_buff *skb)
1957 1960
1958 pim = igmp_hdr(skb); 1961 pim = igmp_hdr(skb);
1959 1962
1960 mrt = ipmr_rt_fib_lookup(net, skb_rtable(skb)); 1963 mrt = ipmr_rt_fib_lookup(net, skb);
1961 if (IS_ERR(mrt)) 1964 if (IS_ERR(mrt))
1962 goto drop; 1965 goto drop;
1963 if (!mrt->mroute_do_pim || 1966 if (!mrt->mroute_do_pim ||
@@ -1989,7 +1992,7 @@ static int pim_rcv(struct sk_buff *skb)
1989 csum_fold(skb_checksum(skb, 0, skb->len, 0)))) 1992 csum_fold(skb_checksum(skb, 0, skb->len, 0))))
1990 goto drop; 1993 goto drop;
1991 1994
1992 mrt = ipmr_rt_fib_lookup(net, skb_rtable(skb)); 1995 mrt = ipmr_rt_fib_lookup(net, skb);
1993 if (IS_ERR(mrt)) 1996 if (IS_ERR(mrt))
1994 goto drop; 1997 goto drop;
1995 if (__pim_rcv(mrt, skb, sizeof(*pim))) { 1998 if (__pim_rcv(mrt, skb, sizeof(*pim))) {
@@ -2038,20 +2041,20 @@ rtattr_failure:
2038 return -EMSGSIZE; 2041 return -EMSGSIZE;
2039} 2042}
2040 2043
2041int ipmr_get_route(struct net *net, 2044int ipmr_get_route(struct net *net, struct sk_buff *skb,
2042 struct sk_buff *skb, struct rtmsg *rtm, int nowait) 2045 __be32 saddr, __be32 daddr,
2046 struct rtmsg *rtm, int nowait)
2043{ 2047{
2044 int err;
2045 struct mr_table *mrt;
2046 struct mfc_cache *cache; 2048 struct mfc_cache *cache;
2047 struct rtable *rt = skb_rtable(skb); 2049 struct mr_table *mrt;
2050 int err;
2048 2051
2049 mrt = ipmr_get_table(net, RT_TABLE_DEFAULT); 2052 mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
2050 if (mrt == NULL) 2053 if (mrt == NULL)
2051 return -ENOENT; 2054 return -ENOENT;
2052 2055
2053 rcu_read_lock(); 2056 rcu_read_lock();
2054 cache = ipmr_cache_find(mrt, rt->rt_src, rt->rt_dst); 2057 cache = ipmr_cache_find(mrt, saddr, daddr);
2055 2058
2056 if (cache == NULL) { 2059 if (cache == NULL) {
2057 struct sk_buff *skb2; 2060 struct sk_buff *skb2;
@@ -2084,8 +2087,8 @@ int ipmr_get_route(struct net *net,
2084 skb_reset_network_header(skb2); 2087 skb_reset_network_header(skb2);
2085 iph = ip_hdr(skb2); 2088 iph = ip_hdr(skb2);
2086 iph->ihl = sizeof(struct iphdr) >> 2; 2089 iph->ihl = sizeof(struct iphdr) >> 2;
2087 iph->saddr = rt->rt_src; 2090 iph->saddr = saddr;
2088 iph->daddr = rt->rt_dst; 2091 iph->daddr = daddr;
2089 iph->version = 0; 2092 iph->version = 0;
2090 err = ipmr_cache_unresolved(mrt, vif, skb2); 2093 err = ipmr_cache_unresolved(mrt, vif, skb2);
2091 read_unlock(&mrt_lock); 2094 read_unlock(&mrt_lock);
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 89bc7e66d598..fd7a3f68917f 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -260,6 +260,7 @@ unsigned int arpt_do_table(struct sk_buff *skb,
260 void *table_base; 260 void *table_base;
261 const struct xt_table_info *private; 261 const struct xt_table_info *private;
262 struct xt_action_param acpar; 262 struct xt_action_param acpar;
263 unsigned int addend;
263 264
264 if (!pskb_may_pull(skb, arp_hdr_len(skb->dev))) 265 if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
265 return NF_DROP; 266 return NF_DROP;
@@ -267,7 +268,8 @@ unsigned int arpt_do_table(struct sk_buff *skb,
267 indev = in ? in->name : nulldevname; 268 indev = in ? in->name : nulldevname;
268 outdev = out ? out->name : nulldevname; 269 outdev = out ? out->name : nulldevname;
269 270
270 xt_info_rdlock_bh(); 271 local_bh_disable();
272 addend = xt_write_recseq_begin();
271 private = table->private; 273 private = table->private;
272 table_base = private->entries[smp_processor_id()]; 274 table_base = private->entries[smp_processor_id()];
273 275
@@ -338,7 +340,8 @@ unsigned int arpt_do_table(struct sk_buff *skb,
338 /* Verdict */ 340 /* Verdict */
339 break; 341 break;
340 } while (!acpar.hotdrop); 342 } while (!acpar.hotdrop);
341 xt_info_rdunlock_bh(); 343 xt_write_recseq_end(addend);
344 local_bh_enable();
342 345
343 if (acpar.hotdrop) 346 if (acpar.hotdrop)
344 return NF_DROP; 347 return NF_DROP;
@@ -712,7 +715,7 @@ static void get_counters(const struct xt_table_info *t,
712 unsigned int i; 715 unsigned int i;
713 716
714 for_each_possible_cpu(cpu) { 717 for_each_possible_cpu(cpu) {
715 seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock; 718 seqcount_t *s = &per_cpu(xt_recseq, cpu);
716 719
717 i = 0; 720 i = 0;
718 xt_entry_foreach(iter, t->entries[cpu], t->size) { 721 xt_entry_foreach(iter, t->entries[cpu], t->size) {
@@ -720,10 +723,10 @@ static void get_counters(const struct xt_table_info *t,
720 unsigned int start; 723 unsigned int start;
721 724
722 do { 725 do {
723 start = read_seqbegin(lock); 726 start = read_seqcount_begin(s);
724 bcnt = iter->counters.bcnt; 727 bcnt = iter->counters.bcnt;
725 pcnt = iter->counters.pcnt; 728 pcnt = iter->counters.pcnt;
726 } while (read_seqretry(lock, start)); 729 } while (read_seqcount_retry(s, start));
727 730
728 ADD_COUNTER(counters[i], bcnt, pcnt); 731 ADD_COUNTER(counters[i], bcnt, pcnt);
729 ++i; 732 ++i;
@@ -1115,6 +1118,7 @@ static int do_add_counters(struct net *net, const void __user *user,
1115 int ret = 0; 1118 int ret = 0;
1116 void *loc_cpu_entry; 1119 void *loc_cpu_entry;
1117 struct arpt_entry *iter; 1120 struct arpt_entry *iter;
1121 unsigned int addend;
1118#ifdef CONFIG_COMPAT 1122#ifdef CONFIG_COMPAT
1119 struct compat_xt_counters_info compat_tmp; 1123 struct compat_xt_counters_info compat_tmp;
1120 1124
@@ -1171,12 +1175,12 @@ static int do_add_counters(struct net *net, const void __user *user,
1171 /* Choose the copy that is on our node */ 1175 /* Choose the copy that is on our node */
1172 curcpu = smp_processor_id(); 1176 curcpu = smp_processor_id();
1173 loc_cpu_entry = private->entries[curcpu]; 1177 loc_cpu_entry = private->entries[curcpu];
1174 xt_info_wrlock(curcpu); 1178 addend = xt_write_recseq_begin();
1175 xt_entry_foreach(iter, loc_cpu_entry, private->size) { 1179 xt_entry_foreach(iter, loc_cpu_entry, private->size) {
1176 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt); 1180 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
1177 ++i; 1181 ++i;
1178 } 1182 }
1179 xt_info_wrunlock(curcpu); 1183 xt_write_recseq_end(addend);
1180 unlock_up_free: 1184 unlock_up_free:
1181 local_bh_enable(); 1185 local_bh_enable();
1182 xt_table_unlock(t); 1186 xt_table_unlock(t);
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 704915028009..764743843503 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -68,15 +68,6 @@ void *ipt_alloc_initial_table(const struct xt_table *info)
68} 68}
69EXPORT_SYMBOL_GPL(ipt_alloc_initial_table); 69EXPORT_SYMBOL_GPL(ipt_alloc_initial_table);
70 70
71/*
72 We keep a set of rules for each CPU, so we can avoid write-locking
73 them in the softirq when updating the counters and therefore
74 only need to read-lock in the softirq; doing a write_lock_bh() in user
75 context stops packets coming through and allows user context to read
76 the counters or update the rules.
77
78 Hence the start of any table is given by get_table() below. */
79
80/* Returns whether matches rule or not. */ 71/* Returns whether matches rule or not. */
81/* Performance critical - called for every packet */ 72/* Performance critical - called for every packet */
82static inline bool 73static inline bool
@@ -311,6 +302,7 @@ ipt_do_table(struct sk_buff *skb,
311 unsigned int *stackptr, origptr, cpu; 302 unsigned int *stackptr, origptr, cpu;
312 const struct xt_table_info *private; 303 const struct xt_table_info *private;
313 struct xt_action_param acpar; 304 struct xt_action_param acpar;
305 unsigned int addend;
314 306
315 /* Initialization */ 307 /* Initialization */
316 ip = ip_hdr(skb); 308 ip = ip_hdr(skb);
@@ -331,7 +323,8 @@ ipt_do_table(struct sk_buff *skb,
331 acpar.hooknum = hook; 323 acpar.hooknum = hook;
332 324
333 IP_NF_ASSERT(table->valid_hooks & (1 << hook)); 325 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
334 xt_info_rdlock_bh(); 326 local_bh_disable();
327 addend = xt_write_recseq_begin();
335 private = table->private; 328 private = table->private;
336 cpu = smp_processor_id(); 329 cpu = smp_processor_id();
337 table_base = private->entries[cpu]; 330 table_base = private->entries[cpu];
@@ -430,7 +423,9 @@ ipt_do_table(struct sk_buff *skb,
430 pr_debug("Exiting %s; resetting sp from %u to %u\n", 423 pr_debug("Exiting %s; resetting sp from %u to %u\n",
431 __func__, *stackptr, origptr); 424 __func__, *stackptr, origptr);
432 *stackptr = origptr; 425 *stackptr = origptr;
433 xt_info_rdunlock_bh(); 426 xt_write_recseq_end(addend);
427 local_bh_enable();
428
434#ifdef DEBUG_ALLOW_ALL 429#ifdef DEBUG_ALLOW_ALL
435 return NF_ACCEPT; 430 return NF_ACCEPT;
436#else 431#else
@@ -886,7 +881,7 @@ get_counters(const struct xt_table_info *t,
886 unsigned int i; 881 unsigned int i;
887 882
888 for_each_possible_cpu(cpu) { 883 for_each_possible_cpu(cpu) {
889 seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock; 884 seqcount_t *s = &per_cpu(xt_recseq, cpu);
890 885
891 i = 0; 886 i = 0;
892 xt_entry_foreach(iter, t->entries[cpu], t->size) { 887 xt_entry_foreach(iter, t->entries[cpu], t->size) {
@@ -894,10 +889,10 @@ get_counters(const struct xt_table_info *t,
894 unsigned int start; 889 unsigned int start;
895 890
896 do { 891 do {
897 start = read_seqbegin(lock); 892 start = read_seqcount_begin(s);
898 bcnt = iter->counters.bcnt; 893 bcnt = iter->counters.bcnt;
899 pcnt = iter->counters.pcnt; 894 pcnt = iter->counters.pcnt;
900 } while (read_seqretry(lock, start)); 895 } while (read_seqcount_retry(s, start));
901 896
902 ADD_COUNTER(counters[i], bcnt, pcnt); 897 ADD_COUNTER(counters[i], bcnt, pcnt);
903 ++i; /* macro does multi eval of i */ 898 ++i; /* macro does multi eval of i */
@@ -1312,6 +1307,7 @@ do_add_counters(struct net *net, const void __user *user,
1312 int ret = 0; 1307 int ret = 0;
1313 void *loc_cpu_entry; 1308 void *loc_cpu_entry;
1314 struct ipt_entry *iter; 1309 struct ipt_entry *iter;
1310 unsigned int addend;
1315#ifdef CONFIG_COMPAT 1311#ifdef CONFIG_COMPAT
1316 struct compat_xt_counters_info compat_tmp; 1312 struct compat_xt_counters_info compat_tmp;
1317 1313
@@ -1368,12 +1364,12 @@ do_add_counters(struct net *net, const void __user *user,
1368 /* Choose the copy that is on our node */ 1364 /* Choose the copy that is on our node */
1369 curcpu = smp_processor_id(); 1365 curcpu = smp_processor_id();
1370 loc_cpu_entry = private->entries[curcpu]; 1366 loc_cpu_entry = private->entries[curcpu];
1371 xt_info_wrlock(curcpu); 1367 addend = xt_write_recseq_begin();
1372 xt_entry_foreach(iter, loc_cpu_entry, private->size) { 1368 xt_entry_foreach(iter, loc_cpu_entry, private->size) {
1373 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt); 1369 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
1374 ++i; 1370 ++i;
1375 } 1371 }
1376 xt_info_wrunlock(curcpu); 1372 xt_write_recseq_end(addend);
1377 unlock_up_free: 1373 unlock_up_free:
1378 local_bh_enable(); 1374 local_bh_enable();
1379 xt_table_unlock(t); 1375 xt_table_unlock(t);
diff --git a/net/ipv4/netfilter/nf_nat_helper.c b/net/ipv4/netfilter/nf_nat_helper.c
index 31427fb57aa8..99cfa28b6d38 100644
--- a/net/ipv4/netfilter/nf_nat_helper.c
+++ b/net/ipv4/netfilter/nf_nat_helper.c
@@ -153,7 +153,7 @@ void nf_nat_set_seq_adjust(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
153} 153}
154EXPORT_SYMBOL_GPL(nf_nat_set_seq_adjust); 154EXPORT_SYMBOL_GPL(nf_nat_set_seq_adjust);
155 155
156static void nf_nat_csum(struct sk_buff *skb, struct iphdr *iph, void *data, 156static void nf_nat_csum(struct sk_buff *skb, const struct iphdr *iph, void *data,
157 int datalen, __sum16 *check, int oldlen) 157 int datalen, __sum16 *check, int oldlen)
158{ 158{
159 struct rtable *rt = skb_rtable(skb); 159 struct rtable *rt = skb_rtable(skb);
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
new file mode 100644
index 000000000000..1f3bb11490c9
--- /dev/null
+++ b/net/ipv4/ping.c
@@ -0,0 +1,935 @@
1/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * "Ping" sockets
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 *
13 * Based on ipv4/udp.c code.
14 *
15 * Authors: Vasiliy Kulikov / Openwall (for Linux 2.6),
16 * Pavel Kankovsky (for Linux 2.4.32)
17 *
18 * Pavel gave all rights to bugs to Vasiliy,
19 * none of the bugs are Pavel's now.
20 *
21 */
22
23#include <asm/system.h>
24#include <linux/uaccess.h>
25#include <linux/types.h>
26#include <linux/fcntl.h>
27#include <linux/socket.h>
28#include <linux/sockios.h>
29#include <linux/in.h>
30#include <linux/errno.h>
31#include <linux/timer.h>
32#include <linux/mm.h>
33#include <linux/inet.h>
34#include <linux/netdevice.h>
35#include <net/snmp.h>
36#include <net/ip.h>
37#include <net/ipv6.h>
38#include <net/icmp.h>
39#include <net/protocol.h>
40#include <linux/skbuff.h>
41#include <linux/proc_fs.h>
42#include <net/sock.h>
43#include <net/ping.h>
44#include <net/icmp.h>
45#include <net/udp.h>
46#include <net/route.h>
47#include <net/inet_common.h>
48#include <net/checksum.h>
49
50
51static struct ping_table ping_table;
52
53static u16 ping_port_rover;
54
55static inline int ping_hashfn(struct net *net, unsigned num, unsigned mask)
56{
57 int res = (num + net_hash_mix(net)) & mask;
58 pr_debug("hash(%d) = %d\n", num, res);
59 return res;
60}
61
62static inline struct hlist_nulls_head *ping_hashslot(struct ping_table *table,
63 struct net *net, unsigned num)
64{
65 return &table->hash[ping_hashfn(net, num, PING_HTABLE_MASK)];
66}
67
68static int ping_v4_get_port(struct sock *sk, unsigned short ident)
69{
70 struct hlist_nulls_node *node;
71 struct hlist_nulls_head *hlist;
72 struct inet_sock *isk, *isk2;
73 struct sock *sk2 = NULL;
74
75 isk = inet_sk(sk);
76 write_lock_bh(&ping_table.lock);
77 if (ident == 0) {
78 u32 i;
79 u16 result = ping_port_rover + 1;
80
81 for (i = 0; i < (1L << 16); i++, result++) {
82 if (!result)
83 result++; /* avoid zero */
84 hlist = ping_hashslot(&ping_table, sock_net(sk),
85 result);
86 ping_portaddr_for_each_entry(sk2, node, hlist) {
87 isk2 = inet_sk(sk2);
88
89 if (isk2->inet_num == result)
90 goto next_port;
91 }
92
93 /* found */
94 ping_port_rover = ident = result;
95 break;
96next_port:
97 ;
98 }
99 if (i >= (1L << 16))
100 goto fail;
101 } else {
102 hlist = ping_hashslot(&ping_table, sock_net(sk), ident);
103 ping_portaddr_for_each_entry(sk2, node, hlist) {
104 isk2 = inet_sk(sk2);
105
106 if ((isk2->inet_num == ident) &&
107 (sk2 != sk) &&
108 (!sk2->sk_reuse || !sk->sk_reuse))
109 goto fail;
110 }
111 }
112
113 pr_debug("found port/ident = %d\n", ident);
114 isk->inet_num = ident;
115 if (sk_unhashed(sk)) {
116 pr_debug("was not hashed\n");
117 sock_hold(sk);
118 hlist_nulls_add_head(&sk->sk_nulls_node, hlist);
119 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
120 }
121 write_unlock_bh(&ping_table.lock);
122 return 0;
123
124fail:
125 write_unlock_bh(&ping_table.lock);
126 return 1;
127}
128
129static void ping_v4_hash(struct sock *sk)
130{
131 pr_debug("ping_v4_hash(sk->port=%u)\n", inet_sk(sk)->inet_num);
132 BUG(); /* "Please do not press this button again." */
133}
134
135static void ping_v4_unhash(struct sock *sk)
136{
137 struct inet_sock *isk = inet_sk(sk);
138 pr_debug("ping_v4_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num);
139 if (sk_hashed(sk)) {
140 struct hlist_nulls_head *hslot;
141
142 hslot = ping_hashslot(&ping_table, sock_net(sk), isk->inet_num);
143 write_lock_bh(&ping_table.lock);
144 hlist_nulls_del(&sk->sk_nulls_node);
145 sock_put(sk);
146 isk->inet_num = isk->inet_sport = 0;
147 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
148 write_unlock_bh(&ping_table.lock);
149 }
150}
151
152static struct sock *ping_v4_lookup(struct net *net, u32 saddr, u32 daddr,
153 u16 ident, int dif)
154{
155 struct hlist_nulls_head *hslot = ping_hashslot(&ping_table, net, ident);
156 struct sock *sk = NULL;
157 struct inet_sock *isk;
158 struct hlist_nulls_node *hnode;
159
160 pr_debug("try to find: num = %d, daddr = %ld, dif = %d\n",
161 (int)ident, (unsigned long)daddr, dif);
162 read_lock_bh(&ping_table.lock);
163
164 ping_portaddr_for_each_entry(sk, hnode, hslot) {
165 isk = inet_sk(sk);
166
167 pr_debug("found: %p: num = %d, daddr = %ld, dif = %d\n", sk,
168 (int)isk->inet_num, (unsigned long)isk->inet_rcv_saddr,
169 sk->sk_bound_dev_if);
170
171 pr_debug("iterate\n");
172 if (isk->inet_num != ident)
173 continue;
174 if (isk->inet_rcv_saddr && isk->inet_rcv_saddr != daddr)
175 continue;
176 if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)
177 continue;
178
179 sock_hold(sk);
180 goto exit;
181 }
182
183 sk = NULL;
184exit:
185 read_unlock_bh(&ping_table.lock);
186
187 return sk;
188}
189
190static void inet_get_ping_group_range_net(struct net *net, gid_t *low,
191 gid_t *high)
192{
193 gid_t *data = net->ipv4.sysctl_ping_group_range;
194 unsigned seq;
195 do {
196 seq = read_seqbegin(&sysctl_local_ports.lock);
197
198 *low = data[0];
199 *high = data[1];
200 } while (read_seqretry(&sysctl_local_ports.lock, seq));
201}
202
203
204static int ping_init_sock(struct sock *sk)
205{
206 struct net *net = sock_net(sk);
207 gid_t group = current_egid();
208 gid_t range[2];
209 struct group_info *group_info = get_current_groups();
210 int i, j, count = group_info->ngroups;
211
212 inet_get_ping_group_range_net(net, range, range+1);
213 if (range[0] <= group && group <= range[1])
214 return 0;
215
216 for (i = 0; i < group_info->nblocks; i++) {
217 int cp_count = min_t(int, NGROUPS_PER_BLOCK, count);
218
219 for (j = 0; j < cp_count; j++) {
220 group = group_info->blocks[i][j];
221 if (range[0] <= group && group <= range[1])
222 return 0;
223 }
224
225 count -= cp_count;
226 }
227
228 return -EACCES;
229}
230
231static void ping_close(struct sock *sk, long timeout)
232{
233 pr_debug("ping_close(sk=%p,sk->num=%u)\n",
234 inet_sk(sk), inet_sk(sk)->inet_num);
235 pr_debug("isk->refcnt = %d\n", sk->sk_refcnt.counter);
236
237 sk_common_release(sk);
238}
239
240/*
241 * We need our own bind because there are no privileged id's == local ports.
242 * Moreover, we don't allow binding to multi- and broadcast addresses.
243 */
244
245static int ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
246{
247 struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
248 struct inet_sock *isk = inet_sk(sk);
249 unsigned short snum;
250 int chk_addr_ret;
251 int err;
252
253 if (addr_len < sizeof(struct sockaddr_in))
254 return -EINVAL;
255
256 pr_debug("ping_v4_bind(sk=%p,sa_addr=%08x,sa_port=%d)\n",
257 sk, addr->sin_addr.s_addr, ntohs(addr->sin_port));
258
259 chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
260 if (addr->sin_addr.s_addr == INADDR_ANY)
261 chk_addr_ret = RTN_LOCAL;
262
263 if ((sysctl_ip_nonlocal_bind == 0 &&
264 isk->freebind == 0 && isk->transparent == 0 &&
265 chk_addr_ret != RTN_LOCAL) ||
266 chk_addr_ret == RTN_MULTICAST ||
267 chk_addr_ret == RTN_BROADCAST)
268 return -EADDRNOTAVAIL;
269
270 lock_sock(sk);
271
272 err = -EINVAL;
273 if (isk->inet_num != 0)
274 goto out;
275
276 err = -EADDRINUSE;
277 isk->inet_rcv_saddr = isk->inet_saddr = addr->sin_addr.s_addr;
278 snum = ntohs(addr->sin_port);
279 if (ping_v4_get_port(sk, snum) != 0) {
280 isk->inet_saddr = isk->inet_rcv_saddr = 0;
281 goto out;
282 }
283
284 pr_debug("after bind(): num = %d, daddr = %ld, dif = %d\n",
285 (int)isk->inet_num,
286 (unsigned long) isk->inet_rcv_saddr,
287 (int)sk->sk_bound_dev_if);
288
289 err = 0;
290 if (isk->inet_rcv_saddr)
291 sk->sk_userlocks |= SOCK_BINDADDR_LOCK;
292 if (snum)
293 sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
294 isk->inet_sport = htons(isk->inet_num);
295 isk->inet_daddr = 0;
296 isk->inet_dport = 0;
297 sk_dst_reset(sk);
298out:
299 release_sock(sk);
300 pr_debug("ping_v4_bind -> %d\n", err);
301 return err;
302}
303
304/*
305 * Is this a supported type of ICMP message?
306 */
307
308static inline int ping_supported(int type, int code)
309{
310 if (type == ICMP_ECHO && code == 0)
311 return 1;
312 return 0;
313}
314
315/*
316 * This routine is called by the ICMP module when it gets some
317 * sort of error condition.
318 */
319
320static int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
321
322void ping_err(struct sk_buff *skb, u32 info)
323{
324 struct iphdr *iph = (struct iphdr *)skb->data;
325 struct icmphdr *icmph = (struct icmphdr *)(skb->data+(iph->ihl<<2));
326 struct inet_sock *inet_sock;
327 int type = icmph->type;
328 int code = icmph->code;
329 struct net *net = dev_net(skb->dev);
330 struct sock *sk;
331 int harderr;
332 int err;
333
334 /* We assume the packet has already been checked by icmp_unreach */
335
336 if (!ping_supported(icmph->type, icmph->code))
337 return;
338
339 pr_debug("ping_err(type=%04x,code=%04x,id=%04x,seq=%04x)\n", type,
340 code, ntohs(icmph->un.echo.id), ntohs(icmph->un.echo.sequence));
341
342 sk = ping_v4_lookup(net, iph->daddr, iph->saddr,
343 ntohs(icmph->un.echo.id), skb->dev->ifindex);
344 if (sk == NULL) {
345 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
346 pr_debug("no socket, dropping\n");
347 return; /* No socket for error */
348 }
349 pr_debug("err on socket %p\n", sk);
350
351 err = 0;
352 harderr = 0;
353 inet_sock = inet_sk(sk);
354
355 switch (type) {
356 default:
357 case ICMP_TIME_EXCEEDED:
358 err = EHOSTUNREACH;
359 break;
360 case ICMP_SOURCE_QUENCH:
361 /* This is not a real error but ping wants to see it.
362 * Report it with some fake errno. */
363 err = EREMOTEIO;
364 break;
365 case ICMP_PARAMETERPROB:
366 err = EPROTO;
367 harderr = 1;
368 break;
369 case ICMP_DEST_UNREACH:
370 if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
371 if (inet_sock->pmtudisc != IP_PMTUDISC_DONT) {
372 err = EMSGSIZE;
373 harderr = 1;
374 break;
375 }
376 goto out;
377 }
378 err = EHOSTUNREACH;
379 if (code <= NR_ICMP_UNREACH) {
380 harderr = icmp_err_convert[code].fatal;
381 err = icmp_err_convert[code].errno;
382 }
383 break;
384 case ICMP_REDIRECT:
385 /* See ICMP_SOURCE_QUENCH */
386 err = EREMOTEIO;
387 break;
388 }
389
390 /*
391 * RFC1122: OK. Passes ICMP errors back to application, as per
392 * 4.1.3.3.
393 */
394 if (!inet_sock->recverr) {
395 if (!harderr || sk->sk_state != TCP_ESTABLISHED)
396 goto out;
397 } else {
398 ip_icmp_error(sk, skb, err, 0 /* no remote port */,
399 info, (u8 *)icmph);
400 }
401 sk->sk_err = err;
402 sk->sk_error_report(sk);
403out:
404 sock_put(sk);
405}
406
407/*
408 * Copy and checksum an ICMP Echo packet from user space into a buffer.
409 */
410
411struct pingfakehdr {
412 struct icmphdr icmph;
413 struct iovec *iov;
414 u32 wcheck;
415};
416
417static int ping_getfrag(void *from, char * to,
418 int offset, int fraglen, int odd, struct sk_buff *skb)
419{
420 struct pingfakehdr *pfh = (struct pingfakehdr *)from;
421
422 if (offset == 0) {
423 if (fraglen < sizeof(struct icmphdr))
424 BUG();
425 if (csum_partial_copy_fromiovecend(to + sizeof(struct icmphdr),
426 pfh->iov, 0, fraglen - sizeof(struct icmphdr),
427 &pfh->wcheck))
428 return -EFAULT;
429
430 return 0;
431 }
432 if (offset < sizeof(struct icmphdr))
433 BUG();
434 if (csum_partial_copy_fromiovecend
435 (to, pfh->iov, offset - sizeof(struct icmphdr),
436 fraglen, &pfh->wcheck))
437 return -EFAULT;
438 return 0;
439}
440
441static int ping_push_pending_frames(struct sock *sk, struct pingfakehdr *pfh,
442 struct flowi4 *fl4)
443{
444 struct sk_buff *skb = skb_peek(&sk->sk_write_queue);
445
446 pfh->wcheck = csum_partial((char *)&pfh->icmph,
447 sizeof(struct icmphdr), pfh->wcheck);
448 pfh->icmph.checksum = csum_fold(pfh->wcheck);
449 memcpy(icmp_hdr(skb), &pfh->icmph, sizeof(struct icmphdr));
450 skb->ip_summed = CHECKSUM_NONE;
451 return ip_push_pending_frames(sk, fl4);
452}
453
454static int ping_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
455 size_t len)
456{
457 struct net *net = sock_net(sk);
458 struct flowi4 fl4;
459 struct inet_sock *inet = inet_sk(sk);
460 struct ipcm_cookie ipc;
461 struct icmphdr user_icmph;
462 struct pingfakehdr pfh;
463 struct rtable *rt = NULL;
464 struct ip_options_data opt_copy;
465 int free = 0;
466 u32 saddr, daddr, faddr;
467 u8 tos;
468 int err;
469
470 pr_debug("ping_sendmsg(sk=%p,sk->num=%u)\n", inet, inet->inet_num);
471
472
473 if (len > 0xFFFF)
474 return -EMSGSIZE;
475
476 /*
477 * Check the flags.
478 */
479
480 /* Mirror BSD error message compatibility */
481 if (msg->msg_flags & MSG_OOB)
482 return -EOPNOTSUPP;
483
484 /*
485 * Fetch the ICMP header provided by the userland.
486 * iovec is modified!
487 */
488
489 if (memcpy_fromiovec((u8 *)&user_icmph, msg->msg_iov,
490 sizeof(struct icmphdr)))
491 return -EFAULT;
492 if (!ping_supported(user_icmph.type, user_icmph.code))
493 return -EINVAL;
494
495 /*
496 * Get and verify the address.
497 */
498
499 if (msg->msg_name) {
500 struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name;
501 if (msg->msg_namelen < sizeof(*usin))
502 return -EINVAL;
503 if (usin->sin_family != AF_INET)
504 return -EINVAL;
505 daddr = usin->sin_addr.s_addr;
506 /* no remote port */
507 } else {
508 if (sk->sk_state != TCP_ESTABLISHED)
509 return -EDESTADDRREQ;
510 daddr = inet->inet_daddr;
511 /* no remote port */
512 }
513
514 ipc.addr = inet->inet_saddr;
515 ipc.opt = NULL;
516 ipc.oif = sk->sk_bound_dev_if;
517 ipc.tx_flags = 0;
518 err = sock_tx_timestamp(sk, &ipc.tx_flags);
519 if (err)
520 return err;
521
522 if (msg->msg_controllen) {
523 err = ip_cmsg_send(sock_net(sk), msg, &ipc);
524 if (err)
525 return err;
526 if (ipc.opt)
527 free = 1;
528 }
529 if (!ipc.opt) {
530 struct ip_options_rcu *inet_opt;
531
532 rcu_read_lock();
533 inet_opt = rcu_dereference(inet->inet_opt);
534 if (inet_opt) {
535 memcpy(&opt_copy, inet_opt,
536 sizeof(*inet_opt) + inet_opt->opt.optlen);
537 ipc.opt = &opt_copy.opt;
538 }
539 rcu_read_unlock();
540 }
541
542 saddr = ipc.addr;
543 ipc.addr = faddr = daddr;
544
545 if (ipc.opt && ipc.opt->opt.srr) {
546 if (!daddr)
547 return -EINVAL;
548 faddr = ipc.opt->opt.faddr;
549 }
550 tos = RT_TOS(inet->tos);
551 if (sock_flag(sk, SOCK_LOCALROUTE) ||
552 (msg->msg_flags & MSG_DONTROUTE) ||
553 (ipc.opt && ipc.opt->opt.is_strictroute)) {
554 tos |= RTO_ONLINK;
555 }
556
557 if (ipv4_is_multicast(daddr)) {
558 if (!ipc.oif)
559 ipc.oif = inet->mc_index;
560 if (!saddr)
561 saddr = inet->mc_addr;
562 }
563
564 flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
565 RT_SCOPE_UNIVERSE, sk->sk_protocol,
566 inet_sk_flowi_flags(sk), faddr, saddr, 0, 0);
567
568 security_sk_classify_flow(sk, flowi4_to_flowi(&fl4));
569 rt = ip_route_output_flow(net, &fl4, sk);
570 if (IS_ERR(rt)) {
571 err = PTR_ERR(rt);
572 rt = NULL;
573 if (err == -ENETUNREACH)
574 IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
575 goto out;
576 }
577
578 err = -EACCES;
579 if ((rt->rt_flags & RTCF_BROADCAST) &&
580 !sock_flag(sk, SOCK_BROADCAST))
581 goto out;
582
583 if (msg->msg_flags & MSG_CONFIRM)
584 goto do_confirm;
585back_from_confirm:
586
587 if (!ipc.addr)
588 ipc.addr = fl4.daddr;
589
590 lock_sock(sk);
591
592 pfh.icmph.type = user_icmph.type; /* already checked */
593 pfh.icmph.code = user_icmph.code; /* ditto */
594 pfh.icmph.checksum = 0;
595 pfh.icmph.un.echo.id = inet->inet_sport;
596 pfh.icmph.un.echo.sequence = user_icmph.un.echo.sequence;
597 pfh.iov = msg->msg_iov;
598 pfh.wcheck = 0;
599
600 err = ip_append_data(sk, &fl4, ping_getfrag, &pfh, len,
601 0, &ipc, &rt, msg->msg_flags);
602 if (err)
603 ip_flush_pending_frames(sk);
604 else
605 err = ping_push_pending_frames(sk, &pfh, &fl4);
606 release_sock(sk);
607
608out:
609 ip_rt_put(rt);
610 if (free)
611 kfree(ipc.opt);
612 if (!err) {
613 icmp_out_count(sock_net(sk), user_icmph.type);
614 return len;
615 }
616 return err;
617
618do_confirm:
619 dst_confirm(&rt->dst);
620 if (!(msg->msg_flags & MSG_PROBE) || len)
621 goto back_from_confirm;
622 err = 0;
623 goto out;
624}
625
626static int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
627 size_t len, int noblock, int flags, int *addr_len)
628{
629 struct inet_sock *isk = inet_sk(sk);
630 struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
631 struct sk_buff *skb;
632 int copied, err;
633
634 pr_debug("ping_recvmsg(sk=%p,sk->num=%u)\n", isk, isk->inet_num);
635
636 if (flags & MSG_OOB)
637 goto out;
638
639 if (addr_len)
640 *addr_len = sizeof(*sin);
641
642 if (flags & MSG_ERRQUEUE)
643 return ip_recv_error(sk, msg, len);
644
645 skb = skb_recv_datagram(sk, flags, noblock, &err);
646 if (!skb)
647 goto out;
648
649 copied = skb->len;
650 if (copied > len) {
651 msg->msg_flags |= MSG_TRUNC;
652 copied = len;
653 }
654
655 /* Don't bother checking the checksum */
656 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
657 if (err)
658 goto done;
659
660 sock_recv_timestamp(msg, sk, skb);
661
662 /* Copy the address. */
663 if (sin) {
664 sin->sin_family = AF_INET;
665 sin->sin_port = 0 /* skb->h.uh->source */;
666 sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
667 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
668 }
669 if (isk->cmsg_flags)
670 ip_cmsg_recv(msg, skb);
671 err = copied;
672
673done:
674 skb_free_datagram(sk, skb);
675out:
676 pr_debug("ping_recvmsg -> %d\n", err);
677 return err;
678}
679
680static int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
681{
682 pr_debug("ping_queue_rcv_skb(sk=%p,sk->num=%d,skb=%p)\n",
683 inet_sk(sk), inet_sk(sk)->inet_num, skb);
684 if (sock_queue_rcv_skb(sk, skb) < 0) {
685 ICMP_INC_STATS_BH(sock_net(sk), ICMP_MIB_INERRORS);
686 kfree_skb(skb);
687 pr_debug("ping_queue_rcv_skb -> failed\n");
688 return -1;
689 }
690 return 0;
691}
692
693
694/*
695 * All we need to do is get the socket.
696 */
697
698void ping_rcv(struct sk_buff *skb)
699{
700 struct sock *sk;
701 struct net *net = dev_net(skb->dev);
702 struct iphdr *iph = ip_hdr(skb);
703 struct icmphdr *icmph = icmp_hdr(skb);
704 u32 saddr = iph->saddr;
705 u32 daddr = iph->daddr;
706
707 /* We assume the packet has already been checked by icmp_rcv */
708
709 pr_debug("ping_rcv(skb=%p,id=%04x,seq=%04x)\n",
710 skb, ntohs(icmph->un.echo.id), ntohs(icmph->un.echo.sequence));
711
712 /* Push ICMP header back */
713 skb_push(skb, skb->data - (u8 *)icmph);
714
715 sk = ping_v4_lookup(net, saddr, daddr, ntohs(icmph->un.echo.id),
716 skb->dev->ifindex);
717 if (sk != NULL) {
718 pr_debug("rcv on socket %p\n", sk);
719 ping_queue_rcv_skb(sk, skb_get(skb));
720 sock_put(sk);
721 return;
722 }
723 pr_debug("no socket, dropping\n");
724
725 /* We're called from icmp_rcv(). kfree_skb() is done there. */
726}
727
728struct proto ping_prot = {
729 .name = "PING",
730 .owner = THIS_MODULE,
731 .init = ping_init_sock,
732 .close = ping_close,
733 .connect = ip4_datagram_connect,
734 .disconnect = udp_disconnect,
735 .setsockopt = ip_setsockopt,
736 .getsockopt = ip_getsockopt,
737 .sendmsg = ping_sendmsg,
738 .recvmsg = ping_recvmsg,
739 .bind = ping_bind,
740 .backlog_rcv = ping_queue_rcv_skb,
741 .hash = ping_v4_hash,
742 .unhash = ping_v4_unhash,
743 .get_port = ping_v4_get_port,
744 .obj_size = sizeof(struct inet_sock),
745};
746EXPORT_SYMBOL(ping_prot);
747
748#ifdef CONFIG_PROC_FS
749
750static struct sock *ping_get_first(struct seq_file *seq, int start)
751{
752 struct sock *sk;
753 struct ping_iter_state *state = seq->private;
754 struct net *net = seq_file_net(seq);
755
756 for (state->bucket = start; state->bucket < PING_HTABLE_SIZE;
757 ++state->bucket) {
758 struct hlist_nulls_node *node;
759 struct hlist_nulls_head *hslot;
760
761 hslot = &ping_table.hash[state->bucket];
762
763 if (hlist_nulls_empty(hslot))
764 continue;
765
766 sk_nulls_for_each(sk, node, hslot) {
767 if (net_eq(sock_net(sk), net))
768 goto found;
769 }
770 }
771 sk = NULL;
772found:
773 return sk;
774}
775
776static struct sock *ping_get_next(struct seq_file *seq, struct sock *sk)
777{
778 struct ping_iter_state *state = seq->private;
779 struct net *net = seq_file_net(seq);
780
781 do {
782 sk = sk_nulls_next(sk);
783 } while (sk && (!net_eq(sock_net(sk), net)));
784
785 if (!sk)
786 return ping_get_first(seq, state->bucket + 1);
787 return sk;
788}
789
790static struct sock *ping_get_idx(struct seq_file *seq, loff_t pos)
791{
792 struct sock *sk = ping_get_first(seq, 0);
793
794 if (sk)
795 while (pos && (sk = ping_get_next(seq, sk)) != NULL)
796 --pos;
797 return pos ? NULL : sk;
798}
799
800static void *ping_seq_start(struct seq_file *seq, loff_t *pos)
801{
802 struct ping_iter_state *state = seq->private;
803 state->bucket = 0;
804
805 read_lock_bh(&ping_table.lock);
806
807 return *pos ? ping_get_idx(seq, *pos-1) : SEQ_START_TOKEN;
808}
809
810static void *ping_seq_next(struct seq_file *seq, void *v, loff_t *pos)
811{
812 struct sock *sk;
813
814 if (v == SEQ_START_TOKEN)
815 sk = ping_get_idx(seq, 0);
816 else
817 sk = ping_get_next(seq, v);
818
819 ++*pos;
820 return sk;
821}
822
823static void ping_seq_stop(struct seq_file *seq, void *v)
824{
825 read_unlock_bh(&ping_table.lock);
826}
827
828static void ping_format_sock(struct sock *sp, struct seq_file *f,
829 int bucket, int *len)
830{
831 struct inet_sock *inet = inet_sk(sp);
832 __be32 dest = inet->inet_daddr;
833 __be32 src = inet->inet_rcv_saddr;
834 __u16 destp = ntohs(inet->inet_dport);
835 __u16 srcp = ntohs(inet->inet_sport);
836
837 seq_printf(f, "%5d: %08X:%04X %08X:%04X"
838 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d%n",
839 bucket, src, srcp, dest, destp, sp->sk_state,
840 sk_wmem_alloc_get(sp),
841 sk_rmem_alloc_get(sp),
842 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
843 atomic_read(&sp->sk_refcnt), sp,
844 atomic_read(&sp->sk_drops), len);
845}
846
847static int ping_seq_show(struct seq_file *seq, void *v)
848{
849 if (v == SEQ_START_TOKEN)
850 seq_printf(seq, "%-127s\n",
851 " sl local_address rem_address st tx_queue "
852 "rx_queue tr tm->when retrnsmt uid timeout "
853 "inode ref pointer drops");
854 else {
855 struct ping_iter_state *state = seq->private;
856 int len;
857
858 ping_format_sock(v, seq, state->bucket, &len);
859 seq_printf(seq, "%*s\n", 127 - len, "");
860 }
861 return 0;
862}
863
864static const struct seq_operations ping_seq_ops = {
865 .show = ping_seq_show,
866 .start = ping_seq_start,
867 .next = ping_seq_next,
868 .stop = ping_seq_stop,
869};
870
871static int ping_seq_open(struct inode *inode, struct file *file)
872{
873 return seq_open_net(inode, file, &ping_seq_ops,
874 sizeof(struct ping_iter_state));
875}
876
877static const struct file_operations ping_seq_fops = {
878 .open = ping_seq_open,
879 .read = seq_read,
880 .llseek = seq_lseek,
881 .release = seq_release_net,
882};
883
884static int ping_proc_register(struct net *net)
885{
886 struct proc_dir_entry *p;
887 int rc = 0;
888
889 p = proc_net_fops_create(net, "icmp", S_IRUGO, &ping_seq_fops);
890 if (!p)
891 rc = -ENOMEM;
892 return rc;
893}
894
895static void ping_proc_unregister(struct net *net)
896{
897 proc_net_remove(net, "icmp");
898}
899
900
901static int __net_init ping_proc_init_net(struct net *net)
902{
903 return ping_proc_register(net);
904}
905
906static void __net_exit ping_proc_exit_net(struct net *net)
907{
908 ping_proc_unregister(net);
909}
910
911static struct pernet_operations ping_net_ops = {
912 .init = ping_proc_init_net,
913 .exit = ping_proc_exit_net,
914};
915
916int __init ping_proc_init(void)
917{
918 return register_pernet_subsys(&ping_net_ops);
919}
920
921void ping_proc_exit(void)
922{
923 unregister_pernet_subsys(&ping_net_ops);
924}
925
926#endif
927
928void __init ping_init(void)
929{
930 int i;
931
932 for (i = 0; i < PING_HTABLE_SIZE; i++)
933 INIT_HLIST_NULLS_HEAD(&ping_table.hash[i], i);
934 rwlock_init(&ping_table.lock);
935}
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index bceaec42c37d..11e1780455f2 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -154,7 +154,7 @@ static __inline__ int icmp_filter(struct sock *sk, struct sk_buff *skb)
154 * RFC 1122: SHOULD pass TOS value up to the transport layer. 154 * RFC 1122: SHOULD pass TOS value up to the transport layer.
155 * -> It does. And not only TOS, but all IP header. 155 * -> It does. And not only TOS, but all IP header.
156 */ 156 */
157static int raw_v4_input(struct sk_buff *skb, struct iphdr *iph, int hash) 157static int raw_v4_input(struct sk_buff *skb, const struct iphdr *iph, int hash)
158{ 158{
159 struct sock *sk; 159 struct sock *sk;
160 struct hlist_head *head; 160 struct hlist_head *head;
@@ -247,7 +247,7 @@ static void raw_err(struct sock *sk, struct sk_buff *skb, u32 info)
247 } 247 }
248 248
249 if (inet->recverr) { 249 if (inet->recverr) {
250 struct iphdr *iph = (struct iphdr *)skb->data; 250 const struct iphdr *iph = (const struct iphdr *)skb->data;
251 u8 *payload = skb->data + (iph->ihl << 2); 251 u8 *payload = skb->data + (iph->ihl << 2);
252 252
253 if (inet->hdrincl) 253 if (inet->hdrincl)
@@ -265,7 +265,7 @@ void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info)
265{ 265{
266 int hash; 266 int hash;
267 struct sock *raw_sk; 267 struct sock *raw_sk;
268 struct iphdr *iph; 268 const struct iphdr *iph;
269 struct net *net; 269 struct net *net;
270 270
271 hash = protocol & (RAW_HTABLE_SIZE - 1); 271 hash = protocol & (RAW_HTABLE_SIZE - 1);
@@ -273,7 +273,7 @@ void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info)
273 read_lock(&raw_v4_hashinfo.lock); 273 read_lock(&raw_v4_hashinfo.lock);
274 raw_sk = sk_head(&raw_v4_hashinfo.ht[hash]); 274 raw_sk = sk_head(&raw_v4_hashinfo.ht[hash]);
275 if (raw_sk != NULL) { 275 if (raw_sk != NULL) {
276 iph = (struct iphdr *)skb->data; 276 iph = (const struct iphdr *)skb->data;
277 net = dev_net(skb->dev); 277 net = dev_net(skb->dev);
278 278
279 while ((raw_sk = __raw_v4_lookup(net, raw_sk, protocol, 279 while ((raw_sk = __raw_v4_lookup(net, raw_sk, protocol,
@@ -281,7 +281,7 @@ void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info)
281 skb->dev->ifindex)) != NULL) { 281 skb->dev->ifindex)) != NULL) {
282 raw_err(raw_sk, skb, info); 282 raw_err(raw_sk, skb, info);
283 raw_sk = sk_next(raw_sk); 283 raw_sk = sk_next(raw_sk);
284 iph = (struct iphdr *)skb->data; 284 iph = (const struct iphdr *)skb->data;
285 } 285 }
286 } 286 }
287 read_unlock(&raw_v4_hashinfo.lock); 287 read_unlock(&raw_v4_hashinfo.lock);
@@ -314,9 +314,10 @@ int raw_rcv(struct sock *sk, struct sk_buff *skb)
314 return 0; 314 return 0;
315} 315}
316 316
317static int raw_send_hdrinc(struct sock *sk, void *from, size_t length, 317static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
318 struct rtable **rtp, 318 void *from, size_t length,
319 unsigned int flags) 319 struct rtable **rtp,
320 unsigned int flags)
320{ 321{
321 struct inet_sock *inet = inet_sk(sk); 322 struct inet_sock *inet = inet_sk(sk);
322 struct net *net = sock_net(sk); 323 struct net *net = sock_net(sk);
@@ -327,7 +328,7 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length,
327 struct rtable *rt = *rtp; 328 struct rtable *rt = *rtp;
328 329
329 if (length > rt->dst.dev->mtu) { 330 if (length > rt->dst.dev->mtu) {
330 ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport, 331 ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
331 rt->dst.dev->mtu); 332 rt->dst.dev->mtu);
332 return -EMSGSIZE; 333 return -EMSGSIZE;
333 } 334 }
@@ -372,7 +373,7 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length,
372 373
373 if (iphlen >= sizeof(*iph)) { 374 if (iphlen >= sizeof(*iph)) {
374 if (!iph->saddr) 375 if (!iph->saddr)
375 iph->saddr = rt->rt_src; 376 iph->saddr = fl4->saddr;
376 iph->check = 0; 377 iph->check = 0;
377 iph->tot_len = htons(length); 378 iph->tot_len = htons(length);
378 if (!iph->id) 379 if (!iph->id)
@@ -455,11 +456,13 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
455 struct inet_sock *inet = inet_sk(sk); 456 struct inet_sock *inet = inet_sk(sk);
456 struct ipcm_cookie ipc; 457 struct ipcm_cookie ipc;
457 struct rtable *rt = NULL; 458 struct rtable *rt = NULL;
459 struct flowi4 fl4;
458 int free = 0; 460 int free = 0;
459 __be32 daddr; 461 __be32 daddr;
460 __be32 saddr; 462 __be32 saddr;
461 u8 tos; 463 u8 tos;
462 int err; 464 int err;
465 struct ip_options_data opt_copy;
463 466
464 err = -EMSGSIZE; 467 err = -EMSGSIZE;
465 if (len > 0xFFFF) 468 if (len > 0xFFFF)
@@ -520,8 +523,18 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
520 saddr = ipc.addr; 523 saddr = ipc.addr;
521 ipc.addr = daddr; 524 ipc.addr = daddr;
522 525
523 if (!ipc.opt) 526 if (!ipc.opt) {
524 ipc.opt = inet->opt; 527 struct ip_options_rcu *inet_opt;
528
529 rcu_read_lock();
530 inet_opt = rcu_dereference(inet->inet_opt);
531 if (inet_opt) {
532 memcpy(&opt_copy, inet_opt,
533 sizeof(*inet_opt) + inet_opt->opt.optlen);
534 ipc.opt = &opt_copy.opt;
535 }
536 rcu_read_unlock();
537 }
525 538
526 if (ipc.opt) { 539 if (ipc.opt) {
527 err = -EINVAL; 540 err = -EINVAL;
@@ -530,10 +543,10 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
530 */ 543 */
531 if (inet->hdrincl) 544 if (inet->hdrincl)
532 goto done; 545 goto done;
533 if (ipc.opt->srr) { 546 if (ipc.opt->opt.srr) {
534 if (!daddr) 547 if (!daddr)
535 goto done; 548 goto done;
536 daddr = ipc.opt->faddr; 549 daddr = ipc.opt->opt.faddr;
537 } 550 }
538 } 551 }
539 tos = RT_CONN_FLAGS(sk); 552 tos = RT_CONN_FLAGS(sk);
@@ -547,31 +560,23 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
547 saddr = inet->mc_addr; 560 saddr = inet->mc_addr;
548 } 561 }
549 562
550 { 563 flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
551 struct flowi4 fl4 = { 564 RT_SCOPE_UNIVERSE,
552 .flowi4_oif = ipc.oif, 565 inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
553 .flowi4_mark = sk->sk_mark, 566 FLOWI_FLAG_CAN_SLEEP, daddr, saddr, 0, 0);
554 .daddr = daddr,
555 .saddr = saddr,
556 .flowi4_tos = tos,
557 .flowi4_proto = (inet->hdrincl ?
558 IPPROTO_RAW :
559 sk->sk_protocol),
560 .flowi4_flags = FLOWI_FLAG_CAN_SLEEP,
561 };
562 if (!inet->hdrincl) {
563 err = raw_probe_proto_opt(&fl4, msg);
564 if (err)
565 goto done;
566 }
567 567
568 security_sk_classify_flow(sk, flowi4_to_flowi(&fl4)); 568 if (!inet->hdrincl) {
569 rt = ip_route_output_flow(sock_net(sk), &fl4, sk); 569 err = raw_probe_proto_opt(&fl4, msg);
570 if (IS_ERR(rt)) { 570 if (err)
571 err = PTR_ERR(rt);
572 rt = NULL;
573 goto done; 571 goto done;
574 } 572 }
573
574 security_sk_classify_flow(sk, flowi4_to_flowi(&fl4));
575 rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
576 if (IS_ERR(rt)) {
577 err = PTR_ERR(rt);
578 rt = NULL;
579 goto done;
575 } 580 }
576 581
577 err = -EACCES; 582 err = -EACCES;
@@ -583,19 +588,20 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
583back_from_confirm: 588back_from_confirm:
584 589
585 if (inet->hdrincl) 590 if (inet->hdrincl)
586 err = raw_send_hdrinc(sk, msg->msg_iov, len, 591 err = raw_send_hdrinc(sk, &fl4, msg->msg_iov, len,
587 &rt, msg->msg_flags); 592 &rt, msg->msg_flags);
588 593
589 else { 594 else {
590 if (!ipc.addr) 595 if (!ipc.addr)
591 ipc.addr = rt->rt_dst; 596 ipc.addr = fl4.daddr;
592 lock_sock(sk); 597 lock_sock(sk);
593 err = ip_append_data(sk, ip_generic_getfrag, msg->msg_iov, len, 0, 598 err = ip_append_data(sk, &fl4, ip_generic_getfrag,
594 &ipc, &rt, msg->msg_flags); 599 msg->msg_iov, len, 0,
600 &ipc, &rt, msg->msg_flags);
595 if (err) 601 if (err)
596 ip_flush_pending_frames(sk); 602 ip_flush_pending_frames(sk);
597 else if (!(msg->msg_flags & MSG_MORE)) { 603 else if (!(msg->msg_flags & MSG_MORE)) {
598 err = ip_push_pending_frames(sk); 604 err = ip_push_pending_frames(sk, &fl4);
599 if (err == -ENOBUFS && !inet->recverr) 605 if (err == -ENOBUFS && !inet->recverr)
600 err = 0; 606 err = 0;
601 } 607 }
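
The opt_copy hunk in raw_sendmsg() above is the standard RCU snapshot idiom: dereference the published ip_options_rcu inside rcu_read_lock(), memcpy the header plus optlen bytes into a private stack buffer, and drop the lock before the copy is used. A rough userspace analogue, with C11 atomics standing in for RCU publication; the names and the fixed-size buffer below are illustrative, not kernel API:

#include <stdatomic.h>
#include <string.h>
#include <stdio.h>

struct ip_opts {
	int optlen;
	unsigned char data[40];	/* IPv4 options are at most 40 bytes */
};

static _Atomic(struct ip_opts *) published_opts;

static void reader(void)
{
	struct ip_opts copy;
	struct ip_opts *cur;

	/* rcu_read_lock() would go here in the kernel version. */
	cur = atomic_load_explicit(&published_opts, memory_order_acquire);
	if (cur)
		memcpy(&copy, cur, sizeof(*cur));	/* private snapshot */
	/* rcu_read_unlock(): from here on only the copy is touched. */

	if (cur)
		printf("snapshot carries %d option bytes\n", copy.optlen);
	else
		printf("no options published\n");
}

int main(void)
{
	static struct ip_opts opts = {
		.optlen = 4,
		.data = { 0x83, 4, 4, 0 },	/* LSRR-ish, for illustration */
	};

	reader();	/* sees NULL */
	atomic_store_explicit(&published_opts, &opts, memory_order_release);
	reader();	/* sees the snapshot */
	return 0;
}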
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 99e6e4bb1c72..b24d58e6bbcd 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -156,7 +156,7 @@ static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
156 u32 *p = NULL; 156 u32 *p = NULL;
157 157
158 if (!rt->peer) 158 if (!rt->peer)
159 rt_bind_peer(rt, 1); 159 rt_bind_peer(rt, rt->rt_dst, 1);
160 160
161 peer = rt->peer; 161 peer = rt->peer;
162 if (peer) { 162 if (peer) {
@@ -424,7 +424,7 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
424 dst_metric(&r->dst, RTAX_WINDOW), 424 dst_metric(&r->dst, RTAX_WINDOW),
425 (int)((dst_metric(&r->dst, RTAX_RTT) >> 3) + 425 (int)((dst_metric(&r->dst, RTAX_RTT) >> 3) +
426 dst_metric(&r->dst, RTAX_RTTVAR)), 426 dst_metric(&r->dst, RTAX_RTTVAR)),
427 r->rt_tos, 427 r->rt_key_tos,
428 r->dst.hh ? atomic_read(&r->dst.hh->hh_refcnt) : -1, 428 r->dst.hh ? atomic_read(&r->dst.hh->hh_refcnt) : -1,
429 r->dst.hh ? (r->dst.hh->hh_output == 429 r->dst.hh ? (r->dst.hh->hh_output ==
430 dev_queue_xmit) : 0, 430 dev_queue_xmit) : 0,
@@ -724,7 +724,7 @@ static inline int compare_keys(struct rtable *rt1, struct rtable *rt2)
724 return (((__force u32)rt1->rt_key_dst ^ (__force u32)rt2->rt_key_dst) | 724 return (((__force u32)rt1->rt_key_dst ^ (__force u32)rt2->rt_key_dst) |
725 ((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) | 725 ((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) |
726 (rt1->rt_mark ^ rt2->rt_mark) | 726 (rt1->rt_mark ^ rt2->rt_mark) |
727 (rt1->rt_tos ^ rt2->rt_tos) | 727 (rt1->rt_key_tos ^ rt2->rt_key_tos) |
728 (rt1->rt_oif ^ rt2->rt_oif) | 728 (rt1->rt_oif ^ rt2->rt_oif) |
729 (rt1->rt_iif ^ rt2->rt_iif)) == 0; 729 (rt1->rt_iif ^ rt2->rt_iif)) == 0;
730} 730}
@@ -968,10 +968,6 @@ static int rt_garbage_collect(struct dst_ops *ops)
968 break; 968 break;
969 969
970 expire >>= 1; 970 expire >>= 1;
971#if RT_CACHE_DEBUG >= 2
972 printk(KERN_DEBUG "expire>> %u %d %d %d\n", expire,
973 dst_entries_get_fast(&ipv4_dst_ops), goal, i);
974#endif
975 971
976 if (dst_entries_get_fast(&ipv4_dst_ops) < ip_rt_max_size) 972 if (dst_entries_get_fast(&ipv4_dst_ops) < ip_rt_max_size)
977 goto out; 973 goto out;
@@ -992,10 +988,6 @@ work_done:
992 dst_entries_get_fast(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh || 988 dst_entries_get_fast(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh ||
993 dst_entries_get_slow(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh) 989 dst_entries_get_slow(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh)
994 expire = ip_rt_gc_timeout; 990 expire = ip_rt_gc_timeout;
995#if RT_CACHE_DEBUG >= 2
996 printk(KERN_DEBUG "expire++ %u %d %d %d\n", expire,
997 dst_entries_get_fast(&ipv4_dst_ops), goal, rover);
998#endif
999out: return 0; 991out: return 0;
1000} 992}
1001 993
@@ -1179,16 +1171,6 @@ restart:
1179 1171
1180 rt->dst.rt_next = rt_hash_table[hash].chain; 1172 rt->dst.rt_next = rt_hash_table[hash].chain;
1181 1173
1182#if RT_CACHE_DEBUG >= 2
1183 if (rt->dst.rt_next) {
1184 struct rtable *trt;
1185 printk(KERN_DEBUG "rt_cache @%02x: %pI4",
1186 hash, &rt->rt_dst);
1187 for (trt = rt->dst.rt_next; trt; trt = trt->dst.rt_next)
1188 printk(" . %pI4", &trt->rt_dst);
1189 printk("\n");
1190 }
1191#endif
1192 /* 1174 /*
1193 * Since lookup is lockfree, we must make sure 1175 * Since lookup is lockfree, we must make sure
1194 * previous writes to rt are committed to memory 1176 * previous writes to rt are committed to memory
@@ -1211,11 +1193,11 @@ static u32 rt_peer_genid(void)
1211 return atomic_read(&__rt_peer_genid); 1193 return atomic_read(&__rt_peer_genid);
1212} 1194}
1213 1195
1214void rt_bind_peer(struct rtable *rt, int create) 1196void rt_bind_peer(struct rtable *rt, __be32 daddr, int create)
1215{ 1197{
1216 struct inet_peer *peer; 1198 struct inet_peer *peer;
1217 1199
1218 peer = inet_getpeer_v4(rt->rt_dst, create); 1200 peer = inet_getpeer_v4(daddr, create);
1219 1201
1220 if (peer && cmpxchg(&rt->peer, NULL, peer) != NULL) 1202 if (peer && cmpxchg(&rt->peer, NULL, peer) != NULL)
1221 inet_putpeer(peer); 1203 inet_putpeer(peer);
@@ -1249,7 +1231,7 @@ void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
1249 1231
1250 if (rt) { 1232 if (rt) {
1251 if (rt->peer == NULL) 1233 if (rt->peer == NULL)
1252 rt_bind_peer(rt, 1); 1234 rt_bind_peer(rt, rt->rt_dst, 1);
1253 1235
1254 /* If peer is attached to destination, it is never detached, 1236 /* If peer is attached to destination, it is never detached,
1255 so that we need not to grab a lock to dereference it. 1237 so that we need not to grab a lock to dereference it.
@@ -1347,10 +1329,6 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
1347 unsigned hash = rt_hash(rt->rt_key_dst, rt->rt_key_src, 1329 unsigned hash = rt_hash(rt->rt_key_dst, rt->rt_key_src,
1348 rt->rt_oif, 1330 rt->rt_oif,
1349 rt_genid(dev_net(dst->dev))); 1331 rt_genid(dev_net(dst->dev)));
1350#if RT_CACHE_DEBUG >= 1
1351 printk(KERN_DEBUG "ipv4_negative_advice: redirect to %pI4/%02x dropped\n",
1352 &rt->rt_dst, rt->rt_tos);
1353#endif
1354 rt_del(hash, rt); 1332 rt_del(hash, rt);
1355 ret = NULL; 1333 ret = NULL;
1356 } else if (rt->peer && 1334 } else if (rt->peer &&
@@ -1399,7 +1377,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
1399 rcu_read_unlock(); 1377 rcu_read_unlock();
1400 1378
1401 if (!rt->peer) 1379 if (!rt->peer)
1402 rt_bind_peer(rt, 1); 1380 rt_bind_peer(rt, rt->rt_dst, 1);
1403 peer = rt->peer; 1381 peer = rt->peer;
1404 if (!peer) { 1382 if (!peer) {
1405 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway); 1383 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
@@ -1435,7 +1413,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
1435 peer->rate_tokens == ip_rt_redirect_number && 1413 peer->rate_tokens == ip_rt_redirect_number &&
1436 net_ratelimit()) 1414 net_ratelimit())
1437 printk(KERN_WARNING "host %pI4/if%d ignores redirects for %pI4 to %pI4.\n", 1415 printk(KERN_WARNING "host %pI4/if%d ignores redirects for %pI4 to %pI4.\n",
1438 &rt->rt_src, rt->rt_iif, 1416 &ip_hdr(skb)->saddr, rt->rt_iif,
1439 &rt->rt_dst, &rt->rt_gateway); 1417 &rt->rt_dst, &rt->rt_gateway);
1440#endif 1418#endif
1441 } 1419 }
@@ -1467,7 +1445,7 @@ static int ip_error(struct sk_buff *skb)
1467 } 1445 }
1468 1446
1469 if (!rt->peer) 1447 if (!rt->peer)
1470 rt_bind_peer(rt, 1); 1448 rt_bind_peer(rt, rt->rt_dst, 1);
1471 peer = rt->peer; 1449 peer = rt->peer;
1472 1450
1473 send = true; 1451 send = true;
@@ -1507,7 +1485,7 @@ static inline unsigned short guess_mtu(unsigned short old_mtu)
1507 return 68; 1485 return 68;
1508} 1486}
1509 1487
1510unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph, 1488unsigned short ip_rt_frag_needed(struct net *net, const struct iphdr *iph,
1511 unsigned short new_mtu, 1489 unsigned short new_mtu,
1512 struct net_device *dev) 1490 struct net_device *dev)
1513{ 1491{
@@ -1574,7 +1552,7 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
1574 dst_confirm(dst); 1552 dst_confirm(dst);
1575 1553
1576 if (!rt->peer) 1554 if (!rt->peer)
1577 rt_bind_peer(rt, 1); 1555 rt_bind_peer(rt, rt->rt_dst, 1);
1578 peer = rt->peer; 1556 peer = rt->peer;
1579 if (peer) { 1557 if (peer) {
1580 if (mtu < ip_rt_min_pmtu) 1558 if (mtu < ip_rt_min_pmtu)
@@ -1631,7 +1609,7 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
1631 struct inet_peer *peer; 1609 struct inet_peer *peer;
1632 1610
1633 if (!rt->peer) 1611 if (!rt->peer)
1634 rt_bind_peer(rt, 0); 1612 rt_bind_peer(rt, rt->rt_dst, 0);
1635 1613
1636 peer = rt->peer; 1614 peer = rt->peer;
1637 if (peer && peer->pmtu_expires) 1615 if (peer && peer->pmtu_expires)
@@ -1699,22 +1677,26 @@ static int ip_rt_bug(struct sk_buff *skb)
1699 in IP options! 1677 in IP options!
1700 */ 1678 */
1701 1679
1702void ip_rt_get_source(u8 *addr, struct rtable *rt) 1680void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
1703{ 1681{
1704 __be32 src; 1682 __be32 src;
1705 struct fib_result res;
1706 1683
1707 if (rt_is_output_route(rt)) 1684 if (rt_is_output_route(rt))
1708 src = rt->rt_src; 1685 src = ip_hdr(skb)->saddr;
1709 else { 1686 else {
1710 struct flowi4 fl4 = { 1687 struct fib_result res;
1711 .daddr = rt->rt_key_dst, 1688 struct flowi4 fl4;
1712 .saddr = rt->rt_key_src, 1689 struct iphdr *iph;
1713 .flowi4_tos = rt->rt_tos, 1690
1714 .flowi4_oif = rt->rt_oif, 1691 iph = ip_hdr(skb);
1715 .flowi4_iif = rt->rt_iif, 1692
1716 .flowi4_mark = rt->rt_mark, 1693 memset(&fl4, 0, sizeof(fl4));
1717 }; 1694 fl4.daddr = iph->daddr;
1695 fl4.saddr = iph->saddr;
1696 fl4.flowi4_tos = iph->tos;
1697 fl4.flowi4_oif = rt->dst.dev->ifindex;
1698 fl4.flowi4_iif = skb->dev->ifindex;
1699 fl4.flowi4_mark = skb->mark;
1718 1700
1719 rcu_read_lock(); 1701 rcu_read_lock();
1720 if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res) == 0) 1702 if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res) == 0)
@@ -1767,7 +1749,7 @@ static unsigned int ipv4_default_mtu(const struct dst_entry *dst)
1767 return mtu; 1749 return mtu;
1768} 1750}
1769 1751
1770static void rt_init_metrics(struct rtable *rt, const struct flowi4 *oldflp4, 1752static void rt_init_metrics(struct rtable *rt, const struct flowi4 *fl4,
1771 struct fib_info *fi) 1753 struct fib_info *fi)
1772{ 1754{
1773 struct inet_peer *peer; 1755 struct inet_peer *peer;
@@ -1776,7 +1758,7 @@ static void rt_init_metrics(struct rtable *rt, const struct flowi4 *oldflp4,
1776 /* If a peer entry exists for this destination, we must hook 1758 /* If a peer entry exists for this destination, we must hook
1777 * it up in order to get at cached metrics. 1759 * it up in order to get at cached metrics.
1778 */ 1760 */
1779 if (oldflp4 && (oldflp4->flowi4_flags & FLOWI_FLAG_PRECOW_METRICS)) 1761 if (fl4 && (fl4->flowi4_flags & FLOWI_FLAG_PRECOW_METRICS))
1780 create = 1; 1762 create = 1;
1781 1763
1782 rt->peer = peer = inet_getpeer_v4(rt->rt_dst, create); 1764 rt->peer = peer = inet_getpeer_v4(rt->rt_dst, create);
@@ -1803,7 +1785,7 @@ static void rt_init_metrics(struct rtable *rt, const struct flowi4 *oldflp4,
1803 } 1785 }
1804} 1786}
1805 1787
1806static void rt_set_nexthop(struct rtable *rt, const struct flowi4 *oldflp4, 1788static void rt_set_nexthop(struct rtable *rt, const struct flowi4 *fl4,
1807 const struct fib_result *res, 1789 const struct fib_result *res,
1808 struct fib_info *fi, u16 type, u32 itag) 1790 struct fib_info *fi, u16 type, u32 itag)
1809{ 1791{
@@ -1813,7 +1795,7 @@ static void rt_set_nexthop(struct rtable *rt, const struct flowi4 *oldflp4,
1813 if (FIB_RES_GW(*res) && 1795 if (FIB_RES_GW(*res) &&
1814 FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK) 1796 FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
1815 rt->rt_gateway = FIB_RES_GW(*res); 1797 rt->rt_gateway = FIB_RES_GW(*res);
1816 rt_init_metrics(rt, oldflp4, fi); 1798 rt_init_metrics(rt, fl4, fi);
1817#ifdef CONFIG_IP_ROUTE_CLASSID 1799#ifdef CONFIG_IP_ROUTE_CLASSID
1818 dst->tclassid = FIB_RES_NH(*res).nh_tclassid; 1800 dst->tclassid = FIB_RES_NH(*res).nh_tclassid;
1819#endif 1801#endif
@@ -1830,20 +1812,15 @@ static void rt_set_nexthop(struct rtable *rt, const struct flowi4 *oldflp4,
1830#endif 1812#endif
1831 set_class_tag(rt, itag); 1813 set_class_tag(rt, itag);
1832#endif 1814#endif
1833 rt->rt_type = type;
1834} 1815}
1835 1816
1836static struct rtable *rt_dst_alloc(bool nopolicy, bool noxfrm) 1817static struct rtable *rt_dst_alloc(struct net_device *dev,
1818 bool nopolicy, bool noxfrm)
1837{ 1819{
1838 struct rtable *rt = dst_alloc(&ipv4_dst_ops, 1); 1820 return dst_alloc(&ipv4_dst_ops, dev, 1, -1,
1839 if (rt) { 1821 DST_HOST |
1840 rt->dst.obsolete = -1; 1822 (nopolicy ? DST_NOPOLICY : 0) |
1841 1823 (noxfrm ? DST_NOXFRM : 0));
1842 rt->dst.flags = DST_HOST |
1843 (nopolicy ? DST_NOPOLICY : 0) |
1844 (noxfrm ? DST_NOXFRM : 0);
1845 }
1846 return rt;
1847} 1824}
1848 1825
1849/* called in rcu_read_lock() section */ 1826/* called in rcu_read_lock() section */
@@ -1871,36 +1848,38 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1871 goto e_inval; 1848 goto e_inval;
1872 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK); 1849 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
1873 } else { 1850 } else {
1874 err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst, 1851 err = fib_validate_source(skb, saddr, 0, tos, 0, dev, &spec_dst,
1875 &itag, 0); 1852 &itag);
1876 if (err < 0) 1853 if (err < 0)
1877 goto e_err; 1854 goto e_err;
1878 } 1855 }
1879 rth = rt_dst_alloc(IN_DEV_CONF_GET(in_dev, NOPOLICY), false); 1856 rth = rt_dst_alloc(init_net.loopback_dev,
1857 IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
1880 if (!rth) 1858 if (!rth)
1881 goto e_nobufs; 1859 goto e_nobufs;
1882 1860
1861#ifdef CONFIG_IP_ROUTE_CLASSID
1862 rth->dst.tclassid = itag;
1863#endif
1883 rth->dst.output = ip_rt_bug; 1864 rth->dst.output = ip_rt_bug;
1884 1865
1885 rth->rt_key_dst = daddr; 1866 rth->rt_key_dst = daddr;
1886 rth->rt_dst = daddr;
1887 rth->rt_tos = tos;
1888 rth->rt_mark = skb->mark;
1889 rth->rt_key_src = saddr; 1867 rth->rt_key_src = saddr;
1868 rth->rt_genid = rt_genid(dev_net(dev));
1869 rth->rt_flags = RTCF_MULTICAST;
1870 rth->rt_type = RTN_MULTICAST;
1871 rth->rt_key_tos = tos;
1872 rth->rt_dst = daddr;
1890 rth->rt_src = saddr; 1873 rth->rt_src = saddr;
1891#ifdef CONFIG_IP_ROUTE_CLASSID
1892 rth->dst.tclassid = itag;
1893#endif
1894 rth->rt_route_iif = dev->ifindex; 1874 rth->rt_route_iif = dev->ifindex;
1895 rth->rt_iif = dev->ifindex; 1875 rth->rt_iif = dev->ifindex;
1896 rth->dst.dev = init_net.loopback_dev;
1897 dev_hold(rth->dst.dev);
1898 rth->rt_oif = 0; 1876 rth->rt_oif = 0;
1877 rth->rt_mark = skb->mark;
1899 rth->rt_gateway = daddr; 1878 rth->rt_gateway = daddr;
1900 rth->rt_spec_dst= spec_dst; 1879 rth->rt_spec_dst= spec_dst;
1901 rth->rt_genid = rt_genid(dev_net(dev)); 1880 rth->rt_peer_genid = 0;
1902 rth->rt_flags = RTCF_MULTICAST; 1881 rth->peer = NULL;
1903 rth->rt_type = RTN_MULTICAST; 1882 rth->fi = NULL;
1904 if (our) { 1883 if (our) {
1905 rth->dst.input= ip_local_deliver; 1884 rth->dst.input= ip_local_deliver;
1906 rth->rt_flags |= RTCF_LOCAL; 1885 rth->rt_flags |= RTCF_LOCAL;
@@ -1981,8 +1960,8 @@ static int __mkroute_input(struct sk_buff *skb,
1981 } 1960 }
1982 1961
1983 1962
1984 err = fib_validate_source(saddr, daddr, tos, FIB_RES_OIF(*res), 1963 err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
1985 in_dev->dev, &spec_dst, &itag, skb->mark); 1964 in_dev->dev, &spec_dst, &itag);
1986 if (err < 0) { 1965 if (err < 0) {
1987 ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr, 1966 ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
1988 saddr); 1967 saddr);
@@ -2013,7 +1992,8 @@ static int __mkroute_input(struct sk_buff *skb,
2013 } 1992 }
2014 } 1993 }
2015 1994
2016 rth = rt_dst_alloc(IN_DEV_CONF_GET(in_dev, NOPOLICY), 1995 rth = rt_dst_alloc(out_dev->dev,
1996 IN_DEV_CONF_GET(in_dev, NOPOLICY),
2017 IN_DEV_CONF_GET(out_dev, NOXFRM)); 1997 IN_DEV_CONF_GET(out_dev, NOXFRM));
2018 if (!rth) { 1998 if (!rth) {
2019 err = -ENOBUFS; 1999 err = -ENOBUFS;
@@ -2021,27 +2001,28 @@ static int __mkroute_input(struct sk_buff *skb,
2021 } 2001 }
2022 2002
2023 rth->rt_key_dst = daddr; 2003 rth->rt_key_dst = daddr;
2024 rth->rt_dst = daddr;
2025 rth->rt_tos = tos;
2026 rth->rt_mark = skb->mark;
2027 rth->rt_key_src = saddr; 2004 rth->rt_key_src = saddr;
2005 rth->rt_genid = rt_genid(dev_net(rth->dst.dev));
2006 rth->rt_flags = flags;
2007 rth->rt_type = res->type;
2008 rth->rt_key_tos = tos;
2009 rth->rt_dst = daddr;
2028 rth->rt_src = saddr; 2010 rth->rt_src = saddr;
2029 rth->rt_gateway = daddr;
2030 rth->rt_route_iif = in_dev->dev->ifindex; 2011 rth->rt_route_iif = in_dev->dev->ifindex;
2031 rth->rt_iif = in_dev->dev->ifindex; 2012 rth->rt_iif = in_dev->dev->ifindex;
2032 rth->dst.dev = (out_dev)->dev;
2033 dev_hold(rth->dst.dev);
2034 rth->rt_oif = 0; 2013 rth->rt_oif = 0;
2014 rth->rt_mark = skb->mark;
2015 rth->rt_gateway = daddr;
2035 rth->rt_spec_dst= spec_dst; 2016 rth->rt_spec_dst= spec_dst;
2017 rth->rt_peer_genid = 0;
2018 rth->peer = NULL;
2019 rth->fi = NULL;
2036 2020
2037 rth->dst.input = ip_forward; 2021 rth->dst.input = ip_forward;
2038 rth->dst.output = ip_output; 2022 rth->dst.output = ip_output;
2039 rth->rt_genid = rt_genid(dev_net(rth->dst.dev));
2040 2023
2041 rt_set_nexthop(rth, NULL, res, res->fi, res->type, itag); 2024 rt_set_nexthop(rth, NULL, res, res->fi, res->type, itag);
2042 2025
2043 rth->rt_flags = flags;
2044
2045 *result = rth; 2026 *result = rth;
2046 err = 0; 2027 err = 0;
2047 cleanup: 2028 cleanup:
@@ -2150,9 +2131,9 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2150 goto brd_input; 2131 goto brd_input;
2151 2132
2152 if (res.type == RTN_LOCAL) { 2133 if (res.type == RTN_LOCAL) {
2153 err = fib_validate_source(saddr, daddr, tos, 2134 err = fib_validate_source(skb, saddr, daddr, tos,
2154 net->loopback_dev->ifindex, 2135 net->loopback_dev->ifindex,
2155 dev, &spec_dst, &itag, skb->mark); 2136 dev, &spec_dst, &itag);
2156 if (err < 0) 2137 if (err < 0)
2157 goto martian_source_keep_err; 2138 goto martian_source_keep_err;
2158 if (err) 2139 if (err)
@@ -2176,8 +2157,8 @@ brd_input:
2176 if (ipv4_is_zeronet(saddr)) 2157 if (ipv4_is_zeronet(saddr))
2177 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK); 2158 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
2178 else { 2159 else {
2179 err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst, 2160 err = fib_validate_source(skb, saddr, 0, tos, 0, dev, &spec_dst,
2180 &itag, skb->mark); 2161 &itag);
2181 if (err < 0) 2162 if (err < 0)
2182 goto martian_source_keep_err; 2163 goto martian_source_keep_err;
2183 if (err) 2164 if (err)
@@ -2188,36 +2169,42 @@ brd_input:
2188 RT_CACHE_STAT_INC(in_brd); 2169 RT_CACHE_STAT_INC(in_brd);
2189 2170
2190local_input: 2171local_input:
2191 rth = rt_dst_alloc(IN_DEV_CONF_GET(in_dev, NOPOLICY), false); 2172 rth = rt_dst_alloc(net->loopback_dev,
2173 IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
2192 if (!rth) 2174 if (!rth)
2193 goto e_nobufs; 2175 goto e_nobufs;
2194 2176
2177 rth->dst.input= ip_local_deliver;
2195 rth->dst.output= ip_rt_bug; 2178 rth->dst.output= ip_rt_bug;
2196 rth->rt_genid = rt_genid(net); 2179#ifdef CONFIG_IP_ROUTE_CLASSID
2180 rth->dst.tclassid = itag;
2181#endif
2197 2182
2198 rth->rt_key_dst = daddr; 2183 rth->rt_key_dst = daddr;
2199 rth->rt_dst = daddr;
2200 rth->rt_tos = tos;
2201 rth->rt_mark = skb->mark;
2202 rth->rt_key_src = saddr; 2184 rth->rt_key_src = saddr;
2185 rth->rt_genid = rt_genid(net);
2186 rth->rt_flags = flags|RTCF_LOCAL;
2187 rth->rt_type = res.type;
2188 rth->rt_key_tos = tos;
2189 rth->rt_dst = daddr;
2203 rth->rt_src = saddr; 2190 rth->rt_src = saddr;
2204#ifdef CONFIG_IP_ROUTE_CLASSID 2191#ifdef CONFIG_IP_ROUTE_CLASSID
2205 rth->dst.tclassid = itag; 2192 rth->dst.tclassid = itag;
2206#endif 2193#endif
2207 rth->rt_route_iif = dev->ifindex; 2194 rth->rt_route_iif = dev->ifindex;
2208 rth->rt_iif = dev->ifindex; 2195 rth->rt_iif = dev->ifindex;
2209 rth->dst.dev = net->loopback_dev; 2196 rth->rt_oif = 0;
2210 dev_hold(rth->dst.dev); 2197 rth->rt_mark = skb->mark;
2211 rth->rt_gateway = daddr; 2198 rth->rt_gateway = daddr;
2212 rth->rt_spec_dst= spec_dst; 2199 rth->rt_spec_dst= spec_dst;
2213 rth->dst.input= ip_local_deliver; 2200 rth->rt_peer_genid = 0;
2214 rth->rt_flags = flags|RTCF_LOCAL; 2201 rth->peer = NULL;
2202 rth->fi = NULL;
2215 if (res.type == RTN_UNREACHABLE) { 2203 if (res.type == RTN_UNREACHABLE) {
2216 rth->dst.input= ip_error; 2204 rth->dst.input= ip_error;
2217 rth->dst.error= -err; 2205 rth->dst.error= -err;
2218 rth->rt_flags &= ~RTCF_LOCAL; 2206 rth->rt_flags &= ~RTCF_LOCAL;
2219 } 2207 }
2220 rth->rt_type = res.type;
2221 hash = rt_hash(daddr, saddr, fl4.flowi4_iif, rt_genid(net)); 2208 hash = rt_hash(daddr, saddr, fl4.flowi4_iif, rt_genid(net));
2222 rth = rt_intern_hash(hash, rth, skb, fl4.flowi4_iif); 2209 rth = rt_intern_hash(hash, rth, skb, fl4.flowi4_iif);
2223 err = 0; 2210 err = 0;
@@ -2288,7 +2275,7 @@ int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2288 ((__force u32)rth->rt_key_src ^ (__force u32)saddr) | 2275 ((__force u32)rth->rt_key_src ^ (__force u32)saddr) |
2289 (rth->rt_iif ^ iif) | 2276 (rth->rt_iif ^ iif) |
2290 rth->rt_oif | 2277 rth->rt_oif |
2291 (rth->rt_tos ^ tos)) == 0 && 2278 (rth->rt_key_tos ^ tos)) == 0 &&
2292 rth->rt_mark == skb->mark && 2279 rth->rt_mark == skb->mark &&
2293 net_eq(dev_net(rth->dst.dev), net) && 2280 net_eq(dev_net(rth->dst.dev), net) &&
2294 !rt_is_expired(rth)) { 2281 !rt_is_expired(rth)) {
@@ -2349,12 +2336,12 @@ EXPORT_SYMBOL(ip_route_input_common);
2349/* called with rcu_read_lock() */ 2336/* called with rcu_read_lock() */
2350static struct rtable *__mkroute_output(const struct fib_result *res, 2337static struct rtable *__mkroute_output(const struct fib_result *res,
2351 const struct flowi4 *fl4, 2338 const struct flowi4 *fl4,
2352 const struct flowi4 *oldflp4, 2339 __be32 orig_daddr, __be32 orig_saddr,
2353 struct net_device *dev_out, 2340 int orig_oif, struct net_device *dev_out,
2354 unsigned int flags) 2341 unsigned int flags)
2355{ 2342{
2356 struct fib_info *fi = res->fi; 2343 struct fib_info *fi = res->fi;
2357 u32 tos = RT_FL_TOS(oldflp4); 2344 u32 tos = RT_FL_TOS(fl4);
2358 struct in_device *in_dev; 2345 struct in_device *in_dev;
2359 u16 type = res->type; 2346 u16 type = res->type;
2360 struct rtable *rth; 2347 struct rtable *rth;
@@ -2381,8 +2368,8 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
2381 fi = NULL; 2368 fi = NULL;
2382 } else if (type == RTN_MULTICAST) { 2369 } else if (type == RTN_MULTICAST) {
2383 flags |= RTCF_MULTICAST | RTCF_LOCAL; 2370 flags |= RTCF_MULTICAST | RTCF_LOCAL;
2384 if (!ip_check_mc_rcu(in_dev, oldflp4->daddr, oldflp4->saddr, 2371 if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
2385 oldflp4->flowi4_proto)) 2372 fl4->flowi4_proto))
2386 flags &= ~RTCF_LOCAL; 2373 flags &= ~RTCF_LOCAL;
2387 /* If multicast route do not exist use 2374 /* If multicast route do not exist use
2388 * default one, but do not gateway in this case. 2375 * default one, but do not gateway in this case.
@@ -2392,29 +2379,31 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
2392 fi = NULL; 2379 fi = NULL;
2393 } 2380 }
2394 2381
2395 rth = rt_dst_alloc(IN_DEV_CONF_GET(in_dev, NOPOLICY), 2382 rth = rt_dst_alloc(dev_out,
2383 IN_DEV_CONF_GET(in_dev, NOPOLICY),
2396 IN_DEV_CONF_GET(in_dev, NOXFRM)); 2384 IN_DEV_CONF_GET(in_dev, NOXFRM));
2397 if (!rth) 2385 if (!rth)
2398 return ERR_PTR(-ENOBUFS); 2386 return ERR_PTR(-ENOBUFS);
2399 2387
2400 rth->rt_key_dst = oldflp4->daddr; 2388 rth->dst.output = ip_output;
2401 rth->rt_tos = tos; 2389
2402 rth->rt_key_src = oldflp4->saddr; 2390 rth->rt_key_dst = orig_daddr;
2403 rth->rt_oif = oldflp4->flowi4_oif; 2391 rth->rt_key_src = orig_saddr;
2404 rth->rt_mark = oldflp4->flowi4_mark; 2392 rth->rt_genid = rt_genid(dev_net(dev_out));
2393 rth->rt_flags = flags;
2394 rth->rt_type = type;
2395 rth->rt_key_tos = tos;
2405 rth->rt_dst = fl4->daddr; 2396 rth->rt_dst = fl4->daddr;
2406 rth->rt_src = fl4->saddr; 2397 rth->rt_src = fl4->saddr;
2407 rth->rt_route_iif = 0; 2398 rth->rt_route_iif = 0;
2408 rth->rt_iif = oldflp4->flowi4_oif ? : dev_out->ifindex; 2399 rth->rt_iif = orig_oif ? : dev_out->ifindex;
2409 /* get references to the devices that are to be hold by the routing 2400 rth->rt_oif = orig_oif;
2410 cache entry */ 2401 rth->rt_mark = fl4->flowi4_mark;
2411 rth->dst.dev = dev_out;
2412 dev_hold(dev_out);
2413 rth->rt_gateway = fl4->daddr; 2402 rth->rt_gateway = fl4->daddr;
2414 rth->rt_spec_dst= fl4->saddr; 2403 rth->rt_spec_dst= fl4->saddr;
2415 2404 rth->rt_peer_genid = 0;
2416 rth->dst.output=ip_output; 2405 rth->peer = NULL;
2417 rth->rt_genid = rt_genid(dev_net(dev_out)); 2406 rth->fi = NULL;
2418 2407
2419 RT_CACHE_STAT_INC(out_slow_tot); 2408 RT_CACHE_STAT_INC(out_slow_tot);
2420 2409
@@ -2432,7 +2421,7 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
2432#ifdef CONFIG_IP_MROUTE 2421#ifdef CONFIG_IP_MROUTE
2433 if (type == RTN_MULTICAST) { 2422 if (type == RTN_MULTICAST) {
2434 if (IN_DEV_MFORWARD(in_dev) && 2423 if (IN_DEV_MFORWARD(in_dev) &&
2435 !ipv4_is_local_multicast(oldflp4->daddr)) { 2424 !ipv4_is_local_multicast(fl4->daddr)) {
2436 rth->dst.input = ip_mr_input; 2425 rth->dst.input = ip_mr_input;
2437 rth->dst.output = ip_mc_output; 2426 rth->dst.output = ip_mc_output;
2438 } 2427 }
@@ -2440,9 +2429,8 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
2440#endif 2429#endif
2441 } 2430 }
2442 2431
2443 rt_set_nexthop(rth, oldflp4, res, fi, type, 0); 2432 rt_set_nexthop(rth, fl4, res, fi, type, 0);
2444 2433
2445 rth->rt_flags = flags;
2446 return rth; 2434 return rth;
2447} 2435}
2448 2436
@@ -2451,36 +2439,37 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
2451 * called with rcu_read_lock(); 2439 * called with rcu_read_lock();
2452 */ 2440 */
2453 2441
2454static struct rtable *ip_route_output_slow(struct net *net, 2442static struct rtable *ip_route_output_slow(struct net *net, struct flowi4 *fl4)
2455 const struct flowi4 *oldflp4)
2456{ 2443{
2457 u32 tos = RT_FL_TOS(oldflp4);
2458 struct flowi4 fl4;
2459 struct fib_result res;
2460 unsigned int flags = 0;
2461 struct net_device *dev_out = NULL; 2444 struct net_device *dev_out = NULL;
2445 u32 tos = RT_FL_TOS(fl4);
2446 unsigned int flags = 0;
2447 struct fib_result res;
2462 struct rtable *rth; 2448 struct rtable *rth;
2449 __be32 orig_daddr;
2450 __be32 orig_saddr;
2451 int orig_oif;
2463 2452
2464 res.fi = NULL; 2453 res.fi = NULL;
2465#ifdef CONFIG_IP_MULTIPLE_TABLES 2454#ifdef CONFIG_IP_MULTIPLE_TABLES
2466 res.r = NULL; 2455 res.r = NULL;
2467#endif 2456#endif
2468 2457
2469 fl4.flowi4_oif = oldflp4->flowi4_oif; 2458 orig_daddr = fl4->daddr;
2470 fl4.flowi4_iif = net->loopback_dev->ifindex; 2459 orig_saddr = fl4->saddr;
2471 fl4.flowi4_mark = oldflp4->flowi4_mark; 2460 orig_oif = fl4->flowi4_oif;
2472 fl4.daddr = oldflp4->daddr; 2461
2473 fl4.saddr = oldflp4->saddr; 2462 fl4->flowi4_iif = net->loopback_dev->ifindex;
2474 fl4.flowi4_tos = tos & IPTOS_RT_MASK; 2463 fl4->flowi4_tos = tos & IPTOS_RT_MASK;
2475 fl4.flowi4_scope = ((tos & RTO_ONLINK) ? 2464 fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
2476 RT_SCOPE_LINK : RT_SCOPE_UNIVERSE); 2465 RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);
2477 2466
2478 rcu_read_lock(); 2467 rcu_read_lock();
2479 if (oldflp4->saddr) { 2468 if (fl4->saddr) {
2480 rth = ERR_PTR(-EINVAL); 2469 rth = ERR_PTR(-EINVAL);
2481 if (ipv4_is_multicast(oldflp4->saddr) || 2470 if (ipv4_is_multicast(fl4->saddr) ||
2482 ipv4_is_lbcast(oldflp4->saddr) || 2471 ipv4_is_lbcast(fl4->saddr) ||
2483 ipv4_is_zeronet(oldflp4->saddr)) 2472 ipv4_is_zeronet(fl4->saddr))
2484 goto out; 2473 goto out;
2485 2474
2486 /* I removed check for oif == dev_out->oif here. 2475 /* I removed check for oif == dev_out->oif here.
@@ -2491,11 +2480,11 @@ static struct rtable *ip_route_output_slow(struct net *net,
2491 of another iface. --ANK 2480 of another iface. --ANK
2492 */ 2481 */
2493 2482
2494 if (oldflp4->flowi4_oif == 0 && 2483 if (fl4->flowi4_oif == 0 &&
2495 (ipv4_is_multicast(oldflp4->daddr) || 2484 (ipv4_is_multicast(fl4->daddr) ||
2496 ipv4_is_lbcast(oldflp4->daddr))) { 2485 ipv4_is_lbcast(fl4->daddr))) {
2497 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */ 2486 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2498 dev_out = __ip_dev_find(net, oldflp4->saddr, false); 2487 dev_out = __ip_dev_find(net, fl4->saddr, false);
2499 if (dev_out == NULL) 2488 if (dev_out == NULL)
2500 goto out; 2489 goto out;
2501 2490
@@ -2514,20 +2503,20 @@ static struct rtable *ip_route_output_slow(struct net *net,
2514 Luckily, this hack is good workaround. 2503 Luckily, this hack is good workaround.
2515 */ 2504 */
2516 2505
2517 fl4.flowi4_oif = dev_out->ifindex; 2506 fl4->flowi4_oif = dev_out->ifindex;
2518 goto make_route; 2507 goto make_route;
2519 } 2508 }
2520 2509
2521 if (!(oldflp4->flowi4_flags & FLOWI_FLAG_ANYSRC)) { 2510 if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
2522 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */ 2511 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2523 if (!__ip_dev_find(net, oldflp4->saddr, false)) 2512 if (!__ip_dev_find(net, fl4->saddr, false))
2524 goto out; 2513 goto out;
2525 } 2514 }
2526 } 2515 }
2527 2516
2528 2517
2529 if (oldflp4->flowi4_oif) { 2518 if (fl4->flowi4_oif) {
2530 dev_out = dev_get_by_index_rcu(net, oldflp4->flowi4_oif); 2519 dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
2531 rth = ERR_PTR(-ENODEV); 2520 rth = ERR_PTR(-ENODEV);
2532 if (dev_out == NULL) 2521 if (dev_out == NULL)
2533 goto out; 2522 goto out;
@@ -2537,37 +2526,37 @@ static struct rtable *ip_route_output_slow(struct net *net,
2537 rth = ERR_PTR(-ENETUNREACH); 2526 rth = ERR_PTR(-ENETUNREACH);
2538 goto out; 2527 goto out;
2539 } 2528 }
2540 if (ipv4_is_local_multicast(oldflp4->daddr) || 2529 if (ipv4_is_local_multicast(fl4->daddr) ||
2541 ipv4_is_lbcast(oldflp4->daddr)) { 2530 ipv4_is_lbcast(fl4->daddr)) {
2542 if (!fl4.saddr) 2531 if (!fl4->saddr)
2543 fl4.saddr = inet_select_addr(dev_out, 0, 2532 fl4->saddr = inet_select_addr(dev_out, 0,
2544 RT_SCOPE_LINK); 2533 RT_SCOPE_LINK);
2545 goto make_route; 2534 goto make_route;
2546 } 2535 }
2547 if (!fl4.saddr) { 2536 if (!fl4->saddr) {
2548 if (ipv4_is_multicast(oldflp4->daddr)) 2537 if (ipv4_is_multicast(fl4->daddr))
2549 fl4.saddr = inet_select_addr(dev_out, 0, 2538 fl4->saddr = inet_select_addr(dev_out, 0,
2550 fl4.flowi4_scope); 2539 fl4->flowi4_scope);
2551 else if (!oldflp4->daddr) 2540 else if (!fl4->daddr)
2552 fl4.saddr = inet_select_addr(dev_out, 0, 2541 fl4->saddr = inet_select_addr(dev_out, 0,
2553 RT_SCOPE_HOST); 2542 RT_SCOPE_HOST);
2554 } 2543 }
2555 } 2544 }
2556 2545
2557 if (!fl4.daddr) { 2546 if (!fl4->daddr) {
2558 fl4.daddr = fl4.saddr; 2547 fl4->daddr = fl4->saddr;
2559 if (!fl4.daddr) 2548 if (!fl4->daddr)
2560 fl4.daddr = fl4.saddr = htonl(INADDR_LOOPBACK); 2549 fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
2561 dev_out = net->loopback_dev; 2550 dev_out = net->loopback_dev;
2562 fl4.flowi4_oif = net->loopback_dev->ifindex; 2551 fl4->flowi4_oif = net->loopback_dev->ifindex;
2563 res.type = RTN_LOCAL; 2552 res.type = RTN_LOCAL;
2564 flags |= RTCF_LOCAL; 2553 flags |= RTCF_LOCAL;
2565 goto make_route; 2554 goto make_route;
2566 } 2555 }
2567 2556
2568 if (fib_lookup(net, &fl4, &res)) { 2557 if (fib_lookup(net, fl4, &res)) {
2569 res.fi = NULL; 2558 res.fi = NULL;
2570 if (oldflp4->flowi4_oif) { 2559 if (fl4->flowi4_oif) {
2571 /* Apparently, routing tables are wrong. Assume, 2560 /* Apparently, routing tables are wrong. Assume,
2572 that the destination is on link. 2561 that the destination is on link.
2573 2562
@@ -2586,9 +2575,9 @@ static struct rtable *ip_route_output_slow(struct net *net,
2586 likely IPv6, but we do not. 2575 likely IPv6, but we do not.
2587 */ 2576 */
2588 2577
2589 if (fl4.saddr == 0) 2578 if (fl4->saddr == 0)
2590 fl4.saddr = inet_select_addr(dev_out, 0, 2579 fl4->saddr = inet_select_addr(dev_out, 0,
2591 RT_SCOPE_LINK); 2580 RT_SCOPE_LINK);
2592 res.type = RTN_UNICAST; 2581 res.type = RTN_UNICAST;
2593 goto make_route; 2582 goto make_route;
2594 } 2583 }
@@ -2597,42 +2586,45 @@ static struct rtable *ip_route_output_slow(struct net *net,
2597 } 2586 }
2598 2587
2599 if (res.type == RTN_LOCAL) { 2588 if (res.type == RTN_LOCAL) {
2600 if (!fl4.saddr) { 2589 if (!fl4->saddr) {
2601 if (res.fi->fib_prefsrc) 2590 if (res.fi->fib_prefsrc)
2602 fl4.saddr = res.fi->fib_prefsrc; 2591 fl4->saddr = res.fi->fib_prefsrc;
2603 else 2592 else
2604 fl4.saddr = fl4.daddr; 2593 fl4->saddr = fl4->daddr;
2605 } 2594 }
2606 dev_out = net->loopback_dev; 2595 dev_out = net->loopback_dev;
2607 fl4.flowi4_oif = dev_out->ifindex; 2596 fl4->flowi4_oif = dev_out->ifindex;
2608 res.fi = NULL; 2597 res.fi = NULL;
2609 flags |= RTCF_LOCAL; 2598 flags |= RTCF_LOCAL;
2610 goto make_route; 2599 goto make_route;
2611 } 2600 }
2612 2601
2613#ifdef CONFIG_IP_ROUTE_MULTIPATH 2602#ifdef CONFIG_IP_ROUTE_MULTIPATH
2614 if (res.fi->fib_nhs > 1 && fl4.flowi4_oif == 0) 2603 if (res.fi->fib_nhs > 1 && fl4->flowi4_oif == 0)
2615 fib_select_multipath(&res); 2604 fib_select_multipath(&res);
2616 else 2605 else
2617#endif 2606#endif
2618 if (!res.prefixlen && res.type == RTN_UNICAST && !fl4.flowi4_oif) 2607 if (!res.prefixlen &&
2608 res.table->tb_num_default > 1 &&
2609 res.type == RTN_UNICAST && !fl4->flowi4_oif)
2619 fib_select_default(&res); 2610 fib_select_default(&res);
2620 2611
2621 if (!fl4.saddr) 2612 if (!fl4->saddr)
2622 fl4.saddr = FIB_RES_PREFSRC(net, res); 2613 fl4->saddr = FIB_RES_PREFSRC(net, res);
2623 2614
2624 dev_out = FIB_RES_DEV(res); 2615 dev_out = FIB_RES_DEV(res);
2625 fl4.flowi4_oif = dev_out->ifindex; 2616 fl4->flowi4_oif = dev_out->ifindex;
2626 2617
2627 2618
2628make_route: 2619make_route:
2629 rth = __mkroute_output(&res, &fl4, oldflp4, dev_out, flags); 2620 rth = __mkroute_output(&res, fl4, orig_daddr, orig_saddr, orig_oif,
2621 dev_out, flags);
2630 if (!IS_ERR(rth)) { 2622 if (!IS_ERR(rth)) {
2631 unsigned int hash; 2623 unsigned int hash;
2632 2624
2633 hash = rt_hash(oldflp4->daddr, oldflp4->saddr, oldflp4->flowi4_oif, 2625 hash = rt_hash(orig_daddr, orig_saddr, orig_oif,
2634 rt_genid(dev_net(dev_out))); 2626 rt_genid(dev_net(dev_out)));
2635 rth = rt_intern_hash(hash, rth, NULL, oldflp4->flowi4_oif); 2627 rth = rt_intern_hash(hash, rth, NULL, orig_oif);
2636 } 2628 }
2637 2629
2638out: 2630out:
@@ -2640,7 +2632,7 @@ out:
2640 return rth; 2632 return rth;
2641} 2633}
2642 2634
2643struct rtable *__ip_route_output_key(struct net *net, const struct flowi4 *flp4) 2635struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *flp4)
2644{ 2636{
2645 struct rtable *rth; 2637 struct rtable *rth;
2646 unsigned int hash; 2638 unsigned int hash;
@@ -2658,13 +2650,17 @@ struct rtable *__ip_route_output_key(struct net *net, const struct flowi4 *flp4)
2658 rt_is_output_route(rth) && 2650 rt_is_output_route(rth) &&
2659 rth->rt_oif == flp4->flowi4_oif && 2651 rth->rt_oif == flp4->flowi4_oif &&
2660 rth->rt_mark == flp4->flowi4_mark && 2652 rth->rt_mark == flp4->flowi4_mark &&
2661 !((rth->rt_tos ^ flp4->flowi4_tos) & 2653 !((rth->rt_key_tos ^ flp4->flowi4_tos) &
2662 (IPTOS_RT_MASK | RTO_ONLINK)) && 2654 (IPTOS_RT_MASK | RTO_ONLINK)) &&
2663 net_eq(dev_net(rth->dst.dev), net) && 2655 net_eq(dev_net(rth->dst.dev), net) &&
2664 !rt_is_expired(rth)) { 2656 !rt_is_expired(rth)) {
2665 dst_use(&rth->dst, jiffies); 2657 dst_use(&rth->dst, jiffies);
2666 RT_CACHE_STAT_INC(out_hit); 2658 RT_CACHE_STAT_INC(out_hit);
2667 rcu_read_unlock_bh(); 2659 rcu_read_unlock_bh();
2660 if (!flp4->saddr)
2661 flp4->saddr = rth->rt_src;
2662 if (!flp4->daddr)
2663 flp4->daddr = rth->rt_dst;
2668 return rth; 2664 return rth;
2669 } 2665 }
2670 RT_CACHE_STAT_INC(out_hlist_search); 2666 RT_CACHE_STAT_INC(out_hlist_search);
@@ -2709,7 +2705,7 @@ static struct dst_ops ipv4_dst_blackhole_ops = {
2709 2705
2710struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig) 2706struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
2711{ 2707{
2712 struct rtable *rt = dst_alloc(&ipv4_dst_blackhole_ops, 1); 2708 struct rtable *rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, 0, 0);
2713 struct rtable *ort = (struct rtable *) dst_orig; 2709 struct rtable *ort = (struct rtable *) dst_orig;
2714 2710
2715 if (rt) { 2711 if (rt) {
@@ -2726,7 +2722,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
2726 2722
2727 rt->rt_key_dst = ort->rt_key_dst; 2723 rt->rt_key_dst = ort->rt_key_dst;
2728 rt->rt_key_src = ort->rt_key_src; 2724 rt->rt_key_src = ort->rt_key_src;
2729 rt->rt_tos = ort->rt_tos; 2725 rt->rt_key_tos = ort->rt_key_tos;
2730 rt->rt_route_iif = ort->rt_route_iif; 2726 rt->rt_route_iif = ort->rt_route_iif;
2731 rt->rt_iif = ort->rt_iif; 2727 rt->rt_iif = ort->rt_iif;
2732 rt->rt_oif = ort->rt_oif; 2728 rt->rt_oif = ort->rt_oif;
@@ -2762,15 +2758,10 @@ struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
2762 if (IS_ERR(rt)) 2758 if (IS_ERR(rt))
2763 return rt; 2759 return rt;
2764 2760
2765 if (flp4->flowi4_proto) { 2761 if (flp4->flowi4_proto)
2766 if (!flp4->saddr)
2767 flp4->saddr = rt->rt_src;
2768 if (!flp4->daddr)
2769 flp4->daddr = rt->rt_dst;
2770 rt = (struct rtable *) xfrm_lookup(net, &rt->dst, 2762 rt = (struct rtable *) xfrm_lookup(net, &rt->dst,
2771 flowi4_to_flowi(flp4), 2763 flowi4_to_flowi(flp4),
2772 sk, 0); 2764 sk, 0);
2773 }
2774 2765
2775 return rt; 2766 return rt;
2776} 2767}
@@ -2794,7 +2785,7 @@ static int rt_fill_info(struct net *net,
2794 r->rtm_family = AF_INET; 2785 r->rtm_family = AF_INET;
2795 r->rtm_dst_len = 32; 2786 r->rtm_dst_len = 32;
2796 r->rtm_src_len = 0; 2787 r->rtm_src_len = 0;
2797 r->rtm_tos = rt->rt_tos; 2788 r->rtm_tos = rt->rt_key_tos;
2798 r->rtm_table = RT_TABLE_MAIN; 2789 r->rtm_table = RT_TABLE_MAIN;
2799 NLA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN); 2790 NLA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN);
2800 r->rtm_type = rt->rt_type; 2791 r->rtm_type = rt->rt_type;
@@ -2848,7 +2839,9 @@ static int rt_fill_info(struct net *net,
2848 2839
2849 if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) && 2840 if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
2850 IPV4_DEVCONF_ALL(net, MC_FORWARDING)) { 2841 IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
2851 int err = ipmr_get_route(net, skb, r, nowait); 2842 int err = ipmr_get_route(net, skb,
2843 rt->rt_src, rt->rt_dst,
2844 r, nowait);
2852 if (err <= 0) { 2845 if (err <= 0) {
2853 if (!nowait) { 2846 if (!nowait) {
2854 if (err == 0) 2847 if (err == 0)
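
The route.c changes above convert output route lookup to work on the caller's flowi4 in place: ip_route_output_slow() saves orig_daddr/orig_saddr/orig_oif up front for the cache hash, then fills in the source address and interface it actually chose, and __ip_route_output_key() backfills saddr/daddr on cache hits. A small sketch of that save-then-complete convention, with simplified types and invented values:

#include <stdio.h>
#include <stdint.h>

struct flow_key {
	uint32_t daddr, saddr;
	int oif;
};

static void lookup(struct flow_key *fl)
{
	/* Capture the request before mutating it, as ip_route_output_slow()
	 * now does with orig_daddr/orig_saddr/orig_oif. */
	struct flow_key orig = *fl;

	if (!fl->saddr)
		fl->saddr = 0x0100007F;	/* pretend source selection chose 127.0.0.1 */
	if (!fl->oif)
		fl->oif = 1;		/* pretend device resolution chose ifindex 1 */

	printf("cache key:  daddr=%08x saddr=%08x oif=%d\n",
	       orig.daddr, orig.saddr, orig.oif);
	printf("completed:  daddr=%08x saddr=%08x oif=%d\n",
	       fl->daddr, fl->saddr, fl->oif);
}

int main(void)
{
	struct flow_key fl = { .daddr = 0x08080808 };	/* saddr/oif unset */

	lookup(&fl);
	/* The caller can now keep using fl with the addresses the route
	 * actually chose — the point of passing it by pointer. */
	return 0;
}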
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 8b44c6d2a79b..26461492a847 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -321,10 +321,10 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
321 * the ACK carries the same options again (see RFC1122 4.2.3.8) 321 * the ACK carries the same options again (see RFC1122 4.2.3.8)
322 */ 322 */
323 if (opt && opt->optlen) { 323 if (opt && opt->optlen) {
324 int opt_size = sizeof(struct ip_options) + opt->optlen; 324 int opt_size = sizeof(struct ip_options_rcu) + opt->optlen;
325 325
326 ireq->opt = kmalloc(opt_size, GFP_ATOMIC); 326 ireq->opt = kmalloc(opt_size, GFP_ATOMIC);
327 if (ireq->opt != NULL && ip_options_echo(ireq->opt, skb)) { 327 if (ireq->opt != NULL && ip_options_echo(&ireq->opt->opt, skb)) {
328 kfree(ireq->opt); 328 kfree(ireq->opt);
329 ireq->opt = NULL; 329 ireq->opt = NULL;
330 } 330 }
@@ -345,17 +345,13 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
345 * no easy way to do this. 345 * no easy way to do this.
346 */ 346 */
347 { 347 {
348 struct flowi4 fl4 = { 348 struct flowi4 fl4;
349 .flowi4_mark = sk->sk_mark, 349
350 .daddr = ((opt && opt->srr) ? 350 flowi4_init_output(&fl4, 0, sk->sk_mark, RT_CONN_FLAGS(sk),
351 opt->faddr : ireq->rmt_addr), 351 RT_SCOPE_UNIVERSE, IPPROTO_TCP,
352 .saddr = ireq->loc_addr, 352 inet_sk_flowi_flags(sk),
353 .flowi4_tos = RT_CONN_FLAGS(sk), 353 (opt && opt->srr) ? opt->faddr : ireq->rmt_addr,
354 .flowi4_proto = IPPROTO_TCP, 354 ireq->loc_addr, th->source, th->dest);
355 .flowi4_flags = inet_sk_flowi_flags(sk),
356 .fl4_sport = th->dest,
357 .fl4_dport = th->source,
358 };
359 security_req_classify_flow(req, flowi4_to_flowi(&fl4)); 355 security_req_classify_flow(req, flowi4_to_flowi(&fl4));
360 rt = ip_route_output_key(sock_net(sk), &fl4); 356 rt = ip_route_output_key(sock_net(sk), &fl4);
361 if (IS_ERR(rt)) { 357 if (IS_ERR(rt)) {
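
As in raw_sendmsg() earlier, the syncookies hunk drops a designated-initializer flowi4 in favour of flowi4_init_output(), which takes every key field, ports included, as an explicit positional argument, so none can be silently left zeroed. A sketch of the pattern with a simplified stand-in struct and helper (not the kernel's):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct flow4 {
	int oif;
	uint32_t mark;
	uint8_t tos, proto, flags;
	uint32_t daddr, saddr;
	uint16_t dport, sport;
};

static inline void flow4_init_output(struct flow4 *fl, int oif, uint32_t mark,
				     uint8_t tos, uint8_t proto, uint8_t flags,
				     uint32_t daddr, uint32_t saddr,
				     uint16_t dport, uint16_t sport)
{
	memset(fl, 0, sizeof(*fl));	/* one place zeroes everything else */
	fl->oif = oif;
	fl->mark = mark;
	fl->tos = tos;
	fl->proto = proto;
	fl->flags = flags;
	fl->daddr = daddr;
	fl->saddr = saddr;
	fl->dport = dport;
	fl->sport = sport;
}

int main(void)
{
	struct flow4 fl;

	/* Mirrors the cookie_v4_check() call shape: oif, mark, tos,
	 * proto, flags, then daddr, saddr, dport, sport. */
	flow4_init_output(&fl, 0, 0, 0x10, 6 /* TCP */, 0,
			  0xC0A80001, 0xC0A80002, 80, 54321);
	printf("%08x:%u -> %08x:%u proto %u\n",
	       fl.saddr, fl.sport, fl.daddr, fl.dport, fl.proto);
	return 0;
}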
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 321e6e84dbcc..57d0752e239a 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -13,6 +13,7 @@
13#include <linux/seqlock.h> 13#include <linux/seqlock.h>
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/nsproxy.h>
16#include <net/snmp.h> 17#include <net/snmp.h>
17#include <net/icmp.h> 18#include <net/icmp.h>
18#include <net/ip.h> 19#include <net/ip.h>
@@ -21,6 +22,7 @@
21#include <net/udp.h> 22#include <net/udp.h>
22#include <net/cipso_ipv4.h> 23#include <net/cipso_ipv4.h>
23#include <net/inet_frag.h> 24#include <net/inet_frag.h>
25#include <net/ping.h>
24 26
25static int zero; 27static int zero;
26static int tcp_retr1_max = 255; 28static int tcp_retr1_max = 255;
@@ -30,6 +32,8 @@ static int tcp_adv_win_scale_min = -31;
30static int tcp_adv_win_scale_max = 31; 32static int tcp_adv_win_scale_max = 31;
31static int ip_ttl_min = 1; 33static int ip_ttl_min = 1;
32static int ip_ttl_max = 255; 34static int ip_ttl_max = 255;
35static int ip_ping_group_range_min[] = { 0, 0 };
36static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
33 37
34/* Update system visible IP port range */ 38/* Update system visible IP port range */
35static void set_local_port_range(int range[2]) 39static void set_local_port_range(int range[2])
@@ -68,6 +72,53 @@ static int ipv4_local_port_range(ctl_table *table, int write,
68 return ret; 72 return ret;
69} 73}
70 74
75
76void inet_get_ping_group_range_table(struct ctl_table *table, gid_t *low, gid_t *high)
77{
78 gid_t *data = table->data;
79 unsigned seq;
80 do {
81 seq = read_seqbegin(&sysctl_local_ports.lock);
82
83 *low = data[0];
84 *high = data[1];
85 } while (read_seqretry(&sysctl_local_ports.lock, seq));
86}
87
88/* Update system visible IP port range */
89static void set_ping_group_range(struct ctl_table *table, int range[2])
90{
91 gid_t *data = table->data;
92 write_seqlock(&sysctl_local_ports.lock);
93 data[0] = range[0];
94 data[1] = range[1];
95 write_sequnlock(&sysctl_local_ports.lock);
96}
97
98/* Validate changes from /proc interface. */
99static int ipv4_ping_group_range(ctl_table *table, int write,
100 void __user *buffer,
101 size_t *lenp, loff_t *ppos)
102{
103 int ret;
104 gid_t range[2];
105 ctl_table tmp = {
106 .data = &range,
107 .maxlen = sizeof(range),
108 .mode = table->mode,
109 .extra1 = &ip_ping_group_range_min,
110 .extra2 = &ip_ping_group_range_max,
111 };
112
113 inet_get_ping_group_range_table(table, range, range + 1);
114 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
115
116 if (write && ret == 0)
117 set_ping_group_range(table, range);
118
119 return ret;
120}
121
71static int proc_tcp_congestion_control(ctl_table *ctl, int write, 122static int proc_tcp_congestion_control(ctl_table *ctl, int write,
72 void __user *buffer, size_t *lenp, loff_t *ppos) 123 void __user *buffer, size_t *lenp, loff_t *ppos)
73{ 124{
@@ -677,6 +728,13 @@ static struct ctl_table ipv4_net_table[] = {
677 .mode = 0644, 728 .mode = 0644,
678 .proc_handler = proc_dointvec 729 .proc_handler = proc_dointvec
679 }, 730 },
731 {
732 .procname = "ping_group_range",
733 .data = &init_net.ipv4.sysctl_ping_group_range,
734 .maxlen = sizeof(init_net.ipv4.sysctl_ping_group_range),
735 .mode = 0644,
736 .proc_handler = ipv4_ping_group_range,
737 },
680 { } 738 { }
681}; 739};
682 740
@@ -711,8 +769,18 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
711 &net->ipv4.sysctl_icmp_ratemask; 769 &net->ipv4.sysctl_icmp_ratemask;
712 table[6].data = 770 table[6].data =
713 &net->ipv4.sysctl_rt_cache_rebuild_count; 771 &net->ipv4.sysctl_rt_cache_rebuild_count;
772 table[7].data =
773 &net->ipv4.sysctl_ping_group_range;
774
714 } 775 }
715 776
777 /*
778 * Sane defaults - nobody may create ping sockets.
779 * Boot scripts should set this to distro-specific group.
780 */
781 net->ipv4.sysctl_ping_group_range[0] = 1;
782 net->ipv4.sysctl_ping_group_range[1] = 0;
783
716 net->ipv4.sysctl_rt_cache_rebuild_count = 4; 784 net->ipv4.sysctl_rt_cache_rebuild_count = 4;
717 785
718 net->ipv4.ipv4_hdr = register_net_sysctl_table(net, 786 net->ipv4.ipv4_hdr = register_net_sysctl_table(net,
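
inet_get_ping_group_range_table() and set_ping_group_range() above reuse the local-ports seqlock so the two gid_t words are always read as a consistent pair: the writer bumps the sequence around the update, and readers retry via read_seqbegin()/read_seqretry() until they see an even, unchanged count. A toy userspace seqcount showing the same discipline (a sketch, not the kernel's seqlock_t):

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint seq;
static int range[2] = { 1, 0 };	/* the "nobody may ping" default from the patch */

static void write_range(int low, int high)
{
	atomic_fetch_add_explicit(&seq, 1, memory_order_release); /* odd: update in progress */
	range[0] = low;
	range[1] = high;
	atomic_fetch_add_explicit(&seq, 1, memory_order_release); /* even: stable again */
}

static void read_range(int *low, int *high)
{
	unsigned s;

	do {
		s = atomic_load_explicit(&seq, memory_order_acquire);
		*low = range[0];
		*high = range[1];
	} while ((s & 1) ||	/* writer active, or... */
		 s != atomic_load_explicit(&seq, memory_order_acquire));
}

int main(void)
{
	int lo, hi;

	read_range(&lo, &hi);
	printf("default: %d %d (low > high: no group may create ping sockets)\n",
	       lo, hi);
	write_range(100, 100);
	read_range(&lo, &hi);
	printf("updated: %d %d\n", lo, hi);
	return 0;
}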
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index b22d45010545..054a59d21eb0 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -999,7 +999,8 @@ new_segment:
999 /* We have some space in skb head. Superb! */ 999 /* We have some space in skb head. Superb! */
1000 if (copy > skb_tailroom(skb)) 1000 if (copy > skb_tailroom(skb))
1001 copy = skb_tailroom(skb); 1001 copy = skb_tailroom(skb);
1002 if ((err = skb_add_data(skb, from, copy)) != 0) 1002 err = skb_add_data_nocache(sk, skb, from, copy);
1003 if (err)
1003 goto do_fault; 1004 goto do_fault;
1004 } else { 1005 } else {
1005 int merge = 0; 1006 int merge = 0;
@@ -1042,8 +1043,8 @@ new_segment:
1042 1043
1043 /* Time to copy data. We are close to 1044 /* Time to copy data. We are close to
1044 * the end! */ 1045 * the end! */
1045 err = skb_copy_to_page(sk, from, skb, page, 1046 err = skb_copy_to_page_nocache(sk, from, skb,
1046 off, copy); 1047 page, off, copy);
1047 if (err) { 1048 if (err) {
1048 /* If this page was new, give it to the 1049 /* If this page was new, give it to the
1049 * socket so it does not get leaked. 1050 * socket so it does not get leaked.
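
The tcp.c hunk switches sendmsg's copies to skb_add_data_nocache()/skb_copy_to_page_nocache(), which can bypass the CPU cache for payload the host will not read again — typically only a win when the NIC computes the checksum, so the CPU never touches the data a second time. A loose sketch of the dispatch idea; the flag name and copy routines below are stand-ins (the kernel keys off NETIF_F_NOCACHE_COPY and uses __copy_from_user_nocache()):

#include <string.h>
#include <stdio.h>

#define CAP_NOCACHE_COPY 0x1

static void *copy_cached(void *dst, const void *src, size_t n)
{
	return memcpy(dst, src, n);	/* ordinary, cache-filling copy */
}

static void *copy_nocache(void *dst, const void *src, size_t n)
{
	/* Stand-in: a real version would use non-temporal stores
	 * (e.g. movnt on x86) to avoid polluting the data cache. */
	return memcpy(dst, src, n);
}

static int add_data(unsigned caps, void *dst, const void *src, size_t n)
{
	if (caps & CAP_NOCACHE_COPY)
		copy_nocache(dst, src, n);
	else
		copy_cached(dst, src, n);
	return 0;
}

int main(void)
{
	char payload[64] = "bulk send data";
	char skb_frag[64];

	if (add_data(CAP_NOCACHE_COPY, skb_frag, payload, sizeof(payload)) == 0)
		printf("copied: %s\n", skb_frag);
	return 0;
}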
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index f7e6c2c2d2bb..3c8d9b6f1ea4 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -146,13 +146,15 @@ EXPORT_SYMBOL_GPL(tcp_twsk_unique);
146/* This will initiate an outgoing connection. */ 146/* This will initiate an outgoing connection. */
147int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) 147int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
148{ 148{
149 struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
149 struct inet_sock *inet = inet_sk(sk); 150 struct inet_sock *inet = inet_sk(sk);
150 struct tcp_sock *tp = tcp_sk(sk); 151 struct tcp_sock *tp = tcp_sk(sk);
151 struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
152 __be16 orig_sport, orig_dport; 152 __be16 orig_sport, orig_dport;
153 struct rtable *rt;
154 __be32 daddr, nexthop; 153 __be32 daddr, nexthop;
154 struct flowi4 *fl4;
155 struct rtable *rt;
155 int err; 156 int err;
157 struct ip_options_rcu *inet_opt;
156 158
157 if (addr_len < sizeof(struct sockaddr_in)) 159 if (addr_len < sizeof(struct sockaddr_in))
158 return -EINVAL; 160 return -EINVAL;
@@ -161,15 +163,18 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
161 return -EAFNOSUPPORT; 163 return -EAFNOSUPPORT;
162 164
163 nexthop = daddr = usin->sin_addr.s_addr; 165 nexthop = daddr = usin->sin_addr.s_addr;
164 if (inet->opt && inet->opt->srr) { 166 inet_opt = rcu_dereference_protected(inet->inet_opt,
167 sock_owned_by_user(sk));
168 if (inet_opt && inet_opt->opt.srr) {
165 if (!daddr) 169 if (!daddr)
166 return -EINVAL; 170 return -EINVAL;
167 nexthop = inet->opt->faddr; 171 nexthop = inet_opt->opt.faddr;
168 } 172 }
169 173
170 orig_sport = inet->inet_sport; 174 orig_sport = inet->inet_sport;
171 orig_dport = usin->sin_port; 175 orig_dport = usin->sin_port;
172 rt = ip_route_connect(nexthop, inet->inet_saddr, 176 fl4 = &inet->cork.fl.u.ip4;
177 rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
173 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if, 178 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
174 IPPROTO_TCP, 179 IPPROTO_TCP,
175 orig_sport, orig_dport, sk, true); 180 orig_sport, orig_dport, sk, true);
@@ -185,11 +190,11 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
185 return -ENETUNREACH; 190 return -ENETUNREACH;
186 } 191 }
187 192
188 if (!inet->opt || !inet->opt->srr) 193 if (!inet_opt || !inet_opt->opt.srr)
189 daddr = rt->rt_dst; 194 daddr = fl4->daddr;
190 195
191 if (!inet->inet_saddr) 196 if (!inet->inet_saddr)
192 inet->inet_saddr = rt->rt_src; 197 inet->inet_saddr = fl4->saddr;
193 inet->inet_rcv_saddr = inet->inet_saddr; 198 inet->inet_rcv_saddr = inet->inet_saddr;
194 199
195 if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) { 200 if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
@@ -200,8 +205,8 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
200 } 205 }
201 206
202 if (tcp_death_row.sysctl_tw_recycle && 207 if (tcp_death_row.sysctl_tw_recycle &&
203 !tp->rx_opt.ts_recent_stamp && rt->rt_dst == daddr) { 208 !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr) {
204 struct inet_peer *peer = rt_get_peer(rt); 209 struct inet_peer *peer = rt_get_peer(rt, fl4->daddr);
205 /* 210 /*
206 * VJ's idea. We save last timestamp seen from 211 * VJ's idea. We save last timestamp seen from
207 * the destination in peer table, when entering state 212 * the destination in peer table, when entering state
@@ -221,8 +226,8 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
221 inet->inet_daddr = daddr; 226 inet->inet_daddr = daddr;
222 227
223 inet_csk(sk)->icsk_ext_hdr_len = 0; 228 inet_csk(sk)->icsk_ext_hdr_len = 0;
224 if (inet->opt) 229 if (inet_opt)
225 inet_csk(sk)->icsk_ext_hdr_len = inet->opt->optlen; 230 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
226 231
227 tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT; 232 tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
228 233
@@ -236,8 +241,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
236 if (err) 241 if (err)
237 goto failure; 242 goto failure;
238 243
239 rt = ip_route_newports(rt, IPPROTO_TCP, 244 rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
240 orig_sport, orig_dport,
241 inet->inet_sport, inet->inet_dport, sk); 245 inet->inet_sport, inet->inet_dport, sk);
242 if (IS_ERR(rt)) { 246 if (IS_ERR(rt)) {
243 err = PTR_ERR(rt); 247 err = PTR_ERR(rt);
@@ -279,7 +283,7 @@ EXPORT_SYMBOL(tcp_v4_connect);
279/* 283/*
280 * This routine does path mtu discovery as defined in RFC1191. 284 * This routine does path mtu discovery as defined in RFC1191.
281 */ 285 */
282static void do_pmtu_discovery(struct sock *sk, struct iphdr *iph, u32 mtu) 286static void do_pmtu_discovery(struct sock *sk, const struct iphdr *iph, u32 mtu)
283{ 287{
284 struct dst_entry *dst; 288 struct dst_entry *dst;
285 struct inet_sock *inet = inet_sk(sk); 289 struct inet_sock *inet = inet_sk(sk);
@@ -341,7 +345,7 @@ static void do_pmtu_discovery(struct sock *sk, struct iphdr *iph, u32 mtu)
341 345
342void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) 346void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
343{ 347{
344 struct iphdr *iph = (struct iphdr *)icmp_skb->data; 348 const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
345 struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2)); 349 struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
346 struct inet_connection_sock *icsk; 350 struct inet_connection_sock *icsk;
347 struct tcp_sock *tp; 351 struct tcp_sock *tp;
@@ -647,7 +651,7 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
647 arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0; 651 arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
648 652
649 net = dev_net(skb_dst(skb)->dev); 653 net = dev_net(skb_dst(skb)->dev);
650 ip_send_reply(net->ipv4.tcp_sock, skb, 654 ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
651 &arg, arg.iov[0].iov_len); 655 &arg, arg.iov[0].iov_len);
652 656
653 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS); 657 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
@@ -722,7 +726,7 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
722 if (oif) 726 if (oif)
723 arg.bound_dev_if = oif; 727 arg.bound_dev_if = oif;
724 728
725 ip_send_reply(net->ipv4.tcp_sock, skb, 729 ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
726 &arg, arg.iov[0].iov_len); 730 &arg, arg.iov[0].iov_len);
727 731
728 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS); 732 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
@@ -765,11 +769,12 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
765 struct request_values *rvp) 769 struct request_values *rvp)
766{ 770{
767 const struct inet_request_sock *ireq = inet_rsk(req); 771 const struct inet_request_sock *ireq = inet_rsk(req);
772 struct flowi4 fl4;
768 int err = -1; 773 int err = -1;
769 struct sk_buff * skb; 774 struct sk_buff * skb;
770 775
771 /* First, grab a route. */ 776 /* First, grab a route. */
772 if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL) 777 if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
773 return -1; 778 return -1;
774 779
775 skb = tcp_make_synack(sk, dst, req, rvp); 780 skb = tcp_make_synack(sk, dst, req, rvp);
@@ -820,17 +825,18 @@ static void syn_flood_warning(const struct sk_buff *skb)
820/* 825/*
821 * Save and compile IPv4 options into the request_sock if needed. 826 * Save and compile IPv4 options into the request_sock if needed.
822 */ 827 */
823static struct ip_options *tcp_v4_save_options(struct sock *sk, 828static struct ip_options_rcu *tcp_v4_save_options(struct sock *sk,
824 struct sk_buff *skb) 829 struct sk_buff *skb)
825{ 830{
826 struct ip_options *opt = &(IPCB(skb)->opt); 831 const struct ip_options *opt = &(IPCB(skb)->opt);
827 struct ip_options *dopt = NULL; 832 struct ip_options_rcu *dopt = NULL;
828 833
829 if (opt && opt->optlen) { 834 if (opt && opt->optlen) {
830 int opt_size = optlength(opt); 835 int opt_size = sizeof(*dopt) + opt->optlen;
836
831 dopt = kmalloc(opt_size, GFP_ATOMIC); 837 dopt = kmalloc(opt_size, GFP_ATOMIC);
832 if (dopt) { 838 if (dopt) {
833 if (ip_options_echo(dopt, skb)) { 839 if (ip_options_echo(&dopt->opt, skb)) {
834 kfree(dopt); 840 kfree(dopt);
835 dopt = NULL; 841 dopt = NULL;
836 } 842 }
@@ -1333,6 +1339,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1333 req->cookie_ts = tmp_opt.tstamp_ok; 1339 req->cookie_ts = tmp_opt.tstamp_ok;
1334 } else if (!isn) { 1340 } else if (!isn) {
1335 struct inet_peer *peer = NULL; 1341 struct inet_peer *peer = NULL;
1342 struct flowi4 fl4;
1336 1343
1337 /* VJ's idea. We save last timestamp seen 1344 /* VJ's idea. We save last timestamp seen
1338 * from the destination in peer table, when entering 1345 * from the destination in peer table, when entering
@@ -1345,9 +1352,9 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1345 */ 1352 */
1346 if (tmp_opt.saw_tstamp && 1353 if (tmp_opt.saw_tstamp &&
1347 tcp_death_row.sysctl_tw_recycle && 1354 tcp_death_row.sysctl_tw_recycle &&
1348 (dst = inet_csk_route_req(sk, req)) != NULL && 1355 (dst = inet_csk_route_req(sk, &fl4, req)) != NULL &&
1349 (peer = rt_get_peer((struct rtable *)dst)) != NULL && 1356 fl4.daddr == saddr &&
1350 peer->daddr.addr.a4 == saddr) { 1357 (peer = rt_get_peer((struct rtable *)dst, fl4.daddr)) != NULL) {
1351 inet_peer_refcheck(peer); 1358 inet_peer_refcheck(peer);
1352 if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL && 1359 if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
1353 (s32)(peer->tcp_ts - req->ts_recent) > 1360 (s32)(peer->tcp_ts - req->ts_recent) >
@@ -1411,19 +1418,16 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1411#ifdef CONFIG_TCP_MD5SIG 1418#ifdef CONFIG_TCP_MD5SIG
1412 struct tcp_md5sig_key *key; 1419 struct tcp_md5sig_key *key;
1413#endif 1420#endif
1421 struct ip_options_rcu *inet_opt;
1414 1422
1415 if (sk_acceptq_is_full(sk)) 1423 if (sk_acceptq_is_full(sk))
1416 goto exit_overflow; 1424 goto exit_overflow;
1417 1425
1418 if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
1419 goto exit;
1420
1421 newsk = tcp_create_openreq_child(sk, req, skb); 1426 newsk = tcp_create_openreq_child(sk, req, skb);
1422 if (!newsk) 1427 if (!newsk)
1423 goto exit_nonewsk; 1428 goto exit_nonewsk;
1424 1429
1425 newsk->sk_gso_type = SKB_GSO_TCPV4; 1430 newsk->sk_gso_type = SKB_GSO_TCPV4;
1426 sk_setup_caps(newsk, dst);
1427 1431
1428 newtp = tcp_sk(newsk); 1432 newtp = tcp_sk(newsk);
1429 newinet = inet_sk(newsk); 1433 newinet = inet_sk(newsk);
@@ -1431,15 +1435,21 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1431 newinet->inet_daddr = ireq->rmt_addr; 1435 newinet->inet_daddr = ireq->rmt_addr;
1432 newinet->inet_rcv_saddr = ireq->loc_addr; 1436 newinet->inet_rcv_saddr = ireq->loc_addr;
1433 newinet->inet_saddr = ireq->loc_addr; 1437 newinet->inet_saddr = ireq->loc_addr;
1434 newinet->opt = ireq->opt; 1438 inet_opt = ireq->opt;
1439 rcu_assign_pointer(newinet->inet_opt, inet_opt);
1435 ireq->opt = NULL; 1440 ireq->opt = NULL;
1436 newinet->mc_index = inet_iif(skb); 1441 newinet->mc_index = inet_iif(skb);
1437 newinet->mc_ttl = ip_hdr(skb)->ttl; 1442 newinet->mc_ttl = ip_hdr(skb)->ttl;
1438 inet_csk(newsk)->icsk_ext_hdr_len = 0; 1443 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1439 if (newinet->opt) 1444 if (inet_opt)
1440 inet_csk(newsk)->icsk_ext_hdr_len = newinet->opt->optlen; 1445 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1441 newinet->inet_id = newtp->write_seq ^ jiffies; 1446 newinet->inet_id = newtp->write_seq ^ jiffies;
1442 1447
1448 if (!dst && (dst = inet_csk_route_child_sock(sk, newsk, req)) == NULL)
1449 goto put_and_exit;
1450
1451 sk_setup_caps(newsk, dst);
1452
1443 tcp_mtup_init(newsk); 1453 tcp_mtup_init(newsk);
1444 tcp_sync_mss(newsk, dst_mtu(dst)); 1454 tcp_sync_mss(newsk, dst_mtu(dst));
1445 newtp->advmss = dst_metric_advmss(dst); 1455 newtp->advmss = dst_metric_advmss(dst);
@@ -1467,10 +1477,8 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1467 } 1477 }
1468#endif 1478#endif
1469 1479
1470 if (__inet_inherit_port(sk, newsk) < 0) { 1480 if (__inet_inherit_port(sk, newsk) < 0)
1471 sock_put(newsk); 1481 goto put_and_exit;
1472 goto exit;
1473 }
1474 __inet_hash_nolisten(newsk, NULL); 1482 __inet_hash_nolisten(newsk, NULL);
1475 1483
1476 return newsk; 1484 return newsk;
@@ -1482,6 +1490,9 @@ exit_nonewsk:
1482exit: 1490exit:
1483 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); 1491 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1484 return NULL; 1492 return NULL;
1493put_and_exit:
1494 sock_put(newsk);
1495 goto exit;
1485} 1496}
1486EXPORT_SYMBOL(tcp_v4_syn_recv_sock); 1497EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1487 1498
@@ -1764,12 +1775,13 @@ struct inet_peer *tcp_v4_get_peer(struct sock *sk, bool *release_it)
1764 struct inet_sock *inet = inet_sk(sk); 1775 struct inet_sock *inet = inet_sk(sk);
1765 struct inet_peer *peer; 1776 struct inet_peer *peer;
1766 1777
1767 if (!rt || rt->rt_dst != inet->inet_daddr) { 1778 if (!rt ||
1779 inet->cork.fl.u.ip4.daddr != inet->inet_daddr) {
1768 peer = inet_getpeer_v4(inet->inet_daddr, 1); 1780 peer = inet_getpeer_v4(inet->inet_daddr, 1);
1769 *release_it = true; 1781 *release_it = true;
1770 } else { 1782 } else {
1771 if (!rt->peer) 1783 if (!rt->peer)
1772 rt_bind_peer(rt, 1); 1784 rt_bind_peer(rt, inet->inet_daddr, 1);
1773 peer = rt->peer; 1785 peer = rt->peer;
1774 *release_it = false; 1786 *release_it = false;
1775 } 1787 }
@@ -2527,7 +2539,7 @@ void tcp4_proc_exit(void)
2527 2539
2528struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb) 2540struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2529{ 2541{
2530 struct iphdr *iph = skb_gro_network_header(skb); 2542 const struct iphdr *iph = skb_gro_network_header(skb);
2531 2543
2532 switch (skb->ip_summed) { 2544 switch (skb->ip_summed) {
2533 case CHECKSUM_COMPLETE: 2545 case CHECKSUM_COMPLETE:
@@ -2548,7 +2560,7 @@ struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2548 2560
2549int tcp4_gro_complete(struct sk_buff *skb) 2561int tcp4_gro_complete(struct sk_buff *skb)
2550{ 2562{
2551 struct iphdr *iph = ip_hdr(skb); 2563 const struct iphdr *iph = ip_hdr(skb);
2552 struct tcphdr *th = tcp_hdr(skb); 2564 struct tcphdr *th = tcp_hdr(skb);
2553 2565
2554 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb), 2566 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
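
Note on the tcp_ipv4.c hunks above: two themes run through them. First, the flow key a connect resolves is now cached on the socket in inet->cork.fl.u.ip4, and ip_route_connect()/ip_route_newports() fill it in, so later code reads fl4->saddr and fl4->daddr where it used to peek at rt->rt_src and rt->rt_dst. Second, inet->opt becomes the RCU-managed inet->inet_opt (a struct ip_options_rcu wrapping the old struct ip_options), read under the socket lock with rcu_dereference_protected(). The tcp_v4_syn_recv_sock() hunk also defers the route lookup until the child socket exists (inet_csk_route_child_sock()), with a new put_and_exit label to drop the half-built child on failure. A minimal sketch of the connect-side pattern, identifiers as in the hunks above, error handling trimmed:

    struct inet_sock *inet = inet_sk(sk);
    struct flowi4 *fl4 = &inet->cork.fl.u.ip4;  /* flow key cached on the socket */
    struct rtable *rt;

    rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
                          RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
                          IPPROTO_TCP, orig_sport, orig_dport, sk, true);
    if (IS_ERR(rt))
        return PTR_ERR(rt);        /* full version also counts ENETUNREACH */

    /* the lookup left the resolved addresses in the flow: */
    if (!inet->inet_saddr)
        inet->inet_saddr = fl4->saddr;   /* was rt->rt_src */
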
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 17388c7f49c4..882e0b0964d0 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -899,7 +899,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
899 TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS, 899 TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
900 tcp_skb_pcount(skb)); 900 tcp_skb_pcount(skb));
901 901
902 err = icsk->icsk_af_ops->queue_xmit(skb); 902 err = icsk->icsk_af_ops->queue_xmit(skb, &inet->cork.fl);
903 if (likely(err <= 0)) 903 if (likely(err <= 0))
904 return err; 904 return err;
905 905
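
The one-line tcp_output.c change above is the caller side of a prototype change that threads the cached flow down to the address-family output hook. Condensed from the hunk, with the surrounding transmit logic elided:

    /* tcp_transmit_skb() now always hands the socket's cached flow
     * to the af-specific output routine: */
    err = icsk->icsk_af_ops->queue_xmit(skb, &inet->cork.fl);

IPv4's handler can use the passed flow instead of re-deriving it; as the inet6_connection_sock.c hunk further down shows, IPv6 takes the argument only for prototype parity.
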
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index f87a8eb76f3b..599374f65c76 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -578,7 +578,7 @@ found:
578void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable) 578void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
579{ 579{
580 struct inet_sock *inet; 580 struct inet_sock *inet;
581 struct iphdr *iph = (struct iphdr *)skb->data; 581 const struct iphdr *iph = (const struct iphdr *)skb->data;
582 struct udphdr *uh = (struct udphdr *)(skb->data+(iph->ihl<<2)); 582 struct udphdr *uh = (struct udphdr *)(skb->data+(iph->ihl<<2));
583 const int type = icmp_hdr(skb)->type; 583 const int type = icmp_hdr(skb)->type;
584 const int code = icmp_hdr(skb)->code; 584 const int code = icmp_hdr(skb)->code;
@@ -706,12 +706,11 @@ static void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
706 } 706 }
707} 707}
708 708
709static int udp_send_skb(struct sk_buff *skb, __be32 daddr, __be32 dport) 709static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4)
710{ 710{
711 struct sock *sk = skb->sk; 711 struct sock *sk = skb->sk;
712 struct inet_sock *inet = inet_sk(sk); 712 struct inet_sock *inet = inet_sk(sk);
713 struct udphdr *uh; 713 struct udphdr *uh;
714 struct rtable *rt = (struct rtable *)skb_dst(skb);
715 int err = 0; 714 int err = 0;
716 int is_udplite = IS_UDPLITE(sk); 715 int is_udplite = IS_UDPLITE(sk);
717 int offset = skb_transport_offset(skb); 716 int offset = skb_transport_offset(skb);
@@ -723,7 +722,7 @@ static int udp_send_skb(struct sk_buff *skb, __be32 daddr, __be32 dport)
723 */ 722 */
724 uh = udp_hdr(skb); 723 uh = udp_hdr(skb);
725 uh->source = inet->inet_sport; 724 uh->source = inet->inet_sport;
726 uh->dest = dport; 725 uh->dest = fl4->fl4_dport;
727 uh->len = htons(len); 726 uh->len = htons(len);
728 uh->check = 0; 727 uh->check = 0;
729 728
@@ -737,14 +736,14 @@ static int udp_send_skb(struct sk_buff *skb, __be32 daddr, __be32 dport)
737 736
738 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */ 737 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
739 738
740 udp4_hwcsum(skb, rt->rt_src, daddr); 739 udp4_hwcsum(skb, fl4->saddr, fl4->daddr);
741 goto send; 740 goto send;
742 741
743 } else 742 } else
744 csum = udp_csum(skb); 743 csum = udp_csum(skb);
745 744
746 /* add protocol-dependent pseudo-header */ 745 /* add protocol-dependent pseudo-header */
747 uh->check = csum_tcpudp_magic(rt->rt_src, daddr, len, 746 uh->check = csum_tcpudp_magic(fl4->saddr, fl4->daddr, len,
748 sk->sk_protocol, csum); 747 sk->sk_protocol, csum);
749 if (uh->check == 0) 748 if (uh->check == 0)
750 uh->check = CSUM_MANGLED_0; 749 uh->check = CSUM_MANGLED_0;
@@ -774,11 +773,11 @@ static int udp_push_pending_frames(struct sock *sk)
774 struct sk_buff *skb; 773 struct sk_buff *skb;
775 int err = 0; 774 int err = 0;
776 775
777 skb = ip_finish_skb(sk); 776 skb = ip_finish_skb(sk, fl4);
778 if (!skb) 777 if (!skb)
779 goto out; 778 goto out;
780 779
781 err = udp_send_skb(skb, fl4->daddr, fl4->fl4_dport); 780 err = udp_send_skb(skb, fl4);
782 781
783out: 782out:
784 up->len = 0; 783 up->len = 0;
@@ -791,6 +790,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
791{ 790{
792 struct inet_sock *inet = inet_sk(sk); 791 struct inet_sock *inet = inet_sk(sk);
793 struct udp_sock *up = udp_sk(sk); 792 struct udp_sock *up = udp_sk(sk);
793 struct flowi4 fl4_stack;
794 struct flowi4 *fl4; 794 struct flowi4 *fl4;
795 int ulen = len; 795 int ulen = len;
796 struct ipcm_cookie ipc; 796 struct ipcm_cookie ipc;
@@ -804,6 +804,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
804 int corkreq = up->corkflag || msg->msg_flags&MSG_MORE; 804 int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
805 int (*getfrag)(void *, char *, int, int, int, struct sk_buff *); 805 int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
806 struct sk_buff *skb; 806 struct sk_buff *skb;
807 struct ip_options_data opt_copy;
807 808
808 if (len > 0xFFFF) 809 if (len > 0xFFFF)
809 return -EMSGSIZE; 810 return -EMSGSIZE;
@@ -820,6 +821,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
820 821
821 getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag; 822 getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
822 823
824 fl4 = &inet->cork.fl.u.ip4;
823 if (up->pending) { 825 if (up->pending) {
824 /* 826 /*
825 * There are pending frames. 827 * There are pending frames.
@@ -877,22 +879,32 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
877 free = 1; 879 free = 1;
878 connected = 0; 880 connected = 0;
879 } 881 }
880 if (!ipc.opt) 882 if (!ipc.opt) {
881 ipc.opt = inet->opt; 883 struct ip_options_rcu *inet_opt;
884
885 rcu_read_lock();
886 inet_opt = rcu_dereference(inet->inet_opt);
887 if (inet_opt) {
888 memcpy(&opt_copy, inet_opt,
889 sizeof(*inet_opt) + inet_opt->opt.optlen);
890 ipc.opt = &opt_copy.opt;
891 }
892 rcu_read_unlock();
893 }
882 894
883 saddr = ipc.addr; 895 saddr = ipc.addr;
884 ipc.addr = faddr = daddr; 896 ipc.addr = faddr = daddr;
885 897
886 if (ipc.opt && ipc.opt->srr) { 898 if (ipc.opt && ipc.opt->opt.srr) {
887 if (!daddr) 899 if (!daddr)
888 return -EINVAL; 900 return -EINVAL;
889 faddr = ipc.opt->faddr; 901 faddr = ipc.opt->opt.faddr;
890 connected = 0; 902 connected = 0;
891 } 903 }
892 tos = RT_TOS(inet->tos); 904 tos = RT_TOS(inet->tos);
893 if (sock_flag(sk, SOCK_LOCALROUTE) || 905 if (sock_flag(sk, SOCK_LOCALROUTE) ||
894 (msg->msg_flags & MSG_DONTROUTE) || 906 (msg->msg_flags & MSG_DONTROUTE) ||
895 (ipc.opt && ipc.opt->is_strictroute)) { 907 (ipc.opt && ipc.opt->opt.is_strictroute)) {
896 tos |= RTO_ONLINK; 908 tos |= RTO_ONLINK;
897 connected = 0; 909 connected = 0;
898 } 910 }
@@ -909,22 +921,16 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
909 rt = (struct rtable *)sk_dst_check(sk, 0); 921 rt = (struct rtable *)sk_dst_check(sk, 0);
910 922
911 if (rt == NULL) { 923 if (rt == NULL) {
912 struct flowi4 fl4 = {
913 .flowi4_oif = ipc.oif,
914 .flowi4_mark = sk->sk_mark,
915 .daddr = faddr,
916 .saddr = saddr,
917 .flowi4_tos = tos,
918 .flowi4_proto = sk->sk_protocol,
919 .flowi4_flags = (inet_sk_flowi_flags(sk) |
920 FLOWI_FLAG_CAN_SLEEP),
921 .fl4_sport = inet->inet_sport,
922 .fl4_dport = dport,
923 };
924 struct net *net = sock_net(sk); 924 struct net *net = sock_net(sk);
925 925
926 security_sk_classify_flow(sk, flowi4_to_flowi(&fl4)); 926 fl4 = &fl4_stack;
927 rt = ip_route_output_flow(net, &fl4, sk); 927 flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos,
928 RT_SCOPE_UNIVERSE, sk->sk_protocol,
929 inet_sk_flowi_flags(sk)|FLOWI_FLAG_CAN_SLEEP,
930 faddr, saddr, dport, inet->inet_sport);
931
932 security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
933 rt = ip_route_output_flow(net, fl4, sk);
928 if (IS_ERR(rt)) { 934 if (IS_ERR(rt)) {
929 err = PTR_ERR(rt); 935 err = PTR_ERR(rt);
930 rt = NULL; 936 rt = NULL;
@@ -945,18 +951,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
945 goto do_confirm; 951 goto do_confirm;
946back_from_confirm: 952back_from_confirm:
947 953
948 saddr = rt->rt_src; 954 saddr = fl4->saddr;
949 if (!ipc.addr) 955 if (!ipc.addr)
950 daddr = ipc.addr = rt->rt_dst; 956 daddr = ipc.addr = fl4->daddr;
951 957
952 /* Lockless fast path for the non-corking case. */ 958 /* Lockless fast path for the non-corking case. */
953 if (!corkreq) { 959 if (!corkreq) {
954 skb = ip_make_skb(sk, getfrag, msg->msg_iov, ulen, 960 skb = ip_make_skb(sk, fl4, getfrag, msg->msg_iov, ulen,
955 sizeof(struct udphdr), &ipc, &rt, 961 sizeof(struct udphdr), &ipc, &rt,
956 msg->msg_flags); 962 msg->msg_flags);
957 err = PTR_ERR(skb); 963 err = PTR_ERR(skb);
958 if (skb && !IS_ERR(skb)) 964 if (skb && !IS_ERR(skb))
959 err = udp_send_skb(skb, daddr, dport); 965 err = udp_send_skb(skb, fl4);
960 goto out; 966 goto out;
961 } 967 }
962 968
@@ -982,9 +988,9 @@ back_from_confirm:
982 988
983do_append_data: 989do_append_data:
984 up->len += ulen; 990 up->len += ulen;
985 err = ip_append_data(sk, getfrag, msg->msg_iov, ulen, 991 err = ip_append_data(sk, fl4, getfrag, msg->msg_iov, ulen,
986 sizeof(struct udphdr), &ipc, &rt, 992 sizeof(struct udphdr), &ipc, &rt,
987 corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags); 993 corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
988 if (err) 994 if (err)
989 udp_flush_pending_frames(sk); 995 udp_flush_pending_frames(sk);
990 else if (!corkreq) 996 else if (!corkreq)
@@ -1024,6 +1030,7 @@ EXPORT_SYMBOL(udp_sendmsg);
1024int udp_sendpage(struct sock *sk, struct page *page, int offset, 1030int udp_sendpage(struct sock *sk, struct page *page, int offset,
1025 size_t size, int flags) 1031 size_t size, int flags)
1026{ 1032{
1033 struct inet_sock *inet = inet_sk(sk);
1027 struct udp_sock *up = udp_sk(sk); 1034 struct udp_sock *up = udp_sk(sk);
1028 int ret; 1035 int ret;
1029 1036
@@ -1048,7 +1055,8 @@ int udp_sendpage(struct sock *sk, struct page *page, int offset,
1048 return -EINVAL; 1055 return -EINVAL;
1049 } 1056 }
1050 1057
1051 ret = ip_append_page(sk, page, offset, size, flags); 1058 ret = ip_append_page(sk, &inet->cork.fl.u.ip4,
1059 page, offset, size, flags);
1052 if (ret == -EOPNOTSUPP) { 1060 if (ret == -EOPNOTSUPP) {
1053 release_sock(sk); 1061 release_sock(sk);
1054 return sock_no_sendpage(sk->sk_socket, page, offset, 1062 return sock_no_sendpage(sk->sk_socket, page, offset,
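
The udp.c hunks show the same flow-centric rework from the sending side. udp_sendmsg() now snapshots the RCU-managed IP options into an on-stack struct ip_options_data copy (so the RCU read section stays short and the sender never sleeps inside it), builds its flow with flowi4_init_output() instead of a designated initializer, and hands the resolved flow -- rather than separate daddr/dport values -- down to udp_send_skb(), which reads fl4->saddr, fl4->daddr and fl4->fl4_dport for the header and checksum. The route lookup distilled from the hunk above, error handling trimmed:

    struct flowi4 fl4_stack, *fl4 = &fl4_stack;

    flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos,
                       RT_SCOPE_UNIVERSE, sk->sk_protocol,
                       inet_sk_flowi_flags(sk) | FLOWI_FLAG_CAN_SLEEP,
                       faddr, saddr, dport, inet->inet_sport);

    security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
    rt = ip_route_output_flow(net, fl4, sk);
    if (IS_ERR(rt))
        return PTR_ERR(rt);

    saddr = fl4->saddr;          /* was rt->rt_src */
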
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index d20a05e970d8..981e43eaf704 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -18,38 +18,46 @@
18 18
19static struct xfrm_policy_afinfo xfrm4_policy_afinfo; 19static struct xfrm_policy_afinfo xfrm4_policy_afinfo;
20 20
21static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos, 21static struct dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4,
22 const xfrm_address_t *saddr, 22 int tos,
23 const xfrm_address_t *daddr) 23 const xfrm_address_t *saddr,
24 const xfrm_address_t *daddr)
24{ 25{
25 struct flowi4 fl4 = {
26 .daddr = daddr->a4,
27 .flowi4_tos = tos,
28 };
29 struct rtable *rt; 26 struct rtable *rt;
30 27
28 memset(fl4, 0, sizeof(*fl4));
29 fl4->daddr = daddr->a4;
30 fl4->flowi4_tos = tos;
31 if (saddr) 31 if (saddr)
32 fl4.saddr = saddr->a4; 32 fl4->saddr = saddr->a4;
33 33
34 rt = __ip_route_output_key(net, &fl4); 34 rt = __ip_route_output_key(net, fl4);
35 if (!IS_ERR(rt)) 35 if (!IS_ERR(rt))
36 return &rt->dst; 36 return &rt->dst;
37 37
38 return ERR_CAST(rt); 38 return ERR_CAST(rt);
39} 39}
40 40
41static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos,
42 const xfrm_address_t *saddr,
43 const xfrm_address_t *daddr)
44{
45 struct flowi4 fl4;
46
47 return __xfrm4_dst_lookup(net, &fl4, tos, saddr, daddr);
48}
49
41static int xfrm4_get_saddr(struct net *net, 50static int xfrm4_get_saddr(struct net *net,
42 xfrm_address_t *saddr, xfrm_address_t *daddr) 51 xfrm_address_t *saddr, xfrm_address_t *daddr)
43{ 52{
44 struct dst_entry *dst; 53 struct dst_entry *dst;
45 struct rtable *rt; 54 struct flowi4 fl4;
46 55
47 dst = xfrm4_dst_lookup(net, 0, NULL, daddr); 56 dst = __xfrm4_dst_lookup(net, &fl4, 0, NULL, daddr);
48 if (IS_ERR(dst)) 57 if (IS_ERR(dst))
49 return -EHOSTUNREACH; 58 return -EHOSTUNREACH;
50 59
51 rt = (struct rtable *)dst; 60 saddr->a4 = fl4.saddr;
52 saddr->a4 = rt->rt_src;
53 dst_release(dst); 61 dst_release(dst);
54 return 0; 62 return 0;
55} 63}
@@ -73,7 +81,7 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
73 81
74 rt->rt_key_dst = fl4->daddr; 82 rt->rt_key_dst = fl4->daddr;
75 rt->rt_key_src = fl4->saddr; 83 rt->rt_key_src = fl4->saddr;
76 rt->rt_tos = fl4->flowi4_tos; 84 rt->rt_key_tos = fl4->flowi4_tos;
77 rt->rt_route_iif = fl4->flowi4_iif; 85 rt->rt_route_iif = fl4->flowi4_iif;
78 rt->rt_iif = fl4->flowi4_iif; 86 rt->rt_iif = fl4->flowi4_iif;
79 rt->rt_oif = fl4->flowi4_oif; 87 rt->rt_oif = fl4->flowi4_oif;
@@ -102,7 +110,7 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
102static void 110static void
103_decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse) 111_decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
104{ 112{
105 struct iphdr *iph = ip_hdr(skb); 113 const struct iphdr *iph = ip_hdr(skb);
106 u8 *xprth = skb_network_header(skb) + iph->ihl * 4; 114 u8 *xprth = skb_network_header(skb) + iph->ihl * 4;
107 struct flowi4 *fl4 = &fl->u.ip4; 115 struct flowi4 *fl4 = &fl->u.ip4;
108 116
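
In xfrm4_policy.c the lookup helper is split so that callers needing the resolved source address can take it from the flow instead of casting the dst to a rtable: __xfrm4_dst_lookup() accepts a caller-supplied struct flowi4 and leaves the resolved addresses in it. xfrm4_get_saddr() then loses its rtable cast entirely, as in this condensation of the hunk:

    struct flowi4 fl4;
    struct dst_entry *dst;

    dst = __xfrm4_dst_lookup(net, &fl4, 0, NULL, daddr);
    if (IS_ERR(dst))
        return -EHOSTUNREACH;

    saddr->a4 = fl4.saddr;   /* previously: ((struct rtable *)dst)->rt_src */
    dst_release(dst);
    return 0;

The rt_tos to rt_key_tos rename in xfrm4_fill_dst() fits the same cleanup, presumably marking the field as part of the routing lookup key alongside rt_key_dst and rt_key_src.
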
diff --git a/net/ipv4/xfrm4_state.c b/net/ipv4/xfrm4_state.c
index 805d63ef4340..d9ac0a0058b5 100644
--- a/net/ipv4/xfrm4_state.c
+++ b/net/ipv4/xfrm4_state.c
@@ -55,7 +55,7 @@ xfrm4_init_temprop(struct xfrm_state *x, const struct xfrm_tmpl *tmpl,
55 55
56int xfrm4_extract_header(struct sk_buff *skb) 56int xfrm4_extract_header(struct sk_buff *skb)
57{ 57{
58 struct iphdr *iph = ip_hdr(skb); 58 const struct iphdr *iph = ip_hdr(skb);
59 59
60 XFRM_MODE_SKB_CB(skb)->ihl = sizeof(*iph); 60 XFRM_MODE_SKB_CB(skb)->ihl = sizeof(*iph);
61 XFRM_MODE_SKB_CB(skb)->id = iph->id; 61 XFRM_MODE_SKB_CB(skb)->id = iph->id;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 8f13d88d7dba..498b927f68be 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -289,19 +289,19 @@ static int snmp6_alloc_dev(struct inet6_dev *idev)
289 sizeof(struct ipstats_mib), 289 sizeof(struct ipstats_mib),
290 __alignof__(struct ipstats_mib)) < 0) 290 __alignof__(struct ipstats_mib)) < 0)
291 goto err_ip; 291 goto err_ip;
292 if (snmp_mib_init((void __percpu **)idev->stats.icmpv6, 292 idev->stats.icmpv6dev = kzalloc(sizeof(struct icmpv6_mib_device),
293 sizeof(struct icmpv6_mib), 293 GFP_KERNEL);
294 __alignof__(struct icmpv6_mib)) < 0) 294 if (!idev->stats.icmpv6dev)
295 goto err_icmp; 295 goto err_icmp;
296 if (snmp_mib_init((void __percpu **)idev->stats.icmpv6msg, 296 idev->stats.icmpv6msgdev = kzalloc(sizeof(struct icmpv6msg_mib_device),
297 sizeof(struct icmpv6msg_mib), 297 GFP_KERNEL);
298 __alignof__(struct icmpv6msg_mib)) < 0) 298 if (!idev->stats.icmpv6msgdev)
299 goto err_icmpmsg; 299 goto err_icmpmsg;
300 300
301 return 0; 301 return 0;
302 302
303err_icmpmsg: 303err_icmpmsg:
304 snmp_mib_free((void __percpu **)idev->stats.icmpv6); 304 kfree(idev->stats.icmpv6dev);
305err_icmp: 305err_icmp:
306 snmp_mib_free((void __percpu **)idev->stats.ipv6); 306 snmp_mib_free((void __percpu **)idev->stats.ipv6);
307err_ip: 307err_ip:
@@ -310,8 +310,8 @@ err_ip:
310 310
311static void snmp6_free_dev(struct inet6_dev *idev) 311static void snmp6_free_dev(struct inet6_dev *idev)
312{ 312{
313 snmp_mib_free((void __percpu **)idev->stats.icmpv6msg); 313 kfree(idev->stats.icmpv6msgdev);
314 snmp_mib_free((void __percpu **)idev->stats.icmpv6); 314 kfree(idev->stats.icmpv6dev);
315 snmp_mib_free((void __percpu **)idev->stats.ipv6); 315 snmp_mib_free((void __percpu **)idev->stats.ipv6);
316} 316}
317 317
@@ -813,6 +813,8 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
813 dst_release(&rt->dst); 813 dst_release(&rt->dst);
814 } 814 }
815 815
816 /* clean up prefsrc entries */
817 rt6_remove_prefsrc(ifp);
816out: 818out:
817 in6_ifa_put(ifp); 819 in6_ifa_put(ifp);
818} 820}
@@ -1269,7 +1271,7 @@ static int ipv6_count_addresses(struct inet6_dev *idev)
1269 return cnt; 1271 return cnt;
1270} 1272}
1271 1273
1272int ipv6_chk_addr(struct net *net, struct in6_addr *addr, 1274int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
1273 struct net_device *dev, int strict) 1275 struct net_device *dev, int strict)
1274{ 1276{
1275 struct inet6_ifaddr *ifp; 1277 struct inet6_ifaddr *ifp;
@@ -1312,7 +1314,7 @@ static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
1312 return false; 1314 return false;
1313} 1315}
1314 1316
1315int ipv6_chk_prefix(struct in6_addr *addr, struct net_device *dev) 1317int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev)
1316{ 1318{
1317 struct inet6_dev *idev; 1319 struct inet6_dev *idev;
1318 struct inet6_ifaddr *ifa; 1320 struct inet6_ifaddr *ifa;
@@ -1443,7 +1445,7 @@ void addrconf_dad_failure(struct inet6_ifaddr *ifp)
1443 1445
1444/* Join to solicited addr multicast group. */ 1446/* Join to solicited addr multicast group. */
1445 1447
1446void addrconf_join_solict(struct net_device *dev, struct in6_addr *addr) 1448void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr)
1447{ 1449{
1448 struct in6_addr maddr; 1450 struct in6_addr maddr;
1449 1451
@@ -1454,7 +1456,7 @@ void addrconf_join_solict(struct net_device *dev, struct in6_addr *addr)
1454 ipv6_dev_mc_inc(dev, &maddr); 1456 ipv6_dev_mc_inc(dev, &maddr);
1455} 1457}
1456 1458
1457void addrconf_leave_solict(struct inet6_dev *idev, struct in6_addr *addr) 1459void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
1458{ 1460{
1459 struct in6_addr maddr; 1461 struct in6_addr maddr;
1460 1462
@@ -2099,7 +2101,7 @@ err_exit:
2099/* 2101/*
2100 * Manual configuration of address on an interface 2102 * Manual configuration of address on an interface
2101 */ 2103 */
2102static int inet6_addr_add(struct net *net, int ifindex, struct in6_addr *pfx, 2104static int inet6_addr_add(struct net *net, int ifindex, const struct in6_addr *pfx,
2103 unsigned int plen, __u8 ifa_flags, __u32 prefered_lft, 2105 unsigned int plen, __u8 ifa_flags, __u32 prefered_lft,
2104 __u32 valid_lft) 2106 __u32 valid_lft)
2105{ 2107{
@@ -2173,7 +2175,7 @@ static int inet6_addr_add(struct net *net, int ifindex, struct in6_addr *pfx,
2173 return PTR_ERR(ifp); 2175 return PTR_ERR(ifp);
2174} 2176}
2175 2177
2176static int inet6_addr_del(struct net *net, int ifindex, struct in6_addr *pfx, 2178static int inet6_addr_del(struct net *net, int ifindex, const struct in6_addr *pfx,
2177 unsigned int plen) 2179 unsigned int plen)
2178{ 2180{
2179 struct inet6_ifaddr *ifp; 2181 struct inet6_ifaddr *ifp;
@@ -2336,7 +2338,7 @@ static void init_loopback(struct net_device *dev)
2336 add_addr(idev, &in6addr_loopback, 128, IFA_HOST); 2338 add_addr(idev, &in6addr_loopback, 128, IFA_HOST);
2337} 2339}
2338 2340
2339static void addrconf_add_linklocal(struct inet6_dev *idev, struct in6_addr *addr) 2341static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr *addr)
2340{ 2342{
2341 struct inet6_ifaddr * ifp; 2343 struct inet6_ifaddr * ifp;
2342 u32 addr_flags = IFA_F_PERMANENT; 2344 u32 addr_flags = IFA_F_PERMANENT;
@@ -3107,7 +3109,7 @@ void if6_proc_exit(void)
3107 3109
3108#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) 3110#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
3109/* Check if address is a home address configured on any interface. */ 3111/* Check if address is a home address configured on any interface. */
3110int ipv6_chk_home_addr(struct net *net, struct in6_addr *addr) 3112int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr)
3111{ 3113{
3112 int ret = 0; 3114 int ret = 0;
3113 struct inet6_ifaddr *ifp = NULL; 3115 struct inet6_ifaddr *ifp = NULL;
@@ -3824,7 +3826,7 @@ static inline size_t inet6_if_nlmsg_size(void)
3824 + nla_total_size(inet6_ifla6_size()); /* IFLA_PROTINFO */ 3826 + nla_total_size(inet6_ifla6_size()); /* IFLA_PROTINFO */
3825} 3827}
3826 3828
3827static inline void __snmp6_fill_stats(u64 *stats, void __percpu **mib, 3829static inline void __snmp6_fill_statsdev(u64 *stats, atomic_long_t *mib,
3828 int items, int bytes) 3830 int items, int bytes)
3829{ 3831{
3830 int i; 3832 int i;
@@ -3834,7 +3836,7 @@ static inline void __snmp6_fill_stats(u64 *stats, void __percpu **mib,
3834 /* Use put_unaligned() because stats may not be aligned for u64. */ 3836 /* Use put_unaligned() because stats may not be aligned for u64. */
3835 put_unaligned(items, &stats[0]); 3837 put_unaligned(items, &stats[0]);
3836 for (i = 1; i < items; i++) 3838 for (i = 1; i < items; i++)
3837 put_unaligned(snmp_fold_field(mib, i), &stats[i]); 3839 put_unaligned(atomic_long_read(&mib[i]), &stats[i]);
3838 3840
3839 memset(&stats[items], 0, pad); 3841 memset(&stats[items], 0, pad);
3840} 3842}
@@ -3863,7 +3865,7 @@ static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
3863 IPSTATS_MIB_MAX, bytes, offsetof(struct ipstats_mib, syncp)); 3865 IPSTATS_MIB_MAX, bytes, offsetof(struct ipstats_mib, syncp));
3864 break; 3866 break;
3865 case IFLA_INET6_ICMP6STATS: 3867 case IFLA_INET6_ICMP6STATS:
3866 __snmp6_fill_stats(stats, (void __percpu **)idev->stats.icmpv6, ICMP6_MIB_MAX, bytes); 3868 __snmp6_fill_statsdev(stats, idev->stats.icmpv6dev->mibs, ICMP6_MIB_MAX, bytes);
3867 break; 3869 break;
3868 } 3870 }
3869} 3871}
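
On the IPv6 side, the addrconf.c hunks stop using per-CPU snmp mibs for per-interface ICMPv6 counters: snmp6_alloc_dev() now kzalloc()s flat counter arrays, snmp6_free_dev() kfree()s them, and the netlink stats fill reads each counter with atomic_long_read(). The likely motivation is memory -- a per-CPU mib per interface is expensive on machines with many netdevices, and ICMPv6 counters are cold enough that atomics suffice. Presumed shape of the new container (the real definitions live in the kernel's snmp/ipv6 headers, so treat this as an assumption):

    struct icmpv6_mib_device {
            atomic_long_t mibs[ICMP6_MIB_MAX];
    };

    /* the netlink fill loop then becomes a plain atomic read per counter: */
    for (i = 1; i < items; i++)
            put_unaligned(atomic_long_read(&mib[i]), &stats[i]);
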
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index afcc7099f96d..b7919f901fbf 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -740,7 +740,7 @@ static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto)
740 740
741static int ipv6_gso_send_check(struct sk_buff *skb) 741static int ipv6_gso_send_check(struct sk_buff *skb)
742{ 742{
743 struct ipv6hdr *ipv6h; 743 const struct ipv6hdr *ipv6h;
744 const struct inet6_protocol *ops; 744 const struct inet6_protocol *ops;
745 int err = -EINVAL; 745 int err = -EINVAL;
746 746
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index 0e5e943446f0..674255f5e6b7 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -44,7 +44,7 @@
44 44
45#include <net/checksum.h> 45#include <net/checksum.h>
46 46
47static int ipv6_dev_ac_dec(struct net_device *dev, struct in6_addr *addr); 47static int ipv6_dev_ac_dec(struct net_device *dev, const struct in6_addr *addr);
48 48
49/* Big ac list lock for all the sockets */ 49/* Big ac list lock for all the sockets */
50static DEFINE_RWLOCK(ipv6_sk_ac_lock); 50static DEFINE_RWLOCK(ipv6_sk_ac_lock);
@@ -54,7 +54,7 @@ static DEFINE_RWLOCK(ipv6_sk_ac_lock);
54 * socket join an anycast group 54 * socket join an anycast group
55 */ 55 */
56 56
57int ipv6_sock_ac_join(struct sock *sk, int ifindex, struct in6_addr *addr) 57int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
58{ 58{
59 struct ipv6_pinfo *np = inet6_sk(sk); 59 struct ipv6_pinfo *np = inet6_sk(sk);
60 struct net_device *dev = NULL; 60 struct net_device *dev = NULL;
@@ -145,7 +145,7 @@ error:
145/* 145/*
146 * socket leave an anycast group 146 * socket leave an anycast group
147 */ 147 */
148int ipv6_sock_ac_drop(struct sock *sk, int ifindex, struct in6_addr *addr) 148int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
149{ 149{
150 struct ipv6_pinfo *np = inet6_sk(sk); 150 struct ipv6_pinfo *np = inet6_sk(sk);
151 struct net_device *dev; 151 struct net_device *dev;
@@ -252,7 +252,7 @@ static void aca_put(struct ifacaddr6 *ac)
252/* 252/*
253 * device anycast group inc (add if not found) 253 * device anycast group inc (add if not found)
254 */ 254 */
255int ipv6_dev_ac_inc(struct net_device *dev, struct in6_addr *addr) 255int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr)
256{ 256{
257 struct ifacaddr6 *aca; 257 struct ifacaddr6 *aca;
258 struct inet6_dev *idev; 258 struct inet6_dev *idev;
@@ -324,7 +324,7 @@ out:
324/* 324/*
325 * device anycast group decrement 325 * device anycast group decrement
326 */ 326 */
327int __ipv6_dev_ac_dec(struct inet6_dev *idev, struct in6_addr *addr) 327int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr)
328{ 328{
329 struct ifacaddr6 *aca, *prev_aca; 329 struct ifacaddr6 *aca, *prev_aca;
330 330
@@ -358,7 +358,7 @@ int __ipv6_dev_ac_dec(struct inet6_dev *idev, struct in6_addr *addr)
358} 358}
359 359
360/* called with rcu_read_lock() */ 360/* called with rcu_read_lock() */
361static int ipv6_dev_ac_dec(struct net_device *dev, struct in6_addr *addr) 361static int ipv6_dev_ac_dec(struct net_device *dev, const struct in6_addr *addr)
362{ 362{
363 struct inet6_dev *idev = __in6_dev_get(dev); 363 struct inet6_dev *idev = __in6_dev_get(dev);
364 364
@@ -371,7 +371,7 @@ static int ipv6_dev_ac_dec(struct net_device *dev, struct in6_addr *addr)
371 * check if the interface has this anycast address 371 * check if the interface has this anycast address
372 * called with rcu_read_lock() 372 * called with rcu_read_lock()
373 */ 373 */
374static int ipv6_chk_acast_dev(struct net_device *dev, struct in6_addr *addr) 374static int ipv6_chk_acast_dev(struct net_device *dev, const struct in6_addr *addr)
375{ 375{
376 struct inet6_dev *idev; 376 struct inet6_dev *idev;
377 struct ifacaddr6 *aca; 377 struct ifacaddr6 *aca;
@@ -392,7 +392,7 @@ static int ipv6_chk_acast_dev(struct net_device *dev, struct in6_addr *addr)
392 * check if given interface (or any, if dev==0) has this anycast address 392 * check if given interface (or any, if dev==0) has this anycast address
393 */ 393 */
394int ipv6_chk_acast_addr(struct net *net, struct net_device *dev, 394int ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
395 struct in6_addr *addr) 395 const struct in6_addr *addr)
396{ 396{
397 int found = 0; 397 int found = 0;
398 398
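
The anycast.c hunks -- like many of the ipv6 hunks in this merge -- only add const to in6_addr parameters. The payoff is that code holding a const header pointer (see the const struct ipv6hdr * conversions elsewhere in this diff) can pass addresses straight through without casts, with the compiler enforcing read-only use down the call chain. A hypothetical caller illustrating the propagation (the call site and helper below are made up for illustration; only the constified prototype comes from the hunk):

    const struct ipv6hdr *hdr = ipv6_hdr(skb);
    /* &hdr->daddr has type const struct in6_addr *, which now
     * matches the constified ipv6_chk_acast_addr() prototype: */
    if (ipv6_chk_acast_addr(dev_net(dev), dev, &hdr->daddr))
            handle_anycast(skb);    /* hypothetical helper */
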
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 59dccfbb5b11..1ac7938dd9ec 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -430,7 +430,7 @@ static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
430 u8 type, u8 code, int offset, __be32 info) 430 u8 type, u8 code, int offset, __be32 info)
431{ 431{
432 struct net *net = dev_net(skb->dev); 432 struct net *net = dev_net(skb->dev);
433 struct ipv6hdr *iph = (struct ipv6hdr*)skb->data; 433 const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
434 struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset); 434 struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset);
435 struct xfrm_state *x; 435 struct xfrm_state *x;
436 436
@@ -438,7 +438,8 @@ static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
438 type != ICMPV6_PKT_TOOBIG) 438 type != ICMPV6_PKT_TOOBIG)
439 return; 439 return;
440 440
441 x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, esph->spi, IPPROTO_ESP, AF_INET6); 441 x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
442 esph->spi, IPPROTO_ESP, AF_INET6);
442 if (!x) 443 if (!x)
443 return; 444 return;
444 printk(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%pI6\n", 445 printk(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%pI6\n",
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 83cb4f9add81..11900417b1cc 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -372,7 +372,7 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
372 struct ipv6hdr *hdr = ipv6_hdr(skb); 372 struct ipv6hdr *hdr = ipv6_hdr(skb);
373 struct sock *sk; 373 struct sock *sk;
374 struct ipv6_pinfo *np; 374 struct ipv6_pinfo *np;
375 struct in6_addr *saddr = NULL; 375 const struct in6_addr *saddr = NULL;
376 struct dst_entry *dst; 376 struct dst_entry *dst;
377 struct icmp6hdr tmp_hdr; 377 struct icmp6hdr tmp_hdr;
378 struct flowi6 fl6; 378 struct flowi6 fl6;
@@ -521,7 +521,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
521 struct sock *sk; 521 struct sock *sk;
522 struct inet6_dev *idev; 522 struct inet6_dev *idev;
523 struct ipv6_pinfo *np; 523 struct ipv6_pinfo *np;
524 struct in6_addr *saddr = NULL; 524 const struct in6_addr *saddr = NULL;
525 struct icmp6hdr *icmph = icmp6_hdr(skb); 525 struct icmp6hdr *icmph = icmp6_hdr(skb);
526 struct icmp6hdr tmp_hdr; 526 struct icmp6hdr tmp_hdr;
527 struct flowi6 fl6; 527 struct flowi6 fl6;
@@ -645,8 +645,8 @@ static int icmpv6_rcv(struct sk_buff *skb)
645{ 645{
646 struct net_device *dev = skb->dev; 646 struct net_device *dev = skb->dev;
647 struct inet6_dev *idev = __in6_dev_get(dev); 647 struct inet6_dev *idev = __in6_dev_get(dev);
648 struct in6_addr *saddr, *daddr; 648 const struct in6_addr *saddr, *daddr;
649 struct ipv6hdr *orig_hdr; 649 const struct ipv6hdr *orig_hdr;
650 struct icmp6hdr *hdr; 650 struct icmp6hdr *hdr;
651 u8 type; 651 u8 type;
652 652
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index f2c5b0fc0f21..8a58e8cf6646 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -203,7 +203,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
203 return dst; 203 return dst;
204} 204}
205 205
206int inet6_csk_xmit(struct sk_buff *skb) 206int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl_unused)
207{ 207{
208 struct sock *sk = skb->sk; 208 struct sock *sk = skb->sk;
209 struct inet_sock *inet = inet_sk(sk); 209 struct inet_sock *inet = inet_sk(sk);
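
inet6_csk_xmit() gains the flowi argument required by the new queue_xmit() prototype (see the tcp_output.c hunk earlier) but does not use it -- hence the fl_unused name: the IPv6 path still rebuilds its flowi6 from the socket state. A hedged sketch of how the hook plugs in, mirroring the way tcp_ipv6.c wires its af_ops (the ops-table name below is illustrative):

    /* the v6 hook matches the common prototype but ignores fl: */
    static const struct inet_connection_sock_af_ops ipv6_sketch_ops = {
            .queue_xmit = inet6_csk_xmit,  /* int (*)(struct sk_buff *, struct flowi *) */
            /* ... remaining ops unchanged ... */
    };
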
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 7548905e79e1..4076a0b14b20 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -134,9 +134,9 @@ static __inline__ u32 fib6_new_sernum(void)
134# define BITOP_BE32_SWIZZLE 0 134# define BITOP_BE32_SWIZZLE 0
135#endif 135#endif
136 136
137static __inline__ __be32 addr_bit_set(void *token, int fn_bit) 137static __inline__ __be32 addr_bit_set(const void *token, int fn_bit)
138{ 138{
139 __be32 *addr = token; 139 const __be32 *addr = token;
140 /* 140 /*
141 * Here, 141 * Here,
142 * 1 << ((~fn_bit ^ BITOP_BE32_SWIZZLE) & 0x1f) 142 * 1 << ((~fn_bit ^ BITOP_BE32_SWIZZLE) & 0x1f)
@@ -394,10 +394,11 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
394 arg.net = net; 394 arg.net = net;
395 w->args = &arg; 395 w->args = &arg;
396 396
397 rcu_read_lock();
397 for (h = s_h; h < FIB6_TABLE_HASHSZ; h++, s_e = 0) { 398 for (h = s_h; h < FIB6_TABLE_HASHSZ; h++, s_e = 0) {
398 e = 0; 399 e = 0;
399 head = &net->ipv6.fib_table_hash[h]; 400 head = &net->ipv6.fib_table_hash[h];
400 hlist_for_each_entry(tb, node, head, tb6_hlist) { 401 hlist_for_each_entry_rcu(tb, node, head, tb6_hlist) {
401 if (e < s_e) 402 if (e < s_e)
402 goto next; 403 goto next;
403 res = fib6_dump_table(tb, skb, cb); 404 res = fib6_dump_table(tb, skb, cb);
@@ -408,6 +409,7 @@ next:
408 } 409 }
409 } 410 }
410out: 411out:
412 rcu_read_unlock();
411 cb->args[1] = e; 413 cb->args[1] = e;
412 cb->args[0] = h; 414 cb->args[0] = h;
413 415
@@ -822,7 +824,7 @@ st_failure:
822 824
823struct lookup_args { 825struct lookup_args {
824 int offset; /* key offset on rt6_info */ 826 int offset; /* key offset on rt6_info */
825 struct in6_addr *addr; /* search key */ 827 const struct in6_addr *addr; /* search key */
826}; 828};
827 829
828static struct fib6_node * fib6_lookup_1(struct fib6_node *root, 830static struct fib6_node * fib6_lookup_1(struct fib6_node *root,
@@ -881,8 +883,8 @@ static struct fib6_node * fib6_lookup_1(struct fib6_node *root,
881 return NULL; 883 return NULL;
882} 884}
883 885
884struct fib6_node * fib6_lookup(struct fib6_node *root, struct in6_addr *daddr, 886struct fib6_node * fib6_lookup(struct fib6_node *root, const struct in6_addr *daddr,
885 struct in6_addr *saddr) 887 const struct in6_addr *saddr)
886{ 888{
887 struct fib6_node *fn; 889 struct fib6_node *fn;
888 struct lookup_args args[] = { 890 struct lookup_args args[] = {
@@ -916,7 +918,7 @@ struct fib6_node * fib6_lookup(struct fib6_node *root, struct in6_addr *daddr,
916 918
917 919
918static struct fib6_node * fib6_locate_1(struct fib6_node *root, 920static struct fib6_node * fib6_locate_1(struct fib6_node *root,
919 struct in6_addr *addr, 921 const struct in6_addr *addr,
920 int plen, int offset) 922 int plen, int offset)
921{ 923{
922 struct fib6_node *fn; 924 struct fib6_node *fn;
@@ -946,8 +948,8 @@ static struct fib6_node * fib6_locate_1(struct fib6_node *root,
946} 948}
947 949
948struct fib6_node * fib6_locate(struct fib6_node *root, 950struct fib6_node * fib6_locate(struct fib6_node *root,
949 struct in6_addr *daddr, int dst_len, 951 const struct in6_addr *daddr, int dst_len,
950 struct in6_addr *saddr, int src_len) 952 const struct in6_addr *saddr, int src_len)
951{ 953{
952 struct fib6_node *fn; 954 struct fib6_node *fn;
953 955
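
The inet6_dump_fib() hunk converts the walk over the FIB table hash to RCU: the loop now runs between rcu_read_lock()/rcu_read_unlock() and uses hlist_for_each_entry_rcu(), presumably so the dump can proceed safely against concurrent table insertion and removal without taking a table lock. The resulting loop shape, condensed from the hunk:

    rcu_read_lock();
    for (h = s_h; h < FIB6_TABLE_HASHSZ; h++, s_e = 0) {
            e = 0;
            head = &net->ipv6.fib_table_hash[h];
            hlist_for_each_entry_rcu(tb, node, head, tb6_hlist) {
                    if (e < s_e)
                            goto next;
                    res = fib6_dump_table(tb, skb, cb);
                    if (res != 0)
                            goto out;
    next:
                    e++;
            }
    }
    out:
    rcu_read_unlock();
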
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index a83e9209cecc..027c7ff6f1e5 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -57,7 +57,7 @@ inline int ip6_rcv_finish( struct sk_buff *skb)
57 57
58int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) 58int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
59{ 59{
60 struct ipv6hdr *hdr; 60 const struct ipv6hdr *hdr;
61 u32 pkt_len; 61 u32 pkt_len;
62 struct inet6_dev *idev; 62 struct inet6_dev *idev;
63 struct net *net = dev_net(skb->dev); 63 struct net *net = dev_net(skb->dev);
@@ -186,7 +186,7 @@ resubmit:
186 int ret; 186 int ret;
187 187
188 if (ipprot->flags & INET6_PROTO_FINAL) { 188 if (ipprot->flags & INET6_PROTO_FINAL) {
189 struct ipv6hdr *hdr; 189 const struct ipv6hdr *hdr;
190 190
191 /* Free reference early: we don't need it any more, 191 /* Free reference early: we don't need it any more,
192 and it may hold ip_conntrack module loaded 192 and it may hold ip_conntrack module loaded
@@ -242,7 +242,7 @@ int ip6_input(struct sk_buff *skb)
242 242
243int ip6_mc_input(struct sk_buff *skb) 243int ip6_mc_input(struct sk_buff *skb)
244{ 244{
245 struct ipv6hdr *hdr; 245 const struct ipv6hdr *hdr;
246 int deliver; 246 int deliver;
247 247
248 IP6_UPD_PO_STATS_BH(dev_net(skb_dst(skb)->dev), 248 IP6_UPD_PO_STATS_BH(dev_net(skb_dst(skb)->dev),
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 46cf7bea6769..9d4b165837d6 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -869,9 +869,9 @@ fail:
869 return err; 869 return err;
870} 870}
871 871
872static inline int ip6_rt_check(struct rt6key *rt_key, 872static inline int ip6_rt_check(const struct rt6key *rt_key,
873 struct in6_addr *fl_addr, 873 const struct in6_addr *fl_addr,
874 struct in6_addr *addr_cache) 874 const struct in6_addr *addr_cache)
875{ 875{
876 return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) && 876 return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
877 (addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache)); 877 (addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache));
@@ -879,7 +879,7 @@ static inline int ip6_rt_check(struct rt6key *rt_key,
879 879
880static struct dst_entry *ip6_sk_dst_check(struct sock *sk, 880static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
881 struct dst_entry *dst, 881 struct dst_entry *dst,
882 struct flowi6 *fl6) 882 const struct flowi6 *fl6)
883{ 883{
884 struct ipv6_pinfo *np = inet6_sk(sk); 884 struct ipv6_pinfo *np = inet6_sk(sk);
885 struct rt6_info *rt = (struct rt6_info *)dst; 885 struct rt6_info *rt = (struct rt6_info *)dst;
@@ -930,10 +930,10 @@ static int ip6_dst_lookup_tail(struct sock *sk,
930 goto out_err_release; 930 goto out_err_release;
931 931
932 if (ipv6_addr_any(&fl6->saddr)) { 932 if (ipv6_addr_any(&fl6->saddr)) {
933 err = ipv6_dev_get_saddr(net, ip6_dst_idev(*dst)->dev, 933 struct rt6_info *rt = (struct rt6_info *) *dst;
934 &fl6->daddr, 934 err = ip6_route_get_saddr(net, rt, &fl6->daddr,
935 sk ? inet6_sk(sk)->srcprefs : 0, 935 sk ? inet6_sk(sk)->srcprefs : 0,
936 &fl6->saddr); 936 &fl6->saddr);
937 if (err) 937 if (err)
938 goto out_err_release; 938 goto out_err_release;
939 } 939 }
@@ -1150,6 +1150,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1150{ 1150{
1151 struct inet_sock *inet = inet_sk(sk); 1151 struct inet_sock *inet = inet_sk(sk);
1152 struct ipv6_pinfo *np = inet6_sk(sk); 1152 struct ipv6_pinfo *np = inet6_sk(sk);
1153 struct inet_cork *cork;
1153 struct sk_buff *skb; 1154 struct sk_buff *skb;
1154 unsigned int maxfraglen, fragheaderlen; 1155 unsigned int maxfraglen, fragheaderlen;
1155 int exthdrlen; 1156 int exthdrlen;
@@ -1163,6 +1164,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1163 1164
1164 if (flags&MSG_PROBE) 1165 if (flags&MSG_PROBE)
1165 return 0; 1166 return 0;
1167 cork = &inet->cork.base;
1166 if (skb_queue_empty(&sk->sk_write_queue)) { 1168 if (skb_queue_empty(&sk->sk_write_queue)) {
1167 /* 1169 /*
1168 * setup for corking 1170 * setup for corking
@@ -1202,7 +1204,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1202 /* need source address above miyazawa*/ 1204 /* need source address above miyazawa*/
1203 } 1205 }
1204 dst_hold(&rt->dst); 1206 dst_hold(&rt->dst);
1205 inet->cork.dst = &rt->dst; 1207 cork->dst = &rt->dst;
1206 inet->cork.fl.u.ip6 = *fl6; 1208 inet->cork.fl.u.ip6 = *fl6;
1207 np->cork.hop_limit = hlimit; 1209 np->cork.hop_limit = hlimit;
1208 np->cork.tclass = tclass; 1210 np->cork.tclass = tclass;
@@ -1212,10 +1214,10 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1212 if (np->frag_size) 1214 if (np->frag_size)
1213 mtu = np->frag_size; 1215 mtu = np->frag_size;
1214 } 1216 }
1215 inet->cork.fragsize = mtu; 1217 cork->fragsize = mtu;
1216 if (dst_allfrag(rt->dst.path)) 1218 if (dst_allfrag(rt->dst.path))
1217 inet->cork.flags |= IPCORK_ALLFRAG; 1219 cork->flags |= IPCORK_ALLFRAG;
1218 inet->cork.length = 0; 1220 cork->length = 0;
1219 sk->sk_sndmsg_page = NULL; 1221 sk->sk_sndmsg_page = NULL;
1220 sk->sk_sndmsg_off = 0; 1222 sk->sk_sndmsg_off = 0;
1221 exthdrlen = rt->dst.header_len + (opt ? opt->opt_flen : 0) - 1223 exthdrlen = rt->dst.header_len + (opt ? opt->opt_flen : 0) -
@@ -1223,12 +1225,12 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1223 length += exthdrlen; 1225 length += exthdrlen;
1224 transhdrlen += exthdrlen; 1226 transhdrlen += exthdrlen;
1225 } else { 1227 } else {
1226 rt = (struct rt6_info *)inet->cork.dst; 1228 rt = (struct rt6_info *)cork->dst;
1227 fl6 = &inet->cork.fl.u.ip6; 1229 fl6 = &inet->cork.fl.u.ip6;
1228 opt = np->cork.opt; 1230 opt = np->cork.opt;
1229 transhdrlen = 0; 1231 transhdrlen = 0;
1230 exthdrlen = 0; 1232 exthdrlen = 0;
1231 mtu = inet->cork.fragsize; 1233 mtu = cork->fragsize;
1232 } 1234 }
1233 1235
1234 hh_len = LL_RESERVED_SPACE(rt->dst.dev); 1236 hh_len = LL_RESERVED_SPACE(rt->dst.dev);
@@ -1238,7 +1240,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1238 maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr); 1240 maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr);
1239 1241
1240 if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) { 1242 if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
1241 if (inet->cork.length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) { 1243 if (cork->length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) {
1242 ipv6_local_error(sk, EMSGSIZE, fl6, mtu-exthdrlen); 1244 ipv6_local_error(sk, EMSGSIZE, fl6, mtu-exthdrlen);
1243 return -EMSGSIZE; 1245 return -EMSGSIZE;
1244 } 1246 }
@@ -1267,7 +1269,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1267 * --yoshfuji 1269 * --yoshfuji
1268 */ 1270 */
1269 1271
1270 inet->cork.length += length; 1272 cork->length += length;
1271 if (length > mtu) { 1273 if (length > mtu) {
1272 int proto = sk->sk_protocol; 1274 int proto = sk->sk_protocol;
1273 if (dontfrag && (proto == IPPROTO_UDP || proto == IPPROTO_RAW)){ 1275 if (dontfrag && (proto == IPPROTO_UDP || proto == IPPROTO_RAW)){
@@ -1292,7 +1294,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1292 1294
1293 while (length > 0) { 1295 while (length > 0) {
1294 /* Check if the remaining data fits into current packet. */ 1296 /* Check if the remaining data fits into current packet. */
1295 copy = (inet->cork.length <= mtu && !(inet->cork.flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len; 1297 copy = (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
1296 if (copy < length) 1298 if (copy < length)
1297 copy = maxfraglen - skb->len; 1299 copy = maxfraglen - skb->len;
1298 1300
@@ -1317,7 +1319,7 @@ alloc_new_skb:
1317 * we know we need more fragment(s). 1319 * we know we need more fragment(s).
1318 */ 1320 */
1319 datalen = length + fraggap; 1321 datalen = length + fraggap;
1320 if (datalen > (inet->cork.length <= mtu && !(inet->cork.flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen) 1322 if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
1321 datalen = maxfraglen - fragheaderlen; 1323 datalen = maxfraglen - fragheaderlen;
1322 1324
1323 fraglen = datalen + fragheaderlen; 1325 fraglen = datalen + fragheaderlen;
@@ -1481,7 +1483,7 @@ alloc_new_skb:
1481 } 1483 }
1482 return 0; 1484 return 0;
1483error: 1485error:
1484 inet->cork.length -= length; 1486 cork->length -= length;
1485 IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS); 1487 IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
1486 return err; 1488 return err;
1487} 1489}
@@ -1497,10 +1499,10 @@ static void ip6_cork_release(struct inet_sock *inet, struct ipv6_pinfo *np)
1497 np->cork.opt = NULL; 1499 np->cork.opt = NULL;
1498 } 1500 }
1499 1501
1500 if (inet->cork.dst) { 1502 if (inet->cork.base.dst) {
1501 dst_release(inet->cork.dst); 1503 dst_release(inet->cork.base.dst);
1502 inet->cork.dst = NULL; 1504 inet->cork.base.dst = NULL;
1503 inet->cork.flags &= ~IPCORK_ALLFRAG; 1505 inet->cork.base.flags &= ~IPCORK_ALLFRAG;
1504 } 1506 }
1505 memset(&inet->cork.fl, 0, sizeof(inet->cork.fl)); 1507 memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
1506} 1508}
@@ -1515,7 +1517,7 @@ int ip6_push_pending_frames(struct sock *sk)
1515 struct net *net = sock_net(sk); 1517 struct net *net = sock_net(sk);
1516 struct ipv6hdr *hdr; 1518 struct ipv6hdr *hdr;
1517 struct ipv6_txoptions *opt = np->cork.opt; 1519 struct ipv6_txoptions *opt = np->cork.opt;
1518 struct rt6_info *rt = (struct rt6_info *)inet->cork.dst; 1520 struct rt6_info *rt = (struct rt6_info *)inet->cork.base.dst;
1519 struct flowi6 *fl6 = &inet->cork.fl.u.ip6; 1521 struct flowi6 *fl6 = &inet->cork.fl.u.ip6;
1520 unsigned char proto = fl6->flowi6_proto; 1522 unsigned char proto = fl6->flowi6_proto;
1521 int err = 0; 1523 int err = 0;
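
These ip6_output.c hunks are the IPv6 half of the cork rework: the per-socket cork state is split into a protocol-independent part and the flow it was built for, so ip6_append_data() fetches the shared part once via cork = &inet->cork.base and every former inet->cork.X access becomes cork->X (dst, fragsize, flags, length). Presumed layout after this merge -- the actual struct lives in include/net/inet_sock.h, so read this as an assumption:

    struct inet_cork_full {
            struct inet_cork base;  /* dst, fragsize, length, flags, ... */
            struct flowi     fl;    /* flow key the pending frames were built for */
    };

    /* ipv6 then shares the base fields with ipv4's ip_append_data(): */
    struct inet_cork *cork = &inet->cork.base;
    cork->dst = &rt->dst;
    cork->fragsize = mtu;
    cork->length = 0;
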
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index c1b1bd312df2..36c2842a86b2 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -162,7 +162,7 @@ static inline void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst)
162 for (t = rcu_dereference(start); t; t = rcu_dereference(t->next)) 162 for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
163 163
164static struct ip6_tnl * 164static struct ip6_tnl *
165ip6_tnl_lookup(struct net *net, struct in6_addr *remote, struct in6_addr *local) 165ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_addr *local)
166{ 166{
167 unsigned int h0 = HASH(remote); 167 unsigned int h0 = HASH(remote);
168 unsigned int h1 = HASH(local); 168 unsigned int h1 = HASH(local);
@@ -194,10 +194,10 @@ ip6_tnl_lookup(struct net *net, struct in6_addr *remote, struct in6_addr *local)
194 **/ 194 **/
195 195
196static struct ip6_tnl __rcu ** 196static struct ip6_tnl __rcu **
197ip6_tnl_bucket(struct ip6_tnl_net *ip6n, struct ip6_tnl_parm *p) 197ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct ip6_tnl_parm *p)
198{ 198{
199 struct in6_addr *remote = &p->raddr; 199 const struct in6_addr *remote = &p->raddr;
200 struct in6_addr *local = &p->laddr; 200 const struct in6_addr *local = &p->laddr;
201 unsigned h = 0; 201 unsigned h = 0;
202 int prio = 0; 202 int prio = 0;
203 203
@@ -280,11 +280,6 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct ip6_tnl_parm *p)
280 280
281 dev_net_set(dev, net); 281 dev_net_set(dev, net);
282 282
283 if (strchr(name, '%')) {
284 if (dev_alloc_name(dev, name) < 0)
285 goto failed_free;
286 }
287
288 t = netdev_priv(dev); 283 t = netdev_priv(dev);
289 t->parms = *p; 284 t->parms = *p;
290 err = ip6_tnl_dev_init(dev); 285 err = ip6_tnl_dev_init(dev);
@@ -321,8 +316,8 @@ failed:
321static struct ip6_tnl *ip6_tnl_locate(struct net *net, 316static struct ip6_tnl *ip6_tnl_locate(struct net *net,
322 struct ip6_tnl_parm *p, int create) 317 struct ip6_tnl_parm *p, int create)
323{ 318{
324 struct in6_addr *remote = &p->raddr; 319 const struct in6_addr *remote = &p->raddr;
325 struct in6_addr *local = &p->laddr; 320 const struct in6_addr *local = &p->laddr;
326 struct ip6_tnl __rcu **tp; 321 struct ip6_tnl __rcu **tp;
327 struct ip6_tnl *t; 322 struct ip6_tnl *t;
328 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); 323 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
@@ -374,7 +369,7 @@ ip6_tnl_dev_uninit(struct net_device *dev)
374static __u16 369static __u16
375parse_tlv_tnl_enc_lim(struct sk_buff *skb, __u8 * raw) 370parse_tlv_tnl_enc_lim(struct sk_buff *skb, __u8 * raw)
376{ 371{
377 struct ipv6hdr *ipv6h = (struct ipv6hdr *) raw; 372 const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) raw;
378 __u8 nexthdr = ipv6h->nexthdr; 373 __u8 nexthdr = ipv6h->nexthdr;
379 __u16 off = sizeof (*ipv6h); 374 __u16 off = sizeof (*ipv6h);
380 375
@@ -435,7 +430,7 @@ static int
435ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt, 430ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
436 u8 *type, u8 *code, int *msg, __u32 *info, int offset) 431 u8 *type, u8 *code, int *msg, __u32 *info, int offset)
437{ 432{
438 struct ipv6hdr *ipv6h = (struct ipv6hdr *) skb->data; 433 const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) skb->data;
439 struct ip6_tnl *t; 434 struct ip6_tnl *t;
440 int rel_msg = 0; 435 int rel_msg = 0;
441 u8 rel_type = ICMPV6_DEST_UNREACH; 436 u8 rel_type = ICMPV6_DEST_UNREACH;
@@ -535,8 +530,9 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
535 __u32 rel_info = ntohl(info); 530 __u32 rel_info = ntohl(info);
536 int err; 531 int err;
537 struct sk_buff *skb2; 532 struct sk_buff *skb2;
538 struct iphdr *eiph; 533 const struct iphdr *eiph;
539 struct rtable *rt; 534 struct rtable *rt;
535 struct flowi4 fl4;
540 536
541 err = ip6_tnl_err(skb, IPPROTO_IPIP, opt, &rel_type, &rel_code, 537 err = ip6_tnl_err(skb, IPPROTO_IPIP, opt, &rel_type, &rel_code,
542 &rel_msg, &rel_info, offset); 538 &rel_msg, &rel_info, offset);
@@ -577,7 +573,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
577 eiph = ip_hdr(skb2); 573 eiph = ip_hdr(skb2);
578 574
579 /* Try to guess incoming interface */ 575 /* Try to guess incoming interface */
580 rt = ip_route_output_ports(dev_net(skb->dev), NULL, 576 rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
581 eiph->saddr, 0, 577 eiph->saddr, 0,
582 0, 0, 578 0, 0,
583 IPPROTO_IPIP, RT_TOS(eiph->tos), 0); 579 IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
@@ -590,7 +586,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
590 if (rt->rt_flags & RTCF_LOCAL) { 586 if (rt->rt_flags & RTCF_LOCAL) {
591 ip_rt_put(rt); 587 ip_rt_put(rt);
592 rt = NULL; 588 rt = NULL;
593 rt = ip_route_output_ports(dev_net(skb->dev), NULL, 589 rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
594 eiph->daddr, eiph->saddr, 590 eiph->daddr, eiph->saddr,
595 0, 0, 591 0, 0,
596 IPPROTO_IPIP, 592 IPPROTO_IPIP,
@@ -669,8 +665,8 @@ ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
669 return 0; 665 return 0;
670} 666}
671 667
672static void ip4ip6_dscp_ecn_decapsulate(struct ip6_tnl *t, 668static void ip4ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
673 struct ipv6hdr *ipv6h, 669 const struct ipv6hdr *ipv6h,
674 struct sk_buff *skb) 670 struct sk_buff *skb)
675{ 671{
676 __u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK; 672 __u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK;
@@ -682,8 +678,8 @@ static void ip4ip6_dscp_ecn_decapsulate(struct ip6_tnl *t,
682 IP_ECN_set_ce(ip_hdr(skb)); 678 IP_ECN_set_ce(ip_hdr(skb));
683} 679}
684 680
685static void ip6ip6_dscp_ecn_decapsulate(struct ip6_tnl *t, 681static void ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
686 struct ipv6hdr *ipv6h, 682 const struct ipv6hdr *ipv6h,
687 struct sk_buff *skb) 683 struct sk_buff *skb)
688{ 684{
689 if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY) 685 if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
@@ -726,12 +722,12 @@ static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t)
726 722
727static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol, 723static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
728 __u8 ipproto, 724 __u8 ipproto,
729 void (*dscp_ecn_decapsulate)(struct ip6_tnl *t, 725 void (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
730 struct ipv6hdr *ipv6h, 726 const struct ipv6hdr *ipv6h,
731 struct sk_buff *skb)) 727 struct sk_buff *skb))
732{ 728{
733 struct ip6_tnl *t; 729 struct ip6_tnl *t;
734 struct ipv6hdr *ipv6h = ipv6_hdr(skb); 730 const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
735 731
736 rcu_read_lock(); 732 rcu_read_lock();
737 733
@@ -828,7 +824,7 @@ static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
828 **/ 824 **/
829 825
830static inline int 826static inline int
831ip6_tnl_addr_conflict(struct ip6_tnl *t, struct ipv6hdr *hdr) 827ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
832{ 828{
833 return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr); 829 return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
834} 830}
@@ -1005,7 +1001,7 @@ static inline int
1005ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) 1001ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1006{ 1002{
1007 struct ip6_tnl *t = netdev_priv(dev); 1003 struct ip6_tnl *t = netdev_priv(dev);
1008 struct iphdr *iph = ip_hdr(skb); 1004 const struct iphdr *iph = ip_hdr(skb);
1009 int encap_limit = -1; 1005 int encap_limit = -1;
1010 struct flowi6 fl6; 1006 struct flowi6 fl6;
1011 __u8 dsfield; 1007 __u8 dsfield;
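Two independent cleanups run through this file. First, lookup and parse helpers that only read addresses or headers gain const parameters, so they can take pointers straight into received, read-only headers without casts. Second, ip_route_output_ports() now takes a caller-owned struct flowi4 instead of keeping the flow key internal; the apparent motivation is that the lookup writes the resolved key back, letting callers read e.g. the chosen source address out of fl4 afterwards. A sketch of the new calling pattern, mirroring the hunk above (the fl4 read-back is an assumption about intent, not something this hunk relies on):

	struct flowi4 fl4;
	struct rtable *rt;

	rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
				   eiph->saddr, 0,	/* daddr, saddr */
				   0, 0,		/* dport, sport */
				   IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
	/* check IS_ERR(rt) before use */
	/* fl4.saddr now holds whatever source address routing selected */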
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 29e48593bf22..82a809901f8e 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -989,8 +989,8 @@ static int mif6_add(struct net *net, struct mr6_table *mrt,
989} 989}
990 990
991static struct mfc6_cache *ip6mr_cache_find(struct mr6_table *mrt, 991static struct mfc6_cache *ip6mr_cache_find(struct mr6_table *mrt,
992 struct in6_addr *origin, 992 const struct in6_addr *origin,
993 struct in6_addr *mcastgrp) 993 const struct in6_addr *mcastgrp)
994{ 994{
995 int line = MFC6_HASH(mcastgrp, origin); 995 int line = MFC6_HASH(mcastgrp, origin);
996 struct mfc6_cache *c; 996 struct mfc6_cache *c;
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index 85cccd6ed0b7..bba658d9a03c 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -55,7 +55,7 @@ static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
55{ 55{
56 struct net *net = dev_net(skb->dev); 56 struct net *net = dev_net(skb->dev);
57 __be32 spi; 57 __be32 spi;
58 struct ipv6hdr *iph = (struct ipv6hdr*)skb->data; 58 const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
59 struct ip_comp_hdr *ipcomph = 59 struct ip_comp_hdr *ipcomph =
60 (struct ip_comp_hdr *)(skb->data + offset); 60 (struct ip_comp_hdr *)(skb->data + offset);
61 struct xfrm_state *x; 61 struct xfrm_state *x;
@@ -64,7 +64,8 @@ static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
64 return; 64 return;
65 65
66 spi = htonl(ntohs(ipcomph->cpi)); 66 spi = htonl(ntohs(ipcomph->cpi));
67 x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, spi, IPPROTO_COMP, AF_INET6); 67 x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
68 spi, IPPROTO_COMP, AF_INET6);
68 if (!x) 69 if (!x)
69 return; 70 return;
70 71
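IPComp names its compression state with a 16-bit CPI on the wire, while xfrm_state_lookup() keys on a 32-bit SPI, so the handler widens one into the other with htonl(ntohs(cpi)). A standalone illustration of that widening (plain C, made-up CPI value):

	#include <arpa/inet.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint16_t cpi = htons(0x1234);	   /* CPI as received */
		uint32_t spi = htonl(ntohs(cpi));  /* same step as ipcomp6_err() */

		printf("spi = 0x%08x in host order\n", ntohl(spi));
		return 0;
	}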
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index f2d98ca7588a..3e6ebcdb4779 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -92,16 +92,16 @@ static void mld_gq_timer_expire(unsigned long data);
92static void mld_ifc_timer_expire(unsigned long data); 92static void mld_ifc_timer_expire(unsigned long data);
93static void mld_ifc_event(struct inet6_dev *idev); 93static void mld_ifc_event(struct inet6_dev *idev);
94static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc); 94static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
95static void mld_del_delrec(struct inet6_dev *idev, struct in6_addr *addr); 95static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *addr);
96static void mld_clear_delrec(struct inet6_dev *idev); 96static void mld_clear_delrec(struct inet6_dev *idev);
97static int sf_setstate(struct ifmcaddr6 *pmc); 97static int sf_setstate(struct ifmcaddr6 *pmc);
98static void sf_markstate(struct ifmcaddr6 *pmc); 98static void sf_markstate(struct ifmcaddr6 *pmc);
99static void ip6_mc_clear_src(struct ifmcaddr6 *pmc); 99static void ip6_mc_clear_src(struct ifmcaddr6 *pmc);
100static int ip6_mc_del_src(struct inet6_dev *idev, struct in6_addr *pmca, 100static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
101 int sfmode, int sfcount, struct in6_addr *psfsrc, 101 int sfmode, int sfcount, const struct in6_addr *psfsrc,
102 int delta); 102 int delta);
103static int ip6_mc_add_src(struct inet6_dev *idev, struct in6_addr *pmca, 103static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
104 int sfmode, int sfcount, struct in6_addr *psfsrc, 104 int sfmode, int sfcount, const struct in6_addr *psfsrc,
105 int delta); 105 int delta);
106static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml, 106static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
107 struct inet6_dev *idev); 107 struct inet6_dev *idev);
@@ -246,7 +246,7 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
246 246
247/* called with rcu_read_lock() */ 247/* called with rcu_read_lock() */
248static struct inet6_dev *ip6_mc_find_dev_rcu(struct net *net, 248static struct inet6_dev *ip6_mc_find_dev_rcu(struct net *net,
249 struct in6_addr *group, 249 const struct in6_addr *group,
250 int ifindex) 250 int ifindex)
251{ 251{
252 struct net_device *dev = NULL; 252 struct net_device *dev = NULL;
@@ -447,7 +447,7 @@ done:
447 447
448int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf) 448int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf)
449{ 449{
450 struct in6_addr *group; 450 const struct in6_addr *group;
451 struct ipv6_mc_socklist *pmc; 451 struct ipv6_mc_socklist *pmc;
452 struct inet6_dev *idev; 452 struct inet6_dev *idev;
453 struct ipv6_pinfo *inet6 = inet6_sk(sk); 453 struct ipv6_pinfo *inet6 = inet6_sk(sk);
@@ -538,7 +538,7 @@ int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
538 struct group_filter __user *optval, int __user *optlen) 538 struct group_filter __user *optval, int __user *optlen)
539{ 539{
540 int err, i, count, copycount; 540 int err, i, count, copycount;
541 struct in6_addr *group; 541 const struct in6_addr *group;
542 struct ipv6_mc_socklist *pmc; 542 struct ipv6_mc_socklist *pmc;
543 struct inet6_dev *idev; 543 struct inet6_dev *idev;
544 struct ipv6_pinfo *inet6 = inet6_sk(sk); 544 struct ipv6_pinfo *inet6 = inet6_sk(sk);
@@ -748,7 +748,7 @@ static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
748 spin_unlock_bh(&idev->mc_lock); 748 spin_unlock_bh(&idev->mc_lock);
749} 749}
750 750
751static void mld_del_delrec(struct inet6_dev *idev, struct in6_addr *pmca) 751static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *pmca)
752{ 752{
753 struct ifmcaddr6 *pmc, *pmc_prev; 753 struct ifmcaddr6 *pmc, *pmc_prev;
754 struct ip6_sf_list *psf, *psf_next; 754 struct ip6_sf_list *psf, *psf_next;
@@ -1048,7 +1048,7 @@ static void igmp6_group_queried(struct ifmcaddr6 *ma, unsigned long resptime)
1048 1048
1049/* mark EXCLUDE-mode sources */ 1049/* mark EXCLUDE-mode sources */
1050static int mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs, 1050static int mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs,
1051 struct in6_addr *srcs) 1051 const struct in6_addr *srcs)
1052{ 1052{
1053 struct ip6_sf_list *psf; 1053 struct ip6_sf_list *psf;
1054 int i, scount; 1054 int i, scount;
@@ -1076,7 +1076,7 @@ static int mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs,
1076} 1076}
1077 1077
1078static int mld_marksources(struct ifmcaddr6 *pmc, int nsrcs, 1078static int mld_marksources(struct ifmcaddr6 *pmc, int nsrcs,
1079 struct in6_addr *srcs) 1079 const struct in6_addr *srcs)
1080{ 1080{
1081 struct ip6_sf_list *psf; 1081 struct ip6_sf_list *psf;
1082 int i, scount; 1082 int i, scount;
@@ -1111,7 +1111,7 @@ int igmp6_event_query(struct sk_buff *skb)
1111{ 1111{
1112 struct mld2_query *mlh2 = NULL; 1112 struct mld2_query *mlh2 = NULL;
1113 struct ifmcaddr6 *ma; 1113 struct ifmcaddr6 *ma;
1114 struct in6_addr *group; 1114 const struct in6_addr *group;
1115 unsigned long max_delay; 1115 unsigned long max_delay;
1116 struct inet6_dev *idev; 1116 struct inet6_dev *idev;
1117 struct mld_msg *mld; 1117 struct mld_msg *mld;
@@ -1817,7 +1817,7 @@ err_out:
1817} 1817}
1818 1818
1819static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode, 1819static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode,
1820 struct in6_addr *psfsrc) 1820 const struct in6_addr *psfsrc)
1821{ 1821{
1822 struct ip6_sf_list *psf, *psf_prev; 1822 struct ip6_sf_list *psf, *psf_prev;
1823 int rv = 0; 1823 int rv = 0;
@@ -1853,8 +1853,8 @@ static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode,
1853 return rv; 1853 return rv;
1854} 1854}
1855 1855
1856static int ip6_mc_del_src(struct inet6_dev *idev, struct in6_addr *pmca, 1856static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
1857 int sfmode, int sfcount, struct in6_addr *psfsrc, 1857 int sfmode, int sfcount, const struct in6_addr *psfsrc,
1858 int delta) 1858 int delta)
1859{ 1859{
1860 struct ifmcaddr6 *pmc; 1860 struct ifmcaddr6 *pmc;
@@ -1914,7 +1914,7 @@ static int ip6_mc_del_src(struct inet6_dev *idev, struct in6_addr *pmca,
1914 * Add multicast single-source filter to the interface list 1914 * Add multicast single-source filter to the interface list
1915 */ 1915 */
1916static int ip6_mc_add1_src(struct ifmcaddr6 *pmc, int sfmode, 1916static int ip6_mc_add1_src(struct ifmcaddr6 *pmc, int sfmode,
1917 struct in6_addr *psfsrc, int delta) 1917 const struct in6_addr *psfsrc, int delta)
1918{ 1918{
1919 struct ip6_sf_list *psf, *psf_prev; 1919 struct ip6_sf_list *psf, *psf_prev;
1920 1920
@@ -2017,8 +2017,8 @@ static int sf_setstate(struct ifmcaddr6 *pmc)
2017/* 2017/*
2018 * Add multicast source filter list to the interface list 2018 * Add multicast source filter list to the interface list
2019 */ 2019 */
2020static int ip6_mc_add_src(struct inet6_dev *idev, struct in6_addr *pmca, 2020static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
2021 int sfmode, int sfcount, struct in6_addr *psfsrc, 2021 int sfmode, int sfcount, const struct in6_addr *psfsrc,
2022 int delta) 2022 int delta)
2023{ 2023{
2024 struct ifmcaddr6 *pmc; 2024 struct ifmcaddr6 *pmc;
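All of these MLD helpers maintain per-(interface, group) source filter lists; the patch only const-qualifies the group and source addresses they read. The lists are normally populated from the RFC 3678 socket API, so a minimal source-specific join that should end up in ip6_mc_add_src() looks roughly like this (hypothetical addresses, error handling left to the caller):

	#include <netinet/in.h>
	#include <sys/socket.h>
	#include <arpa/inet.h>
	#include <string.h>

	static int join_ssm(int fd, unsigned int ifindex,
			    const char *group, const char *source)
	{
		struct group_source_req req;
		struct sockaddr_in6 *g = (struct sockaddr_in6 *)&req.gsr_group;
		struct sockaddr_in6 *s = (struct sockaddr_in6 *)&req.gsr_source;

		memset(&req, 0, sizeof(req));
		req.gsr_interface = ifindex;
		g->sin6_family = AF_INET6;
		s->sin6_family = AF_INET6;
		inet_pton(AF_INET6, group, &g->sin6_addr);
		inet_pton(AF_INET6, source, &s->sin6_addr);

		return setsockopt(fd, IPPROTO_IPV6, MCAST_JOIN_SOURCE_GROUP,
				  &req, sizeof(req));
	}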
diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c
index 9b210482fb05..43242e6e6103 100644
--- a/net/ipv6/mip6.c
+++ b/net/ipv6/mip6.c
@@ -126,7 +126,7 @@ static struct mip6_report_rate_limiter mip6_report_rl = {
126 126
127static int mip6_destopt_input(struct xfrm_state *x, struct sk_buff *skb) 127static int mip6_destopt_input(struct xfrm_state *x, struct sk_buff *skb)
128{ 128{
129 struct ipv6hdr *iph = ipv6_hdr(skb); 129 const struct ipv6hdr *iph = ipv6_hdr(skb);
130 struct ipv6_destopt_hdr *destopt = (struct ipv6_destopt_hdr *)skb->data; 130 struct ipv6_destopt_hdr *destopt = (struct ipv6_destopt_hdr *)skb->data;
131 int err = destopt->nexthdr; 131 int err = destopt->nexthdr;
132 132
@@ -181,8 +181,8 @@ static int mip6_destopt_output(struct xfrm_state *x, struct sk_buff *skb)
181} 181}
182 182
183static inline int mip6_report_rl_allow(struct timeval *stamp, 183static inline int mip6_report_rl_allow(struct timeval *stamp,
184 struct in6_addr *dst, 184 const struct in6_addr *dst,
185 struct in6_addr *src, int iif) 185 const struct in6_addr *src, int iif)
186{ 186{
187 int allow = 0; 187 int allow = 0;
188 188
@@ -349,7 +349,7 @@ static const struct xfrm_type mip6_destopt_type =
349 349
350static int mip6_rthdr_input(struct xfrm_state *x, struct sk_buff *skb) 350static int mip6_rthdr_input(struct xfrm_state *x, struct sk_buff *skb)
351{ 351{
352 struct ipv6hdr *iph = ipv6_hdr(skb); 352 const struct ipv6hdr *iph = ipv6_hdr(skb);
353 struct rt2_hdr *rt2 = (struct rt2_hdr *)skb->data; 353 struct rt2_hdr *rt2 = (struct rt2_hdr *)skb->data;
354 int err = rt2->rt_hdr.nexthdr; 354 int err = rt2->rt_hdr.nexthdr;
355 355
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 92f952d093db..7596f071d308 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -324,7 +324,7 @@ static inline u8 *ndisc_opt_addr_data(struct nd_opt_hdr *p,
324 return lladdr + prepad; 324 return lladdr + prepad;
325} 325}
326 326
327int ndisc_mc_map(struct in6_addr *addr, char *buf, struct net_device *dev, int dir) 327int ndisc_mc_map(const struct in6_addr *addr, char *buf, struct net_device *dev, int dir)
328{ 328{
329 switch (dev->type) { 329 switch (dev->type) {
330 case ARPHRD_ETHER: 330 case ARPHRD_ETHER:
@@ -611,6 +611,29 @@ static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
611 inc_opt ? ND_OPT_TARGET_LL_ADDR : 0); 611 inc_opt ? ND_OPT_TARGET_LL_ADDR : 0);
612} 612}
613 613
614static void ndisc_send_unsol_na(struct net_device *dev)
615{
616 struct inet6_dev *idev;
617 struct inet6_ifaddr *ifa;
618 struct in6_addr mcaddr;
619
620 idev = in6_dev_get(dev);
621 if (!idev)
622 return;
623
624 read_lock_bh(&idev->lock);
625 list_for_each_entry(ifa, &idev->addr_list, if_list) {
626 addrconf_addr_solict_mult(&ifa->addr, &mcaddr);
627 ndisc_send_na(dev, NULL, &mcaddr, &ifa->addr,
628 /*router=*/ !!idev->cnf.forwarding,
629 /*solicited=*/ false, /*override=*/ true,
630 /*inc_opt=*/ true);
631 }
632 read_unlock_bh(&idev->lock);
633
634 in6_dev_put(idev);
635}
636
614void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh, 637void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,
615 const struct in6_addr *solicit, 638 const struct in6_addr *solicit,
616 const struct in6_addr *daddr, const struct in6_addr *saddr) 639 const struct in6_addr *daddr, const struct in6_addr *saddr)
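The new ndisc_send_unsol_na() walks every address configured on the device and multicasts an unsolicited neighbour advertisement for each: solicited off, override on, and the router bit mirroring the forwarding sysctl. The destination is each address's own solicited-node group, computed by addrconf_addr_solict_mult(); the mapping it applies is small enough to show standalone (plain C; the byte layout shown is the thing to verify):

	#include <netinet/in.h>
	#include <string.h>

	/* ff02::1:ffXX:XXXX, keeping the low 24 bits of the unicast address */
	static void solicited_node_mcast(const struct in6_addr *addr,
					 struct in6_addr *mc)
	{
		memset(mc, 0, sizeof(*mc));
		mc->s6_addr[0]	= 0xff;
		mc->s6_addr[1]	= 0x02;
		mc->s6_addr[11] = 0x01;
		mc->s6_addr[12] = 0xff;
		mc->s6_addr[13] = addr->s6_addr[13];
		mc->s6_addr[14] = addr->s6_addr[14];
		mc->s6_addr[15] = addr->s6_addr[15];
	}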
@@ -725,8 +748,8 @@ static int pndisc_is_router(const void *pkey,
725static void ndisc_recv_ns(struct sk_buff *skb) 748static void ndisc_recv_ns(struct sk_buff *skb)
726{ 749{
727 struct nd_msg *msg = (struct nd_msg *)skb_transport_header(skb); 750 struct nd_msg *msg = (struct nd_msg *)skb_transport_header(skb);
728 struct in6_addr *saddr = &ipv6_hdr(skb)->saddr; 751 const struct in6_addr *saddr = &ipv6_hdr(skb)->saddr;
729 struct in6_addr *daddr = &ipv6_hdr(skb)->daddr; 752 const struct in6_addr *daddr = &ipv6_hdr(skb)->daddr;
730 u8 *lladdr = NULL; 753 u8 *lladdr = NULL;
731 u32 ndoptlen = skb->tail - (skb->transport_header + 754 u32 ndoptlen = skb->tail - (skb->transport_header +
732 offsetof(struct nd_msg, opt)); 755 offsetof(struct nd_msg, opt));
@@ -901,8 +924,8 @@ out:
901static void ndisc_recv_na(struct sk_buff *skb) 924static void ndisc_recv_na(struct sk_buff *skb)
902{ 925{
903 struct nd_msg *msg = (struct nd_msg *)skb_transport_header(skb); 926 struct nd_msg *msg = (struct nd_msg *)skb_transport_header(skb);
904 struct in6_addr *saddr = &ipv6_hdr(skb)->saddr; 927 const struct in6_addr *saddr = &ipv6_hdr(skb)->saddr;
905 struct in6_addr *daddr = &ipv6_hdr(skb)->daddr; 928 const struct in6_addr *daddr = &ipv6_hdr(skb)->daddr;
906 u8 *lladdr = NULL; 929 u8 *lladdr = NULL;
907 u32 ndoptlen = skb->tail - (skb->transport_header + 930 u32 ndoptlen = skb->tail - (skb->transport_header +
908 offsetof(struct nd_msg, opt)); 931 offsetof(struct nd_msg, opt));
@@ -945,9 +968,10 @@ static void ndisc_recv_na(struct sk_buff *skb)
945 } 968 }
946 ifp = ipv6_get_ifaddr(dev_net(dev), &msg->target, dev, 1); 969 ifp = ipv6_get_ifaddr(dev_net(dev), &msg->target, dev, 1);
947 if (ifp) { 970 if (ifp) {
948 if (ifp->flags & IFA_F_TENTATIVE) { 971 if (skb->pkt_type != PACKET_LOOPBACK
949 addrconf_dad_failure(ifp); 972 && (ifp->flags & IFA_F_TENTATIVE)) {
950 return; 973 addrconf_dad_failure(ifp);
974 return;
951 } 975 }
952 /* What should we make now? The advertisement 976 /* What should we make now? The advertisement
953 is invalid, but ndisc specs say nothing 977 is invalid, but ndisc specs say nothing
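The reordered condition fixes a self-inflicted duplicate-address failure: on devices that loop multicast transmissions back up the stack, a host could receive its own neighbour advertisement for a still-tentative address and conclude that someone else owns it. The hunk's logic, isolated with the intent spelled out in comments:

	if (skb->pkt_type != PACKET_LOOPBACK &&
	    (ifp->flags & IFA_F_TENTATIVE)) {
		/* a genuinely foreign NA defends the address: DAD fails */
		addrconf_dad_failure(ifp);
		return;
	}
	/* our own looped-back NA falls through and is ignored for DAD */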
@@ -1014,7 +1038,7 @@ static void ndisc_recv_rs(struct sk_buff *skb)
1014 unsigned long ndoptlen = skb->len - sizeof(*rs_msg); 1038 unsigned long ndoptlen = skb->len - sizeof(*rs_msg);
1015 struct neighbour *neigh; 1039 struct neighbour *neigh;
1016 struct inet6_dev *idev; 1040 struct inet6_dev *idev;
1017 struct in6_addr *saddr = &ipv6_hdr(skb)->saddr; 1041 const struct in6_addr *saddr = &ipv6_hdr(skb)->saddr;
1018 struct ndisc_options ndopts; 1042 struct ndisc_options ndopts;
1019 u8 *lladdr = NULL; 1043 u8 *lladdr = NULL;
1020 1044
@@ -1411,8 +1435,8 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
1411{ 1435{
1412 struct inet6_dev *in6_dev; 1436 struct inet6_dev *in6_dev;
1413 struct icmp6hdr *icmph; 1437 struct icmp6hdr *icmph;
1414 struct in6_addr *dest; 1438 const struct in6_addr *dest;
1415 struct in6_addr *target; /* new first hop to destination */ 1439 const struct in6_addr *target; /* new first hop to destination */
1416 struct neighbour *neigh; 1440 struct neighbour *neigh;
1417 int on_link = 0; 1441 int on_link = 0;
1418 struct ndisc_options ndopts; 1442 struct ndisc_options ndopts;
@@ -1445,7 +1469,7 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
1445 } 1469 }
1446 1470
1447 icmph = icmp6_hdr(skb); 1471 icmph = icmp6_hdr(skb);
1448 target = (struct in6_addr *) (icmph + 1); 1472 target = (const struct in6_addr *) (icmph + 1);
1449 dest = target + 1; 1473 dest = target + 1;
1450 1474
1451 if (ipv6_addr_is_multicast(dest)) { 1475 if (ipv6_addr_is_multicast(dest)) {
@@ -1722,6 +1746,9 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event,
1722 neigh_ifdown(&nd_tbl, dev); 1746 neigh_ifdown(&nd_tbl, dev);
1723 fib6_run_gc(~0UL, net); 1747 fib6_run_gc(~0UL, net);
1724 break; 1748 break;
1749 case NETDEV_NOTIFY_PEERS:
1750 ndisc_send_unsol_na(dev);
1751 break;
1725 default: 1752 default:
1726 break; 1753 break;
1727 } 1754 }
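NETDEV_NOTIFY_PEERS is the event a device raises when its peers' caches need refreshing, typically after a virtual machine migrates or a bond fails over; answering it with ndisc_send_unsol_na() gives IPv6 the counterpart of the gratuitous ARP those paths already send for IPv4. How a driver would trigger it (hypothetical driver, but netif_notify_peers() is the real notifier entry point of this era):

	/* in some driver, once a failover or migration completes: */
	static void mydrv_link_moved(struct net_device *dev)
	{
		netif_notify_peers(dev);	/* raises NETDEV_NOTIFY_PEERS */
	}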
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index 28bc1f644b7b..30fcee465448 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -13,7 +13,7 @@
13int ip6_route_me_harder(struct sk_buff *skb) 13int ip6_route_me_harder(struct sk_buff *skb)
14{ 14{
15 struct net *net = dev_net(skb_dst(skb)->dev); 15 struct net *net = dev_net(skb_dst(skb)->dev);
16 struct ipv6hdr *iph = ipv6_hdr(skb); 16 const struct ipv6hdr *iph = ipv6_hdr(skb);
17 struct dst_entry *dst; 17 struct dst_entry *dst;
18 struct flowi6 fl6 = { 18 struct flowi6 fl6 = {
19 .flowi6_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0, 19 .flowi6_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0,
@@ -67,7 +67,7 @@ static void nf_ip6_saveroute(const struct sk_buff *skb,
67 struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry); 67 struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);
68 68
69 if (entry->hook == NF_INET_LOCAL_OUT) { 69 if (entry->hook == NF_INET_LOCAL_OUT) {
70 struct ipv6hdr *iph = ipv6_hdr(skb); 70 const struct ipv6hdr *iph = ipv6_hdr(skb);
71 71
72 rt_info->daddr = iph->daddr; 72 rt_info->daddr = iph->daddr;
73 rt_info->saddr = iph->saddr; 73 rt_info->saddr = iph->saddr;
@@ -81,7 +81,7 @@ static int nf_ip6_reroute(struct sk_buff *skb,
81 struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry); 81 struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);
82 82
83 if (entry->hook == NF_INET_LOCAL_OUT) { 83 if (entry->hook == NF_INET_LOCAL_OUT) {
84 struct ipv6hdr *iph = ipv6_hdr(skb); 84 const struct ipv6hdr *iph = ipv6_hdr(skb);
85 if (!ipv6_addr_equal(&iph->daddr, &rt_info->daddr) || 85 if (!ipv6_addr_equal(&iph->daddr, &rt_info->daddr) ||
86 !ipv6_addr_equal(&iph->saddr, &rt_info->saddr) || 86 !ipv6_addr_equal(&iph->saddr, &rt_info->saddr) ||
87 skb->mark != rt_info->mark) 87 skb->mark != rt_info->mark)
@@ -108,7 +108,7 @@ static int nf_ip6_route(struct net *net, struct dst_entry **dst,
108__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook, 108__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
109 unsigned int dataoff, u_int8_t protocol) 109 unsigned int dataoff, u_int8_t protocol)
110{ 110{
111 struct ipv6hdr *ip6h = ipv6_hdr(skb); 111 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
112 __sum16 csum = 0; 112 __sum16 csum = 0;
113 113
114 switch (skb->ip_summed) { 114 switch (skb->ip_summed) {
@@ -142,7 +142,7 @@ static __sum16 nf_ip6_checksum_partial(struct sk_buff *skb, unsigned int hook,
142 unsigned int dataoff, unsigned int len, 142 unsigned int dataoff, unsigned int len,
143 u_int8_t protocol) 143 u_int8_t protocol)
144{ 144{
145 struct ipv6hdr *ip6h = ipv6_hdr(skb); 145 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
146 __wsum hsum; 146 __wsum hsum;
147 __sum16 csum = 0; 147 __sum16 csum = 0;
148 148
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 5a1c6f27ffaf..94874b0bdcdc 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -340,6 +340,7 @@ ip6t_do_table(struct sk_buff *skb,
340 unsigned int *stackptr, origptr, cpu; 340 unsigned int *stackptr, origptr, cpu;
341 const struct xt_table_info *private; 341 const struct xt_table_info *private;
342 struct xt_action_param acpar; 342 struct xt_action_param acpar;
343 unsigned int addend;
343 344
344 /* Initialization */ 345 /* Initialization */
345 indev = in ? in->name : nulldevname; 346 indev = in ? in->name : nulldevname;
@@ -358,7 +359,8 @@ ip6t_do_table(struct sk_buff *skb,
358 359
359 IP_NF_ASSERT(table->valid_hooks & (1 << hook)); 360 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
360 361
361 xt_info_rdlock_bh(); 362 local_bh_disable();
363 addend = xt_write_recseq_begin();
362 private = table->private; 364 private = table->private;
363 cpu = smp_processor_id(); 365 cpu = smp_processor_id();
364 table_base = private->entries[cpu]; 366 table_base = private->entries[cpu];
@@ -442,7 +444,9 @@ ip6t_do_table(struct sk_buff *skb,
442 } while (!acpar.hotdrop); 444 } while (!acpar.hotdrop);
443 445
444 *stackptr = origptr; 446 *stackptr = origptr;
445 xt_info_rdunlock_bh(); 447
448 xt_write_recseq_end(addend);
449 local_bh_enable();
446 450
447#ifdef DEBUG_ALLOW_ALL 451#ifdef DEBUG_ALLOW_ALL
448 return NF_ACCEPT; 452 return NF_ACCEPT;
@@ -899,7 +903,7 @@ get_counters(const struct xt_table_info *t,
899 unsigned int i; 903 unsigned int i;
900 904
901 for_each_possible_cpu(cpu) { 905 for_each_possible_cpu(cpu) {
902 seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock; 906 seqcount_t *s = &per_cpu(xt_recseq, cpu);
903 907
904 i = 0; 908 i = 0;
905 xt_entry_foreach(iter, t->entries[cpu], t->size) { 909 xt_entry_foreach(iter, t->entries[cpu], t->size) {
@@ -907,10 +911,10 @@ get_counters(const struct xt_table_info *t,
907 unsigned int start; 911 unsigned int start;
908 912
909 do { 913 do {
910 start = read_seqbegin(lock); 914 start = read_seqcount_begin(s);
911 bcnt = iter->counters.bcnt; 915 bcnt = iter->counters.bcnt;
912 pcnt = iter->counters.pcnt; 916 pcnt = iter->counters.pcnt;
913 } while (read_seqretry(lock, start)); 917 } while (read_seqcount_retry(s, start));
914 918
915 ADD_COUNTER(counters[i], bcnt, pcnt); 919 ADD_COUNTER(counters[i], bcnt, pcnt);
916 ++i; 920 ++i;
@@ -1325,6 +1329,7 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
1325 int ret = 0; 1329 int ret = 0;
1326 const void *loc_cpu_entry; 1330 const void *loc_cpu_entry;
1327 struct ip6t_entry *iter; 1331 struct ip6t_entry *iter;
1332 unsigned int addend;
1328#ifdef CONFIG_COMPAT 1333#ifdef CONFIG_COMPAT
1329 struct compat_xt_counters_info compat_tmp; 1334 struct compat_xt_counters_info compat_tmp;
1330 1335
@@ -1381,13 +1386,13 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
1381 i = 0; 1386 i = 0;
1382 /* Choose the copy that is on our node */ 1387 /* Choose the copy that is on our node */
1383 curcpu = smp_processor_id(); 1388 curcpu = smp_processor_id();
1384 xt_info_wrlock(curcpu); 1389 addend = xt_write_recseq_begin();
1385 loc_cpu_entry = private->entries[curcpu]; 1390 loc_cpu_entry = private->entries[curcpu];
1386 xt_entry_foreach(iter, loc_cpu_entry, private->size) { 1391 xt_entry_foreach(iter, loc_cpu_entry, private->size) {
1387 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt); 1392 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
1388 ++i; 1393 ++i;
1389 } 1394 }
1390 xt_info_wrunlock(curcpu); 1395 xt_write_recseq_end(addend);
1391 1396
1392 unlock_up_free: 1397 unlock_up_free:
1393 local_bh_enable(); 1398 local_bh_enable();
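Taken together, these hunks retire the per-cpu reader-writer lock around rule counters in favour of a per-cpu sequence counter: the packet path brackets counter updates with xt_write_recseq_begin()/xt_write_recseq_end(), and get_counters() simply retries any read that raced a writer, so the hot path no longer takes a lock at all. The returned addend appears to exist so the write section can nest: a target that re-enters ip6t_do_table() on the same CPU adds 0 and leaves the sequence odd. The underlying pattern, reduced to portable C (a simplified sketch, not the kernel primitives; barriers are the coarse __sync variety):

	#include <stdint.h>

	struct counters {
		volatile unsigned int seq;
		uint64_t bytes, pkts;
	};

	static void writer_update(struct counters *c, uint64_t b)
	{
		c->seq++;		/* odd: update in progress */
		__sync_synchronize();
		c->bytes += b;
		c->pkts++;
		__sync_synchronize();
		c->seq++;		/* even: stable again */
	}

	static void reader_snapshot(struct counters *c,
				    uint64_t *b, uint64_t *p)
	{
		unsigned int start;

		do {
			start = c->seq;
			__sync_synchronize();
			*b = c->bytes;
			*p = c->pkts;
			__sync_synchronize();
		} while ((start & 1) || start != c->seq);
	}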
@@ -1578,7 +1583,6 @@ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1578 struct xt_table_info *newinfo, unsigned char *base) 1583 struct xt_table_info *newinfo, unsigned char *base)
1579{ 1584{
1580 struct xt_entry_target *t; 1585 struct xt_entry_target *t;
1581 struct xt_target *target;
1582 struct ip6t_entry *de; 1586 struct ip6t_entry *de;
1583 unsigned int origsize; 1587 unsigned int origsize;
1584 int ret, h; 1588 int ret, h;
@@ -1600,7 +1604,6 @@ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1600 } 1604 }
1601 de->target_offset = e->target_offset - (origsize - *size); 1605 de->target_offset = e->target_offset - (origsize - *size);
1602 t = compat_ip6t_get_target(e); 1606 t = compat_ip6t_get_target(e);
1603 target = t->u.kernel.target;
1604 xt_compat_target_from_user(t, dstptr, size); 1607 xt_compat_target_from_user(t, dstptr, size);
1605 1608
1606 de->next_offset = e->next_offset - (origsize - *size); 1609 de->next_offset = e->next_offset - (origsize - *size);
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c
index 679a0a3b7b3c..00d19173db7e 100644
--- a/net/ipv6/netfilter/ip6table_mangle.c
+++ b/net/ipv6/netfilter/ip6table_mangle.c
@@ -64,7 +64,8 @@ ip6t_mangle_out(struct sk_buff *skb, const struct net_device *out)
64 (memcmp(&ipv6_hdr(skb)->saddr, &saddr, sizeof(saddr)) || 64 (memcmp(&ipv6_hdr(skb)->saddr, &saddr, sizeof(saddr)) ||
65 memcmp(&ipv6_hdr(skb)->daddr, &daddr, sizeof(daddr)) || 65 memcmp(&ipv6_hdr(skb)->daddr, &daddr, sizeof(daddr)) ||
66 skb->mark != mark || 66 skb->mark != mark ||
67 ipv6_hdr(skb)->hop_limit != hop_limit)) 67 ipv6_hdr(skb)->hop_limit != hop_limit ||
68 flowlabel != *((u_int32_t *)ipv6_hdr(skb))))
68 return ip6_route_me_harder(skb) == 0 ? ret : NF_DROP; 69 return ip6_route_me_harder(skb) == 0 ? ret : NF_DROP;
69 70
70 return ret; 71 return ret;
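The extra comparison catches mangle targets that rewrite the flow label: the first 32 bits of an IPv6 header hold version (4 bits), traffic class (8) and flow label (20), so comparing the word saved before the hook ran (the flowlabel variable, captured earlier in this function) against *(u_int32_t *)ipv6_hdr(skb) detects any change in that word and forces a re-route. Unpacking that word, standalone:

	#include <arpa/inet.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* version 6, traffic class 0x2e, flow label 0x12345 */
		uint32_t w = htonl((6u << 28) | (0x2eu << 20) | 0x12345u);

		printf("version %u tclass 0x%02x flowlabel 0x%05x\n",
		       ntohl(w) >> 28, (ntohl(w) >> 20) & 0xff,
		       ntohl(w) & 0xfffff);
		return 0;
	}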
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index 24b3558b8e67..18ff5df7ec02 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -141,7 +141,11 @@ static const struct snmp_mib snmp6_udplite6_list[] = {
141 SNMP_MIB_SENTINEL 141 SNMP_MIB_SENTINEL
142}; 142};
143 143
144static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, void __percpu **mib) 144/* can be called either with percpu mib (pcpumib != NULL),
145 * or shared one (smib != NULL)
146 */
147static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, void __percpu **pcpumib,
148 atomic_long_t *smib)
145{ 149{
146 char name[32]; 150 char name[32];
147 int i; 151 int i;
@@ -158,14 +162,14 @@ static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, void __percpu **mib)
158 snprintf(name, sizeof(name), "Icmp6%s%s", 162 snprintf(name, sizeof(name), "Icmp6%s%s",
159 i & 0x100 ? "Out" : "In", p); 163 i & 0x100 ? "Out" : "In", p);
160 seq_printf(seq, "%-32s\t%lu\n", name, 164 seq_printf(seq, "%-32s\t%lu\n", name,
161 snmp_fold_field(mib, i)); 165 pcpumib ? snmp_fold_field(pcpumib, i) : atomic_long_read(smib + i));
162 } 166 }
163 167
164 /* print by number (nonzero only) - ICMPMsgStat format */ 168 /* print by number (nonzero only) - ICMPMsgStat format */
165 for (i = 0; i < ICMP6MSG_MIB_MAX; i++) { 169 for (i = 0; i < ICMP6MSG_MIB_MAX; i++) {
166 unsigned long val; 170 unsigned long val;
167 171
168 val = snmp_fold_field(mib, i); 172 val = pcpumib ? snmp_fold_field(pcpumib, i) : atomic_long_read(smib + i);
169 if (!val) 173 if (!val)
170 continue; 174 continue;
171 snprintf(name, sizeof(name), "Icmp6%sType%u", 175 snprintf(name, sizeof(name), "Icmp6%sType%u",
@@ -174,14 +178,22 @@ static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, void __percpu **mib)
174 } 178 }
175} 179}
176 180
177static void snmp6_seq_show_item(struct seq_file *seq, void __percpu **mib, 181/* can be called either with percpu mib (pcpumib != NULL),
182 * or shared one (smib != NULL)
183 */
184static void snmp6_seq_show_item(struct seq_file *seq, void __percpu **pcpumib,
185 atomic_long_t *smib,
178 const struct snmp_mib *itemlist) 186 const struct snmp_mib *itemlist)
179{ 187{
180 int i; 188 int i;
189 unsigned long val;
181 190
182 for (i = 0; itemlist[i].name; i++) 191 for (i = 0; itemlist[i].name; i++) {
183 seq_printf(seq, "%-32s\t%lu\n", itemlist[i].name, 192 val = pcpumib ?
184 snmp_fold_field(mib, itemlist[i].entry)); 193 snmp_fold_field(pcpumib, itemlist[i].entry) :
194 atomic_long_read(smib + itemlist[i].entry);
195 seq_printf(seq, "%-32s\t%lu\n", itemlist[i].name, val);
196 }
185} 197}
186 198
187static void snmp6_seq_show_item64(struct seq_file *seq, void __percpu **mib, 199static void snmp6_seq_show_item64(struct seq_file *seq, void __percpu **mib,
@@ -201,13 +213,13 @@ static int snmp6_seq_show(struct seq_file *seq, void *v)
201 snmp6_seq_show_item64(seq, (void __percpu **)net->mib.ipv6_statistics, 213 snmp6_seq_show_item64(seq, (void __percpu **)net->mib.ipv6_statistics,
202 snmp6_ipstats_list, offsetof(struct ipstats_mib, syncp)); 214 snmp6_ipstats_list, offsetof(struct ipstats_mib, syncp));
203 snmp6_seq_show_item(seq, (void __percpu **)net->mib.icmpv6_statistics, 215 snmp6_seq_show_item(seq, (void __percpu **)net->mib.icmpv6_statistics,
204 snmp6_icmp6_list); 216 NULL, snmp6_icmp6_list);
205 snmp6_seq_show_icmpv6msg(seq, 217 snmp6_seq_show_icmpv6msg(seq,
206 (void __percpu **)net->mib.icmpv6msg_statistics); 218 (void __percpu **)net->mib.icmpv6msg_statistics, NULL);
207 snmp6_seq_show_item(seq, (void __percpu **)net->mib.udp_stats_in6, 219 snmp6_seq_show_item(seq, (void __percpu **)net->mib.udp_stats_in6,
208 snmp6_udp6_list); 220 NULL, snmp6_udp6_list);
209 snmp6_seq_show_item(seq, (void __percpu **)net->mib.udplite_stats_in6, 221 snmp6_seq_show_item(seq, (void __percpu **)net->mib.udplite_stats_in6,
210 snmp6_udplite6_list); 222 NULL, snmp6_udplite6_list);
211 return 0; 223 return 0;
212} 224}
213 225
@@ -229,11 +241,11 @@ static int snmp6_dev_seq_show(struct seq_file *seq, void *v)
229 struct inet6_dev *idev = (struct inet6_dev *)seq->private; 241 struct inet6_dev *idev = (struct inet6_dev *)seq->private;
230 242
231 seq_printf(seq, "%-32s\t%u\n", "ifIndex", idev->dev->ifindex); 243 seq_printf(seq, "%-32s\t%u\n", "ifIndex", idev->dev->ifindex);
232 snmp6_seq_show_item(seq, (void __percpu **)idev->stats.ipv6, 244 snmp6_seq_show_item(seq, (void __percpu **)idev->stats.ipv6, NULL,
233 snmp6_ipstats_list); 245 snmp6_ipstats_list);
234 snmp6_seq_show_item(seq, (void __percpu **)idev->stats.icmpv6, 246 snmp6_seq_show_item(seq, NULL, idev->stats.icmpv6dev->mibs,
235 snmp6_icmp6_list); 247 snmp6_icmp6_list);
236 snmp6_seq_show_icmpv6msg(seq, (void __percpu **)idev->stats.icmpv6msg); 248 snmp6_seq_show_icmpv6msg(seq, NULL, idev->stats.icmpv6msgdev->mibs);
237 return 0; 249 return 0;
238} 250}
239 251
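The /proc/net/snmp6 printers become dual-sourced: each item is read either from a per-cpu MIB folded with snmp_fold_field() or from a flat atomic_long_t array, with exactly one of the two pointers non-NULL. That is what lets the per-device ICMPv6 counters above (icmpv6dev->mibs, icmpv6msgdev->mibs) drop per-cpu storage while the global ones keep it. The accessor pattern, factored out as a sketch (kernel types assumed; this helper does not exist in the tree):

	static unsigned long mib_get(void __percpu **pcpumib,
				     atomic_long_t *smib, int item)
	{
		return pcpumib ? snmp_fold_field(pcpumib, item)
			       : atomic_long_read(smib + item);
	}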
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 4a1c3b46c56b..ae64984f81aa 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -67,8 +67,8 @@ static struct raw_hashinfo raw_v6_hashinfo = {
67}; 67};
68 68
69static struct sock *__raw_v6_lookup(struct net *net, struct sock *sk, 69static struct sock *__raw_v6_lookup(struct net *net, struct sock *sk,
70 unsigned short num, struct in6_addr *loc_addr, 70 unsigned short num, const struct in6_addr *loc_addr,
71 struct in6_addr *rmt_addr, int dif) 71 const struct in6_addr *rmt_addr, int dif)
72{ 72{
73 struct hlist_node *node; 73 struct hlist_node *node;
74 int is_multicast = ipv6_addr_is_multicast(loc_addr); 74 int is_multicast = ipv6_addr_is_multicast(loc_addr);
@@ -154,8 +154,8 @@ EXPORT_SYMBOL(rawv6_mh_filter_unregister);
154 */ 154 */
155static int ipv6_raw_deliver(struct sk_buff *skb, int nexthdr) 155static int ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
156{ 156{
157 struct in6_addr *saddr; 157 const struct in6_addr *saddr;
158 struct in6_addr *daddr; 158 const struct in6_addr *daddr;
159 struct sock *sk; 159 struct sock *sk;
160 int delivered = 0; 160 int delivered = 0;
161 __u8 hash; 161 __u8 hash;
@@ -348,7 +348,7 @@ void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
348{ 348{
349 struct sock *sk; 349 struct sock *sk;
350 int hash; 350 int hash;
351 struct in6_addr *saddr, *daddr; 351 const struct in6_addr *saddr, *daddr;
352 struct net *net; 352 struct net *net;
353 353
354 hash = nexthdr & (RAW_HTABLE_SIZE - 1); 354 hash = nexthdr & (RAW_HTABLE_SIZE - 1);
@@ -357,7 +357,7 @@ void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
357 sk = sk_head(&raw_v6_hashinfo.ht[hash]); 357 sk = sk_head(&raw_v6_hashinfo.ht[hash]);
358 if (sk != NULL) { 358 if (sk != NULL) {
359 /* Note: ipv6_hdr(skb) != skb->data */ 359 /* Note: ipv6_hdr(skb) != skb->data */
360 struct ipv6hdr *ip6h = (struct ipv6hdr *)skb->data; 360 const struct ipv6hdr *ip6h = (const struct ipv6hdr *)skb->data;
361 saddr = &ip6h->saddr; 361 saddr = &ip6h->saddr;
362 daddr = &ip6h->daddr; 362 daddr = &ip6h->daddr;
363 net = dev_net(skb->dev); 363 net = dev_net(skb->dev);
@@ -542,8 +542,8 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
542 goto out; 542 goto out;
543 543
544 offset = rp->offset; 544 offset = rp->offset;
545 total_len = inet_sk(sk)->cork.length - (skb_network_header(skb) - 545 total_len = inet_sk(sk)->cork.base.length - (skb_network_header(skb) -
546 skb->data); 546 skb->data);
547 if (offset >= total_len - 1) { 547 if (offset >= total_len - 1) {
548 err = -EINVAL; 548 err = -EINVAL;
549 ip6_flush_pending_frames(sk); 549 ip6_flush_pending_frames(sk);
@@ -1231,7 +1231,7 @@ struct proto rawv6_prot = {
1231static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i) 1231static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
1232{ 1232{
1233 struct ipv6_pinfo *np = inet6_sk(sp); 1233 struct ipv6_pinfo *np = inet6_sk(sp);
1234 struct in6_addr *dest, *src; 1234 const struct in6_addr *dest, *src;
1235 __u16 destp, srcp; 1235 __u16 destp, srcp;
1236 1236
1237 dest = &np->daddr; 1237 dest = &np->daddr;
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 07beeb06f752..7b954e2539d0 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -224,7 +224,7 @@ out:
224} 224}
225 225
226static __inline__ struct frag_queue * 226static __inline__ struct frag_queue *
227fq_find(struct net *net, __be32 id, struct in6_addr *src, struct in6_addr *dst) 227fq_find(struct net *net, __be32 id, const struct in6_addr *src, const struct in6_addr *dst)
228{ 228{
229 struct inet_frag_queue *q; 229 struct inet_frag_queue *q;
230 struct ip6_create_arg arg; 230 struct ip6_create_arg arg;
@@ -535,7 +535,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
535{ 535{
536 struct frag_hdr *fhdr; 536 struct frag_hdr *fhdr;
537 struct frag_queue *fq; 537 struct frag_queue *fq;
538 struct ipv6hdr *hdr = ipv6_hdr(skb); 538 const struct ipv6hdr *hdr = ipv6_hdr(skb);
539 struct net *net = dev_net(skb_dst(skb)->dev); 539 struct net *net = dev_net(skb_dst(skb)->dev);
540 540
541 IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS); 541 IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index fd0eec6f88c6..f1be5c5c85ef 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -89,12 +89,12 @@ static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
89 89
90#ifdef CONFIG_IPV6_ROUTE_INFO 90#ifdef CONFIG_IPV6_ROUTE_INFO
91static struct rt6_info *rt6_add_route_info(struct net *net, 91static struct rt6_info *rt6_add_route_info(struct net *net,
92 struct in6_addr *prefix, int prefixlen, 92 const struct in6_addr *prefix, int prefixlen,
93 struct in6_addr *gwaddr, int ifindex, 93 const struct in6_addr *gwaddr, int ifindex,
94 unsigned pref); 94 unsigned pref);
95static struct rt6_info *rt6_get_route_info(struct net *net, 95static struct rt6_info *rt6_get_route_info(struct net *net,
96 struct in6_addr *prefix, int prefixlen, 96 const struct in6_addr *prefix, int prefixlen,
97 struct in6_addr *gwaddr, int ifindex); 97 const struct in6_addr *gwaddr, int ifindex);
98#endif 98#endif
99 99
100static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old) 100static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
@@ -227,9 +227,14 @@ static struct rt6_info ip6_blk_hole_entry_template = {
227#endif 227#endif
228 228
229/* allocate dst with ip6_dst_ops */ 229/* allocate dst with ip6_dst_ops */
230static inline struct rt6_info *ip6_dst_alloc(struct dst_ops *ops) 230static inline struct rt6_info *ip6_dst_alloc(struct dst_ops *ops,
231 struct net_device *dev)
231{ 232{
232 return (struct rt6_info *)dst_alloc(ops, 0); 233 struct rt6_info *rt = dst_alloc(ops, dev, 0, 0, 0);
234
235 memset(&rt->rt6i_table, 0, sizeof(*rt) - sizeof(struct dst_entry));
236
237 return rt;
233} 238}
234 239
235static void ip6_dst_destroy(struct dst_entry *dst) 240static void ip6_dst_destroy(struct dst_entry *dst)
@@ -290,7 +295,7 @@ static __inline__ int rt6_check_expired(const struct rt6_info *rt)
290 time_after(jiffies, rt->rt6i_expires); 295 time_after(jiffies, rt->rt6i_expires);
291} 296}
292 297
293static inline int rt6_need_strict(struct in6_addr *daddr) 298static inline int rt6_need_strict(const struct in6_addr *daddr)
294{ 299{
295 return ipv6_addr_type(daddr) & 300 return ipv6_addr_type(daddr) &
296 (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK); 301 (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
@@ -302,7 +307,7 @@ static inline int rt6_need_strict(struct in6_addr *daddr)
302 307
303static inline struct rt6_info *rt6_device_match(struct net *net, 308static inline struct rt6_info *rt6_device_match(struct net *net,
304 struct rt6_info *rt, 309 struct rt6_info *rt,
305 struct in6_addr *saddr, 310 const struct in6_addr *saddr,
306 int oif, 311 int oif,
307 int flags) 312 int flags)
308{ 313{
@@ -514,7 +519,7 @@ static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
514 519
515#ifdef CONFIG_IPV6_ROUTE_INFO 520#ifdef CONFIG_IPV6_ROUTE_INFO
516int rt6_route_rcv(struct net_device *dev, u8 *opt, int len, 521int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
517 struct in6_addr *gwaddr) 522 const struct in6_addr *gwaddr)
518{ 523{
519 struct net *net = dev_net(dev); 524 struct net *net = dev_net(dev);
520 struct route_info *rinfo = (struct route_info *) opt; 525 struct route_info *rinfo = (struct route_info *) opt;
@@ -677,8 +682,8 @@ int ip6_ins_rt(struct rt6_info *rt)
677 return __ip6_ins_rt(rt, &info); 682 return __ip6_ins_rt(rt, &info);
678} 683}
679 684
680static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, struct in6_addr *daddr, 685static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, const struct in6_addr *daddr,
681 struct in6_addr *saddr) 686 const struct in6_addr *saddr)
682{ 687{
683 struct rt6_info *rt; 688 struct rt6_info *rt;
684 689
@@ -746,7 +751,7 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, struct in6_addr *dad
746 return rt; 751 return rt;
747} 752}
748 753
749static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort, struct in6_addr *daddr) 754static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort, const struct in6_addr *daddr)
750{ 755{
751 struct rt6_info *rt = ip6_rt_copy(ort); 756 struct rt6_info *rt = ip6_rt_copy(ort);
752 if (rt) { 757 if (rt) {
@@ -837,7 +842,7 @@ static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *
837 842
838void ip6_route_input(struct sk_buff *skb) 843void ip6_route_input(struct sk_buff *skb)
839{ 844{
840 struct ipv6hdr *iph = ipv6_hdr(skb); 845 const struct ipv6hdr *iph = ipv6_hdr(skb);
841 struct net *net = dev_net(skb->dev); 846 struct net *net = dev_net(skb->dev);
842 int flags = RT6_LOOKUP_F_HAS_SADDR; 847 int flags = RT6_LOOKUP_F_HAS_SADDR;
843 struct flowi6 fl6 = { 848 struct flowi6 fl6 = {
@@ -881,11 +886,13 @@ EXPORT_SYMBOL(ip6_route_output);
881 886
882struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig) 887struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
883{ 888{
884 struct rt6_info *rt = dst_alloc(&ip6_dst_blackhole_ops, 1); 889 struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
885 struct rt6_info *ort = (struct rt6_info *) dst_orig;
886 struct dst_entry *new = NULL; 890 struct dst_entry *new = NULL;
887 891
892 rt = dst_alloc(&ip6_dst_blackhole_ops, ort->dst.dev, 1, 0, 0);
888 if (rt) { 893 if (rt) {
894 memset(&rt->rt6i_table, 0, sizeof(*rt) - sizeof(struct dst_entry));
895
889 new = &rt->dst; 896 new = &rt->dst;
890 897
891 new->__use = 1; 898 new->__use = 1;
@@ -893,9 +900,6 @@ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_ori
893 new->output = dst_discard; 900 new->output = dst_discard;
894 901
895 dst_copy_metrics(new, &ort->dst); 902 dst_copy_metrics(new, &ort->dst);
896 new->dev = ort->dst.dev;
897 if (new->dev)
898 dev_hold(new->dev);
899 rt->rt6i_idev = ort->rt6i_idev; 903 rt->rt6i_idev = ort->rt6i_idev;
900 if (rt->rt6i_idev) 904 if (rt->rt6i_idev)
901 in6_dev_hold(rt->rt6i_idev); 905 in6_dev_hold(rt->rt6i_idev);
@@ -1038,13 +1042,12 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
1038 if (unlikely(idev == NULL)) 1042 if (unlikely(idev == NULL))
1039 return NULL; 1043 return NULL;
1040 1044
1041 rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops); 1045 rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, dev);
1042 if (unlikely(rt == NULL)) { 1046 if (unlikely(rt == NULL)) {
1043 in6_dev_put(idev); 1047 in6_dev_put(idev);
1044 goto out; 1048 goto out;
1045 } 1049 }
1046 1050
1047 dev_hold(dev);
1048 if (neigh) 1051 if (neigh)
1049 neigh_hold(neigh); 1052 neigh_hold(neigh);
1050 else { 1053 else {
@@ -1053,7 +1056,6 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
1053 neigh = NULL; 1056 neigh = NULL;
1054 } 1057 }
1055 1058
1056 rt->rt6i_dev = dev;
1057 rt->rt6i_idev = idev; 1059 rt->rt6i_idev = idev;
1058 rt->rt6i_nexthop = neigh; 1060 rt->rt6i_nexthop = neigh;
1059 atomic_set(&rt->dst.__refcnt, 1); 1061 atomic_set(&rt->dst.__refcnt, 1);
@@ -1212,7 +1214,7 @@ int ip6_route_add(struct fib6_config *cfg)
1212 goto out; 1214 goto out;
1213 } 1215 }
1214 1216
1215 rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops); 1217 rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, NULL);
1216 1218
1217 if (rt == NULL) { 1219 if (rt == NULL) {
1218 err = -ENOMEM; 1220 err = -ENOMEM;
@@ -1279,7 +1281,7 @@ int ip6_route_add(struct fib6_config *cfg)
1279 } 1281 }
1280 1282
1281 if (cfg->fc_flags & RTF_GATEWAY) { 1283 if (cfg->fc_flags & RTF_GATEWAY) {
1282 struct in6_addr *gw_addr; 1284 const struct in6_addr *gw_addr;
1283 int gwa_type; 1285 int gwa_type;
1284 1286
1285 gw_addr = &cfg->fc_gateway; 1287 gw_addr = &cfg->fc_gateway;
@@ -1332,6 +1334,16 @@ int ip6_route_add(struct fib6_config *cfg)
1332 if (dev == NULL) 1334 if (dev == NULL)
1333 goto out; 1335 goto out;
1334 1336
1337 if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
1338 if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
1339 err = -EINVAL;
1340 goto out;
1341 }
1342 ipv6_addr_copy(&rt->rt6i_prefsrc.addr, &cfg->fc_prefsrc);
1343 rt->rt6i_prefsrc.plen = 128;
1344 } else
1345 rt->rt6i_prefsrc.plen = 0;
1346
1335 if (cfg->fc_flags & (RTF_GATEWAY | RTF_NONEXTHOP)) { 1347 if (cfg->fc_flags & (RTF_GATEWAY | RTF_NONEXTHOP)) {
1336 rt->rt6i_nexthop = __neigh_lookup_errno(&nd_tbl, &rt->rt6i_gateway, dev); 1348 rt->rt6i_nexthop = __neigh_lookup_errno(&nd_tbl, &rt->rt6i_gateway, dev);
1337 if (IS_ERR(rt->rt6i_nexthop)) { 1349 if (IS_ERR(rt->rt6i_nexthop)) {
@@ -1509,9 +1521,9 @@ out:
1509 return rt; 1521 return rt;
1510}; 1522};
1511 1523
1512static struct rt6_info *ip6_route_redirect(struct in6_addr *dest, 1524static struct rt6_info *ip6_route_redirect(const struct in6_addr *dest,
1513 struct in6_addr *src, 1525 const struct in6_addr *src,
1514 struct in6_addr *gateway, 1526 const struct in6_addr *gateway,
1515 struct net_device *dev) 1527 struct net_device *dev)
1516{ 1528{
1517 int flags = RT6_LOOKUP_F_HAS_SADDR; 1529 int flags = RT6_LOOKUP_F_HAS_SADDR;
@@ -1533,8 +1545,8 @@ static struct rt6_info *ip6_route_redirect(struct in6_addr *dest,
1533 flags, __ip6_route_redirect); 1545 flags, __ip6_route_redirect);
1534} 1546}
1535 1547
1536void rt6_redirect(struct in6_addr *dest, struct in6_addr *src, 1548void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src,
1537 struct in6_addr *saddr, 1549 const struct in6_addr *saddr,
1538 struct neighbour *neigh, u8 *lladdr, int on_link) 1550 struct neighbour *neigh, u8 *lladdr, int on_link)
1539{ 1551{
1540 struct rt6_info *rt, *nrt = NULL; 1552 struct rt6_info *rt, *nrt = NULL;
@@ -1608,7 +1620,7 @@ out:
1608 * i.e. Path MTU discovery 1620 * i.e. Path MTU discovery
1609 */ 1621 */
1610 1622
1611static void rt6_do_pmtu_disc(struct in6_addr *daddr, struct in6_addr *saddr, 1623static void rt6_do_pmtu_disc(const struct in6_addr *daddr, const struct in6_addr *saddr,
1612 struct net *net, u32 pmtu, int ifindex) 1624 struct net *net, u32 pmtu, int ifindex)
1613{ 1625{
1614 struct rt6_info *rt, *nrt; 1626 struct rt6_info *rt, *nrt;
@@ -1693,7 +1705,7 @@ out:
1693 dst_release(&rt->dst); 1705 dst_release(&rt->dst);
1694} 1706}
1695 1707
1696void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr, 1708void rt6_pmtu_discovery(const struct in6_addr *daddr, const struct in6_addr *saddr,
1697 struct net_device *dev, u32 pmtu) 1709 struct net_device *dev, u32 pmtu)
1698{ 1710{
1699 struct net *net = dev_net(dev); 1711 struct net *net = dev_net(dev);
@@ -1721,7 +1733,8 @@ void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
1721static struct rt6_info * ip6_rt_copy(struct rt6_info *ort) 1733static struct rt6_info * ip6_rt_copy(struct rt6_info *ort)
1722{ 1734{
1723 struct net *net = dev_net(ort->rt6i_dev); 1735 struct net *net = dev_net(ort->rt6i_dev);
1724 struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops); 1736 struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops,
1737 ort->dst.dev);
1725 1738
1726 if (rt) { 1739 if (rt) {
1727 rt->dst.input = ort->dst.input; 1740 rt->dst.input = ort->dst.input;
@@ -1729,9 +1742,6 @@ static struct rt6_info * ip6_rt_copy(struct rt6_info *ort)
1729 1742
1730 dst_copy_metrics(&rt->dst, &ort->dst); 1743 dst_copy_metrics(&rt->dst, &ort->dst);
1731 rt->dst.error = ort->dst.error; 1744 rt->dst.error = ort->dst.error;
1732 rt->dst.dev = ort->dst.dev;
1733 if (rt->dst.dev)
1734 dev_hold(rt->dst.dev);
1735 rt->rt6i_idev = ort->rt6i_idev; 1745 rt->rt6i_idev = ort->rt6i_idev;
1736 if (rt->rt6i_idev) 1746 if (rt->rt6i_idev)
1737 in6_dev_hold(rt->rt6i_idev); 1747 in6_dev_hold(rt->rt6i_idev);
@@ -1753,8 +1763,8 @@ static struct rt6_info * ip6_rt_copy(struct rt6_info *ort)
1753 1763
1754#ifdef CONFIG_IPV6_ROUTE_INFO 1764#ifdef CONFIG_IPV6_ROUTE_INFO
1755static struct rt6_info *rt6_get_route_info(struct net *net, 1765static struct rt6_info *rt6_get_route_info(struct net *net,
1756 struct in6_addr *prefix, int prefixlen, 1766 const struct in6_addr *prefix, int prefixlen,
1757 struct in6_addr *gwaddr, int ifindex) 1767 const struct in6_addr *gwaddr, int ifindex)
1758{ 1768{
1759 struct fib6_node *fn; 1769 struct fib6_node *fn;
1760 struct rt6_info *rt = NULL; 1770 struct rt6_info *rt = NULL;
@@ -1785,8 +1795,8 @@ out:
1785} 1795}
1786 1796
1787static struct rt6_info *rt6_add_route_info(struct net *net, 1797static struct rt6_info *rt6_add_route_info(struct net *net,
1788 struct in6_addr *prefix, int prefixlen, 1798 const struct in6_addr *prefix, int prefixlen,
1789 struct in6_addr *gwaddr, int ifindex, 1799 const struct in6_addr *gwaddr, int ifindex,
1790 unsigned pref) 1800 unsigned pref)
1791{ 1801{
1792 struct fib6_config cfg = { 1802 struct fib6_config cfg = {
@@ -1814,7 +1824,7 @@ static struct rt6_info *rt6_add_route_info(struct net *net,
1814} 1824}
1815#endif 1825#endif
1816 1826
1817struct rt6_info *rt6_get_dflt_router(struct in6_addr *addr, struct net_device *dev) 1827struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_device *dev)
1818{ 1828{
1819 struct rt6_info *rt; 1829 struct rt6_info *rt;
1820 struct fib6_table *table; 1830 struct fib6_table *table;
@@ -1836,7 +1846,7 @@ struct rt6_info *rt6_get_dflt_router(struct in6_addr *addr, struct net_device *d
1836 return rt; 1846 return rt;
1837} 1847}
1838 1848
1839struct rt6_info *rt6_add_dflt_router(struct in6_addr *gwaddr, 1849struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
1840 struct net_device *dev, 1850 struct net_device *dev,
1841 unsigned int pref) 1851 unsigned int pref)
1842{ 1852{
@@ -2001,7 +2011,8 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
2001 int anycast) 2011 int anycast)
2002{ 2012{
2003 struct net *net = dev_net(idev->dev); 2013 struct net *net = dev_net(idev->dev);
2004 struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops); 2014 struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops,
2015 net->loopback_dev);
2005 struct neighbour *neigh; 2016 struct neighbour *neigh;
2006 2017
2007 if (rt == NULL) { 2018 if (rt == NULL) {
@@ -2011,13 +2022,11 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
2011 return ERR_PTR(-ENOMEM); 2022 return ERR_PTR(-ENOMEM);
2012 } 2023 }
2013 2024
2014 dev_hold(net->loopback_dev);
2015 in6_dev_hold(idev); 2025 in6_dev_hold(idev);
2016 2026
2017 rt->dst.flags = DST_HOST; 2027 rt->dst.flags = DST_HOST;
2018 rt->dst.input = ip6_input; 2028 rt->dst.input = ip6_input;
2019 rt->dst.output = ip6_output; 2029 rt->dst.output = ip6_output;
2020 rt->rt6i_dev = net->loopback_dev;
2021 rt->rt6i_idev = idev; 2030 rt->rt6i_idev = idev;
2022 rt->dst.obsolete = -1; 2031 rt->dst.obsolete = -1;
2023 2032
@@ -2043,6 +2052,55 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
2043 return rt; 2052 return rt;
2044} 2053}
2045 2054
2055int ip6_route_get_saddr(struct net *net,
2056 struct rt6_info *rt,
2057 const struct in6_addr *daddr,
2058 unsigned int prefs,
2059 struct in6_addr *saddr)
2060{
2061 struct inet6_dev *idev = ip6_dst_idev((struct dst_entry*)rt);
2062 int err = 0;
2063 if (rt->rt6i_prefsrc.plen)
2064 ipv6_addr_copy(saddr, &rt->rt6i_prefsrc.addr);
2065 else
2066 err = ipv6_dev_get_saddr(net, idev ? idev->dev : NULL,
2067 daddr, prefs, saddr);
2068 return err;
2069}
2070
2071/* remove deleted ip from prefsrc entries */
2072struct arg_dev_net_ip {
2073 struct net_device *dev;
2074 struct net *net;
2075 struct in6_addr *addr;
2076};
2077
2078static int fib6_remove_prefsrc(struct rt6_info *rt, void *arg)
2079{
2080 struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev;
2081 struct net *net = ((struct arg_dev_net_ip *)arg)->net;
2082 struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;
2083
2084 if (((void *)rt->rt6i_dev == dev || dev == NULL) &&
2085 rt != net->ipv6.ip6_null_entry &&
2086 ipv6_addr_equal(addr, &rt->rt6i_prefsrc.addr)) {
2087 /* remove prefsrc entry */
2088 rt->rt6i_prefsrc.plen = 0;
2089 }
2090 return 0;
2091}
2092
2093void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
2094{
2095 struct net *net = dev_net(ifp->idev->dev);
2096 struct arg_dev_net_ip adni = {
2097 .dev = ifp->idev->dev,
2098 .net = net,
2099 .addr = &ifp->addr,
2100 };
2101 fib6_clean_all(net, fib6_remove_prefsrc, 0, &adni);
2102}
2103
2046struct arg_dev_net { 2104struct arg_dev_net {
2047 struct net_device *dev; 2105 struct net_device *dev;
2048 struct net *net; 2106 struct net *net;
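This block introduces per-route preferred source addresses: a route may carry rt6i_prefsrc (plen 128 when set), ip6_route_get_saddr() returns it ahead of the usual ipv6_dev_get_saddr() selection, and fib6_remove_prefsrc() sweeps the table to clear stale entries when the address itself is deleted. A hypothetical call site for the new helper (surrounding variable names assumed):

	struct in6_addr saddr;

	if (ip6_route_get_saddr(net, rt, &fl6->daddr, 0, &saddr) != 0)
		goto tx_err;	/* no prefsrc and autoselection failed */
	ipv6_addr_copy(&fl6->saddr, &saddr);

From userspace the knob is the RTA_PREFSRC attribute parsed in the next hunk; with iproute2 that presumably corresponds to the src keyword, e.g. ip -6 route add 2001:db8::/64 dev eth0 src 2001:db8::1 (documentation addresses).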
@@ -2189,6 +2247,9 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
2189 nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen); 2247 nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
2190 } 2248 }
2191 2249
2250 if (tb[RTA_PREFSRC])
2251 nla_memcpy(&cfg->fc_prefsrc, tb[RTA_PREFSRC], 16);
2252
2192 if (tb[RTA_OIF]) 2253 if (tb[RTA_OIF])
2193 cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]); 2254 cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
2194 2255
@@ -2331,13 +2392,17 @@ static int rt6_fill_node(struct net *net,
2331#endif 2392#endif
2332 NLA_PUT_U32(skb, RTA_IIF, iif); 2393 NLA_PUT_U32(skb, RTA_IIF, iif);
2333 } else if (dst) { 2394 } else if (dst) {
2334 struct inet6_dev *idev = ip6_dst_idev(&rt->dst);
2335 struct in6_addr saddr_buf; 2395 struct in6_addr saddr_buf;
2336 if (ipv6_dev_get_saddr(net, idev ? idev->dev : NULL, 2396 if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0)
2337 dst, 0, &saddr_buf) == 0)
2338 NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf); 2397 NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
2339 } 2398 }
2340 2399
2400 if (rt->rt6i_prefsrc.plen) {
2401 struct in6_addr saddr_buf;
2402 ipv6_addr_copy(&saddr_buf, &rt->rt6i_prefsrc.addr);
2403 NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
2404 }
2405
2341 if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0) 2406 if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
2342 goto nla_put_failure; 2407 goto nla_put_failure;
2343 2408
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 5f35d595e4a5..1cca5761aea9 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -250,11 +250,6 @@ static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
 
 	dev_net_set(dev, net);
 
-	if (strchr(name, '%')) {
-		if (dev_alloc_name(dev, name) < 0)
-			goto failed_free;
-	}
-
 	nt = netdev_priv(dev);
 
 	nt->parms = *parms;
@@ -447,7 +442,7 @@ out:
 }
 
 static int
-isatap_chksrc(struct sk_buff *skb, struct iphdr *iph, struct ip_tunnel *t)
+isatap_chksrc(struct sk_buff *skb, const struct iphdr *iph, struct ip_tunnel *t)
 {
 	struct ip_tunnel_prl_entry *p;
 	int ok = 1;
@@ -460,7 +455,8 @@ isatap_chksrc(struct sk_buff *skb, struct iphdr *iph, struct ip_tunnel *t)
 		else
 			skb->ndisc_nodetype = NDISC_NODETYPE_NODEFAULT;
 	} else {
-		struct in6_addr *addr6 = &ipv6_hdr(skb)->saddr;
+		const struct in6_addr *addr6 = &ipv6_hdr(skb)->saddr;
+
 		if (ipv6_addr_is_isatap(addr6) &&
 		    (addr6->s6_addr32[3] == iph->saddr) &&
 		    ipv6_chk_prefix(addr6, t->dev))
@@ -494,7 +490,7 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
 	   8 bytes of packet payload. It means, that precise relaying of
 	   ICMP in the real Internet is absolutely infeasible.
 	 */
-	struct iphdr *iph = (struct iphdr*)skb->data;
+	const struct iphdr *iph = (const struct iphdr *)skb->data;
 	const int type = icmp_hdr(skb)->type;
 	const int code = icmp_hdr(skb)->code;
 	struct ip_tunnel *t;
@@ -552,7 +548,7 @@ out:
 	return err;
 }
 
-static inline void ipip6_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb)
+static inline void ipip6_ecn_decapsulate(const struct iphdr *iph, struct sk_buff *skb)
 {
 	if (INET_ECN_is_ce(iph->tos))
 		IP6_ECN_set_ce(ipv6_hdr(skb));
@@ -560,7 +556,7 @@ static inline void ipip6_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb)
 
 static int ipip6_rcv(struct sk_buff *skb)
 {
-	struct iphdr *iph;
+	const struct iphdr *iph;
 	struct ip_tunnel *tunnel;
 
 	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
@@ -616,7 +612,7 @@ out:
 * comes from 6rd / 6to4 (RFC 3056) addr space.
 */
 static inline
-__be32 try_6rd(struct in6_addr *v6dst, struct ip_tunnel *tunnel)
+__be32 try_6rd(const struct in6_addr *v6dst, struct ip_tunnel *tunnel)
 {
 	__be32 dst = 0;
 
@@ -659,8 +655,8 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
 {
 	struct ip_tunnel *tunnel = netdev_priv(dev);
 	struct pcpu_tstats *tstats;
-	struct iphdr *tiph = &tunnel->parms.iph;
-	struct ipv6hdr *iph6 = ipv6_hdr(skb);
+	const struct iphdr *tiph = &tunnel->parms.iph;
+	const struct ipv6hdr *iph6 = ipv6_hdr(skb);
 	u8 tos = tunnel->parms.iph.tos;
 	__be16 df = tiph->frag_off;
 	struct rtable *rt;	/* Route to the other host */
@@ -668,8 +664,9 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
 	struct iphdr *iph;	/* Our new IP header */
 	unsigned int max_headroom;	/* The extra header space needed */
 	__be32 dst = tiph->daddr;
+	struct flowi4 fl4;
 	int mtu;
-	struct in6_addr *addr6;
+	const struct in6_addr *addr6;
 	int addr_type;
 
 	if (skb->protocol != htons(ETH_P_IPV6))
@@ -688,7 +685,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
 			goto tx_error;
 		}
 
-		addr6 = (struct in6_addr*)&neigh->primary_key;
+		addr6 = (const struct in6_addr*)&neigh->primary_key;
 		addr_type = ipv6_addr_type(addr6);
 
 		if ((addr_type & IPV6_ADDR_UNICAST) &&
@@ -713,7 +710,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
 			goto tx_error;
 		}
 
-		addr6 = (struct in6_addr*)&neigh->primary_key;
+		addr6 = (const struct in6_addr*)&neigh->primary_key;
 		addr_type = ipv6_addr_type(addr6);
 
 		if (addr_type == IPV6_ADDR_ANY) {
@@ -727,7 +724,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
 		dst = addr6->s6_addr32[3];
 	}
 
-	rt = ip_route_output_ports(dev_net(dev), NULL,
+	rt = ip_route_output_ports(dev_net(dev), &fl4, NULL,
 				   dst, tiph->saddr,
 				   0, 0,
 				   IPPROTO_IPV6, RT_TOS(tos),
@@ -821,8 +818,8 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
 	iph->frag_off = df;
 	iph->protocol = IPPROTO_IPV6;
 	iph->tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6));
-	iph->daddr = rt->rt_dst;
-	iph->saddr = rt->rt_src;
+	iph->daddr = fl4.daddr;
+	iph->saddr = fl4.saddr;
 
 	if ((iph->ttl = tiph->ttl) == 0)
 		iph->ttl = iph6->hop_limit;
@@ -844,13 +841,14 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev)
 {
 	struct net_device *tdev = NULL;
 	struct ip_tunnel *tunnel;
-	struct iphdr *iph;
+	const struct iphdr *iph;
+	struct flowi4 fl4;
 
 	tunnel = netdev_priv(dev);
 	iph = &tunnel->parms.iph;
 
 	if (iph->daddr) {
-		struct rtable *rt = ip_route_output_ports(dev_net(dev), NULL,
+		struct rtable *rt = ip_route_output_ports(dev_net(dev), &fl4, NULL,
 							  iph->daddr, iph->saddr,
 							  0, 0,
 							  IPPROTO_IPV6,
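The recurring sit.c change, where ip_route_output_ports() gains a struct flowi4 pointer and callers then read fl4.saddr/fl4.daddr instead of rt->rt_src/rt->rt_dst, reflects the resolved endpoints being reported through the caller's flow key rather than cached on the route. A rough userspace model of the new calling convention; every name here is an illustrative stand-in.

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative flow key, loosely modelled on struct flowi4. */
	struct flow4 {
		uint32_t saddr;
		uint32_t daddr;
	};

	struct route4 { int id; };

	/* New-style lookup: the resolved addresses are written back into
	 * the caller-supplied flow key instead of being stored on the
	 * returned route object. */
	static struct route4 *route_output(struct flow4 *fl4,
					   uint32_t dst, uint32_t src)
	{
		static struct route4 rt = { 1 };

		fl4->daddr = dst;
		fl4->saddr = src ? src : 0x0a000001;  /* pick a source if unset */
		return &rt;
	}

	int main(void)
	{
		struct flow4 fl4;
		struct route4 *rt = route_output(&fl4, 0xc0a80001, 0);

		/* The caller now builds its header from fl4, as the sit.c
		 * transmit path does with fl4.daddr / fl4.saddr. */
		printf("rt %d: src %x dst %x\n", rt->id, fl4.saddr, fl4.daddr);
		return 0;
	}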
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 352c26081f5d..8b9644a8b697 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -66,7 +66,7 @@ static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
 static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS],
 		      ipv6_cookie_scratch);
 
-static u32 cookie_hash(struct in6_addr *saddr, struct in6_addr *daddr,
+static u32 cookie_hash(const struct in6_addr *saddr, const struct in6_addr *daddr,
 		       __be16 sport, __be16 dport, u32 count, int c)
 {
 	__u32 *tmp = __get_cpu_var(ipv6_cookie_scratch);
@@ -86,7 +86,8 @@ static u32 cookie_hash(struct in6_addr *saddr, struct in6_addr *daddr,
 	return tmp[17];
 }
 
-static __u32 secure_tcp_syn_cookie(struct in6_addr *saddr, struct in6_addr *daddr,
+static __u32 secure_tcp_syn_cookie(const struct in6_addr *saddr,
+				   const struct in6_addr *daddr,
 				   __be16 sport, __be16 dport, __u32 sseq,
 				   __u32 count, __u32 data)
 {
@@ -96,8 +97,8 @@ static __u32 secure_tcp_syn_cookie(struct in6_addr *saddr, struct in6_addr *dadd
 		& COOKIEMASK));
 }
 
-static __u32 check_tcp_syn_cookie(__u32 cookie, struct in6_addr *saddr,
-				  struct in6_addr *daddr, __be16 sport,
+static __u32 check_tcp_syn_cookie(__u32 cookie, const struct in6_addr *saddr,
+				  const struct in6_addr *daddr, __be16 sport,
 				  __be16 dport, __u32 sseq, __u32 count,
 				  __u32 maxdiff)
 {
@@ -116,7 +117,7 @@ static __u32 check_tcp_syn_cookie(__u32 cookie, struct in6_addr *saddr,
 
 __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
 {
-	struct ipv6hdr *iph = ipv6_hdr(skb);
+	const struct ipv6hdr *iph = ipv6_hdr(skb);
 	const struct tcphdr *th = tcp_hdr(skb);
 	int mssind;
 	const __u16 mss = *mssp;
@@ -138,7 +139,7 @@ __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
 
 static inline int cookie_check(struct sk_buff *skb, __u32 cookie)
 {
-	struct ipv6hdr *iph = ipv6_hdr(skb);
+	const struct ipv6hdr *iph = ipv6_hdr(skb);
 	const struct tcphdr *th = tcp_hdr(skb);
 	__u32 seq = ntohl(th->seq) - 1;
 	__u32 mssind = check_tcp_syn_cookie(cookie, &iph->saddr, &iph->daddr,
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 4f49e5dd41bb..868366470b4a 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -76,8 +76,8 @@ static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
 
 static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
 static void	__tcp_v6_send_check(struct sk_buff *skb,
-				    struct in6_addr *saddr,
-				    struct in6_addr *daddr);
+				    const struct in6_addr *saddr,
+				    const struct in6_addr *daddr);
 
 static const struct inet_connection_sock_af_ops ipv6_mapped;
 static const struct inet_connection_sock_af_ops ipv6_specific;
@@ -86,7 +86,7 @@ static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
 #else
 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
-						   struct in6_addr *addr)
+						   const struct in6_addr *addr)
 {
 	return NULL;
 }
@@ -106,8 +106,8 @@ static void tcp_v6_hash(struct sock *sk)
 }
 
 static __inline__ __sum16 tcp_v6_check(int len,
-				       struct in6_addr *saddr,
-				       struct in6_addr *daddr,
+				       const struct in6_addr *saddr,
+				       const struct in6_addr *daddr,
 				       __wsum base)
 {
 	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
@@ -331,7 +331,7 @@ failure:
 static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 		u8 type, u8 code, int offset, __be32 info)
 {
-	struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
+	const struct ipv6hdr *hdr = (const struct ipv6hdr*)skb->data;
 	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
 	struct ipv6_pinfo *np;
 	struct sock *sk;
@@ -551,7 +551,7 @@ static void tcp_v6_reqsk_destructor(struct request_sock *req)
 
 #ifdef CONFIG_TCP_MD5SIG
 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
-						   struct in6_addr *addr)
+						   const struct in6_addr *addr)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	int i;
@@ -580,7 +580,7 @@ static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
 	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
 }
 
-static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
+static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer,
 			     char *newkey, u8 newkeylen)
 {
 	/* Add key to the list */
@@ -645,7 +645,7 @@ static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
 				  newkey, newkeylen);
 }
 
-static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer)
+static int tcp_v6_md5_do_del(struct sock *sk, const struct in6_addr *peer)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	int i;
@@ -753,8 +753,8 @@ static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
 }
 
 static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
-					struct in6_addr *daddr,
-					struct in6_addr *saddr, int nbytes)
+					const struct in6_addr *daddr,
+					const struct in6_addr *saddr, int nbytes)
 {
 	struct tcp6_pseudohdr *bp;
 	struct scatterlist sg;
@@ -771,7 +771,7 @@ static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
 }
 
 static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
-			       struct in6_addr *daddr, struct in6_addr *saddr,
+			       const struct in6_addr *daddr, struct in6_addr *saddr,
 			       struct tcphdr *th)
 {
 	struct tcp_md5sig_pool *hp;
@@ -807,7 +807,7 @@ static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
 			       struct sock *sk, struct request_sock *req,
 			       struct sk_buff *skb)
 {
-	struct in6_addr *saddr, *daddr;
+	const struct in6_addr *saddr, *daddr;
 	struct tcp_md5sig_pool *hp;
 	struct hash_desc *desc;
 	struct tcphdr *th = tcp_hdr(skb);
@@ -819,7 +819,7 @@ static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
 		saddr = &inet6_rsk(req)->loc_addr;
 		daddr = &inet6_rsk(req)->rmt_addr;
 	} else {
-		struct ipv6hdr *ip6h = ipv6_hdr(skb);
+		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
 		saddr = &ip6h->saddr;
 		daddr = &ip6h->daddr;
 	}
@@ -857,7 +857,7 @@ static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
 {
 	__u8 *hash_location = NULL;
 	struct tcp_md5sig_key *hash_expected;
-	struct ipv6hdr *ip6h = ipv6_hdr(skb);
+	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
 	struct tcphdr *th = tcp_hdr(skb);
 	int genhash;
 	u8 newhash[16];
@@ -915,7 +915,7 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
 #endif
 
 static void __tcp_v6_send_check(struct sk_buff *skb,
-				struct in6_addr *saddr, struct in6_addr *daddr)
+				const struct in6_addr *saddr, const struct in6_addr *daddr)
 {
 	struct tcphdr *th = tcp_hdr(skb);
 
@@ -939,7 +939,7 @@ static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
 
 static int tcp_v6_gso_send_check(struct sk_buff *skb)
 {
-	struct ipv6hdr *ipv6h;
+	const struct ipv6hdr *ipv6h;
 	struct tcphdr *th;
 
 	if (!pskb_may_pull(skb, sizeof(*th)))
@@ -957,7 +957,7 @@ static int tcp_v6_gso_send_check(struct sk_buff *skb)
 static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
 					 struct sk_buff *skb)
 {
-	struct ipv6hdr *iph = skb_gro_network_header(skb);
+	const struct ipv6hdr *iph = skb_gro_network_header(skb);
 
 	switch (skb->ip_summed) {
 	case CHECKSUM_COMPLETE:
@@ -978,7 +978,7 @@ static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
 
 static int tcp6_gro_complete(struct sk_buff *skb)
 {
-	struct ipv6hdr *iph = ipv6_hdr(skb);
+	const struct ipv6hdr *iph = ipv6_hdr(skb);
 	struct tcphdr *th = tcp_hdr(skb);
 
 	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
@@ -1469,7 +1469,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 
 	   First: no IPv4 options.
 	 */
-	newinet->opt = NULL;
+	newinet->inet_opt = NULL;
 	newnp->ipv6_fl_list = NULL;
 
 	/* Clone RX bits */
@@ -1702,7 +1702,7 @@ ipv6_pktoptions:
 static int tcp_v6_rcv(struct sk_buff *skb)
 {
 	struct tcphdr *th;
-	struct ipv6hdr *hdr;
+	const struct ipv6hdr *hdr;
 	struct sock *sk;
 	int ret;
 	struct net *net = dev_net(skb->dev);
@@ -2028,8 +2028,8 @@ static void get_openreq6(struct seq_file *seq,
 		struct sock *sk, struct request_sock *req, int i, int uid)
 {
 	int ttd = req->expires - jiffies;
-	struct in6_addr *src = &inet6_rsk(req)->loc_addr;
-	struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;
+	const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
+	const struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;
 
 	if (ttd < 0)
 		ttd = 0;
@@ -2057,7 +2057,7 @@ static void get_openreq6(struct seq_file *seq,
 
 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
 {
-	struct in6_addr *dest, *src;
+	const struct in6_addr *dest, *src;
 	__u16 destp, srcp;
 	int timer_active;
 	unsigned long timer_expires;
@@ -2114,7 +2114,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
 static void get_timewait6_sock(struct seq_file *seq,
 			       struct inet_timewait_sock *tw, int i)
 {
-	struct in6_addr *dest, *src;
+	const struct in6_addr *dest, *src;
 	__u16 destp, srcp;
 	struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
 	int ttd = tw->tw_ttd - jiffies;
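Most of the tcp_ipv6.c hunks are one mechanical change: read-only address parameters become const struct in6_addr *. The payoff is that functions handling packet headers, which are logically immutable on the receive path, can accept const-qualified data without casts, and the compiler rejects accidental writes. A tiny standalone illustration of the principle, with made-up names.

	#include <stdio.h>

	struct in6_like { unsigned char s6_addr[16]; };

	/* Taking const documents that this checksum-style helper only
	 * reads the addresses, and lets callers pass pointers derived
	 * from const packet data. */
	static unsigned int checksum_model(const struct in6_like *saddr,
					   const struct in6_like *daddr, int len)
	{
		unsigned int sum = len;
		int i;

		for (i = 0; i < 16; i++)
			sum += saddr->s6_addr[i] + daddr->s6_addr[i];
		/* saddr->s6_addr[0] = 0;  would now fail to compile */
		return sum;
	}

	int main(void)
	{
		const struct in6_like a = { { 1 } }, b = { { 2 } };

		printf("%u\n", checksum_model(&a, &b, 40));
		return 0;
	}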
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 9e305d74b3d4..fc0c42a88e54 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -311,7 +311,7 @@ static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
 					  struct udp_table *udptable)
 {
 	struct sock *sk;
-	struct ipv6hdr *iph = ipv6_hdr(skb);
+	const struct ipv6hdr *iph = ipv6_hdr(skb);
 
 	if (unlikely(sk = skb_steal_sock(skb)))
 		return sk;
@@ -463,9 +463,9 @@ void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 		    struct udp_table *udptable)
 {
 	struct ipv6_pinfo *np;
-	struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
-	struct in6_addr *saddr = &hdr->saddr;
-	struct in6_addr *daddr = &hdr->daddr;
+	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
+	const struct in6_addr *saddr = &hdr->saddr;
+	const struct in6_addr *daddr = &hdr->daddr;
 	struct udphdr *uh = (struct udphdr*)(skb->data+offset);
 	struct sock *sk;
 	int err;
@@ -553,8 +553,8 @@ drop_no_sk_drops_inc:
 }
 
 static struct sock *udp_v6_mcast_next(struct net *net, struct sock *sk,
-				      __be16 loc_port, struct in6_addr *loc_addr,
-				      __be16 rmt_port, struct in6_addr *rmt_addr,
+				      __be16 loc_port, const struct in6_addr *loc_addr,
+				      __be16 rmt_port, const struct in6_addr *rmt_addr,
 				      int dif)
 {
 	struct hlist_nulls_node *node;
@@ -633,7 +633,7 @@ drop:
 * so we don't need to lock the hashes.
 */
 static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
-		struct in6_addr *saddr, struct in6_addr *daddr,
+		const struct in6_addr *saddr, const struct in6_addr *daddr,
 		struct udp_table *udptable)
 {
 	struct sock *sk, *stack[256 / sizeof(struct sock *)];
@@ -716,7 +716,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 	struct net *net = dev_net(skb->dev);
 	struct sock *sk;
 	struct udphdr *uh;
-	struct in6_addr *saddr, *daddr;
+	const struct in6_addr *saddr, *daddr;
 	u32 ulen = 0;
 
 	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
@@ -1278,7 +1278,7 @@ int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
 
 static int udp6_ufo_send_check(struct sk_buff *skb)
 {
-	struct ipv6hdr *ipv6h;
+	const struct ipv6hdr *ipv6h;
 	struct udphdr *uh;
 
 	if (!pskb_may_pull(skb, sizeof(*uh)))
@@ -1328,7 +1328,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, u32 features)
 	/* Do software UFO. Complete and fill in the UDP checksum as HW cannot
	 * do checksum of UDP packets sent as multiple IP fragments.
	 */
-	offset = skb->csum_start - skb_headroom(skb);
+	offset = skb_checksum_start_offset(skb);
 	csum = skb_checksum(skb, offset, skb->len - offset, 0);
 	offset += skb->csum_offset;
 	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
@@ -1382,7 +1382,7 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
 {
 	struct inet_sock *inet = inet_sk(sp);
 	struct ipv6_pinfo *np = inet6_sk(sp);
-	struct in6_addr *dest, *src;
+	const struct in6_addr *dest, *src;
 	__u16 destp, srcp;
 
 	dest = &np->daddr;
diff --git a/net/ipv6/xfrm6_mode_beet.c b/net/ipv6/xfrm6_mode_beet.c
index bbd48b101bae..3437d7d4eed6 100644
--- a/net/ipv6/xfrm6_mode_beet.c
+++ b/net/ipv6/xfrm6_mode_beet.c
@@ -41,10 +41,8 @@ static int xfrm6_beet_output(struct xfrm_state *x, struct sk_buff *skb)
 {
 	struct ipv6hdr *top_iph;
 	struct ip_beet_phdr *ph;
-	struct iphdr *iphv4;
 	int optlen, hdr_len;
 
-	iphv4 = ip_hdr(skb);
 	hdr_len = 0;
 	optlen = XFRM_MODE_SKB_CB(skb)->optlen;
 	if (unlikely(optlen))
diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c
index 645cb968d450..4d6edff0498f 100644
--- a/net/ipv6/xfrm6_mode_tunnel.c
+++ b/net/ipv6/xfrm6_mode_tunnel.c
@@ -20,7 +20,7 @@
 
 static inline void ipip6_ecn_decapsulate(struct sk_buff *skb)
 {
-	struct ipv6hdr *outer_iph = ipv6_hdr(skb);
+	const struct ipv6hdr *outer_iph = ipv6_hdr(skb);
 	struct ipv6hdr *inner_iph = ipipv6_hdr(skb);
 
 	if (INET_ECN_is_ce(ipv6_get_dsfield(outer_iph)))
@@ -55,8 +55,8 @@ static int xfrm6_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
 	dsfield &= ~INET_ECN_MASK;
 	ipv6_change_dsfield(top_iph, 0, dsfield);
 	top_iph->hop_limit = ip6_dst_hoplimit(dst->child);
-	ipv6_addr_copy(&top_iph->saddr, (struct in6_addr *)&x->props.saddr);
-	ipv6_addr_copy(&top_iph->daddr, (struct in6_addr *)&x->id.daddr);
+	ipv6_addr_copy(&top_iph->saddr, (const struct in6_addr *)&x->props.saddr);
+	ipv6_addr_copy(&top_iph->daddr, (const struct in6_addr *)&x->id.daddr);
 	return 0;
 }
 
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 05e34c8ec913..d879f7efbd10 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -124,7 +124,7 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
 	struct flowi6 *fl6 = &fl->u.ip6;
 	int onlyproto = 0;
 	u16 offset = skb_network_header_len(skb);
-	struct ipv6hdr *hdr = ipv6_hdr(skb);
+	const struct ipv6hdr *hdr = ipv6_hdr(skb);
 	struct ipv6_opt_hdr *exthdr;
 	const unsigned char *nh = skb_network_header(skb);
 	u8 nexthdr = nh[IP6CB(skb)->nhoff];
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 2969cad408de..a6770a04e3bd 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -68,7 +68,7 @@ static DEFINE_SPINLOCK(xfrm6_tunnel_spi_lock);
 
 static struct kmem_cache *xfrm6_tunnel_spi_kmem __read_mostly;
 
-static inline unsigned xfrm6_tunnel_spi_hash_byaddr(xfrm_address_t *addr)
+static inline unsigned xfrm6_tunnel_spi_hash_byaddr(const xfrm_address_t *addr)
 {
 	unsigned h;
 
@@ -85,7 +85,7 @@ static inline unsigned xfrm6_tunnel_spi_hash_byspi(u32 spi)
 	return spi % XFRM6_TUNNEL_SPI_BYSPI_HSIZE;
 }
 
-static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(struct net *net, xfrm_address_t *saddr)
+static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr)
 {
 	struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
 	struct xfrm6_tunnel_spi *x6spi;
@@ -101,7 +101,7 @@ static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(struct net *net, xfrm_
 	return NULL;
 }
 
-__be32 xfrm6_tunnel_spi_lookup(struct net *net, xfrm_address_t *saddr)
+__be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr)
 {
 	struct xfrm6_tunnel_spi *x6spi;
 	u32 spi;
@@ -237,10 +237,10 @@ static int xfrm6_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
 static int xfrm6_tunnel_rcv(struct sk_buff *skb)
 {
 	struct net *net = dev_net(skb->dev);
-	struct ipv6hdr *iph = ipv6_hdr(skb);
+	const struct ipv6hdr *iph = ipv6_hdr(skb);
 	__be32 spi;
 
-	spi = xfrm6_tunnel_spi_lookup(net, (xfrm_address_t *)&iph->saddr);
+	spi = xfrm6_tunnel_spi_lookup(net, (const xfrm_address_t *)&iph->saddr);
 	return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi) > 0 ? : 0;
 }
 
diff --git a/net/irda/ircomm/ircomm_core.c b/net/irda/ircomm/ircomm_core.c
index e97082017f4f..52079f19bbbe 100644
--- a/net/irda/ircomm/ircomm_core.c
+++ b/net/irda/ircomm/ircomm_core.c
@@ -244,14 +244,8 @@ EXPORT_SYMBOL(ircomm_connect_request);
 void ircomm_connect_indication(struct ircomm_cb *self, struct sk_buff *skb,
 			       struct ircomm_info *info)
 {
-	int clen = 0;
-
 	IRDA_DEBUG(2, "%s()\n", __func__ );
 
-	/* Check if the packet contains data on the control channel */
-	if (skb->len > 0)
-		clen = skb->data[0];
-
 	/*
	 * If there are any data hiding in the control channel, we must
	 * deliver it first. The side effect is that the control channel
diff --git a/net/irda/ircomm/ircomm_lmp.c b/net/irda/ircomm/ircomm_lmp.c
index 08fb54dc8c41..3b8095c771d4 100644
--- a/net/irda/ircomm/ircomm_lmp.c
+++ b/net/irda/ircomm/ircomm_lmp.c
@@ -75,7 +75,6 @@ static int ircomm_lmp_connect_response(struct ircomm_cb *self,
 				       struct sk_buff *userdata)
 {
 	struct sk_buff *tx_skb;
-	int ret;
 
 	IRDA_DEBUG(0, "%s()\n", __func__ );
 
@@ -100,9 +99,7 @@ static int ircomm_lmp_connect_response(struct ircomm_cb *self,
 		tx_skb = userdata;
 	}
 
-	ret = irlmp_connect_response(self->lsap, tx_skb);
-
-	return 0;
+	return irlmp_connect_response(self->lsap, tx_skb);
 }
 
 static int ircomm_lmp_disconnect_request(struct ircomm_cb *self,
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index a39cca8331df..b3cc8b3989a9 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -38,6 +38,7 @@
 #include <linux/seq_file.h>
 #include <linux/termios.h>
 #include <linux/tty.h>
+#include <linux/tty_flip.h>
 #include <linux/interrupt.h>
 #include <linux/device.h>	/* for MODULE_ALIAS_CHARDEV_MAJOR */
 
@@ -1132,7 +1133,6 @@ static int ircomm_tty_data_indication(void *instance, void *sap,
 				      struct sk_buff *skb)
 {
 	struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance;
-	struct tty_ldisc *ld;
 
 	IRDA_DEBUG(2, "%s()\n", __func__ );
 
@@ -1161,15 +1161,11 @@ static int ircomm_tty_data_indication(void *instance, void *sap,
 	}
 
 	/*
-	 * Just give it over to the line discipline. There is no need to
-	 * involve the flip buffers, since we are not running in an interrupt
-	 * handler
+	 * Use flip buffer functions since the code may be called from interrupt
+	 * context
 	 */
-
-	ld = tty_ldisc_ref(self->tty);
-	if (ld)
-		ld->ops->receive_buf(self->tty, skb->data, NULL, skb->len);
-	tty_ldisc_deref(ld);
+	tty_insert_flip_string(self->tty, skb->data, skb->len);
+	tty_flip_buffer_push(self->tty);
 
 	/* No need to kfree_skb - see ircomm_ttp_data_indication() */
 
diff --git a/net/irda/irlan/irlan_filter.c b/net/irda/irlan/irlan_filter.c
index 9ff7823abec7..7977be7caf0f 100644
--- a/net/irda/irlan/irlan_filter.c
+++ b/net/irda/irlan/irlan_filter.c
@@ -143,12 +143,8 @@ void irlan_filter_request(struct irlan_cb *self, struct sk_buff *skb)
 */
 void irlan_check_command_param(struct irlan_cb *self, char *param, char *value)
 {
-	__u8 *bytes;
-
 	IRDA_DEBUG(4, "%s()\n", __func__ );
 
-	bytes = value;
-
 	IRDA_ASSERT(self != NULL, return;);
 	IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
 
diff --git a/net/irda/irlan/irlan_provider.c b/net/irda/irlan/irlan_provider.c
index 5cf5e6c872bb..b8af74ab8b68 100644
--- a/net/irda/irlan/irlan_provider.c
+++ b/net/irda/irlan/irlan_provider.c
@@ -128,7 +128,6 @@ static void irlan_provider_connect_indication(void *instance, void *sap,
 {
 	struct irlan_cb *self;
 	struct tsap_cb *tsap;
-	__u32 saddr, daddr;
 
 	IRDA_DEBUG(0, "%s()\n", __func__ );
 
@@ -141,8 +140,6 @@ static void irlan_provider_connect_indication(void *instance, void *sap,
 	IRDA_ASSERT(tsap == self->provider.tsap_ctrl,return;);
 	IRDA_ASSERT(self->provider.state == IRLAN_IDLE, return;);
 
-	daddr = irttp_get_daddr(tsap);
-	saddr = irttp_get_saddr(tsap);
 	self->provider.max_sdu_size = max_sdu_size;
 	self->provider.max_header_size = max_header_size;
 
diff --git a/net/irda/irlap_event.c b/net/irda/irlap_event.c
index bb47021c9a55..ccd214f9d196 100644
--- a/net/irda/irlap_event.c
+++ b/net/irda/irlap_event.c
@@ -2227,8 +2227,6 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event,
 static int irlap_state_sclose(struct irlap_cb *self, IRLAP_EVENT event,
 			      struct sk_buff *skb, struct irlap_info *info)
 {
-	int ret = 0;
-
 	IRDA_DEBUG(1, "%s()\n", __func__);
 
 	IRDA_ASSERT(self != NULL, return -ENODEV;);
@@ -2289,7 +2287,6 @@ static int irlap_state_sclose(struct irlap_cb *self, IRLAP_EVENT event,
 		IRDA_DEBUG(1, "%s(), Unknown event %d, (%s)\n", __func__,
 			   event, irlap_event[event]);
 
-		ret = -EINVAL;
 		break;
 	}
 
diff --git a/net/irda/irproc.c b/net/irda/irproc.c
index 318766e5dbdf..b9ac598e2116 100644
--- a/net/irda/irproc.c
+++ b/net/irda/irproc.c
@@ -65,15 +65,14 @@ static const struct irda_entry irda_dirs[] = {
 void __init irda_proc_register(void)
 {
 	int i;
-	struct proc_dir_entry *d;
 
 	proc_irda = proc_mkdir("irda", init_net.proc_net);
 	if (proc_irda == NULL)
 		return;
 
 	for (i = 0; i < ARRAY_SIZE(irda_dirs); i++)
-		d = proc_create(irda_dirs[i].name, 0, proc_irda,
-				irda_dirs[i].fops);
+		(void) proc_create(irda_dirs[i].name, 0, proc_irda,
+				   irda_dirs[i].fops);
 }
 
 /*
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 986b2a5e8769..e2013e434d03 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -190,7 +190,6 @@ static int afiucv_pm_freeze(struct device *dev)
 */
 static int afiucv_pm_restore_thaw(struct device *dev)
 {
-	struct iucv_sock *iucv;
 	struct sock *sk;
 	struct hlist_node *node;
 
@@ -199,7 +198,6 @@ static int afiucv_pm_restore_thaw(struct device *dev)
 #endif
 	read_lock(&iucv_sk_list.lock);
 	sk_for_each(sk, node, &iucv_sk_list.head) {
-		iucv = iucv_sk(sk);
 		switch (sk->sk_state) {
 		case IUCV_CONNECTED:
 			sk->sk_err = EPIPE;
@@ -381,7 +379,6 @@ static void iucv_sock_close(struct sock *sk)
 {
 	unsigned char user_data[16];
 	struct iucv_sock *iucv = iucv_sk(sk);
-	int err;
 	unsigned long timeo;
 
 	iucv_sock_clear_timer(sk);
@@ -394,8 +391,6 @@ static void iucv_sock_close(struct sock *sk)
 
 	case IUCV_CONNECTED:
 	case IUCV_DISCONN:
-		err = 0;
-
 		sk->sk_state = IUCV_CLOSING;
 		sk->sk_state_change(sk);
 
@@ -404,7 +399,7 @@ static void iucv_sock_close(struct sock *sk)
 				timeo = sk->sk_lingertime;
 			else
 				timeo = IUCV_DISCONN_TIMEOUT;
-			err = iucv_sock_wait(sk,
+			iucv_sock_wait(sk,
 					iucv_sock_in_state(sk, IUCV_CLOSED, 0),
 					timeo);
 		}
@@ -417,7 +412,7 @@ static void iucv_sock_close(struct sock *sk)
 			low_nmcpy(user_data, iucv->src_name);
 			high_nmcpy(user_data, iucv->dst_name);
 			ASCEBC(user_data, sizeof(user_data));
-			err = iucv_path_sever(iucv->path, user_data);
+			iucv_path_sever(iucv->path, user_data);
 			iucv_path_free(iucv->path);
 			iucv->path = NULL;
 		}
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 8f156bd86be7..a15c01524959 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -128,8 +128,8 @@ struct iucv_irq_list {
 };
 
 static struct iucv_irq_data *iucv_irq_data[NR_CPUS];
-static cpumask_t iucv_buffer_cpumask = CPU_MASK_NONE;
-static cpumask_t iucv_irq_cpumask = CPU_MASK_NONE;
+static cpumask_t iucv_buffer_cpumask = { CPU_BITS_NONE };
+static cpumask_t iucv_irq_cpumask = { CPU_BITS_NONE };
 
 /*
 * Queue of interrupt buffers lock for delivery via the tasklet
@@ -406,7 +406,7 @@ static void iucv_allow_cpu(void *data)
 	parm->set_mask.ipmask = 0xf8;
 	iucv_call_b2f0(IUCV_SETCONTROLMASK, parm);
 	/* Set indication that iucv interrupts are allowed for this cpu. */
-	cpu_set(cpu, iucv_irq_cpumask);
+	cpumask_set_cpu(cpu, &iucv_irq_cpumask);
 }
 
 /**
@@ -426,7 +426,7 @@ static void iucv_block_cpu(void *data)
 	iucv_call_b2f0(IUCV_SETMASK, parm);
 
 	/* Clear indication that iucv interrupts are allowed for this cpu. */
-	cpu_clear(cpu, iucv_irq_cpumask);
+	cpumask_clear_cpu(cpu, &iucv_irq_cpumask);
 }
 
 /**
@@ -451,7 +451,7 @@ static void iucv_block_cpu_almost(void *data)
 	iucv_call_b2f0(IUCV_SETCONTROLMASK, parm);
 
 	/* Clear indication that iucv interrupts are allowed for this cpu. */
-	cpu_clear(cpu, iucv_irq_cpumask);
+	cpumask_clear_cpu(cpu, &iucv_irq_cpumask);
 }
 
 /**
@@ -466,7 +466,7 @@ static void iucv_declare_cpu(void *data)
 	union iucv_param *parm;
 	int rc;
 
-	if (cpu_isset(cpu, iucv_buffer_cpumask))
+	if (cpumask_test_cpu(cpu, &iucv_buffer_cpumask))
 		return;
 
 	/* Declare interrupt buffer. */
@@ -499,9 +499,9 @@ static void iucv_declare_cpu(void *data)
 	}
 
 	/* Set indication that an iucv buffer exists for this cpu. */
-	cpu_set(cpu, iucv_buffer_cpumask);
+	cpumask_set_cpu(cpu, &iucv_buffer_cpumask);
 
-	if (iucv_nonsmp_handler == 0 || cpus_empty(iucv_irq_cpumask))
+	if (iucv_nonsmp_handler == 0 || cpumask_empty(&iucv_irq_cpumask))
 		/* Enable iucv interrupts on this cpu. */
 		iucv_allow_cpu(NULL);
 	else
@@ -520,7 +520,7 @@ static void iucv_retrieve_cpu(void *data)
 	int cpu = smp_processor_id();
 	union iucv_param *parm;
 
-	if (!cpu_isset(cpu, iucv_buffer_cpumask))
+	if (!cpumask_test_cpu(cpu, &iucv_buffer_cpumask))
 		return;
 
 	/* Block iucv interrupts. */
@@ -531,7 +531,7 @@ static void iucv_retrieve_cpu(void *data)
 	iucv_call_b2f0(IUCV_RETRIEVE_BUFFER, parm);
 
 	/* Clear indication that an iucv buffer exists for this cpu. */
-	cpu_clear(cpu, iucv_buffer_cpumask);
+	cpumask_clear_cpu(cpu, &iucv_buffer_cpumask);
 }
 
 /**
@@ -546,8 +546,8 @@ static void iucv_setmask_mp(void)
 	get_online_cpus();
 	for_each_online_cpu(cpu)
 		/* Enable all cpus with a declared buffer. */
-		if (cpu_isset(cpu, iucv_buffer_cpumask) &&
-		    !cpu_isset(cpu, iucv_irq_cpumask))
+		if (cpumask_test_cpu(cpu, &iucv_buffer_cpumask) &&
+		    !cpumask_test_cpu(cpu, &iucv_irq_cpumask))
 			smp_call_function_single(cpu, iucv_allow_cpu,
 						 NULL, 1);
 	put_online_cpus();
@@ -564,9 +564,9 @@ static void iucv_setmask_up(void)
 	int cpu;
 
 	/* Disable all cpu but the first in cpu_irq_cpumask. */
-	cpumask = iucv_irq_cpumask;
-	cpu_clear(first_cpu(iucv_irq_cpumask), cpumask);
-	for_each_cpu_mask_nr(cpu, cpumask)
+	cpumask_copy(&cpumask, &iucv_irq_cpumask);
+	cpumask_clear_cpu(cpumask_first(&iucv_irq_cpumask), &cpumask);
+	for_each_cpu(cpu, &cpumask)
 		smp_call_function_single(cpu, iucv_block_cpu, NULL, 1);
 }
 
@@ -593,7 +593,7 @@ static int iucv_enable(void)
 	rc = -EIO;
 	for_each_online_cpu(cpu)
 		smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1);
-	if (cpus_empty(iucv_buffer_cpumask))
+	if (cpumask_empty(&iucv_buffer_cpumask))
 		/* No cpu could declare an iucv buffer. */
 		goto out;
 	put_online_cpus();
@@ -675,15 +675,16 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
 	case CPU_DOWN_PREPARE_FROZEN:
 		if (!iucv_path_table)
 			break;
-		cpumask = iucv_buffer_cpumask;
-		cpu_clear(cpu, cpumask);
-		if (cpus_empty(cpumask))
+		cpumask_copy(&cpumask, &iucv_buffer_cpumask);
+		cpumask_clear_cpu(cpu, &cpumask);
+		if (cpumask_empty(&cpumask))
 			/* Can't offline last IUCV enabled cpu. */
 			return notifier_from_errno(-EINVAL);
 		smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 1);
-		if (cpus_empty(iucv_irq_cpumask))
-			smp_call_function_single(first_cpu(iucv_buffer_cpumask),
-						 iucv_allow_cpu, NULL, 1);
+		if (cpumask_empty(&iucv_irq_cpumask))
+			smp_call_function_single(
+				cpumask_first(&iucv_buffer_cpumask),
+				iucv_allow_cpu, NULL, 1);
 		break;
 	}
 	return NOTIFY_OK;
@@ -828,14 +829,14 @@ EXPORT_SYMBOL(iucv_unregister);
 static int iucv_reboot_event(struct notifier_block *this,
 			     unsigned long event, void *ptr)
 {
-	int i, rc;
+	int i;
 
 	get_online_cpus();
 	on_each_cpu(iucv_block_cpu, NULL, 1);
 	preempt_disable();
 	for (i = 0; i < iucv_max_pathid; i++) {
 		if (iucv_path_table[i])
-			rc = iucv_sever_pathid(i, NULL);
+			iucv_sever_pathid(i, NULL);
 	}
 	preempt_enable();
 	put_online_cpus();
@@ -866,7 +867,7 @@ int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler,
 	int rc;
 
 	local_bh_disable();
-	if (cpus_empty(iucv_buffer_cpumask)) {
+	if (cpumask_empty(&iucv_buffer_cpumask)) {
 		rc = -EIO;
 		goto out;
 	}
@@ -915,7 +916,7 @@ int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler,
 
 	spin_lock_bh(&iucv_table_lock);
 	iucv_cleanup_queue();
-	if (cpus_empty(iucv_buffer_cpumask)) {
+	if (cpumask_empty(&iucv_buffer_cpumask)) {
 		rc = -EIO;
 		goto out;
 	}
@@ -975,7 +976,7 @@ int iucv_path_quiesce(struct iucv_path *path, u8 userdata[16])
 	int rc;
 
 	local_bh_disable();
-	if (cpus_empty(iucv_buffer_cpumask)) {
+	if (cpumask_empty(&iucv_buffer_cpumask)) {
 		rc = -EIO;
 		goto out;
 	}
@@ -1007,7 +1008,7 @@ int iucv_path_resume(struct iucv_path *path, u8 userdata[16])
 	int rc;
 
 	local_bh_disable();
-	if (cpus_empty(iucv_buffer_cpumask)) {
+	if (cpumask_empty(&iucv_buffer_cpumask)) {
 		rc = -EIO;
 		goto out;
 	}
@@ -1036,7 +1037,7 @@ int iucv_path_sever(struct iucv_path *path, u8 userdata[16])
 	int rc;
 
 	preempt_disable();
-	if (cpus_empty(iucv_buffer_cpumask)) {
+	if (cpumask_empty(&iucv_buffer_cpumask)) {
 		rc = -EIO;
 		goto out;
 	}
@@ -1070,7 +1071,7 @@ int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg,
 	int rc;
 
 	local_bh_disable();
-	if (cpus_empty(iucv_buffer_cpumask)) {
+	if (cpumask_empty(&iucv_buffer_cpumask)) {
 		rc = -EIO;
 		goto out;
 	}
@@ -1162,7 +1163,7 @@ int __iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
 	if (msg->flags & IUCV_IPRMDATA)
 		return iucv_message_receive_iprmdata(path, msg, flags,
 						     buffer, size, residual);
-	if (cpus_empty(iucv_buffer_cpumask)) {
+	if (cpumask_empty(&iucv_buffer_cpumask)) {
 		rc = -EIO;
 		goto out;
 	}
@@ -1235,7 +1236,7 @@ int iucv_message_reject(struct iucv_path *path, struct iucv_message *msg)
 	int rc;
 
 	local_bh_disable();
-	if (cpus_empty(iucv_buffer_cpumask)) {
+	if (cpumask_empty(&iucv_buffer_cpumask)) {
 		rc = -EIO;
 		goto out;
 	}
@@ -1274,7 +1275,7 @@ int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg,
 	int rc;
 
 	local_bh_disable();
-	if (cpus_empty(iucv_buffer_cpumask)) {
+	if (cpumask_empty(&iucv_buffer_cpumask)) {
 		rc = -EIO;
 		goto out;
 	}
@@ -1324,7 +1325,7 @@ int __iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
 	union iucv_param *parm;
 	int rc;
 
-	if (cpus_empty(iucv_buffer_cpumask)) {
+	if (cpumask_empty(&iucv_buffer_cpumask)) {
 		rc = -EIO;
 		goto out;
 	}
@@ -1411,7 +1412,7 @@ int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg,
 	int rc;
 
 	local_bh_disable();
-	if (cpus_empty(iucv_buffer_cpumask)) {
+	if (cpumask_empty(&iucv_buffer_cpumask)) {
 		rc = -EIO;
 		goto out;
 	}
@@ -1888,7 +1889,7 @@ static int iucv_pm_freeze(struct device *dev)
 	printk(KERN_WARNING "iucv_pm_freeze\n");
 #endif
 	if (iucv_pm_state != IUCV_PM_FREEZING) {
-		for_each_cpu_mask_nr(cpu, iucv_irq_cpumask)
+		for_each_cpu(cpu, &iucv_irq_cpumask)
			smp_call_function_single(cpu, iucv_block_cpu_almost,
						 NULL, 1);
		cancel_work_sync(&iucv_work);
@@ -1928,7 +1929,7 @@ static int iucv_pm_thaw(struct device *dev)
 		if (rc)
 			goto out;
 	}
-	if (cpus_empty(iucv_irq_cpumask)) {
+	if (cpumask_empty(&iucv_irq_cpumask)) {
 		if (iucv_nonsmp_handler)
 			/* enable interrupts on one cpu */
 			iucv_allow_cpu(NULL);
@@ -1961,7 +1962,7 @@ static int iucv_pm_restore(struct device *dev)
 	pr_warning("Suspending Linux did not completely close all IUCV "
 		"connections\n");
 	iucv_pm_state = IUCV_PM_RESTORING;
-	if (cpus_empty(iucv_irq_cpumask)) {
+	if (cpumask_empty(&iucv_irq_cpumask)) {
 		rc = iucv_query_maxconn();
 		rc = iucv_enable();
 		if (rc)
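The iucv.c hunks migrate from the old by-value cpumask operations (cpu_set, cpu_isset, cpus_empty, first_cpu) to the pointer-based accessors (cpumask_set_cpu, cpumask_test_cpu, cpumask_empty, cpumask_first), which avoid copying what can be a large bitmap on high-NR_CPUS kernels. A small userspace model of the accessor style, assuming a toy one-word bitmap; the mask_* names are illustrative, not the kernel API.

	#include <stdio.h>

	/* Toy one-word "cpumask"; the kernel's is an arbitrarily long
	 * bitmap, which is exactly why it is passed by pointer rather
	 * than by value. */
	struct mask { unsigned long bits; };

	static void mask_set_cpu(int cpu, struct mask *m)   { m->bits |= 1UL << cpu; }
	static void mask_clear_cpu(int cpu, struct mask *m) { m->bits &= ~(1UL << cpu); }
	static int  mask_test_cpu(int cpu, const struct mask *m)
	{
		return (m->bits >> cpu) & 1;
	}
	static int  mask_empty(const struct mask *m)        { return m->bits == 0; }

	int main(void)
	{
		struct mask irq_mask = { 0 };

		mask_set_cpu(2, &irq_mask);
		printf("cpu2: %d empty: %d\n",
		       mask_test_cpu(2, &irq_mask), mask_empty(&irq_mask));
		mask_clear_cpu(2, &irq_mask);
		printf("empty now: %d\n", mask_empty(&irq_mask));
		return 0;
	}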
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 7db86ffcf070..d62401c25684 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -712,7 +712,7 @@ static unsigned int pfkey_sockaddr_fill(const xfrm_address_t *xaddr, __be16 port
 		sin6->sin6_family = AF_INET6;
 		sin6->sin6_port = port;
 		sin6->sin6_flowinfo = 0;
-		ipv6_addr_copy(&sin6->sin6_addr, (struct in6_addr *)xaddr->a6);
+		ipv6_addr_copy(&sin6->sin6_addr, (const struct in6_addr *)xaddr->a6);
 		sin6->sin6_scope_id = 0;
 		return 128;
 	}
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index c64ce0a0bb03..ed8a2335442f 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -954,7 +954,7 @@ static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
954} 954}
955 955
956static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, 956static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
957 size_t data_len) 957 struct flowi *fl, size_t data_len)
958{ 958{
959 struct l2tp_tunnel *tunnel = session->tunnel; 959 struct l2tp_tunnel *tunnel = session->tunnel;
960 unsigned int len = skb->len; 960 unsigned int len = skb->len;
@@ -987,7 +987,7 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
987 987
988 /* Queue the packet to IP for output */ 988 /* Queue the packet to IP for output */
989 skb->local_df = 1; 989 skb->local_df = 1;
990 error = ip_queue_xmit(skb); 990 error = ip_queue_xmit(skb, fl);
991 991
992 /* Update stats */ 992 /* Update stats */
993 if (error >= 0) { 993 if (error >= 0) {
@@ -1028,6 +1028,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
1028 int data_len = skb->len; 1028 int data_len = skb->len;
1029 struct l2tp_tunnel *tunnel = session->tunnel; 1029 struct l2tp_tunnel *tunnel = session->tunnel;
1030 struct sock *sk = tunnel->sock; 1030 struct sock *sk = tunnel->sock;
1031 struct flowi *fl;
1031 struct udphdr *uh; 1032 struct udphdr *uh;
1032 struct inet_sock *inet; 1033 struct inet_sock *inet;
1033 __wsum csum; 1034 __wsum csum;
@@ -1060,14 +1061,21 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
1060 IPSKB_REROUTED); 1061 IPSKB_REROUTED);
1061 nf_reset(skb); 1062 nf_reset(skb);
1062 1063
1064 bh_lock_sock(sk);
1065 if (sock_owned_by_user(sk)) {
1066 dev_kfree_skb(skb);
1067 goto out_unlock;
1068 }
1069
1063 /* Get routing info from the tunnel socket */ 1070 /* Get routing info from the tunnel socket */
1064 skb_dst_drop(skb); 1071 skb_dst_drop(skb);
1065 skb_dst_set(skb, dst_clone(__sk_dst_get(sk))); 1072 skb_dst_set(skb, dst_clone(__sk_dst_get(sk)));
1066 1073
1074 inet = inet_sk(sk);
1075 fl = &inet->cork.fl;
1067 switch (tunnel->encap) { 1076 switch (tunnel->encap) {
1068 case L2TP_ENCAPTYPE_UDP: 1077 case L2TP_ENCAPTYPE_UDP:
1069 /* Setup UDP header */ 1078 /* Setup UDP header */
1070 inet = inet_sk(sk);
1071 __skb_push(skb, sizeof(*uh)); 1079 __skb_push(skb, sizeof(*uh));
1072 skb_reset_transport_header(skb); 1080 skb_reset_transport_header(skb);
1073 uh = udp_hdr(skb); 1081 uh = udp_hdr(skb);
@@ -1105,7 +1113,9 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
1105 1113
1106 l2tp_skb_set_owner_w(skb, sk); 1114 l2tp_skb_set_owner_w(skb, sk);
1107 1115
1108 l2tp_xmit_core(session, skb, data_len); 1116 l2tp_xmit_core(session, skb, fl, data_len);
1117out_unlock:
1118 bh_unlock_sock(sk);
1109 1119
1110abort: 1120abort:
1111 return 0; 1121 return 0;
@@ -1425,16 +1435,15 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
1425 1435
1426 /* Add tunnel to our list */ 1436 /* Add tunnel to our list */
1427 INIT_LIST_HEAD(&tunnel->list); 1437 INIT_LIST_HEAD(&tunnel->list);
1428 spin_lock_bh(&pn->l2tp_tunnel_list_lock);
1429 list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
1430 spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
1431 synchronize_rcu();
1432 atomic_inc(&l2tp_tunnel_count); 1438 atomic_inc(&l2tp_tunnel_count);
1433 1439
1434 /* Bump the reference count. The tunnel context is deleted 1440 /* Bump the reference count. The tunnel context is deleted
1435 * only when this drops to zero. 1441 * only when this drops to zero. Must be done before list insertion
1436 */ 1442 */
1437 l2tp_tunnel_inc_refcount(tunnel); 1443 l2tp_tunnel_inc_refcount(tunnel);
1444 spin_lock_bh(&pn->l2tp_tunnel_list_lock);
1445 list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
1446 spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
1438 1447
1439 err = 0; 1448 err = 0;
1440err: 1449err:
@@ -1626,7 +1635,6 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
1626 hlist_add_head_rcu(&session->global_hlist, 1635 hlist_add_head_rcu(&session->global_hlist,
1627 l2tp_session_id_hash_2(pn, session_id)); 1636 l2tp_session_id_hash_2(pn, session_id));
1628 spin_unlock_bh(&pn->l2tp_session_hlist_lock); 1637 spin_unlock_bh(&pn->l2tp_session_hlist_lock);
1629 synchronize_rcu();
1630 } 1638 }
1631 1639
1632 /* Ignore management session in session count value */ 1640 /* Ignore management session in session count value */
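Note: the reordering in l2tp_tunnel_create() follows the standard RCU publish rule: take the reference that keeps the tunnel alive before adding it to the RCU-visible list, so a reader can never look up a tunnel whose refcount is still zero. With that ordering in place, the synchronize_rcu() after insertion (and the one removed from l2tp_session_create()) serves no purpose; readers either see the fully initialized object or do not see it at all. A minimal sketch of the publish side, with hypothetical obj/list/lock names:

    /* Sketch only: safely publish an object onto an RCU list. */
    static void publish_obj(struct my_obj *obj, struct list_head *head,
                            spinlock_t *lock)
    {
            /* Pin the object first; once it is on the list, RCU
             * readers may find it and take their own references. */
            refcount_inc(&obj->ref);        /* hypothetical refcount field */

            spin_lock_bh(lock);
            list_add_rcu(&obj->list, head);
            spin_unlock_bh(lock);
            /* No synchronize_rcu() is needed on the publish side;
             * grace periods matter for removal, not insertion. */
    }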
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 5c04f3e42704..b6466e71f5e1 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -296,12 +296,12 @@ out_in_use:
296 296
297static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) 297static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
298{ 298{
299 int rc;
300 struct inet_sock *inet = inet_sk(sk);
301 struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *) uaddr; 299 struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *) uaddr;
300 struct inet_sock *inet = inet_sk(sk);
301 struct flowi4 *fl4;
302 struct rtable *rt; 302 struct rtable *rt;
303 __be32 saddr; 303 __be32 saddr;
304 int oif; 304 int oif, rc;
305 305
306 rc = -EINVAL; 306 rc = -EINVAL;
307 if (addr_len < sizeof(*lsa)) 307 if (addr_len < sizeof(*lsa))
@@ -311,6 +311,8 @@ static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
311 if (lsa->l2tp_family != AF_INET) 311 if (lsa->l2tp_family != AF_INET)
312 goto out; 312 goto out;
313 313
314 lock_sock(sk);
315
314 sk_dst_reset(sk); 316 sk_dst_reset(sk);
315 317
316 oif = sk->sk_bound_dev_if; 318 oif = sk->sk_bound_dev_if;
@@ -320,7 +322,8 @@ static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
320 if (ipv4_is_multicast(lsa->l2tp_addr.s_addr)) 322 if (ipv4_is_multicast(lsa->l2tp_addr.s_addr))
321 goto out; 323 goto out;
322 324
323 rt = ip_route_connect(lsa->l2tp_addr.s_addr, saddr, 325 fl4 = &inet->cork.fl.u.ip4;
326 rt = ip_route_connect(fl4, lsa->l2tp_addr.s_addr, saddr,
324 RT_CONN_FLAGS(sk), oif, 327 RT_CONN_FLAGS(sk), oif,
325 IPPROTO_L2TP, 328 IPPROTO_L2TP,
326 0, 0, sk, true); 329 0, 0, sk, true);
@@ -340,10 +343,10 @@ static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
340 l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id; 343 l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;
341 344
342 if (!inet->inet_saddr) 345 if (!inet->inet_saddr)
343 inet->inet_saddr = rt->rt_src; 346 inet->inet_saddr = fl4->saddr;
344 if (!inet->inet_rcv_saddr) 347 if (!inet->inet_rcv_saddr)
345 inet->inet_rcv_saddr = rt->rt_src; 348 inet->inet_rcv_saddr = fl4->saddr;
346 inet->inet_daddr = rt->rt_dst; 349 inet->inet_daddr = fl4->daddr;
347 sk->sk_state = TCP_ESTABLISHED; 350 sk->sk_state = TCP_ESTABLISHED;
348 inet->inet_id = jiffies; 351 inet->inet_id = jiffies;
349 352
@@ -356,6 +359,7 @@ static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
356 359
357 rc = 0; 360 rc = 0;
358out: 361out:
362 release_sock(sk);
359 return rc; 363 return rc;
360} 364}
361 365
@@ -416,23 +420,28 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
416 int rc; 420 int rc;
417 struct l2tp_ip_sock *lsa = l2tp_ip_sk(sk); 421 struct l2tp_ip_sock *lsa = l2tp_ip_sk(sk);
418 struct inet_sock *inet = inet_sk(sk); 422 struct inet_sock *inet = inet_sk(sk);
419 struct ip_options *opt = inet->opt;
420 struct rtable *rt = NULL; 423 struct rtable *rt = NULL;
424 struct flowi4 *fl4;
421 int connected = 0; 425 int connected = 0;
422 __be32 daddr; 426 __be32 daddr;
423 427
428 lock_sock(sk);
429
430 rc = -ENOTCONN;
424 if (sock_flag(sk, SOCK_DEAD)) 431 if (sock_flag(sk, SOCK_DEAD))
425 return -ENOTCONN; 432 goto out;
426 433
427 /* Get and verify the address. */ 434 /* Get and verify the address. */
428 if (msg->msg_name) { 435 if (msg->msg_name) {
429 struct sockaddr_l2tpip *lip = (struct sockaddr_l2tpip *) msg->msg_name; 436 struct sockaddr_l2tpip *lip = (struct sockaddr_l2tpip *) msg->msg_name;
437 rc = -EINVAL;
430 if (msg->msg_namelen < sizeof(*lip)) 438 if (msg->msg_namelen < sizeof(*lip))
431 return -EINVAL; 439 goto out;
432 440
433 if (lip->l2tp_family != AF_INET) { 441 if (lip->l2tp_family != AF_INET) {
442 rc = -EAFNOSUPPORT;
434 if (lip->l2tp_family != AF_UNSPEC) 443 if (lip->l2tp_family != AF_UNSPEC)
435 return -EAFNOSUPPORT; 444 goto out;
436 } 445 }
437 446
438 daddr = lip->l2tp_addr.s_addr; 447 daddr = lip->l2tp_addr.s_addr;
@@ -467,19 +476,27 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
467 goto error; 476 goto error;
468 } 477 }
469 478
479 fl4 = &inet->cork.fl.u.ip4;
470 if (connected) 480 if (connected)
471 rt = (struct rtable *) __sk_dst_check(sk, 0); 481 rt = (struct rtable *) __sk_dst_check(sk, 0);
472 482
473 if (rt == NULL) { 483 if (rt == NULL) {
484 struct ip_options_rcu *inet_opt;
485
486 rcu_read_lock();
487 inet_opt = rcu_dereference(inet->inet_opt);
488
474 /* Use correct destination address if we have options. */ 489 /* Use correct destination address if we have options. */
475 if (opt && opt->srr) 490 if (inet_opt && inet_opt->opt.srr)
476 daddr = opt->faddr; 491 daddr = inet_opt->opt.faddr;
492
493 rcu_read_unlock();
477 494
478 /* If this fails, retransmit mechanism of transport layer will 495 /* If this fails, retransmit mechanism of transport layer will
479 * keep trying until route appears or the connection times 496 * keep trying until route appears or the connection times
480 * itself out. 497 * itself out.
481 */ 498 */
482 rt = ip_route_output_ports(sock_net(sk), sk, 499 rt = ip_route_output_ports(sock_net(sk), fl4, sk,
483 daddr, inet->inet_saddr, 500 daddr, inet->inet_saddr,
484 inet->inet_dport, inet->inet_sport, 501 inet->inet_dport, inet->inet_sport,
485 sk->sk_protocol, RT_CONN_FLAGS(sk), 502 sk->sk_protocol, RT_CONN_FLAGS(sk),
@@ -491,7 +508,7 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
491 skb_dst_set(skb, dst_clone(&rt->dst)); 508 skb_dst_set(skb, dst_clone(&rt->dst));
492 509
493 /* Queue the packet to IP for output */ 510 /* Queue the packet to IP for output */
494 rc = ip_queue_xmit(skb); 511 rc = ip_queue_xmit(skb, &inet->cork.fl);
495 512
496error: 513error:
497 /* Update stats */ 514 /* Update stats */
@@ -503,12 +520,15 @@ error:
503 lsa->tx_errors++; 520 lsa->tx_errors++;
504 } 521 }
505 522
523out:
524 release_sock(sk);
506 return rc; 525 return rc;
507 526
508no_route: 527no_route:
509 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); 528 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
510 kfree_skb(skb); 529 kfree_skb(skb);
511 return -EHOSTUNREACH; 530 rc = -EHOSTUNREACH;
531 goto out;
512} 532}
513 533
514static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, 534static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
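Note: taking lock_sock() at the top of l2tp_ip_connect() and l2tp_ip_sendmsg() is what forces the conversion of every early "return -E..." into a goto to a single exit label, since each failure path must now pass through release_sock(). The resulting shape, roughly (helper name hypothetical):

    /* Sketch: single-exit error handling under the socket lock. */
    static int xmit_locked(struct sock *sk, struct sk_buff *skb)
    {
            int rc;

            lock_sock(sk);

            rc = -ENOTCONN;
            if (sock_flag(sk, SOCK_DEAD))
                    goto out;               /* still drops the lock */

            rc = do_transmit(sk, skb);      /* hypothetical helper */
    out:
            release_sock(sk);
            return rc;
    }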
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index 4c1e540732d7..93a41a09458b 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -795,11 +795,12 @@ int l2tp_nl_register_ops(enum l2tp_pwtype pw_type, const struct l2tp_nl_cmd_ops
795 goto out; 795 goto out;
796 796
797 l2tp_nl_cmd_ops[pw_type] = ops; 797 l2tp_nl_cmd_ops[pw_type] = ops;
798 ret = 0;
798 799
799out: 800out:
800 genl_unlock(); 801 genl_unlock();
801err: 802err:
802 return 0; 803 return ret;
803} 804}
804EXPORT_SYMBOL_GPL(l2tp_nl_register_ops); 805EXPORT_SYMBOL_GPL(l2tp_nl_register_ops);
805 806
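Note: the l2tp_netlink change fixes a plain error-propagation bug: the error paths computed a failure code, but the function returned a hardcoded 0, so callers would proceed as if the pseudowire ops had been registered. The corrected shape keeps the failure code in ret and clears it only on success; a condensed sketch with hypothetical names and lock:

    /* Sketch: return the computed result, not a constant. */
    static int register_ops(unsigned int type, const struct my_ops *ops)
    {
            int ret = -EBUSY;               /* assume the slot is taken */

            mutex_lock(&ops_lock);
            if (!ops_table[type]) {
                    ops_table[type] = ops;
                    ret = 0;                /* success only if it was free */
            }
            mutex_unlock(&ops_lock);
            return ret;                     /* previously "return 0" */
    }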
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index 513f85cc2ae1..f5fdfcbf552a 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -2,7 +2,6 @@ config MAC80211
2 tristate "Generic IEEE 802.11 Networking Stack (mac80211)" 2 tristate "Generic IEEE 802.11 Networking Stack (mac80211)"
3 depends on CFG80211 3 depends on CFG80211
4 select CRYPTO 4 select CRYPTO
5 select CRYPTO_ECB
6 select CRYPTO_ARC4 5 select CRYPTO_ARC4
7 select CRYPTO_AES 6 select CRYPTO_AES
8 select CRC32 7 select CRC32
diff --git a/net/mac80211/aes_ccm.c b/net/mac80211/aes_ccm.c
index 4bd6ef0be380..b9b595c08112 100644
--- a/net/mac80211/aes_ccm.c
+++ b/net/mac80211/aes_ccm.c
@@ -54,13 +54,12 @@ void ieee80211_aes_ccm_encrypt(struct crypto_cipher *tfm, u8 *scratch,
54 u8 *cdata, u8 *mic) 54 u8 *cdata, u8 *mic)
55{ 55{
56 int i, j, last_len, num_blocks; 56 int i, j, last_len, num_blocks;
57 u8 *pos, *cpos, *b, *s_0, *e, *b_0, *aad; 57 u8 *pos, *cpos, *b, *s_0, *e, *b_0;
58 58
59 b = scratch; 59 b = scratch;
60 s_0 = scratch + AES_BLOCK_LEN; 60 s_0 = scratch + AES_BLOCK_LEN;
61 e = scratch + 2 * AES_BLOCK_LEN; 61 e = scratch + 2 * AES_BLOCK_LEN;
62 b_0 = scratch + 3 * AES_BLOCK_LEN; 62 b_0 = scratch + 3 * AES_BLOCK_LEN;
63 aad = scratch + 4 * AES_BLOCK_LEN;
64 63
65 num_blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN); 64 num_blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN);
66 last_len = data_len % AES_BLOCK_LEN; 65 last_len = data_len % AES_BLOCK_LEN;
@@ -94,13 +93,12 @@ int ieee80211_aes_ccm_decrypt(struct crypto_cipher *tfm, u8 *scratch,
94 u8 *cdata, size_t data_len, u8 *mic, u8 *data) 93 u8 *cdata, size_t data_len, u8 *mic, u8 *data)
95{ 94{
96 int i, j, last_len, num_blocks; 95 int i, j, last_len, num_blocks;
97 u8 *pos, *cpos, *b, *s_0, *a, *b_0, *aad; 96 u8 *pos, *cpos, *b, *s_0, *a, *b_0;
98 97
99 b = scratch; 98 b = scratch;
100 s_0 = scratch + AES_BLOCK_LEN; 99 s_0 = scratch + AES_BLOCK_LEN;
101 a = scratch + 2 * AES_BLOCK_LEN; 100 a = scratch + 2 * AES_BLOCK_LEN;
102 b_0 = scratch + 3 * AES_BLOCK_LEN; 101 b_0 = scratch + 3 * AES_BLOCK_LEN;
103 aad = scratch + 4 * AES_BLOCK_LEN;
104 102
105 num_blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN); 103 num_blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN);
106 last_len = data_len % AES_BLOCK_LEN; 104 last_len = data_len % AES_BLOCK_LEN;
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 0c9d0c07eae6..9c0d76cdca92 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -63,7 +63,8 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
63 63
64 lockdep_assert_held(&sta->ampdu_mlme.mtx); 64 lockdep_assert_held(&sta->ampdu_mlme.mtx);
65 65
66 tid_rx = sta->ampdu_mlme.tid_rx[tid]; 66 tid_rx = rcu_dereference_protected(sta->ampdu_mlme.tid_rx[tid],
67 lockdep_is_held(&sta->ampdu_mlme.mtx));
67 68
68 if (!tid_rx) 69 if (!tid_rx)
69 return; 70 return;
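Note: rcu_dereference_protected() is the correct accessor for reading an RCU-managed pointer on the update side, under the lock that serializes writers: it omits the read-side barrier of rcu_dereference() and, with lockdep enabled, verifies that the claimed lock really is held. A sketch of the annotation, structure hypothetical:

    /* Sketch: update-side read of an RCU pointer under its mutex. */
    struct agg_state {
            struct mutex mtx;
            struct item __rcu *slot;
    };

    static void stop_slot(struct agg_state *st)
    {
            struct item *it;

            lockdep_assert_held(&st->mtx);

            /* No rcu_read_lock() needed: mtx excludes all writers. */
            it = rcu_dereference_protected(st->slot,
                                           lockdep_is_held(&st->mtx));
            if (!it)
                    return;
            /* ... tear the item down ... */
    }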
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 53defafb9aae..c8be8eff70da 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -136,24 +136,35 @@ void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u1
136 ieee80211_tx_skb(sdata, skb); 136 ieee80211_tx_skb(sdata, skb);
137} 137}
138 138
139void ieee80211_assign_tid_tx(struct sta_info *sta, int tid,
140 struct tid_ampdu_tx *tid_tx)
141{
142 lockdep_assert_held(&sta->ampdu_mlme.mtx);
143 lockdep_assert_held(&sta->lock);
144 rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], tid_tx);
145}
146
139int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, 147int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
140 enum ieee80211_back_parties initiator, 148 enum ieee80211_back_parties initiator,
141 bool tx) 149 bool tx)
142{ 150{
143 struct ieee80211_local *local = sta->local; 151 struct ieee80211_local *local = sta->local;
144 struct tid_ampdu_tx *tid_tx = sta->ampdu_mlme.tid_tx[tid]; 152 struct tid_ampdu_tx *tid_tx;
145 int ret; 153 int ret;
146 154
147 lockdep_assert_held(&sta->ampdu_mlme.mtx); 155 lockdep_assert_held(&sta->ampdu_mlme.mtx);
148 156
149 if (!tid_tx)
150 return -ENOENT;
151
152 spin_lock_bh(&sta->lock); 157 spin_lock_bh(&sta->lock);
153 158
159 tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
160 if (!tid_tx) {
161 spin_unlock_bh(&sta->lock);
162 return -ENOENT;
163 }
164
154 if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) { 165 if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
155 /* not even started yet! */ 166 /* not even started yet! */
156 rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], NULL); 167 ieee80211_assign_tid_tx(sta, tid, NULL);
157 spin_unlock_bh(&sta->lock); 168 spin_unlock_bh(&sta->lock);
158 kfree_rcu(tid_tx, rcu_head); 169 kfree_rcu(tid_tx, rcu_head);
159 return 0; 170 return 0;
@@ -275,13 +286,13 @@ ieee80211_wake_queue_agg(struct ieee80211_local *local, int tid)
275 286
276void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid) 287void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
277{ 288{
278 struct tid_ampdu_tx *tid_tx = sta->ampdu_mlme.tid_tx[tid]; 289 struct tid_ampdu_tx *tid_tx;
279 struct ieee80211_local *local = sta->local; 290 struct ieee80211_local *local = sta->local;
280 struct ieee80211_sub_if_data *sdata = sta->sdata; 291 struct ieee80211_sub_if_data *sdata = sta->sdata;
281 u16 start_seq_num; 292 u16 start_seq_num;
282 int ret; 293 int ret;
283 294
284 lockdep_assert_held(&sta->ampdu_mlme.mtx); 295 tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
285 296
286 /* 297 /*
287 * While we're asking the driver about the aggregation, 298 * While we're asking the driver about the aggregation,
@@ -310,7 +321,7 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
310 " tid %d\n", tid); 321 " tid %d\n", tid);
311#endif 322#endif
312 spin_lock_bh(&sta->lock); 323 spin_lock_bh(&sta->lock);
313 rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], NULL); 324 ieee80211_assign_tid_tx(sta, tid, NULL);
314 spin_unlock_bh(&sta->lock); 325 spin_unlock_bh(&sta->lock);
315 326
316 ieee80211_wake_queue_agg(local, tid); 327 ieee80211_wake_queue_agg(local, tid);
@@ -388,9 +399,9 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
388 goto err_unlock_sta; 399 goto err_unlock_sta;
389 } 400 }
390 401
391 tid_tx = sta->ampdu_mlme.tid_tx[tid]; 402 tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
392 /* check if the TID is not in aggregation flow already */ 403 /* check if the TID is not in aggregation flow already */
393 if (tid_tx) { 404 if (tid_tx || sta->ampdu_mlme.tid_start_tx[tid]) {
394#ifdef CONFIG_MAC80211_HT_DEBUG 405#ifdef CONFIG_MAC80211_HT_DEBUG
395 printk(KERN_DEBUG "BA request denied - session is not " 406 printk(KERN_DEBUG "BA request denied - session is not "
396 "idle on tid %u\n", tid); 407 "idle on tid %u\n", tid);
@@ -425,8 +436,11 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
425 sta->ampdu_mlme.dialog_token_allocator++; 436 sta->ampdu_mlme.dialog_token_allocator++;
426 tid_tx->dialog_token = sta->ampdu_mlme.dialog_token_allocator; 437 tid_tx->dialog_token = sta->ampdu_mlme.dialog_token_allocator;
427 438
428 /* finally, assign it to the array */ 439 /*
429 rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], tid_tx); 440 * Finally, assign it to the start array; the work item will
441 * collect it and move it to the normal array.
442 */
443 sta->ampdu_mlme.tid_start_tx[tid] = tid_tx;
430 444
431 ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work); 445 ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
432 446
@@ -472,16 +486,19 @@ ieee80211_agg_splice_finish(struct ieee80211_local *local, u16 tid)
472static void ieee80211_agg_tx_operational(struct ieee80211_local *local, 486static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
473 struct sta_info *sta, u16 tid) 487 struct sta_info *sta, u16 tid)
474{ 488{
489 struct tid_ampdu_tx *tid_tx;
490
475 lockdep_assert_held(&sta->ampdu_mlme.mtx); 491 lockdep_assert_held(&sta->ampdu_mlme.mtx);
476 492
493 tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
494
477#ifdef CONFIG_MAC80211_HT_DEBUG 495#ifdef CONFIG_MAC80211_HT_DEBUG
478 printk(KERN_DEBUG "Aggregation is on for tid %d\n", tid); 496 printk(KERN_DEBUG "Aggregation is on for tid %d\n", tid);
479#endif 497#endif
480 498
481 drv_ampdu_action(local, sta->sdata, 499 drv_ampdu_action(local, sta->sdata,
482 IEEE80211_AMPDU_TX_OPERATIONAL, 500 IEEE80211_AMPDU_TX_OPERATIONAL,
483 &sta->sta, tid, NULL, 501 &sta->sta, tid, NULL, tid_tx->buf_size);
484 sta->ampdu_mlme.tid_tx[tid]->buf_size);
485 502
486 /* 503 /*
487 * synchronize with TX path, while splicing the TX path 504 * synchronize with TX path, while splicing the TX path
@@ -489,13 +506,13 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
489 */ 506 */
490 spin_lock_bh(&sta->lock); 507 spin_lock_bh(&sta->lock);
491 508
492 ieee80211_agg_splice_packets(local, sta->ampdu_mlme.tid_tx[tid], tid); 509 ieee80211_agg_splice_packets(local, tid_tx, tid);
493 /* 510 /*
494 * Now mark as operational. This will be visible 511 * Now mark as operational. This will be visible
495 * in the TX path, and lets it go lock-free in 512 * in the TX path, and lets it go lock-free in
496 * the common case. 513 * the common case.
497 */ 514 */
498 set_bit(HT_AGG_STATE_OPERATIONAL, &sta->ampdu_mlme.tid_tx[tid]->state); 515 set_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);
499 ieee80211_agg_splice_finish(local, tid); 516 ieee80211_agg_splice_finish(local, tid);
500 517
501 spin_unlock_bh(&sta->lock); 518 spin_unlock_bh(&sta->lock);
@@ -529,7 +546,7 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
529 } 546 }
530 547
531 mutex_lock(&sta->ampdu_mlme.mtx); 548 mutex_lock(&sta->ampdu_mlme.mtx);
532 tid_tx = sta->ampdu_mlme.tid_tx[tid]; 549 tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
533 550
534 if (WARN_ON(!tid_tx)) { 551 if (WARN_ON(!tid_tx)) {
535#ifdef CONFIG_MAC80211_HT_DEBUG 552#ifdef CONFIG_MAC80211_HT_DEBUG
@@ -607,7 +624,7 @@ int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
607 return -EINVAL; 624 return -EINVAL;
608 625
609 spin_lock_bh(&sta->lock); 626 spin_lock_bh(&sta->lock);
610 tid_tx = sta->ampdu_mlme.tid_tx[tid]; 627 tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
611 628
612 if (!tid_tx) { 629 if (!tid_tx) {
613 ret = -ENOENT; 630 ret = -ENOENT;
@@ -663,7 +680,7 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
663 680
664 mutex_lock(&sta->ampdu_mlme.mtx); 681 mutex_lock(&sta->ampdu_mlme.mtx);
665 spin_lock_bh(&sta->lock); 682 spin_lock_bh(&sta->lock);
666 tid_tx = sta->ampdu_mlme.tid_tx[tid]; 683 tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
667 684
668 if (!tid_tx || !test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) { 685 if (!tid_tx || !test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
669#ifdef CONFIG_MAC80211_HT_DEBUG 686#ifdef CONFIG_MAC80211_HT_DEBUG
@@ -689,7 +706,7 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
689 ieee80211_agg_splice_packets(local, tid_tx, tid); 706 ieee80211_agg_splice_packets(local, tid_tx, tid);
690 707
691 /* future packets must not find the tid_tx struct any more */ 708 /* future packets must not find the tid_tx struct any more */
692 rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], NULL); 709 ieee80211_assign_tid_tx(sta, tid, NULL);
693 710
694 ieee80211_agg_splice_finish(local, tid); 711 ieee80211_agg_splice_finish(local, tid);
695 712
@@ -744,7 +761,7 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
744 761
745 mutex_lock(&sta->ampdu_mlme.mtx); 762 mutex_lock(&sta->ampdu_mlme.mtx);
746 763
747 tid_tx = sta->ampdu_mlme.tid_tx[tid]; 764 tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
748 if (!tid_tx) 765 if (!tid_tx)
749 goto out; 766 goto out;
750 767
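Note: two things change in agg-tx.c. Writes to the per-TID pointer are funneled through ieee80211_assign_tid_tx(), which asserts both locks that writers must hold before doing the rcu_assign_pointer(); and readers that used to sample the pointer before taking sta->lock now re-read it under the lock, closing the window in which it could be cleared. A sketch of such a checked setter, names hypothetical:

    /* Sketch: one checked setter instead of open-coded assignments. */
    static void assign_tid_ptr(struct peer *p, int tid,
                               struct tid_state *ts)
    {
            /* Every writer must hold both locks; assert it once
             * here rather than trusting each call site. */
            lockdep_assert_held(&p->mtx);
            lockdep_assert_held(&p->lock);
            rcu_assign_pointer(p->tid_state[tid], ts);
    }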
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 44049733c4ea..be70c70d3f5b 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -136,7 +136,10 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
136 mutex_lock(&sdata->local->sta_mtx); 136 mutex_lock(&sdata->local->sta_mtx);
137 137
138 if (mac_addr) { 138 if (mac_addr) {
139 sta = sta_info_get_bss(sdata, mac_addr); 139 if (ieee80211_vif_is_mesh(&sdata->vif))
140 sta = sta_info_get(sdata, mac_addr);
141 else
142 sta = sta_info_get_bss(sdata, mac_addr);
140 if (!sta) { 143 if (!sta) {
141 ieee80211_key_free(sdata->local, key); 144 ieee80211_key_free(sdata->local, key);
142 err = -ENOENT; 145 err = -ENOENT;
@@ -157,13 +160,14 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
157static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev, 160static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev,
158 u8 key_idx, bool pairwise, const u8 *mac_addr) 161 u8 key_idx, bool pairwise, const u8 *mac_addr)
159{ 162{
160 struct ieee80211_sub_if_data *sdata; 163 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
164 struct ieee80211_local *local = sdata->local;
161 struct sta_info *sta; 165 struct sta_info *sta;
166 struct ieee80211_key *key = NULL;
162 int ret; 167 int ret;
163 168
164 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 169 mutex_lock(&local->sta_mtx);
165 170 mutex_lock(&local->key_mtx);
166 mutex_lock(&sdata->local->sta_mtx);
167 171
168 if (mac_addr) { 172 if (mac_addr) {
169 ret = -ENOENT; 173 ret = -ENOENT;
@@ -172,33 +176,24 @@ static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev,
172 if (!sta) 176 if (!sta)
173 goto out_unlock; 177 goto out_unlock;
174 178
175 if (pairwise) { 179 if (pairwise)
176 if (sta->ptk) { 180 key = key_mtx_dereference(local, sta->ptk);
177 ieee80211_key_free(sdata->local, sta->ptk); 181 else
178 ret = 0; 182 key = key_mtx_dereference(local, sta->gtk[key_idx]);
179 } 183 } else
180 } else { 184 key = key_mtx_dereference(local, sdata->keys[key_idx]);
181 if (sta->gtk[key_idx]) {
182 ieee80211_key_free(sdata->local,
183 sta->gtk[key_idx]);
184 ret = 0;
185 }
186 }
187
188 goto out_unlock;
189 }
190 185
191 if (!sdata->keys[key_idx]) { 186 if (!key) {
192 ret = -ENOENT; 187 ret = -ENOENT;
193 goto out_unlock; 188 goto out_unlock;
194 } 189 }
195 190
196 ieee80211_key_free(sdata->local, sdata->keys[key_idx]); 191 __ieee80211_key_free(key);
197 WARN_ON(sdata->keys[key_idx]);
198 192
199 ret = 0; 193 ret = 0;
200 out_unlock: 194 out_unlock:
201 mutex_unlock(&sdata->local->sta_mtx); 195 mutex_unlock(&local->key_mtx);
196 mutex_unlock(&local->sta_mtx);
202 197
203 return ret; 198 return ret;
204} 199}
@@ -228,11 +223,11 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
228 goto out; 223 goto out;
229 224
230 if (pairwise) 225 if (pairwise)
231 key = sta->ptk; 226 key = rcu_dereference(sta->ptk);
232 else if (key_idx < NUM_DEFAULT_KEYS) 227 else if (key_idx < NUM_DEFAULT_KEYS)
233 key = sta->gtk[key_idx]; 228 key = rcu_dereference(sta->gtk[key_idx]);
234 } else 229 } else
235 key = sdata->keys[key_idx]; 230 key = rcu_dereference(sdata->keys[key_idx]);
236 231
237 if (!key) 232 if (!key)
238 goto out; 233 goto out;
@@ -330,6 +325,7 @@ static void rate_idx_to_bitrate(struct rate_info *rate, struct sta_info *sta, in
330static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) 325static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
331{ 326{
332 struct ieee80211_sub_if_data *sdata = sta->sdata; 327 struct ieee80211_sub_if_data *sdata = sta->sdata;
328 struct timespec uptime;
333 329
334 sinfo->generation = sdata->local->sta_generation; 330 sinfo->generation = sdata->local->sta_generation;
335 331
@@ -342,7 +338,12 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
342 STATION_INFO_TX_FAILED | 338 STATION_INFO_TX_FAILED |
343 STATION_INFO_TX_BITRATE | 339 STATION_INFO_TX_BITRATE |
344 STATION_INFO_RX_BITRATE | 340 STATION_INFO_RX_BITRATE |
345 STATION_INFO_RX_DROP_MISC; 341 STATION_INFO_RX_DROP_MISC |
342 STATION_INFO_BSS_PARAM |
343 STATION_INFO_CONNECTED_TIME;
344
345 do_posix_clock_monotonic_gettime(&uptime);
346 sinfo->connected_time = uptime.tv_sec - sta->last_connected;
346 347
347 sinfo->inactive_time = jiffies_to_msecs(jiffies - sta->last_rx); 348 sinfo->inactive_time = jiffies_to_msecs(jiffies - sta->last_rx);
348 sinfo->rx_bytes = sta->rx_bytes; 349 sinfo->rx_bytes = sta->rx_bytes;
@@ -389,6 +390,16 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
389 sinfo->plink_state = sta->plink_state; 390 sinfo->plink_state = sta->plink_state;
390#endif 391#endif
391 } 392 }
393
394 sinfo->bss_param.flags = 0;
395 if (sdata->vif.bss_conf.use_cts_prot)
396 sinfo->bss_param.flags |= BSS_PARAM_FLAGS_CTS_PROT;
397 if (sdata->vif.bss_conf.use_short_preamble)
398 sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_PREAMBLE;
399 if (sdata->vif.bss_conf.use_short_slot)
400 sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_SLOT_TIME;
401 sinfo->bss_param.dtim_period = sdata->local->hw.conf.ps_dtim_period;
402 sinfo->bss_param.beacon_interval = sdata->vif.bss_conf.beacon_int;
392} 403}
393 404
394 405
@@ -452,7 +463,7 @@ static int ieee80211_config_beacon(struct ieee80211_sub_if_data *sdata,
452 int size; 463 int size;
453 int err = -EINVAL; 464 int err = -EINVAL;
454 465
455 old = sdata->u.ap.beacon; 466 old = rtnl_dereference(sdata->u.ap.beacon);
456 467
457 /* head must not be zero-length */ 468 /* head must not be zero-length */
458 if (params->head && !params->head_len) 469 if (params->head && !params->head_len)
@@ -547,8 +558,7 @@ static int ieee80211_add_beacon(struct wiphy *wiphy, struct net_device *dev,
547 558
548 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 559 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
549 560
550 old = sdata->u.ap.beacon; 561 old = rtnl_dereference(sdata->u.ap.beacon);
551
552 if (old) 562 if (old)
553 return -EALREADY; 563 return -EALREADY;
554 564
@@ -563,8 +573,7 @@ static int ieee80211_set_beacon(struct wiphy *wiphy, struct net_device *dev,
563 573
564 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 574 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
565 575
566 old = sdata->u.ap.beacon; 576 old = rtnl_dereference(sdata->u.ap.beacon);
567
568 if (!old) 577 if (!old)
569 return -ENOENT; 578 return -ENOENT;
570 579
@@ -578,8 +587,7 @@ static int ieee80211_del_beacon(struct wiphy *wiphy, struct net_device *dev)
578 587
579 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 588 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
580 589
581 old = sdata->u.ap.beacon; 590 old = rtnl_dereference(sdata->u.ap.beacon);
582
583 if (!old) 591 if (!old)
584 return -ENOENT; 592 return -ENOENT;
585 593
@@ -675,6 +683,12 @@ static void sta_apply_parameters(struct ieee80211_local *local,
675 if (set & BIT(NL80211_STA_FLAG_MFP)) 683 if (set & BIT(NL80211_STA_FLAG_MFP))
676 sta->flags |= WLAN_STA_MFP; 684 sta->flags |= WLAN_STA_MFP;
677 } 685 }
686
687 if (mask & BIT(NL80211_STA_FLAG_AUTHENTICATED)) {
688 sta->flags &= ~WLAN_STA_AUTH;
689 if (set & BIT(NL80211_STA_FLAG_AUTHENTICATED))
690 sta->flags |= WLAN_STA_AUTH;
691 }
678 spin_unlock_irqrestore(&sta->flaglock, flags); 692 spin_unlock_irqrestore(&sta->flaglock, flags);
679 693
680 /* 694 /*
@@ -712,15 +726,29 @@ static void sta_apply_parameters(struct ieee80211_local *local,
712 params->ht_capa, 726 params->ht_capa,
713 &sta->sta.ht_cap); 727 &sta->sta.ht_cap);
714 728
715 if (ieee80211_vif_is_mesh(&sdata->vif) && params->plink_action) { 729 if (ieee80211_vif_is_mesh(&sdata->vif)) {
716 switch (params->plink_action) { 730#ifdef CONFIG_MAC80211_MESH
717 case PLINK_ACTION_OPEN: 731 if (sdata->u.mesh.security & IEEE80211_MESH_SEC_SECURED)
718 mesh_plink_open(sta); 732 switch (params->plink_state) {
719 break; 733 case NL80211_PLINK_LISTEN:
720 case PLINK_ACTION_BLOCK: 734 case NL80211_PLINK_ESTAB:
721 mesh_plink_block(sta); 735 case NL80211_PLINK_BLOCKED:
722 break; 736 sta->plink_state = params->plink_state;
723 } 737 break;
738 default:
739 /* nothing */
740 break;
741 }
742 else
743 switch (params->plink_action) {
744 case PLINK_ACTION_OPEN:
745 mesh_plink_open(sta);
746 break;
747 case PLINK_ACTION_BLOCK:
748 mesh_plink_block(sta);
749 break;
750 }
751#endif
724 } 752 }
725} 753}
726 754
@@ -921,8 +949,10 @@ static int ieee80211_change_mpath(struct wiphy *wiphy,
921static void mpath_set_pinfo(struct mesh_path *mpath, u8 *next_hop, 949static void mpath_set_pinfo(struct mesh_path *mpath, u8 *next_hop,
922 struct mpath_info *pinfo) 950 struct mpath_info *pinfo)
923{ 951{
924 if (mpath->next_hop) 952 struct sta_info *next_hop_sta = rcu_dereference(mpath->next_hop);
925 memcpy(next_hop, mpath->next_hop->sta.addr, ETH_ALEN); 953
954 if (next_hop_sta)
955 memcpy(next_hop, next_hop_sta->sta.addr, ETH_ALEN);
926 else 956 else
927 memset(next_hop, 0, ETH_ALEN); 957 memset(next_hop, 0, ETH_ALEN);
928 958
@@ -1023,26 +1053,30 @@ static int copy_mesh_setup(struct ieee80211_if_mesh *ifmsh,
1023 u8 *new_ie; 1053 u8 *new_ie;
1024 const u8 *old_ie; 1054 const u8 *old_ie;
1025 1055
1026 /* first allocate the new vendor information element */ 1056 /* allocate information elements */
1027 new_ie = NULL; 1057 new_ie = NULL;
1028 old_ie = ifmsh->vendor_ie; 1058 old_ie = ifmsh->ie;
1029 1059
1030 ifmsh->vendor_ie_len = setup->vendor_ie_len; 1060 if (setup->ie_len) {
1031 if (setup->vendor_ie_len) { 1061 new_ie = kmemdup(setup->ie, setup->ie_len,
1032 new_ie = kmemdup(setup->vendor_ie, setup->vendor_ie_len,
1033 GFP_KERNEL); 1062 GFP_KERNEL);
1034 if (!new_ie) 1063 if (!new_ie)
1035 return -ENOMEM; 1064 return -ENOMEM;
1036 } 1065 }
1066 ifmsh->ie_len = setup->ie_len;
1067 ifmsh->ie = new_ie;
1068 kfree(old_ie);
1037 1069
1038 /* now copy the rest of the setup parameters */ 1070 /* now copy the rest of the setup parameters */
1039 ifmsh->mesh_id_len = setup->mesh_id_len; 1071 ifmsh->mesh_id_len = setup->mesh_id_len;
1040 memcpy(ifmsh->mesh_id, setup->mesh_id, ifmsh->mesh_id_len); 1072 memcpy(ifmsh->mesh_id, setup->mesh_id, ifmsh->mesh_id_len);
1041 ifmsh->mesh_pp_id = setup->path_sel_proto; 1073 ifmsh->mesh_pp_id = setup->path_sel_proto;
1042 ifmsh->mesh_pm_id = setup->path_metric; 1074 ifmsh->mesh_pm_id = setup->path_metric;
1043 ifmsh->vendor_ie = new_ie; 1075 ifmsh->security = IEEE80211_MESH_SEC_NONE;
1044 1076 if (setup->is_authenticated)
1045 kfree(old_ie); 1077 ifmsh->security |= IEEE80211_MESH_SEC_AUTHED;
1078 if (setup->is_secure)
1079 ifmsh->security |= IEEE80211_MESH_SEC_SECURED;
1046 1080
1047 return 0; 1081 return 0;
1048} 1082}
@@ -1275,9 +1309,10 @@ static int ieee80211_set_channel(struct wiphy *wiphy,
1275} 1309}
1276 1310
1277#ifdef CONFIG_PM 1311#ifdef CONFIG_PM
1278static int ieee80211_suspend(struct wiphy *wiphy) 1312static int ieee80211_suspend(struct wiphy *wiphy,
1313 struct cfg80211_wowlan *wowlan)
1279{ 1314{
1280 return __ieee80211_suspend(wiphy_priv(wiphy)); 1315 return __ieee80211_suspend(wiphy_priv(wiphy), wowlan);
1281} 1316}
1282 1317
1283static int ieee80211_resume(struct wiphy *wiphy) 1318static int ieee80211_resume(struct wiphy *wiphy)
@@ -1320,6 +1355,30 @@ static int ieee80211_scan(struct wiphy *wiphy,
1320 return ieee80211_request_scan(sdata, req); 1355 return ieee80211_request_scan(sdata, req);
1321} 1356}
1322 1357
1358static int
1359ieee80211_sched_scan_start(struct wiphy *wiphy,
1360 struct net_device *dev,
1361 struct cfg80211_sched_scan_request *req)
1362{
1363 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1364
1365 if (!sdata->local->ops->sched_scan_start)
1366 return -EOPNOTSUPP;
1367
1368 return ieee80211_request_sched_scan_start(sdata, req);
1369}
1370
1371static int
1372ieee80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev)
1373{
1374 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1375
1376 if (!sdata->local->ops->sched_scan_stop)
1377 return -EOPNOTSUPP;
1378
1379 return ieee80211_request_sched_scan_stop(sdata);
1380}
1381
1323static int ieee80211_auth(struct wiphy *wiphy, struct net_device *dev, 1382static int ieee80211_auth(struct wiphy *wiphy, struct net_device *dev,
1324 struct cfg80211_auth_request *req) 1383 struct cfg80211_auth_request *req)
1325{ 1384{
@@ -1611,16 +1670,13 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
1611{ 1670{
1612 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1671 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1613 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1672 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1614 int i; 1673 int i, ret;
1615
1616 /*
1617 * This _could_ be supported by providing a hook for
1618 * drivers for this function, but at this point it
1619 * doesn't seem worth bothering.
1620 */
1621 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)
1622 return -EOPNOTSUPP;
1623 1674
1675 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
1676 ret = drv_set_bitrate_mask(local, sdata, mask);
1677 if (ret)
1678 return ret;
1679 }
1624 1680
1625 for (i = 0; i < IEEE80211_NUM_BANDS; i++) 1681 for (i = 0; i < IEEE80211_NUM_BANDS; i++)
1626 sdata->rc_rateidx_mask[i] = mask->control[i].legacy; 1682 sdata->rc_rateidx_mask[i] = mask->control[i].legacy;
@@ -2064,6 +2120,8 @@ struct cfg80211_ops mac80211_config_ops = {
2064 .suspend = ieee80211_suspend, 2120 .suspend = ieee80211_suspend,
2065 .resume = ieee80211_resume, 2121 .resume = ieee80211_resume,
2066 .scan = ieee80211_scan, 2122 .scan = ieee80211_scan,
2123 .sched_scan_start = ieee80211_sched_scan_start,
2124 .sched_scan_stop = ieee80211_sched_scan_stop,
2067 .auth = ieee80211_auth, 2125 .auth = ieee80211_auth,
2068 .assoc = ieee80211_assoc, 2126 .assoc = ieee80211_assoc,
2069 .deauth = ieee80211_deauth, 2127 .deauth = ieee80211_deauth,
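Note: the new sched_scan entry points use the common optional-op pattern: probe for the driver callback and fail with -EOPNOTSUPP before any state is touched, so cfg80211 gets a clean capability error instead of a half-started scan. In miniature, names hypothetical:

    /* Sketch: gate an optional driver callback. */
    static int start_feature(struct dev_priv *priv)
    {
            if (!priv->ops->feature_start)  /* driver lacks the op */
                    return -EOPNOTSUPP;

            return priv->ops->feature_start(priv);
    }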
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 51f0d780dafa..186e02f7cc32 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -37,7 +37,7 @@ int mac80211_format_buffer(char __user *userbuf, size_t count,
37 return simple_read_from_buffer(userbuf, count, ppos, buf, res); 37 return simple_read_from_buffer(userbuf, count, ppos, buf, res);
38} 38}
39 39
40#define DEBUGFS_READONLY_FILE(name, fmt, value...) \ 40#define DEBUGFS_READONLY_FILE_FN(name, fmt, value...) \
41static ssize_t name## _read(struct file *file, char __user *userbuf, \ 41static ssize_t name## _read(struct file *file, char __user *userbuf, \
42 size_t count, loff_t *ppos) \ 42 size_t count, loff_t *ppos) \
43{ \ 43{ \
@@ -45,14 +45,19 @@ static ssize_t name## _read(struct file *file, char __user *userbuf, \
45 \ 45 \
46 return mac80211_format_buffer(userbuf, count, ppos, \ 46 return mac80211_format_buffer(userbuf, count, ppos, \
47 fmt "\n", ##value); \ 47 fmt "\n", ##value); \
48} \ 48}
49 \ 49
50#define DEBUGFS_READONLY_FILE_OPS(name) \
50static const struct file_operations name## _ops = { \ 51static const struct file_operations name## _ops = { \
51 .read = name## _read, \ 52 .read = name## _read, \
52 .open = mac80211_open_file_generic, \ 53 .open = mac80211_open_file_generic, \
53 .llseek = generic_file_llseek, \ 54 .llseek = generic_file_llseek, \
54}; 55};
55 56
57#define DEBUGFS_READONLY_FILE(name, fmt, value...) \
58 DEBUGFS_READONLY_FILE_FN(name, fmt, value) \
59 DEBUGFS_READONLY_FILE_OPS(name)
60
56#define DEBUGFS_ADD(name) \ 61#define DEBUGFS_ADD(name) \
57 debugfs_create_file(#name, 0400, phyd, local, &name## _ops); 62 debugfs_create_file(#name, 0400, phyd, local, &name## _ops);
58 63
@@ -130,7 +135,7 @@ static ssize_t reset_write(struct file *file, const char __user *user_buf,
130 struct ieee80211_local *local = file->private_data; 135 struct ieee80211_local *local = file->private_data;
131 136
132 rtnl_lock(); 137 rtnl_lock();
133 __ieee80211_suspend(&local->hw); 138 __ieee80211_suspend(&local->hw, NULL);
134 __ieee80211_resume(&local->hw); 139 __ieee80211_resume(&local->hw);
135 rtnl_unlock(); 140 rtnl_unlock();
136 141
@@ -291,11 +296,70 @@ static ssize_t channel_type_read(struct file *file, char __user *user_buf,
291 return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf)); 296 return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
292} 297}
293 298
294static const struct file_operations channel_type_ops = { 299static ssize_t hwflags_read(struct file *file, char __user *user_buf,
295 .read = channel_type_read, 300 size_t count, loff_t *ppos)
296 .open = mac80211_open_file_generic, 301{
297 .llseek = default_llseek, 302 struct ieee80211_local *local = file->private_data;
298}; 303 int mxln = 500;
304 ssize_t rv;
305 char *buf = kzalloc(mxln, GFP_KERNEL);
306 int sf = 0; /* how many written so far */
307
308 sf += snprintf(buf, mxln - sf, "0x%x\n", local->hw.flags);
309 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)
310 sf += snprintf(buf + sf, mxln - sf, "HAS_RATE_CONTROL\n");
311 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
312 sf += snprintf(buf + sf, mxln - sf, "RX_INCLUDES_FCS\n");
313 if (local->hw.flags & IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING)
314 sf += snprintf(buf + sf, mxln - sf,
315 "HOST_BCAST_PS_BUFFERING\n");
316 if (local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE)
317 sf += snprintf(buf + sf, mxln - sf,
318 "2GHZ_SHORT_SLOT_INCAPABLE\n");
319 if (local->hw.flags & IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE)
320 sf += snprintf(buf + sf, mxln - sf,
321 "2GHZ_SHORT_PREAMBLE_INCAPABLE\n");
322 if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)
323 sf += snprintf(buf + sf, mxln - sf, "SIGNAL_UNSPEC\n");
324 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
325 sf += snprintf(buf + sf, mxln - sf, "SIGNAL_DBM\n");
326 if (local->hw.flags & IEEE80211_HW_NEED_DTIM_PERIOD)
327 sf += snprintf(buf + sf, mxln - sf, "NEED_DTIM_PERIOD\n");
328 if (local->hw.flags & IEEE80211_HW_SPECTRUM_MGMT)
329 sf += snprintf(buf + sf, mxln - sf, "SPECTRUM_MGMT\n");
330 if (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)
331 sf += snprintf(buf + sf, mxln - sf, "AMPDU_AGGREGATION\n");
332 if (local->hw.flags & IEEE80211_HW_SUPPORTS_PS)
333 sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_PS\n");
334 if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
335 sf += snprintf(buf + sf, mxln - sf, "PS_NULLFUNC_STACK\n");
336 if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)
337 sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_DYNAMIC_PS\n");
338 if (local->hw.flags & IEEE80211_HW_MFP_CAPABLE)
339 sf += snprintf(buf + sf, mxln - sf, "MFP_CAPABLE\n");
340 if (local->hw.flags & IEEE80211_HW_BEACON_FILTER)
341 sf += snprintf(buf + sf, mxln - sf, "BEACON_FILTER\n");
342 if (local->hw.flags & IEEE80211_HW_SUPPORTS_STATIC_SMPS)
343 sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_STATIC_SMPS\n");
344 if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS)
345 sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_DYNAMIC_SMPS\n");
346 if (local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)
347 sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_UAPSD\n");
348 if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
349 sf += snprintf(buf + sf, mxln - sf, "REPORTS_TX_ACK_STATUS\n");
350 if (local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
351 sf += snprintf(buf + sf, mxln - sf, "CONNECTION_MONITOR\n");
352 if (local->hw.flags & IEEE80211_HW_SUPPORTS_CQM_RSSI)
353 sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_CQM_RSSI\n");
354 if (local->hw.flags & IEEE80211_HW_SUPPORTS_PER_STA_GTK)
355 sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_PER_STA_GTK\n");
356 if (local->hw.flags & IEEE80211_HW_AP_LINK_PS)
357 sf += snprintf(buf + sf, mxln - sf, "AP_LINK_PS\n");
358
359 rv = simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
360 kfree(buf);
361 return rv;
362}
299 363
300static ssize_t queues_read(struct file *file, char __user *user_buf, 364static ssize_t queues_read(struct file *file, char __user *user_buf,
301 size_t count, loff_t *ppos) 365 size_t count, loff_t *ppos)
@@ -315,11 +379,9 @@ static ssize_t queues_read(struct file *file, char __user *user_buf,
315 return simple_read_from_buffer(user_buf, count, ppos, buf, res); 379 return simple_read_from_buffer(user_buf, count, ppos, buf, res);
316} 380}
317 381
318static const struct file_operations queues_ops = { 382DEBUGFS_READONLY_FILE_OPS(hwflags);
319 .read = queues_read, 383DEBUGFS_READONLY_FILE_OPS(channel_type);
320 .open = mac80211_open_file_generic, 384DEBUGFS_READONLY_FILE_OPS(queues);
321 .llseek = default_llseek,
322};
323 385
324/* statistics stuff */ 386/* statistics stuff */
325 387
@@ -395,6 +457,7 @@ void debugfs_hw_add(struct ieee80211_local *local)
395 DEBUGFS_ADD(uapsd_queues); 457 DEBUGFS_ADD(uapsd_queues);
396 DEBUGFS_ADD(uapsd_max_sp_len); 458 DEBUGFS_ADD(uapsd_max_sp_len);
397 DEBUGFS_ADD(channel_type); 459 DEBUGFS_ADD(channel_type);
460 DEBUGFS_ADD(hwflags);
398 DEBUGFS_ADD(user_power); 461 DEBUGFS_ADD(user_power);
399 DEBUGFS_ADD(power); 462 DEBUGFS_ADD(power);
400 463
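Note: hwflags_read() builds its output by repeated snprintf() calls into one buffer, advancing a "written so far" offset; the kernel code sizes the buffer generously. Since snprintf() returns the length that would have been written, the offset can pass the buffer size after truncation, so the defensive rendering below also guards each append. A small plain-C sketch of the pattern, flag names hypothetical:

    /* Sketch (userspace C): snprintf accumulation with a guard. */
    #include <stdio.h>

    #define F_RATE_CTRL  0x1
    #define F_AMPDU      0x2

    static size_t append(char *buf, size_t len, size_t sf, const char *s)
    {
            if (sf >= len)                  /* buffer exhausted: stop */
                    return sf;
            return sf + snprintf(buf + sf, len - sf, "%s", s);
    }

    static size_t format_flags(char *buf, size_t len, unsigned int flags)
    {
            char hdr[32];
            size_t sf = 0;

            snprintf(hdr, sizeof(hdr), "0x%x\n", flags);
            sf = append(buf, len, sf, hdr);
            if (flags & F_RATE_CTRL)
                    sf = append(buf, len, sf, "HAS_RATE_CONTROL\n");
            if (flags & F_AMPDU)
                    sf = append(buf, len, sf, "AMPDU_AGGREGATION\n");
            return sf;
    }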
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index f7ef3477c24a..33c58b85c911 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -241,16 +241,12 @@ void ieee80211_debugfs_key_add(struct ieee80211_key *key)
241 if (!key->debugfs.dir) 241 if (!key->debugfs.dir)
242 return; 242 return;
243 243
244 rcu_read_lock(); 244 sta = key->sta;
245 sta = rcu_dereference(key->sta); 245 if (sta) {
246 if (sta)
247 sprintf(buf, "../../stations/%pM", sta->sta.addr); 246 sprintf(buf, "../../stations/%pM", sta->sta.addr);
248 rcu_read_unlock();
249
250 /* using sta as a boolean is fine outside RCU lock */
251 if (sta)
252 key->debugfs.stalink = 247 key->debugfs.stalink =
253 debugfs_create_symlink("station", key->debugfs.dir, buf); 248 debugfs_create_symlink("station", key->debugfs.dir, buf);
249 }
254 250
255 DEBUGFS_ADD(keylen); 251 DEBUGFS_ADD(keylen);
256 DEBUGFS_ADD(flags); 252 DEBUGFS_ADD(flags);
@@ -286,7 +282,8 @@ void ieee80211_debugfs_key_update_default(struct ieee80211_sub_if_data *sdata)
286 lockdep_assert_held(&sdata->local->key_mtx); 282 lockdep_assert_held(&sdata->local->key_mtx);
287 283
288 if (sdata->default_unicast_key) { 284 if (sdata->default_unicast_key) {
289 key = sdata->default_unicast_key; 285 key = key_mtx_dereference(sdata->local,
286 sdata->default_unicast_key);
290 sprintf(buf, "../keys/%d", key->debugfs.cnt); 287 sprintf(buf, "../keys/%d", key->debugfs.cnt);
291 sdata->debugfs.default_unicast_key = 288 sdata->debugfs.default_unicast_key =
292 debugfs_create_symlink("default_unicast_key", 289 debugfs_create_symlink("default_unicast_key",
@@ -297,7 +294,8 @@ void ieee80211_debugfs_key_update_default(struct ieee80211_sub_if_data *sdata)
297 } 294 }
298 295
299 if (sdata->default_multicast_key) { 296 if (sdata->default_multicast_key) {
300 key = sdata->default_multicast_key; 297 key = key_mtx_dereference(sdata->local,
298 sdata->default_multicast_key);
301 sprintf(buf, "../keys/%d", key->debugfs.cnt); 299 sprintf(buf, "../keys/%d", key->debugfs.cnt);
302 sdata->debugfs.default_multicast_key = 300 sdata->debugfs.default_multicast_key =
303 debugfs_create_symlink("default_multicast_key", 301 debugfs_create_symlink("default_multicast_key",
@@ -316,9 +314,8 @@ void ieee80211_debugfs_key_add_mgmt_default(struct ieee80211_sub_if_data *sdata)
316 if (!sdata->debugfs.dir) 314 if (!sdata->debugfs.dir)
317 return; 315 return;
318 316
319 /* this is running under the key lock */ 317 key = key_mtx_dereference(sdata->local,
320 318 sdata->default_mgmt_key);
321 key = sdata->default_mgmt_key;
322 if (key) { 319 if (key) {
323 sprintf(buf, "../keys/%d", key->debugfs.cnt); 320 sprintf(buf, "../keys/%d", key->debugfs.cnt);
324 sdata->debugfs.default_mgmt_key = 321 sdata->debugfs.default_mgmt_key =
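Note: as in the cfg.c changes, the debugfs_key.c reads now go through key_mtx_dereference(), replacing both bare pointer reads and one rcu_read_lock() section. The helper is assumed to wrap rcu_dereference_protected() keyed to local->key_mtx, turning reads that were only "safe by comment" into lockdep-checked ones. A sketch of such a lock-specific helper:

    /* Sketch: a lock-specific dereference helper in the style of
     * key_mtx_dereference(); the exact definition is assumed. */
    #define my_mtx_dereference(obj, ref) \
            rcu_dereference_protected(ref, lockdep_is_held(&(obj)->mtx))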
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index c04a1396cf8d..a01d2137fddc 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -92,6 +92,31 @@ static ssize_t sta_inactive_ms_read(struct file *file, char __user *userbuf,
92} 92}
93STA_OPS(inactive_ms); 93STA_OPS(inactive_ms);
94 94
95
96static ssize_t sta_connected_time_read(struct file *file, char __user *userbuf,
97 size_t count, loff_t *ppos)
98{
99 struct sta_info *sta = file->private_data;
100 struct timespec uptime;
101 struct tm result;
102 long connected_time_secs;
103 char buf[100];
104 int res;
105 do_posix_clock_monotonic_gettime(&uptime);
106 connected_time_secs = uptime.tv_sec - sta->last_connected;
107 time_to_tm(connected_time_secs, 0, &result);
108 result.tm_year -= 70;
109 result.tm_mday -= 1;
110 res = scnprintf(buf, sizeof(buf),
111 "years - %ld\nmonths - %d\ndays - %d\nclock - %d:%d:%d\n\n",
112 result.tm_year, result.tm_mon, result.tm_mday,
113 result.tm_hour, result.tm_min, result.tm_sec);
114 return simple_read_from_buffer(userbuf, count, ppos, buf, res);
115}
116STA_OPS(connected_time);
117
118
119
95static ssize_t sta_last_seq_ctrl_read(struct file *file, char __user *userbuf, 120static ssize_t sta_last_seq_ctrl_read(struct file *file, char __user *userbuf,
96 size_t count, loff_t *ppos) 121 size_t count, loff_t *ppos)
97{ 122{
@@ -324,6 +349,7 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta)
324 DEBUGFS_ADD(flags); 349 DEBUGFS_ADD(flags);
325 DEBUGFS_ADD(num_ps_buf_frames); 350 DEBUGFS_ADD(num_ps_buf_frames);
326 DEBUGFS_ADD(inactive_ms); 351 DEBUGFS_ADD(inactive_ms);
352 DEBUGFS_ADD(connected_time);
327 DEBUGFS_ADD(last_seq_ctrl); 353 DEBUGFS_ADD(last_seq_ctrl);
328 DEBUGFS_ADD(agg_status); 354 DEBUGFS_ADD(agg_status);
329 DEBUGFS_ADD(dev); 355 DEBUGFS_ADD(dev);
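Note: the connected-time file derives its value from the monotonic clock, so it is immune to wall-clock steps, then splits the elapsed seconds with time_to_tm(). Because time_to_tm() interprets its input as an absolute time, a zero-length duration comes back as year 70 (1970) and day 1, hence the two adjustments. The arithmetic as used above, stored timestamp hypothetical:

    /* Sketch: seconds-since-association to calendar parts. */
    struct timespec uptime;
    struct tm result;
    long secs;

    do_posix_clock_monotonic_gettime(&uptime);  /* monotonic time */
    secs = uptime.tv_sec - last_connected;      /* hypothetical stamp */
    time_to_tm(secs, 0, &result);
    result.tm_year -= 70;   /* duration 0 maps to 1970, i.e. tm_year 70 */
    result.tm_mday -= 1;    /* tm_mday counts from 1 */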
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 9c0d62bb0ea3..eebf7a67daf7 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -41,6 +41,33 @@ static inline void drv_stop(struct ieee80211_local *local)
41 local->started = false; 41 local->started = false;
42} 42}
43 43
44#ifdef CONFIG_PM
45static inline int drv_suspend(struct ieee80211_local *local,
46 struct cfg80211_wowlan *wowlan)
47{
48 int ret;
49
50 might_sleep();
51
52 trace_drv_suspend(local);
53 ret = local->ops->suspend(&local->hw, wowlan);
54 trace_drv_return_int(local, ret);
55 return ret;
56}
57
58static inline int drv_resume(struct ieee80211_local *local)
59{
60 int ret;
61
62 might_sleep();
63
64 trace_drv_resume(local);
65 ret = local->ops->resume(&local->hw);
66 trace_drv_return_int(local, ret);
67 return ret;
68}
69#endif
70
44static inline int drv_add_interface(struct ieee80211_local *local, 71static inline int drv_add_interface(struct ieee80211_local *local,
45 struct ieee80211_vif *vif) 72 struct ieee80211_vif *vif)
46{ 73{
@@ -185,12 +212,39 @@ static inline int drv_hw_scan(struct ieee80211_local *local,
185 212
186 might_sleep(); 213 might_sleep();
187 214
188 trace_drv_hw_scan(local, sdata, req); 215 trace_drv_hw_scan(local, sdata);
189 ret = local->ops->hw_scan(&local->hw, &sdata->vif, req); 216 ret = local->ops->hw_scan(&local->hw, &sdata->vif, req);
190 trace_drv_return_int(local, ret); 217 trace_drv_return_int(local, ret);
191 return ret; 218 return ret;
192} 219}
193 220
221static inline int
222drv_sched_scan_start(struct ieee80211_local *local,
223 struct ieee80211_sub_if_data *sdata,
224 struct cfg80211_sched_scan_request *req,
225 struct ieee80211_sched_scan_ies *ies)
226{
227 int ret;
228
229 might_sleep();
230
231 trace_drv_sched_scan_start(local, sdata);
232 ret = local->ops->sched_scan_start(&local->hw, &sdata->vif,
233 req, ies);
234 trace_drv_return_int(local, ret);
235 return ret;
236}
237
238static inline void drv_sched_scan_stop(struct ieee80211_local *local,
239 struct ieee80211_sub_if_data *sdata)
240{
241 might_sleep();
242
243 trace_drv_sched_scan_stop(local, sdata);
244 local->ops->sched_scan_stop(&local->hw, &sdata->vif);
245 trace_drv_return_void(local);
246}
247
194static inline void drv_sw_scan_start(struct ieee80211_local *local) 248static inline void drv_sw_scan_start(struct ieee80211_local *local)
195{ 249{
196 might_sleep(); 250 might_sleep();
@@ -552,4 +606,35 @@ static inline void drv_get_ringparam(struct ieee80211_local *local,
552 trace_drv_return_void(local); 606 trace_drv_return_void(local);
553} 607}
554 608
609static inline bool drv_tx_frames_pending(struct ieee80211_local *local)
610{
611 bool ret = false;
612
613 might_sleep();
614
615 trace_drv_tx_frames_pending(local);
616 if (local->ops->tx_frames_pending)
617 ret = local->ops->tx_frames_pending(&local->hw);
618 trace_drv_return_bool(local, ret);
619
620 return ret;
621}
622
623static inline int drv_set_bitrate_mask(struct ieee80211_local *local,
624 struct ieee80211_sub_if_data *sdata,
625 const struct cfg80211_bitrate_mask *mask)
626{
627 int ret = -EOPNOTSUPP;
628
629 might_sleep();
630
631 trace_drv_set_bitrate_mask(local, sdata, mask);
632 if (local->ops->set_bitrate_mask)
633 ret = local->ops->set_bitrate_mask(&local->hw,
634 &sdata->vif, mask);
635 trace_drv_return_int(local, ret);
636
637 return ret;
638}
639
555#endif /* __MAC80211_DRIVER_OPS */ 640#endif /* __MAC80211_DRIVER_OPS */
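Note: every drv_*() wrapper in driver-ops.h follows the same convention: assert a sleepable context, emit an entry tracepoint, invoke the driver op (supplying a default when the op is optional), then trace the return value. The shape, with tracepoints elided and names hypothetical:

    /* Sketch of the drv_*() wrapper shape; tracepoints elided. */
    static inline int drv_do_thing(struct my_local *local, u32 arg)
    {
            int ret = -EOPNOTSUPP;          /* default for optional ops */

            might_sleep();                  /* flags calls from atomic context */

            /* trace_drv_do_thing(local, arg); */
            if (local->ops->do_thing)
                    ret = local->ops->do_thing(&local->hw, arg);
            /* trace_drv_return_int(local, ret); */

            return ret;
    }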
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h
index 45aab80738e2..ed9edcbd9aa5 100644
--- a/net/mac80211/driver-trace.h
+++ b/net/mac80211/driver-trace.h
@@ -55,6 +55,70 @@ DECLARE_EVENT_CLASS(local_only_evt,
55 TP_printk(LOCAL_PR_FMT, LOCAL_PR_ARG) 55 TP_printk(LOCAL_PR_FMT, LOCAL_PR_ARG)
56); 56);
57 57
58DECLARE_EVENT_CLASS(local_sdata_addr_evt,
59 TP_PROTO(struct ieee80211_local *local,
60 struct ieee80211_sub_if_data *sdata),
61 TP_ARGS(local, sdata),
62
63 TP_STRUCT__entry(
64 LOCAL_ENTRY
65 VIF_ENTRY
66 __array(char, addr, 6)
67 ),
68
69 TP_fast_assign(
70 LOCAL_ASSIGN;
71 VIF_ASSIGN;
72 memcpy(__entry->addr, sdata->vif.addr, 6);
73 ),
74
75 TP_printk(
76 LOCAL_PR_FMT VIF_PR_FMT " addr:%pM",
77 LOCAL_PR_ARG, VIF_PR_ARG, __entry->addr
78 )
79);
80
81DECLARE_EVENT_CLASS(local_u32_evt,
82 TP_PROTO(struct ieee80211_local *local, u32 value),
83 TP_ARGS(local, value),
84
85 TP_STRUCT__entry(
86 LOCAL_ENTRY
87 __field(u32, value)
88 ),
89
90 TP_fast_assign(
91 LOCAL_ASSIGN;
92 __entry->value = value;
93 ),
94
95 TP_printk(
96 LOCAL_PR_FMT " value:%d",
97 LOCAL_PR_ARG, __entry->value
98 )
99);
100
101DECLARE_EVENT_CLASS(local_sdata_evt,
102 TP_PROTO(struct ieee80211_local *local,
103 struct ieee80211_sub_if_data *sdata),
104 TP_ARGS(local, sdata),
105
106 TP_STRUCT__entry(
107 LOCAL_ENTRY
108 VIF_ENTRY
109 ),
110
111 TP_fast_assign(
112 LOCAL_ASSIGN;
113 VIF_ASSIGN;
114 ),
115
116 TP_printk(
117 LOCAL_PR_FMT VIF_PR_FMT,
118 LOCAL_PR_ARG, VIF_PR_ARG
119 )
120);
121
58DEFINE_EVENT(local_only_evt, drv_return_void, 122DEFINE_EVENT(local_only_evt, drv_return_void,
59 TP_PROTO(struct ieee80211_local *local), 123 TP_PROTO(struct ieee80211_local *local),
60 TP_ARGS(local) 124 TP_ARGS(local)
@@ -74,6 +138,21 @@ TRACE_EVENT(drv_return_int,
74 TP_printk(LOCAL_PR_FMT " - %d", LOCAL_PR_ARG, __entry->ret) 138 TP_printk(LOCAL_PR_FMT " - %d", LOCAL_PR_ARG, __entry->ret)
75); 139);
76 140
141TRACE_EVENT(drv_return_bool,
142 TP_PROTO(struct ieee80211_local *local, bool ret),
143 TP_ARGS(local, ret),
144 TP_STRUCT__entry(
145 LOCAL_ENTRY
146 __field(bool, ret)
147 ),
148 TP_fast_assign(
149 LOCAL_ASSIGN;
150 __entry->ret = ret;
151 ),
152 TP_printk(LOCAL_PR_FMT " - %s", LOCAL_PR_ARG, (__entry->ret) ?
153 "true" : "false")
154);
155
77TRACE_EVENT(drv_return_u64, 156TRACE_EVENT(drv_return_u64,
78 TP_PROTO(struct ieee80211_local *local, u64 ret), 157 TP_PROTO(struct ieee80211_local *local, u64 ret),
79 TP_ARGS(local, ret), 158 TP_ARGS(local, ret),
@@ -93,33 +172,25 @@ DEFINE_EVENT(local_only_evt, drv_start,
93 TP_ARGS(local) 172 TP_ARGS(local)
94); 173);
95 174
175DEFINE_EVENT(local_only_evt, drv_suspend,
176 TP_PROTO(struct ieee80211_local *local),
177 TP_ARGS(local)
178);
179
180DEFINE_EVENT(local_only_evt, drv_resume,
181 TP_PROTO(struct ieee80211_local *local),
182 TP_ARGS(local)
183);
184
96DEFINE_EVENT(local_only_evt, drv_stop, 185DEFINE_EVENT(local_only_evt, drv_stop,
97 TP_PROTO(struct ieee80211_local *local), 186 TP_PROTO(struct ieee80211_local *local),
98 TP_ARGS(local) 187 TP_ARGS(local)
99); 188);
100 189
101TRACE_EVENT(drv_add_interface, 190DEFINE_EVENT(local_sdata_addr_evt, drv_add_interface,
102 TP_PROTO(struct ieee80211_local *local, 191 TP_PROTO(struct ieee80211_local *local,
103 struct ieee80211_sub_if_data *sdata), 192 struct ieee80211_sub_if_data *sdata),
104 193 TP_ARGS(local, sdata)
105 TP_ARGS(local, sdata),
106
107 TP_STRUCT__entry(
108 LOCAL_ENTRY
109 VIF_ENTRY
110 __array(char, addr, 6)
111 ),
112
113 TP_fast_assign(
114 LOCAL_ASSIGN;
115 VIF_ASSIGN;
116 memcpy(__entry->addr, sdata->vif.addr, 6);
117 ),
118
119 TP_printk(
120 LOCAL_PR_FMT VIF_PR_FMT " addr:%pM",
121 LOCAL_PR_ARG, VIF_PR_ARG, __entry->addr
122 )
123); 194);
124 195
125TRACE_EVENT(drv_change_interface, 196TRACE_EVENT(drv_change_interface,
@@ -150,27 +221,10 @@ TRACE_EVENT(drv_change_interface,
150 ) 221 )
151); 222);
152 223
153TRACE_EVENT(drv_remove_interface, 224DEFINE_EVENT(local_sdata_addr_evt, drv_remove_interface,
154 TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata), 225 TP_PROTO(struct ieee80211_local *local,
155 226 struct ieee80211_sub_if_data *sdata),
156 TP_ARGS(local, sdata), 227 TP_ARGS(local, sdata)
157
158 TP_STRUCT__entry(
159 LOCAL_ENTRY
160 VIF_ENTRY
161 __array(char, addr, 6)
162 ),
163
164 TP_fast_assign(
165 LOCAL_ASSIGN;
166 VIF_ASSIGN;
167 memcpy(__entry->addr, sdata->vif.addr, 6);
168 ),
169
170 TP_printk(
171 LOCAL_PR_FMT VIF_PR_FMT " addr:%pM",
172 LOCAL_PR_ARG, VIF_PR_ARG, __entry->addr
173 )
174); 228);
175 229
176TRACE_EVENT(drv_config, 230TRACE_EVENT(drv_config,
@@ -400,27 +454,22 @@ TRACE_EVENT(drv_update_tkip_key,
400 ) 454 )
401); 455);
402 456
403TRACE_EVENT(drv_hw_scan, 457DEFINE_EVENT(local_sdata_evt, drv_hw_scan,
404 TP_PROTO(struct ieee80211_local *local, 458 TP_PROTO(struct ieee80211_local *local,
405 struct ieee80211_sub_if_data *sdata, 459 struct ieee80211_sub_if_data *sdata),
406 struct cfg80211_scan_request *req), 460 TP_ARGS(local, sdata)
407 461);
408 TP_ARGS(local, sdata, req),
409
410 TP_STRUCT__entry(
411 LOCAL_ENTRY
412 VIF_ENTRY
413 ),
414 462
415 TP_fast_assign( 463DEFINE_EVENT(local_sdata_evt, drv_sched_scan_start,
416 LOCAL_ASSIGN; 464 TP_PROTO(struct ieee80211_local *local,
417 VIF_ASSIGN; 465 struct ieee80211_sub_if_data *sdata),
418 ), 466 TP_ARGS(local, sdata)
467);
419 468
420 TP_printk( 469DEFINE_EVENT(local_sdata_evt, drv_sched_scan_stop,
421 LOCAL_PR_FMT VIF_PR_FMT, 470 TP_PROTO(struct ieee80211_local *local,
422 LOCAL_PR_ARG,VIF_PR_ARG 471 struct ieee80211_sub_if_data *sdata),
423 ) 472 TP_ARGS(local, sdata)
424); 473);
425 474
426DEFINE_EVENT(local_only_evt, drv_sw_scan_start, 475DEFINE_EVENT(local_only_evt, drv_sw_scan_start,
@@ -489,46 +538,14 @@ TRACE_EVENT(drv_get_tkip_seq,
489 ) 538 )
490); 539);
491 540
492TRACE_EVENT(drv_set_frag_threshold, 541DEFINE_EVENT(local_u32_evt, drv_set_frag_threshold,
493 TP_PROTO(struct ieee80211_local *local, u32 value), 542 TP_PROTO(struct ieee80211_local *local, u32 value),
494 543 TP_ARGS(local, value)
495 TP_ARGS(local, value),
496
497 TP_STRUCT__entry(
498 LOCAL_ENTRY
499 __field(u32, value)
500 ),
501
502 TP_fast_assign(
503 LOCAL_ASSIGN;
504 __entry->value = value;
505 ),
506
507 TP_printk(
508 LOCAL_PR_FMT " value:%d",
509 LOCAL_PR_ARG, __entry->value
510 )
511); 544);
512 545
513TRACE_EVENT(drv_set_rts_threshold, 546DEFINE_EVENT(local_u32_evt, drv_set_rts_threshold,
514 TP_PROTO(struct ieee80211_local *local, u32 value), 547 TP_PROTO(struct ieee80211_local *local, u32 value),
515 548 TP_ARGS(local, value)
516 TP_ARGS(local, value),
517
518 TP_STRUCT__entry(
519 LOCAL_ENTRY
520 __field(u32, value)
521 ),
522
523 TP_fast_assign(
524 LOCAL_ASSIGN;
525 __entry->value = value;
526 ),
527
528 TP_printk(
529 LOCAL_PR_FMT " value:%d",
530 LOCAL_PR_ARG, __entry->value
531 )
532); 549);
533 550
534TRACE_EVENT(drv_set_coverage_class, 551TRACE_EVENT(drv_set_coverage_class,
@@ -964,11 +981,43 @@ TRACE_EVENT(drv_get_ringparam,
964 ) 981 )
965); 982);
966 983
984DEFINE_EVENT(local_only_evt, drv_tx_frames_pending,
985 TP_PROTO(struct ieee80211_local *local),
986 TP_ARGS(local)
987);
988
967DEFINE_EVENT(local_only_evt, drv_offchannel_tx_cancel_wait, 989DEFINE_EVENT(local_only_evt, drv_offchannel_tx_cancel_wait,
968 TP_PROTO(struct ieee80211_local *local), 990 TP_PROTO(struct ieee80211_local *local),
969 TP_ARGS(local) 991 TP_ARGS(local)
970); 992);
971 993
994TRACE_EVENT(drv_set_bitrate_mask,
995 TP_PROTO(struct ieee80211_local *local,
996 struct ieee80211_sub_if_data *sdata,
997 const struct cfg80211_bitrate_mask *mask),
998
999 TP_ARGS(local, sdata, mask),
1000
1001 TP_STRUCT__entry(
1002 LOCAL_ENTRY
1003 VIF_ENTRY
1004 __field(u32, legacy_2g)
1005 __field(u32, legacy_5g)
1006 ),
1007
1008 TP_fast_assign(
1009 LOCAL_ASSIGN;
1010 VIF_ASSIGN;
1011 __entry->legacy_2g = mask->control[IEEE80211_BAND_2GHZ].legacy;
1012 __entry->legacy_5g = mask->control[IEEE80211_BAND_5GHZ].legacy;
1013 ),
1014
1015 TP_printk(
1016 LOCAL_PR_FMT VIF_PR_FMT " 2G Mask:0x%x 5G Mask:0x%x",
1017 LOCAL_PR_ARG, VIF_PR_ARG, __entry->legacy_2g, __entry->legacy_5g
1018 )
1019);
1020
972/* 1021/*
973 * Tracing for API calls that drivers call. 1022 * Tracing for API calls that drivers call.
974 */ 1023 */
@@ -1147,6 +1196,42 @@ TRACE_EVENT(api_scan_completed,
1147 ) 1196 )
1148); 1197);
1149 1198
1199TRACE_EVENT(api_sched_scan_results,
1200 TP_PROTO(struct ieee80211_local *local),
1201
1202 TP_ARGS(local),
1203
1204 TP_STRUCT__entry(
1205 LOCAL_ENTRY
1206 ),
1207
1208 TP_fast_assign(
1209 LOCAL_ASSIGN;
1210 ),
1211
1212 TP_printk(
1213 LOCAL_PR_FMT, LOCAL_PR_ARG
1214 )
1215);
1216
1217TRACE_EVENT(api_sched_scan_stopped,
1218 TP_PROTO(struct ieee80211_local *local),
1219
1220 TP_ARGS(local),
1221
1222 TP_STRUCT__entry(
1223 LOCAL_ENTRY
1224 ),
1225
1226 TP_fast_assign(
1227 LOCAL_ASSIGN;
1228 ),
1229
1230 TP_printk(
1231 LOCAL_PR_FMT, LOCAL_PR_ARG
1232 )
1233);
1234
1150TRACE_EVENT(api_sta_block_awake, 1235TRACE_EVENT(api_sta_block_awake,
1151 TP_PROTO(struct ieee80211_local *local, 1236 TP_PROTO(struct ieee80211_local *local,
1152 struct ieee80211_sta *sta, bool block), 1237 struct ieee80211_sta *sta, bool block),
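The driver-trace.h hunks above convert several near-identical TRACE_EVENT() definitions into DEFINE_EVENT() instances of shared event classes, so each tracepoint shrinks to a prototype plus arguments. The classes themselves (local_sdata_evt, local_u32_evt) are declared elsewhere in the file and are not part of these hunks; a minimal sketch of what such a class plausibly looks like, reusing the file's existing LOCAL_*/VIF_* helper macros:

	DECLARE_EVENT_CLASS(local_sdata_evt,
		TP_PROTO(struct ieee80211_local *local,
			 struct ieee80211_sub_if_data *sdata),
		TP_ARGS(local, sdata),

		TP_STRUCT__entry(
			LOCAL_ENTRY
			VIF_ENTRY
		),

		TP_fast_assign(
			LOCAL_ASSIGN;
			VIF_ASSIGN;
		),

		TP_printk(
			LOCAL_PR_FMT VIF_PR_FMT,
			LOCAL_PR_ARG, VIF_PR_ARG
		)
	);

	/* each event then costs a few lines instead of ~20 */
	DEFINE_EVENT(local_sdata_evt, drv_hw_scan,
		TP_PROTO(struct ieee80211_local *local,
			 struct ieee80211_sub_if_data *sdata),
		TP_ARGS(local, sdata)
	);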
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index b9e4b9bd2179..591add22bcc0 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -140,14 +140,29 @@ void ieee80211_ba_session_work(struct work_struct *work)
140 sta, tid, WLAN_BACK_RECIPIENT, 140 sta, tid, WLAN_BACK_RECIPIENT,
141 WLAN_REASON_QSTA_TIMEOUT, true); 141 WLAN_REASON_QSTA_TIMEOUT, true);
142 142
143 tid_tx = sta->ampdu_mlme.tid_tx[tid]; 143 tid_tx = sta->ampdu_mlme.tid_start_tx[tid];
144 if (!tid_tx) 144 if (tid_tx) {
145 continue; 145 /*
146 * Assign it over to the normal tid_tx array
147 * where it "goes live".
148 */
149 spin_lock_bh(&sta->lock);
150
151 sta->ampdu_mlme.tid_start_tx[tid] = NULL;
152 /* could there be a race? */
153 if (sta->ampdu_mlme.tid_tx[tid])
154 kfree(tid_tx);
155 else
156 ieee80211_assign_tid_tx(sta, tid, tid_tx);
157 spin_unlock_bh(&sta->lock);
146 158
147 if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state))
148 ieee80211_tx_ba_session_handle_start(sta, tid); 159 ieee80211_tx_ba_session_handle_start(sta, tid);
149 else if (test_and_clear_bit(HT_AGG_STATE_WANT_STOP, 160 continue;
150 &tid_tx->state)) 161 }
162
163 tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
164 if (tid_tx && test_and_clear_bit(HT_AGG_STATE_WANT_STOP,
165 &tid_tx->state))
151 ___ieee80211_stop_tx_ba_session(sta, tid, 166 ___ieee80211_stop_tx_ba_session(sta, tid,
152 WLAN_BACK_INITIATOR, 167 WLAN_BACK_INITIATOR,
153 true); 168 true);
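The ht.c rework above hands a TX aggregation session prepared in tid_start_tx[] over to the live tid_tx[] slot under sta->lock, freeing the new entry if one somehow went live for the same TID in the meantime (the "could there be a race?" comment). Assuming tid_tx[] is an __rcu pointer array, ieee80211_assign_tid_tx() is presumably a thin publish wrapper along these lines:

	/* hedged sketch; the helper is defined outside this diff */
	static inline void
	ieee80211_assign_tid_tx(struct sta_info *sta, int tid,
				struct tid_ampdu_tx *tid_tx)
	{
		rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], tid_tx);
	}

rcu_dereference_protected_tid_tx(), used for the stop path just below, would then be the matching lockdep-checked read of the same slot.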
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 3e81af1fce58..421eaa6b0c2b 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -40,7 +40,7 @@ static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata,
40 struct ieee80211_mgmt *mgmt, 40 struct ieee80211_mgmt *mgmt,
41 size_t len) 41 size_t len)
42{ 42{
43 u16 auth_alg, auth_transaction, status_code; 43 u16 auth_alg, auth_transaction;
44 44
45 lockdep_assert_held(&sdata->u.ibss.mtx); 45 lockdep_assert_held(&sdata->u.ibss.mtx);
46 46
@@ -49,7 +49,6 @@ static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata,
49 49
50 auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg); 50 auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
51 auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction); 51 auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
52 status_code = le16_to_cpu(mgmt->u.auth.status_code);
53 52
54 /* 53 /*
55 * IEEE 802.11 standard does not require authentication in IBSS 54 * IEEE 802.11 standard does not require authentication in IBSS
@@ -527,8 +526,6 @@ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata)
527static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata) 526static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
528{ 527{
529 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; 528 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
530 struct ieee80211_local *local = sdata->local;
531 struct ieee80211_supported_band *sband;
532 u8 bssid[ETH_ALEN]; 529 u8 bssid[ETH_ALEN];
533 u16 capability; 530 u16 capability;
534 int i; 531 int i;
@@ -551,8 +548,6 @@ static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
551 printk(KERN_DEBUG "%s: Creating new IBSS network, BSSID %pM\n", 548 printk(KERN_DEBUG "%s: Creating new IBSS network, BSSID %pM\n",
552 sdata->name, bssid); 549 sdata->name, bssid);
553 550
554 sband = local->hw.wiphy->bands[ifibss->channel->band];
555
556 capability = WLAN_CAPABILITY_IBSS; 551 capability = WLAN_CAPABILITY_IBSS;
557 552
558 if (ifibss->privacy) 553 if (ifibss->privacy)
@@ -661,19 +656,22 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
661static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata, 656static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
662 struct sk_buff *req) 657 struct sk_buff *req)
663{ 658{
664 struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(req);
665 struct ieee80211_mgmt *mgmt = (void *)req->data; 659 struct ieee80211_mgmt *mgmt = (void *)req->data;
666 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; 660 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
667 struct ieee80211_local *local = sdata->local; 661 struct ieee80211_local *local = sdata->local;
668 int tx_last_beacon, len = req->len; 662 int tx_last_beacon, len = req->len;
669 struct sk_buff *skb; 663 struct sk_buff *skb;
670 struct ieee80211_mgmt *resp; 664 struct ieee80211_mgmt *resp;
665 struct sk_buff *presp;
671 u8 *pos, *end; 666 u8 *pos, *end;
672 667
673 lockdep_assert_held(&ifibss->mtx); 668 lockdep_assert_held(&ifibss->mtx);
674 669
670 presp = rcu_dereference_protected(ifibss->presp,
671 lockdep_is_held(&ifibss->mtx));
672
675 if (ifibss->state != IEEE80211_IBSS_MLME_JOINED || 673 if (ifibss->state != IEEE80211_IBSS_MLME_JOINED ||
676 len < 24 + 2 || !ifibss->presp) 674 len < 24 + 2 || !presp)
677 return; 675 return;
678 676
679 tx_last_beacon = drv_tx_last_beacon(local); 677 tx_last_beacon = drv_tx_last_beacon(local);
@@ -685,7 +683,7 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
685 mgmt->bssid, tx_last_beacon); 683 mgmt->bssid, tx_last_beacon);
686#endif /* CONFIG_MAC80211_IBSS_DEBUG */ 684#endif /* CONFIG_MAC80211_IBSS_DEBUG */
687 685
688 if (!tx_last_beacon && !(rx_status->rx_flags & IEEE80211_RX_RA_MATCH)) 686 if (!tx_last_beacon && is_multicast_ether_addr(mgmt->da))
689 return; 687 return;
690 688
691 if (memcmp(mgmt->bssid, ifibss->bssid, ETH_ALEN) != 0 && 689 if (memcmp(mgmt->bssid, ifibss->bssid, ETH_ALEN) != 0 &&
@@ -711,7 +709,7 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
711 } 709 }
712 710
713 /* Reply with ProbeResp */ 711 /* Reply with ProbeResp */
714 skb = skb_copy(ifibss->presp, GFP_KERNEL); 712 skb = skb_copy(presp, GFP_KERNEL);
715 if (!skb) 713 if (!skb)
716 return; 714 return;
717 715
@@ -991,7 +989,8 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
991 989
992 /* remove beacon */ 990 /* remove beacon */
993 kfree(sdata->u.ibss.ie); 991 kfree(sdata->u.ibss.ie);
994 skb = sdata->u.ibss.presp; 992 skb = rcu_dereference_protected(sdata->u.ibss.presp,
993 lockdep_is_held(&sdata->u.ibss.mtx));
995 rcu_assign_pointer(sdata->u.ibss.presp, NULL); 994 rcu_assign_pointer(sdata->u.ibss.presp, NULL);
996 sdata->vif.bss_conf.ibss_joined = false; 995 sdata->vif.bss_conf.ibss_joined = false;
997 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED | 996 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED |
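With ifibss->presp becoming an __rcu pointer (see the ieee80211_i.h hunk further down), every non-RCU reader now has to state what protects it. The pattern used above, reduced to its core:

	struct sk_buff *presp;

	lockdep_assert_held(&sdata->u.ibss.mtx);

	/* update-side read: the mutex, not rcu_read_lock(), protects us */
	presp = rcu_dereference_protected(sdata->u.ibss.presp,
					  lockdep_is_held(&sdata->u.ibss.mtx));
	if (!presp)
		return;

The lockdep_is_held() condition is evaluated only in CONFIG_PROVE_RCU builds, so the annotation costs nothing in production kernels.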
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index c18396c248d7..2025af52b195 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -214,7 +214,7 @@ struct beacon_data {
214}; 214};
215 215
216struct ieee80211_if_ap { 216struct ieee80211_if_ap {
217 struct beacon_data *beacon; 217 struct beacon_data __rcu *beacon;
218 218
219 struct list_head vlans; 219 struct list_head vlans;
220 220
@@ -237,7 +237,7 @@ struct ieee80211_if_vlan {
237 struct list_head list; 237 struct list_head list;
238 238
239 /* used for all tx if the VLAN is configured to 4-addr mode */ 239 /* used for all tx if the VLAN is configured to 4-addr mode */
240 struct sta_info *sta; 240 struct sta_info __rcu *sta;
241}; 241};
242 242
243struct mesh_stats { 243struct mesh_stats {
@@ -442,7 +442,8 @@ struct ieee80211_if_ibss {
442 442
443 unsigned long ibss_join_req; 443 unsigned long ibss_join_req;
444 /* probe response/beacon for IBSS */ 444 /* probe response/beacon for IBSS */
445 struct sk_buff *presp, *skb; 445 struct sk_buff __rcu *presp;
446 struct sk_buff *skb;
446 447
447 enum { 448 enum {
448 IEEE80211_IBSS_MLME_SEARCH, 449 IEEE80211_IBSS_MLME_SEARCH,
@@ -488,8 +489,13 @@ struct ieee80211_if_mesh {
488 struct mesh_config mshcfg; 489 struct mesh_config mshcfg;
489 u32 mesh_seqnum; 490 u32 mesh_seqnum;
490 bool accepting_plinks; 491 bool accepting_plinks;
491 const u8 *vendor_ie; 492 const u8 *ie;
492 u8 vendor_ie_len; 493 u8 ie_len;
494 enum {
495 IEEE80211_MESH_SEC_NONE = 0x0,
496 IEEE80211_MESH_SEC_AUTHED = 0x1,
497 IEEE80211_MESH_SEC_SECURED = 0x2,
498 } security;
493}; 499};
494 500
495#ifdef CONFIG_MAC80211_MESH 501#ifdef CONFIG_MAC80211_MESH
@@ -562,9 +568,10 @@ struct ieee80211_sub_if_data {
562 struct ieee80211_fragment_entry fragments[IEEE80211_FRAGMENT_MAX]; 568 struct ieee80211_fragment_entry fragments[IEEE80211_FRAGMENT_MAX];
563 unsigned int fragment_next; 569 unsigned int fragment_next;
564 570
565 struct ieee80211_key *keys[NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS]; 571 struct ieee80211_key __rcu *keys[NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS];
566 struct ieee80211_key *default_unicast_key, *default_multicast_key; 572 struct ieee80211_key __rcu *default_unicast_key;
567 struct ieee80211_key *default_mgmt_key; 573 struct ieee80211_key __rcu *default_multicast_key;
574 struct ieee80211_key __rcu *default_mgmt_key;
568 575
569 u16 sequence_number; 576 u16 sequence_number;
570 __be16 control_port_protocol; 577 __be16 control_port_protocol;
@@ -763,8 +770,14 @@ struct ieee80211_local {
763 /* device is started */ 770 /* device is started */
764 bool started; 771 bool started;
765 772
773 /* wowlan is enabled -- don't reconfig on resume */
774 bool wowlan;
775
766 int tx_headroom; /* required headroom for hardware/radiotap */ 776 int tx_headroom; /* required headroom for hardware/radiotap */
767 777
778 /* count for keys needing tailroom space allocation */
779 int crypto_tx_tailroom_needed_cnt;
780
768 /* Tasklet and skb queue to process calls from IRQ mode. All frames 781 /* Tasklet and skb queue to process calls from IRQ mode. All frames
769 * added to skb_queue will be processed, but frames in 782 * added to skb_queue will be processed, but frames in
770 * skb_queue_unreliable may be dropped if the total length of these 783 * skb_queue_unreliable may be dropped if the total length of these
@@ -794,7 +807,7 @@ struct ieee80211_local {
794 spinlock_t sta_lock; 807 spinlock_t sta_lock;
795 unsigned long num_sta; 808 unsigned long num_sta;
796 struct list_head sta_list, sta_pending_list; 809 struct list_head sta_list, sta_pending_list;
797 struct sta_info *sta_hash[STA_HASH_SIZE]; 810 struct sta_info __rcu *sta_hash[STA_HASH_SIZE];
798 struct timer_list sta_cleanup; 811 struct timer_list sta_cleanup;
799 struct work_struct sta_finish_work; 812 struct work_struct sta_finish_work;
800 int sta_generation; 813 int sta_generation;
@@ -809,8 +822,8 @@ struct ieee80211_local {
809 822
810 struct rate_control_ref *rate_ctrl; 823 struct rate_control_ref *rate_ctrl;
811 824
812 struct crypto_blkcipher *wep_tx_tfm; 825 struct crypto_cipher *wep_tx_tfm;
813 struct crypto_blkcipher *wep_rx_tfm; 826 struct crypto_cipher *wep_rx_tfm;
814 u32 wep_iv; 827 u32 wep_iv;
815 828
816 /* see iface.c */ 829 /* see iface.c */
@@ -836,6 +849,10 @@ struct ieee80211_local {
836 int scan_channel_idx; 849 int scan_channel_idx;
837 int scan_ies_len; 850 int scan_ies_len;
838 851
852 bool sched_scanning;
853 struct ieee80211_sched_scan_ies sched_scan_ies;
854 struct work_struct sched_scan_stopped_work;
855
839 unsigned long leave_oper_channel_time; 856 unsigned long leave_oper_channel_time;
840 enum mac80211_scan_state next_scan_state; 857 enum mac80211_scan_state next_scan_state;
841 struct delayed_work scan_work; 858 struct delayed_work scan_work;
@@ -1143,6 +1160,12 @@ ieee80211_rx_bss_get(struct ieee80211_local *local, u8 *bssid, int freq,
1143void ieee80211_rx_bss_put(struct ieee80211_local *local, 1160void ieee80211_rx_bss_put(struct ieee80211_local *local,
1144 struct ieee80211_bss *bss); 1161 struct ieee80211_bss *bss);
1145 1162
1163/* scheduled scan handling */
1164int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
1165 struct cfg80211_sched_scan_request *req);
1166int ieee80211_request_sched_scan_stop(struct ieee80211_sub_if_data *sdata);
1167void ieee80211_sched_scan_stopped_work(struct work_struct *work);
1168
1146/* off-channel helpers */ 1169/* off-channel helpers */
1147bool ieee80211_cfg_on_oper_channel(struct ieee80211_local *local); 1170bool ieee80211_cfg_on_oper_channel(struct ieee80211_local *local);
1148void ieee80211_offchannel_enable_all_ps(struct ieee80211_local *local, 1171void ieee80211_offchannel_enable_all_ps(struct ieee80211_local *local,
@@ -1246,7 +1269,8 @@ int ieee80211_reconfig(struct ieee80211_local *local);
1246void ieee80211_stop_device(struct ieee80211_local *local); 1269void ieee80211_stop_device(struct ieee80211_local *local);
1247 1270
1248#ifdef CONFIG_PM 1271#ifdef CONFIG_PM
1249int __ieee80211_suspend(struct ieee80211_hw *hw); 1272int __ieee80211_suspend(struct ieee80211_hw *hw,
1273 struct cfg80211_wowlan *wowlan);
1250 1274
1251static inline int __ieee80211_resume(struct ieee80211_hw *hw) 1275static inline int __ieee80211_resume(struct ieee80211_hw *hw)
1252{ 1276{
@@ -1259,7 +1283,8 @@ static inline int __ieee80211_resume(struct ieee80211_hw *hw)
1259 return ieee80211_reconfig(hw_to_local(hw)); 1283 return ieee80211_reconfig(hw_to_local(hw));
1260} 1284}
1261#else 1285#else
1262static inline int __ieee80211_suspend(struct ieee80211_hw *hw) 1286static inline int __ieee80211_suspend(struct ieee80211_hw *hw,
1287 struct cfg80211_wowlan *wowlan)
1263{ 1288{
1264 return 0; 1289 return 0;
1265} 1290}
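Most of this header hunk is sparse annotation rather than behavior change: tagging a pointer __rcu makes sparse (and, with CONFIG_PROVE_RCU, lockdep) check that every access goes through the RCU accessors. The contract, as a generic sketch (gp, g_lock, p, new_p and use() are placeholders, not mac80211 symbols):

	struct foo __rcu *gp;		/* shared, RCU-managed pointer */

	/* reader side */
	rcu_read_lock();
	p = rcu_dereference(gp);	/* dependency-ordered load */
	if (p)
		use(p);
	rcu_read_unlock();

	/* update side, under whatever lock serializes writers */
	p = rcu_dereference_protected(gp, lockdep_is_held(&g_lock));
	rcu_assign_pointer(gp, new_p);	/* publish with write barrier */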
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 4054399be907..7dfbe71dc637 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -449,7 +449,8 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
449 /* APs need special treatment */ 449 /* APs need special treatment */
450 if (sdata->vif.type == NL80211_IFTYPE_AP) { 450 if (sdata->vif.type == NL80211_IFTYPE_AP) {
451 struct ieee80211_sub_if_data *vlan, *tmpsdata; 451 struct ieee80211_sub_if_data *vlan, *tmpsdata;
452 struct beacon_data *old_beacon = sdata->u.ap.beacon; 452 struct beacon_data *old_beacon =
453 rtnl_dereference(sdata->u.ap.beacon);
453 454
454 /* sdata_running will return false, so this will disable */ 455 /* sdata_running will return false, so this will disable */
455 ieee80211_bss_info_change_notify(sdata, 456 ieee80211_bss_info_change_notify(sdata,
@@ -1144,10 +1145,6 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
1144 + IEEE80211_ENCRYPT_HEADROOM; 1145 + IEEE80211_ENCRYPT_HEADROOM;
1145 ndev->needed_tailroom = IEEE80211_ENCRYPT_TAILROOM; 1146 ndev->needed_tailroom = IEEE80211_ENCRYPT_TAILROOM;
1146 1147
1147 ret = dev_alloc_name(ndev, ndev->name);
1148 if (ret < 0)
1149 goto fail;
1150
1151 ieee80211_assign_perm_addr(local, ndev, type); 1148 ieee80211_assign_perm_addr(local, ndev, type);
1152 memcpy(ndev->dev_addr, ndev->perm_addr, ETH_ALEN); 1149 memcpy(ndev->dev_addr, ndev->perm_addr, ETH_ALEN);
1153 SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy)); 1150 SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy));
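The dev_alloc_name() call removed from ieee80211_if_add() was redundant rather than wrong, assuming register_netdevice(), which this function calls later on ndev, resolves printf-style name templates itself (it does, via dev_get_valid_name()). In effect:

	/* sketch: a "wlan%d" template is expanded at registration time */
	strlcpy(ndev->name, name, IFNAMSIZ);	/* may still contain "%d" */
	/* ... */
	ret = register_netdevice(ndev);		/* picks wlan0, wlan1, ... */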
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index af3c56482c80..31afd712930d 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -101,6 +101,11 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
101 101
102 if (!ret) { 102 if (!ret) {
103 key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE; 103 key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE;
104
105 if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
106 (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)))
107 key->local->crypto_tx_tailroom_needed_cnt--;
108
104 return 0; 109 return 0;
105 } 110 }
106 111
@@ -156,6 +161,10 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
156 key->conf.keyidx, sta ? sta->addr : bcast_addr, ret); 161 key->conf.keyidx, sta ? sta->addr : bcast_addr, ret);
157 162
158 key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE; 163 key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;
164
165 if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
166 (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)))
167 key->local->crypto_tx_tailroom_needed_cnt++;
159} 168}
160 169
161void ieee80211_key_removed(struct ieee80211_key_conf *key_conf) 170void ieee80211_key_removed(struct ieee80211_key_conf *key_conf)
@@ -186,7 +195,7 @@ static void __ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata,
186 assert_key_lock(sdata->local); 195 assert_key_lock(sdata->local);
187 196
188 if (idx >= 0 && idx < NUM_DEFAULT_KEYS) 197 if (idx >= 0 && idx < NUM_DEFAULT_KEYS)
189 key = sdata->keys[idx]; 198 key = key_mtx_dereference(sdata->local, sdata->keys[idx]);
190 199
191 if (uni) 200 if (uni)
192 rcu_assign_pointer(sdata->default_unicast_key, key); 201 rcu_assign_pointer(sdata->default_unicast_key, key);
@@ -213,7 +222,7 @@ __ieee80211_set_default_mgmt_key(struct ieee80211_sub_if_data *sdata, int idx)
213 222
214 if (idx >= NUM_DEFAULT_KEYS && 223 if (idx >= NUM_DEFAULT_KEYS &&
215 idx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) 224 idx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
216 key = sdata->keys[idx]; 225 key = key_mtx_dereference(sdata->local, sdata->keys[idx]);
217 226
218 rcu_assign_pointer(sdata->default_mgmt_key, key); 227 rcu_assign_pointer(sdata->default_mgmt_key, key);
219 228
@@ -257,9 +266,15 @@ static void __ieee80211_key_replace(struct ieee80211_sub_if_data *sdata,
257 else 266 else
258 idx = new->conf.keyidx; 267 idx = new->conf.keyidx;
259 268
260 defunikey = old && sdata->default_unicast_key == old; 269 defunikey = old &&
261 defmultikey = old && sdata->default_multicast_key == old; 270 old == key_mtx_dereference(sdata->local,
262 defmgmtkey = old && sdata->default_mgmt_key == old; 271 sdata->default_unicast_key);
272 defmultikey = old &&
273 old == key_mtx_dereference(sdata->local,
274 sdata->default_multicast_key);
275 defmgmtkey = old &&
276 old == key_mtx_dereference(sdata->local,
277 sdata->default_mgmt_key);
263 278
264 if (defunikey && !new) 279 if (defunikey && !new)
265 __ieee80211_set_default_key(sdata, -1, true, false); 280 __ieee80211_set_default_key(sdata, -1, true, false);
@@ -388,8 +403,10 @@ static void __ieee80211_key_destroy(struct ieee80211_key *key)
388 ieee80211_aes_key_free(key->u.ccmp.tfm); 403 ieee80211_aes_key_free(key->u.ccmp.tfm);
389 if (key->conf.cipher == WLAN_CIPHER_SUITE_AES_CMAC) 404 if (key->conf.cipher == WLAN_CIPHER_SUITE_AES_CMAC)
390 ieee80211_aes_cmac_key_free(key->u.aes_cmac.tfm); 405 ieee80211_aes_cmac_key_free(key->u.aes_cmac.tfm);
391 if (key->local) 406 if (key->local) {
392 ieee80211_debugfs_key_remove(key); 407 ieee80211_debugfs_key_remove(key);
408 key->local->crypto_tx_tailroom_needed_cnt--;
409 }
393 410
394 kfree(key); 411 kfree(key);
395} 412}
@@ -440,17 +457,19 @@ int ieee80211_key_link(struct ieee80211_key *key,
440 mutex_lock(&sdata->local->key_mtx); 457 mutex_lock(&sdata->local->key_mtx);
441 458
442 if (sta && pairwise) 459 if (sta && pairwise)
443 old_key = sta->ptk; 460 old_key = key_mtx_dereference(sdata->local, sta->ptk);
444 else if (sta) 461 else if (sta)
445 old_key = sta->gtk[idx]; 462 old_key = key_mtx_dereference(sdata->local, sta->gtk[idx]);
446 else 463 else
447 old_key = sdata->keys[idx]; 464 old_key = key_mtx_dereference(sdata->local, sdata->keys[idx]);
448 465
449 __ieee80211_key_replace(sdata, sta, pairwise, old_key, key); 466 __ieee80211_key_replace(sdata, sta, pairwise, old_key, key);
450 __ieee80211_key_destroy(old_key); 467 __ieee80211_key_destroy(old_key);
451 468
452 ieee80211_debugfs_key_add(key); 469 ieee80211_debugfs_key_add(key);
453 470
471 key->local->crypto_tx_tailroom_needed_cnt++;
472
454 ret = ieee80211_key_enable_hw_accel(key); 473 ret = ieee80211_key_enable_hw_accel(key);
455 474
456 mutex_unlock(&sdata->local->key_mtx); 475 mutex_unlock(&sdata->local->key_mtx);
@@ -458,8 +477,11 @@ int ieee80211_key_link(struct ieee80211_key *key,
458 return ret; 477 return ret;
459} 478}
460 479
461static void __ieee80211_key_free(struct ieee80211_key *key) 480void __ieee80211_key_free(struct ieee80211_key *key)
462{ 481{
482 if (!key)
483 return;
484
463 /* 485 /*
464 * Replace key with nothingness if it was ever used. 486 * Replace key with nothingness if it was ever used.
465 */ 487 */
@@ -473,9 +495,6 @@ static void __ieee80211_key_free(struct ieee80211_key *key)
473void ieee80211_key_free(struct ieee80211_local *local, 495void ieee80211_key_free(struct ieee80211_local *local,
474 struct ieee80211_key *key) 496 struct ieee80211_key *key)
475{ 497{
476 if (!key)
477 return;
478
479 mutex_lock(&local->key_mtx); 498 mutex_lock(&local->key_mtx);
480 __ieee80211_key_free(key); 499 __ieee80211_key_free(key);
481 mutex_unlock(&local->key_mtx); 500 mutex_unlock(&local->key_mtx);
@@ -492,8 +511,12 @@ void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata)
492 511
493 mutex_lock(&sdata->local->key_mtx); 512 mutex_lock(&sdata->local->key_mtx);
494 513
495 list_for_each_entry(key, &sdata->key_list, list) 514 sdata->local->crypto_tx_tailroom_needed_cnt = 0;
515
516 list_for_each_entry(key, &sdata->key_list, list) {
517 sdata->local->crypto_tx_tailroom_needed_cnt++;
496 ieee80211_key_enable_hw_accel(key); 518 ieee80211_key_enable_hw_accel(key);
519 }
497 520
498 mutex_unlock(&sdata->local->key_mtx); 521 mutex_unlock(&sdata->local->key_mtx);
499} 522}
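The new crypto_tx_tailroom_needed_cnt counts keys for which mac80211 itself must append an IV or MMIC, i.e. keys that are not fully offloaded: linking a key raises the count, and it drops again once hardware acceleration takes over both IV and MMIC generation. The payoff is on the TX fast path, which can skip skb tailroom reallocation while the count is zero. A hedged sketch of the consumer (the actual tx.c wiring is outside this diff; may_encrypt is a stand-in):

	int tail_need = 0;

	if (may_encrypt && local->crypto_tx_tailroom_needed_cnt) {
		tail_need = IEEE80211_ENCRYPT_TAILROOM;
		tail_need -= skb_tailroom(skb);
		tail_need = max_t(int, tail_need, 0);
	}

	if (tail_need &&
	    pskb_expand_head(skb, 0, tail_need, GFP_ATOMIC))
		return -ENOMEM;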
diff --git a/net/mac80211/key.h b/net/mac80211/key.h
index 4ddbe27eb570..d801d5351336 100644
--- a/net/mac80211/key.h
+++ b/net/mac80211/key.h
@@ -135,6 +135,7 @@ struct ieee80211_key *ieee80211_key_alloc(u32 cipher, int idx, size_t key_len,
135int __must_check ieee80211_key_link(struct ieee80211_key *key, 135int __must_check ieee80211_key_link(struct ieee80211_key *key,
136 struct ieee80211_sub_if_data *sdata, 136 struct ieee80211_sub_if_data *sdata,
137 struct sta_info *sta); 137 struct sta_info *sta);
138void __ieee80211_key_free(struct ieee80211_key *key);
138void ieee80211_key_free(struct ieee80211_local *local, 139void ieee80211_key_free(struct ieee80211_local *local,
139 struct ieee80211_key *key); 140 struct ieee80211_key *key);
140void ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, int idx, 141void ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, int idx,
@@ -145,4 +146,7 @@ void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata);
145void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata); 146void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata);
146void ieee80211_disable_keys(struct ieee80211_sub_if_data *sdata); 147void ieee80211_disable_keys(struct ieee80211_sub_if_data *sdata);
147 148
149#define key_mtx_dereference(local, ref) \
150 rcu_dereference_protected(ref, lockdep_is_held(&((local)->key_mtx)))
151
148#endif /* IEEE80211_KEY_H */ 152#endif /* IEEE80211_KEY_H */
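key_mtx_dereference() is one line, but it carries the key.c hunks above: every update-side read of an __rcu key pointer funnels through it, so lockdep can verify that key_mtx is really held at each site instead of the code comparing annotated pointers directly. Typical use:

	mutex_lock(&sdata->local->key_mtx);
	old_key = key_mtx_dereference(sdata->local, sdata->keys[idx]);
	/* ... replace or destroy old_key ... */
	mutex_unlock(&sdata->local->key_mtx);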
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 562d2984c482..0d7b08db8e56 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -33,12 +33,6 @@
33#include "cfg.h" 33#include "cfg.h"
34#include "debugfs.h" 34#include "debugfs.h"
35 35
36
37static bool ieee80211_disable_40mhz_24ghz;
38module_param(ieee80211_disable_40mhz_24ghz, bool, 0644);
39MODULE_PARM_DESC(ieee80211_disable_40mhz_24ghz,
40 "Disable 40MHz support in the 2.4GHz band");
41
42static struct lock_class_key ieee80211_rx_skb_queue_class; 36static struct lock_class_key ieee80211_rx_skb_queue_class;
43 37
44void ieee80211_configure_filter(struct ieee80211_local *local) 38void ieee80211_configure_filter(struct ieee80211_local *local)
@@ -364,7 +358,8 @@ static void ieee80211_restart_work(struct work_struct *work)
364 flush_workqueue(local->workqueue); 358 flush_workqueue(local->workqueue);
365 359
366 mutex_lock(&local->mtx); 360 mutex_lock(&local->mtx);
367 WARN(test_bit(SCAN_HW_SCANNING, &local->scanning), 361 WARN(test_bit(SCAN_HW_SCANNING, &local->scanning) ||
362 local->sched_scanning,
368 "%s called with hardware scan in progress\n", __func__); 363 "%s called with hardware scan in progress\n", __func__);
369 mutex_unlock(&local->mtx); 364 mutex_unlock(&local->mtx);
370 365
@@ -545,7 +540,9 @@ ieee80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = {
545 }, 540 },
546 [NL80211_IFTYPE_MESH_POINT] = { 541 [NL80211_IFTYPE_MESH_POINT] = {
547 .tx = 0xffff, 542 .tx = 0xffff,
548 .rx = BIT(IEEE80211_STYPE_ACTION >> 4), 543 .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
544 BIT(IEEE80211_STYPE_AUTH >> 4) |
545 BIT(IEEE80211_STYPE_DEAUTH >> 4),
549 }, 546 },
550}; 547};
551 548
@@ -584,8 +581,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
584 581
585 wiphy->flags |= WIPHY_FLAG_NETNS_OK | 582 wiphy->flags |= WIPHY_FLAG_NETNS_OK |
586 WIPHY_FLAG_4ADDR_AP | 583 WIPHY_FLAG_4ADDR_AP |
587 WIPHY_FLAG_4ADDR_STATION | 584 WIPHY_FLAG_4ADDR_STATION;
588 WIPHY_FLAG_SUPPORTS_SEPARATE_DEFAULT_KEYS;
589 585
590 if (!ops->set_key) 586 if (!ops->set_key)
591 wiphy->flags |= WIPHY_FLAG_IBSS_RSN; 587 wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
@@ -656,6 +652,9 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
656 setup_timer(&local->dynamic_ps_timer, 652 setup_timer(&local->dynamic_ps_timer,
657 ieee80211_dynamic_ps_timer, (unsigned long) local); 653 ieee80211_dynamic_ps_timer, (unsigned long) local);
658 654
655 INIT_WORK(&local->sched_scan_stopped_work,
656 ieee80211_sched_scan_stopped_work);
657
659 sta_info_init(local); 658 sta_info_init(local);
660 659
661 for (i = 0; i < IEEE80211_MAX_QUEUES; i++) { 660 for (i = 0; i < IEEE80211_MAX_QUEUES; i++) {
@@ -686,7 +685,7 @@ EXPORT_SYMBOL(ieee80211_alloc_hw);
686int ieee80211_register_hw(struct ieee80211_hw *hw) 685int ieee80211_register_hw(struct ieee80211_hw *hw)
687{ 686{
688 struct ieee80211_local *local = hw_to_local(hw); 687 struct ieee80211_local *local = hw_to_local(hw);
689 int result; 688 int result, i;
690 enum ieee80211_band band; 689 enum ieee80211_band band;
691 int channels, max_bitrates; 690 int channels, max_bitrates;
692 bool supp_ht; 691 bool supp_ht;
@@ -701,6 +700,13 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
701 WLAN_CIPHER_SUITE_AES_CMAC 700 WLAN_CIPHER_SUITE_AES_CMAC
702 }; 701 };
703 702
703 if ((hw->wiphy->wowlan.flags || hw->wiphy->wowlan.n_patterns)
704#ifdef CONFIG_PM
705 && (!local->ops->suspend || !local->ops->resume)
706#endif
707 )
708 return -EINVAL;
709
704 if (hw->max_report_rates == 0) 710 if (hw->max_report_rates == 0)
705 hw->max_report_rates = hw->max_rates; 711 hw->max_report_rates = hw->max_rates;
706 712
@@ -726,18 +732,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
726 } 732 }
727 channels += sband->n_channels; 733 channels += sband->n_channels;
728 734
729 /*
730 * Since ieee80211_disable_40mhz_24ghz is global, we can
731 * modify the sband's ht data even if the driver uses a
732 * global structure for that.
733 */
734 if (ieee80211_disable_40mhz_24ghz &&
735 band == IEEE80211_BAND_2GHZ &&
736 sband->ht_cap.ht_supported) {
737 sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
738 sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SGI_40;
739 }
740
741 if (max_bitrates < sband->n_bitrates) 735 if (max_bitrates < sband->n_bitrates)
742 max_bitrates = sband->n_bitrates; 736 max_bitrates = sband->n_bitrates;
743 supp_ht = supp_ht || sband->ht_cap.ht_supported; 737 supp_ht = supp_ht || sband->ht_cap.ht_supported;
@@ -749,17 +743,30 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
749 return -ENOMEM; 743 return -ENOMEM;
750 744
751 /* if low-level driver supports AP, we also support VLAN */ 745 /* if low-level driver supports AP, we also support VLAN */
752 if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) 746 if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) {
753 local->hw.wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP_VLAN); 747 hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP_VLAN);
748 hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_AP_VLAN);
749 }
754 750
755 /* mac80211 always supports monitor */ 751 /* mac80211 always supports monitor */
756 local->hw.wiphy->interface_modes |= BIT(NL80211_IFTYPE_MONITOR); 752 hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_MONITOR);
753 hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_MONITOR);
754
755 /* mac80211 doesn't support more than 1 channel */
756 for (i = 0; i < hw->wiphy->n_iface_combinations; i++)
757 if (hw->wiphy->iface_combinations[i].num_different_channels > 1)
758 return -EINVAL;
757 759
758#ifndef CONFIG_MAC80211_MESH 760#ifndef CONFIG_MAC80211_MESH
759 /* mesh depends on Kconfig, but drivers should set it if they want */ 761 /* mesh depends on Kconfig, but drivers should set it if they want */
760 local->hw.wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MESH_POINT); 762 local->hw.wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MESH_POINT);
761#endif 763#endif
762 764
765 /* if the underlying driver supports mesh, mac80211 will (at least)
766 * provide routing of mesh authentication frames to userspace */
767 if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_MESH_POINT))
768 local->hw.wiphy->flags |= WIPHY_FLAG_MESH_AUTH;
769
763 /* mac80211 supports control port protocol changing */ 770 /* mac80211 supports control port protocol changing */
764 local->hw.wiphy->flags |= WIPHY_FLAG_CONTROL_PORT_PROTOCOL; 771 local->hw.wiphy->flags |= WIPHY_FLAG_CONTROL_PORT_PROTOCOL;
765 772
@@ -838,6 +845,9 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
838 if (!local->ops->remain_on_channel) 845 if (!local->ops->remain_on_channel)
839 local->hw.wiphy->max_remain_on_channel_duration = 5000; 846 local->hw.wiphy->max_remain_on_channel_duration = 5000;
840 847
848 if (local->ops->sched_scan_start)
849 local->hw.wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
850
841 result = wiphy_register(local->hw.wiphy); 851 result = wiphy_register(local->hw.wiphy);
842 if (result < 0) 852 if (result < 0)
843 goto fail_wiphy_register; 853 goto fail_wiphy_register;
@@ -861,8 +871,10 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
861 * and we need some headroom for passing the frame to monitor 871 * and we need some headroom for passing the frame to monitor
862 * interfaces, but never both at the same time. 872 * interfaces, but never both at the same time.
863 */ 873 */
874#ifndef __CHECKER__
864 BUILD_BUG_ON(IEEE80211_TX_STATUS_HEADROOM != 875 BUILD_BUG_ON(IEEE80211_TX_STATUS_HEADROOM !=
865 sizeof(struct ieee80211_tx_status_rtap_hdr)); 876 sizeof(struct ieee80211_tx_status_rtap_hdr));
877#endif
866 local->tx_headroom = max_t(unsigned int , local->hw.extra_tx_headroom, 878 local->tx_headroom = max_t(unsigned int , local->hw.extra_tx_headroom,
867 sizeof(struct ieee80211_tx_status_rtap_hdr)); 879 sizeof(struct ieee80211_tx_status_rtap_hdr));
868 880
@@ -879,10 +891,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
879 891
880 local->dynamic_ps_forced_timeout = -1; 892 local->dynamic_ps_forced_timeout = -1;
881 893
882 result = sta_info_start(local);
883 if (result < 0)
884 goto fail_sta_info;
885
886 result = ieee80211_wep_init(local); 894 result = ieee80211_wep_init(local);
887 if (result < 0) 895 if (result < 0)
888 wiphy_debug(local->hw.wiphy, "Failed to initialize wep: %d\n", 896 wiphy_debug(local->hw.wiphy, "Failed to initialize wep: %d\n",
@@ -945,7 +953,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
945 rtnl_unlock(); 953 rtnl_unlock();
946 ieee80211_wep_free(local); 954 ieee80211_wep_free(local);
947 sta_info_stop(local); 955 sta_info_stop(local);
948 fail_sta_info:
949 destroy_workqueue(local->workqueue); 956 destroy_workqueue(local->workqueue);
950 fail_workqueue: 957 fail_workqueue:
951 wiphy_unregister(local->hw.wiphy); 958 wiphy_unregister(local->hw.wiphy);
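The WoWLAN capability check added to ieee80211_register_hw() is hard to read with the #ifdef spliced into the condition. Folded out, the rule is: a driver may only advertise WoWLAN triggers if CONFIG_PM is enabled and it implements both suspend and resume. An equivalent, de-preprocessed form:

	#ifdef CONFIG_PM
	bool can_wowlan = local->ops->suspend && local->ops->resume;
	#else
	bool can_wowlan = false;
	#endif

	if ((hw->wiphy->wowlan.flags || hw->wiphy->wowlan.n_patterns) &&
	    !can_wowlan)
		return -EINVAL;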
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 2a57cc02c618..29e9980c8e60 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -279,57 +279,14 @@ void mesh_mgmt_ies_add(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
279 MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00; 279 MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00;
280 *pos++ = 0x00; 280 *pos++ = 0x00;
281 281
282 if (sdata->u.mesh.vendor_ie) { 282 if (sdata->u.mesh.ie) {
283 int len = sdata->u.mesh.vendor_ie_len; 283 int len = sdata->u.mesh.ie_len;
284 const u8 *data = sdata->u.mesh.vendor_ie; 284 const u8 *data = sdata->u.mesh.ie;
285 if (skb_tailroom(skb) > len) 285 if (skb_tailroom(skb) > len)
286 memcpy(skb_put(skb, len), data, len); 286 memcpy(skb_put(skb, len), data, len);
287 } 287 }
288} 288}
289 289
290u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata, struct mesh_table *tbl)
291{
292 /* Use last four bytes of hw addr and interface index as hash index */
293 return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex, tbl->hash_rnd)
294 & tbl->hash_mask;
295}
296
297struct mesh_table *mesh_table_alloc(int size_order)
298{
299 int i;
300 struct mesh_table *newtbl;
301
302 newtbl = kmalloc(sizeof(struct mesh_table), GFP_KERNEL);
303 if (!newtbl)
304 return NULL;
305
306 newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
307 (1 << size_order), GFP_KERNEL);
308
309 if (!newtbl->hash_buckets) {
310 kfree(newtbl);
311 return NULL;
312 }
313
314 newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
315 (1 << size_order), GFP_KERNEL);
316 if (!newtbl->hashwlock) {
317 kfree(newtbl->hash_buckets);
318 kfree(newtbl);
319 return NULL;
320 }
321
322 newtbl->size_order = size_order;
323 newtbl->hash_mask = (1 << size_order) - 1;
324 atomic_set(&newtbl->entries, 0);
325 get_random_bytes(&newtbl->hash_rnd,
326 sizeof(newtbl->hash_rnd));
327 for (i = 0; i <= newtbl->hash_mask; i++)
328 spin_lock_init(&newtbl->hashwlock[i]);
329
330 return newtbl;
331}
332
333 290
334static void ieee80211_mesh_path_timer(unsigned long data) 291static void ieee80211_mesh_path_timer(unsigned long data)
335{ 292{
@@ -573,6 +530,10 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
573 ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen, 530 ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen,
574 &elems); 531 &elems);
575 532
533 /* ignore beacons from secure mesh peers if our security is off */
534 if (elems.rsn_len && sdata->u.mesh.security == IEEE80211_MESH_SEC_NONE)
535 return;
536
576 if (elems.ds_params && elems.ds_params_len == 1) 537 if (elems.ds_params && elems.ds_params_len == 1)
577 freq = ieee80211_channel_to_frequency(elems.ds_params[0], band); 538 freq = ieee80211_channel_to_frequency(elems.ds_params[0], band);
578 else 539 else
@@ -586,9 +547,7 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
586 if (elems.mesh_id && elems.mesh_config && 547 if (elems.mesh_id && elems.mesh_config &&
587 mesh_matches_local(&elems, sdata)) { 548 mesh_matches_local(&elems, sdata)) {
588 supp_rates = ieee80211_sta_get_rates(local, &elems, band); 549 supp_rates = ieee80211_sta_get_rates(local, &elems, band);
589 550 mesh_neighbour_update(mgmt->sa, supp_rates, sdata, &elems);
590 mesh_neighbour_update(mgmt->sa, supp_rates, sdata,
591 mesh_peer_accepts_plinks(&elems));
592 } 551 }
593} 552}
594 553
@@ -598,7 +557,7 @@ static void ieee80211_mesh_rx_mgmt_action(struct ieee80211_sub_if_data *sdata,
598 struct ieee80211_rx_status *rx_status) 557 struct ieee80211_rx_status *rx_status)
599{ 558{
600 switch (mgmt->u.action.category) { 559 switch (mgmt->u.action.category) {
601 case WLAN_CATEGORY_MESH_PLINK: 560 case WLAN_CATEGORY_MESH_ACTION:
602 mesh_rx_plink_frame(sdata, mgmt, len, rx_status); 561 mesh_rx_plink_frame(sdata, mgmt, len, rx_status);
603 break; 562 break;
604 case WLAN_CATEGORY_MESH_PATH_SEL: 563 case WLAN_CATEGORY_MESH_PATH_SEL:
@@ -611,12 +570,9 @@ void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
611 struct sk_buff *skb) 570 struct sk_buff *skb)
612{ 571{
613 struct ieee80211_rx_status *rx_status; 572 struct ieee80211_rx_status *rx_status;
614 struct ieee80211_if_mesh *ifmsh;
615 struct ieee80211_mgmt *mgmt; 573 struct ieee80211_mgmt *mgmt;
616 u16 stype; 574 u16 stype;
617 575
618 ifmsh = &sdata->u.mesh;
619
620 rx_status = IEEE80211_SKB_RXCB(skb); 576 rx_status = IEEE80211_SKB_RXCB(skb);
621 mgmt = (struct ieee80211_mgmt *) skb->data; 577 mgmt = (struct ieee80211_mgmt *) skb->data;
622 stype = le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_STYPE; 578 stype = le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_STYPE;
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index b99e230fe31c..e7c5fddb4804 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -92,7 +92,7 @@ struct mesh_path {
92 u8 dst[ETH_ALEN]; 92 u8 dst[ETH_ALEN];
93 u8 mpp[ETH_ALEN]; /* used for MPP or MAP */ 93 u8 mpp[ETH_ALEN]; /* used for MPP or MAP */
94 struct ieee80211_sub_if_data *sdata; 94 struct ieee80211_sub_if_data *sdata;
95 struct sta_info *next_hop; 95 struct sta_info __rcu *next_hop;
96 struct timer_list timer; 96 struct timer_list timer;
97 struct sk_buff_head frame_queue; 97 struct sk_buff_head frame_queue;
98 struct rcu_head rcu; 98 struct rcu_head rcu;
@@ -226,7 +226,8 @@ void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata,
226int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata); 226int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata);
227/* Mesh plinks */ 227/* Mesh plinks */
228void mesh_neighbour_update(u8 *hw_addr, u32 rates, 228void mesh_neighbour_update(u8 *hw_addr, u32 rates,
229 struct ieee80211_sub_if_data *sdata, bool add); 229 struct ieee80211_sub_if_data *sdata,
230 struct ieee802_11_elems *ie);
230bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie); 231bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie);
231void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata); 232void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata);
232void mesh_plink_broken(struct sta_info *sta); 233void mesh_plink_broken(struct sta_info *sta);
@@ -239,12 +240,8 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata,
239 240
240/* Private interfaces */ 241/* Private interfaces */
241/* Mesh tables */ 242/* Mesh tables */
242struct mesh_table *mesh_table_alloc(int size_order);
243void mesh_table_free(struct mesh_table *tbl, bool free_leafs);
244void mesh_mpath_table_grow(void); 243void mesh_mpath_table_grow(void);
245void mesh_mpp_table_grow(void); 244void mesh_mpp_table_grow(void);
246u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata,
247 struct mesh_table *tbl);
248/* Mesh paths */ 245/* Mesh paths */
249int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn, __le16 target_rcode, 246int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn, __le16 target_rcode,
250 const u8 *ra, struct ieee80211_sub_if_data *sdata); 247 const u8 *ra, struct ieee80211_sub_if_data *sdata);
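mesh_neighbour_update() now receives the full parsed element set instead of a pre-chewed bool, which lets the plink code make richer decisions at the call site in mesh_plink.c: the "accepts plinks" bit as before, plus, presumably, the new mesh security state. A loose sketch of the consumer side, under that assumption:

	/* hypothetical shape of the updated callee; body not in this diff */
	void mesh_neighbour_update(u8 *hw_addr, u32 rates,
				   struct ieee80211_sub_if_data *sdata,
				   struct ieee802_11_elems *elems)
	{
		bool accepts = mesh_peer_accepts_plinks(elems);

		/* ... create/refresh the neighbour, and only auto-open a
		 * plink when both sides accept and security state allows */
	}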
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 5bf64d7112b3..2b18053070c1 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -391,7 +391,6 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
391 (mpath->flags & MESH_PATH_SN_VALID)) { 391 (mpath->flags & MESH_PATH_SN_VALID)) {
392 if (SN_GT(mpath->sn, orig_sn) || 392 if (SN_GT(mpath->sn, orig_sn) ||
393 (mpath->sn == orig_sn && 393 (mpath->sn == orig_sn &&
394 action == MPATH_PREQ &&
395 new_metric >= mpath->metric)) { 394 new_metric >= mpath->metric)) {
396 process = false; 395 process = false;
397 fresh_info = false; 396 fresh_info = false;
@@ -561,6 +560,14 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
561} 560}
562 561
563 562
563static inline struct sta_info *
564next_hop_deref_protected(struct mesh_path *mpath)
565{
566 return rcu_dereference_protected(mpath->next_hop,
567 lockdep_is_held(&mpath->state_lock));
568}
569
570
564static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata, 571static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
565 struct ieee80211_mgmt *mgmt, 572 struct ieee80211_mgmt *mgmt,
566 u8 *prep_elem, u32 metric) 573 u8 *prep_elem, u32 metric)
@@ -600,7 +607,7 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
600 spin_unlock_bh(&mpath->state_lock); 607 spin_unlock_bh(&mpath->state_lock);
601 goto fail; 608 goto fail;
602 } 609 }
603 memcpy(next_hop, mpath->next_hop->sta.addr, ETH_ALEN); 610 memcpy(next_hop, next_hop_deref_protected(mpath)->sta.addr, ETH_ALEN);
604 spin_unlock_bh(&mpath->state_lock); 611 spin_unlock_bh(&mpath->state_lock);
605 --ttl; 612 --ttl;
606 flags = PREP_IE_FLAGS(prep_elem); 613 flags = PREP_IE_FLAGS(prep_elem);
@@ -633,7 +640,6 @@ static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata,
633 struct mesh_path *mpath; 640 struct mesh_path *mpath;
634 u8 ttl; 641 u8 ttl;
635 u8 *ta, *target_addr; 642 u8 *ta, *target_addr;
636 u8 target_flags;
637 u32 target_sn; 643 u32 target_sn;
638 u16 target_rcode; 644 u16 target_rcode;
639 645
@@ -644,7 +650,6 @@ static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata,
644 return; 650 return;
645 } 651 }
646 ttl--; 652 ttl--;
647 target_flags = PERR_IE_TARGET_FLAGS(perr_elem);
648 target_addr = PERR_IE_TARGET_ADDR(perr_elem); 653 target_addr = PERR_IE_TARGET_ADDR(perr_elem);
649 target_sn = PERR_IE_TARGET_SN(perr_elem); 654 target_sn = PERR_IE_TARGET_SN(perr_elem);
650 target_rcode = PERR_IE_TARGET_RCODE(perr_elem); 655 target_rcode = PERR_IE_TARGET_RCODE(perr_elem);
@@ -654,7 +659,8 @@ static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata,
654 if (mpath) { 659 if (mpath) {
655 spin_lock_bh(&mpath->state_lock); 660 spin_lock_bh(&mpath->state_lock);
656 if (mpath->flags & MESH_PATH_ACTIVE && 661 if (mpath->flags & MESH_PATH_ACTIVE &&
657 memcmp(ta, mpath->next_hop->sta.addr, ETH_ALEN) == 0 && 662 memcmp(ta, next_hop_deref_protected(mpath)->sta.addr,
663 ETH_ALEN) == 0 &&
658 (!(mpath->flags & MESH_PATH_SN_VALID) || 664 (!(mpath->flags & MESH_PATH_SN_VALID) ||
659 SN_GT(target_sn, mpath->sn))) { 665 SN_GT(target_sn, mpath->sn))) {
660 mpath->flags &= ~MESH_PATH_ACTIVE; 666 mpath->flags &= ~MESH_PATH_ACTIVE;
@@ -675,12 +681,10 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
675{ 681{
676 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 682 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
677 struct mesh_path *mpath; 683 struct mesh_path *mpath;
678 u8 *ta;
679 u8 ttl, flags, hopcount; 684 u8 ttl, flags, hopcount;
680 u8 *orig_addr; 685 u8 *orig_addr;
681 u32 orig_sn, metric; 686 u32 orig_sn, metric;
682 687
683 ta = mgmt->sa;
684 ttl = rann->rann_ttl; 688 ttl = rann->rann_ttl;
685 if (ttl <= 1) { 689 if (ttl <= 1) {
686 ifmsh->mshstats.dropped_frames_ttl++; 690 ifmsh->mshstats.dropped_frames_ttl++;
@@ -918,6 +922,7 @@ int mesh_nexthop_lookup(struct sk_buff *skb,
918{ 922{
919 struct sk_buff *skb_to_free = NULL; 923 struct sk_buff *skb_to_free = NULL;
920 struct mesh_path *mpath; 924 struct mesh_path *mpath;
925 struct sta_info *next_hop;
921 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 926 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
922 u8 *target_addr = hdr->addr3; 927 u8 *target_addr = hdr->addr3;
923 int err = 0; 928 int err = 0;
@@ -945,7 +950,11 @@ int mesh_nexthop_lookup(struct sk_buff *skb,
945 mesh_queue_preq(mpath, 950 mesh_queue_preq(mpath,
946 PREQ_Q_F_START | PREQ_Q_F_REFRESH); 951 PREQ_Q_F_START | PREQ_Q_F_REFRESH);
947 } 952 }
948 memcpy(hdr->addr1, mpath->next_hop->sta.addr, ETH_ALEN); 953 next_hop = rcu_dereference(mpath->next_hop);
954 if (next_hop)
955 memcpy(hdr->addr1, next_hop->sta.addr, ETH_ALEN);
956 else
957 err = -ENOENT;
949 } else { 958 } else {
950 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 959 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
951 if (!(mpath->flags & MESH_PATH_RESOLVING)) { 960 if (!(mpath->flags & MESH_PATH_RESOLVING)) {
@@ -971,20 +980,11 @@ endlookup:
971 980
972void mesh_path_timer(unsigned long data) 981void mesh_path_timer(unsigned long data)
973{ 982{
974 struct ieee80211_sub_if_data *sdata; 983 struct mesh_path *mpath = (void *) data;
975 struct mesh_path *mpath; 984 struct ieee80211_sub_if_data *sdata = mpath->sdata;
976
977 rcu_read_lock();
978 mpath = (struct mesh_path *) data;
979 mpath = rcu_dereference(mpath);
980 if (!mpath)
981 goto endmpathtimer;
982 sdata = mpath->sdata;
983 985
984 if (sdata->local->quiescing) { 986 if (sdata->local->quiescing)
985 rcu_read_unlock();
986 return; 987 return;
987 }
988 988
989 spin_lock_bh(&mpath->state_lock); 989 spin_lock_bh(&mpath->state_lock);
990 if (mpath->flags & MESH_PATH_RESOLVED || 990 if (mpath->flags & MESH_PATH_RESOLVED ||
@@ -1001,8 +1001,6 @@ void mesh_path_timer(unsigned long data)
1001 } 1001 }
1002 1002
1003 spin_unlock_bh(&mpath->state_lock); 1003 spin_unlock_bh(&mpath->state_lock);
1004endmpathtimer:
1005 rcu_read_unlock();
1006} 1004}
1007 1005
1008void 1006void
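Two related fixes in mesh_hwmp.c. First, mpath->next_hop is now __rcu, so update-side readers go through next_hop_deref_protected() (lockdep-checked against mpath->state_lock), while the TX lookup keeps a plain rcu_dereference() and must now tolerate a NULL next hop (-ENOENT). Second, the old mesh_path_timer() called rcu_dereference() on its own timer argument, a no-op that only suggested protection where none applied, so the surrounding rcu_read_lock() scaffolding could go. The protected-read pattern, condensed:

	spin_lock_bh(&mpath->state_lock);
	/* lockdep verifies state_lock is held inside the helper */
	sta = next_hop_deref_protected(mpath);
	memcpy(next_hop, sta->sta.addr, ETH_ALEN);
	spin_unlock_bh(&mpath->state_lock);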
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 336ca9d0c5c4..83ce48e31913 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -40,6 +40,50 @@ static struct mesh_table *mesh_paths;
40static struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */ 40static struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */
41 41
42int mesh_paths_generation; 42int mesh_paths_generation;
43
44/* This lock will have the grow table function as writer and add / delete nodes
45 * as readers. When reading the table (i.e. doing lookups) we are well protected
46 * by RCU
47 */
48static DEFINE_RWLOCK(pathtbl_resize_lock);
49
50
51static struct mesh_table *mesh_table_alloc(int size_order)
52{
53 int i;
54 struct mesh_table *newtbl;
55
56 newtbl = kmalloc(sizeof(struct mesh_table), GFP_KERNEL);
57 if (!newtbl)
58 return NULL;
59
60 newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
61 (1 << size_order), GFP_KERNEL);
62
63 if (!newtbl->hash_buckets) {
64 kfree(newtbl);
65 return NULL;
66 }
67
68 newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
69 (1 << size_order), GFP_KERNEL);
70 if (!newtbl->hashwlock) {
71 kfree(newtbl->hash_buckets);
72 kfree(newtbl);
73 return NULL;
74 }
75
76 newtbl->size_order = size_order;
77 newtbl->hash_mask = (1 << size_order) - 1;
78 atomic_set(&newtbl->entries, 0);
79 get_random_bytes(&newtbl->hash_rnd,
80 sizeof(newtbl->hash_rnd));
81 for (i = 0; i <= newtbl->hash_mask; i++)
82 spin_lock_init(&newtbl->hashwlock[i]);
83
84 return newtbl;
85}
86
43static void __mesh_table_free(struct mesh_table *tbl) 87static void __mesh_table_free(struct mesh_table *tbl)
44{ 88{
45 kfree(tbl->hash_buckets); 89 kfree(tbl->hash_buckets);
@@ -47,7 +91,7 @@ static void __mesh_table_free(struct mesh_table *tbl)
47 kfree(tbl); 91 kfree(tbl);
48} 92}
49 93
50void mesh_table_free(struct mesh_table *tbl, bool free_leafs) 94static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
51{ 95{
52 struct hlist_head *mesh_hash; 96 struct hlist_head *mesh_hash;
53 struct hlist_node *p, *q; 97 struct hlist_node *p, *q;
@@ -55,60 +99,56 @@ void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
55 99
56 mesh_hash = tbl->hash_buckets; 100 mesh_hash = tbl->hash_buckets;
57 for (i = 0; i <= tbl->hash_mask; i++) { 101 for (i = 0; i <= tbl->hash_mask; i++) {
58 spin_lock(&tbl->hashwlock[i]); 102 spin_lock_bh(&tbl->hashwlock[i]);
59 hlist_for_each_safe(p, q, &mesh_hash[i]) { 103 hlist_for_each_safe(p, q, &mesh_hash[i]) {
60 tbl->free_node(p, free_leafs); 104 tbl->free_node(p, free_leafs);
61 atomic_dec(&tbl->entries); 105 atomic_dec(&tbl->entries);
62 } 106 }
63 spin_unlock(&tbl->hashwlock[i]); 107 spin_unlock_bh(&tbl->hashwlock[i]);
64 } 108 }
65 __mesh_table_free(tbl); 109 __mesh_table_free(tbl);
66} 110}
67 111
68static struct mesh_table *mesh_table_grow(struct mesh_table *tbl) 112static int mesh_table_grow(struct mesh_table *oldtbl,
113 struct mesh_table *newtbl)
69{ 114{
70 struct mesh_table *newtbl;
71 struct hlist_head *oldhash; 115 struct hlist_head *oldhash;
72 struct hlist_node *p, *q; 116 struct hlist_node *p, *q;
73 int i; 117 int i;
74 118
75 if (atomic_read(&tbl->entries) 119 if (atomic_read(&oldtbl->entries)
76 < tbl->mean_chain_len * (tbl->hash_mask + 1)) 120 < oldtbl->mean_chain_len * (oldtbl->hash_mask + 1))
77 goto endgrow; 121 return -EAGAIN;
78
79 newtbl = mesh_table_alloc(tbl->size_order + 1);
80 if (!newtbl)
81 goto endgrow;
82 122
83 newtbl->free_node = tbl->free_node; 123 newtbl->free_node = oldtbl->free_node;
84 newtbl->mean_chain_len = tbl->mean_chain_len; 124 newtbl->mean_chain_len = oldtbl->mean_chain_len;
85 newtbl->copy_node = tbl->copy_node; 125 newtbl->copy_node = oldtbl->copy_node;
86 atomic_set(&newtbl->entries, atomic_read(&tbl->entries)); 126 atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));
87 127
88 oldhash = tbl->hash_buckets; 128 oldhash = oldtbl->hash_buckets;
89 for (i = 0; i <= tbl->hash_mask; i++) 129 for (i = 0; i <= oldtbl->hash_mask; i++)
90 hlist_for_each(p, &oldhash[i]) 130 hlist_for_each(p, &oldhash[i])
91 if (tbl->copy_node(p, newtbl) < 0) 131 if (oldtbl->copy_node(p, newtbl) < 0)
92 goto errcopy; 132 goto errcopy;
93 133
94 return newtbl; 134 return 0;
95 135
96errcopy: 136errcopy:
97 for (i = 0; i <= newtbl->hash_mask; i++) { 137 for (i = 0; i <= newtbl->hash_mask; i++) {
98 hlist_for_each_safe(p, q, &newtbl->hash_buckets[i]) 138 hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
99 tbl->free_node(p, 0); 139 oldtbl->free_node(p, 0);
100 } 140 }
101 __mesh_table_free(newtbl); 141 return -ENOMEM;
102endgrow:
103 return NULL;
104} 142}
105 143
144static u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata,
145 struct mesh_table *tbl)
146{
147 /* Use last four bytes of hw addr and interface index as hash index */
148 return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex, tbl->hash_rnd)
149 & tbl->hash_mask;
150}
106 151
107/* This lock will have the grow table function as writer and add / delete nodes
108 * as readers. When reading the table (i.e. doing lookups) we are well protected
109 * by RCU
110 */
111static DEFINE_RWLOCK(pathtbl_resize_lock);
112 152
113/** 153/**
114 * 154 *
@@ -280,7 +320,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
280 if (!new_node) 320 if (!new_node)
281 goto err_node_alloc; 321 goto err_node_alloc;
282 322
283 read_lock(&pathtbl_resize_lock); 323 read_lock_bh(&pathtbl_resize_lock);
284 memcpy(new_mpath->dst, dst, ETH_ALEN); 324 memcpy(new_mpath->dst, dst, ETH_ALEN);
285 new_mpath->sdata = sdata; 325 new_mpath->sdata = sdata;
286 new_mpath->flags = 0; 326 new_mpath->flags = 0;
@@ -295,7 +335,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
295 hash_idx = mesh_table_hash(dst, sdata, mesh_paths); 335 hash_idx = mesh_table_hash(dst, sdata, mesh_paths);
296 bucket = &mesh_paths->hash_buckets[hash_idx]; 336 bucket = &mesh_paths->hash_buckets[hash_idx];
297 337
298 spin_lock(&mesh_paths->hashwlock[hash_idx]); 338 spin_lock_bh(&mesh_paths->hashwlock[hash_idx]);
299 339
300 err = -EEXIST; 340 err = -EEXIST;
301 hlist_for_each_entry(node, n, bucket, list) { 341 hlist_for_each_entry(node, n, bucket, list) {
@@ -311,8 +351,8 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
311 351
312 mesh_paths_generation++; 352 mesh_paths_generation++;
313 353
314 spin_unlock(&mesh_paths->hashwlock[hash_idx]); 354 spin_unlock_bh(&mesh_paths->hashwlock[hash_idx]);
315 read_unlock(&pathtbl_resize_lock); 355 read_unlock_bh(&pathtbl_resize_lock);
316 if (grow) { 356 if (grow) {
317 set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags); 357 set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
318 ieee80211_queue_work(&local->hw, &sdata->work); 358 ieee80211_queue_work(&local->hw, &sdata->work);
@@ -320,8 +360,8 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
320 return 0; 360 return 0;
321 361
322err_exists: 362err_exists:
323 spin_unlock(&mesh_paths->hashwlock[hash_idx]); 363 spin_unlock_bh(&mesh_paths->hashwlock[hash_idx]);
324 read_unlock(&pathtbl_resize_lock); 364 read_unlock_bh(&pathtbl_resize_lock);
325 kfree(new_node); 365 kfree(new_node);
326err_node_alloc: 366err_node_alloc:
327 kfree(new_mpath); 367 kfree(new_mpath);
@@ -334,15 +374,21 @@ void mesh_mpath_table_grow(void)
334{ 374{
335 struct mesh_table *oldtbl, *newtbl; 375 struct mesh_table *oldtbl, *newtbl;
336 376
337 write_lock(&pathtbl_resize_lock); 377 rcu_read_lock();
378 newtbl = mesh_table_alloc(rcu_dereference(mesh_paths)->size_order + 1);
379 if (!newtbl)
380 return;
381 write_lock_bh(&pathtbl_resize_lock);
338 oldtbl = mesh_paths; 382 oldtbl = mesh_paths;
339 newtbl = mesh_table_grow(mesh_paths); 383 if (mesh_table_grow(mesh_paths, newtbl) < 0) {
340 if (!newtbl) { 384 rcu_read_unlock();
341 write_unlock(&pathtbl_resize_lock); 385 __mesh_table_free(newtbl);
386 write_unlock_bh(&pathtbl_resize_lock);
342 return; 387 return;
343 } 388 }
389 rcu_read_unlock();
344 rcu_assign_pointer(mesh_paths, newtbl); 390 rcu_assign_pointer(mesh_paths, newtbl);
345 write_unlock(&pathtbl_resize_lock); 391 write_unlock_bh(&pathtbl_resize_lock);
346 392
347 synchronize_rcu(); 393 synchronize_rcu();
348 mesh_table_free(oldtbl, false); 394 mesh_table_free(oldtbl, false);
@@ -352,15 +398,21 @@ void mesh_mpp_table_grow(void)
352{ 398{
353 struct mesh_table *oldtbl, *newtbl; 399 struct mesh_table *oldtbl, *newtbl;
354 400
355 write_lock(&pathtbl_resize_lock); 401 rcu_read_lock();
402 newtbl = mesh_table_alloc(rcu_dereference(mpp_paths)->size_order + 1);
403 if (!newtbl)
404 return;
405 write_lock_bh(&pathtbl_resize_lock);
356 oldtbl = mpp_paths; 406 oldtbl = mpp_paths;
357 newtbl = mesh_table_grow(mpp_paths); 407 if (mesh_table_grow(mpp_paths, newtbl) < 0) {
358 if (!newtbl) { 408 rcu_read_unlock();
359 write_unlock(&pathtbl_resize_lock); 409 __mesh_table_free(newtbl);
410 write_unlock_bh(&pathtbl_resize_lock);
360 return; 411 return;
361 } 412 }
413 rcu_read_unlock();
362 rcu_assign_pointer(mpp_paths, newtbl); 414 rcu_assign_pointer(mpp_paths, newtbl);
363 write_unlock(&pathtbl_resize_lock); 415 write_unlock_bh(&pathtbl_resize_lock);
364 416
365 synchronize_rcu(); 417 synchronize_rcu();
366 mesh_table_free(oldtbl, false); 418 mesh_table_free(oldtbl, false);
@@ -394,7 +446,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
394 if (!new_node) 446 if (!new_node)
395 goto err_node_alloc; 447 goto err_node_alloc;
396 448
397 read_lock(&pathtbl_resize_lock); 449 read_lock_bh(&pathtbl_resize_lock);
398 memcpy(new_mpath->dst, dst, ETH_ALEN); 450 memcpy(new_mpath->dst, dst, ETH_ALEN);
399 memcpy(new_mpath->mpp, mpp, ETH_ALEN); 451 memcpy(new_mpath->mpp, mpp, ETH_ALEN);
400 new_mpath->sdata = sdata; 452 new_mpath->sdata = sdata;
@@ -407,7 +459,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
407 hash_idx = mesh_table_hash(dst, sdata, mpp_paths); 459 hash_idx = mesh_table_hash(dst, sdata, mpp_paths);
408 bucket = &mpp_paths->hash_buckets[hash_idx]; 460 bucket = &mpp_paths->hash_buckets[hash_idx];
409 461
410 spin_lock(&mpp_paths->hashwlock[hash_idx]); 462 spin_lock_bh(&mpp_paths->hashwlock[hash_idx]);
411 463
412 err = -EEXIST; 464 err = -EEXIST;
413 hlist_for_each_entry(node, n, bucket, list) { 465 hlist_for_each_entry(node, n, bucket, list) {
@@ -421,8 +473,8 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
421 mpp_paths->mean_chain_len * (mpp_paths->hash_mask + 1)) 473 mpp_paths->mean_chain_len * (mpp_paths->hash_mask + 1))
422 grow = 1; 474 grow = 1;
423 475
424 spin_unlock(&mpp_paths->hashwlock[hash_idx]); 476 spin_unlock_bh(&mpp_paths->hashwlock[hash_idx]);
425 read_unlock(&pathtbl_resize_lock); 477 read_unlock_bh(&pathtbl_resize_lock);
426 if (grow) { 478 if (grow) {
427 set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags); 479 set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
428 ieee80211_queue_work(&local->hw, &sdata->work); 480 ieee80211_queue_work(&local->hw, &sdata->work);
@@ -430,8 +482,8 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
430 return 0; 482 return 0;
431 483
432err_exists: 484err_exists:
433 spin_unlock(&mpp_paths->hashwlock[hash_idx]); 485 spin_unlock_bh(&mpp_paths->hashwlock[hash_idx]);
434 read_unlock(&pathtbl_resize_lock); 486 read_unlock_bh(&pathtbl_resize_lock);
435 kfree(new_node); 487 kfree(new_node);
436err_node_alloc: 488err_node_alloc:
437 kfree(new_mpath); 489 kfree(new_mpath);
@@ -544,11 +596,11 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
544 int hash_idx; 596 int hash_idx;
545 int err = 0; 597 int err = 0;
546 598
547 read_lock(&pathtbl_resize_lock); 599 read_lock_bh(&pathtbl_resize_lock);
548 hash_idx = mesh_table_hash(addr, sdata, mesh_paths); 600 hash_idx = mesh_table_hash(addr, sdata, mesh_paths);
549 bucket = &mesh_paths->hash_buckets[hash_idx]; 601 bucket = &mesh_paths->hash_buckets[hash_idx];
550 602
551 spin_lock(&mesh_paths->hashwlock[hash_idx]); 603 spin_lock_bh(&mesh_paths->hashwlock[hash_idx]);
552 hlist_for_each_entry(node, n, bucket, list) { 604 hlist_for_each_entry(node, n, bucket, list) {
553 mpath = node->mpath; 605 mpath = node->mpath;
554 if (mpath->sdata == sdata && 606 if (mpath->sdata == sdata &&
@@ -566,8 +618,8 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
566 err = -ENXIO; 618 err = -ENXIO;
567enddel: 619enddel:
568 mesh_paths_generation++; 620 mesh_paths_generation++;
569 spin_unlock(&mesh_paths->hashwlock[hash_idx]); 621 spin_unlock_bh(&mesh_paths->hashwlock[hash_idx]);
570 read_unlock(&pathtbl_resize_lock); 622 read_unlock_bh(&pathtbl_resize_lock);
571 return err; 623 return err;
572} 624}
573 625
@@ -719,7 +771,7 @@ void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
719 struct hlist_node *p; 771 struct hlist_node *p;
720 int i; 772 int i;
721 773
722 read_lock(&pathtbl_resize_lock); 774 read_lock_bh(&pathtbl_resize_lock);
723 for_each_mesh_entry(mesh_paths, p, node, i) { 775 for_each_mesh_entry(mesh_paths, p, node, i) {
724 if (node->mpath->sdata != sdata) 776 if (node->mpath->sdata != sdata)
725 continue; 777 continue;
@@ -734,7 +786,7 @@ void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
734 } else 786 } else
735 spin_unlock_bh(&mpath->state_lock); 787 spin_unlock_bh(&mpath->state_lock);
736 } 788 }
737 read_unlock(&pathtbl_resize_lock); 789 read_unlock_bh(&pathtbl_resize_lock);
738} 790}
739 791
740void mesh_pathtbl_unregister(void) 792void mesh_pathtbl_unregister(void)
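Note on the mesh_pathtbl.c hunks above: table growth is restructured so the doubled table is allocated before pathtbl_resize_lock is taken (an allocation failure can no longer bail out while holding the lock), the lock moves to its _bh variants, and the old table is freed only after synchronize_rcu(). A minimal userspace C sketch of that grow-then-publish pattern follows; plain pointer stores stand in for rcu_assign_pointer()/synchronize_rcu(), and all names here are hypothetical, not the kernel's.

        #include <stdlib.h>

        struct table {
                int size_order;             /* 1 << size_order buckets */
                void **buckets;
        };

        static struct table *paths;         /* published table, read by lookups */

        static struct table *table_alloc(int size_order)
        {
                struct table *t = malloc(sizeof(*t));

                if (!t)
                        return NULL;
                t->size_order = size_order;
                t->buckets = calloc(1UL << size_order, sizeof(void *));
                if (!t->buckets) {
                        free(t);
                        return NULL;
                }
                return t;
        }

        static void table_grow(void)
        {
                /* allocate the doubled table before taking any lock */
                struct table *newtbl = table_alloc(paths->size_order + 1);
                struct table *oldtbl;

                if (!newtbl)
                        return;
                /* lock out concurrent resizers (write_lock_bh in the patch) */
                oldtbl = paths;
                /* ... rehash oldtbl into newtbl; free newtbl on failure ... */
                paths = newtbl;             /* kernel: rcu_assign_pointer() */
                /* kernel: write_unlock_bh(), synchronize_rcu(), then free */
                free(oldtbl->buckets);
                free(oldtbl);
        }

        int main(void)
        {
                paths = table_alloc(4);     /* start with 16 buckets */
                if (!paths)
                        return 1;
                table_grow();               /* publish a 32-bucket table */
                return 0;
        }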
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 44b53931ba5e..f4adc0917888 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -43,7 +43,7 @@
43#define dot11MeshMaxPeerLinks(s) (s->u.mesh.mshcfg.dot11MeshMaxPeerLinks) 43#define dot11MeshMaxPeerLinks(s) (s->u.mesh.mshcfg.dot11MeshMaxPeerLinks)
44 44
45enum plink_frame_type { 45enum plink_frame_type {
46 PLINK_OPEN = 0, 46 PLINK_OPEN = 1,
47 PLINK_CONFIRM, 47 PLINK_CONFIRM,
48 PLINK_CLOSE 48 PLINK_CLOSE
49}; 49};
@@ -83,7 +83,7 @@ void mesh_plink_dec_estab_count(struct ieee80211_sub_if_data *sdata)
83 */ 83 */
84static inline void mesh_plink_fsm_restart(struct sta_info *sta) 84static inline void mesh_plink_fsm_restart(struct sta_info *sta)
85{ 85{
86 sta->plink_state = PLINK_LISTEN; 86 sta->plink_state = NL80211_PLINK_LISTEN;
87 sta->llid = sta->plid = sta->reason = 0; 87 sta->llid = sta->plid = sta->reason = 0;
88 sta->plink_retries = 0; 88 sta->plink_retries = 0;
89} 89}
@@ -105,7 +105,7 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
105 if (!sta) 105 if (!sta)
106 return NULL; 106 return NULL;
107 107
108 sta->flags = WLAN_STA_AUTHORIZED; 108 sta->flags = WLAN_STA_AUTHORIZED | WLAN_STA_AUTH;
109 sta->sta.supp_rates[local->hw.conf.channel->band] = rates; 109 sta->sta.supp_rates[local->hw.conf.channel->band] = rates;
110 rate_control_rate_init(sta); 110 rate_control_rate_init(sta);
111 111
@@ -126,11 +126,11 @@ static bool __mesh_plink_deactivate(struct sta_info *sta)
126 struct ieee80211_sub_if_data *sdata = sta->sdata; 126 struct ieee80211_sub_if_data *sdata = sta->sdata;
127 bool deactivated = false; 127 bool deactivated = false;
128 128
129 if (sta->plink_state == PLINK_ESTAB) { 129 if (sta->plink_state == NL80211_PLINK_ESTAB) {
130 mesh_plink_dec_estab_count(sdata); 130 mesh_plink_dec_estab_count(sdata);
131 deactivated = true; 131 deactivated = true;
132 } 132 }
133 sta->plink_state = PLINK_BLOCKED; 133 sta->plink_state = NL80211_PLINK_BLOCKED;
134 mesh_path_flush_by_nexthop(sta); 134 mesh_path_flush_by_nexthop(sta);
135 135
136 return deactivated; 136 return deactivated;
@@ -161,7 +161,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
161 __le16 reason) { 161 __le16 reason) {
162 struct ieee80211_local *local = sdata->local; 162 struct ieee80211_local *local = sdata->local;
163 struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400 + 163 struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400 +
164 sdata->u.mesh.vendor_ie_len); 164 sdata->u.mesh.ie_len);
165 struct ieee80211_mgmt *mgmt; 165 struct ieee80211_mgmt *mgmt;
166 bool include_plid = false; 166 bool include_plid = false;
167 static const u8 meshpeeringproto[] = { 0x00, 0x0F, 0xAC, 0x2A }; 167 static const u8 meshpeeringproto[] = { 0x00, 0x0F, 0xAC, 0x2A };
@@ -181,8 +181,8 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
181 IEEE80211_STYPE_ACTION); 181 IEEE80211_STYPE_ACTION);
182 memcpy(mgmt->da, da, ETH_ALEN); 182 memcpy(mgmt->da, da, ETH_ALEN);
183 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); 183 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
184 /* BSSID is left zeroed, wildcard value */ 184 memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
185 mgmt->u.action.category = WLAN_CATEGORY_MESH_PLINK; 185 mgmt->u.action.category = WLAN_CATEGORY_MESH_ACTION;
186 mgmt->u.action.u.plink_action.action_code = action; 186 mgmt->u.action.u.plink_action.action_code = action;
187 187
188 if (action == PLINK_CLOSE) 188 if (action == PLINK_CLOSE)
@@ -237,8 +237,9 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
237 return 0; 237 return 0;
238} 238}
239 239
240void mesh_neighbour_update(u8 *hw_addr, u32 rates, struct ieee80211_sub_if_data *sdata, 240void mesh_neighbour_update(u8 *hw_addr, u32 rates,
241 bool peer_accepting_plinks) 241 struct ieee80211_sub_if_data *sdata,
242 struct ieee802_11_elems *elems)
242{ 243{
243 struct ieee80211_local *local = sdata->local; 244 struct ieee80211_local *local = sdata->local;
244 struct sta_info *sta; 245 struct sta_info *sta;
@@ -248,8 +249,14 @@ void mesh_neighbour_update(u8 *hw_addr, u32 rates, struct ieee80211_sub_if_data
248 sta = sta_info_get(sdata, hw_addr); 249 sta = sta_info_get(sdata, hw_addr);
249 if (!sta) { 250 if (!sta) {
250 rcu_read_unlock(); 251 rcu_read_unlock();
251 252 /* Userspace handles peer allocation when security is enabled
252 sta = mesh_plink_alloc(sdata, hw_addr, rates); 253 */
254 if (sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED)
255 cfg80211_notify_new_peer_candidate(sdata->dev, hw_addr,
256 elems->ie_start, elems->total_len,
257 GFP_KERNEL);
258 else
259 sta = mesh_plink_alloc(sdata, hw_addr, rates);
253 if (!sta) 260 if (!sta)
254 return; 261 return;
255 if (sta_info_insert_rcu(sta)) { 262 if (sta_info_insert_rcu(sta)) {
@@ -260,7 +267,8 @@ void mesh_neighbour_update(u8 *hw_addr, u32 rates, struct ieee80211_sub_if_data
260 267
261 sta->last_rx = jiffies; 268 sta->last_rx = jiffies;
262 sta->sta.supp_rates[local->hw.conf.channel->band] = rates; 269 sta->sta.supp_rates[local->hw.conf.channel->band] = rates;
263 if (peer_accepting_plinks && sta->plink_state == PLINK_LISTEN && 270 if (mesh_peer_accepts_plinks(elems) &&
271 sta->plink_state == NL80211_PLINK_LISTEN &&
264 sdata->u.mesh.accepting_plinks && 272 sdata->u.mesh.accepting_plinks &&
265 sdata->u.mesh.mshcfg.auto_open_plinks) 273 sdata->u.mesh.mshcfg.auto_open_plinks)
266 mesh_plink_open(sta); 274 mesh_plink_open(sta);
@@ -300,8 +308,8 @@ static void mesh_plink_timer(unsigned long data)
300 sdata = sta->sdata; 308 sdata = sta->sdata;
301 309
302 switch (sta->plink_state) { 310 switch (sta->plink_state) {
303 case PLINK_OPN_RCVD: 311 case NL80211_PLINK_OPN_RCVD:
304 case PLINK_OPN_SNT: 312 case NL80211_PLINK_OPN_SNT:
305 /* retry timer */ 313 /* retry timer */
306 if (sta->plink_retries < dot11MeshMaxRetries(sdata)) { 314 if (sta->plink_retries < dot11MeshMaxRetries(sdata)) {
307 u32 rand; 315 u32 rand;
@@ -320,17 +328,17 @@ static void mesh_plink_timer(unsigned long data)
320 } 328 }
321 reason = cpu_to_le16(MESH_MAX_RETRIES); 329 reason = cpu_to_le16(MESH_MAX_RETRIES);
322 /* fall through on else */ 330 /* fall through on else */
323 case PLINK_CNF_RCVD: 331 case NL80211_PLINK_CNF_RCVD:
324 /* confirm timer */ 332 /* confirm timer */
325 if (!reason) 333 if (!reason)
326 reason = cpu_to_le16(MESH_CONFIRM_TIMEOUT); 334 reason = cpu_to_le16(MESH_CONFIRM_TIMEOUT);
327 sta->plink_state = PLINK_HOLDING; 335 sta->plink_state = NL80211_PLINK_HOLDING;
328 mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata)); 336 mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata));
329 spin_unlock_bh(&sta->lock); 337 spin_unlock_bh(&sta->lock);
330 mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid, plid, 338 mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid, plid,
331 reason); 339 reason);
332 break; 340 break;
333 case PLINK_HOLDING: 341 case NL80211_PLINK_HOLDING:
334 /* holding timer */ 342 /* holding timer */
335 del_timer(&sta->plink_timer); 343 del_timer(&sta->plink_timer);
336 mesh_plink_fsm_restart(sta); 344 mesh_plink_fsm_restart(sta);
@@ -372,14 +380,17 @@ int mesh_plink_open(struct sta_info *sta)
372 __le16 llid; 380 __le16 llid;
373 struct ieee80211_sub_if_data *sdata = sta->sdata; 381 struct ieee80211_sub_if_data *sdata = sta->sdata;
374 382
383 if (!test_sta_flags(sta, WLAN_STA_AUTH))
384 return -EPERM;
385
375 spin_lock_bh(&sta->lock); 386 spin_lock_bh(&sta->lock);
376 get_random_bytes(&llid, 2); 387 get_random_bytes(&llid, 2);
377 sta->llid = llid; 388 sta->llid = llid;
378 if (sta->plink_state != PLINK_LISTEN) { 389 if (sta->plink_state != NL80211_PLINK_LISTEN) {
379 spin_unlock_bh(&sta->lock); 390 spin_unlock_bh(&sta->lock);
380 return -EBUSY; 391 return -EBUSY;
381 } 392 }
382 sta->plink_state = PLINK_OPN_SNT; 393 sta->plink_state = NL80211_PLINK_OPN_SNT;
383 mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata)); 394 mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata));
384 spin_unlock_bh(&sta->lock); 395 spin_unlock_bh(&sta->lock);
385 mpl_dbg("Mesh plink: starting establishment with %pM\n", 396 mpl_dbg("Mesh plink: starting establishment with %pM\n",
@@ -396,7 +407,7 @@ void mesh_plink_block(struct sta_info *sta)
396 407
397 spin_lock_bh(&sta->lock); 408 spin_lock_bh(&sta->lock);
398 deactivated = __mesh_plink_deactivate(sta); 409 deactivated = __mesh_plink_deactivate(sta);
399 sta->plink_state = PLINK_BLOCKED; 410 sta->plink_state = NL80211_PLINK_BLOCKED;
400 spin_unlock_bh(&sta->lock); 411 spin_unlock_bh(&sta->lock);
401 412
402 if (deactivated) 413 if (deactivated)
@@ -419,13 +430,13 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
419 __le16 plid, llid, reason; 430 __le16 plid, llid, reason;
420#ifdef CONFIG_MAC80211_VERBOSE_MPL_DEBUG 431#ifdef CONFIG_MAC80211_VERBOSE_MPL_DEBUG
421 static const char *mplstates[] = { 432 static const char *mplstates[] = {
422 [PLINK_LISTEN] = "LISTEN", 433 [NL80211_PLINK_LISTEN] = "LISTEN",
423 [PLINK_OPN_SNT] = "OPN-SNT", 434 [NL80211_PLINK_OPN_SNT] = "OPN-SNT",
424 [PLINK_OPN_RCVD] = "OPN-RCVD", 435 [NL80211_PLINK_OPN_RCVD] = "OPN-RCVD",
425 [PLINK_CNF_RCVD] = "CNF_RCVD", 436 [NL80211_PLINK_CNF_RCVD] = "CNF_RCVD",
426 [PLINK_ESTAB] = "ESTAB", 437 [NL80211_PLINK_ESTAB] = "ESTAB",
427 [PLINK_HOLDING] = "HOLDING", 438 [NL80211_PLINK_HOLDING] = "HOLDING",
428 [PLINK_BLOCKED] = "BLOCKED" 439 [NL80211_PLINK_BLOCKED] = "BLOCKED"
429 }; 440 };
430#endif 441#endif
431 442
@@ -449,6 +460,11 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
449 mpl_dbg("Mesh plink: missing necessary peer link ie\n"); 460 mpl_dbg("Mesh plink: missing necessary peer link ie\n");
450 return; 461 return;
451 } 462 }
463 if (elems.rsn_len &&
464 sdata->u.mesh.security == IEEE80211_MESH_SEC_NONE) {
465 mpl_dbg("Mesh plink: can't establish link with secure peer\n");
466 return;
467 }
452 468
453 ftype = mgmt->u.action.u.plink_action.action_code; 469 ftype = mgmt->u.action.u.plink_action.action_code;
454 ie_len = elems.peer_link_len; 470 ie_len = elems.peer_link_len;
@@ -480,7 +496,13 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
480 return; 496 return;
481 } 497 }
482 498
483 if (sta && sta->plink_state == PLINK_BLOCKED) { 499 if (sta && !test_sta_flags(sta, WLAN_STA_AUTH)) {
500 mpl_dbg("Mesh plink: Action frame from non-authed peer\n");
501 rcu_read_unlock();
502 return;
503 }
504
505 if (sta && sta->plink_state == NL80211_PLINK_BLOCKED) {
484 rcu_read_unlock(); 506 rcu_read_unlock();
485 return; 507 return;
486 } 508 }
@@ -550,7 +572,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
550 event = CNF_ACPT; 572 event = CNF_ACPT;
551 break; 573 break;
552 case PLINK_CLOSE: 574 case PLINK_CLOSE:
553 if (sta->plink_state == PLINK_ESTAB) 575 if (sta->plink_state == NL80211_PLINK_ESTAB)
554 /* Do not check for llid or plid. This does not 576 /* Do not check for llid or plid. This does not
555 * follow the standard but since multiple plinks 577 * follow the standard but since multiple plinks
556 * per sta are not supported, it is necessary in 578 * per sta are not supported, it is necessary in
@@ -585,14 +607,14 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
585 reason = 0; 607 reason = 0;
586 switch (sta->plink_state) { 608 switch (sta->plink_state) {
587 /* spin_unlock as soon as state is updated at each case */ 609 /* spin_unlock as soon as state is updated at each case */
588 case PLINK_LISTEN: 610 case NL80211_PLINK_LISTEN:
589 switch (event) { 611 switch (event) {
590 case CLS_ACPT: 612 case CLS_ACPT:
591 mesh_plink_fsm_restart(sta); 613 mesh_plink_fsm_restart(sta);
592 spin_unlock_bh(&sta->lock); 614 spin_unlock_bh(&sta->lock);
593 break; 615 break;
594 case OPN_ACPT: 616 case OPN_ACPT:
595 sta->plink_state = PLINK_OPN_RCVD; 617 sta->plink_state = NL80211_PLINK_OPN_RCVD;
596 sta->plid = plid; 618 sta->plid = plid;
597 get_random_bytes(&llid, 2); 619 get_random_bytes(&llid, 2);
598 sta->llid = llid; 620 sta->llid = llid;
@@ -609,7 +631,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
609 } 631 }
610 break; 632 break;
611 633
612 case PLINK_OPN_SNT: 634 case NL80211_PLINK_OPN_SNT:
613 switch (event) { 635 switch (event) {
614 case OPN_RJCT: 636 case OPN_RJCT:
615 case CNF_RJCT: 637 case CNF_RJCT:
@@ -618,7 +640,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
618 if (!reason) 640 if (!reason)
619 reason = cpu_to_le16(MESH_CLOSE_RCVD); 641 reason = cpu_to_le16(MESH_CLOSE_RCVD);
620 sta->reason = reason; 642 sta->reason = reason;
621 sta->plink_state = PLINK_HOLDING; 643 sta->plink_state = NL80211_PLINK_HOLDING;
622 if (!mod_plink_timer(sta, 644 if (!mod_plink_timer(sta,
623 dot11MeshHoldingTimeout(sdata))) 645 dot11MeshHoldingTimeout(sdata)))
624 sta->ignore_plink_timer = true; 646 sta->ignore_plink_timer = true;
@@ -630,7 +652,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
630 break; 652 break;
631 case OPN_ACPT: 653 case OPN_ACPT:
632 /* retry timer is left untouched */ 654 /* retry timer is left untouched */
633 sta->plink_state = PLINK_OPN_RCVD; 655 sta->plink_state = NL80211_PLINK_OPN_RCVD;
634 sta->plid = plid; 656 sta->plid = plid;
635 llid = sta->llid; 657 llid = sta->llid;
636 spin_unlock_bh(&sta->lock); 658 spin_unlock_bh(&sta->lock);
@@ -638,7 +660,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
638 plid, 0); 660 plid, 0);
639 break; 661 break;
640 case CNF_ACPT: 662 case CNF_ACPT:
641 sta->plink_state = PLINK_CNF_RCVD; 663 sta->plink_state = NL80211_PLINK_CNF_RCVD;
642 if (!mod_plink_timer(sta, 664 if (!mod_plink_timer(sta,
643 dot11MeshConfirmTimeout(sdata))) 665 dot11MeshConfirmTimeout(sdata)))
644 sta->ignore_plink_timer = true; 666 sta->ignore_plink_timer = true;
@@ -651,7 +673,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
651 } 673 }
652 break; 674 break;
653 675
654 case PLINK_OPN_RCVD: 676 case NL80211_PLINK_OPN_RCVD:
655 switch (event) { 677 switch (event) {
656 case OPN_RJCT: 678 case OPN_RJCT:
657 case CNF_RJCT: 679 case CNF_RJCT:
@@ -660,7 +682,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
660 if (!reason) 682 if (!reason)
661 reason = cpu_to_le16(MESH_CLOSE_RCVD); 683 reason = cpu_to_le16(MESH_CLOSE_RCVD);
662 sta->reason = reason; 684 sta->reason = reason;
663 sta->plink_state = PLINK_HOLDING; 685 sta->plink_state = NL80211_PLINK_HOLDING;
664 if (!mod_plink_timer(sta, 686 if (!mod_plink_timer(sta,
665 dot11MeshHoldingTimeout(sdata))) 687 dot11MeshHoldingTimeout(sdata)))
666 sta->ignore_plink_timer = true; 688 sta->ignore_plink_timer = true;
@@ -678,7 +700,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
678 break; 700 break;
679 case CNF_ACPT: 701 case CNF_ACPT:
680 del_timer(&sta->plink_timer); 702 del_timer(&sta->plink_timer);
681 sta->plink_state = PLINK_ESTAB; 703 sta->plink_state = NL80211_PLINK_ESTAB;
682 spin_unlock_bh(&sta->lock); 704 spin_unlock_bh(&sta->lock);
683 mesh_plink_inc_estab_count(sdata); 705 mesh_plink_inc_estab_count(sdata);
684 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON); 706 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
@@ -691,7 +713,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
691 } 713 }
692 break; 714 break;
693 715
694 case PLINK_CNF_RCVD: 716 case NL80211_PLINK_CNF_RCVD:
695 switch (event) { 717 switch (event) {
696 case OPN_RJCT: 718 case OPN_RJCT:
697 case CNF_RJCT: 719 case CNF_RJCT:
@@ -700,7 +722,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
700 if (!reason) 722 if (!reason)
701 reason = cpu_to_le16(MESH_CLOSE_RCVD); 723 reason = cpu_to_le16(MESH_CLOSE_RCVD);
702 sta->reason = reason; 724 sta->reason = reason;
703 sta->plink_state = PLINK_HOLDING; 725 sta->plink_state = NL80211_PLINK_HOLDING;
704 if (!mod_plink_timer(sta, 726 if (!mod_plink_timer(sta,
705 dot11MeshHoldingTimeout(sdata))) 727 dot11MeshHoldingTimeout(sdata)))
706 sta->ignore_plink_timer = true; 728 sta->ignore_plink_timer = true;
@@ -712,7 +734,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
712 break; 734 break;
713 case OPN_ACPT: 735 case OPN_ACPT:
714 del_timer(&sta->plink_timer); 736 del_timer(&sta->plink_timer);
715 sta->plink_state = PLINK_ESTAB; 737 sta->plink_state = NL80211_PLINK_ESTAB;
716 spin_unlock_bh(&sta->lock); 738 spin_unlock_bh(&sta->lock);
717 mesh_plink_inc_estab_count(sdata); 739 mesh_plink_inc_estab_count(sdata);
718 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON); 740 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
@@ -727,13 +749,13 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
727 } 749 }
728 break; 750 break;
729 751
730 case PLINK_ESTAB: 752 case NL80211_PLINK_ESTAB:
731 switch (event) { 753 switch (event) {
732 case CLS_ACPT: 754 case CLS_ACPT:
733 reason = cpu_to_le16(MESH_CLOSE_RCVD); 755 reason = cpu_to_le16(MESH_CLOSE_RCVD);
734 sta->reason = reason; 756 sta->reason = reason;
735 deactivated = __mesh_plink_deactivate(sta); 757 deactivated = __mesh_plink_deactivate(sta);
736 sta->plink_state = PLINK_HOLDING; 758 sta->plink_state = NL80211_PLINK_HOLDING;
737 llid = sta->llid; 759 llid = sta->llid;
738 mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata)); 760 mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata));
739 spin_unlock_bh(&sta->lock); 761 spin_unlock_bh(&sta->lock);
@@ -753,7 +775,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
753 break; 775 break;
754 } 776 }
755 break; 777 break;
756 case PLINK_HOLDING: 778 case NL80211_PLINK_HOLDING:
757 switch (event) { 779 switch (event) {
758 case CLS_ACPT: 780 case CLS_ACPT:
759 if (del_timer(&sta->plink_timer)) 781 if (del_timer(&sta->plink_timer))
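Note on the mesh_plink.c changes: most of them are mechanical (the private plink_state enum gives way to nl80211_plink_state), plus new gating — peers must carry WLAN_STA_AUTH, and with IEEE80211_MESH_SEC_AUTHED set, peer allocation is delegated to userspace via cfg80211_notify_new_peer_candidate(). The OPN_ACPT/CNF_ACPT/CLS_ACPT transitions driven by the received frames are unchanged; below is a compilable toy model of them, using abbreviated state names rather than the kernel's types.

        #include <stdio.h>

        enum state { LISTEN, OPN_SNT, OPN_RCVD, CNF_RCVD, ESTAB, HOLDING };
        enum event { OPN_ACPT, CNF_ACPT, CLS_ACPT };

        static enum state step(enum state s, enum event ev)
        {
                if (ev == CLS_ACPT)             /* peer closed: tear down */
                        return s == LISTEN ? LISTEN : HOLDING;
                switch (s) {
                case LISTEN:
                        return ev == OPN_ACPT ? OPN_RCVD : s;
                case OPN_SNT:                   /* we sent the open first */
                        return ev == OPN_ACPT ? OPN_RCVD : CNF_RCVD;
                case OPN_RCVD:                  /* open seen, confirm pending */
                        return ev == CNF_ACPT ? ESTAB : s;
                case CNF_RCVD:                  /* confirm seen, open pending */
                        return ev == OPN_ACPT ? ESTAB : s;
                default:
                        return s;               /* ESTAB/HOLDING: no-op here */
                }
        }

        int main(void)
        {
                enum state s = LISTEN;

                s = step(s, OPN_ACPT);          /* -> OPN_RCVD */
                s = step(s, CNF_ACPT);          /* -> ESTAB */
                printf("established: %d\n", s == ESTAB);
                return 0;
        }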
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 64d92d5a7f40..4f6b2675e41d 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -90,20 +90,11 @@ enum rx_mgmt_action {
90 /* no action required */ 90 /* no action required */
91 RX_MGMT_NONE, 91 RX_MGMT_NONE,
92 92
93 /* caller must call cfg80211_send_rx_auth() */
94 RX_MGMT_CFG80211_AUTH,
95
96 /* caller must call cfg80211_send_rx_assoc() */
97 RX_MGMT_CFG80211_ASSOC,
98
99 /* caller must call cfg80211_send_deauth() */ 93 /* caller must call cfg80211_send_deauth() */
100 RX_MGMT_CFG80211_DEAUTH, 94 RX_MGMT_CFG80211_DEAUTH,
101 95
102 /* caller must call cfg80211_send_disassoc() */ 96 /* caller must call cfg80211_send_disassoc() */
103 RX_MGMT_CFG80211_DISASSOC, 97 RX_MGMT_CFG80211_DISASSOC,
104
105 /* caller must tell cfg80211 about internal error */
106 RX_MGMT_CFG80211_ASSOC_ERROR,
107}; 98};
108 99
109/* utils */ 100/* utils */
@@ -759,6 +750,8 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
759 dynamic_ps_enable_work); 750 dynamic_ps_enable_work);
760 struct ieee80211_sub_if_data *sdata = local->ps_sdata; 751 struct ieee80211_sub_if_data *sdata = local->ps_sdata;
761 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 752 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
753 unsigned long flags;
754 int q;
762 755
763 /* can only happen when PS was just disabled anyway */ 756 /* can only happen when PS was just disabled anyway */
764 if (!sdata) 757 if (!sdata)
@@ -767,18 +760,37 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
767 if (local->hw.conf.flags & IEEE80211_CONF_PS) 760 if (local->hw.conf.flags & IEEE80211_CONF_PS)
768 return; 761 return;
769 762
763 /*
 764 * transmission can be stopped by others, which leads to
 765 * dynamic_ps_timer expiry. Postpone the PS timer if we are
 766 * not actually idle.
767 */
768 spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
769 for (q = 0; q < local->hw.queues; q++) {
770 if (local->queue_stop_reasons[q]) {
771 spin_unlock_irqrestore(&local->queue_stop_reason_lock,
772 flags);
773 mod_timer(&local->dynamic_ps_timer, jiffies +
774 msecs_to_jiffies(
775 local->hw.conf.dynamic_ps_timeout));
776 return;
777 }
778 }
779 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
780
770 if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) && 781 if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) &&
771 (!(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED))) { 782 (!(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED))) {
772 netif_tx_stop_all_queues(sdata->dev); 783 netif_tx_stop_all_queues(sdata->dev);
773 /*
774 * Flush all the frames queued in the driver before
775 * going to power save
776 */
777 drv_flush(local, false);
778 ieee80211_send_nullfunc(local, sdata, 1);
779 784
780 /* Flush once again to get the tx status of nullfunc frame */ 785 if (drv_tx_frames_pending(local))
781 drv_flush(local, false); 786 mod_timer(&local->dynamic_ps_timer, jiffies +
787 msecs_to_jiffies(
788 local->hw.conf.dynamic_ps_timeout));
789 else {
790 ieee80211_send_nullfunc(local, sdata, 1);
791 /* Flush to get the tx status of nullfunc frame */
792 drv_flush(local, false);
793 }
782 } 794 }
783 795
784 if (!((local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) && 796 if (!((local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) &&
@@ -789,7 +801,7 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
789 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); 801 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
790 } 802 }
791 803
792 netif_tx_start_all_queues(sdata->dev); 804 netif_tx_wake_all_queues(sdata->dev);
793} 805}
794 806
795void ieee80211_dynamic_ps_timer(unsigned long data) 807void ieee80211_dynamic_ps_timer(unsigned long data)
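Note on the dynamic-PS hunk in mlme.c: entering power save is now conditional on being genuinely idle — if any queue has a stop reason, or drv_tx_frames_pending() reports frames still sitting in the driver, the timer is re-armed instead of dozing. A self-contained sketch of that decision; the queue count and field names are illustrative, not mac80211's.

        #include <stdbool.h>
        #include <stdio.h>

        #define N_QUEUES 4

        struct ps_state {
                unsigned long queue_stop_reasons[N_QUEUES];
                bool drv_tx_frames_pending; /* models drv_tx_frames_pending() */
        };

        /* true: enter power save; false: mod_timer() and try again later */
        static bool can_enter_ps(const struct ps_state *ps)
        {
                int q;

                for (q = 0; q < N_QUEUES; q++)
                        if (ps->queue_stop_reasons[q])
                                return false;   /* a queue is stopped: not idle */

                if (ps->drv_tx_frames_pending)
                        return false;           /* driver still draining frames */

                return true;    /* safe to send the nullfunc and flip CONF_PS */
        }

        int main(void)
        {
                struct ps_state ps = { .drv_tx_frames_pending = true };

                printf("enter PS: %d\n", can_enter_ps(&ps));    /* 0: postpone */
                return 0;
        }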
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index e37355193ed1..730778a2c90c 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -6,7 +6,7 @@
6#include "driver-ops.h" 6#include "driver-ops.h"
7#include "led.h" 7#include "led.h"
8 8
9int __ieee80211_suspend(struct ieee80211_hw *hw) 9int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
10{ 10{
11 struct ieee80211_local *local = hw_to_local(hw); 11 struct ieee80211_local *local = hw_to_local(hw);
12 struct ieee80211_sub_if_data *sdata; 12 struct ieee80211_sub_if_data *sdata;
@@ -14,12 +14,23 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
14 14
15 ieee80211_scan_cancel(local); 15 ieee80211_scan_cancel(local);
16 16
17 if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
18 mutex_lock(&local->sta_mtx);
19 list_for_each_entry(sta, &local->sta_list, list) {
20 set_sta_flags(sta, WLAN_STA_BLOCK_BA);
21 ieee80211_sta_tear_down_BA_sessions(sta, true);
22 }
23 mutex_unlock(&local->sta_mtx);
24 }
25
17 ieee80211_stop_queues_by_reason(hw, 26 ieee80211_stop_queues_by_reason(hw,
18 IEEE80211_QUEUE_STOP_REASON_SUSPEND); 27 IEEE80211_QUEUE_STOP_REASON_SUSPEND);
19 28
20 /* flush out all packets */ 29 /* flush out all packets */
21 synchronize_net(); 30 synchronize_net();
22 31
32 drv_flush(local, false);
33
23 local->quiescing = true; 34 local->quiescing = true;
24 /* make quiescing visible to timers everywhere */ 35 /* make quiescing visible to timers everywhere */
25 mb(); 36 mb();
@@ -36,6 +47,16 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
36 cancel_work_sync(&local->dynamic_ps_enable_work); 47 cancel_work_sync(&local->dynamic_ps_enable_work);
37 del_timer_sync(&local->dynamic_ps_timer); 48 del_timer_sync(&local->dynamic_ps_timer);
38 49
50 local->wowlan = wowlan && local->open_count;
51 if (local->wowlan) {
52 int err = drv_suspend(local, wowlan);
53 if (err) {
54 local->quiescing = false;
55 return err;
56 }
57 goto suspend;
58 }
59
39 /* disable keys */ 60 /* disable keys */
40 list_for_each_entry(sdata, &local->interfaces, list) 61 list_for_each_entry(sdata, &local->interfaces, list)
41 ieee80211_disable_keys(sdata); 62 ieee80211_disable_keys(sdata);
@@ -43,11 +64,6 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
43 /* tear down aggregation sessions and remove STAs */ 64 /* tear down aggregation sessions and remove STAs */
44 mutex_lock(&local->sta_mtx); 65 mutex_lock(&local->sta_mtx);
45 list_for_each_entry(sta, &local->sta_list, list) { 66 list_for_each_entry(sta, &local->sta_list, list) {
46 if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
47 set_sta_flags(sta, WLAN_STA_BLOCK_BA);
48 ieee80211_sta_tear_down_BA_sessions(sta, true);
49 }
50
51 if (sta->uploaded) { 67 if (sta->uploaded) {
52 sdata = sta->sdata; 68 sdata = sta->sdata;
53 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 69 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
@@ -98,6 +114,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
98 if (local->open_count) 114 if (local->open_count)
99 ieee80211_stop_device(local); 115 ieee80211_stop_device(local);
100 116
117 suspend:
101 local->suspended = true; 118 local->suspended = true;
102 /* need suspended to be visible before quiescing is false */ 119 /* need suspended to be visible before quiescing is false */
103 barrier(); 120 barrier();
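Note on the pm.c changes: suspend is reordered so BA sessions are torn down while the queues can still transmit, a drv_flush() drains the hardware, and WoWLAN-capable drivers get a drv_suspend() early exit that leaves keys and stations in place. The control flow, roughly, under stub names rather than the real driver ops:

        #include <stdbool.h>

        struct hw {
                bool ampdu, wowlan, open, quiescing, suspended;
        };

        static int drv_suspend_stub(struct hw *hw) { (void)hw; return 0; }

        static int suspend(struct hw *hw, bool wowlan_requested)
        {
                if (hw->ampdu) {
                        /* tear down BA sessions while TX still works */
                }
                /* stop queues, synchronize, drv_flush(): drain everything */
                hw->quiescing = true;

                hw->wowlan = wowlan_requested && hw->open;
                if (hw->wowlan) {
                        int err = drv_suspend_stub(hw); /* hand off to driver */

                        if (err) {
                                hw->quiescing = false;  /* abort the suspend */
                                return err;
                        }
                        goto done;      /* keys and stations stay in place */
                }
                /* ... disable keys, remove stations, stop the device ... */
        done:
                hw->suspended = true;
                return 0;
        }

        int main(void)
        {
                struct hw hw = { .ampdu = true, .open = true };

                return suspend(&hw, true);
        }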
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index 778c604d7939..8adac67395f7 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -417,8 +417,8 @@ minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband,
417 tx_time_single = mr->ack_time + mr->perfect_tx_time; 417 tx_time_single = mr->ack_time + mr->perfect_tx_time;
418 418
419 /* contention window */ 419 /* contention window */
420 tx_time_single += t_slot + min(cw, mp->cw_max); 420 tx_time_single += (t_slot * cw) >> 1;
421 cw = (cw << 1) | 1; 421 cw = min((cw << 1) | 1, mp->cw_max);
422 422
423 tx_time += tx_time_single; 423 tx_time += tx_time_single;
424 tx_time_cts += tx_time_single + mi->sp_ack_dur; 424 tx_time_cts += tx_time_single + mi->sp_ack_dur;
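Note on the rc80211_minstrel.c one-liner: the old code added t_slot plus a raw slot count to a microsecond total, while the new code charges the expected backoff time, (t_slot * cw) / 2, and clamps the doubling window at cw_max. A runnable check of the per-try figures, assuming the typical 9 us OFDM slot:

        #include <stdio.h>

        int main(void)
        {
                unsigned int t_slot = 9;        /* us, OFDM slot time */
                unsigned int cw = 15;           /* cw_min */
                unsigned int cw_max = 1023;
                unsigned int t;

                for (t = 0; t < 5; t++) {
                        /* E[backoff] = slot * cw / 2, as in the fixed code */
                        unsigned int ctime = (t_slot * cw) >> 1;

                        printf("try %u: cw=%4u  avg backoff=%5u us\n",
                               t, cw, ctime);
                        cw = (cw << 1) | 1;     /* 15 -> 31 -> 63 -> ... */
                        if (cw > cw_max)
                                cw = cw_max;
                }
                return 0;
        }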
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index c06aa3ac6b9d..333b5118be6d 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -464,6 +464,7 @@ minstrel_calc_retransmit(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
464 const struct mcs_group *group; 464 const struct mcs_group *group;
465 unsigned int tx_time, tx_time_rtscts, tx_time_data; 465 unsigned int tx_time, tx_time_rtscts, tx_time_data;
466 unsigned int cw = mp->cw_min; 466 unsigned int cw = mp->cw_min;
467 unsigned int ctime = 0;
467 unsigned int t_slot = 9; /* FIXME */ 468 unsigned int t_slot = 9; /* FIXME */
468 unsigned int ampdu_len = MINSTREL_TRUNC(mi->avg_ampdu_len); 469 unsigned int ampdu_len = MINSTREL_TRUNC(mi->avg_ampdu_len);
469 470
@@ -480,13 +481,27 @@ minstrel_calc_retransmit(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
480 481
481 group = &minstrel_mcs_groups[index / MCS_GROUP_RATES]; 482 group = &minstrel_mcs_groups[index / MCS_GROUP_RATES];
482 tx_time_data = group->duration[index % MCS_GROUP_RATES] * ampdu_len; 483 tx_time_data = group->duration[index % MCS_GROUP_RATES] * ampdu_len;
483 tx_time = 2 * (t_slot + mi->overhead + tx_time_data); 484
484 tx_time_rtscts = 2 * (t_slot + mi->overhead_rtscts + tx_time_data); 485 /* Contention time for first 2 tries */
486 ctime = (t_slot * cw) >> 1;
487 cw = min((cw << 1) | 1, mp->cw_max);
488 ctime += (t_slot * cw) >> 1;
489 cw = min((cw << 1) | 1, mp->cw_max);
490
491 /* Total TX time for data and Contention after first 2 tries */
492 tx_time = ctime + 2 * (mi->overhead + tx_time_data);
493 tx_time_rtscts = ctime + 2 * (mi->overhead_rtscts + tx_time_data);
494
495 /* See how many more tries we can fit inside segment size */
485 do { 496 do {
486 cw = (cw << 1) | 1; 497 /* Contention time for this try */
487 cw = min(cw, mp->cw_max); 498 ctime = (t_slot * cw) >> 1;
488 tx_time += cw + t_slot + mi->overhead; 499 cw = min((cw << 1) | 1, mp->cw_max);
489 tx_time_rtscts += cw + t_slot + mi->overhead_rtscts; 500
501 /* Total TX time after this try */
502 tx_time += ctime + mi->overhead + tx_time_data;
503 tx_time_rtscts += ctime + mi->overhead_rtscts + tx_time_data;
504
490 if (tx_time_rtscts < mp->segment_size) 505 if (tx_time_rtscts < mp->segment_size)
491 mr->retry_count_rtscts++; 506 mr->retry_count_rtscts++;
492 } while ((tx_time < mp->segment_size) && 507 } while ((tx_time < mp->segment_size) &&
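Note on the rc80211_minstrel_ht.c hunk: it applies the same contention correction and then counts how many (re)tries fit inside the per-frame segment budget. A standalone model of that loop; all microsecond figures below are invented for illustration.

        #include <stdio.h>

        static unsigned int grow_cw(unsigned int cw, unsigned int cw_max)
        {
                cw = (cw << 1) | 1;
                return cw > cw_max ? cw_max : cw;
        }

        int main(void)
        {
                unsigned int t_slot = 9, cw = 15, cw_max = 1023;
                unsigned int overhead = 100, tx_time_data = 300; /* fake us */
                unsigned int segment_size = 6000;                /* fake budget */
                unsigned int ctime, tx_time, tries = 2;

                /* contention + airtime for the first two tries */
                ctime = (t_slot * cw) >> 1;
                cw = grow_cw(cw, cw_max);
                ctime += (t_slot * cw) >> 1;
                cw = grow_cw(cw, cw_max);
                tx_time = ctime + 2 * (overhead + tx_time_data);

                /* keep adding tries while the total stays in the segment */
                do {
                        ctime = (t_slot * cw) >> 1;
                        cw = grow_cw(cw, cw_max);
                        tx_time += ctime + overhead + tx_time_data;
                        tries++;
                } while (tx_time < segment_size);

                printf("tries budgeted: %u (total %u us)\n", tries, tx_time);
                return 0;
        }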
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index c5d4530d8284..7fa8c6be7bf0 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -143,7 +143,8 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
143 if (status->flag & RX_FLAG_HT) { 143 if (status->flag & RX_FLAG_HT) {
144 /* 144 /*
145 * MCS information is a separate field in radiotap, 145 * MCS information is a separate field in radiotap,
146 * added below. 146 * added below. The byte here is needed as padding
147 * for the channel though, so initialise it to 0.
147 */ 148 */
148 *pos = 0; 149 *pos = 0;
149 } else { 150 } else {
@@ -403,11 +404,13 @@ ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
403 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 404 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
404 struct sk_buff *skb = rx->skb; 405 struct sk_buff *skb = rx->skb;
405 406
406 if (likely(!(status->rx_flags & IEEE80211_RX_IN_SCAN))) 407 if (likely(!(status->rx_flags & IEEE80211_RX_IN_SCAN) &&
408 !local->sched_scanning))
407 return RX_CONTINUE; 409 return RX_CONTINUE;
408 410
409 if (test_bit(SCAN_HW_SCANNING, &local->scanning) || 411 if (test_bit(SCAN_HW_SCANNING, &local->scanning) ||
410 test_bit(SCAN_SW_SCANNING, &local->scanning)) 412 test_bit(SCAN_SW_SCANNING, &local->scanning) ||
413 local->sched_scanning)
411 return ieee80211_scan_rx(rx->sdata, skb); 414 return ieee80211_scan_rx(rx->sdata, skb);
412 415
413 /* scanning finished during invoking of handlers */ 416 /* scanning finished during invoking of handlers */
@@ -487,22 +490,26 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
487 * establishment frame, beacon or probe, drop the frame. 490 * establishment frame, beacon or probe, drop the frame.
488 */ 491 */
489 492
490 if (!rx->sta || sta_plink_state(rx->sta) != PLINK_ESTAB) { 493 if (!rx->sta || sta_plink_state(rx->sta) != NL80211_PLINK_ESTAB) {
491 struct ieee80211_mgmt *mgmt; 494 struct ieee80211_mgmt *mgmt;
492 495
493 if (!ieee80211_is_mgmt(hdr->frame_control)) 496 if (!ieee80211_is_mgmt(hdr->frame_control))
494 return RX_DROP_MONITOR; 497 return RX_DROP_MONITOR;
495 498
496 if (ieee80211_is_action(hdr->frame_control)) { 499 if (ieee80211_is_action(hdr->frame_control)) {
500 u8 category;
497 mgmt = (struct ieee80211_mgmt *)hdr; 501 mgmt = (struct ieee80211_mgmt *)hdr;
498 if (mgmt->u.action.category != WLAN_CATEGORY_MESH_PLINK) 502 category = mgmt->u.action.category;
503 if (category != WLAN_CATEGORY_MESH_ACTION &&
504 category != WLAN_CATEGORY_SELF_PROTECTED)
499 return RX_DROP_MONITOR; 505 return RX_DROP_MONITOR;
500 return RX_CONTINUE; 506 return RX_CONTINUE;
501 } 507 }
502 508
503 if (ieee80211_is_probe_req(hdr->frame_control) || 509 if (ieee80211_is_probe_req(hdr->frame_control) ||
504 ieee80211_is_probe_resp(hdr->frame_control) || 510 ieee80211_is_probe_resp(hdr->frame_control) ||
505 ieee80211_is_beacon(hdr->frame_control)) 511 ieee80211_is_beacon(hdr->frame_control) ||
512 ieee80211_is_auth(hdr->frame_control))
506 return RX_CONTINUE; 513 return RX_CONTINUE;
507 514
508 return RX_DROP_MONITOR; 515 return RX_DROP_MONITOR;
@@ -650,7 +657,7 @@ static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
650 set_release_timer: 657 set_release_timer:
651 658
652 mod_timer(&tid_agg_rx->reorder_timer, 659 mod_timer(&tid_agg_rx->reorder_timer,
653 tid_agg_rx->reorder_time[j] + 660 tid_agg_rx->reorder_time[j] + 1 +
654 HT_RX_REORDER_BUF_TIMEOUT); 661 HT_RX_REORDER_BUF_TIMEOUT);
655 } else { 662 } else {
656 del_timer(&tid_agg_rx->reorder_timer); 663 del_timer(&tid_agg_rx->reorder_timer);
@@ -707,6 +714,8 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
707 /* 714 /*
708 * If the current MPDU is in the right order and nothing else 715 * If the current MPDU is in the right order and nothing else
709 * is stored we can process it directly, no need to buffer it. 716 * is stored we can process it directly, no need to buffer it.
717 * If it is first but there's something stored, we may be able
718 * to release frames after this one.
710 */ 719 */
711 if (mpdu_seq_num == tid_agg_rx->head_seq_num && 720 if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
712 tid_agg_rx->stored_mpdu_num == 0) { 721 tid_agg_rx->stored_mpdu_num == 0) {
@@ -1583,7 +1592,7 @@ ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
1583} 1592}
1584 1593
1585static int 1594static int
1586__ieee80211_data_to_8023(struct ieee80211_rx_data *rx) 1595__ieee80211_data_to_8023(struct ieee80211_rx_data *rx, bool *port_control)
1587{ 1596{
1588 struct ieee80211_sub_if_data *sdata = rx->sdata; 1597 struct ieee80211_sub_if_data *sdata = rx->sdata;
1589 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 1598 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
@@ -1591,6 +1600,7 @@ __ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
1591 struct ethhdr *ehdr; 1600 struct ethhdr *ehdr;
1592 int ret; 1601 int ret;
1593 1602
1603 *port_control = false;
1594 if (ieee80211_has_a4(hdr->frame_control) && 1604 if (ieee80211_has_a4(hdr->frame_control) &&
1595 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta) 1605 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta)
1596 return -1; 1606 return -1;
@@ -1609,11 +1619,13 @@ __ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
1609 return -1; 1619 return -1;
1610 1620
1611 ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type); 1621 ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type);
1612 if (ret < 0 || !check_port_control) 1622 if (ret < 0)
1613 return ret; 1623 return ret;
1614 1624
1615 ehdr = (struct ethhdr *) rx->skb->data; 1625 ehdr = (struct ethhdr *) rx->skb->data;
1616 if (ehdr->h_proto != rx->sdata->control_port_protocol) 1626 if (ehdr->h_proto == rx->sdata->control_port_protocol)
1627 *port_control = true;
1628 else if (check_port_control)
1617 return -1; 1629 return -1;
1618 1630
1619 return 0; 1631 return 0;
@@ -1771,7 +1783,7 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
1771 1783
1772 ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr, 1784 ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
1773 rx->sdata->vif.type, 1785 rx->sdata->vif.type,
1774 rx->local->hw.extra_tx_headroom); 1786 rx->local->hw.extra_tx_headroom, true);
1775 1787
1776 while (!skb_queue_empty(&frame_list)) { 1788 while (!skb_queue_empty(&frame_list)) {
1777 rx->skb = __skb_dequeue(&frame_list); 1789 rx->skb = __skb_dequeue(&frame_list);
@@ -1914,6 +1926,7 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
1914 struct net_device *dev = sdata->dev; 1926 struct net_device *dev = sdata->dev;
1915 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 1927 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1916 __le16 fc = hdr->frame_control; 1928 __le16 fc = hdr->frame_control;
1929 bool port_control;
1917 int err; 1930 int err;
1918 1931
1919 if (unlikely(!ieee80211_is_data(hdr->frame_control))) 1932 if (unlikely(!ieee80211_is_data(hdr->frame_control)))
@@ -1930,13 +1943,21 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
1930 sdata->vif.type == NL80211_IFTYPE_AP) 1943 sdata->vif.type == NL80211_IFTYPE_AP)
1931 return RX_DROP_MONITOR; 1944 return RX_DROP_MONITOR;
1932 1945
1933 err = __ieee80211_data_to_8023(rx); 1946 err = __ieee80211_data_to_8023(rx, &port_control);
1934 if (unlikely(err)) 1947 if (unlikely(err))
1935 return RX_DROP_UNUSABLE; 1948 return RX_DROP_UNUSABLE;
1936 1949
1937 if (!ieee80211_frame_allowed(rx, fc)) 1950 if (!ieee80211_frame_allowed(rx, fc))
1938 return RX_DROP_MONITOR; 1951 return RX_DROP_MONITOR;
1939 1952
1953 if (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1954 unlikely(port_control) && sdata->bss) {
1955 sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
1956 u.ap);
1957 dev = sdata->dev;
1958 rx->sdata = sdata;
1959 }
1960
1940 rx->skb->dev = dev; 1961 rx->skb->dev = dev;
1941 1962
1942 dev->stats.rx_packets++; 1963 dev->stats.rx_packets++;
@@ -2189,7 +2210,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
2189 goto handled; 2210 goto handled;
2190 } 2211 }
2191 break; 2212 break;
2192 case WLAN_CATEGORY_MESH_PLINK: 2213 case WLAN_CATEGORY_MESH_ACTION:
2193 if (!ieee80211_vif_is_mesh(&sdata->vif)) 2214 if (!ieee80211_vif_is_mesh(&sdata->vif))
2194 break; 2215 break;
2195 goto queue; 2216 goto queue;
@@ -2352,47 +2373,6 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
2352 return RX_QUEUED; 2373 return RX_QUEUED;
2353} 2374}
2354 2375
2355static void ieee80211_rx_michael_mic_report(struct ieee80211_hdr *hdr,
2356 struct ieee80211_rx_data *rx)
2357{
2358 int keyidx;
2359 unsigned int hdrlen;
2360
2361 hdrlen = ieee80211_hdrlen(hdr->frame_control);
2362 if (rx->skb->len >= hdrlen + 4)
2363 keyidx = rx->skb->data[hdrlen + 3] >> 6;
2364 else
2365 keyidx = -1;
2366
2367 if (!rx->sta) {
2368 /*
2369 * Some hardware seem to generate incorrect Michael MIC
2370 * reports; ignore them to avoid triggering countermeasures.
2371 */
2372 return;
2373 }
2374
2375 if (!ieee80211_has_protected(hdr->frame_control))
2376 return;
2377
2378 if (rx->sdata->vif.type == NL80211_IFTYPE_AP && keyidx) {
2379 /*
2380 * APs with pairwise keys should never receive Michael MIC
2381 * errors for non-zero keyidx because these are reserved for
2382 * group keys and only the AP is sending real multicast
2383 * frames in the BSS.
2384 */
2385 return;
2386 }
2387
2388 if (!ieee80211_is_data(hdr->frame_control) &&
2389 !ieee80211_is_auth(hdr->frame_control))
2390 return;
2391
2392 mac80211_ev_michael_mic_failure(rx->sdata, keyidx, hdr, NULL,
2393 GFP_ATOMIC);
2394}
2395
2396/* TODO: use IEEE80211_RX_FRAGMENTED */ 2376/* TODO: use IEEE80211_RX_FRAGMENTED */
2397static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx, 2377static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
2398 struct ieee80211_rate *rate) 2378 struct ieee80211_rate *rate)
@@ -2736,12 +2716,6 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
2736 if (!prepares) 2716 if (!prepares)
2737 return false; 2717 return false;
2738 2718
2739 if (status->flag & RX_FLAG_MMIC_ERROR) {
2740 if (status->rx_flags & IEEE80211_RX_RA_MATCH)
2741 ieee80211_rx_michael_mic_report(hdr, rx);
2742 return false;
2743 }
2744
2745 if (!consume) { 2719 if (!consume) {
2746 skb = skb_copy(skb, GFP_ATOMIC); 2720 skb = skb_copy(skb, GFP_ATOMIC);
2747 if (!skb) { 2721 if (!skb) {
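Note on the rx.c changes: __ieee80211_data_to_8023() gains a port_control out-parameter — frames carrying the connection's control port protocol (normally EAPOL, 0x888e) are flagged so the AP_VLAN path can hand them to the parent AP interface, while other frames on a control-port-only connection are still rejected. The contract in isolation, with standalone types rather than the rx-data structs:

        #include <stdbool.h>
        #include <stdint.h>
        #include <stdio.h>

        #define ETH_P_PAE 0x888e  /* EAPOL: the usual control port protocol */

        static int to_8023(uint16_t h_proto, uint16_t ctrl_proto,
                           bool check_port_control, bool *port_control)
        {
                *port_control = false;
                if (h_proto == ctrl_proto)
                        *port_control = true;   /* may reroute to the AP dev */
                else if (check_port_control)
                        return -1;      /* only control-port traffic allowed */
                return 0;
        }

        int main(void)
        {
                bool pc;

                to_8023(ETH_P_PAE, ETH_P_PAE, false, &pc);
                printf("EAPOL flagged: %d\n", pc);      /* 1 */
                return 0;
        }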
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 489b6ad200d4..d20046b5d8f4 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -15,6 +15,7 @@
15#include <linux/if_arp.h> 15#include <linux/if_arp.h>
16#include <linux/rtnetlink.h> 16#include <linux/rtnetlink.h>
17#include <linux/pm_qos_params.h> 17#include <linux/pm_qos_params.h>
18#include <linux/slab.h>
18#include <net/sch_generic.h> 19#include <net/sch_generic.h>
19#include <linux/slab.h> 20#include <linux/slab.h>
20#include <net/mac80211.h> 21#include <net/mac80211.h>
@@ -170,7 +171,7 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
170 return RX_CONTINUE; 171 return RX_CONTINUE;
171 172
172 if (skb->len < 24) 173 if (skb->len < 24)
173 return RX_DROP_MONITOR; 174 return RX_CONTINUE;
174 175
175 presp = ieee80211_is_probe_resp(fc); 176 presp = ieee80211_is_probe_resp(fc);
176 if (presp) { 177 if (presp) {
@@ -850,3 +851,122 @@ void ieee80211_scan_cancel(struct ieee80211_local *local)
850 } 851 }
851 mutex_unlock(&local->mtx); 852 mutex_unlock(&local->mtx);
852} 853}
854
855int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
856 struct cfg80211_sched_scan_request *req)
857{
858 struct ieee80211_local *local = sdata->local;
859 int ret, i;
860
861 mutex_lock(&sdata->local->mtx);
862
863 if (local->sched_scanning) {
864 ret = -EBUSY;
865 goto out;
866 }
867
868 if (!local->ops->sched_scan_start) {
869 ret = -ENOTSUPP;
870 goto out;
871 }
872
873 for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
874 local->sched_scan_ies.ie[i] = kzalloc(2 +
875 IEEE80211_MAX_SSID_LEN +
876 local->scan_ies_len,
877 GFP_KERNEL);
878 if (!local->sched_scan_ies.ie[i]) {
879 ret = -ENOMEM;
880 goto out_free;
881 }
882
883 local->sched_scan_ies.len[i] =
884 ieee80211_build_preq_ies(local,
885 local->sched_scan_ies.ie[i],
886 req->ie, req->ie_len, i,
887 (u32) -1, 0);
888 }
889
890 ret = drv_sched_scan_start(local, sdata, req,
891 &local->sched_scan_ies);
892 if (ret == 0) {
893 local->sched_scanning = true;
894 goto out;
895 }
896
897out_free:
898 while (i > 0)
899 kfree(local->sched_scan_ies.ie[--i]);
900out:
901 mutex_unlock(&sdata->local->mtx);
902 return ret;
903}
904
905int ieee80211_request_sched_scan_stop(struct ieee80211_sub_if_data *sdata)
906{
907 struct ieee80211_local *local = sdata->local;
908 int ret = 0, i;
909
910 mutex_lock(&sdata->local->mtx);
911
912 if (!local->ops->sched_scan_stop) {
913 ret = -ENOTSUPP;
914 goto out;
915 }
916
917 if (local->sched_scanning) {
918 for (i = 0; i < IEEE80211_NUM_BANDS; i++)
919 kfree(local->sched_scan_ies.ie[i]);
920
921 drv_sched_scan_stop(local, sdata);
922 local->sched_scanning = false;
923 }
924out:
925 mutex_unlock(&sdata->local->mtx);
926
927 return ret;
928}
929
930void ieee80211_sched_scan_results(struct ieee80211_hw *hw)
931{
932 struct ieee80211_local *local = hw_to_local(hw);
933
934 trace_api_sched_scan_results(local);
935
936 cfg80211_sched_scan_results(hw->wiphy);
937}
938EXPORT_SYMBOL(ieee80211_sched_scan_results);
939
940void ieee80211_sched_scan_stopped_work(struct work_struct *work)
941{
942 struct ieee80211_local *local =
943 container_of(work, struct ieee80211_local,
944 sched_scan_stopped_work);
945 int i;
946
947 mutex_lock(&local->mtx);
948
949 if (!local->sched_scanning) {
950 mutex_unlock(&local->mtx);
951 return;
952 }
953
954 for (i = 0; i < IEEE80211_NUM_BANDS; i++)
955 kfree(local->sched_scan_ies.ie[i]);
956
957 local->sched_scanning = false;
958
959 mutex_unlock(&local->mtx);
960
961 cfg80211_sched_scan_stopped(local->hw.wiphy);
962}
963
964void ieee80211_sched_scan_stopped(struct ieee80211_hw *hw)
965{
966 struct ieee80211_local *local = hw_to_local(hw);
967
968 trace_api_sched_scan_stopped(local);
969
970 ieee80211_queue_work(&local->hw, &local->sched_scan_stopped_work);
971}
972EXPORT_SYMBOL(ieee80211_sched_scan_stopped);
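Note on the new scheduled-scan entry points in scan.c: they allocate one probe-request IE buffer per band and, when an allocation fails mid-loop, unwind exactly the buffers created so far. (Incidentally, the include hunk adds a second #include <linux/slab.h>; one already sits two lines below.) The unwind idiom, reduced to its essentials with placeholder band count and sizes:

        #include <stdlib.h>

        #define NUM_BANDS 2

        static int alloc_band_ies(char *ie[NUM_BANDS], size_t len)
        {
                int i;

                for (i = 0; i < NUM_BANDS; i++) {
                        ie[i] = calloc(1, len);
                        if (!ie[i])
                                goto out_free;  /* 0..i-1 hold live buffers */
                }
                return 0;

        out_free:
                while (i > 0)
                        free(ie[--i]);  /* free only what was allocated */
                return -1;
        }

        int main(void)
        {
                char *ie[NUM_BANDS];

                return alloc_band_ies(ie, 64);
        }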
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 13e8c30adf01..b83870bf60fa 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -67,7 +67,8 @@ static int sta_info_hash_del(struct ieee80211_local *local,
67{ 67{
68 struct sta_info *s; 68 struct sta_info *s;
69 69
70 s = local->sta_hash[STA_HASH(sta->sta.addr)]; 70 s = rcu_dereference_protected(local->sta_hash[STA_HASH(sta->sta.addr)],
71 lockdep_is_held(&local->sta_lock));
71 if (!s) 72 if (!s)
72 return -ENOENT; 73 return -ENOENT;
73 if (s == sta) { 74 if (s == sta) {
@@ -76,9 +77,11 @@ static int sta_info_hash_del(struct ieee80211_local *local,
76 return 0; 77 return 0;
77 } 78 }
78 79
79 while (s->hnext && s->hnext != sta) 80 while (rcu_access_pointer(s->hnext) &&
80 s = s->hnext; 81 rcu_access_pointer(s->hnext) != sta)
81 if (s->hnext) { 82 s = rcu_dereference_protected(s->hnext,
83 lockdep_is_held(&local->sta_lock));
84 if (rcu_access_pointer(s->hnext)) {
82 rcu_assign_pointer(s->hnext, sta->hnext); 85 rcu_assign_pointer(s->hnext, sta->hnext);
83 return 0; 86 return 0;
84 } 87 }
@@ -228,6 +231,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
228{ 231{
229 struct ieee80211_local *local = sdata->local; 232 struct ieee80211_local *local = sdata->local;
230 struct sta_info *sta; 233 struct sta_info *sta;
234 struct timespec uptime;
231 int i; 235 int i;
232 236
233 sta = kzalloc(sizeof(*sta) + local->hw.sta_data_size, gfp); 237 sta = kzalloc(sizeof(*sta) + local->hw.sta_data_size, gfp);
@@ -245,6 +249,8 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
245 sta->sdata = sdata; 249 sta->sdata = sdata;
246 sta->last_rx = jiffies; 250 sta->last_rx = jiffies;
247 251
252 do_posix_clock_monotonic_gettime(&uptime);
253 sta->last_connected = uptime.tv_sec;
248 ewma_init(&sta->avg_signal, 1024, 8); 254 ewma_init(&sta->avg_signal, 1024, 8);
249 255
250 if (sta_prepare_rate_control(local, sta, gfp)) { 256 if (sta_prepare_rate_control(local, sta, gfp)) {
@@ -271,7 +277,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
271#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ 277#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
272 278
273#ifdef CONFIG_MAC80211_MESH 279#ifdef CONFIG_MAC80211_MESH
274 sta->plink_state = PLINK_LISTEN; 280 sta->plink_state = NL80211_PLINK_LISTEN;
275 init_timer(&sta->plink_timer); 281 init_timer(&sta->plink_timer);
276#endif 282#endif
277 283
@@ -584,7 +590,6 @@ static bool sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
584{ 590{
585 unsigned long flags; 591 unsigned long flags;
586 struct sk_buff *skb; 592 struct sk_buff *skb;
587 struct ieee80211_sub_if_data *sdata;
588 593
589 if (skb_queue_empty(&sta->ps_tx_buf)) 594 if (skb_queue_empty(&sta->ps_tx_buf))
590 return false; 595 return false;
@@ -601,7 +606,6 @@ static bool sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
601 if (!skb) 606 if (!skb)
602 break; 607 break;
603 608
604 sdata = sta->sdata;
605 local->total_ps_buffered--; 609 local->total_ps_buffered--;
606#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 610#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
607 printk(KERN_DEBUG "Buffered frame expired (STA %pM)\n", 611 printk(KERN_DEBUG "Buffered frame expired (STA %pM)\n",
@@ -609,7 +613,8 @@ static bool sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
609#endif 613#endif
610 dev_kfree_skb(skb); 614 dev_kfree_skb(skb);
611 615
612 if (skb_queue_empty(&sta->ps_tx_buf)) 616 if (skb_queue_empty(&sta->ps_tx_buf) &&
617 !test_sta_flags(sta, WLAN_STA_PS_DRIVER_BUF))
613 sta_info_clear_tim_bit(sta); 618 sta_info_clear_tim_bit(sta);
614 } 619 }
615 620
@@ -650,10 +655,12 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
650 if (ret) 655 if (ret)
651 return ret; 656 return ret;
652 657
658 mutex_lock(&local->key_mtx);
653 for (i = 0; i < NUM_DEFAULT_KEYS; i++) 659 for (i = 0; i < NUM_DEFAULT_KEYS; i++)
654 ieee80211_key_free(local, sta->gtk[i]); 660 __ieee80211_key_free(key_mtx_dereference(local, sta->gtk[i]));
655 if (sta->ptk) 661 if (sta->ptk)
656 ieee80211_key_free(local, sta->ptk); 662 __ieee80211_key_free(key_mtx_dereference(local, sta->ptk));
663 mutex_unlock(&local->key_mtx);
657 664
658 sta->dead = true; 665 sta->dead = true;
659 666
@@ -698,6 +705,8 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
698#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ 705#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
699 cancel_work_sync(&sta->drv_unblock_wk); 706 cancel_work_sync(&sta->drv_unblock_wk);
700 707
708 cfg80211_del_sta(sdata->dev, sta->sta.addr, GFP_KERNEL);
709
701 rate_control_remove_sta_debugfs(sta); 710 rate_control_remove_sta_debugfs(sta);
702 ieee80211_sta_debugfs_remove(sta); 711 ieee80211_sta_debugfs_remove(sta);
703 712
@@ -766,9 +775,8 @@ static void sta_info_cleanup(unsigned long data)
766 if (!timer_needed) 775 if (!timer_needed)
767 return; 776 return;
768 777
769 local->sta_cleanup.expires = 778 mod_timer(&local->sta_cleanup,
770 round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL); 779 round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL));
771 add_timer(&local->sta_cleanup);
772} 780}
773 781
774void sta_info_init(struct ieee80211_local *local) 782void sta_info_init(struct ieee80211_local *local)
@@ -781,14 +789,6 @@ void sta_info_init(struct ieee80211_local *local)
781 789
782 setup_timer(&local->sta_cleanup, sta_info_cleanup, 790 setup_timer(&local->sta_cleanup, sta_info_cleanup,
783 (unsigned long)local); 791 (unsigned long)local);
784 local->sta_cleanup.expires =
785 round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL);
786}
787
788int sta_info_start(struct ieee80211_local *local)
789{
790 add_timer(&local->sta_cleanup);
791 return 0;
792} 792}
793 793
794void sta_info_stop(struct ieee80211_local *local) 794void sta_info_stop(struct ieee80211_local *local)
@@ -900,6 +900,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
900 struct ieee80211_local *local = sdata->local; 900 struct ieee80211_local *local = sdata->local;
901 int sent, buffered; 901 int sent, buffered;
902 902
903 clear_sta_flags(sta, WLAN_STA_PS_DRIVER_BUF);
903 if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS)) 904 if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS))
904 drv_sta_notify(local, sdata, STA_NOTIFY_AWAKE, &sta->sta); 905 drv_sta_notify(local, sdata, STA_NOTIFY_AWAKE, &sta->sta);
905 906
@@ -992,3 +993,12 @@ void ieee80211_sta_block_awake(struct ieee80211_hw *hw,
992 ieee80211_queue_work(hw, &sta->drv_unblock_wk); 993 ieee80211_queue_work(hw, &sta->drv_unblock_wk);
993} 994}
994EXPORT_SYMBOL(ieee80211_sta_block_awake); 995EXPORT_SYMBOL(ieee80211_sta_block_awake);
996
997void ieee80211_sta_set_tim(struct ieee80211_sta *pubsta)
998{
999 struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
1000
1001 set_sta_flags(sta, WLAN_STA_PS_DRIVER_BUF);
1002 sta_info_set_tim_bit(sta);
1003}
1004EXPORT_SYMBOL(ieee80211_sta_set_tim);
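Note on sta_info.c: sta_info_hash_del() now spells out its RCU discipline — reads of hnext under sta_lock go through rcu_dereference_protected()/rcu_access_pointer() so lockdep and sparse can verify them. Stripped of those annotations, the unlink is an ordinary singly-linked-chain walk:

        #include <stddef.h>

        struct sta {
                struct sta *hnext;              /* __rcu in the kernel */
        };

        static int hash_del(struct sta **head, struct sta *sta)
        {
                struct sta *s = *head;

                if (!s)
                        return -1;              /* -ENOENT in the kernel */
                if (s == sta) {
                        *head = sta->hnext;     /* kernel: rcu_assign_pointer */
                        return 0;
                }
                while (s->hnext && s->hnext != sta)
                        s = s->hnext;   /* kernel: rcu_dereference_protected */
                if (s->hnext) {
                        s->hnext = sta->hnext;  /* single-store unlink */
                        return 0;
                }
                return -1;
        }

        int main(void)
        {
                struct sta a = { 0 }, b = { 0 };
                struct sta *head = &a;

                a.hnext = &b;
                return hash_del(&head, &b);     /* 0: b unlinked */
        }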
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index b2f95966c7f4..c6ae8718bd57 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -43,6 +43,8 @@
43 * be in the queues 43 * be in the queues
44 * @WLAN_STA_PSPOLL: Station sent PS-poll while driver was keeping 44 * @WLAN_STA_PSPOLL: Station sent PS-poll while driver was keeping
45 * station in power-save mode, reply when the driver unblocks. 45 * station in power-save mode, reply when the driver unblocks.
46 * @WLAN_STA_PS_DRIVER_BUF: Station has frames pending in driver internal
47 * buffers. Automatically cleared on station wake-up.
46 */ 48 */
47enum ieee80211_sta_info_flags { 49enum ieee80211_sta_info_flags {
48 WLAN_STA_AUTH = 1<<0, 50 WLAN_STA_AUTH = 1<<0,
@@ -58,6 +60,7 @@ enum ieee80211_sta_info_flags {
58 WLAN_STA_BLOCK_BA = 1<<11, 60 WLAN_STA_BLOCK_BA = 1<<11,
59 WLAN_STA_PS_DRIVER = 1<<12, 61 WLAN_STA_PS_DRIVER = 1<<12,
60 WLAN_STA_PSPOLL = 1<<13, 62 WLAN_STA_PSPOLL = 1<<13,
63 WLAN_STA_PS_DRIVER_BUF = 1<<14,
61}; 64};
62 65
63#define STA_TID_NUM 16 66#define STA_TID_NUM 16
@@ -149,6 +152,7 @@ struct tid_ampdu_rx {
149 * 152 *
150 * @tid_rx: aggregation info for Rx per TID -- RCU protected 153 * @tid_rx: aggregation info for Rx per TID -- RCU protected
151 * @tid_tx: aggregation info for Tx per TID 154 * @tid_tx: aggregation info for Tx per TID
155 * @tid_start_tx: sessions where start was requested
152 * @addba_req_num: number of times addBA request has been sent. 156 * @addba_req_num: number of times addBA request has been sent.
153 * @dialog_token_allocator: dialog token enumerator for each new session; 157 * @dialog_token_allocator: dialog token enumerator for each new session;
154 * @work: work struct for starting/stopping aggregation 158 * @work: work struct for starting/stopping aggregation
@@ -160,40 +164,18 @@ struct tid_ampdu_rx {
160struct sta_ampdu_mlme { 164struct sta_ampdu_mlme {
161 struct mutex mtx; 165 struct mutex mtx;
162 /* rx */ 166 /* rx */
163 struct tid_ampdu_rx *tid_rx[STA_TID_NUM]; 167 struct tid_ampdu_rx __rcu *tid_rx[STA_TID_NUM];
164 unsigned long tid_rx_timer_expired[BITS_TO_LONGS(STA_TID_NUM)]; 168 unsigned long tid_rx_timer_expired[BITS_TO_LONGS(STA_TID_NUM)];
165 /* tx */ 169 /* tx */
166 struct work_struct work; 170 struct work_struct work;
167 struct tid_ampdu_tx *tid_tx[STA_TID_NUM]; 171 struct tid_ampdu_tx __rcu *tid_tx[STA_TID_NUM];
172 struct tid_ampdu_tx *tid_start_tx[STA_TID_NUM];
168 u8 addba_req_num[STA_TID_NUM]; 173 u8 addba_req_num[STA_TID_NUM];
169 u8 dialog_token_allocator; 174 u8 dialog_token_allocator;
170}; 175};
171 176
172 177
173/** 178/**
174 * enum plink_state - state of a mesh peer link finite state machine
175 *
176 * @PLINK_LISTEN: initial state, considered the implicit state of non existent
177 * mesh peer links
178 * @PLINK_OPN_SNT: mesh plink open frame has been sent to this mesh peer
179 * @PLINK_OPN_RCVD: mesh plink open frame has been received from this mesh peer
180 * @PLINK_CNF_RCVD: mesh plink confirm frame has been received from this mesh
181 * peer
182 * @PLINK_ESTAB: mesh peer link is established
183 * @PLINK_HOLDING: mesh peer link is being closed or cancelled
184 * @PLINK_BLOCKED: all frames transmitted from this mesh plink are discarded
185 */
186enum plink_state {
187 PLINK_LISTEN,
188 PLINK_OPN_SNT,
189 PLINK_OPN_RCVD,
190 PLINK_CNF_RCVD,
191 PLINK_ESTAB,
192 PLINK_HOLDING,
193 PLINK_BLOCKED
194};
195
196/**
197 * struct sta_info - STA information 179 * struct sta_info - STA information
198 * 180 *
199 * This structure collects information about a station that 181 * This structure collects information about a station that
@@ -226,6 +208,7 @@ enum plink_state {
226 * @rx_bytes: Number of bytes received from this STA 208 * @rx_bytes: Number of bytes received from this STA
227 * @wep_weak_iv_count: number of weak WEP IVs received from this station 209 * @wep_weak_iv_count: number of weak WEP IVs received from this station
228 * @last_rx: time (in jiffies) when last frame was received from this STA 210 * @last_rx: time (in jiffies) when last frame was received from this STA
211 * @last_connected: time (in seconds) when a station got connected
229 * @num_duplicates: number of duplicate frames received from this STA 212 * @num_duplicates: number of duplicate frames received from this STA
230 * @rx_fragments: number of received MPDUs 213 * @rx_fragments: number of received MPDUs
231 * @rx_dropped: number of dropped MPDUs from this STA 214 * @rx_dropped: number of dropped MPDUs from this STA
@@ -260,11 +243,11 @@ enum plink_state {
260struct sta_info { 243struct sta_info {
261 /* General information, mostly static */ 244 /* General information, mostly static */
262 struct list_head list; 245 struct list_head list;
263 struct sta_info *hnext; 246 struct sta_info __rcu *hnext;
264 struct ieee80211_local *local; 247 struct ieee80211_local *local;
265 struct ieee80211_sub_if_data *sdata; 248 struct ieee80211_sub_if_data *sdata;
266 struct ieee80211_key *gtk[NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS]; 249 struct ieee80211_key __rcu *gtk[NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS];
267 struct ieee80211_key *ptk; 250 struct ieee80211_key __rcu *ptk;
268 struct rate_control_ref *rate_ctrl; 251 struct rate_control_ref *rate_ctrl;
269 void *rate_ctrl_priv; 252 void *rate_ctrl_priv;
270 spinlock_t lock; 253 spinlock_t lock;
@@ -295,6 +278,7 @@ struct sta_info {
295 unsigned long rx_packets, rx_bytes; 278 unsigned long rx_packets, rx_bytes;
296 unsigned long wep_weak_iv_count; 279 unsigned long wep_weak_iv_count;
297 unsigned long last_rx; 280 unsigned long last_rx;
281 long last_connected;
298 unsigned long num_duplicates; 282 unsigned long num_duplicates;
299 unsigned long rx_fragments; 283 unsigned long rx_fragments;
300 unsigned long rx_dropped; 284 unsigned long rx_dropped;
@@ -334,7 +318,7 @@ struct sta_info {
334 u8 plink_retries; 318 u8 plink_retries;
335 bool ignore_plink_timer; 319 bool ignore_plink_timer;
336 bool plink_timer_was_running; 320 bool plink_timer_was_running;
337 enum plink_state plink_state; 321 enum nl80211_plink_state plink_state;
338 u32 plink_timeout; 322 u32 plink_timeout;
339 struct timer_list plink_timer; 323 struct timer_list plink_timer;
340#endif 324#endif
@@ -352,12 +336,12 @@ struct sta_info {
352 struct ieee80211_sta sta; 336 struct ieee80211_sta sta;
353}; 337};
354 338
355static inline enum plink_state sta_plink_state(struct sta_info *sta) 339static inline enum nl80211_plink_state sta_plink_state(struct sta_info *sta)
356{ 340{
357#ifdef CONFIG_MAC80211_MESH 341#ifdef CONFIG_MAC80211_MESH
358 return sta->plink_state; 342 return sta->plink_state;
359#endif 343#endif
360 return PLINK_LISTEN; 344 return NL80211_PLINK_LISTEN;
361} 345}
362 346
363static inline void set_sta_flags(struct sta_info *sta, const u32 flags) 347static inline void set_sta_flags(struct sta_info *sta, const u32 flags)
@@ -416,7 +400,16 @@ static inline u32 get_sta_flags(struct sta_info *sta)
416 return ret; 400 return ret;
417} 401}
418 402
403void ieee80211_assign_tid_tx(struct sta_info *sta, int tid,
404 struct tid_ampdu_tx *tid_tx);
419 405
406static inline struct tid_ampdu_tx *
407rcu_dereference_protected_tid_tx(struct sta_info *sta, int tid)
408{
409 return rcu_dereference_protected(sta->ampdu_mlme.tid_tx[tid],
410 lockdep_is_held(&sta->lock) ||
411 lockdep_is_held(&sta->ampdu_mlme.mtx));
412}
420 413
421#define STA_HASH_SIZE 256 414#define STA_HASH_SIZE 256
422#define STA_HASH(sta) (sta[5]) 415#define STA_HASH(sta) (sta[5])
@@ -497,7 +490,6 @@ void sta_info_set_tim_bit(struct sta_info *sta);
497void sta_info_clear_tim_bit(struct sta_info *sta); 490void sta_info_clear_tim_bit(struct sta_info *sta);
498 491
499void sta_info_init(struct ieee80211_local *local); 492void sta_info_init(struct ieee80211_local *local);
500int sta_info_start(struct ieee80211_local *local);
501void sta_info_stop(struct ieee80211_local *local); 493void sta_info_stop(struct ieee80211_local *local);
502int sta_info_flush(struct ieee80211_local *local, 494int sta_info_flush(struct ieee80211_local *local,
503 struct ieee80211_sub_if_data *sdata); 495 struct ieee80211_sub_if_data *sdata);
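A minimal sketch, assuming mac80211's internal headers, of how the __rcu-annotated tid_tx slots above are intended to be accessed: fast-path readers pair rcu_read_lock() with rcu_dereference(), while writers holding sta->lock or ampdu_mlme.mtx go through the new lockdep-checked helper. The function names here are hypothetical.

#include "sta_info.h"	/* mac80211-internal; provides struct sta_info */

static void example_tid_tx_reader(struct sta_info *sta, int tid)
{
	struct tid_ampdu_tx *tid_tx;

	rcu_read_lock();
	tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
	if (tid_tx) {
		/* tid_tx stays valid for this read-side section only */
	}
	rcu_read_unlock();
}

static void example_tid_tx_writer(struct sta_info *sta, int tid,
				  struct tid_ampdu_tx *new_tx)
{
	lockdep_assert_held(&sta->ampdu_mlme.mtx);
	/* ieee80211_assign_tid_tx() presumably publishes the pointer
	 * with rcu_assign_pointer() */
	ieee80211_assign_tid_tx(sta, tid, new_tx);
}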
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index b936dd29e92b..1658efaa2e8e 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -189,16 +189,19 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
189 bool acked; 189 bool acked;
190 190
191 for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { 191 for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
192 /* the HW cannot have attempted that rate */ 192 if (info->status.rates[i].idx < 0) {
193 if (i >= hw->max_report_rates) { 193 break;
194 } else if (i >= hw->max_report_rates) {
195 /* the HW cannot have attempted that rate */
194 info->status.rates[i].idx = -1; 196 info->status.rates[i].idx = -1;
195 info->status.rates[i].count = 0; 197 info->status.rates[i].count = 0;
196 } else if (info->status.rates[i].idx >= 0) { 198 break;
197 rates_idx = i;
198 } 199 }
199 200
200 retry_count += info->status.rates[i].count; 201 retry_count += info->status.rates[i].count;
201 } 202 }
203 rates_idx = i - 1;
204
202 if (retry_count < 0) 205 if (retry_count < 0)
203 retry_count = 0; 206 retry_count = 0;
204 207
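A standalone, userspace-only rendering of the reworked loop above (the max_report_rates branch is omitted): rates_idx lands on the last slot the hardware actually reported, and retries stop accumulating at the first idx < 0 entry.

#include <stdio.h>

struct rate { int idx; int count; };

int main(void)
{
	/* hypothetical status report: two attempted rates, then the
	 * -1 terminator */
	struct rate rates[4] = { { 4, 2 }, { 2, 1 }, { -1, 0 }, { -1, 0 } };
	int i, retry_count = 0, rates_idx;

	for (i = 0; i < 4; i++) {
		if (rates[i].idx < 0)
			break;
		retry_count += rates[i].count;
	}
	rates_idx = i - 1;	/* last valid entry, here 1 */

	printf("rates_idx=%d retries=%d\n", rates_idx, retry_count);
	return 0;
}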
@@ -443,3 +446,11 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
443 dev_kfree_skb(skb); 446 dev_kfree_skb(skb);
444} 447}
445EXPORT_SYMBOL(ieee80211_tx_status); 448EXPORT_SYMBOL(ieee80211_tx_status);
449
450void ieee80211_report_low_ack(struct ieee80211_sta *pubsta, u32 num_packets)
451{
452 struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
453 cfg80211_cqm_pktloss_notify(sta->sdata->dev, sta->sta.addr,
454 num_packets, GFP_ATOMIC);
455}
456EXPORT_SYMBOL(ieee80211_report_low_ack);
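One way a driver might use the new export, sketched under the assumption that it counts consecutive ACK failures itself; the threshold and the counter here are made up for illustration (a real driver would keep per-station state rather than a static).

static void example_tx_status(struct ieee80211_sta *pubsta, bool acked)
{
	static unsigned int failed;	/* illustration only */

	if (acked) {
		failed = 0;
		return;
	}
	if (++failed >= 16) {		/* made-up threshold */
		ieee80211_report_low_ack(pubsta, failed);
		failed = 0;
	}
}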
diff --git a/net/mac80211/tkip.c b/net/mac80211/tkip.c
index e840c9cd46db..757e4eb2baf7 100644
--- a/net/mac80211/tkip.c
+++ b/net/mac80211/tkip.c
@@ -202,7 +202,7 @@ EXPORT_SYMBOL(ieee80211_get_tkip_key);
202 * @payload_len is the length of payload (_not_ including IV/ICV length). 202 * @payload_len is the length of payload (_not_ including IV/ICV length).
203 * @ta is the transmitter address. 203 * @ta is the transmitter address.
204 */ 204 */
205int ieee80211_tkip_encrypt_data(struct crypto_blkcipher *tfm, 205int ieee80211_tkip_encrypt_data(struct crypto_cipher *tfm,
206 struct ieee80211_key *key, 206 struct ieee80211_key *key,
207 u8 *pos, size_t payload_len, u8 *ta) 207 u8 *pos, size_t payload_len, u8 *ta)
208{ 208{
@@ -223,7 +223,7 @@ int ieee80211_tkip_encrypt_data(struct crypto_blkcipher *tfm,
223 * beginning of the buffer containing IEEE 802.11 header payload, i.e., 223 * beginning of the buffer containing IEEE 802.11 header payload, i.e.,
224 * including IV, Ext. IV, real data, Michael MIC, ICV. @payload_len is the 224 * including IV, Ext. IV, real data, Michael MIC, ICV. @payload_len is the
225 * length of payload, including IV, Ext. IV, MIC, ICV. */ 225 * length of payload, including IV, Ext. IV, MIC, ICV. */
226int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm, 226int ieee80211_tkip_decrypt_data(struct crypto_cipher *tfm,
227 struct ieee80211_key *key, 227 struct ieee80211_key *key,
228 u8 *payload, size_t payload_len, u8 *ta, 228 u8 *payload, size_t payload_len, u8 *ta,
229 u8 *ra, int only_iv, int queue, 229 u8 *ra, int only_iv, int queue,
diff --git a/net/mac80211/tkip.h b/net/mac80211/tkip.h
index 7e83dee976fa..1cab9c86978f 100644
--- a/net/mac80211/tkip.h
+++ b/net/mac80211/tkip.h
@@ -15,7 +15,7 @@
15 15
16u8 *ieee80211_tkip_add_iv(u8 *pos, struct ieee80211_key *key, u16 iv16); 16u8 *ieee80211_tkip_add_iv(u8 *pos, struct ieee80211_key *key, u16 iv16);
17 17
18int ieee80211_tkip_encrypt_data(struct crypto_blkcipher *tfm, 18int ieee80211_tkip_encrypt_data(struct crypto_cipher *tfm,
19 struct ieee80211_key *key, 19 struct ieee80211_key *key,
20 u8 *pos, size_t payload_len, u8 *ta); 20 u8 *pos, size_t payload_len, u8 *ta);
21enum { 21enum {
@@ -24,7 +24,7 @@ enum {
24 TKIP_DECRYPT_INVALID_KEYIDX = -2, 24 TKIP_DECRYPT_INVALID_KEYIDX = -2,
25 TKIP_DECRYPT_REPLAY = -3, 25 TKIP_DECRYPT_REPLAY = -3,
26}; 26};
27int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm, 27int ieee80211_tkip_decrypt_data(struct crypto_cipher *tfm,
28 struct ieee80211_key *key, 28 struct ieee80211_key *key,
29 u8 *payload, size_t payload_len, u8 *ta, 29 u8 *payload, size_t payload_len, u8 *ta,
30 u8 *ra, int only_iv, int queue, 30 u8 *ra, int only_iv, int queue,
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index bd1224fd216a..64e0f7587e6d 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1040,14 +1040,11 @@ static bool __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
1040 struct ieee80211_radiotap_iterator iterator; 1040 struct ieee80211_radiotap_iterator iterator;
1041 struct ieee80211_radiotap_header *rthdr = 1041 struct ieee80211_radiotap_header *rthdr =
1042 (struct ieee80211_radiotap_header *) skb->data; 1042 (struct ieee80211_radiotap_header *) skb->data;
1043 struct ieee80211_supported_band *sband;
1044 bool hw_frag; 1043 bool hw_frag;
1045 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1044 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1046 int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len, 1045 int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len,
1047 NULL); 1046 NULL);
1048 1047
1049 sband = tx->local->hw.wiphy->bands[tx->channel->band];
1050
1051 info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; 1048 info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
1052 tx->flags &= ~IEEE80211_TX_FRAGMENTED; 1049 tx->flags &= ~IEEE80211_TX_FRAGMENTED;
1053 1050
@@ -1154,7 +1151,7 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
1154 * packet pass through because splicing the frames 1151 * packet pass through because splicing the frames
1155 * back is already done. 1152 * back is already done.
1156 */ 1153 */
1157 tid_tx = tx->sta->ampdu_mlme.tid_tx[tid]; 1154 tid_tx = rcu_dereference_protected_tid_tx(tx->sta, tid);
1158 1155
1159 if (!tid_tx) { 1156 if (!tid_tx) {
1160 /* do nothing, let packet pass through */ 1157 /* do nothing, let packet pass through */
@@ -1446,11 +1443,8 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
1446 struct ieee80211_tx_data tx; 1443 struct ieee80211_tx_data tx;
1447 ieee80211_tx_result res_prepare; 1444 ieee80211_tx_result res_prepare;
1448 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1445 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1449 u16 queue;
1450 bool result = true; 1446 bool result = true;
1451 1447
1452 queue = skb_get_queue_mapping(skb);
1453
1454 if (unlikely(skb->len < 10)) { 1448 if (unlikely(skb->len < 10)) {
1455 dev_kfree_skb(skb); 1449 dev_kfree_skb(skb);
1456 return true; 1450 return true;
@@ -1486,12 +1480,7 @@ static int ieee80211_skb_resize(struct ieee80211_local *local,
1486{ 1480{
1487 int tail_need = 0; 1481 int tail_need = 0;
1488 1482
1489 /* 1483 if (may_encrypt && local->crypto_tx_tailroom_needed_cnt) {
1490 * This could be optimised, devices that do full hardware
1491 * crypto (including TKIP MMIC) need no tailroom... But we
1492 * have no drivers for such devices currently.
1493 */
1494 if (may_encrypt) {
1495 tail_need = IEEE80211_ENCRYPT_TAILROOM; 1484 tail_need = IEEE80211_ENCRYPT_TAILROOM;
1496 tail_need -= skb_tailroom(skb); 1485 tail_need -= skb_tailroom(skb);
1497 tail_need = max_t(int, tail_need, 0); 1486 tail_need = max_t(int, tail_need, 0);
@@ -1766,6 +1755,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1766 ret = NETDEV_TX_OK; 1755 ret = NETDEV_TX_OK;
1767 goto fail; 1756 goto fail;
1768 } 1757 }
1758 rcu_read_lock();
1769 if (!is_multicast_ether_addr(skb->data)) 1759 if (!is_multicast_ether_addr(skb->data))
1770 mppath = mpp_path_lookup(skb->data, sdata); 1760 mppath = mpp_path_lookup(skb->data, sdata);
1771 1761
@@ -1780,13 +1770,13 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1780 !(mppath && compare_ether_addr(mppath->mpp, skb->data))) { 1770 !(mppath && compare_ether_addr(mppath->mpp, skb->data))) {
1781 hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc, 1771 hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
1782 skb->data, skb->data + ETH_ALEN); 1772 skb->data, skb->data + ETH_ALEN);
1773 rcu_read_unlock();
1783 meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr, 1774 meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr,
1784 sdata, NULL, NULL); 1775 sdata, NULL, NULL);
1785 } else { 1776 } else {
1786 int is_mesh_mcast = 1; 1777 int is_mesh_mcast = 1;
1787 const u8 *mesh_da; 1778 const u8 *mesh_da;
1788 1779
1789 rcu_read_lock();
1790 if (is_multicast_ether_addr(skb->data)) 1780 if (is_multicast_ether_addr(skb->data))
1791 /* DA TA mSA AE:SA */ 1781 /* DA TA mSA AE:SA */
1792 mesh_da = skb->data; 1782 mesh_da = skb->data;
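The rcu_read_lock() movement above follows the standard rule: mpp_path_lookup() returns an RCU-protected entry, so the lock must be taken before the lookup and held across every use of the result. Reduced to its skeleton:

	rcu_read_lock();
	mppath = mpp_path_lookup(skb->data, sdata);
	/* ... all uses of mppath ... */
	rcu_read_unlock();
	/* mppath must not be dereferenced past this point */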
@@ -2266,7 +2256,7 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2266 2256
2267 /* headroom, head length, tail length and maximum TIM length */ 2257 /* headroom, head length, tail length and maximum TIM length */
2268 skb = dev_alloc_skb(local->tx_headroom + 400 + 2258 skb = dev_alloc_skb(local->tx_headroom + 400 +
2269 sdata->u.mesh.vendor_ie_len); 2259 sdata->u.mesh.ie_len);
2270 if (!skb) 2260 if (!skb)
2271 goto out; 2261 goto out;
2272 2262
@@ -2489,7 +2479,6 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
2489{ 2479{
2490 struct ieee80211_local *local = hw_to_local(hw); 2480 struct ieee80211_local *local = hw_to_local(hw);
2491 struct sk_buff *skb = NULL; 2481 struct sk_buff *skb = NULL;
2492 struct sta_info *sta;
2493 struct ieee80211_tx_data tx; 2482 struct ieee80211_tx_data tx;
2494 struct ieee80211_sub_if_data *sdata; 2483 struct ieee80211_sub_if_data *sdata;
2495 struct ieee80211_if_ap *bss = NULL; 2484 struct ieee80211_if_ap *bss = NULL;
@@ -2531,7 +2520,6 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
2531 2520
2532 info = IEEE80211_SKB_CB(skb); 2521 info = IEEE80211_SKB_CB(skb);
2533 2522
2534 sta = tx.sta;
2535 tx.flags |= IEEE80211_TX_PS_BUFFERED; 2523 tx.flags |= IEEE80211_TX_PS_BUFFERED;
2536 tx.channel = local->hw.conf.channel; 2524 tx.channel = local->hw.conf.channel;
2537 info->band = tx.channel->band; 2525 info->band = tx.channel->band;
@@ -2551,8 +2539,9 @@ void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
2551 skb_set_network_header(skb, 0); 2539 skb_set_network_header(skb, 0);
2552 skb_set_transport_header(skb, 0); 2540 skb_set_transport_header(skb, 0);
2553 2541
2554 /* send all internal mgmt frames on VO */ 2542 /* Send all internal mgmt frames on VO. Accordingly set TID to 7. */
2555 skb_set_queue_mapping(skb, 0); 2543 skb_set_queue_mapping(skb, IEEE80211_AC_VO);
2544 skb->priority = 7;
2556 2545
2557 /* 2546 /*
2558 * The other path calling ieee80211_xmit is from the tasklet, 2547 * The other path calling ieee80211_xmit is from the tasklet,
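For reference, IEEE80211_AC_VO is queue 0 and 802.11e maps user priorities 6 and 7 to the voice access category, which is why a priority of 7 pairs with the VO queue above. A hypothetical helper spelling out that 802.1d mapping:

static int up_to_ac(int up)
{
	switch (up) {
	case 6: case 7: return 0;	/* IEEE80211_AC_VO */
	case 4: case 5: return 1;	/* IEEE80211_AC_VI */
	case 0: case 3: return 2;	/* IEEE80211_AC_BE */
	default:	return 3;	/* IEEE80211_AC_BK (UP 1, 2) */
	}
}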
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 556647a910ac..d3fe2d237485 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1125,9 +1125,27 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1125 struct sta_info *sta; 1125 struct sta_info *sta;
1126 int res; 1126 int res;
1127 1127
1128#ifdef CONFIG_PM
1128 if (local->suspended) 1129 if (local->suspended)
1129 local->resuming = true; 1130 local->resuming = true;
1130 1131
1132 if (local->wowlan) {
1133 local->wowlan = false;
1134 res = drv_resume(local);
1135 if (res < 0) {
1136 local->resuming = false;
1137 return res;
1138 }
1139 if (res == 0)
1140 goto wake_up;
1141 WARN_ON(res > 1);
1142 /*
1143 * res is 1, which means the driver requested
1144 * to go through a regular reset on wakeup.
1145 */
1146 }
1147#endif
1148
1131 /* restart hardware */ 1149 /* restart hardware */
1132 if (local->open_count) { 1150 if (local->open_count) {
1133 /* 1151 /*
@@ -1258,6 +1276,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1258 if (ieee80211_sdata_running(sdata)) 1276 if (ieee80211_sdata_running(sdata))
1259 ieee80211_enable_keys(sdata); 1277 ieee80211_enable_keys(sdata);
1260 1278
1279 wake_up:
1261 ieee80211_wake_queues_by_reason(hw, 1280 ieee80211_wake_queues_by_reason(hw,
1262 IEEE80211_QUEUE_STOP_REASON_SUSPEND); 1281 IEEE80211_QUEUE_STOP_REASON_SUSPEND);
1263 1282
@@ -1290,7 +1309,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1290 } 1309 }
1291 } 1310 }
1292 1311
1293 add_timer(&local->sta_cleanup); 1312 mod_timer(&local->sta_cleanup, jiffies + 1);
1294 1313
1295 mutex_lock(&local->sta_mtx); 1314 mutex_lock(&local->sta_mtx);
1296 list_for_each_entry(sta, &local->sta_list, list) 1315 list_for_each_entry(sta, &local->sta_list, list)
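The add_timer() to mod_timer() switch matters because add_timer() may only be called on an inactive timer, while on the resume path sta_cleanup may still be pending. A generic sketch of the distinction, not specific to mac80211:

#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list t;	/* assume setup_timer() was done earlier */

static void rearm_example(void)
{
	/*
	 * add_timer(&t) here would be a bug if t is already pending
	 * (the timer core BUG()s on it); mod_timer() re-arms the timer
	 * whether or not it is pending.
	 */
	mod_timer(&t, jiffies + 1);
}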
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
index 2ff6d1e3ed21..a1c6bfd55f0f 100644
--- a/net/mac80211/wep.c
+++ b/net/mac80211/wep.c
@@ -30,17 +30,15 @@ int ieee80211_wep_init(struct ieee80211_local *local)
30 /* start WEP IV from a random value */ 30 /* start WEP IV from a random value */
31 get_random_bytes(&local->wep_iv, WEP_IV_LEN); 31 get_random_bytes(&local->wep_iv, WEP_IV_LEN);
32 32
33 local->wep_tx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 33 local->wep_tx_tfm = crypto_alloc_cipher("arc4", 0, CRYPTO_ALG_ASYNC);
34 CRYPTO_ALG_ASYNC);
35 if (IS_ERR(local->wep_tx_tfm)) { 34 if (IS_ERR(local->wep_tx_tfm)) {
36 local->wep_rx_tfm = ERR_PTR(-EINVAL); 35 local->wep_rx_tfm = ERR_PTR(-EINVAL);
37 return PTR_ERR(local->wep_tx_tfm); 36 return PTR_ERR(local->wep_tx_tfm);
38 } 37 }
39 38
40 local->wep_rx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 39 local->wep_rx_tfm = crypto_alloc_cipher("arc4", 0, CRYPTO_ALG_ASYNC);
41 CRYPTO_ALG_ASYNC);
42 if (IS_ERR(local->wep_rx_tfm)) { 40 if (IS_ERR(local->wep_rx_tfm)) {
43 crypto_free_blkcipher(local->wep_tx_tfm); 41 crypto_free_cipher(local->wep_tx_tfm);
44 local->wep_tx_tfm = ERR_PTR(-EINVAL); 42 local->wep_tx_tfm = ERR_PTR(-EINVAL);
45 return PTR_ERR(local->wep_rx_tfm); 43 return PTR_ERR(local->wep_rx_tfm);
46 } 44 }
@@ -51,9 +49,9 @@ int ieee80211_wep_init(struct ieee80211_local *local)
51void ieee80211_wep_free(struct ieee80211_local *local) 49void ieee80211_wep_free(struct ieee80211_local *local)
52{ 50{
53 if (!IS_ERR(local->wep_tx_tfm)) 51 if (!IS_ERR(local->wep_tx_tfm))
54 crypto_free_blkcipher(local->wep_tx_tfm); 52 crypto_free_cipher(local->wep_tx_tfm);
55 if (!IS_ERR(local->wep_rx_tfm)) 53 if (!IS_ERR(local->wep_rx_tfm))
56 crypto_free_blkcipher(local->wep_rx_tfm); 54 crypto_free_cipher(local->wep_rx_tfm);
57} 55}
58 56
59static inline bool ieee80211_wep_weak_iv(u32 iv, int keylen) 57static inline bool ieee80211_wep_weak_iv(u32 iv, int keylen)
@@ -127,12 +125,11 @@ static void ieee80211_wep_remove_iv(struct ieee80211_local *local,
127/* Perform WEP encryption using given key. data buffer must have tailroom 125/* Perform WEP encryption using given key. data buffer must have tailroom
128 * for 4-byte ICV. data_len must not include this ICV. Note: this function 126 * for 4-byte ICV. data_len must not include this ICV. Note: this function
129 * does _not_ add IV. data = RC4(data | CRC32(data)) */ 127 * does _not_ add IV. data = RC4(data | CRC32(data)) */
130int ieee80211_wep_encrypt_data(struct crypto_blkcipher *tfm, u8 *rc4key, 128int ieee80211_wep_encrypt_data(struct crypto_cipher *tfm, u8 *rc4key,
131 size_t klen, u8 *data, size_t data_len) 129 size_t klen, u8 *data, size_t data_len)
132{ 130{
133 struct blkcipher_desc desc = { .tfm = tfm };
134 struct scatterlist sg;
135 __le32 icv; 131 __le32 icv;
132 int i;
136 133
137 if (IS_ERR(tfm)) 134 if (IS_ERR(tfm))
138 return -1; 135 return -1;
@@ -140,9 +137,9 @@ int ieee80211_wep_encrypt_data(struct crypto_blkcipher *tfm, u8 *rc4key,
140 icv = cpu_to_le32(~crc32_le(~0, data, data_len)); 137 icv = cpu_to_le32(~crc32_le(~0, data, data_len));
141 put_unaligned(icv, (__le32 *)(data + data_len)); 138 put_unaligned(icv, (__le32 *)(data + data_len));
142 139
143 crypto_blkcipher_setkey(tfm, rc4key, klen); 140 crypto_cipher_setkey(tfm, rc4key, klen);
144 sg_init_one(&sg, data, data_len + WEP_ICV_LEN); 141 for (i = 0; i < data_len + WEP_ICV_LEN; i++)
145 crypto_blkcipher_encrypt(&desc, &sg, &sg, sg.length); 142 crypto_cipher_encrypt_one(tfm, data + i, data + i);
146 143
147 return 0; 144 return 0;
148} 145}
@@ -186,19 +183,18 @@ int ieee80211_wep_encrypt(struct ieee80211_local *local,
186/* Perform WEP decryption using given key. data buffer includes encrypted 183/* Perform WEP decryption using given key. data buffer includes encrypted
187 * payload, including 4-byte ICV, but _not_ IV. data_len must not include ICV. 184 * payload, including 4-byte ICV, but _not_ IV. data_len must not include ICV.
188 * Return 0 on success and -1 on ICV mismatch. */ 185 * Return 0 on success and -1 on ICV mismatch. */
189int ieee80211_wep_decrypt_data(struct crypto_blkcipher *tfm, u8 *rc4key, 186int ieee80211_wep_decrypt_data(struct crypto_cipher *tfm, u8 *rc4key,
190 size_t klen, u8 *data, size_t data_len) 187 size_t klen, u8 *data, size_t data_len)
191{ 188{
192 struct blkcipher_desc desc = { .tfm = tfm };
193 struct scatterlist sg;
194 __le32 crc; 189 __le32 crc;
190 int i;
195 191
196 if (IS_ERR(tfm)) 192 if (IS_ERR(tfm))
197 return -1; 193 return -1;
198 194
199 crypto_blkcipher_setkey(tfm, rc4key, klen); 195 crypto_cipher_setkey(tfm, rc4key, klen);
200 sg_init_one(&sg, data, data_len + WEP_ICV_LEN); 196 for (i = 0; i < data_len + WEP_ICV_LEN; i++)
201 crypto_blkcipher_decrypt(&desc, &sg, &sg, sg.length); 197 crypto_cipher_decrypt_one(tfm, data + i, data + i);
202 198
203 crc = cpu_to_le32(~crc32_le(~0, data, data_len)); 199 crc = cpu_to_le32(~crc32_le(~0, data, data_len));
204 if (memcmp(&crc, data + data_len, WEP_ICV_LEN) != 0) 200 if (memcmp(&crc, data + data_len, WEP_ICV_LEN) != 0)
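The blkcipher to cipher conversion works because "arc4" is a stream cipher registered with a one-byte block size, so each crypto_cipher_encrypt_one() call advances the keystream by one byte. A self-contained sketch of the same pattern, assuming the kernel crypto API of this series:

#include <linux/crypto.h>
#include <linux/err.h>

static int arc4_crypt_buf(const u8 *key, size_t klen, u8 *buf, size_t len)
{
	struct crypto_cipher *tfm;
	size_t i;

	tfm = crypto_alloc_cipher("arc4", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	if (crypto_cipher_setkey(tfm, key, klen)) {
		crypto_free_cipher(tfm);
		return -EINVAL;
	}
	for (i = 0; i < len; i++)	/* one keystream byte per call */
		crypto_cipher_encrypt_one(tfm, buf + i, buf + i);
	crypto_free_cipher(tfm);
	return 0;
}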
diff --git a/net/mac80211/wep.h b/net/mac80211/wep.h
index 58654ee33518..01e54840a628 100644
--- a/net/mac80211/wep.h
+++ b/net/mac80211/wep.h
@@ -18,12 +18,12 @@
18 18
19int ieee80211_wep_init(struct ieee80211_local *local); 19int ieee80211_wep_init(struct ieee80211_local *local);
20void ieee80211_wep_free(struct ieee80211_local *local); 20void ieee80211_wep_free(struct ieee80211_local *local);
21int ieee80211_wep_encrypt_data(struct crypto_blkcipher *tfm, u8 *rc4key, 21int ieee80211_wep_encrypt_data(struct crypto_cipher *tfm, u8 *rc4key,
22 size_t klen, u8 *data, size_t data_len); 22 size_t klen, u8 *data, size_t data_len);
23int ieee80211_wep_encrypt(struct ieee80211_local *local, 23int ieee80211_wep_encrypt(struct ieee80211_local *local,
24 struct sk_buff *skb, 24 struct sk_buff *skb,
25 const u8 *key, int keylen, int keyidx); 25 const u8 *key, int keylen, int keyidx);
26int ieee80211_wep_decrypt_data(struct crypto_blkcipher *tfm, u8 *rc4key, 26int ieee80211_wep_decrypt_data(struct crypto_cipher *tfm, u8 *rc4key,
27 size_t klen, u8 *data, size_t data_len); 27 size_t klen, u8 *data, size_t data_len);
28bool ieee80211_wep_is_weak_iv(struct sk_buff *skb, struct ieee80211_key *key); 28bool ieee80211_wep_is_weak_iv(struct sk_buff *skb, struct ieee80211_key *key);
29 29
diff --git a/net/mac80211/work.c b/net/mac80211/work.c
index ac3549690b8e..d2e7f0e86677 100644
--- a/net/mac80211/work.c
+++ b/net/mac80211/work.c
@@ -190,9 +190,8 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
190 struct sk_buff *skb; 190 struct sk_buff *skb;
191 struct ieee80211_mgmt *mgmt; 191 struct ieee80211_mgmt *mgmt;
192 u8 *pos, qos_info; 192 u8 *pos, qos_info;
193 const u8 *ies;
194 size_t offset = 0, noffset; 193 size_t offset = 0, noffset;
195 int i, len, count, rates_len, supp_rates_len; 194 int i, count, rates_len, supp_rates_len;
196 u16 capab; 195 u16 capab;
197 struct ieee80211_supported_band *sband; 196 struct ieee80211_supported_band *sband;
198 u32 rates = 0; 197 u32 rates = 0;
@@ -277,7 +276,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
277 } 276 }
278 277
279 /* SSID */ 278 /* SSID */
280 ies = pos = skb_put(skb, 2 + wk->assoc.ssid_len); 279 pos = skb_put(skb, 2 + wk->assoc.ssid_len);
281 *pos++ = WLAN_EID_SSID; 280 *pos++ = WLAN_EID_SSID;
282 *pos++ = wk->assoc.ssid_len; 281 *pos++ = wk->assoc.ssid_len;
283 memcpy(pos, wk->assoc.ssid, wk->assoc.ssid_len); 282 memcpy(pos, wk->assoc.ssid, wk->assoc.ssid_len);
@@ -287,7 +286,6 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
287 if (supp_rates_len > 8) 286 if (supp_rates_len > 8)
288 supp_rates_len = 8; 287 supp_rates_len = 8;
289 288
290 len = sband->n_bitrates;
291 pos = skb_put(skb, supp_rates_len + 2); 289 pos = skb_put(skb, supp_rates_len + 2);
292 *pos++ = WLAN_EID_SUPP_RATES; 290 *pos++ = WLAN_EID_SUPP_RATES;
293 *pos++ = supp_rates_len; 291 *pos++ = supp_rates_len;
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index f1765de2f4bf..9dc3b5f26e80 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -87,42 +87,76 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
87 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 87 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
88 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 88 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
89 89
90 /* No way to verify the MIC if the hardware stripped it */ 90 /*
91 if (status->flag & RX_FLAG_MMIC_STRIPPED) 91 * it makes no sense to check for MIC errors on anything other
92 * than data frames.
93 */
94 if (!ieee80211_is_data_present(hdr->frame_control))
95 return RX_CONTINUE;
96
97 /*
98 * No way to verify the MIC if the hardware stripped it or
 99 * the IV with the key index. In this case we must rely solely
100 * on the driver to set RX_FLAG_MMIC_ERROR in the event of a
101 * MIC failure report.
102 */
103 if (status->flag & (RX_FLAG_MMIC_STRIPPED | RX_FLAG_IV_STRIPPED)) {
104 if (status->flag & RX_FLAG_MMIC_ERROR)
105 goto mic_fail;
106
107 if (!(status->flag & RX_FLAG_IV_STRIPPED))
108 goto update_iv;
109
92 return RX_CONTINUE; 110 return RX_CONTINUE;
111 }
93 112
113 /*
 114 * Some hardware seems to generate Michael MIC failure reports even
 115 * though the frame was not encrypted with TKIP and therefore has no
 116 * MIC. Ignore the flag to avoid triggering countermeasures.
117 */
94 if (!rx->key || rx->key->conf.cipher != WLAN_CIPHER_SUITE_TKIP || 118 if (!rx->key || rx->key->conf.cipher != WLAN_CIPHER_SUITE_TKIP ||
95 !ieee80211_has_protected(hdr->frame_control) || 119 !(status->flag & RX_FLAG_DECRYPTED))
96 !ieee80211_is_data_present(hdr->frame_control))
97 return RX_CONTINUE; 120 return RX_CONTINUE;
98 121
122 if (rx->sdata->vif.type == NL80211_IFTYPE_AP && rx->key->conf.keyidx) {
123 /*
124 * APs with pairwise keys should never receive Michael MIC
125 * errors for non-zero keyidx because these are reserved for
126 * group keys and only the AP is sending real multicast
 127 * frames in the BSS.
128 */
129 return RX_DROP_UNUSABLE;
130 }
131
132 if (status->flag & RX_FLAG_MMIC_ERROR)
133 goto mic_fail;
134
99 hdrlen = ieee80211_hdrlen(hdr->frame_control); 135 hdrlen = ieee80211_hdrlen(hdr->frame_control);
100 if (skb->len < hdrlen + MICHAEL_MIC_LEN) 136 if (skb->len < hdrlen + MICHAEL_MIC_LEN)
101 return RX_DROP_UNUSABLE; 137 return RX_DROP_UNUSABLE;
102 138
103 data = skb->data + hdrlen; 139 data = skb->data + hdrlen;
104 data_len = skb->len - hdrlen - MICHAEL_MIC_LEN; 140 data_len = skb->len - hdrlen - MICHAEL_MIC_LEN;
105
106 key = &rx->key->conf.key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY]; 141 key = &rx->key->conf.key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];
107 michael_mic(key, hdr, data, data_len, mic); 142 michael_mic(key, hdr, data, data_len, mic);
108 if (memcmp(mic, data + data_len, MICHAEL_MIC_LEN) != 0) { 143 if (memcmp(mic, data + data_len, MICHAEL_MIC_LEN) != 0)
109 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH)) 144 goto mic_fail;
110 return RX_DROP_UNUSABLE;
111
112 mac80211_ev_michael_mic_failure(rx->sdata, rx->key->conf.keyidx,
113 (void *) skb->data, NULL,
114 GFP_ATOMIC);
115 return RX_DROP_UNUSABLE;
116 }
117 145
118 /* remove Michael MIC from payload */ 146 /* remove Michael MIC from payload */
119 skb_trim(skb, skb->len - MICHAEL_MIC_LEN); 147 skb_trim(skb, skb->len - MICHAEL_MIC_LEN);
120 148
149update_iv:
121 /* update IV in key information to be able to detect replays */ 150 /* update IV in key information to be able to detect replays */
122 rx->key->u.tkip.rx[rx->queue].iv32 = rx->tkip_iv32; 151 rx->key->u.tkip.rx[rx->queue].iv32 = rx->tkip_iv32;
123 rx->key->u.tkip.rx[rx->queue].iv16 = rx->tkip_iv16; 152 rx->key->u.tkip.rx[rx->queue].iv16 = rx->tkip_iv16;
124 153
125 return RX_CONTINUE; 154 return RX_CONTINUE;
155
156mic_fail:
157 mac80211_ev_michael_mic_failure(rx->sdata, rx->key->conf.keyidx,
158 (void *) skb->data, NULL, GFP_ATOMIC);
159 return RX_DROP_UNUSABLE;
126} 160}
127 161
128 162
diff --git a/net/netfilter/ipset/ip_set_getport.c b/net/netfilter/ipset/ip_set_getport.c
index 8d5227212686..757143b2240a 100644
--- a/net/netfilter/ipset/ip_set_getport.c
+++ b/net/netfilter/ipset/ip_set_getport.c
@@ -11,6 +11,7 @@
11#include <linux/skbuff.h> 11#include <linux/skbuff.h>
12#include <linux/icmp.h> 12#include <linux/icmp.h>
13#include <linux/icmpv6.h> 13#include <linux/icmpv6.h>
14#include <linux/sctp.h>
14#include <linux/netfilter_ipv6/ip6_tables.h> 15#include <linux/netfilter_ipv6/ip6_tables.h>
15#include <net/ip.h> 16#include <net/ip.h>
16#include <net/ipv6.h> 17#include <net/ipv6.h>
@@ -35,7 +36,20 @@ get_port(const struct sk_buff *skb, int protocol, unsigned int protooff,
35 *port = src ? th->source : th->dest; 36 *port = src ? th->source : th->dest;
36 break; 37 break;
37 } 38 }
38 case IPPROTO_UDP: { 39 case IPPROTO_SCTP: {
40 sctp_sctphdr_t _sh;
41 const sctp_sctphdr_t *sh;
42
43 sh = skb_header_pointer(skb, protooff, sizeof(_sh), &_sh);
44 if (sh == NULL)
45 /* No choice either */
46 return false;
47
48 *port = src ? sh->source : sh->dest;
49 break;
50 }
51 case IPPROTO_UDP:
52 case IPPROTO_UDPLITE: {
39 struct udphdr _udph; 53 struct udphdr _udph;
40 const struct udphdr *uh; 54 const struct udphdr *uh;
41 55
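The SCTP branch relies on the usual skb_header_pointer() contract: it returns a pointer into the skb when the requested bytes are already linear, otherwise it copies them into the caller's buffer, and it returns NULL only when the packet is too short. The same idiom, as a minimal sketch:

#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/sctp.h>

static bool example_sctp_ports(const struct sk_buff *skb,
			       unsigned int protooff,
			       __be16 *sport, __be16 *dport)
{
	sctp_sctphdr_t _sh;
	const sctp_sctphdr_t *sh;

	sh = skb_header_pointer(skb, protooff, sizeof(_sh), &_sh);
	if (!sh)
		return false;	/* truncated packet */
	*sport = sh->source;
	*dport = sh->dest;
	return true;
}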
diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c
index b9214145d357..14281b6b8074 100644
--- a/net/netfilter/ipset/ip_set_hash_ipport.c
+++ b/net/netfilter/ipset/ip_set_hash_ipport.c
@@ -491,7 +491,7 @@ static struct ip_set_type hash_ipport_type __read_mostly = {
491 .features = IPSET_TYPE_IP | IPSET_TYPE_PORT, 491 .features = IPSET_TYPE_IP | IPSET_TYPE_PORT,
492 .dimension = IPSET_DIM_TWO, 492 .dimension = IPSET_DIM_TWO,
493 .family = AF_UNSPEC, 493 .family = AF_UNSPEC,
494 .revision = 0, 494 .revision = 1,
495 .create = hash_ipport_create, 495 .create = hash_ipport_create,
496 .create_policy = { 496 .create_policy = {
497 [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 }, 497 [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c
index 4642872df6e1..401c8a2531db 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportip.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportip.c
@@ -509,7 +509,7 @@ static struct ip_set_type hash_ipportip_type __read_mostly = {
509 .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2, 509 .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2,
510 .dimension = IPSET_DIM_THREE, 510 .dimension = IPSET_DIM_THREE,
511 .family = AF_UNSPEC, 511 .family = AF_UNSPEC,
512 .revision = 0, 512 .revision = 1,
513 .create = hash_ipportip_create, 513 .create = hash_ipportip_create,
514 .create_policy = { 514 .create_policy = {
515 [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 }, 515 [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c
index 2cb84a54b7ad..4743e5402522 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c
@@ -574,7 +574,7 @@ static struct ip_set_type hash_ipportnet_type __read_mostly = {
574 .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2, 574 .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2,
575 .dimension = IPSET_DIM_THREE, 575 .dimension = IPSET_DIM_THREE,
576 .family = AF_UNSPEC, 576 .family = AF_UNSPEC,
577 .revision = 0, 577 .revision = 1,
578 .create = hash_ipportnet_create, 578 .create = hash_ipportnet_create,
579 .create_policy = { 579 .create_policy = {
580 [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 }, 580 [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c
index 8598676f2a05..d2a40362dd3a 100644
--- a/net/netfilter/ipset/ip_set_hash_netport.c
+++ b/net/netfilter/ipset/ip_set_hash_netport.c
@@ -526,7 +526,7 @@ static struct ip_set_type hash_netport_type __read_mostly = {
526 .features = IPSET_TYPE_IP | IPSET_TYPE_PORT, 526 .features = IPSET_TYPE_IP | IPSET_TYPE_PORT,
527 .dimension = IPSET_DIM_TWO, 527 .dimension = IPSET_DIM_TWO,
528 .family = AF_UNSPEC, 528 .family = AF_UNSPEC,
529 .revision = 0, 529 .revision = 1,
530 .create = hash_netport_create, 530 .create = hash_netport_create,
531 .create_policy = { 531 .create_policy = {
532 [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 }, 532 [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index a74dae6c5dbc..bfa808f4da13 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1382,15 +1382,7 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
1382 ip_vs_in_stats(cp, skb); 1382 ip_vs_in_stats(cp, skb);
1383 if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol) 1383 if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol)
1384 offset += 2 * sizeof(__u16); 1384 offset += 2 * sizeof(__u16);
1385 verdict = ip_vs_icmp_xmit(skb, cp, pp, offset); 1385 verdict = ip_vs_icmp_xmit(skb, cp, pp, offset, hooknum);
1386 /* LOCALNODE from FORWARD hook is not supported */
1387 if (verdict == NF_ACCEPT && hooknum == NF_INET_FORWARD &&
1388 skb_rtable(skb)->rt_flags & RTCF_LOCAL) {
1389 IP_VS_DBG(1, "%s(): "
1390 "local delivery to %pI4 but in FORWARD\n",
1391 __func__, &skb_rtable(skb)->rt_dst);
1392 verdict = NF_DROP;
1393 }
1394 1386
1395 out: 1387 out:
1396 __ip_vs_conn_put(cp); 1388 __ip_vs_conn_put(cp);
@@ -1412,7 +1404,6 @@ ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum)
1412 struct ip_vs_protocol *pp; 1404 struct ip_vs_protocol *pp;
1413 struct ip_vs_proto_data *pd; 1405 struct ip_vs_proto_data *pd;
1414 unsigned int offset, verdict; 1406 unsigned int offset, verdict;
1415 struct rt6_info *rt;
1416 1407
1417 *related = 1; 1408 *related = 1;
1418 1409
@@ -1474,23 +1465,12 @@ ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum)
1474 if (!cp) 1465 if (!cp)
1475 return NF_ACCEPT; 1466 return NF_ACCEPT;
1476 1467
1477 verdict = NF_DROP;
1478
1479 /* do the statistics and put it back */ 1468 /* do the statistics and put it back */
1480 ip_vs_in_stats(cp, skb); 1469 ip_vs_in_stats(cp, skb);
1481 if (IPPROTO_TCP == cih->nexthdr || IPPROTO_UDP == cih->nexthdr || 1470 if (IPPROTO_TCP == cih->nexthdr || IPPROTO_UDP == cih->nexthdr ||
1482 IPPROTO_SCTP == cih->nexthdr) 1471 IPPROTO_SCTP == cih->nexthdr)
1483 offset += 2 * sizeof(__u16); 1472 offset += 2 * sizeof(__u16);
1484 verdict = ip_vs_icmp_xmit_v6(skb, cp, pp, offset); 1473 verdict = ip_vs_icmp_xmit_v6(skb, cp, pp, offset, hooknum);
1485 /* LOCALNODE from FORWARD hook is not supported */
1486 if (verdict == NF_ACCEPT && hooknum == NF_INET_FORWARD &&
1487 (rt = (struct rt6_info *) skb_dst(skb)) &&
1488 rt->rt6i_dev && rt->rt6i_dev->flags & IFF_LOOPBACK) {
1489 IP_VS_DBG(1, "%s(): "
1490 "local delivery to %pI6 but in FORWARD\n",
1491 __func__, &rt->rt6i_dst);
1492 verdict = NF_DROP;
1493 }
1494 1474
1495 __ip_vs_conn_put(cp); 1475 __ip_vs_conn_put(cp);
1496 1476
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 37890f228b19..699c79a55657 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2069,9 +2069,6 @@ static const struct file_operations ip_vs_info_fops = {
2069 .release = seq_release_net, 2069 .release = seq_release_net,
2070}; 2070};
2071 2071
2072#endif
2073
2074#ifdef CONFIG_PROC_FS
2075static int ip_vs_stats_show(struct seq_file *seq, void *v) 2072static int ip_vs_stats_show(struct seq_file *seq, void *v)
2076{ 2073{
2077 struct net *net = seq_file_single_net(seq); 2074 struct net *net = seq_file_single_net(seq);
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 6132b213eddc..ee319a4338b0 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -87,7 +87,7 @@ __ip_vs_dst_check(struct ip_vs_dest *dest, u32 rtos)
87/* Get route to destination or remote server */ 87/* Get route to destination or remote server */
88static struct rtable * 88static struct rtable *
89__ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest, 89__ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
90 __be32 daddr, u32 rtos, int rt_mode) 90 __be32 daddr, u32 rtos, int rt_mode, __be32 *ret_saddr)
91{ 91{
92 struct net *net = dev_net(skb_dst(skb)->dev); 92 struct net *net = dev_net(skb_dst(skb)->dev);
93 struct rtable *rt; /* Route to the other host */ 93 struct rtable *rt; /* Route to the other host */
@@ -98,7 +98,12 @@ __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
98 spin_lock(&dest->dst_lock); 98 spin_lock(&dest->dst_lock);
99 if (!(rt = (struct rtable *) 99 if (!(rt = (struct rtable *)
100 __ip_vs_dst_check(dest, rtos))) { 100 __ip_vs_dst_check(dest, rtos))) {
101 rt = ip_route_output(net, dest->addr.ip, 0, rtos, 0); 101 struct flowi4 fl4;
102
103 memset(&fl4, 0, sizeof(fl4));
104 fl4.daddr = dest->addr.ip;
105 fl4.flowi4_tos = rtos;
106 rt = ip_route_output_key(net, &fl4);
102 if (IS_ERR(rt)) { 107 if (IS_ERR(rt)) {
103 spin_unlock(&dest->dst_lock); 108 spin_unlock(&dest->dst_lock);
104 IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n", 109 IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n",
@@ -106,18 +111,30 @@ __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
106 return NULL; 111 return NULL;
107 } 112 }
108 __ip_vs_dst_set(dest, rtos, dst_clone(&rt->dst), 0); 113 __ip_vs_dst_set(dest, rtos, dst_clone(&rt->dst), 0);
109 IP_VS_DBG(10, "new dst %pI4, refcnt=%d, rtos=%X\n", 114 dest->dst_saddr.ip = fl4.saddr;
110 &dest->addr.ip, 115 IP_VS_DBG(10, "new dst %pI4, src %pI4, refcnt=%d, "
116 "rtos=%X\n",
117 &dest->addr.ip, &dest->dst_saddr.ip,
111 atomic_read(&rt->dst.__refcnt), rtos); 118 atomic_read(&rt->dst.__refcnt), rtos);
112 } 119 }
120 daddr = dest->addr.ip;
121 if (ret_saddr)
122 *ret_saddr = dest->dst_saddr.ip;
113 spin_unlock(&dest->dst_lock); 123 spin_unlock(&dest->dst_lock);
114 } else { 124 } else {
115 rt = ip_route_output(net, daddr, 0, rtos, 0); 125 struct flowi4 fl4;
126
127 memset(&fl4, 0, sizeof(fl4));
128 fl4.daddr = daddr;
129 fl4.flowi4_tos = rtos;
130 rt = ip_route_output_key(net, &fl4);
116 if (IS_ERR(rt)) { 131 if (IS_ERR(rt)) {
117 IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n", 132 IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n",
118 &daddr); 133 &daddr);
119 return NULL; 134 return NULL;
120 } 135 }
136 if (ret_saddr)
137 *ret_saddr = fl4.saddr;
121 } 138 }
122 139
123 local = rt->rt_flags & RTCF_LOCAL; 140 local = rt->rt_flags & RTCF_LOCAL;
@@ -125,7 +142,7 @@ __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
125 rt_mode)) { 142 rt_mode)) {
126 IP_VS_DBG_RL("Stopping traffic to %s address, dest: %pI4\n", 143 IP_VS_DBG_RL("Stopping traffic to %s address, dest: %pI4\n",
127 (rt->rt_flags & RTCF_LOCAL) ? 144 (rt->rt_flags & RTCF_LOCAL) ?
128 "local":"non-local", &rt->rt_dst); 145 "local":"non-local", &daddr);
129 ip_rt_put(rt); 146 ip_rt_put(rt);
130 return NULL; 147 return NULL;
131 } 148 }
@@ -133,14 +150,14 @@ __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
133 !((ort = skb_rtable(skb)) && ort->rt_flags & RTCF_LOCAL)) { 150 !((ort = skb_rtable(skb)) && ort->rt_flags & RTCF_LOCAL)) {
134 IP_VS_DBG_RL("Redirect from non-local address %pI4 to local " 151 IP_VS_DBG_RL("Redirect from non-local address %pI4 to local "
135 "requires NAT method, dest: %pI4\n", 152 "requires NAT method, dest: %pI4\n",
136 &ip_hdr(skb)->daddr, &rt->rt_dst); 153 &ip_hdr(skb)->daddr, &daddr);
137 ip_rt_put(rt); 154 ip_rt_put(rt);
138 return NULL; 155 return NULL;
139 } 156 }
140 if (unlikely(!local && ipv4_is_loopback(ip_hdr(skb)->saddr))) { 157 if (unlikely(!local && ipv4_is_loopback(ip_hdr(skb)->saddr))) {
141 IP_VS_DBG_RL("Stopping traffic from loopback address %pI4 " 158 IP_VS_DBG_RL("Stopping traffic from loopback address %pI4 "
142 "to non-local address, dest: %pI4\n", 159 "to non-local address, dest: %pI4\n",
143 &ip_hdr(skb)->saddr, &rt->rt_dst); 160 &ip_hdr(skb)->saddr, &daddr);
144 ip_rt_put(rt); 161 ip_rt_put(rt);
145 return NULL; 162 return NULL;
146 } 163 }
@@ -229,8 +246,6 @@ out_err:
229 246
230/* 247/*
231 * Get route to destination or remote server 248 * Get route to destination or remote server
232 * rt_mode: flags, &1=Allow local dest, &2=Allow non-local dest,
233 * &4=Allow redirect from remote daddr to local
234 */ 249 */
235static struct rt6_info * 250static struct rt6_info *
236__ip_vs_get_out_rt_v6(struct sk_buff *skb, struct ip_vs_dest *dest, 251__ip_vs_get_out_rt_v6(struct sk_buff *skb, struct ip_vs_dest *dest,
@@ -250,7 +265,7 @@ __ip_vs_get_out_rt_v6(struct sk_buff *skb, struct ip_vs_dest *dest,
250 u32 cookie; 265 u32 cookie;
251 266
252 dst = __ip_vs_route_output_v6(net, &dest->addr.in6, 267 dst = __ip_vs_route_output_v6(net, &dest->addr.in6,
253 &dest->dst_saddr, 268 &dest->dst_saddr.in6,
254 do_xfrm); 269 do_xfrm);
255 if (!dst) { 270 if (!dst) {
256 spin_unlock(&dest->dst_lock); 271 spin_unlock(&dest->dst_lock);
@@ -260,11 +275,11 @@ __ip_vs_get_out_rt_v6(struct sk_buff *skb, struct ip_vs_dest *dest,
260 cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0; 275 cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
261 __ip_vs_dst_set(dest, 0, dst_clone(&rt->dst), cookie); 276 __ip_vs_dst_set(dest, 0, dst_clone(&rt->dst), cookie);
262 IP_VS_DBG(10, "new dst %pI6, src %pI6, refcnt=%d\n", 277 IP_VS_DBG(10, "new dst %pI6, src %pI6, refcnt=%d\n",
263 &dest->addr.in6, &dest->dst_saddr, 278 &dest->addr.in6, &dest->dst_saddr.in6,
264 atomic_read(&rt->dst.__refcnt)); 279 atomic_read(&rt->dst.__refcnt));
265 } 280 }
266 if (ret_saddr) 281 if (ret_saddr)
267 ipv6_addr_copy(ret_saddr, &dest->dst_saddr); 282 ipv6_addr_copy(ret_saddr, &dest->dst_saddr.in6);
268 spin_unlock(&dest->dst_lock); 283 spin_unlock(&dest->dst_lock);
269 } else { 284 } else {
270 dst = __ip_vs_route_output_v6(net, daddr, ret_saddr, do_xfrm); 285 dst = __ip_vs_route_output_v6(net, daddr, ret_saddr, do_xfrm);
@@ -274,13 +289,14 @@ __ip_vs_get_out_rt_v6(struct sk_buff *skb, struct ip_vs_dest *dest,
274 } 289 }
275 290
276 local = __ip_vs_is_local_route6(rt); 291 local = __ip_vs_is_local_route6(rt);
277 if (!((local ? 1 : 2) & rt_mode)) { 292 if (!((local ? IP_VS_RT_MODE_LOCAL : IP_VS_RT_MODE_NON_LOCAL) &
293 rt_mode)) {
278 IP_VS_DBG_RL("Stopping traffic to %s address, dest: %pI6\n", 294 IP_VS_DBG_RL("Stopping traffic to %s address, dest: %pI6\n",
279 local ? "local":"non-local", daddr); 295 local ? "local":"non-local", daddr);
280 dst_release(&rt->dst); 296 dst_release(&rt->dst);
281 return NULL; 297 return NULL;
282 } 298 }
283 if (local && !(rt_mode & 4) && 299 if (local && !(rt_mode & IP_VS_RT_MODE_RDR) &&
284 !((ort = (struct rt6_info *) skb_dst(skb)) && 300 !((ort = (struct rt6_info *) skb_dst(skb)) &&
285 __ip_vs_is_local_route6(ort))) { 301 __ip_vs_is_local_route6(ort))) {
286 IP_VS_DBG_RL("Redirect from non-local address %pI6 to local " 302 IP_VS_DBG_RL("Redirect from non-local address %pI6 to local "
@@ -386,7 +402,7 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
386 EnterFunction(10); 402 EnterFunction(10);
387 403
388 if (!(rt = __ip_vs_get_out_rt(skb, NULL, iph->daddr, RT_TOS(iph->tos), 404 if (!(rt = __ip_vs_get_out_rt(skb, NULL, iph->daddr, RT_TOS(iph->tos),
389 IP_VS_RT_MODE_NON_LOCAL))) 405 IP_VS_RT_MODE_NON_LOCAL, NULL)))
390 goto tx_error_icmp; 406 goto tx_error_icmp;
391 407
392 /* MTU checking */ 408 /* MTU checking */
@@ -440,7 +456,8 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
440 456
441 EnterFunction(10); 457 EnterFunction(10);
442 458
443 if (!(rt = __ip_vs_get_out_rt_v6(skb, NULL, &iph->daddr, NULL, 0, 2))) 459 if (!(rt = __ip_vs_get_out_rt_v6(skb, NULL, &iph->daddr, NULL, 0,
460 IP_VS_RT_MODE_NON_LOCAL)))
444 goto tx_error_icmp; 461 goto tx_error_icmp;
445 462
446 /* MTU checking */ 463 /* MTU checking */
@@ -517,7 +534,7 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
517 RT_TOS(iph->tos), 534 RT_TOS(iph->tos),
518 IP_VS_RT_MODE_LOCAL | 535 IP_VS_RT_MODE_LOCAL |
519 IP_VS_RT_MODE_NON_LOCAL | 536 IP_VS_RT_MODE_NON_LOCAL |
520 IP_VS_RT_MODE_RDR))) 537 IP_VS_RT_MODE_RDR, NULL)))
521 goto tx_error_icmp; 538 goto tx_error_icmp;
522 local = rt->rt_flags & RTCF_LOCAL; 539 local = rt->rt_flags & RTCF_LOCAL;
523 /* 540 /*
@@ -539,7 +556,7 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
539#endif 556#endif
540 557
541 /* From world but DNAT to loopback address? */ 558 /* From world but DNAT to loopback address? */
542 if (local && ipv4_is_loopback(rt->rt_dst) && 559 if (local && ipv4_is_loopback(cp->daddr.ip) &&
543 rt_is_input_route(skb_rtable(skb))) { 560 rt_is_input_route(skb_rtable(skb))) {
544 IP_VS_DBG_RL_PKT(1, AF_INET, pp, skb, 0, "ip_vs_nat_xmit(): " 561 IP_VS_DBG_RL_PKT(1, AF_INET, pp, skb, 0, "ip_vs_nat_xmit(): "
545 "stopping DNAT to loopback address"); 562 "stopping DNAT to loopback address");
@@ -632,7 +649,9 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
632 } 649 }
633 650
634 if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL, 651 if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL,
635 0, 1|2|4))) 652 0, (IP_VS_RT_MODE_LOCAL |
653 IP_VS_RT_MODE_NON_LOCAL |
654 IP_VS_RT_MODE_RDR))))
636 goto tx_error_icmp; 655 goto tx_error_icmp;
637 local = __ip_vs_is_local_route6(rt); 656 local = __ip_vs_is_local_route6(rt);
638 /* 657 /*
@@ -748,6 +767,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
748 struct ip_vs_protocol *pp) 767 struct ip_vs_protocol *pp)
749{ 768{
750 struct rtable *rt; /* Route to the other host */ 769 struct rtable *rt; /* Route to the other host */
770 __be32 saddr; /* Source for tunnel */
751 struct net_device *tdev; /* Device to other host */ 771 struct net_device *tdev; /* Device to other host */
752 struct iphdr *old_iph = ip_hdr(skb); 772 struct iphdr *old_iph = ip_hdr(skb);
753 u8 tos = old_iph->tos; 773 u8 tos = old_iph->tos;
@@ -761,7 +781,8 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
761 781
762 if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip, 782 if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
763 RT_TOS(tos), IP_VS_RT_MODE_LOCAL | 783 RT_TOS(tos), IP_VS_RT_MODE_LOCAL |
764 IP_VS_RT_MODE_NON_LOCAL))) 784 IP_VS_RT_MODE_NON_LOCAL,
785 &saddr)))
765 goto tx_error_icmp; 786 goto tx_error_icmp;
766 if (rt->rt_flags & RTCF_LOCAL) { 787 if (rt->rt_flags & RTCF_LOCAL) {
767 ip_rt_put(rt); 788 ip_rt_put(rt);
@@ -829,8 +850,8 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
829 iph->frag_off = df; 850 iph->frag_off = df;
830 iph->protocol = IPPROTO_IPIP; 851 iph->protocol = IPPROTO_IPIP;
831 iph->tos = tos; 852 iph->tos = tos;
832 iph->daddr = rt->rt_dst; 853 iph->daddr = cp->daddr.ip;
833 iph->saddr = rt->rt_src; 854 iph->saddr = saddr;
834 iph->ttl = old_iph->ttl; 855 iph->ttl = old_iph->ttl;
835 ip_select_ident(iph, &rt->dst, NULL); 856 ip_select_ident(iph, &rt->dst, NULL);
836 857
@@ -875,7 +896,8 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
875 EnterFunction(10); 896 EnterFunction(10);
876 897
877 if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, 898 if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6,
878 &saddr, 1, 1|2))) 899 &saddr, 1, (IP_VS_RT_MODE_LOCAL |
900 IP_VS_RT_MODE_NON_LOCAL))))
879 goto tx_error_icmp; 901 goto tx_error_icmp;
880 if (__ip_vs_is_local_route6(rt)) { 902 if (__ip_vs_is_local_route6(rt)) {
881 dst_release(&rt->dst); 903 dst_release(&rt->dst);
@@ -992,7 +1014,7 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
992 if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip, 1014 if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
993 RT_TOS(iph->tos), 1015 RT_TOS(iph->tos),
994 IP_VS_RT_MODE_LOCAL | 1016 IP_VS_RT_MODE_LOCAL |
995 IP_VS_RT_MODE_NON_LOCAL))) 1017 IP_VS_RT_MODE_NON_LOCAL, NULL)))
996 goto tx_error_icmp; 1018 goto tx_error_icmp;
997 if (rt->rt_flags & RTCF_LOCAL) { 1019 if (rt->rt_flags & RTCF_LOCAL) {
998 ip_rt_put(rt); 1020 ip_rt_put(rt);
@@ -1050,7 +1072,8 @@ ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
1050 EnterFunction(10); 1072 EnterFunction(10);
1051 1073
1052 if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL, 1074 if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL,
1053 0, 1|2))) 1075 0, (IP_VS_RT_MODE_LOCAL |
1076 IP_VS_RT_MODE_NON_LOCAL))))
1054 goto tx_error_icmp; 1077 goto tx_error_icmp;
1055 if (__ip_vs_is_local_route6(rt)) { 1078 if (__ip_vs_is_local_route6(rt)) {
1056 dst_release(&rt->dst); 1079 dst_release(&rt->dst);
@@ -1109,12 +1132,13 @@ tx_error:
1109 */ 1132 */
1110int 1133int
1111ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, 1134ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1112 struct ip_vs_protocol *pp, int offset) 1135 struct ip_vs_protocol *pp, int offset, unsigned int hooknum)
1113{ 1136{
1114 struct rtable *rt; /* Route to the other host */ 1137 struct rtable *rt; /* Route to the other host */
1115 int mtu; 1138 int mtu;
1116 int rc; 1139 int rc;
1117 int local; 1140 int local;
1141 int rt_mode;
1118 1142
1119 EnterFunction(10); 1143 EnterFunction(10);
1120 1144
@@ -1135,11 +1159,13 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1135 * mangle and send the packet here (only for VS/NAT) 1159 * mangle and send the packet here (only for VS/NAT)
1136 */ 1160 */
1137 1161
1162 /* LOCALNODE from FORWARD hook is not supported */
1163 rt_mode = (hooknum != NF_INET_FORWARD) ?
1164 IP_VS_RT_MODE_LOCAL | IP_VS_RT_MODE_NON_LOCAL |
1165 IP_VS_RT_MODE_RDR : IP_VS_RT_MODE_NON_LOCAL;
1138 if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip, 1166 if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
1139 RT_TOS(ip_hdr(skb)->tos), 1167 RT_TOS(ip_hdr(skb)->tos),
1140 IP_VS_RT_MODE_LOCAL | 1168 rt_mode, NULL)))
1141 IP_VS_RT_MODE_NON_LOCAL |
1142 IP_VS_RT_MODE_RDR)))
1143 goto tx_error_icmp; 1169 goto tx_error_icmp;
1144 local = rt->rt_flags & RTCF_LOCAL; 1170 local = rt->rt_flags & RTCF_LOCAL;
1145 1171
@@ -1162,7 +1188,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1162#endif 1188#endif
1163 1189
1164 /* From world but DNAT to loopback address? */ 1190 /* From world but DNAT to loopback address? */
1165 if (local && ipv4_is_loopback(rt->rt_dst) && 1191 if (local && ipv4_is_loopback(cp->daddr.ip) &&
1166 rt_is_input_route(skb_rtable(skb))) { 1192 rt_is_input_route(skb_rtable(skb))) {
1167 IP_VS_DBG(1, "%s(): " 1193 IP_VS_DBG(1, "%s(): "
1168 "stopping DNAT to loopback %pI4\n", 1194 "stopping DNAT to loopback %pI4\n",
@@ -1227,12 +1253,13 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1227#ifdef CONFIG_IP_VS_IPV6 1253#ifdef CONFIG_IP_VS_IPV6
1228int 1254int
1229ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, 1255ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
1230 struct ip_vs_protocol *pp, int offset) 1256 struct ip_vs_protocol *pp, int offset, unsigned int hooknum)
1231{ 1257{
1232 struct rt6_info *rt; /* Route to the other host */ 1258 struct rt6_info *rt; /* Route to the other host */
1233 int mtu; 1259 int mtu;
1234 int rc; 1260 int rc;
1235 int local; 1261 int local;
1262 int rt_mode;
1236 1263
1237 EnterFunction(10); 1264 EnterFunction(10);
1238 1265
@@ -1253,8 +1280,12 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
1253 * mangle and send the packet here (only for VS/NAT) 1280 * mangle and send the packet here (only for VS/NAT)
1254 */ 1281 */
1255 1282
1283 /* LOCALNODE from FORWARD hook is not supported */
1284 rt_mode = (hooknum != NF_INET_FORWARD) ?
1285 IP_VS_RT_MODE_LOCAL | IP_VS_RT_MODE_NON_LOCAL |
1286 IP_VS_RT_MODE_RDR : IP_VS_RT_MODE_NON_LOCAL;
1256 if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL, 1287 if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL,
1257 0, 1|2|4))) 1288 0, rt_mode)))
1258 goto tx_error_icmp; 1289 goto tx_error_icmp;
1259 1290
1260 local = __ip_vs_is_local_route6(rt); 1291 local = __ip_vs_is_local_route6(rt);
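The IP_VS_RT_MODE_* names that replace the magic 1|2|4 masks throughout this file decode as below; the values are inferred from the one-to-one substitutions in the hunks above (the real definitions live in include/net/ip_vs.h):

#define IP_VS_RT_MODE_LOCAL	0x0001	/* route to local address allowed */
#define IP_VS_RT_MODE_NON_LOCAL	0x0002	/* route to non-local address allowed */
#define IP_VS_RT_MODE_RDR	0x0004	/* redirect from remote daddr
					 * to local address is allowed */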
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 237cc1981b89..cb5a28581782 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -1419,6 +1419,7 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
1419 const char *dptr, *end; 1419 const char *dptr, *end;
1420 s16 diff, tdiff = 0; 1420 s16 diff, tdiff = 0;
1421 int ret = NF_ACCEPT; 1421 int ret = NF_ACCEPT;
1422 bool term;
1422 typeof(nf_nat_sip_seq_adjust_hook) nf_nat_sip_seq_adjust; 1423 typeof(nf_nat_sip_seq_adjust_hook) nf_nat_sip_seq_adjust;
1423 1424
1424 if (ctinfo != IP_CT_ESTABLISHED && 1425 if (ctinfo != IP_CT_ESTABLISHED &&
@@ -1453,14 +1454,21 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
1453 if (dptr + matchoff == end) 1454 if (dptr + matchoff == end)
1454 break; 1455 break;
1455 1456
1456 if (end + strlen("\r\n\r\n") > dptr + datalen) 1457 term = false;
1457 break; 1458 for (; end + strlen("\r\n\r\n") <= dptr + datalen; end++) {
1458 if (end[0] != '\r' || end[1] != '\n' || 1459 if (end[0] == '\r' && end[1] == '\n' &&
1459 end[2] != '\r' || end[3] != '\n') 1460 end[2] == '\r' && end[3] == '\n') {
1461 term = true;
1462 break;
1463 }
1464 }
1465 if (!term)
1460 break; 1466 break;
1461 end += strlen("\r\n\r\n") + clen; 1467 end += strlen("\r\n\r\n") + clen;
1462 1468
1463 msglen = origlen = end - dptr; 1469 msglen = origlen = end - dptr;
1470 if (msglen > datalen)
1471 return NF_DROP;
1464 1472
1465 ret = process_sip_msg(skb, ct, dataoff, &dptr, &msglen); 1473 ret = process_sip_msg(skb, ct, dataoff, &dptr, &msglen);
1466 if (ret != NF_ACCEPT) 1474 if (ret != NF_ACCEPT)
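A standalone, userspace rendering of the terminator scan added above: rather than demanding that the blank line sit exactly at the current parse position, it walks forward until a CR LF CR LF sequence appears within the buffer and gives up if none does.

#include <stdbool.h>
#include <string.h>

/* advance *end to the header/body separator, if one exists */
static bool sip_find_terminator(const char *dptr, size_t datalen,
				const char **end)
{
	const char *p = *end;

	for (; p + strlen("\r\n\r\n") <= dptr + datalen; p++) {
		if (p[0] == '\r' && p[1] == '\n' &&
		    p[2] == '\r' && p[3] == '\n') {
			*end = p;
			return true;
		}
	}
	return false;
}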
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 0ae142825881..05e9feb101c3 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -245,7 +245,7 @@ static int ct_seq_show(struct seq_file *s, void *v)
245 ret = 0; 245 ret = 0;
246release: 246release:
247 nf_ct_put(ct); 247 nf_ct_put(ct);
248 return 0; 248 return ret;
249} 249}
250 250
251static const struct seq_operations ct_seq_ops = { 251static const struct seq_operations ct_seq_ops = {
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 985e9b76c916..e0ee010935e7 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -381,7 +381,6 @@ __build_packet_message(struct nfulnl_instance *inst,
381 struct nfulnl_msg_packet_hdr pmsg; 381 struct nfulnl_msg_packet_hdr pmsg;
382 struct nlmsghdr *nlh; 382 struct nlmsghdr *nlh;
383 struct nfgenmsg *nfmsg; 383 struct nfgenmsg *nfmsg;
384 __be32 tmp_uint;
385 sk_buff_data_t old_tail = inst->skb->tail; 384 sk_buff_data_t old_tail = inst->skb->tail;
386 385
387 nlh = NLMSG_PUT(inst->skb, 0, 0, 386 nlh = NLMSG_PUT(inst->skb, 0, 0,
@@ -428,7 +427,6 @@ __build_packet_message(struct nfulnl_instance *inst,
428 } 427 }
429 428
430 if (outdev) { 429 if (outdev) {
431 tmp_uint = htonl(outdev->ifindex);
432#ifndef CONFIG_BRIDGE_NETFILTER 430#ifndef CONFIG_BRIDGE_NETFILTER
433 NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV, 431 NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV,
434 htonl(outdev->ifindex)); 432 htonl(outdev->ifindex));
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 8a025a585d2f..b0869fe3633b 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -762,8 +762,8 @@ void xt_compat_unlock(u_int8_t af)
762EXPORT_SYMBOL_GPL(xt_compat_unlock); 762EXPORT_SYMBOL_GPL(xt_compat_unlock);
763#endif 763#endif
764 764
765DEFINE_PER_CPU(struct xt_info_lock, xt_info_locks); 765DEFINE_PER_CPU(seqcount_t, xt_recseq);
766EXPORT_PER_CPU_SYMBOL_GPL(xt_info_locks); 766EXPORT_PER_CPU_SYMBOL_GPL(xt_recseq);
767 767
768static int xt_jumpstack_alloc(struct xt_table_info *i) 768static int xt_jumpstack_alloc(struct xt_table_info *i)
769{ 769{
@@ -1362,10 +1362,7 @@ static int __init xt_init(void)
1362 int rv; 1362 int rv;
1363 1363
1364 for_each_possible_cpu(i) { 1364 for_each_possible_cpu(i) {
1365 struct xt_info_lock *lock = &per_cpu(xt_info_locks, i); 1365 seqcount_init(&per_cpu(xt_recseq, i));
1366
1367 seqlock_init(&lock->lock);
1368 lock->readers = 0;
1369 } 1366 }
1370 1367
1371 xt = kmalloc(sizeof(struct xt_af) * NFPROTO_NUMPROTO, GFP_KERNEL); 1368 xt = kmalloc(sizeof(struct xt_af) * NFPROTO_NUMPROTO, GFP_KERNEL);
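The per-cpu xt_recseq that replaces xt_info_locks follows the generic seqcount_t protocol. A generic sketch of that protocol (x_tables wraps it in its own recursion-aware helpers, so this is not the exact accessor pattern):

#include <linux/seqlock.h>

static seqcount_t seq;		/* seqcount_init(&seq) at setup time */
static int shared_value;

static void writer_update(int v)	/* writers serialized externally */
{
	write_seqcount_begin(&seq);	/* sequence becomes odd */
	shared_value = v;
	write_seqcount_end(&seq);	/* even again; readers may complete */
}

static int reader_read(void)
{
	unsigned int start;
	int v;

	do {
		start = read_seqcount_begin(&seq);
		v = shared_value;
	} while (read_seqcount_retry(&seq, start));
	return v;
}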
diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c
index 5f14c8462e30..bae5756b1626 100644
--- a/net/netlabel/netlabel_cipso_v4.c
+++ b/net/netlabel/netlabel_cipso_v4.c
@@ -422,7 +422,6 @@ static int netlbl_cipsov4_add(struct sk_buff *skb, struct genl_info *info)
422 422
423{ 423{
424 int ret_val = -EINVAL; 424 int ret_val = -EINVAL;
425 const char *type_str = "(unknown)";
426 struct netlbl_audit audit_info; 425 struct netlbl_audit audit_info;
427 426
428 if (!info->attrs[NLBL_CIPSOV4_A_DOI] || 427 if (!info->attrs[NLBL_CIPSOV4_A_DOI] ||
@@ -432,15 +431,12 @@ static int netlbl_cipsov4_add(struct sk_buff *skb, struct genl_info *info)
432 netlbl_netlink_auditinfo(skb, &audit_info); 431 netlbl_netlink_auditinfo(skb, &audit_info);
433 switch (nla_get_u32(info->attrs[NLBL_CIPSOV4_A_MTYPE])) { 432 switch (nla_get_u32(info->attrs[NLBL_CIPSOV4_A_MTYPE])) {
434 case CIPSO_V4_MAP_TRANS: 433 case CIPSO_V4_MAP_TRANS:
435 type_str = "trans";
436 ret_val = netlbl_cipsov4_add_std(info, &audit_info); 434 ret_val = netlbl_cipsov4_add_std(info, &audit_info);
437 break; 435 break;
438 case CIPSO_V4_MAP_PASS: 436 case CIPSO_V4_MAP_PASS:
439 type_str = "pass";
440 ret_val = netlbl_cipsov4_add_pass(info, &audit_info); 437 ret_val = netlbl_cipsov4_add_pass(info, &audit_info);
441 break; 438 break;
442 case CIPSO_V4_MAP_LOCAL: 439 case CIPSO_V4_MAP_LOCAL:
443 type_str = "local";
444 ret_val = netlbl_cipsov4_add_local(info, &audit_info); 440 ret_val = netlbl_cipsov4_add_local(info, &audit_info);
445 break; 441 break;
446 } 442 }
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 06cb02796a0e..732152f718e0 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -591,7 +591,6 @@ static int nr_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
591 return -EINVAL; 591 return -EINVAL;
592 } 592 }
593 if ((dev = nr_dev_get(&addr->fsa_ax25.sax25_call)) == NULL) { 593 if ((dev = nr_dev_get(&addr->fsa_ax25.sax25_call)) == NULL) {
594 SOCK_DEBUG(sk, "NET/ROM: bind failed: invalid node callsign\n");
595 release_sock(sk); 594 release_sock(sk);
596 return -EADDRNOTAVAIL; 595 return -EADDRNOTAVAIL;
597 } 596 }
@@ -632,7 +631,7 @@ static int nr_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
632 sock_reset_flag(sk, SOCK_ZAPPED); 631 sock_reset_flag(sk, SOCK_ZAPPED);
633 dev_put(dev); 632 dev_put(dev);
634 release_sock(sk); 633 release_sock(sk);
635 SOCK_DEBUG(sk, "NET/ROM: socket is bound\n"); 634
636 return 0; 635 return 0;
637} 636}
638 637
@@ -1082,8 +1081,6 @@ static int nr_sendmsg(struct kiocb *iocb, struct socket *sock,
1082 sax.sax25_call = nr->dest_addr; 1081 sax.sax25_call = nr->dest_addr;
1083 } 1082 }
1084 1083
1085 SOCK_DEBUG(sk, "NET/ROM: sendto: Addresses built.\n");
1086
1087 /* Build a packet - the conventional user limit is 236 bytes. We can 1084 /* Build a packet - the conventional user limit is 236 bytes. We can
1088 do ludicrously large NetROM frames but must not overflow */ 1085 do ludicrously large NetROM frames but must not overflow */
1089 if (len > 65536) { 1086 if (len > 65536) {
@@ -1091,7 +1088,6 @@ static int nr_sendmsg(struct kiocb *iocb, struct socket *sock,
1091 goto out; 1088 goto out;
1092 } 1089 }
1093 1090
1094 SOCK_DEBUG(sk, "NET/ROM: sendto: building packet.\n");
1095 size = len + NR_NETWORK_LEN + NR_TRANSPORT_LEN; 1091 size = len + NR_NETWORK_LEN + NR_TRANSPORT_LEN;
1096 1092
1097 if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL) 1093 if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL)
@@ -1105,7 +1101,6 @@ static int nr_sendmsg(struct kiocb *iocb, struct socket *sock,
1105 */ 1101 */
1106 1102
1107 asmptr = skb_push(skb, NR_TRANSPORT_LEN); 1103 asmptr = skb_push(skb, NR_TRANSPORT_LEN);
1108 SOCK_DEBUG(sk, "Building NET/ROM Header.\n");
1109 1104
1110 /* Build a NET/ROM Transport header */ 1105 /* Build a NET/ROM Transport header */
1111 1106
@@ -1114,15 +1109,12 @@ static int nr_sendmsg(struct kiocb *iocb, struct socket *sock,
1114 *asmptr++ = 0; /* To be filled in later */ 1109 *asmptr++ = 0; /* To be filled in later */
1115 *asmptr++ = 0; /* Ditto */ 1110 *asmptr++ = 0; /* Ditto */
1116 *asmptr++ = NR_INFO; 1111 *asmptr++ = NR_INFO;
1117 SOCK_DEBUG(sk, "Built header.\n");
1118 1112
1119 /* 1113 /*
1120 * Put the data on the end 1114 * Put the data on the end
1121 */ 1115 */
1122 skb_put(skb, len); 1116 skb_put(skb, len);
1123 1117
1124 SOCK_DEBUG(sk, "NET/ROM: Appending user data\n");
1125
1126 /* User data follows immediately after the NET/ROM transport header */ 1118 /* User data follows immediately after the NET/ROM transport header */
1127 if (memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len)) { 1119 if (memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len)) {
1128 kfree_skb(skb); 1120 kfree_skb(skb);
@@ -1130,8 +1122,6 @@ static int nr_sendmsg(struct kiocb *iocb, struct socket *sock,
1130 goto out; 1122 goto out;
1131 } 1123 }
1132 1124
1133 SOCK_DEBUG(sk, "NET/ROM: Transmitting buffer\n");
1134
1135 if (sk->sk_state != TCP_ESTABLISHED) { 1125 if (sk->sk_state != TCP_ESTABLISHED) {
1136 kfree_skb(skb); 1126 kfree_skb(skb);
1137 err = -ENOTCONN; 1127 err = -ENOTCONN;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index b5362e96022b..549527bca87a 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -538,7 +538,7 @@ static inline unsigned int run_filter(const struct sk_buff *skb,
538 rcu_read_lock(); 538 rcu_read_lock();
539 filter = rcu_dereference(sk->sk_filter); 539 filter = rcu_dereference(sk->sk_filter);
540 if (filter != NULL) 540 if (filter != NULL)
541 res = sk_run_filter(skb, filter->insns); 541 res = SK_RUN_FILTER(filter, skb);
542 rcu_read_unlock(); 542 rcu_read_unlock();
543 543
544 return res; 544 return res;
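[Editor's note] The af_packet change is fallout from the BPF JIT work: SK_RUN_FILTER() hides whether a socket filter runs through the interpreter or a JIT-compiled image. Roughly, paraphrasing the include/linux/filter.h definition of this period (a sketch, not an exact quote):

    #ifdef CONFIG_BPF_JIT
    /* bpf_func points at the JIT image when one was built,
     * or back at sk_run_filter() otherwise */
    #define SK_RUN_FILTER(FILTER, SKB) (*FILTER->bpf_func)(SKB, FILTER->insns)
    #else
    #define SK_RUN_FILTER(FILTER, SKB) sk_run_filter(SKB, FILTER->insns)
    #endif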
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
index 1566672235dd..d2df8f33160b 100644
--- a/net/phonet/pn_dev.c
+++ b/net/phonet/pn_dev.c
@@ -418,18 +418,14 @@ int phonet_route_del(struct net_device *dev, u8 daddr)
418 return 0; 418 return 0;
419} 419}
420 420
421struct net_device *phonet_route_get(struct net *net, u8 daddr) 421struct net_device *phonet_route_get_rcu(struct net *net, u8 daddr)
422{ 422{
423 struct phonet_net *pnn = phonet_pernet(net); 423 struct phonet_net *pnn = phonet_pernet(net);
424 struct phonet_routes *routes = &pnn->routes; 424 struct phonet_routes *routes = &pnn->routes;
425 struct net_device *dev; 425 struct net_device *dev;
426 426
427 ASSERT_RTNL(); /* no need to hold the device */
428
429 daddr >>= 2; 427 daddr >>= 2;
430 rcu_read_lock();
431 dev = rcu_dereference(routes->table[daddr]); 428 dev = rcu_dereference(routes->table[daddr]);
432 rcu_read_unlock();
433 return dev; 429 return dev;
434} 430}
435 431
diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c
index 58b3b1f991ed..438accb7a5a8 100644
--- a/net/phonet/pn_netlink.c
+++ b/net/phonet/pn_netlink.c
@@ -264,10 +264,11 @@ static int route_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
264 struct net *net = sock_net(skb->sk); 264 struct net *net = sock_net(skb->sk);
265 u8 addr, addr_idx = 0, addr_start_idx = cb->args[0]; 265 u8 addr, addr_idx = 0, addr_start_idx = cb->args[0];
266 266
267 rcu_read_lock();
267 for (addr = 0; addr < 64; addr++) { 268 for (addr = 0; addr < 64; addr++) {
268 struct net_device *dev; 269 struct net_device *dev;
269 270
270 dev = phonet_route_get(net, addr << 2); 271 dev = phonet_route_get_rcu(net, addr << 2);
271 if (!dev) 272 if (!dev)
272 continue; 273 continue;
273 274
@@ -279,6 +280,7 @@ static int route_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
279 } 280 }
280 281
281out: 282out:
283 rcu_read_unlock();
282 cb->args[0] = addr_idx; 284 cb->args[0] = addr_idx;
283 cb->args[1] = 0; 285 cb->args[1] = 0;
284 286
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index b1adafab377c..8c5bfcef92cb 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -52,7 +52,7 @@ static int pn_socket_release(struct socket *sock)
52 52
53static struct { 53static struct {
54 struct hlist_head hlist[PN_HASHSIZE]; 54 struct hlist_head hlist[PN_HASHSIZE];
55 spinlock_t lock; 55 struct mutex lock;
56} pnsocks; 56} pnsocks;
57 57
58void __init pn_sock_init(void) 58void __init pn_sock_init(void)
@@ -61,7 +61,7 @@ void __init pn_sock_init(void)
61 61
62 for (i = 0; i < PN_HASHSIZE; i++) 62 for (i = 0; i < PN_HASHSIZE; i++)
63 INIT_HLIST_HEAD(pnsocks.hlist + i); 63 INIT_HLIST_HEAD(pnsocks.hlist + i);
64 spin_lock_init(&pnsocks.lock); 64 mutex_init(&pnsocks.lock);
65} 65}
66 66
67static struct hlist_head *pn_hash_list(u16 obj) 67static struct hlist_head *pn_hash_list(u16 obj)
@@ -82,9 +82,8 @@ struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn)
82 u8 res = spn->spn_resource; 82 u8 res = spn->spn_resource;
83 struct hlist_head *hlist = pn_hash_list(obj); 83 struct hlist_head *hlist = pn_hash_list(obj);
84 84
85 spin_lock_bh(&pnsocks.lock); 85 rcu_read_lock();
86 86 sk_for_each_rcu(sknode, node, hlist) {
87 sk_for_each(sknode, node, hlist) {
88 struct pn_sock *pn = pn_sk(sknode); 87 struct pn_sock *pn = pn_sk(sknode);
89 BUG_ON(!pn->sobject); /* unbound socket */ 88 BUG_ON(!pn->sobject); /* unbound socket */
90 89
@@ -107,8 +106,7 @@ struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn)
107 sock_hold(sknode); 106 sock_hold(sknode);
108 break; 107 break;
109 } 108 }
110 109 rcu_read_unlock();
111 spin_unlock_bh(&pnsocks.lock);
112 110
113 return rval; 111 return rval;
114} 112}
@@ -119,7 +117,7 @@ void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb)
119 struct hlist_head *hlist = pnsocks.hlist; 117 struct hlist_head *hlist = pnsocks.hlist;
120 unsigned h; 118 unsigned h;
121 119
122 spin_lock(&pnsocks.lock); 120 rcu_read_lock();
123 for (h = 0; h < PN_HASHSIZE; h++) { 121 for (h = 0; h < PN_HASHSIZE; h++) {
124 struct hlist_node *node; 122 struct hlist_node *node;
125 struct sock *sknode; 123 struct sock *sknode;
@@ -140,25 +138,26 @@ void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb)
140 } 138 }
141 hlist++; 139 hlist++;
142 } 140 }
143 spin_unlock(&pnsocks.lock); 141 rcu_read_unlock();
144} 142}
145 143
146void pn_sock_hash(struct sock *sk) 144void pn_sock_hash(struct sock *sk)
147{ 145{
148 struct hlist_head *hlist = pn_hash_list(pn_sk(sk)->sobject); 146 struct hlist_head *hlist = pn_hash_list(pn_sk(sk)->sobject);
149 147
150 spin_lock_bh(&pnsocks.lock); 148 mutex_lock(&pnsocks.lock);
151 sk_add_node(sk, hlist); 149 sk_add_node_rcu(sk, hlist);
152 spin_unlock_bh(&pnsocks.lock); 150 mutex_unlock(&pnsocks.lock);
153} 151}
154EXPORT_SYMBOL(pn_sock_hash); 152EXPORT_SYMBOL(pn_sock_hash);
155 153
156void pn_sock_unhash(struct sock *sk) 154void pn_sock_unhash(struct sock *sk)
157{ 155{
158 spin_lock_bh(&pnsocks.lock); 156 mutex_lock(&pnsocks.lock);
159 sk_del_node_init(sk); 157 sk_del_node_init_rcu(sk);
160 spin_unlock_bh(&pnsocks.lock); 158 mutex_unlock(&pnsocks.lock);
161 pn_sock_unbind_all_res(sk); 159 pn_sock_unbind_all_res(sk);
160 synchronize_rcu();
162} 161}
163EXPORT_SYMBOL(pn_sock_unhash); 162EXPORT_SYMBOL(pn_sock_unhash);
164 163
@@ -548,7 +547,7 @@ static struct sock *pn_sock_get_idx(struct seq_file *seq, loff_t pos)
548 unsigned h; 547 unsigned h;
549 548
550 for (h = 0; h < PN_HASHSIZE; h++) { 549 for (h = 0; h < PN_HASHSIZE; h++) {
551 sk_for_each(sknode, node, hlist) { 550 sk_for_each_rcu(sknode, node, hlist) {
552 if (!net_eq(net, sock_net(sknode))) 551 if (!net_eq(net, sock_net(sknode)))
553 continue; 552 continue;
554 if (!pos) 553 if (!pos)
@@ -572,9 +571,9 @@ static struct sock *pn_sock_get_next(struct seq_file *seq, struct sock *sk)
572} 571}
573 572
574static void *pn_sock_seq_start(struct seq_file *seq, loff_t *pos) 573static void *pn_sock_seq_start(struct seq_file *seq, loff_t *pos)
575 __acquires(pnsocks.lock) 574 __acquires(rcu)
576{ 575{
577 spin_lock_bh(&pnsocks.lock); 576 rcu_read_lock();
578 return *pos ? pn_sock_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; 577 return *pos ? pn_sock_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
579} 578}
580 579
@@ -591,9 +590,9 @@ static void *pn_sock_seq_next(struct seq_file *seq, void *v, loff_t *pos)
591} 590}
592 591
593static void pn_sock_seq_stop(struct seq_file *seq, void *v) 592static void pn_sock_seq_stop(struct seq_file *seq, void *v)
594 __releases(pnsocks.lock) 593 __releases(rcu)
595{ 594{
596 spin_unlock_bh(&pnsocks.lock); 595 rcu_read_unlock();
597} 596}
598 597
599static int pn_sock_seq_show(struct seq_file *seq, void *v) 598static int pn_sock_seq_show(struct seq_file *seq, void *v)
@@ -721,13 +720,11 @@ void pn_sock_unbind_all_res(struct sock *sk)
721 } 720 }
722 mutex_unlock(&resource_mutex); 721 mutex_unlock(&resource_mutex);
723 722
724 if (match == 0)
725 return;
726 synchronize_rcu();
727 while (match > 0) { 723 while (match > 0) {
728 sock_put(sk); 724 __sock_put(sk);
729 match--; 725 match--;
730 } 726 }
727 /* Caller is responsible for RCU sync before final sock_put() */
731} 728}
732 729
733#ifdef CONFIG_PROC_FS 730#ifdef CONFIG_PROC_FS
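[Editor's note] The phonet socket hash above moves from a bh-safe spinlock to RCU: lookups and the /proc iterator now take only rcu_read_lock(), writers serialize against each other with a mutex, and pn_sock_unhash() ends with synchronize_rcu() so no reader can still be traversing the node when the last reference is dropped. The general shape of that conversion, with invented demo_*/struct item names rather than the phonet structures:

    #include <linux/mutex.h>
    #include <linux/rculist.h>
    #include <linux/slab.h>

    struct item {
            struct hlist_node node;
            int key;
    };

    static HLIST_HEAD(demo_list);
    static DEFINE_MUTEX(demo_lock);            /* serializes writers only */

    static struct item *demo_lookup(int key)
    {
            struct item *it, *found = NULL;
            struct hlist_node *pos;

            rcu_read_lock();                   /* readers take no lock */
            hlist_for_each_entry_rcu(it, pos, &demo_list, node) {
                    if (it->key == key) {
                            found = it;        /* real code takes a reference
                                                * here, as pn_find_sock_by_sa()
                                                * does with sock_hold() */
                            break;
                    }
            }
            rcu_read_unlock();
            return found;
    }

    static void demo_unlink(struct item *it)
    {
            mutex_lock(&demo_lock);
            hlist_del_rcu(&it->node);
            mutex_unlock(&demo_lock);
            synchronize_rcu();                 /* wait out readers, then free */
            kfree(it);
    }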
diff --git a/net/rfkill/Kconfig b/net/rfkill/Kconfig
index 7fce6dfd2180..48464ca13b24 100644
--- a/net/rfkill/Kconfig
+++ b/net/rfkill/Kconfig
@@ -22,3 +22,14 @@ config RFKILL_INPUT
22 depends on RFKILL 22 depends on RFKILL
23 depends on INPUT = y || RFKILL = INPUT 23 depends on INPUT = y || RFKILL = INPUT
24 default y if !EXPERT 24 default y if !EXPERT
25
26config RFKILL_REGULATOR
27 tristate "Generic rfkill regulator driver"
28 depends on RFKILL || !RFKILL
29 depends on REGULATOR
30 help
 31 This option enables controlling radio transmitters connected to
 32 a voltage regulator using the regulator framework.
33
34 To compile this driver as a module, choose M here: the module will
35 be called rfkill-regulator.
diff --git a/net/rfkill/Makefile b/net/rfkill/Makefile
index 662105352691..d9a5a58ffd8c 100644
--- a/net/rfkill/Makefile
+++ b/net/rfkill/Makefile
@@ -5,3 +5,4 @@
5rfkill-y += core.o 5rfkill-y += core.o
6rfkill-$(CONFIG_RFKILL_INPUT) += input.o 6rfkill-$(CONFIG_RFKILL_INPUT) += input.o
7obj-$(CONFIG_RFKILL) += rfkill.o 7obj-$(CONFIG_RFKILL) += rfkill.o
8obj-$(CONFIG_RFKILL_REGULATOR) += rfkill-regulator.o
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index 0198191b756d..be90640a2774 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -1024,7 +1024,6 @@ static int rfkill_fop_open(struct inode *inode, struct file *file)
1024 * start getting events from elsewhere but hold mtx to get 1024 * start getting events from elsewhere but hold mtx to get
1025 * startup events added first 1025 * startup events added first
1026 */ 1026 */
1027 list_add(&data->list, &rfkill_fds);
1028 1027
1029 list_for_each_entry(rfkill, &rfkill_list, node) { 1028 list_for_each_entry(rfkill, &rfkill_list, node) {
1030 ev = kzalloc(sizeof(*ev), GFP_KERNEL); 1029 ev = kzalloc(sizeof(*ev), GFP_KERNEL);
@@ -1033,6 +1032,7 @@ static int rfkill_fop_open(struct inode *inode, struct file *file)
1033 rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD); 1032 rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD);
1034 list_add_tail(&ev->list, &data->events); 1033 list_add_tail(&ev->list, &data->events);
1035 } 1034 }
1035 list_add(&data->list, &rfkill_fds);
1036 mutex_unlock(&data->mtx); 1036 mutex_unlock(&data->mtx);
1037 mutex_unlock(&rfkill_global_mutex); 1037 mutex_unlock(&rfkill_global_mutex);
1038 1038
diff --git a/net/rfkill/rfkill-regulator.c b/net/rfkill/rfkill-regulator.c
new file mode 100644
index 000000000000..18dc512a10f3
--- /dev/null
+++ b/net/rfkill/rfkill-regulator.c
@@ -0,0 +1,164 @@
1/*
2 * rfkill-regulator.c - Regulator consumer driver for rfkill
3 *
4 * Copyright (C) 2009 Guiming Zhuo <gmzhuo@gmail.com>
5 * Copyright (C) 2011 Antonio Ospite <ospite@studenti.unina.it>
6 *
7 * Implementation inspired by leds-regulator driver.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 */
14
15#include <linux/module.h>
16#include <linux/err.h>
17#include <linux/slab.h>
18#include <linux/platform_device.h>
19#include <linux/regulator/consumer.h>
20#include <linux/rfkill.h>
21#include <linux/rfkill-regulator.h>
22
23struct rfkill_regulator_data {
24 struct rfkill *rf_kill;
25 bool reg_enabled;
26
27 struct regulator *vcc;
28};
29
30static int rfkill_regulator_set_block(void *data, bool blocked)
31{
32 struct rfkill_regulator_data *rfkill_data = data;
33
34 pr_debug("%s: blocked: %d\n", __func__, blocked);
35
36 if (blocked) {
37 if (rfkill_data->reg_enabled) {
38 regulator_disable(rfkill_data->vcc);
39 rfkill_data->reg_enabled = 0;
40 }
41 } else {
42 if (!rfkill_data->reg_enabled) {
43 regulator_enable(rfkill_data->vcc);
44 rfkill_data->reg_enabled = 1;
45 }
46 }
47
48 pr_debug("%s: regulator_is_enabled after set_block: %d\n", __func__,
49 regulator_is_enabled(rfkill_data->vcc));
50
51 return 0;
52}
53
54struct rfkill_ops rfkill_regulator_ops = {
55 .set_block = rfkill_regulator_set_block,
56};
57
58static int __devinit rfkill_regulator_probe(struct platform_device *pdev)
59{
60 struct rfkill_regulator_platform_data *pdata = pdev->dev.platform_data;
61 struct rfkill_regulator_data *rfkill_data;
62 struct regulator *vcc;
63 struct rfkill *rf_kill;
64 int ret = 0;
65
66 if (pdata == NULL) {
67 dev_err(&pdev->dev, "no platform data\n");
68 return -ENODEV;
69 }
70
71 if (pdata->name == NULL || pdata->type == 0) {
72 dev_err(&pdev->dev, "invalid name or type in platform data\n");
73 return -EINVAL;
74 }
75
76 vcc = regulator_get_exclusive(&pdev->dev, "vrfkill");
77 if (IS_ERR(vcc)) {
78 dev_err(&pdev->dev, "Cannot get vcc for %s\n", pdata->name);
79 ret = PTR_ERR(vcc);
80 goto out;
81 }
82
83 rfkill_data = kzalloc(sizeof(*rfkill_data), GFP_KERNEL);
84 if (rfkill_data == NULL) {
85 ret = -ENOMEM;
86 goto err_data_alloc;
87 }
88
89 rf_kill = rfkill_alloc(pdata->name, &pdev->dev,
90 pdata->type,
91 &rfkill_regulator_ops, rfkill_data);
92 if (rf_kill == NULL) {
93 dev_err(&pdev->dev, "Cannot alloc rfkill device\n");
94 ret = -ENOMEM;
95 goto err_rfkill_alloc;
96 }
97
98 if (regulator_is_enabled(vcc)) {
99 dev_dbg(&pdev->dev, "Regulator already enabled\n");
100 rfkill_data->reg_enabled = 1;
101 }
102 rfkill_data->vcc = vcc;
103 rfkill_data->rf_kill = rf_kill;
104
105 ret = rfkill_register(rf_kill);
106 if (ret) {
107 dev_err(&pdev->dev, "Cannot register rfkill device\n");
108 goto err_rfkill_register;
109 }
110
111 platform_set_drvdata(pdev, rfkill_data);
112 dev_info(&pdev->dev, "%s initialized\n", pdata->name);
113
114 return 0;
115
116err_rfkill_register:
117 rfkill_destroy(rf_kill);
118err_rfkill_alloc:
119 kfree(rfkill_data);
120err_data_alloc:
121 regulator_put(vcc);
122out:
123 return ret;
124}
125
126static int __devexit rfkill_regulator_remove(struct platform_device *pdev)
127{
128 struct rfkill_regulator_data *rfkill_data = platform_get_drvdata(pdev);
129 struct rfkill *rf_kill = rfkill_data->rf_kill;
130
131 rfkill_unregister(rf_kill);
132 rfkill_destroy(rf_kill);
133 regulator_put(rfkill_data->vcc);
134 kfree(rfkill_data);
135
136 return 0;
137}
138
139static struct platform_driver rfkill_regulator_driver = {
140 .probe = rfkill_regulator_probe,
141 .remove = __devexit_p(rfkill_regulator_remove),
142 .driver = {
143 .name = "rfkill-regulator",
144 .owner = THIS_MODULE,
145 },
146};
147
148static int __init rfkill_regulator_init(void)
149{
150 return platform_driver_register(&rfkill_regulator_driver);
151}
152module_init(rfkill_regulator_init);
153
154static void __exit rfkill_regulator_exit(void)
155{
156 platform_driver_unregister(&rfkill_regulator_driver);
157}
158module_exit(rfkill_regulator_exit);
159
160MODULE_AUTHOR("Guiming Zhuo <gmzhuo@gmail.com>");
161MODULE_AUTHOR("Antonio Ospite <ospite@studenti.unina.it>");
162MODULE_DESCRIPTION("Regulator consumer driver for rfkill");
163MODULE_LICENSE("GPL");
164MODULE_ALIAS("platform:rfkill-regulator");
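[Editor's note] A board file would bind the new driver by naming the switch and its rfkill type in platform data and by routing a "vrfkill" regulator supply to the device. A hypothetical machine-file fragment; everything except the driver name "rfkill-regulator" and the "vrfkill" supply id (both fixed by the driver above) is invented:

    #include <linux/platform_device.h>
    #include <linux/regulator/machine.h>
    #include <linux/rfkill.h>
    #include <linux/rfkill-regulator.h>

    static struct rfkill_regulator_platform_data wlan_rfkill_pdata = {
            .name = "wlan-power",
            .type = RFKILL_TYPE_WLAN,
    };

    static struct platform_device wlan_rfkill_device = {
            .name              = "rfkill-regulator",
            .id                = -1,
            .dev.platform_data = &wlan_rfkill_pdata,
    };

    /* The regulator powering the radio must list the device as a
     * consumer of its "vrfkill" supply: */
    static struct regulator_consumer_supply wlan_vcc_consumers[] = {
            REGULATOR_SUPPLY("vrfkill", "rfkill-regulator"),
    };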
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index a80aef6e3d1f..f9ea925ad9cb 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -682,10 +682,8 @@ static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
682 if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS) 682 if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
683 return -EINVAL; 683 return -EINVAL;
684 684
685 if ((dev = rose_dev_get(&addr->srose_addr)) == NULL) { 685 if ((dev = rose_dev_get(&addr->srose_addr)) == NULL)
686 SOCK_DEBUG(sk, "ROSE: bind failed: invalid address\n");
687 return -EADDRNOTAVAIL; 686 return -EADDRNOTAVAIL;
688 }
689 687
690 source = &addr->srose_call; 688 source = &addr->srose_call;
691 689
@@ -716,7 +714,7 @@ static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
716 rose_insert_socket(sk); 714 rose_insert_socket(sk);
717 715
718 sock_reset_flag(sk, SOCK_ZAPPED); 716 sock_reset_flag(sk, SOCK_ZAPPED);
719 SOCK_DEBUG(sk, "ROSE: socket is bound\n"); 717
720 return 0; 718 return 0;
721} 719}
722 720
@@ -1109,10 +1107,7 @@ static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
1109 srose.srose_digis[n] = rose->dest_digis[n]; 1107 srose.srose_digis[n] = rose->dest_digis[n];
1110 } 1108 }
1111 1109
1112 SOCK_DEBUG(sk, "ROSE: sendto: Addresses built.\n");
1113
1114 /* Build a packet */ 1110 /* Build a packet */
1115 SOCK_DEBUG(sk, "ROSE: sendto: building packet.\n");
1116 /* Sanity check the packet size */ 1111 /* Sanity check the packet size */
1117 if (len > 65535) 1112 if (len > 65535)
1118 return -EMSGSIZE; 1113 return -EMSGSIZE;
@@ -1127,7 +1122,6 @@ static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
1127 /* 1122 /*
1128 * Put the data on the end 1123 * Put the data on the end
1129 */ 1124 */
1130 SOCK_DEBUG(sk, "ROSE: Appending user data\n");
1131 1125
1132 skb_reset_transport_header(skb); 1126 skb_reset_transport_header(skb);
1133 skb_put(skb, len); 1127 skb_put(skb, len);
@@ -1152,8 +1146,6 @@ static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
1152 */ 1146 */
1153 asmptr = skb_push(skb, ROSE_MIN_LEN); 1147 asmptr = skb_push(skb, ROSE_MIN_LEN);
1154 1148
1155 SOCK_DEBUG(sk, "ROSE: Building Network Header.\n");
1156
1157 /* Build a ROSE Network header */ 1149 /* Build a ROSE Network header */
1158 asmptr[0] = ((rose->lci >> 8) & 0x0F) | ROSE_GFI; 1150 asmptr[0] = ((rose->lci >> 8) & 0x0F) | ROSE_GFI;
1159 asmptr[1] = (rose->lci >> 0) & 0xFF; 1151 asmptr[1] = (rose->lci >> 0) & 0xFF;
@@ -1162,10 +1154,6 @@ static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
1162 if (qbit) 1154 if (qbit)
1163 asmptr[0] |= ROSE_Q_BIT; 1155 asmptr[0] |= ROSE_Q_BIT;
1164 1156
1165 SOCK_DEBUG(sk, "ROSE: Built header.\n");
1166
1167 SOCK_DEBUG(sk, "ROSE: Transmitting buffer\n");
1168
1169 if (sk->sk_state != TCP_ESTABLISHED) { 1157 if (sk->sk_state != TCP_ESTABLISHED) {
1170 kfree_skb(skb); 1158 kfree_skb(skb);
1171 return -ENOTCONN; 1159 return -ENOTCONN;
diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
index b6ffe4e1b84a..f99cfce7ca97 100644
--- a/net/rxrpc/ar-ack.c
+++ b/net/rxrpc/ar-ack.c
@@ -375,7 +375,6 @@ protocol_error:
375 */ 375 */
376static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard) 376static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
377{ 377{
378 struct rxrpc_skb_priv *sp;
379 unsigned long _skb; 378 unsigned long _skb;
380 int tail = call->acks_tail, old_tail; 379 int tail = call->acks_tail, old_tail;
381 int win = CIRC_CNT(call->acks_head, tail, call->acks_winsz); 380 int win = CIRC_CNT(call->acks_head, tail, call->acks_winsz);
@@ -387,7 +386,6 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
387 while (call->acks_hard < hard) { 386 while (call->acks_hard < hard) {
388 smp_read_barrier_depends(); 387 smp_read_barrier_depends();
389 _skb = call->acks_window[tail] & ~1; 388 _skb = call->acks_window[tail] & ~1;
390 sp = rxrpc_skb((struct sk_buff *) _skb);
391 rxrpc_free_skb((struct sk_buff *) _skb); 389 rxrpc_free_skb((struct sk_buff *) _skb);
392 old_tail = tail; 390 old_tail = tail;
393 tail = (tail + 1) & (call->acks_winsz - 1); 391 tail = (tail + 1) & (call->acks_winsz - 1);
diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c
index 0505cdc4d6d4..e7ed43a54c41 100644
--- a/net/rxrpc/ar-connevent.c
+++ b/net/rxrpc/ar-connevent.c
@@ -259,7 +259,6 @@ void rxrpc_process_connection(struct work_struct *work)
259{ 259{
260 struct rxrpc_connection *conn = 260 struct rxrpc_connection *conn =
261 container_of(work, struct rxrpc_connection, processor); 261 container_of(work, struct rxrpc_connection, processor);
262 struct rxrpc_skb_priv *sp;
263 struct sk_buff *skb; 262 struct sk_buff *skb;
264 u32 abort_code = RX_PROTOCOL_ERROR; 263 u32 abort_code = RX_PROTOCOL_ERROR;
265 int ret; 264 int ret;
@@ -276,8 +275,6 @@ void rxrpc_process_connection(struct work_struct *work)
276 /* go through the conn-level event packets, releasing the ref on this 275 /* go through the conn-level event packets, releasing the ref on this
277 * connection that each one has when we've finished with it */ 276 * connection that each one has when we've finished with it */
278 while ((skb = skb_dequeue(&conn->rx_queue))) { 277 while ((skb = skb_dequeue(&conn->rx_queue))) {
279 sp = rxrpc_skb(skb);
280
281 ret = rxrpc_process_event(conn, skb, &abort_code); 278 ret = rxrpc_process_event(conn, skb, &abort_code);
282 switch (ret) { 279 switch (ret) {
283 case -EPROTO: 280 case -EPROTO:
diff --git a/net/rxrpc/ar-error.c b/net/rxrpc/ar-error.c
index d4d1ae26d293..5d6b572a6704 100644
--- a/net/rxrpc/ar-error.c
+++ b/net/rxrpc/ar-error.c
@@ -139,7 +139,7 @@ void rxrpc_UDP_error_handler(struct work_struct *work)
139 struct rxrpc_transport *trans = 139 struct rxrpc_transport *trans =
140 container_of(work, struct rxrpc_transport, error_handler); 140 container_of(work, struct rxrpc_transport, error_handler);
141 struct sk_buff *skb; 141 struct sk_buff *skb;
142 int local, err; 142 int err;
143 143
144 _enter(""); 144 _enter("");
145 145
@@ -157,7 +157,6 @@ void rxrpc_UDP_error_handler(struct work_struct *work)
157 157
158 switch (ee->ee_origin) { 158 switch (ee->ee_origin) {
159 case SO_EE_ORIGIN_ICMP: 159 case SO_EE_ORIGIN_ICMP:
160 local = 0;
161 switch (ee->ee_type) { 160 switch (ee->ee_type) {
162 case ICMP_DEST_UNREACH: 161 case ICMP_DEST_UNREACH:
163 switch (ee->ee_code) { 162 switch (ee->ee_code) {
@@ -207,7 +206,6 @@ void rxrpc_UDP_error_handler(struct work_struct *work)
207 case SO_EE_ORIGIN_LOCAL: 206 case SO_EE_ORIGIN_LOCAL:
208 _proto("Rx Received local error { error=%d }", 207 _proto("Rx Received local error { error=%d }",
209 ee->ee_errno); 208 ee->ee_errno);
210 local = 1;
211 break; 209 break;
212 210
213 case SO_EE_ORIGIN_NONE: 211 case SO_EE_ORIGIN_NONE:
@@ -215,7 +213,6 @@ void rxrpc_UDP_error_handler(struct work_struct *work)
215 default: 213 default:
216 _proto("Rx Received error report { orig=%u }", 214 _proto("Rx Received error report { orig=%u }",
217 ee->ee_origin); 215 ee->ee_origin);
218 local = 0;
219 break; 216 break;
220 } 217 }
221 218
diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
index 55b93dc60d0c..2754f098d436 100644
--- a/net/rxrpc/ar-peer.c
+++ b/net/rxrpc/ar-peer.c
@@ -36,10 +36,11 @@ static void rxrpc_destroy_peer(struct work_struct *work);
36static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer) 36static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer)
37{ 37{
38 struct rtable *rt; 38 struct rtable *rt;
39 struct flowi4 fl4;
39 40
40 peer->if_mtu = 1500; 41 peer->if_mtu = 1500;
41 42
42 rt = ip_route_output_ports(&init_net, NULL, 43 rt = ip_route_output_ports(&init_net, &fl4, NULL,
43 peer->srx.transport.sin.sin_addr.s_addr, 0, 44 peer->srx.transport.sin.sin_addr.s_addr, 0,
44 htons(7000), htons(7001), 45 htons(7000), htons(7001),
45 IPPROTO_UDP, 0, 0); 46 IPPROTO_UDP, 0, 0);
@@ -156,6 +157,7 @@ struct rxrpc_peer *rxrpc_get_peer(struct sockaddr_rxrpc *srx, gfp_t gfp)
156 /* we can now add the new candidate to the list */ 157 /* we can now add the new candidate to the list */
157 peer = candidate; 158 peer = candidate;
158 candidate = NULL; 159 candidate = NULL;
160 usage = atomic_read(&peer->usage);
159 161
160 list_add_tail(&peer->link, &rxrpc_peers); 162 list_add_tail(&peer->link, &rxrpc_peers);
161 write_unlock_bh(&rxrpc_peer_lock); 163 write_unlock_bh(&rxrpc_peer_lock);
@@ -170,7 +172,7 @@ success:
170 &peer->srx.transport.sin.sin_addr, 172 &peer->srx.transport.sin.sin_addr,
171 ntohs(peer->srx.transport.sin.sin_port)); 173 ntohs(peer->srx.transport.sin.sin_port));
172 174
173 _leave(" = %p {u=%d}", peer, atomic_read(&peer->usage)); 175 _leave(" = %p {u=%d}", peer, usage);
174 return peer; 176 return peer;
175 177
176 /* we found the peer in the list immediately */ 178 /* we found the peer in the list immediately */
diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c
index 5e0226fe587e..92df566930b9 100644
--- a/net/rxrpc/ar-transport.c
+++ b/net/rxrpc/ar-transport.c
@@ -111,6 +111,7 @@ struct rxrpc_transport *rxrpc_get_transport(struct rxrpc_local *local,
111 /* we can now add the new candidate to the list */ 111 /* we can now add the new candidate to the list */
112 trans = candidate; 112 trans = candidate;
113 candidate = NULL; 113 candidate = NULL;
114 usage = atomic_read(&trans->usage);
114 115
115 rxrpc_get_local(trans->local); 116 rxrpc_get_local(trans->local);
116 atomic_inc(&trans->peer->usage); 117 atomic_inc(&trans->peer->usage);
@@ -125,7 +126,7 @@ success:
125 trans->local->debug_id, 126 trans->local->debug_id,
126 trans->peer->debug_id); 127 trans->peer->debug_id);
127 128
128 _leave(" = %p {u=%d}", trans, atomic_read(&trans->usage)); 129 _leave(" = %p {u=%d}", trans, usage);
129 return trans; 130 return trans;
130 131
131 /* we found the transport in the list immediately */ 132 /* we found the transport in the list immediately */
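[Editor's note] Both rxrpc hunks apply the same fix for the previously set-but-unused 'usage' variable: snapshot the reference count while the object is still private to the creating thread, then log that snapshot rather than re-reading the atomic after the object has been published, which would race with other CPUs. A minimal sketch of the idiom with an invented object type:

    #include <linux/atomic.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct obj {
            atomic_t usage;
            struct list_head link;
    };

    static LIST_HEAD(global_list);
    static DEFINE_RWLOCK(global_lock);

    /* Publish obj and return its refcount as of publication time.
     * Reading obj->usage again after the unlock would race with
     * other CPUs that can now find the object on the list. */
    static int publish_obj(struct obj *obj)
    {
            int usage;

            write_lock_bh(&global_lock);
            usage = atomic_read(&obj->usage);  /* snapshot while still ours */
            list_add_tail(&obj->link, &global_list);
            write_unlock_bh(&global_lock);

            return usage;
    }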
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index a7a5583d4f68..2590e91b3289 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -239,6 +239,17 @@ config NET_SCH_CHOKE
239 To compile this code as a module, choose M here: the 239 To compile this code as a module, choose M here: the
240 module will be called sch_choke. 240 module will be called sch_choke.
241 241
242config NET_SCH_QFQ
243 tristate "Quick Fair Queueing scheduler (QFQ)"
244 help
245 Say Y here if you want to use the Quick Fair Queueing Scheduler (QFQ)
246 packet scheduling algorithm.
247
248 To compile this driver as a module, choose M here: the module
249 will be called sch_qfq.
250
251 If unsure, say N.
252
242config NET_SCH_INGRESS 253config NET_SCH_INGRESS
243 tristate "Ingress Qdisc" 254 tristate "Ingress Qdisc"
244 depends on NET_CLS_ACT 255 depends on NET_CLS_ACT
@@ -277,6 +288,7 @@ config NET_CLS_TCINDEX
277 288
278config NET_CLS_ROUTE4 289config NET_CLS_ROUTE4
279 tristate "Routing decision (ROUTE)" 290 tristate "Routing decision (ROUTE)"
291 depends on INET
280 select IP_ROUTE_CLASSID 292 select IP_ROUTE_CLASSID
281 select NET_CLS 293 select NET_CLS
282 ---help--- 294 ---help---
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 2e77b8dba22e..dc5889c0a15a 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -35,6 +35,7 @@ obj-$(CONFIG_NET_SCH_NETEM) += sch_netem.o
35obj-$(CONFIG_NET_SCH_DRR) += sch_drr.o 35obj-$(CONFIG_NET_SCH_DRR) += sch_drr.o
36obj-$(CONFIG_NET_SCH_MQPRIO) += sch_mqprio.o 36obj-$(CONFIG_NET_SCH_MQPRIO) += sch_mqprio.o
37obj-$(CONFIG_NET_SCH_CHOKE) += sch_choke.o 37obj-$(CONFIG_NET_SCH_CHOKE) += sch_choke.o
38obj-$(CONFIG_NET_SCH_QFQ) += sch_qfq.o
38 39
39obj-$(CONFIG_NET_CLS_U32) += cls_u32.o 40obj-$(CONFIG_NET_CLS_U32) += cls_u32.o
40obj-$(CONFIG_NET_CLS_ROUTE4) += cls_route.o 41obj-$(CONFIG_NET_CLS_ROUTE4) += cls_route.o
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 7490f3f2db8b..6b8627661c98 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1673,10 +1673,8 @@ int tc_classify(struct sk_buff *skb, struct tcf_proto *tp,
1673{ 1673{
1674 int err = 0; 1674 int err = 0;
1675#ifdef CONFIG_NET_CLS_ACT 1675#ifdef CONFIG_NET_CLS_ACT
1676 __be16 protocol;
1677 struct tcf_proto *otp = tp; 1676 struct tcf_proto *otp = tp;
1678reclassify: 1677reclassify:
1679 protocol = skb->protocol;
1680#endif 1678#endif
1681 1679
1682 err = tc_classify_compat(skb, tp, res); 1680 err = tc_classify_compat(skb, tp, res);
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
new file mode 100644
index 000000000000..103343408593
--- /dev/null
+++ b/net/sched/sch_qfq.c
@@ -0,0 +1,1137 @@
1/*
2 * net/sched/sch_qfq.c Quick Fair Queueing Scheduler.
3 *
4 * Copyright (c) 2009 Fabio Checconi, Luigi Rizzo, and Paolo Valente.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 */
10
11#include <linux/module.h>
12#include <linux/init.h>
13#include <linux/bitops.h>
14#include <linux/errno.h>
15#include <linux/netdevice.h>
16#include <linux/pkt_sched.h>
17#include <net/sch_generic.h>
18#include <net/pkt_sched.h>
19#include <net/pkt_cls.h>
20
21
22/* Quick Fair Queueing
23 ===================
24
25 Sources:
26
27 Fabio Checconi, Luigi Rizzo, and Paolo Valente: "QFQ: Efficient
28 Packet Scheduling with Tight Bandwidth Distribution Guarantees."
29
30 See also:
31 http://retis.sssup.it/~fabio/linux/qfq/
32 */
33
34/*
35
36 Virtual time computations.
37
38 S, F and V are all computed in fixed point arithmetic with
39 FRAC_BITS decimal bits.
40
41 QFQ_MAX_INDEX is the maximum index allowed for a group. We need
42 one bit per index.
43 QFQ_MAX_WSHIFT is the maximum power of two supported as a weight.
44
45 The layout of the bits is as below:
46
47 [ MTU_SHIFT ][ FRAC_BITS ]
48 [ MAX_INDEX ][ MIN_SLOT_SHIFT ]
49 ^.__grp->index = 0
50 *.__grp->slot_shift
51
52 where MIN_SLOT_SHIFT is derived by difference from the others.
53
54 The max group index corresponds to Lmax/w_min, where
 55 Lmax=1<<MTU_SHIFT, w_min = 1.
56 From this, and knowing how many groups (MAX_INDEX) we want,
57 we can derive the shift corresponding to each group.
58
59 Because we often need to compute
60 F = S + len/w_i and V = V + len/wsum
61 instead of storing w_i store the value
62 inv_w = (1<<FRAC_BITS)/w_i
63 so we can do F = S + len * inv_w * wsum.
64 We use W_TOT in the formulas so we can easily move between
65 static and adaptive weight sum.
66
67 The per-scheduler-instance data contain all the data structures
68 for the scheduler: bitmaps and bucket lists.
69
70 */
71
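/*
 * [Editor's worked example, illustrative numbers only, not part of the
 * original comment] With FRAC_BITS = 30, a class of weight w = 4 stores
 * inv_w = ONE_FP/w = (1 << 30)/4 = 0x10000000.  Serving a len = 1500
 * byte packet advances the flow's finish time by len * inv_w =
 * 1500 * 2^28; dividing the 2^30 fixed-point scale back out, that is
 * exactly len/w = 1500/4 = 375 units of virtual time.
 */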
72/*
73 * Maximum number of consecutive slots occupied by backlogged classes
74 * inside a group.
75 */
76#define QFQ_MAX_SLOTS 32
77
78/*
79 * Shifts used for class<->group mapping. We allow class weights that are
80 * in the range [1, 2^MAX_WSHIFT], and we try to map each class i to the
81 * group with the smallest index that can support the L_i / r_i configured
82 * for the class.
83 *
84 * grp->index is the index of the group; and grp->slot_shift
85 * is the shift for the corresponding (scaled) sigma_i.
86 */
87#define QFQ_MAX_INDEX 19
88#define QFQ_MAX_WSHIFT 16
89
90#define QFQ_MAX_WEIGHT (1<<QFQ_MAX_WSHIFT)
91#define QFQ_MAX_WSUM (2*QFQ_MAX_WEIGHT)
92
93#define FRAC_BITS 30 /* fixed point arithmetic */
94#define ONE_FP (1UL << FRAC_BITS)
95#define IWSUM (ONE_FP/QFQ_MAX_WSUM)
96
97#define QFQ_MTU_SHIFT 11
98#define QFQ_MIN_SLOT_SHIFT (FRAC_BITS + QFQ_MTU_SHIFT - QFQ_MAX_INDEX)
99
100/*
101 * Possible group states. These values are used as indexes for the bitmaps
102 * array of struct qfq_queue.
103 */
104enum qfq_state { ER, IR, EB, IB, QFQ_MAX_STATE };
105
106struct qfq_group;
107
108struct qfq_class {
109 struct Qdisc_class_common common;
110
111 unsigned int refcnt;
112 unsigned int filter_cnt;
113
114 struct gnet_stats_basic_packed bstats;
115 struct gnet_stats_queue qstats;
116 struct gnet_stats_rate_est rate_est;
117 struct Qdisc *qdisc;
118
119 struct hlist_node next; /* Link for the slot list. */
120 u64 S, F; /* flow timestamps (exact) */
121
122 /* group we belong to. In principle we would need the index,
123 * which is log_2(lmax/weight), but we never reference it
124 * directly, only the group.
125 */
126 struct qfq_group *grp;
127
128 /* these are copied from the flowset. */
129 u32 inv_w; /* ONE_FP/weight */
130 u32 lmax; /* Max packet size for this flow. */
131};
132
133struct qfq_group {
134 u64 S, F; /* group timestamps (approx). */
135 unsigned int slot_shift; /* Slot shift. */
136 unsigned int index; /* Group index. */
137 unsigned int front; /* Index of the front slot. */
138 unsigned long full_slots; /* non-empty slots */
139
140 /* Array of RR lists of active classes. */
141 struct hlist_head slots[QFQ_MAX_SLOTS];
142};
143
144struct qfq_sched {
145 struct tcf_proto *filter_list;
146 struct Qdisc_class_hash clhash;
147
148 u64 V; /* Precise virtual time. */
149 u32 wsum; /* weight sum */
150
151 unsigned long bitmaps[QFQ_MAX_STATE]; /* Group bitmaps. */
152 struct qfq_group groups[QFQ_MAX_INDEX + 1]; /* The groups. */
153};
154
155static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
156{
157 struct qfq_sched *q = qdisc_priv(sch);
158 struct Qdisc_class_common *clc;
159
160 clc = qdisc_class_find(&q->clhash, classid);
161 if (clc == NULL)
162 return NULL;
163 return container_of(clc, struct qfq_class, common);
164}
165
166static void qfq_purge_queue(struct qfq_class *cl)
167{
168 unsigned int len = cl->qdisc->q.qlen;
169
170 qdisc_reset(cl->qdisc);
171 qdisc_tree_decrease_qlen(cl->qdisc, len);
172}
173
174static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = {
175 [TCA_QFQ_WEIGHT] = { .type = NLA_U32 },
176 [TCA_QFQ_LMAX] = { .type = NLA_U32 },
177};
178
179/*
180 * Calculate a flow index, given its weight and maximum packet length.
181 * index = log_2(maxlen/weight) but we need to apply the scaling.
182 * This is used only once at flow creation.
183 */
184static int qfq_calc_index(u32 inv_w, unsigned int maxlen)
185{
186 u64 slot_size = (u64)maxlen * inv_w;
187 unsigned long size_map;
188 int index = 0;
189
190 size_map = slot_size >> QFQ_MIN_SLOT_SHIFT;
191 if (!size_map)
192 goto out;
193
194 index = __fls(size_map) + 1; /* basically a log_2 */
195 index -= !(slot_size - (1ULL << (index + QFQ_MIN_SLOT_SHIFT - 1)));
196
197 if (index < 0)
198 index = 0;
199out:
200 pr_debug("qfq calc_index: W = %lu, L = %u, I = %d\n",
201 (unsigned long) ONE_FP/inv_w, maxlen, index);
202
203 return index;
204}
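/*
 * [Editor's worked example, numbers invented] weight 4 and maxlen 2048
 * give inv_w = 1 << 28, so
 *   slot_size = 2048 * (1 << 28)  = 1ULL << 39,
 *   size_map  = slot_size >> 22   = 1 << 17  (QFQ_MIN_SLOT_SHIFT
 *                                             = 30 + 11 - 19 = 22),
 *   index     = __fls(1 << 17) + 1 = 18,
 * and because slot_size is an exact power of two the correction line
 * subtracts one, yielding group index 17.
 */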
205
206static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
207 struct nlattr **tca, unsigned long *arg)
208{
209 struct qfq_sched *q = qdisc_priv(sch);
210 struct qfq_class *cl = (struct qfq_class *)*arg;
211 struct nlattr *tb[TCA_QFQ_MAX + 1];
212 u32 weight, lmax, inv_w;
213 int i, err;
214
215 if (tca[TCA_OPTIONS] == NULL) {
216 pr_notice("qfq: no options\n");
217 return -EINVAL;
218 }
219
220 err = nla_parse_nested(tb, TCA_QFQ_MAX, tca[TCA_OPTIONS], qfq_policy);
221 if (err < 0)
222 return err;
223
224 if (tb[TCA_QFQ_WEIGHT]) {
225 weight = nla_get_u32(tb[TCA_QFQ_WEIGHT]);
226 if (!weight || weight > (1UL << QFQ_MAX_WSHIFT)) {
227 pr_notice("qfq: invalid weight %u\n", weight);
228 return -EINVAL;
229 }
230 } else
231 weight = 1;
232
233 inv_w = ONE_FP / weight;
234 weight = ONE_FP / inv_w;
235 if (q->wsum + weight > QFQ_MAX_WSUM) {
236 pr_notice("qfq: total weight out of range (%u + %u)\n",
237 weight, q->wsum);
238 return -EINVAL;
239 }
240
241 if (tb[TCA_QFQ_LMAX]) {
242 lmax = nla_get_u32(tb[TCA_QFQ_LMAX]);
243 if (!lmax || lmax > (1UL << QFQ_MTU_SHIFT)) {
244 pr_notice("qfq: invalid max length %u\n", lmax);
245 return -EINVAL;
246 }
247 } else
248 lmax = 1UL << QFQ_MTU_SHIFT;
249
250 if (cl != NULL) {
251 if (tca[TCA_RATE]) {
252 err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
253 qdisc_root_sleeping_lock(sch),
254 tca[TCA_RATE]);
255 if (err)
256 return err;
257 }
258
259 sch_tree_lock(sch);
260 if (tb[TCA_QFQ_WEIGHT]) {
261 q->wsum = weight - ONE_FP / cl->inv_w;
262 cl->inv_w = inv_w;
263 }
264 sch_tree_unlock(sch);
265
266 return 0;
267 }
268
269 cl = kzalloc(sizeof(struct qfq_class), GFP_KERNEL);
270 if (cl == NULL)
271 return -ENOBUFS;
272
273 cl->refcnt = 1;
274 cl->common.classid = classid;
275 cl->lmax = lmax;
276 cl->inv_w = inv_w;
277 i = qfq_calc_index(cl->inv_w, cl->lmax);
278
279 cl->grp = &q->groups[i];
280 q->wsum += weight;
281
282 cl->qdisc = qdisc_create_dflt(sch->dev_queue,
283 &pfifo_qdisc_ops, classid);
284 if (cl->qdisc == NULL)
285 cl->qdisc = &noop_qdisc;
286
287 if (tca[TCA_RATE]) {
288 err = gen_new_estimator(&cl->bstats, &cl->rate_est,
289 qdisc_root_sleeping_lock(sch),
290 tca[TCA_RATE]);
291 if (err) {
292 qdisc_destroy(cl->qdisc);
293 kfree(cl);
294 return err;
295 }
296 }
297
298 sch_tree_lock(sch);
299 qdisc_class_hash_insert(&q->clhash, &cl->common);
300 sch_tree_unlock(sch);
301
302 qdisc_class_hash_grow(sch, &q->clhash);
303
304 *arg = (unsigned long)cl;
305 return 0;
306}
307
308static void qfq_destroy_class(struct Qdisc *sch, struct qfq_class *cl)
309{
310 struct qfq_sched *q = qdisc_priv(sch);
311
312 if (cl->inv_w) {
313 q->wsum -= ONE_FP / cl->inv_w;
314 cl->inv_w = 0;
315 }
316
317 gen_kill_estimator(&cl->bstats, &cl->rate_est);
318 qdisc_destroy(cl->qdisc);
319 kfree(cl);
320}
321
322static int qfq_delete_class(struct Qdisc *sch, unsigned long arg)
323{
324 struct qfq_sched *q = qdisc_priv(sch);
325 struct qfq_class *cl = (struct qfq_class *)arg;
326
327 if (cl->filter_cnt > 0)
328 return -EBUSY;
329
330 sch_tree_lock(sch);
331
332 qfq_purge_queue(cl);
333 qdisc_class_hash_remove(&q->clhash, &cl->common);
334
335 BUG_ON(--cl->refcnt == 0);
336 /*
337 * This shouldn't happen: we "hold" one cops->get() when called
338 * from tc_ctl_tclass; the destroy method is done from cops->put().
339 */
340
341 sch_tree_unlock(sch);
342 return 0;
343}
344
345static unsigned long qfq_get_class(struct Qdisc *sch, u32 classid)
346{
347 struct qfq_class *cl = qfq_find_class(sch, classid);
348
349 if (cl != NULL)
350 cl->refcnt++;
351
352 return (unsigned long)cl;
353}
354
355static void qfq_put_class(struct Qdisc *sch, unsigned long arg)
356{
357 struct qfq_class *cl = (struct qfq_class *)arg;
358
359 if (--cl->refcnt == 0)
360 qfq_destroy_class(sch, cl);
361}
362
363static struct tcf_proto **qfq_tcf_chain(struct Qdisc *sch, unsigned long cl)
364{
365 struct qfq_sched *q = qdisc_priv(sch);
366
367 if (cl)
368 return NULL;
369
370 return &q->filter_list;
371}
372
373static unsigned long qfq_bind_tcf(struct Qdisc *sch, unsigned long parent,
374 u32 classid)
375{
376 struct qfq_class *cl = qfq_find_class(sch, classid);
377
378 if (cl != NULL)
379 cl->filter_cnt++;
380
381 return (unsigned long)cl;
382}
383
384static void qfq_unbind_tcf(struct Qdisc *sch, unsigned long arg)
385{
386 struct qfq_class *cl = (struct qfq_class *)arg;
387
388 cl->filter_cnt--;
389}
390
391static int qfq_graft_class(struct Qdisc *sch, unsigned long arg,
392 struct Qdisc *new, struct Qdisc **old)
393{
394 struct qfq_class *cl = (struct qfq_class *)arg;
395
396 if (new == NULL) {
397 new = qdisc_create_dflt(sch->dev_queue,
398 &pfifo_qdisc_ops, cl->common.classid);
399 if (new == NULL)
400 new = &noop_qdisc;
401 }
402
403 sch_tree_lock(sch);
404 qfq_purge_queue(cl);
405 *old = cl->qdisc;
406 cl->qdisc = new;
407 sch_tree_unlock(sch);
408 return 0;
409}
410
411static struct Qdisc *qfq_class_leaf(struct Qdisc *sch, unsigned long arg)
412{
413 struct qfq_class *cl = (struct qfq_class *)arg;
414
415 return cl->qdisc;
416}
417
418static int qfq_dump_class(struct Qdisc *sch, unsigned long arg,
419 struct sk_buff *skb, struct tcmsg *tcm)
420{
421 struct qfq_class *cl = (struct qfq_class *)arg;
422 struct nlattr *nest;
423
424 tcm->tcm_parent = TC_H_ROOT;
425 tcm->tcm_handle = cl->common.classid;
426 tcm->tcm_info = cl->qdisc->handle;
427
428 nest = nla_nest_start(skb, TCA_OPTIONS);
429 if (nest == NULL)
430 goto nla_put_failure;
431 NLA_PUT_U32(skb, TCA_QFQ_WEIGHT, ONE_FP/cl->inv_w);
432 NLA_PUT_U32(skb, TCA_QFQ_LMAX, cl->lmax);
433 return nla_nest_end(skb, nest);
434
435nla_put_failure:
436 nla_nest_cancel(skb, nest);
437 return -EMSGSIZE;
438}
439
440static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
441 struct gnet_dump *d)
442{
443 struct qfq_class *cl = (struct qfq_class *)arg;
444 struct tc_qfq_stats xstats;
445
446 memset(&xstats, 0, sizeof(xstats));
447 cl->qdisc->qstats.qlen = cl->qdisc->q.qlen;
448
449 xstats.weight = ONE_FP/cl->inv_w;
450 xstats.lmax = cl->lmax;
451
452 if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
453 gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
454 gnet_stats_copy_queue(d, &cl->qdisc->qstats) < 0)
455 return -1;
456
457 return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
458}
459
460static void qfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
461{
462 struct qfq_sched *q = qdisc_priv(sch);
463 struct qfq_class *cl;
464 struct hlist_node *n;
465 unsigned int i;
466
467 if (arg->stop)
468 return;
469
470 for (i = 0; i < q->clhash.hashsize; i++) {
471 hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
472 if (arg->count < arg->skip) {
473 arg->count++;
474 continue;
475 }
476 if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
477 arg->stop = 1;
478 return;
479 }
480 arg->count++;
481 }
482 }
483}
484
485static struct qfq_class *qfq_classify(struct sk_buff *skb, struct Qdisc *sch,
486 int *qerr)
487{
488 struct qfq_sched *q = qdisc_priv(sch);
489 struct qfq_class *cl;
490 struct tcf_result res;
491 int result;
492
493 if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) {
494 pr_debug("qfq_classify: found %d\n", skb->priority);
495 cl = qfq_find_class(sch, skb->priority);
496 if (cl != NULL)
497 return cl;
498 }
499
500 *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
501 result = tc_classify(skb, q->filter_list, &res);
502 if (result >= 0) {
503#ifdef CONFIG_NET_CLS_ACT
504 switch (result) {
505 case TC_ACT_QUEUED:
506 case TC_ACT_STOLEN:
507 *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
508 case TC_ACT_SHOT:
509 return NULL;
510 }
511#endif
512 cl = (struct qfq_class *)res.class;
513 if (cl == NULL)
514 cl = qfq_find_class(sch, res.classid);
515 return cl;
516 }
517
518 return NULL;
519}
520
521/* Generic comparison function, handling wraparound. */
522static inline int qfq_gt(u64 a, u64 b)
523{
524 return (s64)(a - b) > 0;
525}
526
527/* Round a precise timestamp to its slotted value. */
528static inline u64 qfq_round_down(u64 ts, unsigned int shift)
529{
530 return ts & ~((1ULL << shift) - 1);
531}
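/*
 * [Editor's example] qfq_gt() survives 64-bit wraparound: qfq_gt(1, ~0ULL)
 * is true because (s64)(1 - 0xffffffffffffffff) == 2 > 0, i.e. timestamp 1
 * comes "after" a timestamp that has just wrapped.  qfq_round_down(1000, 5)
 * clears the low five bits and returns 992.
 */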
532
533/* return the pointer to the group with lowest index in the bitmap */
534static inline struct qfq_group *qfq_ffs(struct qfq_sched *q,
535 unsigned long bitmap)
536{
537 int index = __ffs(bitmap);
538 return &q->groups[index];
539}
540/* Calculate a mask to mimic what would be ffs_from(). */
541static inline unsigned long mask_from(unsigned long bitmap, int from)
542{
543 return bitmap & ~((1UL << from) - 1);
544}
545
546/*
547 * The state computation relies on ER=0, IR=1, EB=2, IB=3
548 * First compute eligibility comparing grp->S, q->V,
549 * then check if someone is blocking us and possibly add EB
550 */
551static int qfq_calc_state(struct qfq_sched *q, const struct qfq_group *grp)
552{
553 /* if S > V we are not eligible */
554 unsigned int state = qfq_gt(grp->S, q->V);
555 unsigned long mask = mask_from(q->bitmaps[ER], grp->index);
556 struct qfq_group *next;
557
558 if (mask) {
559 next = qfq_ffs(q, mask);
560 if (qfq_gt(grp->F, next->F))
561 state |= EB;
562 }
563
564 return state;
565}
566
567
568/*
569 * In principle
570 * q->bitmaps[dst] |= q->bitmaps[src] & mask;
571 * q->bitmaps[src] &= ~mask;
572 * but we should make sure that src != dst
573 */
574static inline void qfq_move_groups(struct qfq_sched *q, unsigned long mask,
575 int src, int dst)
576{
577 q->bitmaps[dst] |= q->bitmaps[src] & mask;
578 q->bitmaps[src] &= ~mask;
579}
580
581static void qfq_unblock_groups(struct qfq_sched *q, int index, u64 old_F)
582{
583 unsigned long mask = mask_from(q->bitmaps[ER], index + 1);
584 struct qfq_group *next;
585
586 if (mask) {
587 next = qfq_ffs(q, mask);
588 if (!qfq_gt(next->F, old_F))
589 return;
590 }
591
592 mask = (1UL << index) - 1;
593 qfq_move_groups(q, mask, EB, ER);
594 qfq_move_groups(q, mask, IB, IR);
595}
596
597/*
598 * perhaps
599 *
600 old_V ^= q->V;
601 old_V >>= QFQ_MIN_SLOT_SHIFT;
602 if (old_V) {
603 ...
604 }
605 *
606 */
607static void qfq_make_eligible(struct qfq_sched *q, u64 old_V)
608{
609 unsigned long vslot = q->V >> QFQ_MIN_SLOT_SHIFT;
610 unsigned long old_vslot = old_V >> QFQ_MIN_SLOT_SHIFT;
611
612 if (vslot != old_vslot) {
613 unsigned long mask = (1UL << fls(vslot ^ old_vslot)) - 1;
614 qfq_move_groups(q, mask, IR, ER);
615 qfq_move_groups(q, mask, IB, EB);
616 }
617}
618
619
620/*
621 * XXX we should make sure that slot becomes less than 32.
622 * This is guaranteed by the input values.
623 * roundedS is always cl->S rounded on grp->slot_shift bits.
624 */
625static void qfq_slot_insert(struct qfq_group *grp, struct qfq_class *cl,
626 u64 roundedS)
627{
628 u64 slot = (roundedS - grp->S) >> grp->slot_shift;
629 unsigned int i = (grp->front + slot) % QFQ_MAX_SLOTS;
630
631 hlist_add_head(&cl->next, &grp->slots[i]);
632 __set_bit(slot, &grp->full_slots);
633}
634
635/* Maybe introduce hlist_first_entry?? */
636static struct qfq_class *qfq_slot_head(struct qfq_group *grp)
637{
638 return hlist_entry(grp->slots[grp->front].first,
639 struct qfq_class, next);
640}
641
642/*
643 * remove the entry from the slot
644 */
645static void qfq_front_slot_remove(struct qfq_group *grp)
646{
647 struct qfq_class *cl = qfq_slot_head(grp);
648
649 BUG_ON(!cl);
650 hlist_del(&cl->next);
651 if (hlist_empty(&grp->slots[grp->front]))
652 __clear_bit(0, &grp->full_slots);
653}
654
655/*
656 * Returns the first full queue in a group. As a side effect,
657 * adjust the bucket list so the first non-empty bucket is at
658 * position 0 in full_slots.
659 */
660static struct qfq_class *qfq_slot_scan(struct qfq_group *grp)
661{
662 unsigned int i;
663
664 pr_debug("qfq slot_scan: grp %u full %#lx\n",
665 grp->index, grp->full_slots);
666
667 if (grp->full_slots == 0)
668 return NULL;
669
670 i = __ffs(grp->full_slots); /* zero based */
671 if (i > 0) {
672 grp->front = (grp->front + i) % QFQ_MAX_SLOTS;
673 grp->full_slots >>= i;
674 }
675
676 return qfq_slot_head(grp);
677}
678
679/*
680 * adjust the bucket list. When the start time of a group decreases,
681 * we move the index down (modulo QFQ_MAX_SLOTS) so we don't need to
682 * move the objects. The mask of occupied slots must be shifted
683 * because we use ffs() to find the first non-empty slot.
684 * This covers decreases in the group's start time, but what about
685 * increases of the start time ?
686 * Here too we should make sure that i is less than 32
687 */
688static void qfq_slot_rotate(struct qfq_group *grp, u64 roundedS)
689{
690 unsigned int i = (grp->S - roundedS) >> grp->slot_shift;
691
692 grp->full_slots <<= i;
693 grp->front = (grp->front - i) % QFQ_MAX_SLOTS;
694}
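/*
 * [Editor's example] If the group's start time decreases by two slots
 * (i == 2), full_slots shifts left by two and front moves back by two
 * (mod QFQ_MAX_SLOTS): every queued class keeps its physical bucket,
 * while logical slot 0 now stands for the new, earlier start time.
 * The unsigned "(grp->front - i) % QFQ_MAX_SLOTS" wraps correctly
 * because QFQ_MAX_SLOTS (32) divides 2^32.
 */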
695
696static void qfq_update_eligible(struct qfq_sched *q, u64 old_V)
697{
698 struct qfq_group *grp;
699 unsigned long ineligible;
700
701 ineligible = q->bitmaps[IR] | q->bitmaps[IB];
702 if (ineligible) {
703 if (!q->bitmaps[ER]) {
704 grp = qfq_ffs(q, ineligible);
705 if (qfq_gt(grp->S, q->V))
706 q->V = grp->S;
707 }
708 qfq_make_eligible(q, old_V);
709 }
710}
711
 712/* Return the length of the next packet in the queue (0 if the queue is empty) */
713static unsigned int qdisc_peek_len(struct Qdisc *sch)
714{
715 struct sk_buff *skb;
716
717 skb = sch->ops->peek(sch);
718 return skb ? qdisc_pkt_len(skb) : 0;
719}
720
721/*
722 * Updates the class, returns true if also the group needs to be updated.
723 */
724static bool qfq_update_class(struct qfq_group *grp, struct qfq_class *cl)
725{
726 unsigned int len = qdisc_peek_len(cl->qdisc);
727
728 cl->S = cl->F;
729 if (!len)
730 qfq_front_slot_remove(grp); /* queue is empty */
731 else {
732 u64 roundedS;
733
734 cl->F = cl->S + (u64)len * cl->inv_w;
735 roundedS = qfq_round_down(cl->S, grp->slot_shift);
736 if (roundedS == grp->S)
737 return false;
738
739 qfq_front_slot_remove(grp);
740 qfq_slot_insert(grp, cl, roundedS);
741 }
742
743 return true;
744}
745
746static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
747{
748 struct qfq_sched *q = qdisc_priv(sch);
749 struct qfq_group *grp;
750 struct qfq_class *cl;
751 struct sk_buff *skb;
752 unsigned int len;
753 u64 old_V;
754
755 if (!q->bitmaps[ER])
756 return NULL;
757
758 grp = qfq_ffs(q, q->bitmaps[ER]);
759
760 cl = qfq_slot_head(grp);
761 skb = qdisc_dequeue_peeked(cl->qdisc);
762 if (!skb) {
763 WARN_ONCE(1, "qfq_dequeue: non-workconserving leaf\n");
764 return NULL;
765 }
766
767 sch->q.qlen--;
768 qdisc_bstats_update(sch, skb);
769
770 old_V = q->V;
771 len = qdisc_pkt_len(skb);
772 q->V += (u64)len * IWSUM;
773 pr_debug("qfq dequeue: len %u F %lld now %lld\n",
774 len, (unsigned long long) cl->F, (unsigned long long) q->V);
775
776 if (qfq_update_class(grp, cl)) {
777 u64 old_F = grp->F;
778
779 cl = qfq_slot_scan(grp);
780 if (!cl)
781 __clear_bit(grp->index, &q->bitmaps[ER]);
782 else {
783 u64 roundedS = qfq_round_down(cl->S, grp->slot_shift);
784 unsigned int s;
785
786 if (grp->S == roundedS)
787 goto skip_unblock;
788 grp->S = roundedS;
789 grp->F = roundedS + (2ULL << grp->slot_shift);
790 __clear_bit(grp->index, &q->bitmaps[ER]);
791 s = qfq_calc_state(q, grp);
792 __set_bit(grp->index, &q->bitmaps[s]);
793 }
794
795 qfq_unblock_groups(q, grp->index, old_F);
796 }
797
798skip_unblock:
799 qfq_update_eligible(q, old_V);
800
801 return skb;
802}
803
804/*
805 * Assign a reasonable start time for a new flow k in group i.
806 * Admissible values for \hat(F) are multiples of \sigma_i
 807 * no greater than V+\sigma_i. Larger values mean that
808 * we had a wraparound so we consider the timestamp to be stale.
809 *
810 * If F is not stale and F >= V then we set S = F.
811 * Otherwise we should assign S = V, but this may violate
812 * the ordering in ER. So, if we have groups in ER, set S to
813 * the F_j of the first group j which would be blocking us.
814 * We are guaranteed not to move S backward because
815 * otherwise our group i would still be blocked.
816 */
817static void qfq_update_start(struct qfq_sched *q, struct qfq_class *cl)
818{
819 unsigned long mask;
820 uint32_t limit, roundedF;
821 int slot_shift = cl->grp->slot_shift;
822
823 roundedF = qfq_round_down(cl->F, slot_shift);
824 limit = qfq_round_down(q->V, slot_shift) + (1UL << slot_shift);
825
826 if (!qfq_gt(cl->F, q->V) || qfq_gt(roundedF, limit)) {
827 /* timestamp was stale */
828 mask = mask_from(q->bitmaps[ER], cl->grp->index);
829 if (mask) {
830 struct qfq_group *next = qfq_ffs(q, mask);
831 if (qfq_gt(roundedF, next->F)) {
832 cl->S = next->F;
833 return;
834 }
835 }
836 cl->S = q->V;
837 } else /* timestamp is not stale */
838 cl->S = cl->F;
839}
840
841static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
842{
843 struct qfq_sched *q = qdisc_priv(sch);
844 struct qfq_group *grp;
845 struct qfq_class *cl;
846 int err;
847 u64 roundedS;
848 int s;
849
850 cl = qfq_classify(skb, sch, &err);
851 if (cl == NULL) {
852 if (err & __NET_XMIT_BYPASS)
853 sch->qstats.drops++;
854 kfree_skb(skb);
855 return err;
856 }
857 pr_debug("qfq_enqueue: cl = %x\n", cl->common.classid);
858
859 err = qdisc_enqueue(skb, cl->qdisc);
860 if (unlikely(err != NET_XMIT_SUCCESS)) {
861 pr_debug("qfq_enqueue: enqueue failed %d\n", err);
862 if (net_xmit_drop_count(err)) {
863 cl->qstats.drops++;
864 sch->qstats.drops++;
865 }
866 return err;
867 }
868
869 bstats_update(&cl->bstats, skb);
870 ++sch->q.qlen;
871
 872 /* If the new skb is not the head of the queue, we are done here. */
873 if (cl->qdisc->q.qlen != 1)
874 return err;
875
 876 /* If we reach this point, queue q was idle */
877 grp = cl->grp;
878 qfq_update_start(q, cl);
879
880 /* compute new finish time and rounded start. */
881 cl->F = cl->S + (u64)qdisc_pkt_len(skb) * cl->inv_w;
882 roundedS = qfq_round_down(cl->S, grp->slot_shift);
883
884 /*
885 * insert cl in the correct bucket.
886 * If cl->S >= grp->S we don't need to adjust the
887 * bucket list and simply go to the insertion phase.
888 * Otherwise grp->S is decreasing, we must make room
889 * in the bucket list, and also recompute the group state.
890 * Finally, if there were no flows in this group and nobody
891 * was in ER make sure to adjust V.
892 */
893 if (grp->full_slots) {
894 if (!qfq_gt(grp->S, cl->S))
895 goto skip_update;
896
897 /* create a slot for this cl->S */
898 qfq_slot_rotate(grp, roundedS);
899 /* group was surely ineligible, remove */
900 __clear_bit(grp->index, &q->bitmaps[IR]);
901 __clear_bit(grp->index, &q->bitmaps[IB]);
902 } else if (!q->bitmaps[ER] && qfq_gt(roundedS, q->V))
903 q->V = roundedS;
904
905 grp->S = roundedS;
906 grp->F = roundedS + (2ULL << grp->slot_shift);
907 s = qfq_calc_state(q, grp);
908 __set_bit(grp->index, &q->bitmaps[s]);
909
910 pr_debug("qfq enqueue: new state %d %#lx S %lld F %lld V %lld\n",
911 s, q->bitmaps[s],
912 (unsigned long long) cl->S,
913 (unsigned long long) cl->F,
914 (unsigned long long) q->V);
915
916skip_update:
917 qfq_slot_insert(grp, cl, roundedS);
918
919 return err;
920}
921
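The finish-time update above, cl->F = cl->S + len * cl->inv_w, works in FRAC_BITS fixed point: inv_w is the inverse of the class weight, pre-scaled so the per-packet update stays in integer arithmetic. A sketch of the relationship, assuming ONE_FP = 1 << FRAC_BITS as the scaling constant (the real definitions live near the top of this file; the values below are placeholders):

#include <stdint.h>

#define FRAC_BITS	30			/* placeholder value */
#define ONE_FP		(1ULL << FRAC_BITS)

/* With inv_w = ONE_FP / weight, F = S + len * inv_w advances the
 * timestamp by len/weight in fixed point, so classes with larger
 * weights accumulate virtual time more slowly and are served more
 * often.
 */
static uint64_t finish_time(uint64_t S, uint32_t len, uint32_t inv_w)
{
	return S + (uint64_t)len * inv_w;
}

static uint32_t inverse_weight(uint32_t weight)
{
	return ONE_FP / weight;		/* weight must be non-zero */
}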
922
923static void qfq_slot_remove(struct qfq_sched *q, struct qfq_group *grp,
924 struct qfq_class *cl)
925{
926 unsigned int i, offset;
927 u64 roundedS;
928
929 roundedS = qfq_round_down(cl->S, grp->slot_shift);
930 offset = (roundedS - grp->S) >> grp->slot_shift;
931 i = (grp->front + offset) % QFQ_MAX_SLOTS;
932
933 hlist_del(&cl->next);
934 if (hlist_empty(&grp->slots[i]))
935 __clear_bit(offset, &grp->full_slots);
936}
937
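The offset and i computed above index a circular array of buckets. A standalone sketch of the same arithmetic (QFQ_MAX_SLOTS is a placeholder here; the real constant is defined near the top of this file):

#include <stdint.h>

#define QFQ_MAX_SLOTS	32		/* placeholder value */

/* Map a rounded start time to its bucket: 'offset' counts whole
 * slots between the group's front slot (starting at grp_S) and
 * roundedS, and the modulo wraps the index around the ring.
 */
static unsigned int slot_index(uint64_t roundedS, uint64_t grp_S,
			       unsigned int slot_shift, unsigned int front)
{
	unsigned int offset = (roundedS - grp_S) >> slot_shift;

	return (front + offset) % QFQ_MAX_SLOTS;
}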
938/*
939 * called to forcibly destroy a queue.
940 * If the queue is not in the front bucket, or if it has
941 * other queues in the front bucket, we can simply remove
942 * the queue with no other side effects.
943 * Otherwise we must propagate the event up.
944 */
945static void qfq_deactivate_class(struct qfq_sched *q, struct qfq_class *cl)
946{
947 struct qfq_group *grp = cl->grp;
948 unsigned long mask;
949 u64 roundedS;
950 int s;
951
952 cl->F = cl->S;
953 qfq_slot_remove(q, grp, cl);
954
955 if (!grp->full_slots) {
956 __clear_bit(grp->index, &q->bitmaps[IR]);
957 __clear_bit(grp->index, &q->bitmaps[EB]);
958 __clear_bit(grp->index, &q->bitmaps[IB]);
959
960 if (test_bit(grp->index, &q->bitmaps[ER]) &&
961 !(q->bitmaps[ER] & ~((1UL << grp->index) - 1))) {
962 mask = q->bitmaps[ER] & ((1UL << grp->index) - 1);
963 if (mask)
964 mask = ~((1UL << __fls(mask)) - 1);
965 else
966 mask = ~0UL;
967 qfq_move_groups(q, mask, EB, ER);
968 qfq_move_groups(q, mask, IB, IR);
969 }
970 __clear_bit(grp->index, &q->bitmaps[ER]);
971 } else if (hlist_empty(&grp->slots[grp->front])) {
972 cl = qfq_slot_scan(grp);
973 roundedS = qfq_round_down(cl->S, grp->slot_shift);
974 if (grp->S != roundedS) {
975 __clear_bit(grp->index, &q->bitmaps[ER]);
976 __clear_bit(grp->index, &q->bitmaps[IR]);
977 __clear_bit(grp->index, &q->bitmaps[EB]);
978 __clear_bit(grp->index, &q->bitmaps[IB]);
979 grp->S = roundedS;
980 grp->F = roundedS + (2ULL << grp->slot_shift);
981 s = qfq_calc_state(q, grp);
982 __set_bit(grp->index, &q->bitmaps[s]);
983 }
984 }
985
986 qfq_update_eligible(q, q->V);
987}
988
989static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg)
990{
991 struct qfq_sched *q = qdisc_priv(sch);
992 struct qfq_class *cl = (struct qfq_class *)arg;
993
994 if (cl->qdisc->q.qlen == 0)
995 qfq_deactivate_class(q, cl);
996}
997
998static unsigned int qfq_drop(struct Qdisc *sch)
999{
1000 struct qfq_sched *q = qdisc_priv(sch);
1001 struct qfq_group *grp;
1002 unsigned int i, j, len;
1003
1004 for (i = 0; i <= QFQ_MAX_INDEX; i++) {
1005 grp = &q->groups[i];
1006 for (j = 0; j < QFQ_MAX_SLOTS; j++) {
1007 struct qfq_class *cl;
1008 struct hlist_node *n;
1009
1010 hlist_for_each_entry(cl, n, &grp->slots[j], next) {
1011
1012 if (!cl->qdisc->ops->drop)
1013 continue;
1014
1015 len = cl->qdisc->ops->drop(cl->qdisc);
1016 if (len > 0) {
1017 sch->q.qlen--;
1018 if (!cl->qdisc->q.qlen)
1019 qfq_deactivate_class(q, cl);
1020
1021 return len;
1022 }
1023 }
1024 }
1025 }
1026
1027 return 0;
1028}
1029
1030static int qfq_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
1031{
1032 struct qfq_sched *q = qdisc_priv(sch);
1033 struct qfq_group *grp;
1034 int i, j, err;
1035
1036 err = qdisc_class_hash_init(&q->clhash);
1037 if (err < 0)
1038 return err;
1039
1040 for (i = 0; i <= QFQ_MAX_INDEX; i++) {
1041 grp = &q->groups[i];
1042 grp->index = i;
1043 grp->slot_shift = QFQ_MTU_SHIFT + FRAC_BITS
1044 - (QFQ_MAX_INDEX - i);
1045 for (j = 0; j < QFQ_MAX_SLOTS; j++)
1046 INIT_HLIST_HEAD(&grp->slots[j]);
1047 }
1048
1049 return 0;
1050}
1051
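The slot_shift assignment in the init loop gives each group a granularity that doubles with its index. A throwaway sketch that prints the resulting shifts (all three constants are placeholders standing in for the definitions at the top of this file):

#include <stdio.h>

#define QFQ_MAX_INDEX	19	/* placeholder values */
#define QFQ_MTU_SHIFT	11
#define FRAC_BITS	30

int main(void)
{
	int i;

	/* Group i handles timestamps at a granularity of 2^slot_shift
	 * units of virtual time; each step up the index doubles it.
	 */
	for (i = 0; i <= QFQ_MAX_INDEX; i++)
		printf("group %2d: slot_shift = %d\n", i,
		       QFQ_MTU_SHIFT + FRAC_BITS - (QFQ_MAX_INDEX - i));
	return 0;
}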
1052static void qfq_reset_qdisc(struct Qdisc *sch)
1053{
1054 struct qfq_sched *q = qdisc_priv(sch);
1055 struct qfq_group *grp;
1056 struct qfq_class *cl;
1057 struct hlist_node *n, *tmp;
1058 unsigned int i, j;
1059
1060 for (i = 0; i <= QFQ_MAX_INDEX; i++) {
1061 grp = &q->groups[i];
1062 for (j = 0; j < QFQ_MAX_SLOTS; j++) {
1063 hlist_for_each_entry_safe(cl, n, tmp,
1064 &grp->slots[j], next) {
1065 qfq_deactivate_class(q, cl);
1066 }
1067 }
1068 }
1069
1070 for (i = 0; i < q->clhash.hashsize; i++) {
1071 hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode)
1072 qdisc_reset(cl->qdisc);
1073 }
1074 sch->q.qlen = 0;
1075}
1076
1077static void qfq_destroy_qdisc(struct Qdisc *sch)
1078{
1079 struct qfq_sched *q = qdisc_priv(sch);
1080 struct qfq_class *cl;
1081 struct hlist_node *n, *next;
1082 unsigned int i;
1083
1084 tcf_destroy_chain(&q->filter_list);
1085
1086 for (i = 0; i < q->clhash.hashsize; i++) {
1087 hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i],
1088 common.hnode) {
1089 qfq_destroy_class(sch, cl);
1090 }
1091 }
1092 qdisc_class_hash_destroy(&q->clhash);
1093}
1094
1095static const struct Qdisc_class_ops qfq_class_ops = {
1096 .change = qfq_change_class,
1097 .delete = qfq_delete_class,
1098 .get = qfq_get_class,
1099 .put = qfq_put_class,
1100 .tcf_chain = qfq_tcf_chain,
1101 .bind_tcf = qfq_bind_tcf,
1102 .unbind_tcf = qfq_unbind_tcf,
1103 .graft = qfq_graft_class,
1104 .leaf = qfq_class_leaf,
1105 .qlen_notify = qfq_qlen_notify,
1106 .dump = qfq_dump_class,
1107 .dump_stats = qfq_dump_class_stats,
1108 .walk = qfq_walk,
1109};
1110
1111static struct Qdisc_ops qfq_qdisc_ops __read_mostly = {
1112 .cl_ops = &qfq_class_ops,
1113 .id = "qfq",
1114 .priv_size = sizeof(struct qfq_sched),
1115 .enqueue = qfq_enqueue,
1116 .dequeue = qfq_dequeue,
1117 .peek = qdisc_peek_dequeued,
1118 .drop = qfq_drop,
1119 .init = qfq_init_qdisc,
1120 .reset = qfq_reset_qdisc,
1121 .destroy = qfq_destroy_qdisc,
1122 .owner = THIS_MODULE,
1123};
1124
1125static int __init qfq_init(void)
1126{
1127 return register_qdisc(&qfq_qdisc_ops);
1128}
1129
1130static void __exit qfq_exit(void)
1131{
1132 unregister_qdisc(&qfq_qdisc_ops);
1133}
1134
1135module_init(qfq_init);
1136module_exit(qfq_exit);
1137MODULE_LICENSE("GPL");
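With the ops table registered under the id "qfq", the scheduler becomes selectable from userspace by that name. Assuming an iproute2 build that knows the qdisc, attaching it would look something like "tc qdisc add dev eth0 root handle 1: qfq" followed by per-class setup such as "tc class add dev eth0 parent 1: classid 1:1 qfq weight 10"; the exact class option names depend on the iproute2 version, so treat these command lines as illustrative.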
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index c2e628dfaacc..7ef87f9eb675 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -169,7 +169,7 @@ static unsigned int sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
169 } 169 }
170 case htons(ETH_P_IPV6): 170 case htons(ETH_P_IPV6):
171 { 171 {
172 struct ipv6hdr *iph; 172 const struct ipv6hdr *iph;
173 int poff; 173 int poff;
174 174
175 if (!pskb_network_may_pull(skb, sizeof(*iph))) 175 if (!pskb_network_may_pull(skb, sizeof(*iph)))
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index 3c06c87cd280..6338413376c8 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -140,14 +140,12 @@ void sctp_bind_addr_init(struct sctp_bind_addr *bp, __u16 port)
140/* Dispose of the address list. */ 140/* Dispose of the address list. */
141static void sctp_bind_addr_clean(struct sctp_bind_addr *bp) 141static void sctp_bind_addr_clean(struct sctp_bind_addr *bp)
142{ 142{
143 struct sctp_sockaddr_entry *addr; 143 struct sctp_sockaddr_entry *addr, *temp;
144 struct list_head *pos, *temp;
145 144
146 /* Empty the bind address list. */ 145 /* Empty the bind address list. */
147 list_for_each_safe(pos, temp, &bp->address_list) { 146 list_for_each_entry_safe(addr, temp, &bp->address_list, list) {
148 addr = list_entry(pos, struct sctp_sockaddr_entry, list); 147 list_del_rcu(&addr->list);
149 list_del(pos); 148 call_rcu(&addr->rcu, sctp_local_addr_free);
150 kfree(addr);
151 SCTP_DBG_OBJCNT_DEC(addr); 149 SCTP_DBG_OBJCNT_DEC(addr);
152 } 150 }
153} 151}
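The conversion above replaces the unlink-and-kfree loop with the RCU deletion idiom: unlink with list_del_rcu(), then defer the free past a grace period so concurrent RCU readers never dereference freed memory. A generic sketch of that pattern (struct and function names here are illustrative, not the SCTP ones):

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct addr_entry {
	struct list_head list;
	struct rcu_head  rcu;
};

static void addr_entry_free(struct rcu_head *head)
{
	kfree(container_of(head, struct addr_entry, rcu));
}

/* Writer side: readers traversing the list with
 * list_for_each_entry_rcu() may still hold a pointer to 'e', so
 * the actual kfree() only happens after a grace period.
 */
static void addr_entry_del(struct addr_entry *e)
{
	list_del_rcu(&e->list);
	call_rcu(&e->rcu, addr_entry_free);
}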
diff --git a/net/sctp/debug.c b/net/sctp/debug.c
index bf24fa697de2..ec997cfe0a7e 100644
--- a/net/sctp/debug.c
+++ b/net/sctp/debug.c
@@ -98,7 +98,6 @@ const char *sctp_cname(const sctp_subtype_t cid)
98 98
99/* These are printable forms of the states. */ 99/* These are printable forms of the states. */
100const char *const sctp_state_tbl[SCTP_STATE_NUM_STATES] = { 100const char *const sctp_state_tbl[SCTP_STATE_NUM_STATES] = {
101 "STATE_EMPTY",
102 "STATE_CLOSED", 101 "STATE_CLOSED",
103 "STATE_COOKIE_WAIT", 102 "STATE_COOKIE_WAIT",
104 "STATE_COOKIE_ECHOED", 103 "STATE_COOKIE_ECHOED",
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index e10acc01c75f..c8cc24e282c3 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -325,6 +325,7 @@ static struct sctp_association *__sctp_endpoint_lookup_assoc(
325 struct sctp_transport **transport) 325 struct sctp_transport **transport)
326{ 326{
327 struct sctp_association *asoc = NULL; 327 struct sctp_association *asoc = NULL;
328 struct sctp_association *tmp;
328 struct sctp_transport *t = NULL; 329 struct sctp_transport *t = NULL;
329 struct sctp_hashbucket *head; 330 struct sctp_hashbucket *head;
330 struct sctp_ep_common *epb; 331 struct sctp_ep_common *epb;
@@ -333,25 +334,32 @@ static struct sctp_association *__sctp_endpoint_lookup_assoc(
333 int rport; 334 int rport;
334 335
335 *transport = NULL; 336 *transport = NULL;
337
338 /* If the local port is not set, there can't be any associations
339 * on this endpoint.
340 */
341 if (!ep->base.bind_addr.port)
342 goto out;
343
336 rport = ntohs(paddr->v4.sin_port); 344 rport = ntohs(paddr->v4.sin_port);
337 345
338 hash = sctp_assoc_hashfn(ep->base.bind_addr.port, rport); 346 hash = sctp_assoc_hashfn(ep->base.bind_addr.port, rport);
339 head = &sctp_assoc_hashtable[hash]; 347 head = &sctp_assoc_hashtable[hash];
340 read_lock(&head->lock); 348 read_lock(&head->lock);
341 sctp_for_each_hentry(epb, node, &head->chain) { 349 sctp_for_each_hentry(epb, node, &head->chain) {
342 asoc = sctp_assoc(epb); 350 tmp = sctp_assoc(epb);
343 if (asoc->ep != ep || rport != asoc->peer.port) 351 if (tmp->ep != ep || rport != tmp->peer.port)
344 goto next; 352 continue;
345 353
346 t = sctp_assoc_lookup_paddr(asoc, paddr); 354 t = sctp_assoc_lookup_paddr(tmp, paddr);
347 if (t) { 355 if (t) {
356 asoc = tmp;
348 *transport = t; 357 *transport = t;
349 break; 358 break;
350 } 359 }
351next:
352 asoc = NULL;
353 } 360 }
354 read_unlock(&head->lock); 361 read_unlock(&head->lock);
362out:
355 return asoc; 363 return asoc;
356} 364}
357 365
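Beyond the early exit for an unbound port, the rewrite fixes the loop shape: the old code reused asoc as both cursor and result, and needed the next: label plus asoc = NULL to avoid returning a half-matched entry. A sketch of the safer cursor-versus-result pattern (the struct and key field are illustrative):

#include <linux/list.h>

struct item {
	struct list_head node;
	int key;
};

/* The result pointer is written only on a full match, so the
 * function can never return a cursor left over from a failed
 * comparison.
 */
static struct item *bucket_lookup(struct list_head *bucket, int key)
{
	struct item *cursor, *result = NULL;

	list_for_each_entry(cursor, bucket, node) {
		if (cursor->key != key)
			continue;
		result = cursor;
		break;
	}
	return result;
}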
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 5436c6921167..741ed1648838 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -565,7 +565,7 @@ void sctp_err_finish(struct sock *sk, struct sctp_association *asoc)
565 */ 565 */
566void sctp_v4_err(struct sk_buff *skb, __u32 info) 566void sctp_v4_err(struct sk_buff *skb, __u32 info)
567{ 567{
568 struct iphdr *iph = (struct iphdr *)skb->data; 568 const struct iphdr *iph = (const struct iphdr *)skb->data;
569 const int ihlen = iph->ihl * 4; 569 const int ihlen = iph->ihl * 4;
570 const int type = icmp_hdr(skb)->type; 570 const int type = icmp_hdr(skb)->type;
571 const int code = icmp_hdr(skb)->code; 571 const int code = icmp_hdr(skb)->code;
@@ -661,7 +661,6 @@ static int sctp_rcv_ootb(struct sk_buff *skb)
661{ 661{
662 sctp_chunkhdr_t *ch; 662 sctp_chunkhdr_t *ch;
663 __u8 *ch_end; 663 __u8 *ch_end;
664 sctp_errhdr_t *err;
665 664
666 ch = (sctp_chunkhdr_t *) skb->data; 665 ch = (sctp_chunkhdr_t *) skb->data;
667 666
@@ -697,20 +696,6 @@ static int sctp_rcv_ootb(struct sk_buff *skb)
697 if (SCTP_CID_INIT == ch->type && (void *)ch != skb->data) 696 if (SCTP_CID_INIT == ch->type && (void *)ch != skb->data)
698 goto discard; 697 goto discard;
699 698
700 /* RFC 8.4, 7) If the packet contains a "Stale cookie" ERROR
701 * or a COOKIE ACK the SCTP Packet should be silently
702 * discarded.
703 */
704 if (SCTP_CID_COOKIE_ACK == ch->type)
705 goto discard;
706
707 if (SCTP_CID_ERROR == ch->type) {
708 sctp_walk_errors(err, ch) {
709 if (SCTP_ERROR_STALE_COOKIE == err->cause)
710 goto discard;
711 }
712 }
713
714 ch = (sctp_chunkhdr_t *) ch_end; 699 ch = (sctp_chunkhdr_t *) ch_end;
715 } while (ch_end < skb_tail_pointer(skb)); 700 } while (ch_end < skb_tail_pointer(skb));
716 701
@@ -1017,7 +1002,7 @@ static struct sctp_association *__sctp_rcv_asconf_lookup(
1017 /* Skip over the ADDIP header and find the Address parameter */ 1002 /* Skip over the ADDIP header and find the Address parameter */
1018 param = (union sctp_addr_param *)(asconf + 1); 1003 param = (union sctp_addr_param *)(asconf + 1);
1019 1004
1020 af = sctp_get_af_specific(param_type2af(param->v4.param_hdr.type)); 1005 af = sctp_get_af_specific(param_type2af(param->p.type));
1021 if (unlikely(!af)) 1006 if (unlikely(!af))
1022 return NULL; 1007 return NULL;
1023 1008
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 185fe058db11..0bb0d7cb9f10 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -80,6 +80,13 @@
80 80
81#include <asm/uaccess.h> 81#include <asm/uaccess.h>
82 82
83static inline int sctp_v6_addr_match_len(union sctp_addr *s1,
84 union sctp_addr *s2);
85static void sctp_v6_to_addr(union sctp_addr *addr, struct in6_addr *saddr,
86 __be16 port);
87static int sctp_v6_cmp_addr(const union sctp_addr *addr1,
88 const union sctp_addr *addr2);
89
83/* Event handler for inet6 address addition/deletion events. 90/* Event handler for inet6 address addition/deletion events.
84 * The sctp_local_addr_list needs to be protected by a spin lock since 91
85 * multiple notifiers (say IPv4 and IPv6) may be running at the same 92 * multiple notifiers (say IPv4 and IPv6) may be running at the same
@@ -240,37 +247,107 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
240/* Returns the dst cache entry for the given source and destination ip 247/* Returns the dst cache entry for the given source and destination ip
241 * addresses. 248 * addresses.
242 */ 249 */
243static struct dst_entry *sctp_v6_get_dst(struct sctp_association *asoc, 250static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
244 union sctp_addr *daddr, 251 struct flowi *fl, struct sock *sk)
245 union sctp_addr *saddr)
246{ 252{
247 struct dst_entry *dst; 253 struct sctp_association *asoc = t->asoc;
248 struct flowi6 fl6; 254 struct dst_entry *dst = NULL;
255 struct flowi6 *fl6 = &fl->u.ip6;
256 struct sctp_bind_addr *bp;
257 struct sctp_sockaddr_entry *laddr;
258 union sctp_addr *baddr = NULL;
259 union sctp_addr *daddr = &t->ipaddr;
260 union sctp_addr dst_saddr;
261 __u8 matchlen = 0;
262 __u8 bmatchlen;
263 sctp_scope_t scope;
249 264
250 memset(&fl6, 0, sizeof(fl6)); 265 memset(fl6, 0, sizeof(struct flowi6));
251 ipv6_addr_copy(&fl6.daddr, &daddr->v6.sin6_addr); 266 ipv6_addr_copy(&fl6->daddr, &daddr->v6.sin6_addr);
267 fl6->fl6_dport = daddr->v6.sin6_port;
268 fl6->flowi6_proto = IPPROTO_SCTP;
252 if (ipv6_addr_type(&daddr->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) 269 if (ipv6_addr_type(&daddr->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL)
253 fl6.flowi6_oif = daddr->v6.sin6_scope_id; 270 fl6->flowi6_oif = daddr->v6.sin6_scope_id;
254 271
272 SCTP_DEBUG_PRINTK("%s: DST=%pI6 ", __func__, &fl6->daddr);
255 273
256 SCTP_DEBUG_PRINTK("%s: DST=%pI6 ", __func__, &fl6.daddr); 274 if (asoc)
275 fl6->fl6_sport = htons(asoc->base.bind_addr.port);
257 276
258 if (saddr) { 277 if (saddr) {
259 ipv6_addr_copy(&fl6.saddr, &saddr->v6.sin6_addr); 278 ipv6_addr_copy(&fl6->saddr, &saddr->v6.sin6_addr);
260 SCTP_DEBUG_PRINTK("SRC=%pI6 - ", &fl6.saddr); 279 fl6->fl6_sport = saddr->v6.sin6_port;
280 SCTP_DEBUG_PRINTK("SRC=%pI6 - ", &fl6->saddr);
281 }
282
283 dst = ip6_dst_lookup_flow(sk, fl6, NULL, false);
284 if (!asoc || saddr)
285 goto out;
286
287 bp = &asoc->base.bind_addr;
288 scope = sctp_scope(daddr);
289 /* ip6_dst_lookup has filled in the fl6->saddr for us. Check
290 * to see if we can use it.
291 */
292 if (!IS_ERR(dst)) {
293 /* Walk through the bind address list and look for a bind
294 * address that matches the source address of the returned dst.
295 */
296 sctp_v6_to_addr(&dst_saddr, &fl6->saddr, htons(bp->port));
297 rcu_read_lock();
298 list_for_each_entry_rcu(laddr, &bp->address_list, list) {
299 if (!laddr->valid || (laddr->state != SCTP_ADDR_SRC))
300 continue;
301
302 /* Do not compare against v4 addrs */
303 if ((laddr->a.sa.sa_family == AF_INET6) &&
304 (sctp_v6_cmp_addr(&dst_saddr, &laddr->a))) {
305 rcu_read_unlock();
306 goto out;
307 }
308 }
309 rcu_read_unlock();
310 /* None of the bound addresses match the source address of the
311 * dst. So release it.
312 */
313 dst_release(dst);
314 dst = NULL;
261 } 315 }
262 316
263 dst = ip6_route_output(&init_net, NULL, &fl6); 317 /* Walk through the bind address list and try to get the
264 if (!dst->error) { 318 * best source address for a given destination.
319 */
320 rcu_read_lock();
321 list_for_each_entry_rcu(laddr, &bp->address_list, list) {
322 if (!laddr->valid && laddr->state != SCTP_ADDR_SRC)
323 continue;
324 if ((laddr->a.sa.sa_family == AF_INET6) &&
325 (scope <= sctp_scope(&laddr->a))) {
326 bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a);
327 if (!baddr || (matchlen < bmatchlen)) {
328 baddr = &laddr->a;
329 matchlen = bmatchlen;
330 }
331 }
332 }
333 rcu_read_unlock();
334 if (baddr) {
335 ipv6_addr_copy(&fl6->saddr, &baddr->v6.sin6_addr);
336 fl6->fl6_sport = baddr->v6.sin6_port;
337 dst = ip6_dst_lookup_flow(sk, fl6, NULL, false);
338 }
339
340out:
341 if (!IS_ERR(dst)) {
265 struct rt6_info *rt; 342 struct rt6_info *rt;
266 rt = (struct rt6_info *)dst; 343 rt = (struct rt6_info *)dst;
344 t->dst = dst;
267 SCTP_DEBUG_PRINTK("rt6_dst:%pI6 rt6_src:%pI6\n", 345 SCTP_DEBUG_PRINTK("rt6_dst:%pI6 rt6_src:%pI6\n",
268 &rt->rt6i_dst.addr, &rt->rt6i_src.addr); 346 &rt->rt6i_dst.addr, &fl6->saddr);
269 return dst; 347 } else {
348 t->dst = NULL;
349 SCTP_DEBUG_PRINTK("NO ROUTE\n");
270 } 350 }
271 SCTP_DEBUG_PRINTK("NO ROUTE\n");
272 dst_release(dst);
273 return NULL;
274} 351}
275 352
276/* Returns the number of consecutive initial bits that match in the 2 ipv6 353/* Returns the number of consecutive initial bits that match in the 2 ipv6
@@ -286,64 +363,18 @@ static inline int sctp_v6_addr_match_len(union sctp_addr *s1,
286 * and asoc's bind address list. 363 * and asoc's bind address list.
287 */ 364 */
288static void sctp_v6_get_saddr(struct sctp_sock *sk, 365static void sctp_v6_get_saddr(struct sctp_sock *sk,
289 struct sctp_association *asoc, 366 struct sctp_transport *t,
290 struct dst_entry *dst, 367 struct flowi *fl)
291 union sctp_addr *daddr,
292 union sctp_addr *saddr)
293{ 368{
294 struct sctp_bind_addr *bp; 369 struct flowi6 *fl6 = &fl->u.ip6;
295 struct sctp_sockaddr_entry *laddr; 370 union sctp_addr *saddr = &t->saddr;
296 sctp_scope_t scope;
297 union sctp_addr *baddr = NULL;
298 __u8 matchlen = 0;
299 __u8 bmatchlen;
300 371
301 SCTP_DEBUG_PRINTK("%s: asoc:%p dst:%p daddr:%pI6 ", 372 SCTP_DEBUG_PRINTK("%s: asoc:%p dst:%p\n", __func__, t->asoc, t->dst);
302 __func__, asoc, dst, &daddr->v6.sin6_addr);
303
304 if (!asoc) {
305 ipv6_dev_get_saddr(sock_net(sctp_opt2sk(sk)),
306 dst ? ip6_dst_idev(dst)->dev : NULL,
307 &daddr->v6.sin6_addr,
308 inet6_sk(&sk->inet.sk)->srcprefs,
309 &saddr->v6.sin6_addr);
310 SCTP_DEBUG_PRINTK("saddr from ipv6_get_saddr: %pI6\n",
311 &saddr->v6.sin6_addr);
312 return;
313 }
314
315 scope = sctp_scope(daddr);
316
317 bp = &asoc->base.bind_addr;
318 373
319 /* Go through the bind address list and find the best source address 374 if (t->dst) {
320 * that matches the scope of the destination address. 375 saddr->v6.sin6_family = AF_INET6;
321 */ 376 ipv6_addr_copy(&saddr->v6.sin6_addr, &fl6->saddr);
322 rcu_read_lock();
323 list_for_each_entry_rcu(laddr, &bp->address_list, list) {
324 if (!laddr->valid)
325 continue;
326 if ((laddr->state == SCTP_ADDR_SRC) &&
327 (laddr->a.sa.sa_family == AF_INET6) &&
328 (scope <= sctp_scope(&laddr->a))) {
329 bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a);
330 if (!baddr || (matchlen < bmatchlen)) {
331 baddr = &laddr->a;
332 matchlen = bmatchlen;
333 }
334 }
335 }
336
337 if (baddr) {
338 memcpy(saddr, baddr, sizeof(union sctp_addr));
339 SCTP_DEBUG_PRINTK("saddr: %pI6\n", &saddr->v6.sin6_addr);
340 } else {
341 pr_err("%s: asoc:%p Could not find a valid source "
342 "address for the dest:%pI6\n",
343 __func__, asoc, &daddr->v6.sin6_addr);
344 } 377 }
345
346 rcu_read_unlock();
347} 378}
348 379
349/* Make a copy of all potential local addresses. */ 380/* Make a copy of all potential local addresses. */
@@ -465,14 +496,13 @@ static int sctp_v6_to_addr_param(const union sctp_addr *addr,
465 return length; 496 return length;
466} 497}
467 498
468/* Initialize a sctp_addr from a dst_entry. */ 499/* Initialize a sctp_addr from struct in6_addr. */
469static void sctp_v6_dst_saddr(union sctp_addr *addr, struct dst_entry *dst, 500static void sctp_v6_to_addr(union sctp_addr *addr, struct in6_addr *saddr,
470 __be16 port) 501 __be16 port)
471{ 502{
472 struct rt6_info *rt = (struct rt6_info *)dst;
473 addr->sa.sa_family = AF_INET6; 503 addr->sa.sa_family = AF_INET6;
474 addr->v6.sin6_port = port; 504 addr->v6.sin6_port = port;
475 ipv6_addr_copy(&addr->v6.sin6_addr, &rt->rt6i_src.addr); 505 ipv6_addr_copy(&addr->v6.sin6_addr, saddr);
476} 506}
477 507
478/* Compare addresses exactly. 508/* Compare addresses exactly.
@@ -531,7 +561,7 @@ static int sctp_v6_is_any(const union sctp_addr *addr)
531static int sctp_v6_available(union sctp_addr *addr, struct sctp_sock *sp) 561static int sctp_v6_available(union sctp_addr *addr, struct sctp_sock *sp)
532{ 562{
533 int type; 563 int type;
534 struct in6_addr *in6 = (struct in6_addr *)&addr->v6.sin6_addr; 564 const struct in6_addr *in6 = (const struct in6_addr *)&addr->v6.sin6_addr;
535 565
536 type = ipv6_addr_type(in6); 566 type = ipv6_addr_type(in6);
537 if (IPV6_ADDR_ANY == type) 567 if (IPV6_ADDR_ANY == type)
@@ -959,7 +989,6 @@ static struct sctp_af sctp_af_inet6 = {
959 .to_sk_daddr = sctp_v6_to_sk_daddr, 989 .to_sk_daddr = sctp_v6_to_sk_daddr,
960 .from_addr_param = sctp_v6_from_addr_param, 990 .from_addr_param = sctp_v6_from_addr_param,
961 .to_addr_param = sctp_v6_to_addr_param, 991 .to_addr_param = sctp_v6_to_addr_param,
962 .dst_saddr = sctp_v6_dst_saddr,
963 .cmp_addr = sctp_v6_cmp_addr, 992 .cmp_addr = sctp_v6_cmp_addr,
964 .scope = sctp_v6_scope, 993 .scope = sctp_v6_scope,
965 .addr_valid = sctp_v6_addr_valid, 994 .addr_valid = sctp_v6_addr_valid,
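The new source-selection walk ranks candidate bind addresses by sctp_v6_addr_match_len(), i.e. by how long a prefix they share with the destination. A sketch of how such a match length can be computed over the four 32-bit words of an IPv6 address (a hypothetical helper, not this file's exact code):

#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/in6.h>
#include <asm/byteorder.h>

/* Count the leading bits two IPv6 addresses have in common,
 * scanning word by word from the most significant end.
 */
static int v6_match_len(const struct in6_addr *a, const struct in6_addr *b)
{
	int i, bits = 0;

	for (i = 0; i < 4; i++) {
		u32 diff = ntohl(a->s6_addr32[i] ^ b->s6_addr32[i]);

		if (diff)
			return bits + 31 - __fls(diff);
		bits += 32;
	}
	return bits;	/* identical addresses: 128 */
}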
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index bf92a5b68f8b..1c88c8911dc5 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -131,7 +131,8 @@ static inline int sctp_cacc_skip_3_1_d(struct sctp_transport *primary,
131static inline int sctp_cacc_skip_3_1_f(struct sctp_transport *transport, 131static inline int sctp_cacc_skip_3_1_f(struct sctp_transport *transport,
132 int count_of_newacks) 132 int count_of_newacks)
133{ 133{
134 if (count_of_newacks < 2 && !transport->cacc.cacc_saw_newack) 134 if (count_of_newacks < 2 &&
135 (transport && !transport->cacc.cacc_saw_newack))
135 return 1; 136 return 1;
136 return 0; 137 return 0;
137} 138}
@@ -319,7 +320,6 @@ int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
319 * chunk. 320 * chunk.
320 */ 321 */
321 switch (q->asoc->state) { 322 switch (q->asoc->state) {
322 case SCTP_STATE_EMPTY:
323 case SCTP_STATE_CLOSED: 323 case SCTP_STATE_CLOSED:
324 case SCTP_STATE_SHUTDOWN_PENDING: 324 case SCTP_STATE_SHUTDOWN_PENDING:
325 case SCTP_STATE_SHUTDOWN_SENT: 325 case SCTP_STATE_SHUTDOWN_SENT:
@@ -577,6 +577,13 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
577 * try to send as much as possible. 577 * try to send as much as possible.
578 */ 578 */
579 list_for_each_entry_safe(chunk, chunk1, lqueue, transmitted_list) { 579 list_for_each_entry_safe(chunk, chunk1, lqueue, transmitted_list) {
580 /* If the chunk is abandoned, move it to abandoned list. */
581 if (sctp_chunk_abandoned(chunk)) {
582 list_del_init(&chunk->transmitted_list);
583 sctp_insert_list(&q->abandoned,
584 &chunk->transmitted_list);
585 continue;
586 }
580 587
581 /* Make sure that Gap Acked TSNs are not retransmitted. A 588 /* Make sure that Gap Acked TSNs are not retransmitted. A
582 * simple approach is just to move such TSNs out of the 589 * simple approach is just to move such TSNs out of the
@@ -618,9 +625,12 @@ redo:
618 625
619 /* If we are retransmitting, we should only 626 /* If we are retransmitting, we should only
620 * send a single packet. 627 * send a single packet.
628 * Otherwise, try appending this chunk again.
621 */ 629 */
622 if (rtx_timeout || fast_rtx) 630 if (rtx_timeout || fast_rtx)
623 done = 1; 631 done = 1;
632 else
633 goto redo;
624 634
625 /* Bundle next chunk in the next round. */ 635 /* Bundle next chunk in the next round. */
626 break; 636 break;
@@ -1683,8 +1693,9 @@ static void sctp_mark_missing(struct sctp_outq *q,
1683 /* SFR-CACC may require us to skip marking 1693 /* SFR-CACC may require us to skip marking
1684 * this chunk as missing. 1694 * this chunk as missing.
1685 */ 1695 */
1686 if (!transport || !sctp_cacc_skip(primary, transport, 1696 if (!transport || !sctp_cacc_skip(primary,
1687 count_of_newacks, tsn)) { 1697 chunk->transport,
1698 count_of_newacks, tsn)) {
1688 chunk->tsn_missing_report++; 1699 chunk->tsn_missing_report++;
1689 1700
1690 SCTP_DEBUG_PRINTK( 1701 SCTP_DEBUG_PRINTK(
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 065d99958ced..67380a29e2e9 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -332,13 +332,12 @@ static int sctp_v4_to_addr_param(const union sctp_addr *addr,
332} 332}
333 333
334/* Initialize a sctp_addr from a dst_entry. */ 334/* Initialize a sctp_addr from a dst_entry. */
335static void sctp_v4_dst_saddr(union sctp_addr *saddr, struct dst_entry *dst, 335static void sctp_v4_dst_saddr(union sctp_addr *saddr, struct flowi4 *fl4,
336 __be16 port) 336 __be16 port)
337{ 337{
338 struct rtable *rt = (struct rtable *)dst;
339 saddr->v4.sin_family = AF_INET; 338 saddr->v4.sin_family = AF_INET;
340 saddr->v4.sin_port = port; 339 saddr->v4.sin_port = port;
341 saddr->v4.sin_addr.s_addr = rt->rt_src; 340 saddr->v4.sin_addr.s_addr = fl4->saddr;
342} 341}
343 342
344/* Compare two addresses exactly. */ 343/* Compare two addresses exactly. */
@@ -456,35 +455,36 @@ static sctp_scope_t sctp_v4_scope(union sctp_addr *addr)
456 * addresses. If an association is passed, tries to get a dst entry with a 455
457 * source address that matches an address in the bind address list. 456 * source address that matches an address in the bind address list.
458 */ 457 */
459static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc, 458static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
460 union sctp_addr *daddr, 459 struct flowi *fl, struct sock *sk)
461 union sctp_addr *saddr)
462{ 460{
461 struct sctp_association *asoc = t->asoc;
463 struct rtable *rt; 462 struct rtable *rt;
464 struct flowi4 fl4; 463 struct flowi4 *fl4 = &fl->u.ip4;
465 struct sctp_bind_addr *bp; 464 struct sctp_bind_addr *bp;
466 struct sctp_sockaddr_entry *laddr; 465 struct sctp_sockaddr_entry *laddr;
467 struct dst_entry *dst = NULL; 466 struct dst_entry *dst = NULL;
467 union sctp_addr *daddr = &t->ipaddr;
468 union sctp_addr dst_saddr; 468 union sctp_addr dst_saddr;
469 469
470 memset(&fl4, 0x0, sizeof(struct flowi4)); 470 memset(fl4, 0x0, sizeof(struct flowi4));
471 fl4.daddr = daddr->v4.sin_addr.s_addr; 471 fl4->daddr = daddr->v4.sin_addr.s_addr;
472 fl4.fl4_dport = daddr->v4.sin_port; 472 fl4->fl4_dport = daddr->v4.sin_port;
473 fl4.flowi4_proto = IPPROTO_SCTP; 473 fl4->flowi4_proto = IPPROTO_SCTP;
474 if (asoc) { 474 if (asoc) {
475 fl4.flowi4_tos = RT_CONN_FLAGS(asoc->base.sk); 475 fl4->flowi4_tos = RT_CONN_FLAGS(asoc->base.sk);
476 fl4.flowi4_oif = asoc->base.sk->sk_bound_dev_if; 476 fl4->flowi4_oif = asoc->base.sk->sk_bound_dev_if;
477 fl4.fl4_sport = htons(asoc->base.bind_addr.port); 477 fl4->fl4_sport = htons(asoc->base.bind_addr.port);
478 } 478 }
479 if (saddr) { 479 if (saddr) {
480 fl4.saddr = saddr->v4.sin_addr.s_addr; 480 fl4->saddr = saddr->v4.sin_addr.s_addr;
481 fl4.fl4_sport = saddr->v4.sin_port; 481 fl4->fl4_sport = saddr->v4.sin_port;
482 } 482 }
483 483
484 SCTP_DEBUG_PRINTK("%s: DST:%pI4, SRC:%pI4 - ", 484 SCTP_DEBUG_PRINTK("%s: DST:%pI4, SRC:%pI4 - ",
485 __func__, &fl4.daddr, &fl4.saddr); 485 __func__, &fl4->daddr, &fl4->saddr);
486 486
487 rt = ip_route_output_key(&init_net, &fl4); 487 rt = ip_route_output_key(&init_net, fl4);
488 if (!IS_ERR(rt)) 488 if (!IS_ERR(rt))
489 dst = &rt->dst; 489 dst = &rt->dst;
490 490
@@ -500,7 +500,7 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
500 /* Walk through the bind address list and look for a bind 500 /* Walk through the bind address list and look for a bind
501 * address that matches the source address of the returned dst. 501 * address that matches the source address of the returned dst.
502 */ 502 */
503 sctp_v4_dst_saddr(&dst_saddr, dst, htons(bp->port)); 503 sctp_v4_dst_saddr(&dst_saddr, fl4, htons(bp->port));
504 rcu_read_lock(); 504 rcu_read_lock();
505 list_for_each_entry_rcu(laddr, &bp->address_list, list) { 505 list_for_each_entry_rcu(laddr, &bp->address_list, list) {
506 if (!laddr->valid || (laddr->state != SCTP_ADDR_SRC)) 506 if (!laddr->valid || (laddr->state != SCTP_ADDR_SRC))
@@ -526,9 +526,9 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
526 continue; 526 continue;
527 if ((laddr->state == SCTP_ADDR_SRC) && 527 if ((laddr->state == SCTP_ADDR_SRC) &&
528 (AF_INET == laddr->a.sa.sa_family)) { 528 (AF_INET == laddr->a.sa.sa_family)) {
529 fl4.saddr = laddr->a.v4.sin_addr.s_addr; 529 fl4->saddr = laddr->a.v4.sin_addr.s_addr;
530 fl4.fl4_sport = laddr->a.v4.sin_port; 530 fl4->fl4_sport = laddr->a.v4.sin_port;
531 rt = ip_route_output_key(&init_net, &fl4); 531 rt = ip_route_output_key(&init_net, fl4);
532 if (!IS_ERR(rt)) { 532 if (!IS_ERR(rt)) {
533 dst = &rt->dst; 533 dst = &rt->dst;
534 goto out_unlock; 534 goto out_unlock;
@@ -539,33 +539,27 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
539out_unlock: 539out_unlock:
540 rcu_read_unlock(); 540 rcu_read_unlock();
541out: 541out:
542 t->dst = dst;
542 if (dst) 543 if (dst)
543 SCTP_DEBUG_PRINTK("rt_dst:%pI4, rt_src:%pI4\n", 544 SCTP_DEBUG_PRINTK("rt_dst:%pI4, rt_src:%pI4\n",
544 &rt->rt_dst, &rt->rt_src); 545 &fl4->daddr, &fl4->saddr);
545 else 546 else
546 SCTP_DEBUG_PRINTK("NO ROUTE\n"); 547 SCTP_DEBUG_PRINTK("NO ROUTE\n");
547
548 return dst;
549} 548}
550 549
551/* For v4, the source address is cached in the route entry(dst). So no need 550/* For v4, the source address is cached in the route entry(dst). So no need
552 * to cache it separately and hence this is an empty routine. 551 * to cache it separately and hence this is an empty routine.
553 */ 552 */
554static void sctp_v4_get_saddr(struct sctp_sock *sk, 553static void sctp_v4_get_saddr(struct sctp_sock *sk,
555 struct sctp_association *asoc, 554 struct sctp_transport *t,
556 struct dst_entry *dst, 555 struct flowi *fl)
557 union sctp_addr *daddr,
558 union sctp_addr *saddr)
559{ 556{
560 struct rtable *rt = (struct rtable *)dst; 557 union sctp_addr *saddr = &t->saddr;
561 558 struct rtable *rt = (struct rtable *)t->dst;
562 if (!asoc)
563 return;
564 559
565 if (rt) { 560 if (rt) {
566 saddr->v4.sin_family = AF_INET; 561 saddr->v4.sin_family = AF_INET;
567 saddr->v4.sin_port = htons(asoc->base.bind_addr.port); 562 saddr->v4.sin_addr.s_addr = fl->u.ip4.saddr;
568 saddr->v4.sin_addr.s_addr = rt->rt_src;
569 } 563 }
570} 564}
571 565
@@ -847,14 +841,14 @@ static inline int sctp_v4_xmit(struct sk_buff *skb,
847 841
848 SCTP_DEBUG_PRINTK("%s: skb:%p, len:%d, src:%pI4, dst:%pI4\n", 842 SCTP_DEBUG_PRINTK("%s: skb:%p, len:%d, src:%pI4, dst:%pI4\n",
849 __func__, skb, skb->len, 843 __func__, skb, skb->len,
850 &skb_rtable(skb)->rt_src, 844 &transport->fl.u.ip4.saddr,
851 &skb_rtable(skb)->rt_dst); 845 &transport->fl.u.ip4.daddr);
852 846
853 inet->pmtudisc = transport->param_flags & SPP_PMTUD_ENABLE ? 847 inet->pmtudisc = transport->param_flags & SPP_PMTUD_ENABLE ?
854 IP_PMTUDISC_DO : IP_PMTUDISC_DONT; 848 IP_PMTUDISC_DO : IP_PMTUDISC_DONT;
855 849
856 SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS); 850 SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS);
857 return ip_queue_xmit(skb); 851 return ip_queue_xmit(skb, &transport->fl);
858} 852}
859 853
860static struct sctp_af sctp_af_inet; 854static struct sctp_af sctp_af_inet;
@@ -943,7 +937,6 @@ static struct sctp_af sctp_af_inet = {
943 .to_sk_daddr = sctp_v4_to_sk_daddr, 937 .to_sk_daddr = sctp_v4_to_sk_daddr,
944 .from_addr_param = sctp_v4_from_addr_param, 938 .from_addr_param = sctp_v4_from_addr_param,
945 .to_addr_param = sctp_v4_to_addr_param, 939 .to_addr_param = sctp_v4_to_addr_param,
946 .dst_saddr = sctp_v4_dst_saddr,
947 .cmp_addr = sctp_v4_cmp_addr, 940 .cmp_addr = sctp_v4_cmp_addr,
948 .addr_valid = sctp_v4_addr_valid, 941 .addr_valid = sctp_v4_addr_valid,
949 .inaddr_any = sctp_v4_inaddr_any, 942 .inaddr_any = sctp_v4_inaddr_any,
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index b3434cc7d0cf..58eb27fed4b4 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1075,20 +1075,28 @@ nodata:
1075 1075
1076/* Make a HEARTBEAT chunk. */ 1076/* Make a HEARTBEAT chunk. */
1077struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc, 1077struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc,
1078 const struct sctp_transport *transport, 1078 const struct sctp_transport *transport)
1079 const void *payload, const size_t paylen)
1080{ 1079{
1081 struct sctp_chunk *retval = sctp_make_chunk(asoc, SCTP_CID_HEARTBEAT, 1080 struct sctp_chunk *retval;
1082 0, paylen); 1081 sctp_sender_hb_info_t hbinfo;
1082
1083 retval = sctp_make_chunk(asoc, SCTP_CID_HEARTBEAT, 0, sizeof(hbinfo));
1083 1084
1084 if (!retval) 1085 if (!retval)
1085 goto nodata; 1086 goto nodata;
1086 1087
1088 hbinfo.param_hdr.type = SCTP_PARAM_HEARTBEAT_INFO;
1089 hbinfo.param_hdr.length = htons(sizeof(sctp_sender_hb_info_t));
1090 hbinfo.daddr = transport->ipaddr;
1091 hbinfo.sent_at = jiffies;
1092 hbinfo.hb_nonce = transport->hb_nonce;
1093
1087 /* Cast away the 'const', as this is just telling the chunk 1094 /* Cast away the 'const', as this is just telling the chunk
1088 * what transport it belongs to. 1095 * what transport it belongs to.
1089 */ 1096 */
1090 retval->transport = (struct sctp_transport *) transport; 1097 retval->transport = (struct sctp_transport *) transport;
1091 retval->subh.hbs_hdr = sctp_addto_chunk(retval, paylen, payload); 1098 retval->subh.hbs_hdr = sctp_addto_chunk(retval, sizeof(hbinfo),
1099 &hbinfo);
1092 1100
1093nodata: 1101nodata:
1094 return retval; 1102 return retval;
@@ -2242,14 +2250,17 @@ int sctp_verify_init(const struct sctp_association *asoc,
2242 * Returns 0 on failure, else success. 2250 * Returns 0 on failure, else success.
2243 * FIXME: This is an association method. 2251 * FIXME: This is an association method.
2244 */ 2252 */
2245int sctp_process_init(struct sctp_association *asoc, sctp_cid_t cid, 2253int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
2246 const union sctp_addr *peer_addr, 2254 const union sctp_addr *peer_addr,
2247 sctp_init_chunk_t *peer_init, gfp_t gfp) 2255 sctp_init_chunk_t *peer_init, gfp_t gfp)
2248{ 2256{
2249 union sctp_params param; 2257 union sctp_params param;
2250 struct sctp_transport *transport; 2258 struct sctp_transport *transport;
2251 struct list_head *pos, *temp; 2259 struct list_head *pos, *temp;
2260 struct sctp_af *af;
2261 union sctp_addr addr;
2252 char *cookie; 2262 char *cookie;
2263 int src_match = 0;
2253 2264
2254 /* We must include the address that the INIT packet came from. 2265 /* We must include the address that the INIT packet came from.
2255 * This is the only address that matters for an INIT packet. 2266 * This is the only address that matters for an INIT packet.
@@ -2261,18 +2272,31 @@ int sctp_process_init(struct sctp_association *asoc, sctp_cid_t cid,
2261 * added as the primary transport. The source address seems to 2272 * added as the primary transport. The source address seems to
2262 * be a better choice than any of the embedded addresses. 2273
2263 */ 2274 */
2264 if (peer_addr) { 2275 if(!sctp_assoc_add_peer(asoc, peer_addr, gfp, SCTP_ACTIVE))
2265 if(!sctp_assoc_add_peer(asoc, peer_addr, gfp, SCTP_ACTIVE)) 2276 goto nomem;
2266 goto nomem; 2277
2267 } 2278 if (sctp_cmp_addr_exact(sctp_source(chunk), peer_addr))
2279 src_match = 1;
2268 2280
2269 /* Process the initialization parameters. */ 2281 /* Process the initialization parameters. */
2270 sctp_walk_params(param, peer_init, init_hdr.params) { 2282 sctp_walk_params(param, peer_init, init_hdr.params) {
2283 if (!src_match && (param.p->type == SCTP_PARAM_IPV4_ADDRESS ||
2284 param.p->type == SCTP_PARAM_IPV6_ADDRESS)) {
2285 af = sctp_get_af_specific(param_type2af(param.p->type));
2286 af->from_addr_param(&addr, param.addr,
2287 chunk->sctp_hdr->source, 0);
2288 if (sctp_cmp_addr_exact(sctp_source(chunk), &addr))
2289 src_match = 1;
2290 }
2271 2291
2272 if (!sctp_process_param(asoc, param, peer_addr, gfp)) 2292 if (!sctp_process_param(asoc, param, peer_addr, gfp))
2273 goto clean_up; 2293 goto clean_up;
2274 } 2294 }
2275 2295
2296 /* source address of chunk may not match any valid address */
2297 if (!src_match)
2298 goto clean_up;
2299
2276 /* AUTH: After processing the parameters, make sure that we 2300 /* AUTH: After processing the parameters, make sure that we
2277 * have all the required info to potentially do authentications. 2301 * have all the required info to potentially do authentications.
2278 */ 2302 */
@@ -2923,7 +2947,7 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
2923 asconf_param->param_hdr.type != SCTP_PARAM_SET_PRIMARY) 2947 asconf_param->param_hdr.type != SCTP_PARAM_SET_PRIMARY)
2924 return SCTP_ERROR_UNKNOWN_PARAM; 2948 return SCTP_ERROR_UNKNOWN_PARAM;
2925 2949
2926 switch (addr_param->v4.param_hdr.type) { 2950 switch (addr_param->p.type) {
2927 case SCTP_PARAM_IPV6_ADDRESS: 2951 case SCTP_PARAM_IPV6_ADDRESS:
2928 if (!asoc->peer.ipv6_address) 2952 if (!asoc->peer.ipv6_address)
2929 return SCTP_ERROR_DNS_FAILED; 2953 return SCTP_ERROR_DNS_FAILED;
@@ -2936,7 +2960,7 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
2936 return SCTP_ERROR_DNS_FAILED; 2960 return SCTP_ERROR_DNS_FAILED;
2937 } 2961 }
2938 2962
2939 af = sctp_get_af_specific(param_type2af(addr_param->v4.param_hdr.type)); 2963 af = sctp_get_af_specific(param_type2af(addr_param->p.type));
2940 if (unlikely(!af)) 2964 if (unlikely(!af))
2941 return SCTP_ERROR_DNS_FAILED; 2965 return SCTP_ERROR_DNS_FAILED;
2942 2966
@@ -3100,7 +3124,7 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
3100 /* Skip the address parameter and store a pointer to the first 3124 /* Skip the address parameter and store a pointer to the first
3101 * asconf parameter. 3125 * asconf parameter.
3102 */ 3126 */
3103 length = ntohs(addr_param->v4.param_hdr.length); 3127 length = ntohs(addr_param->p.length);
3104 asconf_param = (sctp_addip_param_t *)((void *)addr_param + length); 3128 asconf_param = (sctp_addip_param_t *)((void *)addr_param + length);
3105 chunk_len -= length; 3129 chunk_len -= length;
3106 3130
@@ -3177,7 +3201,7 @@ static void sctp_asconf_param_success(struct sctp_association *asoc,
3177 ((void *)asconf_param + sizeof(sctp_addip_param_t)); 3201 ((void *)asconf_param + sizeof(sctp_addip_param_t));
3178 3202
3179 /* We have checked the packet before, so we do not check again. */ 3203 /* We have checked the packet before, so we do not check again. */
3180 af = sctp_get_af_specific(param_type2af(addr_param->v4.param_hdr.type)); 3204 af = sctp_get_af_specific(param_type2af(addr_param->p.type));
3181 af->from_addr_param(&addr, addr_param, htons(bp->port), 0); 3205 af->from_addr_param(&addr, addr_param, htons(bp->port), 0);
3182 3206
3183 switch (asconf_param->param_hdr.type) { 3207 switch (asconf_param->param_hdr.type) {
@@ -3193,11 +3217,8 @@ static void sctp_asconf_param_success(struct sctp_association *asoc,
3193 local_bh_enable(); 3217 local_bh_enable();
3194 list_for_each_entry(transport, &asoc->peer.transport_addr_list, 3218 list_for_each_entry(transport, &asoc->peer.transport_addr_list,
3195 transports) { 3219 transports) {
3196 if (transport->state == SCTP_ACTIVE)
3197 continue;
3198 dst_release(transport->dst); 3220 dst_release(transport->dst);
3199 sctp_transport_route(transport, NULL, 3221 transport->dst = NULL;
3200 sctp_sk(asoc->base.sk));
3201 } 3222 }
3202 break; 3223 break;
3203 case SCTP_PARAM_DEL_IP: 3224 case SCTP_PARAM_DEL_IP:
@@ -3207,8 +3228,7 @@ static void sctp_asconf_param_success(struct sctp_association *asoc,
3207 list_for_each_entry(transport, &asoc->peer.transport_addr_list, 3228 list_for_each_entry(transport, &asoc->peer.transport_addr_list,
3208 transports) { 3229 transports) {
3209 dst_release(transport->dst); 3230 dst_release(transport->dst);
3210 sctp_transport_route(transport, NULL, 3231 transport->dst = NULL;
3211 sctp_sk(asoc->base.sk));
3212 } 3232 }
3213 break; 3233 break;
3214 default: 3234 default:
@@ -3304,7 +3324,7 @@ int sctp_process_asconf_ack(struct sctp_association *asoc,
3304 /* Skip the address parameter in the last asconf sent and store a 3324 /* Skip the address parameter in the last asconf sent and store a
3305 * pointer to the first asconf parameter. 3325 * pointer to the first asconf parameter.
3306 */ 3326 */
3307 length = ntohs(addr_param->v4.param_hdr.length); 3327 length = ntohs(addr_param->p.length);
3308 asconf_param = (sctp_addip_param_t *)((void *)addr_param + length); 3328 asconf_param = (sctp_addip_param_t *)((void *)addr_param + length);
3309 asconf_len -= length; 3329 asconf_len -= length;
3310 3330
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 5f86ee4b54c1..d612ca1ca6c0 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -595,8 +595,7 @@ static int sctp_cmd_process_init(sctp_cmd_seq_t *commands,
595 * fail during INIT processing (due to malloc problems), 595 * fail during INIT processing (due to malloc problems),
596 * just return the error and stop processing the stack. 596 * just return the error and stop processing the stack.
597 */ 597 */
598 if (!sctp_process_init(asoc, chunk->chunk_hdr->type, 598 if (!sctp_process_init(asoc, chunk, sctp_source(chunk), peer_init, gfp))
599 sctp_source(chunk), peer_init, gfp))
600 error = -ENOMEM; 599 error = -ENOMEM;
601 else 600 else
602 error = 0; 601 error = 0;
@@ -1415,12 +1414,6 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1415 SCTP_RTXR_T3_RTX); 1414 SCTP_RTXR_T3_RTX);
1416 break; 1415 break;
1417 1416
1418 case SCTP_CMD_TRANSMIT:
1419 /* Kick start transmission. */
1420 error = sctp_outq_uncork(&asoc->outqueue);
1421 local_cork = 0;
1422 break;
1423
1424 case SCTP_CMD_ECN_CE: 1417 case SCTP_CMD_ECN_CE:
1425 /* Do delayed CE processing. */ 1418 /* Do delayed CE processing. */
1426 sctp_do_ecn_ce_work(asoc, cmd->obj.u32); 1419 sctp_do_ecn_ce_work(asoc, cmd->obj.u32);
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 76792083c379..7f4a4f8368ee 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -393,8 +393,7 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
393 goto nomem_init; 393 goto nomem_init;
394 394
395 /* The call, sctp_process_init(), can fail on memory allocation. */ 395 /* The call, sctp_process_init(), can fail on memory allocation. */
396 if (!sctp_process_init(new_asoc, chunk->chunk_hdr->type, 396 if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk),
397 sctp_source(chunk),
398 (sctp_init_chunk_t *)chunk->chunk_hdr, 397 (sctp_init_chunk_t *)chunk->chunk_hdr,
399 GFP_ATOMIC)) 398 GFP_ATOMIC))
400 goto nomem_init; 399 goto nomem_init;
@@ -725,7 +724,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
725 */ 724 */
726 peer_init = &chunk->subh.cookie_hdr->c.peer_init[0]; 725 peer_init = &chunk->subh.cookie_hdr->c.peer_init[0];
727 726
728 if (!sctp_process_init(new_asoc, chunk->chunk_hdr->type, 727 if (!sctp_process_init(new_asoc, chunk,
729 &chunk->subh.cookie_hdr->c.peer_addr, 728 &chunk->subh.cookie_hdr->c.peer_addr,
730 peer_init, GFP_ATOMIC)) 729 peer_init, GFP_ATOMIC))
731 goto nomem_init; 730 goto nomem_init;
@@ -942,18 +941,9 @@ static sctp_disposition_t sctp_sf_heartbeat(const struct sctp_endpoint *ep,
942{ 941{
943 struct sctp_transport *transport = (struct sctp_transport *) arg; 942 struct sctp_transport *transport = (struct sctp_transport *) arg;
944 struct sctp_chunk *reply; 943 struct sctp_chunk *reply;
945 sctp_sender_hb_info_t hbinfo;
946 size_t paylen = 0;
947
948 hbinfo.param_hdr.type = SCTP_PARAM_HEARTBEAT_INFO;
949 hbinfo.param_hdr.length = htons(sizeof(sctp_sender_hb_info_t));
950 hbinfo.daddr = transport->ipaddr;
951 hbinfo.sent_at = jiffies;
952 hbinfo.hb_nonce = transport->hb_nonce;
953 944
954 /* Send a heartbeat to our peer. */ 945 /* Send a heartbeat to our peer. */
955 paylen = sizeof(sctp_sender_hb_info_t); 946 reply = sctp_make_heartbeat(asoc, transport);
956 reply = sctp_make_heartbeat(asoc, transport, &hbinfo, paylen);
957 if (!reply) 947 if (!reply)
958 return SCTP_DISPOSITION_NOMEM; 948 return SCTP_DISPOSITION_NOMEM;
959 949
@@ -1464,8 +1454,7 @@ static sctp_disposition_t sctp_sf_do_unexpected_init(
1464 * Verification Tag and Peers Verification tag into a reserved 1454 * Verification Tag and Peers Verification tag into a reserved
1465 * place (local tie-tag and per tie-tag) within the state cookie. 1455 * place (local tie-tag and per tie-tag) within the state cookie.
1466 */ 1456 */
1467 if (!sctp_process_init(new_asoc, chunk->chunk_hdr->type, 1457 if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk),
1468 sctp_source(chunk),
1469 (sctp_init_chunk_t *)chunk->chunk_hdr, 1458 (sctp_init_chunk_t *)chunk->chunk_hdr,
1470 GFP_ATOMIC)) 1459 GFP_ATOMIC))
1471 goto nomem; 1460 goto nomem;
@@ -1694,8 +1683,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(const struct sctp_endpoint *ep,
1694 */ 1683 */
1695 peer_init = &chunk->subh.cookie_hdr->c.peer_init[0]; 1684 peer_init = &chunk->subh.cookie_hdr->c.peer_init[0];
1696 1685
1697 if (!sctp_process_init(new_asoc, chunk->chunk_hdr->type, 1686 if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk), peer_init,
1698 sctp_source(chunk), peer_init,
1699 GFP_ATOMIC)) 1687 GFP_ATOMIC))
1700 goto nomem; 1688 goto nomem;
1701 1689
@@ -1780,8 +1768,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_b(const struct sctp_endpoint *ep,
1780 * side effects--it is safe to run them here. 1768 * side effects--it is safe to run them here.
1781 */ 1769 */
1782 peer_init = &chunk->subh.cookie_hdr->c.peer_init[0]; 1770 peer_init = &chunk->subh.cookie_hdr->c.peer_init[0];
1783 if (!sctp_process_init(new_asoc, chunk->chunk_hdr->type, 1771 if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk), peer_init,
1784 sctp_source(chunk), peer_init,
1785 GFP_ATOMIC)) 1772 GFP_ATOMIC))
1786 goto nomem; 1773 goto nomem;
1787 1774
@@ -2412,8 +2399,15 @@ static sctp_disposition_t __sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
2412 2399
2413 /* See if we have an error cause code in the chunk. */ 2400 /* See if we have an error cause code in the chunk. */
2414 len = ntohs(chunk->chunk_hdr->length); 2401 len = ntohs(chunk->chunk_hdr->length);
2415 if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr)) 2402 if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr)) {
2403
2404 sctp_errhdr_t *err;
2405 sctp_walk_errors(err, chunk->chunk_hdr);
2406 if ((void *)err != (void *)chunk->chunk_end)
2407 return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
2408
2416 error = ((sctp_errhdr_t *)chunk->skb->data)->cause; 2409 error = ((sctp_errhdr_t *)chunk->skb->data)->cause;
2410 }
2417 2411
2418 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNRESET)); 2412 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNRESET));
2419 /* ASSOC_FAILED will DELETE_TCB. */ 2413 /* ASSOC_FAILED will DELETE_TCB. */
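This hunk, and the sctp_sf_operr_notify() change below, both use the same defensive idiom: walk the error causes with sctp_walk_errors() and require the cursor to land exactly on chunk_end, since any other landing point means a cause length was corrupt. A generic sketch of that style of TLV bounds check (simplified types, userspace headers, illustrative names):

#include <stdint.h>
#include <stddef.h>
#include <arpa/inet.h>

struct tlv_hdr {
	uint16_t type;
	uint16_t length;	/* covers the header too, network order */
};

/* Return 1 only if the TLV lengths tile [p, end) exactly; walking
 * past 'end' or meeting an undersized length means corruption.
 */
static int tlvs_are_sane(const uint8_t *p, const uint8_t *end)
{
	while (p + sizeof(struct tlv_hdr) <= end) {
		uint16_t len = ntohs(((const struct tlv_hdr *)p)->length);

		if (len < sizeof(struct tlv_hdr))
			return 0;
		p += (len + 3) & ~3;	/* causes are 4-byte aligned */
	}
	return p == end;
}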
@@ -3204,6 +3198,7 @@ sctp_disposition_t sctp_sf_operr_notify(const struct sctp_endpoint *ep,
3204 sctp_cmd_seq_t *commands) 3198 sctp_cmd_seq_t *commands)
3205{ 3199{
3206 struct sctp_chunk *chunk = arg; 3200 struct sctp_chunk *chunk = arg;
3201 sctp_errhdr_t *err;
3207 3202
3208 if (!sctp_vtag_verify(chunk, asoc)) 3203 if (!sctp_vtag_verify(chunk, asoc))
3209 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 3204 return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
@@ -3212,6 +3207,10 @@ sctp_disposition_t sctp_sf_operr_notify(const struct sctp_endpoint *ep,
3212 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_operr_chunk_t))) 3207 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_operr_chunk_t)))
3213 return sctp_sf_violation_chunklen(ep, asoc, type, arg, 3208 return sctp_sf_violation_chunklen(ep, asoc, type, arg,
3214 commands); 3209 commands);
3210 sctp_walk_errors(err, chunk->chunk_hdr);
3211 if ((void *)err != (void *)chunk->chunk_end)
3212 return sctp_sf_violation_paramlen(ep, asoc, type, arg,
3213 (void *)err, commands);
3215 3214
3216 sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_OPERR, 3215 sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_OPERR,
3217 SCTP_CHUNK(chunk)); 3216 SCTP_CHUNK(chunk));
@@ -3320,8 +3319,10 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
3320 struct sctp_chunk *chunk = arg; 3319 struct sctp_chunk *chunk = arg;
3321 struct sk_buff *skb = chunk->skb; 3320 struct sk_buff *skb = chunk->skb;
3322 sctp_chunkhdr_t *ch; 3321 sctp_chunkhdr_t *ch;
3322 sctp_errhdr_t *err;
3323 __u8 *ch_end; 3323 __u8 *ch_end;
3324 int ootb_shut_ack = 0; 3324 int ootb_shut_ack = 0;
3325 int ootb_cookie_ack = 0;
3325 3326
3326 SCTP_INC_STATS(SCTP_MIB_OUTOFBLUES); 3327 SCTP_INC_STATS(SCTP_MIB_OUTOFBLUES);
3327 3328
@@ -3346,6 +3347,23 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
3346 if (SCTP_CID_ABORT == ch->type) 3347 if (SCTP_CID_ABORT == ch->type)
3347 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 3348 return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
3348 3349
3350 /* RFC 8.4, 7) If the packet contains a "Stale cookie" ERROR
3351 * or a COOKIE ACK the SCTP Packet should be silently
3352 * discarded.
3353 */
3354
3355 if (SCTP_CID_COOKIE_ACK == ch->type)
3356 ootb_cookie_ack = 1;
3357
3358 if (SCTP_CID_ERROR == ch->type) {
3359 sctp_walk_errors(err, ch) {
3360 if (SCTP_ERROR_STALE_COOKIE == err->cause) {
3361 ootb_cookie_ack = 1;
3362 break;
3363 }
3364 }
3365 }
3366
3349 /* Report violation if chunk len overflows */ 3367 /* Report violation if chunk len overflows */
3350 ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length)); 3368 ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
3351 if (ch_end > skb_tail_pointer(skb)) 3369 if (ch_end > skb_tail_pointer(skb))
@@ -3357,6 +3375,8 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
3357 3375
3358 if (ootb_shut_ack) 3376 if (ootb_shut_ack)
3359 return sctp_sf_shut_8_4_5(ep, asoc, type, arg, commands); 3377 return sctp_sf_shut_8_4_5(ep, asoc, type, arg, commands);
3378 else if (ootb_cookie_ack)
3379 return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
3360 else 3380 else
3361 return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands); 3381 return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
3362} 3382}
@@ -4343,8 +4363,9 @@ static sctp_disposition_t sctp_sf_violation_chunklen(
4343 4363
4344/* 4364/*
4345 * Handle a protocol violation when the parameter length is invalid. 4365 * Handle a protocol violation when the parameter length is invalid.
4346 * "Invalid" length is identified as smaller than the minimal length a 4366 * If the length is smaller than the minimum length of a given parameter,
4347 * given parameter can be. 4367 * or accumulated length in multi parameters exceeds the end of the chunk,
4368 * the length is considered as invalid.
4348 */ 4369 */
4349static sctp_disposition_t sctp_sf_violation_paramlen( 4370static sctp_disposition_t sctp_sf_violation_paramlen(
4350 const struct sctp_endpoint *ep, 4371 const struct sctp_endpoint *ep,
@@ -5056,6 +5077,30 @@ sctp_disposition_t sctp_sf_ignore_primitive(
5056 ***************************************************************************/ 5077 ***************************************************************************/
5057 5078
5058/* 5079/*
5080 * When the SCTP stack has no more user data to send or retransmit, this
5081 * notification is given to the user. Also, at the time when a user app
5082 * subscribes to this event, if there is no data to be sent or
5083 * retransmitted, the stack will immediately send up this notification.
5084 */
5085sctp_disposition_t sctp_sf_do_no_pending_tsn(
5086 const struct sctp_endpoint *ep,
5087 const struct sctp_association *asoc,
5088 const sctp_subtype_t type,
5089 void *arg,
5090 sctp_cmd_seq_t *commands)
5091{
5092 struct sctp_ulpevent *event;
5093
5094 event = sctp_ulpevent_make_sender_dry_event(asoc, GFP_ATOMIC);
5095 if (!event)
5096 return SCTP_DISPOSITION_NOMEM;
5097
5098 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(event));
5099
5100 return SCTP_DISPOSITION_CONSUME;
5101}
5102
5103/*
5059 * Start the shutdown negotiation. 5104 * Start the shutdown negotiation.
5060 * 5105 *
5061 * From Section 9.2: 5106 * From Section 9.2:
diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c
index 546d4387fb3c..0338dc6fdc9d 100644
--- a/net/sctp/sm_statetable.c
+++ b/net/sctp/sm_statetable.c
@@ -107,8 +107,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
 #define TYPE_SCTP_FUNC(func) {.fn = func, .name = #func}
 
 #define TYPE_SCTP_DATA { \
-	/* SCTP_STATE_EMPTY */ \
-	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_CLOSED */ \
 	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
@@ -128,8 +126,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
 } /* TYPE_SCTP_DATA */
 
 #define TYPE_SCTP_INIT { \
-	/* SCTP_STATE_EMPTY */ \
-	TYPE_SCTP_FUNC(sctp_sf_bug), \
 	/* SCTP_STATE_CLOSED */ \
 	TYPE_SCTP_FUNC(sctp_sf_do_5_1B_init), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
@@ -149,8 +145,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
 } /* TYPE_SCTP_INIT */
 
 #define TYPE_SCTP_INIT_ACK { \
-	/* SCTP_STATE_EMPTY */ \
-	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_CLOSED */ \
 	TYPE_SCTP_FUNC(sctp_sf_do_5_2_3_initack), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
@@ -170,8 +164,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
 } /* TYPE_SCTP_INIT_ACK */
 
 #define TYPE_SCTP_SACK { \
-	/* SCTP_STATE_EMPTY */ \
-	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_CLOSED */ \
 	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
@@ -191,8 +183,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
 } /* TYPE_SCTP_SACK */
 
 #define TYPE_SCTP_HEARTBEAT { \
-	/* SCTP_STATE_EMPTY */ \
-	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_CLOSED */ \
 	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
@@ -213,8 +203,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
 } /* TYPE_SCTP_HEARTBEAT */
 
 #define TYPE_SCTP_HEARTBEAT_ACK { \
-	/* SCTP_STATE_EMPTY */ \
-	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_CLOSED */ \
 	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
@@ -234,8 +222,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
 } /* TYPE_SCTP_HEARTBEAT_ACK */
 
 #define TYPE_SCTP_ABORT { \
-	/* SCTP_STATE_EMPTY */ \
-	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_CLOSED */ \
 	TYPE_SCTP_FUNC(sctp_sf_pdiscard), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
@@ -255,8 +241,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
 } /* TYPE_SCTP_ABORT */
 
 #define TYPE_SCTP_SHUTDOWN { \
-	/* SCTP_STATE_EMPTY */ \
-	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_CLOSED */ \
 	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
@@ -276,8 +260,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
 } /* TYPE_SCTP_SHUTDOWN */
 
 #define TYPE_SCTP_SHUTDOWN_ACK { \
-	/* SCTP_STATE_EMPTY */ \
-	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_CLOSED */ \
 	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
@@ -297,8 +279,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
 } /* TYPE_SCTP_SHUTDOWN_ACK */
 
 #define TYPE_SCTP_ERROR { \
-	/* SCTP_STATE_EMPTY */ \
-	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_CLOSED */ \
 	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
@@ -318,8 +298,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
 } /* TYPE_SCTP_ERROR */
 
 #define TYPE_SCTP_COOKIE_ECHO { \
-	/* SCTP_STATE_EMPTY */ \
-	TYPE_SCTP_FUNC(sctp_sf_bug), \
 	/* SCTP_STATE_CLOSED */ \
 	TYPE_SCTP_FUNC(sctp_sf_do_5_1D_ce), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
@@ -339,8 +317,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
 } /* TYPE_SCTP_COOKIE_ECHO */
 
 #define TYPE_SCTP_COOKIE_ACK { \
-	/* SCTP_STATE_EMPTY */ \
-	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_CLOSED */ \
 	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
@@ -360,8 +336,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
 } /* TYPE_SCTP_COOKIE_ACK */
 
 #define TYPE_SCTP_ECN_ECNE { \
-	/* SCTP_STATE_EMPTY */ \
-	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_CLOSED */ \
 	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
@@ -381,8 +355,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
 } /* TYPE_SCTP_ECN_ECNE */
 
 #define TYPE_SCTP_ECN_CWR { \
-	/* SCTP_STATE_EMPTY */ \
-	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_CLOSED */ \
 	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
@@ -402,8 +374,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
 } /* TYPE_SCTP_ECN_CWR */
 
 #define TYPE_SCTP_SHUTDOWN_COMPLETE { \
-	/* SCTP_STATE_EMPTY */ \
-	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_CLOSED */ \
 	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
@@ -446,8 +416,6 @@ static const sctp_sm_table_entry_t chunk_event_table[SCTP_NUM_BASE_CHUNK_TYPES][
 }; /* state_fn_t chunk_event_table[][] */
 
 #define TYPE_SCTP_ASCONF { \
-	/* SCTP_STATE_EMPTY */ \
-	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_CLOSED */ \
 	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
@@ -467,8 +435,6 @@ static const sctp_sm_table_entry_t chunk_event_table[SCTP_NUM_BASE_CHUNK_TYPES][
 } /* TYPE_SCTP_ASCONF */
 
 #define TYPE_SCTP_ASCONF_ACK { \
-	/* SCTP_STATE_EMPTY */ \
-	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_CLOSED */ \
 	TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
@@ -496,8 +462,6 @@ static const sctp_sm_table_entry_t addip_chunk_event_table[SCTP_NUM_ADDIP_CHUNK_
 }; /*state_fn_t addip_chunk_event_table[][] */
 
 #define TYPE_SCTP_FWD_TSN { \
-	/* SCTP_STATE_EMPTY */ \
-	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_CLOSED */ \
 	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
@@ -524,8 +488,6 @@ static const sctp_sm_table_entry_t prsctp_chunk_event_table[SCTP_NUM_PRSCTP_CHUN
 }; /*state_fn_t prsctp_chunk_event_table[][] */
 
 #define TYPE_SCTP_AUTH { \
-	/* SCTP_STATE_EMPTY */ \
-	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_CLOSED */ \
 	TYPE_SCTP_FUNC(sctp_sf_ootb), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
@@ -553,8 +515,6 @@ static const sctp_sm_table_entry_t auth_chunk_event_table[SCTP_NUM_AUTH_CHUNK_TY
 
 static const sctp_sm_table_entry_t
 chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = {
-	/* SCTP_STATE_EMPTY */
-	TYPE_SCTP_FUNC(sctp_sf_ootb),
 	/* SCTP_STATE_CLOSED */
 	TYPE_SCTP_FUNC(sctp_sf_ootb),
 	/* SCTP_STATE_COOKIE_WAIT */
@@ -575,8 +535,6 @@ chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = {
 
 
 #define TYPE_SCTP_PRIMITIVE_ASSOCIATE { \
-	/* SCTP_STATE_EMPTY */ \
-	TYPE_SCTP_FUNC(sctp_sf_bug), \
 	/* SCTP_STATE_CLOSED */ \
 	TYPE_SCTP_FUNC(sctp_sf_do_prm_asoc), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
@@ -596,8 +554,6 @@ chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = {
 } /* TYPE_SCTP_PRIMITIVE_ASSOCIATE */
 
 #define TYPE_SCTP_PRIMITIVE_SHUTDOWN { \
-	/* SCTP_STATE_EMPTY */ \
-	TYPE_SCTP_FUNC(sctp_sf_bug), \
 	/* SCTP_STATE_CLOSED */ \
 	TYPE_SCTP_FUNC(sctp_sf_error_closed), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
@@ -617,8 +573,6 @@ chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = {
 } /* TYPE_SCTP_PRIMITIVE_SHUTDOWN */
 
 #define TYPE_SCTP_PRIMITIVE_ABORT { \
-	/* SCTP_STATE_EMPTY */ \
-	TYPE_SCTP_FUNC(sctp_sf_bug), \
 	/* SCTP_STATE_CLOSED */ \
 	TYPE_SCTP_FUNC(sctp_sf_error_closed), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
@@ -638,8 +592,6 @@ chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = {
 } /* TYPE_SCTP_PRIMITIVE_ABORT */
 
 #define TYPE_SCTP_PRIMITIVE_SEND { \
-	/* SCTP_STATE_EMPTY */ \
-	TYPE_SCTP_FUNC(sctp_sf_bug), \
 	/* SCTP_STATE_CLOSED */ \
 	TYPE_SCTP_FUNC(sctp_sf_error_closed), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
@@ -659,8 +611,6 @@ chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = {
 } /* TYPE_SCTP_PRIMITIVE_SEND */
 
 #define TYPE_SCTP_PRIMITIVE_REQUESTHEARTBEAT { \
-	/* SCTP_STATE_EMPTY */ \
-	TYPE_SCTP_FUNC(sctp_sf_bug), \
 	/* SCTP_STATE_CLOSED */ \
 	TYPE_SCTP_FUNC(sctp_sf_error_closed), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
@@ -680,8 +630,6 @@ chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = {
 } /* TYPE_SCTP_PRIMITIVE_REQUESTHEARTBEAT */
 
 #define TYPE_SCTP_PRIMITIVE_ASCONF { \
-	/* SCTP_STATE_EMPTY */ \
-	TYPE_SCTP_FUNC(sctp_sf_bug), \
 	/* SCTP_STATE_CLOSED */ \
 	TYPE_SCTP_FUNC(sctp_sf_error_closed), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
@@ -713,8 +661,6 @@ static const sctp_sm_table_entry_t primitive_event_table[SCTP_NUM_PRIMITIVE_TYPE
 };
 
 #define TYPE_SCTP_OTHER_NO_PENDING_TSN { \
-	/* SCTP_STATE_EMPTY */ \
-	TYPE_SCTP_FUNC(sctp_sf_bug), \
 	/* SCTP_STATE_CLOSED */ \
 	TYPE_SCTP_FUNC(sctp_sf_ignore_other), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
@@ -722,7 +668,7 @@ static const sctp_sm_table_entry_t primitive_event_table[SCTP_NUM_PRIMITIVE_TYPE
 	/* SCTP_STATE_COOKIE_ECHOED */ \
 	TYPE_SCTP_FUNC(sctp_sf_ignore_other), \
 	/* SCTP_STATE_ESTABLISHED */ \
-	TYPE_SCTP_FUNC(sctp_sf_ignore_other), \
+	TYPE_SCTP_FUNC(sctp_sf_do_no_pending_tsn), \
 	/* SCTP_STATE_SHUTDOWN_PENDING */ \
 	TYPE_SCTP_FUNC(sctp_sf_do_9_2_start_shutdown), \
 	/* SCTP_STATE_SHUTDOWN_SENT */ \
@@ -734,8 +680,6 @@ static const sctp_sm_table_entry_t primitive_event_table[SCTP_NUM_PRIMITIVE_TYPE
 }
 
 #define TYPE_SCTP_OTHER_ICMP_PROTO_UNREACH { \
-	/* SCTP_STATE_EMPTY */ \
-	TYPE_SCTP_FUNC(sctp_sf_bug), \
 	/* SCTP_STATE_CLOSED */ \
 	TYPE_SCTP_FUNC(sctp_sf_ignore_other), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
@@ -760,8 +704,6 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_
 };
 
 #define TYPE_SCTP_EVENT_TIMEOUT_NONE { \
-	/* SCTP_STATE_EMPTY */ \
-	TYPE_SCTP_FUNC(sctp_sf_bug), \
 	/* SCTP_STATE_CLOSED */ \
 	TYPE_SCTP_FUNC(sctp_sf_bug), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
@@ -781,8 +723,6 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_
 }
 
 #define TYPE_SCTP_EVENT_TIMEOUT_T1_COOKIE { \
-	/* SCTP_STATE_EMPTY */ \
-	TYPE_SCTP_FUNC(sctp_sf_bug), \
 	/* SCTP_STATE_CLOSED */ \
 	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
@@ -802,8 +742,6 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_
 }
 
 #define TYPE_SCTP_EVENT_TIMEOUT_T1_INIT { \
-	/* SCTP_STATE_EMPTY */ \
-	TYPE_SCTP_FUNC(sctp_sf_bug), \
 	/* SCTP_STATE_CLOSED */ \
 	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
@@ -823,8 +761,6 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_
 }
 
 #define TYPE_SCTP_EVENT_TIMEOUT_T2_SHUTDOWN { \
-	/* SCTP_STATE_EMPTY */ \
-	TYPE_SCTP_FUNC(sctp_sf_bug), \
 	/* SCTP_STATE_CLOSED */ \
 	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
@@ -844,8 +780,6 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_
 }
 
 #define TYPE_SCTP_EVENT_TIMEOUT_T3_RTX { \
-	/* SCTP_STATE_EMPTY */ \
-	TYPE_SCTP_FUNC(sctp_sf_bug), \
 	/* SCTP_STATE_CLOSED */ \
 	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
@@ -865,8 +799,6 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_
 }
 
 #define TYPE_SCTP_EVENT_TIMEOUT_T4_RTO { \
-	/* SCTP_STATE_EMPTY */ \
-	TYPE_SCTP_FUNC(sctp_sf_bug), \
 	/* SCTP_STATE_CLOSED */ \
 	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
@@ -886,8 +818,6 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_
 }
 
 #define TYPE_SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD { \
-	/* SCTP_STATE_EMPTY */ \
-	TYPE_SCTP_FUNC(sctp_sf_bug), \
 	/* SCTP_STATE_CLOSED */ \
 	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
@@ -907,8 +837,6 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_
 }
 
 #define TYPE_SCTP_EVENT_TIMEOUT_HEARTBEAT { \
-	/* SCTP_STATE_EMPTY */ \
-	TYPE_SCTP_FUNC(sctp_sf_bug), \
 	/* SCTP_STATE_CLOSED */ \
 	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
@@ -928,8 +856,6 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_
 }
 
 #define TYPE_SCTP_EVENT_TIMEOUT_SACK { \
-	/* SCTP_STATE_EMPTY */ \
-	TYPE_SCTP_FUNC(sctp_sf_bug), \
 	/* SCTP_STATE_CLOSED */ \
 	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
@@ -949,8 +875,6 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_
 }
 
 #define TYPE_SCTP_EVENT_TIMEOUT_AUTOCLOSE { \
-	/* SCTP_STATE_EMPTY */ \
-	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_CLOSED */ \
 	TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
 	/* SCTP_STATE_COOKIE_WAIT */ \
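The hunks above delete the SCTP_STATE_EMPTY row from every dispatch table, so SCTP_STATE_CLOSED becomes row 0 of each table. A minimal standalone C sketch of this table-driven dispatch follows; the handler bodies and the COOKIE_WAIT entry are placeholders for illustration, not the kernel's real entries:

#include <stdio.h>

/* Simplified state list: with the unused EMPTY row gone, CLOSED is
 * the first (index 0) entry of every per-chunk dispatch row. */
enum sctp_state {
	SCTP_STATE_CLOSED,
	SCTP_STATE_COOKIE_WAIT,
	SCTP_STATE_NUM_STATES
};

typedef void (*sm_fn)(void);

static void sctp_sf_ootb(void)         { puts("ootb: out-of-the-blue handling"); }
static void sctp_sf_do_5_1B_init(void) { puts("5.1B: respond to INIT"); }

/* One row per chunk type, one column per association state. */
static const sm_fn init_row[SCTP_STATE_NUM_STATES] = {
	[SCTP_STATE_CLOSED]      = sctp_sf_do_5_1B_init,
	[SCTP_STATE_COOKIE_WAIT] = sctp_sf_ootb,   /* placeholder handler */
};

int main(void)
{
	init_row[SCTP_STATE_CLOSED]();   /* dispatch on the current state */
	return 0;
}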
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index deb82e35a107..6766913a53e6 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -658,11 +658,15 @@ static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt)
 			goto err_bindx_rem;
 		}
 
-		if (sa_addr->v4.sin_port != htons(bp->port)) {
+		if (sa_addr->v4.sin_port &&
+		    sa_addr->v4.sin_port != htons(bp->port)) {
 			retval = -EINVAL;
 			goto err_bindx_rem;
 		}
 
+		if (!sa_addr->v4.sin_port)
+			sa_addr->v4.sin_port = htons(bp->port);
+
 		/* FIXME - There is probably a need to check if sk->sk_saddr and
 		 * sk->sk_rcv_addr are currently set to one of the addresses to
 		 * be removed. This is something which needs to be looked into
@@ -1492,7 +1496,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
 	struct sctp_chunk *chunk;
 	union sctp_addr to;
 	struct sockaddr *msg_name = NULL;
-	struct sctp_sndrcvinfo default_sinfo = { 0 };
+	struct sctp_sndrcvinfo default_sinfo;
 	struct sctp_sndrcvinfo *sinfo;
 	struct sctp_initmsg *sinit;
 	sctp_assoc_t associd = 0;
@@ -1756,6 +1760,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
 	/* If the user didn't specify SNDRCVINFO, make up one with
 	 * some defaults.
 	 */
+	memset(&default_sinfo, 0, sizeof(default_sinfo));
 	default_sinfo.sinfo_stream = asoc->default_stream;
 	default_sinfo.sinfo_flags = asoc->default_flags;
 	default_sinfo.sinfo_ppid = asoc->default_ppid;
@@ -1786,12 +1791,10 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
 		goto out_free;
 	}
 
-	if (sinfo) {
-		/* Check for invalid stream. */
-		if (sinfo->sinfo_stream >= asoc->c.sinit_num_ostreams) {
-			err = -EINVAL;
-			goto out_free;
-		}
+	/* Check for invalid stream. */
+	if (sinfo->sinfo_stream >= asoc->c.sinit_num_ostreams) {
+		err = -EINVAL;
+		goto out_free;
 	}
 
 	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
@@ -2283,7 +2286,7 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
 		trans->param_flags =
 			(trans->param_flags & ~SPP_PMTUD) | pmtud_change;
 		if (update) {
-			sctp_transport_pmtu(trans);
+			sctp_transport_pmtu(trans, sctp_opt2sk(sp));
 			sctp_assoc_sync_pmtu(asoc);
 		}
 	} else if (asoc) {
@@ -3215,14 +3218,9 @@ static int sctp_setsockopt_hmac_ident(struct sock *sk,
 	if (optlen < sizeof(struct sctp_hmacalgo))
 		return -EINVAL;
 
-	hmacs = kmalloc(optlen, GFP_KERNEL);
-	if (!hmacs)
-		return -ENOMEM;
-
-	if (copy_from_user(hmacs, optval, optlen)) {
-		err = -EFAULT;
-		goto out;
-	}
+	hmacs = memdup_user(optval, optlen);
+	if (IS_ERR(hmacs))
+		return PTR_ERR(hmacs);
 
 	idents = hmacs->shmac_num_idents;
 	if (idents == 0 || idents > SCTP_AUTH_NUM_HMACS ||
@@ -3257,14 +3255,9 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
 	if (optlen <= sizeof(struct sctp_authkey))
 		return -EINVAL;
 
-	authkey = kmalloc(optlen, GFP_KERNEL);
-	if (!authkey)
-		return -ENOMEM;
-
-	if (copy_from_user(authkey, optval, optlen)) {
-		ret = -EFAULT;
-		goto out;
-	}
+	authkey = memdup_user(optval, optlen);
+	if (IS_ERR(authkey))
+		return PTR_ERR(authkey);
 
 	if (authkey->sca_keylength > optlen - sizeof(struct sctp_authkey)) {
 		ret = -EINVAL;
@@ -5283,6 +5276,55 @@ static int sctp_getsockopt_assoc_number(struct sock *sk, int len,
 	return 0;
 }
 
+/*
+ * 8.2.6. Get the Current Identifiers of Associations
+ *        (SCTP_GET_ASSOC_ID_LIST)
+ *
+ * This option gets the current list of SCTP association identifiers of
+ * the SCTP associations handled by a one-to-many style socket.
+ */
+static int sctp_getsockopt_assoc_ids(struct sock *sk, int len,
+				     char __user *optval, int __user *optlen)
+{
+	struct sctp_sock *sp = sctp_sk(sk);
+	struct sctp_association *asoc;
+	struct sctp_assoc_ids *ids;
+	u32 num = 0;
+
+	if (sctp_style(sk, TCP))
+		return -EOPNOTSUPP;
+
+	if (len < sizeof(struct sctp_assoc_ids))
+		return -EINVAL;
+
+	list_for_each_entry(asoc, &(sp->ep->asocs), asocs) {
+		num++;
+	}
+
+	if (len < sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num)
+		return -EINVAL;
+
+	len = sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num;
+
+	ids = kmalloc(len, GFP_KERNEL);
+	if (unlikely(!ids))
+		return -ENOMEM;
+
+	ids->gaids_number_of_ids = num;
+	num = 0;
+	list_for_each_entry(asoc, &(sp->ep->asocs), asocs) {
+		ids->gaids_assoc_id[num++] = asoc->assoc_id;
+	}
+
+	if (put_user(len, optlen) || copy_to_user(optval, ids, len)) {
+		kfree(ids);
+		return -EFAULT;
+	}
+
+	kfree(ids);
+	return 0;
+}
+
 SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
 				char __user *optval, int __user *optlen)
 {
@@ -5415,6 +5457,9 @@ SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
 	case SCTP_GET_ASSOC_NUMBER:
 		retval = sctp_getsockopt_assoc_number(sk, len, optval, optlen);
 		break;
+	case SCTP_GET_ASSOC_ID_LIST:
+		retval = sctp_getsockopt_assoc_ids(sk, len, optval, optlen);
+		break;
 	default:
 		retval = -ENOPROTOOPT;
 		break;
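Two of the hunks above replace an open-coded kmalloc()/copy_from_user() pair with memdup_user(), which returns either a fresh kernel copy of the user buffer or an ERR_PTR() encoding the failure. A minimal sketch of the pattern in kernel context; the function name example_setsockopt() is hypothetical:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>

/* memdup_user() collapses the allocate/copy/unwind triple into one
 * call: no separate -ENOMEM and -EFAULT paths to maintain. */
static int example_setsockopt(char __user *optval, unsigned int optlen)
{
	void *buf = memdup_user(optval, optlen);

	if (IS_ERR(buf))
		return PTR_ERR(buf);   /* -ENOMEM or -EFAULT */
	/* ... validate and consume buf ... */
	kfree(buf);
	return 0;
}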
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index d3ae493d234a..394c57ca2f54 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -211,15 +211,17 @@ void sctp_transport_set_owner(struct sctp_transport *transport,
 }
 
 /* Initialize the pmtu of a transport. */
-void sctp_transport_pmtu(struct sctp_transport *transport)
+void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
 {
-	struct dst_entry *dst;
-
-	dst = transport->af_specific->get_dst(NULL, &transport->ipaddr, NULL);
+	/* If we don't have a fresh route, look one up */
+	if (!transport->dst || transport->dst->obsolete > 1) {
+		dst_release(transport->dst);
+		transport->af_specific->get_dst(transport, &transport->saddr,
+						&transport->fl, sk);
+	}
 
-	if (dst) {
-		transport->pathmtu = dst_mtu(dst);
-		dst_release(dst);
+	if (transport->dst) {
+		transport->pathmtu = dst_mtu(transport->dst);
 	} else
 		transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
 }
@@ -270,22 +272,19 @@ void sctp_transport_route(struct sctp_transport *transport,
 {
 	struct sctp_association *asoc = transport->asoc;
 	struct sctp_af *af = transport->af_specific;
-	union sctp_addr *daddr = &transport->ipaddr;
-	struct dst_entry *dst;
 
-	dst = af->get_dst(asoc, daddr, saddr);
+	af->get_dst(transport, saddr, &transport->fl, sctp_opt2sk(opt));
 
 	if (saddr)
 		memcpy(&transport->saddr, saddr, sizeof(union sctp_addr));
 	else
-		af->get_saddr(opt, asoc, dst, daddr, &transport->saddr);
+		af->get_saddr(opt, transport, &transport->fl);
 
-	transport->dst = dst;
 	if ((transport->param_flags & SPP_PMTUD_DISABLE) && transport->pathmtu) {
 		return;
 	}
-	if (dst) {
-		transport->pathmtu = dst_mtu(dst);
+	if (transport->dst) {
+		transport->pathmtu = dst_mtu(transport->dst);
 
 		/* Initialize sk->sk_rcv_saddr, if the transport is the
 		 * association's active path for getsockname().
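After this change the looked-up route lives in transport->dst rather than being taken and released on every call. A sketch of the caching rule the new sctp_transport_pmtu() follows; the helper below is illustrative only, not part of the patch (dst_release() tolerates a NULL argument):

/* Keep the last route; only when the entry is missing or marked
 * obsolete by the routing layer do we drop the old reference and ask
 * the address family to refresh transport->dst. */
static struct dst_entry *cached_dst_get(struct sctp_transport *t,
					struct sock *sk)
{
	if (!t->dst || t->dst->obsolete > 1) {
		dst_release(t->dst);                   /* ok on NULL */
		t->af_specific->get_dst(t, &t->saddr,  /* refreshes t->dst */
					&t->fl, sk);
	}
	return t->dst;
}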
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 61b1f5ada96a..e70e5fc87890 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -843,7 +843,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_authkey(
 	ak = (struct sctp_authkey_event *)
 		skb_put(skb, sizeof(struct sctp_authkey_event));
 
-	ak->auth_type = SCTP_AUTHENTICATION_INDICATION;
+	ak->auth_type = SCTP_AUTHENTICATION_EVENT;
 	ak->auth_flags = 0;
 	ak->auth_length = sizeof(struct sctp_authkey_event);
 
@@ -862,6 +862,34 @@ fail:
 	return NULL;
 }
 
+/*
+ * Socket Extensions for SCTP
+ * 6.3.10. SCTP_SENDER_DRY_EVENT
+ */
+struct sctp_ulpevent *sctp_ulpevent_make_sender_dry_event(
+	const struct sctp_association *asoc, gfp_t gfp)
+{
+	struct sctp_ulpevent *event;
+	struct sctp_sender_dry_event *sdry;
+	struct sk_buff *skb;
+
+	event = sctp_ulpevent_new(sizeof(struct sctp_sender_dry_event),
+				  MSG_NOTIFICATION, gfp);
+	if (!event)
+		return NULL;
+
+	skb = sctp_event2skb(event);
+	sdry = (struct sctp_sender_dry_event *)
+		skb_put(skb, sizeof(struct sctp_sender_dry_event));
+
+	sdry->sender_dry_type = SCTP_SENDER_DRY_EVENT;
+	sdry->sender_dry_flags = 0;
+	sdry->sender_dry_length = sizeof(struct sctp_sender_dry_event);
+	sctp_ulpevent_set_owner(event, asoc);
+	sdry->sender_dry_assoc_id = sctp_assoc2id(asoc);
+
+	return event;
+}
 
 /* Return the notification type, assuming this is a notification
  * event.
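SCTP_SENDER_DRY_EVENT is delivered once an association has no remaining user data outstanding. A userspace sketch of subscribing to it; this assumes libc/SCTP headers that already carry the new sctp_sender_dry_event field (e.g. a matching lksctp-tools):

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

/* Enable the sender-dry notification on an SCTP socket fd. */
static int subscribe_sender_dry(int fd)
{
	struct sctp_event_subscribe events;

	memset(&events, 0, sizeof(events));
	events.sctp_sender_dry_event = 1;   /* fires when all queued data is acked */
	return setsockopt(fd, IPPROTO_SCTP, SCTP_EVENTS,
			  &events, sizeof(events));
}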
diff --git a/net/socket.c b/net/socket.c
index c2ed7c95ce87..02dc82db3d23 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -542,11 +542,10 @@ int sock_tx_timestamp(struct sock *sk, __u8 *tx_flags)
 }
 EXPORT_SYMBOL(sock_tx_timestamp);
 
-static inline int __sock_sendmsg(struct kiocb *iocb, struct socket *sock,
-				 struct msghdr *msg, size_t size)
+static inline int __sock_sendmsg_nosec(struct kiocb *iocb, struct socket *sock,
+				       struct msghdr *msg, size_t size)
 {
 	struct sock_iocb *si = kiocb_to_siocb(iocb);
-	int err;
 
 	sock_update_classid(sock->sk);
 
@@ -555,13 +554,17 @@ static inline int __sock_sendmsg(struct kiocb *iocb, struct socket *sock,
 	si->msg = msg;
 	si->size = size;
 
-	err = security_socket_sendmsg(sock, msg, size);
-	if (err)
-		return err;
-
 	return sock->ops->sendmsg(iocb, sock, msg, size);
 }
 
+static inline int __sock_sendmsg(struct kiocb *iocb, struct socket *sock,
+				 struct msghdr *msg, size_t size)
+{
+	int err = security_socket_sendmsg(sock, msg, size);
+
+	return err ?: __sock_sendmsg_nosec(iocb, sock, msg, size);
+}
+
 int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
 {
 	struct kiocb iocb;
@@ -577,6 +580,20 @@ int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
 }
 EXPORT_SYMBOL(sock_sendmsg);
 
+int sock_sendmsg_nosec(struct socket *sock, struct msghdr *msg, size_t size)
+{
+	struct kiocb iocb;
+	struct sock_iocb siocb;
+	int ret;
+
+	init_sync_kiocb(&iocb, NULL);
+	iocb.private = &siocb;
+	ret = __sock_sendmsg_nosec(&iocb, sock, msg, size);
+	if (-EIOCBQUEUED == ret)
+		ret = wait_on_sync_kiocb(&iocb);
+	return ret;
+}
+
 int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
 		   struct kvec *vec, size_t num, size_t size)
 {
@@ -1854,57 +1871,47 @@ SYSCALL_DEFINE2(shutdown, int, fd, int, how)
 #define COMPAT_NAMELEN(msg)	COMPAT_MSG(msg, msg_namelen)
 #define COMPAT_FLAGS(msg)	COMPAT_MSG(msg, msg_flags)
 
-/*
- *	BSD sendmsg interface
- */
-
-SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags)
+static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
+			 struct msghdr *msg_sys, unsigned flags, int nosec)
 {
 	struct compat_msghdr __user *msg_compat =
 	    (struct compat_msghdr __user *)msg;
-	struct socket *sock;
 	struct sockaddr_storage address;
 	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 	unsigned char ctl[sizeof(struct cmsghdr) + 20]
 	    __attribute__ ((aligned(sizeof(__kernel_size_t))));
 	/* 20 is size of ipv6_pktinfo */
 	unsigned char *ctl_buf = ctl;
-	struct msghdr msg_sys;
 	int err, ctl_len, iov_size, total_len;
-	int fput_needed;
 
 	err = -EFAULT;
 	if (MSG_CMSG_COMPAT & flags) {
-		if (get_compat_msghdr(&msg_sys, msg_compat))
+		if (get_compat_msghdr(msg_sys, msg_compat))
 			return -EFAULT;
-	} else if (copy_from_user(&msg_sys, msg, sizeof(struct msghdr)))
+	} else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr)))
 		return -EFAULT;
 
-	sock = sockfd_lookup_light(fd, &err, &fput_needed);
-	if (!sock)
-		goto out;
-
 	/* do not move before msg_sys is valid */
 	err = -EMSGSIZE;
-	if (msg_sys.msg_iovlen > UIO_MAXIOV)
-		goto out_put;
+	if (msg_sys->msg_iovlen > UIO_MAXIOV)
+		goto out;
 
 	/* Check whether to allocate the iovec area */
 	err = -ENOMEM;
-	iov_size = msg_sys.msg_iovlen * sizeof(struct iovec);
-	if (msg_sys.msg_iovlen > UIO_FASTIOV) {
+	iov_size = msg_sys->msg_iovlen * sizeof(struct iovec);
+	if (msg_sys->msg_iovlen > UIO_FASTIOV) {
 		iov = sock_kmalloc(sock->sk, iov_size, GFP_KERNEL);
 		if (!iov)
-			goto out_put;
+			goto out;
 	}
 
 	/* This will also move the address data into kernel space */
 	if (MSG_CMSG_COMPAT & flags) {
-		err = verify_compat_iovec(&msg_sys, iov,
+		err = verify_compat_iovec(msg_sys, iov,
					  (struct sockaddr *)&address,
					  VERIFY_READ);
 	} else
-		err = verify_iovec(&msg_sys, iov,
+		err = verify_iovec(msg_sys, iov,
				   (struct sockaddr *)&address,
				   VERIFY_READ);
 	if (err < 0)
@@ -1913,17 +1920,17 @@ SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags)
 
 	err = -ENOBUFS;
 
-	if (msg_sys.msg_controllen > INT_MAX)
+	if (msg_sys->msg_controllen > INT_MAX)
 		goto out_freeiov;
-	ctl_len = msg_sys.msg_controllen;
+	ctl_len = msg_sys->msg_controllen;
 	if ((MSG_CMSG_COMPAT & flags) && ctl_len) {
 		err =
-		    cmsghdr_from_user_compat_to_kern(&msg_sys, sock->sk, ctl,
+		    cmsghdr_from_user_compat_to_kern(msg_sys, sock->sk, ctl,
						     sizeof(ctl));
 		if (err)
 			goto out_freeiov;
-		ctl_buf = msg_sys.msg_control;
-		ctl_len = msg_sys.msg_controllen;
+		ctl_buf = msg_sys->msg_control;
+		ctl_len = msg_sys->msg_controllen;
 	} else if (ctl_len) {
 		if (ctl_len > sizeof(ctl)) {
 			ctl_buf = sock_kmalloc(sock->sk, ctl_len, GFP_KERNEL);
@@ -1932,21 +1939,22 @@ SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags)
 		}
 		err = -EFAULT;
 		/*
-		 * Careful! Before this, msg_sys.msg_control contains a user pointer.
+		 * Careful! Before this, msg_sys->msg_control contains a user pointer.
		 * Afterwards, it will be a kernel pointer. Thus the compiler-assisted
		 * checking falls down on this.
		 */
		if (copy_from_user(ctl_buf,
-				   (void __user __force *)msg_sys.msg_control,
+				   (void __user __force *)msg_sys->msg_control,
				   ctl_len))
			goto out_freectl;
-		msg_sys.msg_control = ctl_buf;
+		msg_sys->msg_control = ctl_buf;
	}
-	msg_sys.msg_flags = flags;
+	msg_sys->msg_flags = flags;
 
	if (sock->file->f_flags & O_NONBLOCK)
-		msg_sys.msg_flags |= MSG_DONTWAIT;
-	err = sock_sendmsg(sock, &msg_sys, total_len);
+		msg_sys->msg_flags |= MSG_DONTWAIT;
+	err = (nosec ? sock_sendmsg_nosec : sock_sendmsg)(sock, msg_sys,
+							  total_len);
 
 out_freectl:
	if (ctl_buf != ctl)
@@ -1954,12 +1962,114 @@ out_freectl:
 out_freeiov:
 	if (iov != iovstack)
 		sock_kfree_s(sock->sk, iov, iov_size);
-out_put:
+out:
+	return err;
+}
+
+/*
+ *	BSD sendmsg interface
+ */
+
+SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags)
+{
+	int fput_needed, err;
+	struct msghdr msg_sys;
+	struct socket *sock = sockfd_lookup_light(fd, &err, &fput_needed);
+
+	if (!sock)
+		goto out;
+
+	err = __sys_sendmsg(sock, msg, &msg_sys, flags, 0);
+
 	fput_light(sock->file, fput_needed);
 out:
 	return err;
 }
 
+/*
+ *	Linux sendmmsg interface
+ */
+
+int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
+		   unsigned int flags)
+{
+	int fput_needed, err, datagrams;
+	struct socket *sock;
+	struct mmsghdr __user *entry;
+	struct compat_mmsghdr __user *compat_entry;
+	struct msghdr msg_sys;
+
+	datagrams = 0;
+
+	sock = sockfd_lookup_light(fd, &err, &fput_needed);
+	if (!sock)
+		return err;
+
+	err = sock_error(sock->sk);
+	if (err)
+		goto out_put;
+
+	entry = mmsg;
+	compat_entry = (struct compat_mmsghdr __user *)mmsg;
+
+	while (datagrams < vlen) {
+		/*
+		 * No need to ask LSM for more than the first datagram.
+		 */
+		if (MSG_CMSG_COMPAT & flags) {
+			err = __sys_sendmsg(sock, (struct msghdr __user *)compat_entry,
+					    &msg_sys, flags, datagrams);
+			if (err < 0)
+				break;
+			err = __put_user(err, &compat_entry->msg_len);
+			++compat_entry;
+		} else {
+			err = __sys_sendmsg(sock, (struct msghdr __user *)entry,
+					    &msg_sys, flags, datagrams);
+			if (err < 0)
+				break;
+			err = put_user(err, &entry->msg_len);
+			++entry;
+		}
+
+		if (err)
+			break;
+		++datagrams;
+	}
+
+out_put:
+	fput_light(sock->file, fput_needed);
+
+	if (err == 0)
+		return datagrams;
+
+	if (datagrams != 0) {
+		/*
+		 * We may send less entries than requested (vlen) if the
+		 * sock is non blocking...
+		 */
+		if (err != -EAGAIN) {
+			/*
+			 * ... or if sendmsg returns an error after we
+			 * send some datagrams, where we record the
+			 * error to return on the next call or if the
+			 * app asks about it using getsockopt(SO_ERROR).
+			 */
+			sock->sk->sk_err = -err;
+		}
+
+		return datagrams;
+	}
+
+	return err;
+}
+
+SYSCALL_DEFINE4(sendmmsg, int, fd, struct mmsghdr __user *, mmsg,
+		unsigned int, vlen, unsigned int, flags)
+{
+	return __sys_sendmmsg(fd, mmsg, vlen, flags);
+}
+
 static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
 			 struct msghdr *msg_sys, unsigned flags, int nosec)
 {
@@ -2113,14 +2223,16 @@ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
 		 */
 		if (MSG_CMSG_COMPAT & flags) {
 			err = __sys_recvmsg(sock, (struct msghdr __user *)compat_entry,
-					    &msg_sys, flags, datagrams);
+					    &msg_sys, flags & ~MSG_WAITFORONE,
+					    datagrams);
 			if (err < 0)
 				break;
 			err = __put_user(err, &compat_entry->msg_len);
 			++compat_entry;
 		} else {
 			err = __sys_recvmsg(sock, (struct msghdr __user *)entry,
-					    &msg_sys, flags, datagrams);
+					    &msg_sys, flags & ~MSG_WAITFORONE,
+					    datagrams);
 			if (err < 0)
 				break;
 			err = put_user(err, &entry->msg_len);
@@ -2205,11 +2317,11 @@ SYSCALL_DEFINE5(recvmmsg, int, fd, struct mmsghdr __user *, mmsg,
 #ifdef __ARCH_WANT_SYS_SOCKETCALL
 /* Argument list sizes for sys_socketcall */
 #define AL(x) ((x) * sizeof(unsigned long))
-static const unsigned char nargs[20] = {
+static const unsigned char nargs[21] = {
 	AL(0), AL(3), AL(3), AL(3), AL(2), AL(3),
 	AL(3), AL(3), AL(4), AL(4), AL(4), AL(6),
 	AL(6), AL(2), AL(5), AL(5), AL(3), AL(3),
-	AL(4), AL(5)
+	AL(4), AL(5), AL(4)
 };
 
 #undef AL
@@ -2229,7 +2341,7 @@ SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args)
 	int err;
 	unsigned int len;
 
-	if (call < 1 || call > SYS_RECVMMSG)
+	if (call < 1 || call > SYS_SENDMMSG)
 		return -EINVAL;
 
 	len = nargs[call];
@@ -2304,6 +2416,9 @@ SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args)
 	case SYS_SENDMSG:
 		err = sys_sendmsg(a0, (struct msghdr __user *)a1, a[2]);
 		break;
+	case SYS_SENDMMSG:
+		err = sys_sendmmsg(a0, (struct mmsghdr __user *)a1, a[2], a[3]);
+		break;
 	case SYS_RECVMSG:
 		err = sys_recvmsg(a0, (struct msghdr __user *)a1, a[2]);
 		break;
@@ -2634,13 +2749,13 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
 		return -EFAULT;
 
 	if (convert_in) {
-		/* We expect there to be holes between fs.m_u and
+		/* We expect there to be holes between fs.m_ext and
 		 * fs.ring_cookie and at the end of fs, but nowhere else.
 		 */
-		BUILD_BUG_ON(offsetof(struct compat_ethtool_rxnfc, fs.m_u) +
-			     sizeof(compat_rxnfc->fs.m_u) !=
-			     offsetof(struct ethtool_rxnfc, fs.m_u) +
-			     sizeof(rxnfc->fs.m_u));
+		BUILD_BUG_ON(offsetof(struct compat_ethtool_rxnfc, fs.m_ext) +
+			     sizeof(compat_rxnfc->fs.m_ext) !=
+			     offsetof(struct ethtool_rxnfc, fs.m_ext) +
+			     sizeof(rxnfc->fs.m_ext));
 		BUILD_BUG_ON(
 			offsetof(struct compat_ethtool_rxnfc, fs.location) -
 			offsetof(struct compat_ethtool_rxnfc, fs.ring_cookie) !=
@@ -2648,7 +2763,7 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
 			offsetof(struct ethtool_rxnfc, fs.ring_cookie));
 
 		if (copy_in_user(rxnfc, compat_rxnfc,
-				 (void *)(&rxnfc->fs.m_u + 1) -
+				 (void *)(&rxnfc->fs.m_ext + 1) -
 				 (void *)rxnfc) ||
 		    copy_in_user(&rxnfc->fs.ring_cookie,
				 &compat_rxnfc->fs.ring_cookie,
@@ -2665,7 +2780,7 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
 
 	if (convert_out) {
 		if (copy_in_user(compat_rxnfc, rxnfc,
-				 (const void *)(&rxnfc->fs.m_u + 1) -
+				 (const void *)(&rxnfc->fs.m_ext + 1) -
 				 (const void *)rxnfc) ||
 		    copy_in_user(&compat_rxnfc->fs.ring_cookie,
				 &rxnfc->fs.ring_cookie,
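sendmmsg() is the transmit-side twin of recvmmsg(): one system call sends an array of mmsghdr, per-message byte counts come back in each msg_len, and the LSM hook is consulted only for the first datagram. A userspace sketch of calling it, assuming a kernel and libc with sendmmsg support (glibc 2.14+, _GNU_SOURCE):

#define _GNU_SOURCE
#include <string.h>
#include <sys/socket.h>

/* Batch two datagrams on a connected socket fd in one syscall.
 * Returns how many messages were actually sent (may be < 2). */
static int send_two(int fd)
{
	struct mmsghdr msgs[2];
	struct iovec iov[2];

	memset(msgs, 0, sizeof(msgs));
	iov[0].iov_base = "one"; iov[0].iov_len = 3;
	iov[1].iov_base = "two"; iov[1].iov_len = 3;
	msgs[0].msg_hdr.msg_iov = &iov[0]; msgs[0].msg_hdr.msg_iovlen = 1;
	msgs[1].msg_hdr.msg_iov = &iov[1]; msgs[1].msg_hdr.msg_iovlen = 1;

	return sendmmsg(fd, msgs, 2, 0);   /* per-message sizes in msgs[i].msg_len */
}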
diff --git a/net/tipc/addr.h b/net/tipc/addr.h
index 8971aba99aea..e4f35afe3207 100644
--- a/net/tipc/addr.h
+++ b/net/tipc/addr.h
@@ -37,14 +37,17 @@
 #ifndef _TIPC_ADDR_H
 #define _TIPC_ADDR_H
 
+#define TIPC_ZONE_MASK		0xff000000u
+#define TIPC_CLUSTER_MASK	0xfffff000u
+
 static inline u32 tipc_zone_mask(u32 addr)
 {
-	return addr & 0xff000000u;
+	return addr & TIPC_ZONE_MASK;
 }
 
 static inline u32 tipc_cluster_mask(u32 addr)
 {
-	return addr & 0xfffff000u;
+	return addr & TIPC_CLUSTER_MASK;
 }
 
 static inline int in_own_cluster(u32 addr)
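The two new masks just name the <zone.cluster.node> packing of a TIPC network address. A standalone sketch of what each mask selects (the sample address is illustrative):

#include <stdio.h>
#include <stdint.h>

#define TIPC_ZONE_MASK    0xff000000u
#define TIPC_CLUSTER_MASK 0xfffff000u

/* TIPC packs <zone.cluster.node> into one u32: 8 zone bits,
 * 12 cluster bits, 12 node bits. */
int main(void)
{
	uint32_t addr = (1u << 24) | (1u << 12) | 10;   /* <1.1.10> */

	printf("zone bits    %#x\n", addr & TIPC_ZONE_MASK);
	printf("cluster bits %#x\n", addr & TIPC_CLUSTER_MASK);
	return 0;
}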
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 7dc1dc7151ea..fa68d1e9ff4b 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -44,13 +44,6 @@
 
 #define BCLINK_WIN_DEFAULT 20		/* bcast link window size (default) */
 
-/*
- * Loss rate for incoming broadcast frames; used to test retransmission code.
- * Set to N to cause every N'th frame to be discarded; 0 => don't discard any.
- */
-
-#define TIPC_BCAST_LOSS_RATE 0
-
 /**
  * struct bcbearer_pair - a pair of bearers used by broadcast link
  * @primary: pointer to primary bearer
@@ -414,9 +407,7 @@ int tipc_bclink_send_msg(struct sk_buff *buf)
 	spin_lock_bh(&bc_lock);
 
 	res = tipc_link_send_buf(bcl, buf);
-	if (unlikely(res == -ELINKCONG))
-		buf_discard(buf);
-	else
+	if (likely(res > 0))
 		bclink_set_last_sent();
 
 	bcl->stats.queue_sz_counts++;
@@ -434,9 +425,6 @@ int tipc_bclink_send_msg(struct sk_buff *buf)
 
 void tipc_bclink_recv_pkt(struct sk_buff *buf)
 {
-#if (TIPC_BCAST_LOSS_RATE)
-	static int rx_count;
-#endif
 	struct tipc_msg *msg = buf_msg(buf);
 	struct tipc_node *node = tipc_node_find(msg_prevnode(msg));
 	u32 next_in;
@@ -470,14 +458,6 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf)
 		return;
 	}
 
-#if (TIPC_BCAST_LOSS_RATE)
-	if (++rx_count == TIPC_BCAST_LOSS_RATE) {
-		rx_count = 0;
-		buf_discard(buf);
-		return;
-	}
-#endif
-
 	tipc_node_lock(node);
 receive:
 	deferred = node->bclink.deferred_head;
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 411719feb803..85209eadfae6 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -46,6 +46,8 @@ static u32 media_count;
 
 struct tipc_bearer tipc_bearers[MAX_BEARERS];
 
+static void bearer_disable(struct tipc_bearer *b_ptr);
+
 /**
  * media_name_valid - validate media name
 *
@@ -342,15 +344,15 @@ struct sk_buff *tipc_bearer_get_names(void)
 void tipc_bearer_add_dest(struct tipc_bearer *b_ptr, u32 dest)
 {
 	tipc_nmap_add(&b_ptr->nodes, dest);
-	tipc_disc_update_link_req(b_ptr->link_req);
 	tipc_bcbearer_sort();
+	tipc_disc_add_dest(b_ptr->link_req);
 }
 
 void tipc_bearer_remove_dest(struct tipc_bearer *b_ptr, u32 dest)
 {
 	tipc_nmap_remove(&b_ptr->nodes, dest);
-	tipc_disc_update_link_req(b_ptr->link_req);
 	tipc_bcbearer_sort();
+	tipc_disc_remove_dest(b_ptr->link_req);
 }
 
 /*
@@ -493,8 +495,15 @@ int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
 		warn("Bearer <%s> rejected, illegal name\n", name);
 		return -EINVAL;
 	}
-	if (!tipc_addr_domain_valid(disc_domain) ||
-	    !tipc_in_scope(disc_domain, tipc_own_addr)) {
+	if (tipc_addr_domain_valid(disc_domain) &&
+	    (disc_domain != tipc_own_addr)) {
+		if (tipc_in_scope(disc_domain, tipc_own_addr)) {
+			disc_domain = tipc_own_addr & TIPC_CLUSTER_MASK;
+			res = 0;   /* accept any node in own cluster */
+		} else if (in_own_cluster(disc_domain))
+			res = 0;   /* accept specified node in own cluster */
+	}
+	if (res) {
 		warn("Bearer <%s> rejected, illegal discovery domain\n", name);
 		return -EINVAL;
 	}
@@ -511,7 +520,7 @@ int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
 	if (!m_ptr) {
 		warn("Bearer <%s> rejected, media <%s> not registered\n", name,
 		     b_name.media_name);
-		goto failed;
+		goto exit;
 	}
 
 	if (priority == TIPC_MEDIA_LINK_PRI)
@@ -527,14 +536,14 @@ restart:
 		}
 		if (!strcmp(name, tipc_bearers[i].name)) {
 			warn("Bearer <%s> rejected, already enabled\n", name);
-			goto failed;
+			goto exit;
 		}
 		if ((tipc_bearers[i].priority == priority) &&
 		    (++with_this_prio > 2)) {
 			if (priority-- == 0) {
 				warn("Bearer <%s> rejected, duplicate priority\n",
 				     name);
-				goto failed;
+				goto exit;
 			}
 			warn("Bearer <%s> priority adjustment required %u->%u\n",
 			     name, priority + 1, priority);
@@ -544,7 +553,7 @@ restart:
 	if (bearer_id >= MAX_BEARERS) {
 		warn("Bearer <%s> rejected, bearer limit reached (%u)\n",
 		     name, MAX_BEARERS);
-		goto failed;
+		goto exit;
 	}
 
 	b_ptr = &tipc_bearers[bearer_id];
@@ -552,7 +561,7 @@ restart:
 	res = m_ptr->enable_bearer(b_ptr);
 	if (res) {
 		warn("Bearer <%s> rejected, enable failure (%d)\n", name, -res);
-		goto failed;
+		goto exit;
 	}
 
 	b_ptr->identity = bearer_id;
@@ -562,14 +571,18 @@ restart:
 	b_ptr->priority = priority;
 	INIT_LIST_HEAD(&b_ptr->cong_links);
 	INIT_LIST_HEAD(&b_ptr->links);
-	b_ptr->link_req = tipc_disc_init_link_req(b_ptr, &m_ptr->bcast_addr,
-						  disc_domain);
 	spin_lock_init(&b_ptr->lock);
-	write_unlock_bh(&tipc_net_lock);
+
+	res = tipc_disc_create(b_ptr, &m_ptr->bcast_addr, disc_domain);
+	if (res) {
+		bearer_disable(b_ptr);
+		warn("Bearer <%s> rejected, discovery object creation failed\n",
+		     name);
+		goto exit;
+	}
 	info("Enabled bearer <%s>, discovery domain %s, priority %u\n",
 	     name, tipc_addr_string_fill(addr_string, disc_domain), priority);
-	return 0;
-failed:
+exit:
 	write_unlock_bh(&tipc_net_lock);
 	return res;
 }
@@ -620,14 +633,14 @@ static void bearer_disable(struct tipc_bearer *b_ptr)
 	struct link *temp_l_ptr;
 
 	info("Disabling bearer <%s>\n", b_ptr->name);
-	tipc_disc_stop_link_req(b_ptr->link_req);
 	spin_lock_bh(&b_ptr->lock);
-	b_ptr->link_req = NULL;
 	b_ptr->blocked = 1;
 	b_ptr->media->disable_bearer(b_ptr);
 	list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
 		tipc_link_delete(l_ptr);
 	}
+	if (b_ptr->link_req)
+		tipc_disc_delete(b_ptr->link_req);
 	spin_unlock_bh(&b_ptr->lock);
 	memset(b_ptr, 0, sizeof(struct tipc_bearer));
 }
diff --git a/net/tipc/core.c b/net/tipc/core.c
index c9a73e7763f6..943b6af84265 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -179,8 +179,7 @@ static int __init tipc_init(void)
 	if (tipc_log_resize(CONFIG_TIPC_LOG) != 0)
 		warn("Unable to create log buffer\n");
 
-	info("Activated (version " TIPC_MOD_VER
-	     " compiled " __DATE__ " " __TIME__ ")\n");
+	info("Activated (version " TIPC_MOD_VER ")\n");
 
 	tipc_own_addr = 0;
 	tipc_remote_management = 1;
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index 491eff56b9da..0987933155b9 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -39,19 +39,17 @@
 #include "discover.h"
 
 #define TIPC_LINK_REQ_INIT	125	/* min delay during bearer start up */
-#define TIPC_LINK_REQ_FAST	2000	/* normal delay if bearer has no links */
-#define TIPC_LINK_REQ_SLOW	600000	/* normal delay if bearer has links */
-
-/*
- * TODO: Most of the inter-cluster setup stuff should be
- * rewritten, and be made conformant with specification.
- */
+#define TIPC_LINK_REQ_FAST	1000	/* max delay if bearer has no links */
+#define TIPC_LINK_REQ_SLOW	60000	/* max delay if bearer has links */
+#define TIPC_LINK_REQ_INACTIVE	0xffffffff /* indicates no timer in use */
 
 
 /**
  * struct link_req - information about an ongoing link setup request
  * @bearer: bearer issuing requests
  * @dest: destination address for request messages
+ * @domain: network domain to which links can be established
+ * @num_nodes: number of nodes currently discovered (i.e. with an active link)
  * @buf: request message to be (repeatedly) sent
  * @timer: timer governing period between requests
  * @timer_intv: current interval between requests (in ms)
@@ -59,6 +57,8 @@
 struct link_req {
 	struct tipc_bearer *bearer;
 	struct tipc_media_addr dest;
+	u32 domain;
+	int num_nodes;
 	struct sk_buff *buf;
 	struct timer_list timer;
 	unsigned int timer_intv;
@@ -147,7 +147,7 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr)
 	}
 	if (!tipc_in_scope(dest, tipc_own_addr))
 		return;
-	if (!in_own_cluster(orig))
+	if (!tipc_in_scope(b_ptr->link_req->domain, orig))
 		return;
 
 	/* Locate structure corresponding to requesting node */
@@ -214,44 +214,54 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr)
 }
 
 /**
- * tipc_disc_stop_link_req - stop sending periodic link setup requests
+ * disc_update - update frequency of periodic link setup requests
  * @req: ptr to link request structure
+ *
+ * Reinitiates discovery process if discovery object has no associated nodes
+ * and is either not currently searching or is searching at a slow rate
 */
 
-void tipc_disc_stop_link_req(struct link_req *req)
+static void disc_update(struct link_req *req)
 {
-	if (!req)
-		return;
+	if (!req->num_nodes) {
+		if ((req->timer_intv == TIPC_LINK_REQ_INACTIVE) ||
+		    (req->timer_intv > TIPC_LINK_REQ_FAST)) {
+			req->timer_intv = TIPC_LINK_REQ_INIT;
+			k_start_timer(&req->timer, req->timer_intv);
+		}
+	}
+}
 
-	k_cancel_timer(&req->timer);
-	k_term_timer(&req->timer);
-	buf_discard(req->buf);
-	kfree(req);
+/**
+ * tipc_disc_add_dest - increment set of discovered nodes
+ * @req: ptr to link request structure
+ */
+
+void tipc_disc_add_dest(struct link_req *req)
+{
+	req->num_nodes++;
 }
 
 /**
- * tipc_disc_update_link_req - update frequency of periodic link setup requests
+ * tipc_disc_remove_dest - decrement set of discovered nodes
  * @req: ptr to link request structure
 */
 
-void tipc_disc_update_link_req(struct link_req *req)
+void tipc_disc_remove_dest(struct link_req *req)
 {
-	if (!req)
-		return;
+	req->num_nodes--;
+	disc_update(req);
+}
 
-	if (req->timer_intv == TIPC_LINK_REQ_SLOW) {
-		if (!req->bearer->nodes.count) {
-			req->timer_intv = TIPC_LINK_REQ_FAST;
-			k_start_timer(&req->timer, req->timer_intv);
-		}
-	} else if (req->timer_intv == TIPC_LINK_REQ_FAST) {
-		if (req->bearer->nodes.count) {
-			req->timer_intv = TIPC_LINK_REQ_SLOW;
-			k_start_timer(&req->timer, req->timer_intv);
-		}
-	} else {
-		/* leave timer "as is" if haven't yet reached a "normal" rate */
-	}
+/**
+ * disc_send_msg - send link setup request message
+ * @req: ptr to link request structure
+ */
+
+static void disc_send_msg(struct link_req *req)
+{
+	if (!req->bearer->blocked)
+		tipc_bearer_send(req->bearer, req->buf, &req->dest);
 }
 
 /**
@@ -263,56 +273,86 @@ void tipc_disc_update_link_req(struct link_req *req)
 
 static void disc_timeout(struct link_req *req)
 {
+	int max_delay;
+
 	spin_lock_bh(&req->bearer->lock);
 
-	req->bearer->media->send_msg(req->buf, req->bearer, &req->dest);
-
-	if ((req->timer_intv == TIPC_LINK_REQ_SLOW) ||
-	    (req->timer_intv == TIPC_LINK_REQ_FAST)) {
-		/* leave timer interval "as is" if already at a "normal" rate */
-	} else {
-		req->timer_intv *= 2;
-		if (req->timer_intv > TIPC_LINK_REQ_FAST)
-			req->timer_intv = TIPC_LINK_REQ_FAST;
-		if ((req->timer_intv == TIPC_LINK_REQ_FAST) &&
-		    (req->bearer->nodes.count))
-			req->timer_intv = TIPC_LINK_REQ_SLOW;
+	/* Stop searching if only desired node has been found */
+
+	if (tipc_node(req->domain) && req->num_nodes) {
+		req->timer_intv = TIPC_LINK_REQ_INACTIVE;
+		goto exit;
280 } 285 }
281 k_start_timer(&req->timer, req->timer_intv);
282 286
287 /*
288 * Send discovery message, then update discovery timer
289 *
290 * Keep doubling time between requests until limit is reached;
 291 * hold at the fast polling rate if there are no associated nodes,
 292 * otherwise hold at the slow polling rate
293 */
294
295 disc_send_msg(req);
296
297 req->timer_intv *= 2;
298 if (req->num_nodes)
299 max_delay = TIPC_LINK_REQ_SLOW;
300 else
301 max_delay = TIPC_LINK_REQ_FAST;
302 if (req->timer_intv > max_delay)
303 req->timer_intv = max_delay;
304
305 k_start_timer(&req->timer, req->timer_intv);
306exit:
283 spin_unlock_bh(&req->bearer->lock); 307 spin_unlock_bh(&req->bearer->lock);
284} 308}
285 309
286/** 310/**
287 * tipc_disc_init_link_req - start sending periodic link setup requests 311 * tipc_disc_create - create object to send periodic link setup requests
288 * @b_ptr: ptr to bearer issuing requests 312 * @b_ptr: ptr to bearer issuing requests
289 * @dest: destination address for request messages 313 * @dest: destination address for request messages
290 * @dest_domain: network domain of node(s) which should respond to message 314 * @dest_domain: network domain to which links can be established
291 * 315 *
292 * Returns pointer to link request structure, or NULL if unable to create. 316 * Returns 0 if successful, otherwise -errno.
293 */ 317 */
294 318
295struct link_req *tipc_disc_init_link_req(struct tipc_bearer *b_ptr, 319int tipc_disc_create(struct tipc_bearer *b_ptr,
296 const struct tipc_media_addr *dest, 320 struct tipc_media_addr *dest, u32 dest_domain)
297 u32 dest_domain)
298{ 321{
299 struct link_req *req; 322 struct link_req *req;
300 323
301 req = kmalloc(sizeof(*req), GFP_ATOMIC); 324 req = kmalloc(sizeof(*req), GFP_ATOMIC);
302 if (!req) 325 if (!req)
303 return NULL; 326 return -ENOMEM;
304 327
305 req->buf = tipc_disc_init_msg(DSC_REQ_MSG, dest_domain, b_ptr); 328 req->buf = tipc_disc_init_msg(DSC_REQ_MSG, dest_domain, b_ptr);
306 if (!req->buf) { 329 if (!req->buf) {
307 kfree(req); 330 kfree(req);
308 return NULL; 331 return -ENOMSG;
309 } 332 }
310 333
311 memcpy(&req->dest, dest, sizeof(*dest)); 334 memcpy(&req->dest, dest, sizeof(*dest));
312 req->bearer = b_ptr; 335 req->bearer = b_ptr;
336 req->domain = dest_domain;
337 req->num_nodes = 0;
313 req->timer_intv = TIPC_LINK_REQ_INIT; 338 req->timer_intv = TIPC_LINK_REQ_INIT;
314 k_init_timer(&req->timer, (Handler)disc_timeout, (unsigned long)req); 339 k_init_timer(&req->timer, (Handler)disc_timeout, (unsigned long)req);
315 k_start_timer(&req->timer, req->timer_intv); 340 k_start_timer(&req->timer, req->timer_intv);
316 return req; 341 b_ptr->link_req = req;
342 disc_send_msg(req);
343 return 0;
344}
345
346/**
347 * tipc_disc_delete - destroy object sending periodic link setup requests
348 * @req: ptr to link request structure
349 */
350
351void tipc_disc_delete(struct link_req *req)
352{
353 k_cancel_timer(&req->timer);
354 k_term_timer(&req->timer);
355 buf_discard(req->buf);
356 kfree(req);
317} 357}
318 358
diff --git a/net/tipc/discover.h b/net/tipc/discover.h
index e48a167e47b2..a3af595b86cb 100644
--- a/net/tipc/discover.h
+++ b/net/tipc/discover.h
@@ -39,12 +39,11 @@
39 39
40struct link_req; 40struct link_req;
41 41
42struct link_req *tipc_disc_init_link_req(struct tipc_bearer *b_ptr, 42int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest,
43 const struct tipc_media_addr *dest, 43 u32 dest_domain);
44 u32 dest_domain); 44void tipc_disc_delete(struct link_req *req);
45void tipc_disc_update_link_req(struct link_req *req); 45void tipc_disc_add_dest(struct link_req *req);
46void tipc_disc_stop_link_req(struct link_req *req); 46void tipc_disc_remove_dest(struct link_req *req);
47
48void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr); 47void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr);
49 48
50#endif 49#endif
diff --git a/net/tipc/link.c b/net/tipc/link.c
index ebf338f7b14e..5ed4b4f7452d 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -92,7 +92,8 @@ static int link_recv_changeover_msg(struct link **l_ptr, struct sk_buff **buf);
92static void link_set_supervision_props(struct link *l_ptr, u32 tolerance); 92static void link_set_supervision_props(struct link *l_ptr, u32 tolerance);
93static int link_send_sections_long(struct tipc_port *sender, 93static int link_send_sections_long(struct tipc_port *sender,
94 struct iovec const *msg_sect, 94 struct iovec const *msg_sect,
95 u32 num_sect, u32 destnode); 95 u32 num_sect, unsigned int total_len,
96 u32 destnode);
96static void link_check_defragm_bufs(struct link *l_ptr); 97static void link_check_defragm_bufs(struct link *l_ptr);
97static void link_state_event(struct link *l_ptr, u32 event); 98static void link_state_event(struct link *l_ptr, u32 event);
98static void link_reset_statistics(struct link *l_ptr); 99static void link_reset_statistics(struct link *l_ptr);
@@ -842,6 +843,25 @@ static void link_add_to_outqueue(struct link *l_ptr,
842 l_ptr->stats.max_queue_sz = l_ptr->out_queue_size; 843 l_ptr->stats.max_queue_sz = l_ptr->out_queue_size;
843} 844}
844 845
846static void link_add_chain_to_outqueue(struct link *l_ptr,
847 struct sk_buff *buf_chain,
848 u32 long_msgno)
849{
850 struct sk_buff *buf;
851 struct tipc_msg *msg;
852
853 if (!l_ptr->next_out)
854 l_ptr->next_out = buf_chain;
855 while (buf_chain) {
856 buf = buf_chain;
857 buf_chain = buf_chain->next;
858
859 msg = buf_msg(buf);
860 msg_set_long_msgno(msg, long_msgno);
861 link_add_to_outqueue(l_ptr, buf, msg);
862 }
863}
864
845/* 865/*
846 * tipc_link_send_buf() is the 'full path' for messages, called from 866 * tipc_link_send_buf() is the 'full path' for messages, called from
847 * inside TIPC when the 'fast path' in tipc_send_buf 867 * inside TIPC when the 'fast path' in tipc_send_buf
@@ -864,8 +884,9 @@ int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf)
864 884
865 if (unlikely(queue_size >= queue_limit)) { 885 if (unlikely(queue_size >= queue_limit)) {
866 if (imp <= TIPC_CRITICAL_IMPORTANCE) { 886 if (imp <= TIPC_CRITICAL_IMPORTANCE) {
867 return link_schedule_port(l_ptr, msg_origport(msg), 887 link_schedule_port(l_ptr, msg_origport(msg), size);
868 size); 888 buf_discard(buf);
889 return -ELINKCONG;
869 } 890 }
870 buf_discard(buf); 891 buf_discard(buf);
871 if (imp > CONN_MANAGER) { 892 if (imp > CONN_MANAGER) {
@@ -1042,6 +1063,7 @@ int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode)
1042int tipc_link_send_sections_fast(struct tipc_port *sender, 1063int tipc_link_send_sections_fast(struct tipc_port *sender,
1043 struct iovec const *msg_sect, 1064 struct iovec const *msg_sect,
1044 const u32 num_sect, 1065 const u32 num_sect,
1066 unsigned int total_len,
1045 u32 destaddr) 1067 u32 destaddr)
1046{ 1068{
1047 struct tipc_msg *hdr = &sender->phdr; 1069 struct tipc_msg *hdr = &sender->phdr;
@@ -1057,8 +1079,8 @@ again:
1057 * (Must not hold any locks while building message.) 1079 * (Must not hold any locks while building message.)
1058 */ 1080 */
1059 1081
1060 res = tipc_msg_build(hdr, msg_sect, num_sect, sender->max_pkt, 1082 res = tipc_msg_build(hdr, msg_sect, num_sect, total_len,
1061 !sender->user_port, &buf); 1083 sender->max_pkt, !sender->user_port, &buf);
1062 1084
1063 read_lock_bh(&tipc_net_lock); 1085 read_lock_bh(&tipc_net_lock);
1064 node = tipc_node_find(destaddr); 1086 node = tipc_node_find(destaddr);
@@ -1069,8 +1091,6 @@ again:
1069 if (likely(buf)) { 1091 if (likely(buf)) {
1070 res = link_send_buf_fast(l_ptr, buf, 1092 res = link_send_buf_fast(l_ptr, buf,
1071 &sender->max_pkt); 1093 &sender->max_pkt);
1072 if (unlikely(res < 0))
1073 buf_discard(buf);
1074exit: 1094exit:
1075 tipc_node_unlock(node); 1095 tipc_node_unlock(node);
1076 read_unlock_bh(&tipc_net_lock); 1096 read_unlock_bh(&tipc_net_lock);
@@ -1105,7 +1125,8 @@ exit:
1105 goto again; 1125 goto again;
1106 1126
1107 return link_send_sections_long(sender, msg_sect, 1127 return link_send_sections_long(sender, msg_sect,
1108 num_sect, destaddr); 1128 num_sect, total_len,
1129 destaddr);
1109 } 1130 }
1110 tipc_node_unlock(node); 1131 tipc_node_unlock(node);
1111 } 1132 }
@@ -1117,7 +1138,7 @@ exit:
1117 return tipc_reject_msg(buf, TIPC_ERR_NO_NODE); 1138 return tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
1118 if (res >= 0) 1139 if (res >= 0)
1119 return tipc_port_reject_sections(sender, hdr, msg_sect, num_sect, 1140 return tipc_port_reject_sections(sender, hdr, msg_sect, num_sect,
1120 TIPC_ERR_NO_NODE); 1141 total_len, TIPC_ERR_NO_NODE);
1121 return res; 1142 return res;
1122} 1143}
1123 1144
@@ -1138,12 +1159,13 @@ exit:
1138static int link_send_sections_long(struct tipc_port *sender, 1159static int link_send_sections_long(struct tipc_port *sender,
1139 struct iovec const *msg_sect, 1160 struct iovec const *msg_sect,
1140 u32 num_sect, 1161 u32 num_sect,
1162 unsigned int total_len,
1141 u32 destaddr) 1163 u32 destaddr)
1142{ 1164{
1143 struct link *l_ptr; 1165 struct link *l_ptr;
1144 struct tipc_node *node; 1166 struct tipc_node *node;
1145 struct tipc_msg *hdr = &sender->phdr; 1167 struct tipc_msg *hdr = &sender->phdr;
1146 u32 dsz = msg_data_sz(hdr); 1168 u32 dsz = total_len;
1147 u32 max_pkt, fragm_sz, rest; 1169 u32 max_pkt, fragm_sz, rest;
1148 struct tipc_msg fragm_hdr; 1170 struct tipc_msg fragm_hdr;
1149 struct sk_buff *buf, *buf_chain, *prev; 1171 struct sk_buff *buf, *buf_chain, *prev;
@@ -1169,7 +1191,6 @@ again:
1169 1191
1170 tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT, 1192 tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
1171 INT_H_SIZE, msg_destnode(hdr)); 1193 INT_H_SIZE, msg_destnode(hdr));
1172 msg_set_link_selector(&fragm_hdr, sender->ref);
1173 msg_set_size(&fragm_hdr, max_pkt); 1194 msg_set_size(&fragm_hdr, max_pkt);
1174 msg_set_fragm_no(&fragm_hdr, 1); 1195 msg_set_fragm_no(&fragm_hdr, 1);
1175 1196
@@ -1271,28 +1292,15 @@ reject:
1271 buf_discard(buf_chain); 1292 buf_discard(buf_chain);
1272 } 1293 }
1273 return tipc_port_reject_sections(sender, hdr, msg_sect, num_sect, 1294 return tipc_port_reject_sections(sender, hdr, msg_sect, num_sect,
1274 TIPC_ERR_NO_NODE); 1295 total_len, TIPC_ERR_NO_NODE);
1275 } 1296 }
1276 1297
1277 /* Append whole chain to send queue: */ 1298 /* Append chain of fragments to send queue & send them */
1278 1299
1279 buf = buf_chain; 1300 l_ptr->long_msg_seq_no++;
1280 l_ptr->long_msg_seq_no = mod(l_ptr->long_msg_seq_no + 1); 1301 link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
1281 if (!l_ptr->next_out) 1302 l_ptr->stats.sent_fragments += fragm_no;
1282 l_ptr->next_out = buf_chain;
1283 l_ptr->stats.sent_fragmented++; 1303 l_ptr->stats.sent_fragmented++;
1284 while (buf) {
1285 struct sk_buff *next = buf->next;
1286 struct tipc_msg *msg = buf_msg(buf);
1287
1288 l_ptr->stats.sent_fragments++;
1289 msg_set_long_msgno(msg, l_ptr->long_msg_seq_no);
1290 link_add_to_outqueue(l_ptr, buf, msg);
1291 buf = next;
1292 }
1293
1294 /* Send it, if possible: */
1295
1296 tipc_link_push_queue(l_ptr); 1304 tipc_link_push_queue(l_ptr);
1297 tipc_node_unlock(node); 1305 tipc_node_unlock(node);
1298 return dsz; 1306 return dsz;
@@ -2407,6 +2415,8 @@ void tipc_link_recv_bundle(struct sk_buff *buf)
2407 */ 2415 */
2408static int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf) 2416static int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
2409{ 2417{
2418 struct sk_buff *buf_chain = NULL;
2419 struct sk_buff *buf_chain_tail = (struct sk_buff *)&buf_chain;
2410 struct tipc_msg *inmsg = buf_msg(buf); 2420 struct tipc_msg *inmsg = buf_msg(buf);
2411 struct tipc_msg fragm_hdr; 2421 struct tipc_msg fragm_hdr;
2412 u32 insize = msg_size(inmsg); 2422 u32 insize = msg_size(inmsg);
@@ -2415,7 +2425,7 @@ static int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
2415 u32 rest = insize; 2425 u32 rest = insize;
2416 u32 pack_sz = l_ptr->max_pkt; 2426 u32 pack_sz = l_ptr->max_pkt;
2417 u32 fragm_sz = pack_sz - INT_H_SIZE; 2427 u32 fragm_sz = pack_sz - INT_H_SIZE;
2418 u32 fragm_no = 1; 2428 u32 fragm_no = 0;
2419 u32 destaddr; 2429 u32 destaddr;
2420 2430
2421 if (msg_short(inmsg)) 2431 if (msg_short(inmsg))
@@ -2427,10 +2437,6 @@ static int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
2427 2437
2428 tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT, 2438 tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
2429 INT_H_SIZE, destaddr); 2439 INT_H_SIZE, destaddr);
2430 msg_set_link_selector(&fragm_hdr, msg_link_selector(inmsg));
2431 msg_set_long_msgno(&fragm_hdr, mod(l_ptr->long_msg_seq_no++));
2432 msg_set_fragm_no(&fragm_hdr, fragm_no);
2433 l_ptr->stats.sent_fragmented++;
2434 2440
2435 /* Chop up message: */ 2441 /* Chop up message: */
2436 2442
@@ -2443,27 +2449,37 @@ static int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
2443 } 2449 }
2444 fragm = tipc_buf_acquire(fragm_sz + INT_H_SIZE); 2450 fragm = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
2445 if (fragm == NULL) { 2451 if (fragm == NULL) {
2446 warn("Link unable to fragment message\n"); 2452 buf_discard(buf);
2447 dsz = -ENOMEM; 2453 while (buf_chain) {
2448 goto exit; 2454 buf = buf_chain;
2455 buf_chain = buf_chain->next;
2456 buf_discard(buf);
2457 }
2458 return -ENOMEM;
2449 } 2459 }
2450 msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE); 2460 msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
2461 fragm_no++;
2462 msg_set_fragm_no(&fragm_hdr, fragm_no);
2451 skb_copy_to_linear_data(fragm, &fragm_hdr, INT_H_SIZE); 2463 skb_copy_to_linear_data(fragm, &fragm_hdr, INT_H_SIZE);
2452 skb_copy_to_linear_data_offset(fragm, INT_H_SIZE, crs, 2464 skb_copy_to_linear_data_offset(fragm, INT_H_SIZE, crs,
2453 fragm_sz); 2465 fragm_sz);
2454 /* Send queued messages first, if any: */ 2466 buf_chain_tail->next = fragm;
2467 buf_chain_tail = fragm;
2455 2468
2456 l_ptr->stats.sent_fragments++;
2457 tipc_link_send_buf(l_ptr, fragm);
2458 if (!tipc_link_is_up(l_ptr))
2459 return dsz;
2460 msg_set_fragm_no(&fragm_hdr, ++fragm_no);
2461 rest -= fragm_sz; 2469 rest -= fragm_sz;
2462 crs += fragm_sz; 2470 crs += fragm_sz;
2463 msg_set_type(&fragm_hdr, FRAGMENT); 2471 msg_set_type(&fragm_hdr, FRAGMENT);
2464 } 2472 }
2465exit:
2466 buf_discard(buf); 2473 buf_discard(buf);
2474
2475 /* Append chain of fragments to send queue & send them */
2476
2477 l_ptr->long_msg_seq_no++;
2478 link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
2479 l_ptr->stats.sent_fragments += fragm_no;
2480 l_ptr->stats.sent_fragmented++;
2481 tipc_link_push_queue(l_ptr);
2482
2467 return dsz; 2483 return dsz;
2468} 2484}
2469 2485
diff --git a/net/tipc/link.h b/net/tipc/link.h
index e6a30dbe1aaa..74fbecab1ea0 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -228,6 +228,7 @@ u32 tipc_link_get_max_pkt(u32 dest, u32 selector);
228int tipc_link_send_sections_fast(struct tipc_port *sender, 228int tipc_link_send_sections_fast(struct tipc_port *sender,
229 struct iovec const *msg_sect, 229 struct iovec const *msg_sect,
230 const u32 num_sect, 230 const u32 num_sect,
231 unsigned int total_len,
231 u32 destnode); 232 u32 destnode);
232void tipc_link_recv_bundle(struct sk_buff *buf); 233void tipc_link_recv_bundle(struct sk_buff *buf);
233int tipc_link_recv_fragment(struct sk_buff **pending, 234int tipc_link_recv_fragment(struct sk_buff **pending,
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 6d92d17e7fb5..03e57bf92c73 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -68,20 +68,6 @@ void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type,
68} 68}
69 69
70/** 70/**
71 * tipc_msg_calc_data_size - determine total data size for message
72 */
73
74int tipc_msg_calc_data_size(struct iovec const *msg_sect, u32 num_sect)
75{
76 int dsz = 0;
77 int i;
78
79 for (i = 0; i < num_sect; i++)
80 dsz += msg_sect[i].iov_len;
81 return dsz;
82}
83
84/**
85 * tipc_msg_build - create message using specified header and data 71 * tipc_msg_build - create message using specified header and data
86 * 72 *
87 * Note: Caller must not hold any locks in case copy_from_user() is interrupted! 73 * Note: Caller must not hold any locks in case copy_from_user() is interrupted!
@@ -89,18 +75,13 @@ int tipc_msg_calc_data_size(struct iovec const *msg_sect, u32 num_sect)
89 * Returns message data size or errno 75 * Returns message data size or errno
90 */ 76 */
91 77
92int tipc_msg_build(struct tipc_msg *hdr, 78int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
93 struct iovec const *msg_sect, u32 num_sect, 79 u32 num_sect, unsigned int total_len,
94 int max_size, int usrmem, struct sk_buff **buf) 80 int max_size, int usrmem, struct sk_buff **buf)
95{ 81{
96 int dsz, sz, hsz, pos, res, cnt; 82 int dsz, sz, hsz, pos, res, cnt;
97 83
98 dsz = tipc_msg_calc_data_size(msg_sect, num_sect); 84 dsz = total_len;
99 if (unlikely(dsz > TIPC_MAX_USER_MSG_SIZE)) {
100 *buf = NULL;
101 return -EINVAL;
102 }
103
104 pos = hsz = msg_hdr_sz(hdr); 85 pos = hsz = msg_hdr_sz(hdr);
105 sz = hsz + dsz; 86 sz = hsz + dsz;
106 msg_set_size(hdr, sz); 87 msg_set_size(hdr, sz);
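
With tipc_msg_calc_data_size() removed, the iovec is summed once where a message enters TIPC and the result is threaded down every call chain as total_len (see the matching signature changes in link.c, port.c and socket.c), rather than being recomputed per layer or, as in link_send_sections_long(), read back out of a header field. A sketch of the equivalent one-pass sum over a portable struct iovec:

#include <stdio.h>
#include <sys/uio.h>

/* One-pass equivalent of the removed helper: compute the payload size
 * once at the boundary, then pass total_len down by value. */
static unsigned int iov_total_len(const struct iovec *iov, unsigned int n)
{
        unsigned int total = 0;
        unsigned int i;

        for (i = 0; i < n; i++)
                total += iov[i].iov_len;
        return total;
}

int main(void)
{
        char hdr[10], body[22];
        struct iovec iov[2] = {
                { .iov_base = hdr,  .iov_len = sizeof(hdr)  },
                { .iov_base = body, .iov_len = sizeof(body) },
        };

        printf("total_len = %u\n", iov_total_len(iov, 2));      /* 32 */
        return 0;
}
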
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index de02339fc175..8452454731fa 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -39,41 +39,24 @@
39 39
40#include "bearer.h" 40#include "bearer.h"
41 41
42/*
43 * Constants and routines used to read and write TIPC payload message headers
44 *
45 * Note: Some items are also used with TIPC internal message headers
46 */
47
42#define TIPC_VERSION 2 48#define TIPC_VERSION 2
43 49
44/* 50/*
45 * TIPC user data message header format, version 2: 51 * Payload message users are defined in TIPC's public API:
46 * 52 * - TIPC_LOW_IMPORTANCE
47 * 53 * - TIPC_MEDIUM_IMPORTANCE
48 * 1 0 9 8 7 6 5 4|3 2 1 0 9 8 7 6|5 4 3 2 1 0 9 8|7 6 5 4 3 2 1 0 54 * - TIPC_HIGH_IMPORTANCE
49 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 55 * - TIPC_CRITICAL_IMPORTANCE
50 * w0:|vers | user |hdr sz |n|d|s|-| message size | 56 */
51 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 57
52 * w1:|mstyp| error |rer cnt|lsc|opt p| broadcast ack no | 58/*
53 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 59 * Payload message types
54 * w2:| link level ack no | broadcast/link level seq no |
55 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
56 * w3:| previous node |
57 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
58 * w4:| originating port |
59 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
60 * w5:| destination port |
61 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
62 * w6:| originating node |
63 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
64 * w7:| destination node |
65 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
66 * w8:| name type / transport sequence number |
67 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
68 * w9:| name instance/multicast lower bound |
69 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
70 * wA:| multicast upper bound |
71 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
72 * / /
73 * \ options \
74 * / /
75 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
76 *
77 */ 60 */
78 61
79#define TIPC_CONN_MSG 0 62#define TIPC_CONN_MSG 0
@@ -81,6 +64,9 @@
81#define TIPC_NAMED_MSG 2 64#define TIPC_NAMED_MSG 2
82#define TIPC_DIRECT_MSG 3 65#define TIPC_DIRECT_MSG 3
83 66
67/*
68 * Message header sizes
69 */
84 70
85#define SHORT_H_SIZE 24 /* Connected, in-cluster messages */ 71#define SHORT_H_SIZE 24 /* Connected, in-cluster messages */
86#define DIR_MSG_H_SIZE 32 /* Directly addressed messages */ 72#define DIR_MSG_H_SIZE 32 /* Directly addressed messages */
@@ -473,40 +459,11 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
473 459
474 460
475/* 461/*
476 TIPC internal message header format, version 2 462 * Constants and routines used to read and write TIPC internal message headers
477 463 */
478 1 0 9 8 7 6 5 4|3 2 1 0 9 8 7 6|5 4 3 2 1 0 9 8|7 6 5 4 3 2 1 0
479 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
480 w0:|vers |msg usr|hdr sz |n|resrv| packet size |
481 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
482 w1:|m typ| sequence gap | broadcast ack no |
483 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
484 w2:| link level ack no/bc_gap_from | seq no / bcast_gap_to |
485 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
486 w3:| previous node |
487 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
488 w4:| next sent broadcast/fragm no | next sent pkt/ fragm msg no |
489 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
490 w5:| session no |rsv=0|r|berid|link prio|netpl|p|
491 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
492 w6:| originating node |
493 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
494 w7:| destination node |
495 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
496 w8:| transport sequence number |
497 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
498 w9:| msg count / bcast tag | link tolerance |
499 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
500 \ \
501 / User Specific Data /
502 \ \
503 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
504
505 NB: CONN_MANAGER use data message format. LINK_CONFIG has own format.
506*/
507 464
508/* 465/*
509 * Internal users 466 * Internal message users
510 */ 467 */
511 468
512#define BCAST_PROTOCOL 5 469#define BCAST_PROTOCOL 5
@@ -520,7 +477,7 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
520#define LINK_CONFIG 13 477#define LINK_CONFIG 13
521 478
522/* 479/*
523 * Connection management protocol messages 480 * Connection management protocol message types
524 */ 481 */
525 482
526#define CONN_PROBE 0 483#define CONN_PROBE 0
@@ -528,12 +485,41 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
528#define CONN_ACK 2 485#define CONN_ACK 2
529 486
530/* 487/*
531 * Name distributor messages 488 * Name distributor message types
532 */ 489 */
533 490
534#define PUBLICATION 0 491#define PUBLICATION 0
535#define WITHDRAWAL 1 492#define WITHDRAWAL 1
536 493
494/*
495 * Segmentation message types
496 */
497
498#define FIRST_FRAGMENT 0
499#define FRAGMENT 1
500#define LAST_FRAGMENT 2
501
502/*
503 * Link management protocol message types
504 */
505
506#define STATE_MSG 0
507#define RESET_MSG 1
508#define ACTIVATE_MSG 2
509
510/*
511 * Changeover tunnel message types
512 */
513#define DUPLICATE_MSG 0
514#define ORIGINAL_MSG 1
515
516/*
517 * Config protocol message types
518 */
519
520#define DSC_REQ_MSG 0
521#define DSC_RESP_MSG 1
522
537 523
538/* 524/*
539 * Word 1 525 * Word 1
@@ -761,50 +747,11 @@ static inline void msg_set_link_tolerance(struct tipc_msg *m, u32 n)
761 msg_set_bits(m, 9, 0, 0xffff, n); 747 msg_set_bits(m, 9, 0, 0xffff, n);
762} 748}
763 749
764/*
765 * Segmentation message types
766 */
767
768#define FIRST_FRAGMENT 0
769#define FRAGMENT 1
770#define LAST_FRAGMENT 2
771
772/*
773 * Link management protocol message types
774 */
775
776#define STATE_MSG 0
777#define RESET_MSG 1
778#define ACTIVATE_MSG 2
779
780/*
781 * Changeover tunnel message types
782 */
783#define DUPLICATE_MSG 0
784#define ORIGINAL_MSG 1
785
786/*
787 * Routing table message types
788 */
789#define EXT_ROUTING_TABLE 0
790#define LOCAL_ROUTING_TABLE 1 /* obsoleted */
791#define SLAVE_ROUTING_TABLE 2
792#define ROUTE_ADDITION 3
793#define ROUTE_REMOVAL 4
794
795/*
796 * Config protocol message types
797 */
798
799#define DSC_REQ_MSG 0
800#define DSC_RESP_MSG 1
801
802u32 tipc_msg_tot_importance(struct tipc_msg *m); 750u32 tipc_msg_tot_importance(struct tipc_msg *m);
803void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, 751void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type,
804 u32 hsize, u32 destnode); 752 u32 hsize, u32 destnode);
805int tipc_msg_calc_data_size(struct iovec const *msg_sect, u32 num_sect); 753int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
806int tipc_msg_build(struct tipc_msg *hdr, 754 u32 num_sect, unsigned int total_len,
807 struct iovec const *msg_sect, u32 num_sect,
808 int max_size, int usrmem, struct sk_buff **buf); 755 int max_size, int usrmem, struct sk_buff **buf);
809 756
810static inline void msg_set_media_addr(struct tipc_msg *m, struct tipc_media_addr *a) 757static inline void msg_set_media_addr(struct tipc_msg *m, struct tipc_media_addr *a)
diff --git a/net/tipc/port.c b/net/tipc/port.c
index 6ff78f9c7d65..c68dc956a423 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -74,7 +74,8 @@ static u32 port_peerport(struct tipc_port *p_ptr)
74 */ 74 */
75 75
76int tipc_multicast(u32 ref, struct tipc_name_seq const *seq, 76int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
77 u32 num_sect, struct iovec const *msg_sect) 77 u32 num_sect, struct iovec const *msg_sect,
78 unsigned int total_len)
78{ 79{
79 struct tipc_msg *hdr; 80 struct tipc_msg *hdr;
80 struct sk_buff *buf; 81 struct sk_buff *buf;
@@ -91,11 +92,14 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
91 92
92 hdr = &oport->phdr; 93 hdr = &oport->phdr;
93 msg_set_type(hdr, TIPC_MCAST_MSG); 94 msg_set_type(hdr, TIPC_MCAST_MSG);
95 msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE);
96 msg_set_destport(hdr, 0);
97 msg_set_destnode(hdr, 0);
94 msg_set_nametype(hdr, seq->type); 98 msg_set_nametype(hdr, seq->type);
95 msg_set_namelower(hdr, seq->lower); 99 msg_set_namelower(hdr, seq->lower);
96 msg_set_nameupper(hdr, seq->upper); 100 msg_set_nameupper(hdr, seq->upper);
97 msg_set_hdr_sz(hdr, MCAST_H_SIZE); 101 msg_set_hdr_sz(hdr, MCAST_H_SIZE);
98 res = tipc_msg_build(hdr, msg_sect, num_sect, MAX_MSG_SIZE, 102 res = tipc_msg_build(hdr, msg_sect, num_sect, total_len, MAX_MSG_SIZE,
99 !oport->user_port, &buf); 103 !oport->user_port, &buf);
100 if (unlikely(!buf)) 104 if (unlikely(!buf))
101 return res; 105 return res;
@@ -161,6 +165,7 @@ void tipc_port_recv_mcast(struct sk_buff *buf, struct port_list *dp)
161 /* Deliver a copy of message to each destination port */ 165 /* Deliver a copy of message to each destination port */
162 166
163 if (dp->count != 0) { 167 if (dp->count != 0) {
168 msg_set_destnode(msg, tipc_own_addr);
164 if (dp->count == 1) { 169 if (dp->count == 1) {
165 msg_set_destport(msg, dp->ports[0]); 170 msg_set_destport(msg, dp->ports[0]);
166 tipc_port_recv_msg(buf); 171 tipc_port_recv_msg(buf);
@@ -414,12 +419,12 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err)
414 419
415int tipc_port_reject_sections(struct tipc_port *p_ptr, struct tipc_msg *hdr, 420int tipc_port_reject_sections(struct tipc_port *p_ptr, struct tipc_msg *hdr,
416 struct iovec const *msg_sect, u32 num_sect, 421 struct iovec const *msg_sect, u32 num_sect,
417 int err) 422 unsigned int total_len, int err)
418{ 423{
419 struct sk_buff *buf; 424 struct sk_buff *buf;
420 int res; 425 int res;
421 426
422 res = tipc_msg_build(hdr, msg_sect, num_sect, MAX_MSG_SIZE, 427 res = tipc_msg_build(hdr, msg_sect, num_sect, total_len, MAX_MSG_SIZE,
423 !p_ptr->user_port, &buf); 428 !p_ptr->user_port, &buf);
424 if (!buf) 429 if (!buf)
425 return res; 430 return res;
@@ -1065,6 +1070,7 @@ int tipc_connect2port(u32 ref, struct tipc_portid const *peer)
1065 msg_set_orignode(msg, tipc_own_addr); 1070 msg_set_orignode(msg, tipc_own_addr);
1066 msg_set_origport(msg, p_ptr->ref); 1071 msg_set_origport(msg, p_ptr->ref);
1067 msg_set_type(msg, TIPC_CONN_MSG); 1072 msg_set_type(msg, TIPC_CONN_MSG);
1073 msg_set_lookup_scope(msg, 0);
1068 msg_set_hdr_sz(msg, SHORT_H_SIZE); 1074 msg_set_hdr_sz(msg, SHORT_H_SIZE);
1069 1075
1070 p_ptr->probing_interval = PROBING_INTERVAL; 1076 p_ptr->probing_interval = PROBING_INTERVAL;
@@ -1158,12 +1164,13 @@ int tipc_shutdown(u32 ref)
1158 */ 1164 */
1159 1165
1160static int tipc_port_recv_sections(struct tipc_port *sender, unsigned int num_sect, 1166static int tipc_port_recv_sections(struct tipc_port *sender, unsigned int num_sect,
1161 struct iovec const *msg_sect) 1167 struct iovec const *msg_sect,
1168 unsigned int total_len)
1162{ 1169{
1163 struct sk_buff *buf; 1170 struct sk_buff *buf;
1164 int res; 1171 int res;
1165 1172
1166 res = tipc_msg_build(&sender->phdr, msg_sect, num_sect, 1173 res = tipc_msg_build(&sender->phdr, msg_sect, num_sect, total_len,
1167 MAX_MSG_SIZE, !sender->user_port, &buf); 1174 MAX_MSG_SIZE, !sender->user_port, &buf);
1168 if (likely(buf)) 1175 if (likely(buf))
1169 tipc_port_recv_msg(buf); 1176 tipc_port_recv_msg(buf);
@@ -1174,7 +1181,8 @@ static int tipc_port_recv_sections(struct tipc_port *sender, unsigned int num_se
1174 * tipc_send - send message sections on connection 1181 * tipc_send - send message sections on connection
1175 */ 1182 */
1176 1183
1177int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect) 1184int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect,
1185 unsigned int total_len)
1178{ 1186{
1179 struct tipc_port *p_ptr; 1187 struct tipc_port *p_ptr;
1180 u32 destnode; 1188 u32 destnode;
@@ -1189,9 +1197,10 @@ int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect)
1189 destnode = port_peernode(p_ptr); 1197 destnode = port_peernode(p_ptr);
1190 if (likely(destnode != tipc_own_addr)) 1198 if (likely(destnode != tipc_own_addr))
1191 res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect, 1199 res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect,
1192 destnode); 1200 total_len, destnode);
1193 else 1201 else
1194 res = tipc_port_recv_sections(p_ptr, num_sect, msg_sect); 1202 res = tipc_port_recv_sections(p_ptr, num_sect, msg_sect,
1203 total_len);
1195 1204
1196 if (likely(res != -ELINKCONG)) { 1205 if (likely(res != -ELINKCONG)) {
1197 p_ptr->congested = 0; 1206 p_ptr->congested = 0;
@@ -1202,8 +1211,7 @@ int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect)
1202 } 1211 }
1203 if (port_unreliable(p_ptr)) { 1212 if (port_unreliable(p_ptr)) {
1204 p_ptr->congested = 0; 1213 p_ptr->congested = 0;
1205 /* Just calculate msg length and return */ 1214 return total_len;
1206 return tipc_msg_calc_data_size(msg_sect, num_sect);
1207 } 1215 }
1208 return -ELINKCONG; 1216 return -ELINKCONG;
1209} 1217}
@@ -1213,7 +1221,8 @@ int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect)
1213 */ 1221 */
1214 1222
1215int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain, 1223int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain,
1216 unsigned int num_sect, struct iovec const *msg_sect) 1224 unsigned int num_sect, struct iovec const *msg_sect,
1225 unsigned int total_len)
1217{ 1226{
1218 struct tipc_port *p_ptr; 1227 struct tipc_port *p_ptr;
1219 struct tipc_msg *msg; 1228 struct tipc_msg *msg;
@@ -1240,23 +1249,23 @@ int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain,
1240 if (likely(destport)) { 1249 if (likely(destport)) {
1241 if (likely(destnode == tipc_own_addr)) 1250 if (likely(destnode == tipc_own_addr))
1242 res = tipc_port_recv_sections(p_ptr, num_sect, 1251 res = tipc_port_recv_sections(p_ptr, num_sect,
1243 msg_sect); 1252 msg_sect, total_len);
1244 else 1253 else
1245 res = tipc_link_send_sections_fast(p_ptr, msg_sect, 1254 res = tipc_link_send_sections_fast(p_ptr, msg_sect,
1246 num_sect, destnode); 1255 num_sect, total_len,
1256 destnode);
1247 if (likely(res != -ELINKCONG)) { 1257 if (likely(res != -ELINKCONG)) {
1248 if (res > 0) 1258 if (res > 0)
1249 p_ptr->sent++; 1259 p_ptr->sent++;
1250 return res; 1260 return res;
1251 } 1261 }
1252 if (port_unreliable(p_ptr)) { 1262 if (port_unreliable(p_ptr)) {
1253 /* Just calculate msg length and return */ 1263 return total_len;
1254 return tipc_msg_calc_data_size(msg_sect, num_sect);
1255 } 1264 }
1256 return -ELINKCONG; 1265 return -ELINKCONG;
1257 } 1266 }
1258 return tipc_port_reject_sections(p_ptr, msg, msg_sect, num_sect, 1267 return tipc_port_reject_sections(p_ptr, msg, msg_sect, num_sect,
1259 TIPC_ERR_NO_NAME); 1268 total_len, TIPC_ERR_NO_NAME);
1260} 1269}
1261 1270
1262/** 1271/**
@@ -1264,7 +1273,8 @@ int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain,
1264 */ 1273 */
1265 1274
1266int tipc_send2port(u32 ref, struct tipc_portid const *dest, 1275int tipc_send2port(u32 ref, struct tipc_portid const *dest,
1267 unsigned int num_sect, struct iovec const *msg_sect) 1276 unsigned int num_sect, struct iovec const *msg_sect,
1277 unsigned int total_len)
1268{ 1278{
1269 struct tipc_port *p_ptr; 1279 struct tipc_port *p_ptr;
1270 struct tipc_msg *msg; 1280 struct tipc_msg *msg;
@@ -1276,6 +1286,7 @@ int tipc_send2port(u32 ref, struct tipc_portid const *dest,
1276 1286
1277 msg = &p_ptr->phdr; 1287 msg = &p_ptr->phdr;
1278 msg_set_type(msg, TIPC_DIRECT_MSG); 1288 msg_set_type(msg, TIPC_DIRECT_MSG);
1289 msg_set_lookup_scope(msg, 0);
1279 msg_set_orignode(msg, tipc_own_addr); 1290 msg_set_orignode(msg, tipc_own_addr);
1280 msg_set_origport(msg, ref); 1291 msg_set_origport(msg, ref);
1281 msg_set_destnode(msg, dest->node); 1292 msg_set_destnode(msg, dest->node);
@@ -1283,18 +1294,18 @@ int tipc_send2port(u32 ref, struct tipc_portid const *dest,
1283 msg_set_hdr_sz(msg, DIR_MSG_H_SIZE); 1294 msg_set_hdr_sz(msg, DIR_MSG_H_SIZE);
1284 1295
1285 if (dest->node == tipc_own_addr) 1296 if (dest->node == tipc_own_addr)
1286 res = tipc_port_recv_sections(p_ptr, num_sect, msg_sect); 1297 res = tipc_port_recv_sections(p_ptr, num_sect, msg_sect,
1298 total_len);
1287 else 1299 else
1288 res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect, 1300 res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect,
1289 dest->node); 1301 total_len, dest->node);
1290 if (likely(res != -ELINKCONG)) { 1302 if (likely(res != -ELINKCONG)) {
1291 if (res > 0) 1303 if (res > 0)
1292 p_ptr->sent++; 1304 p_ptr->sent++;
1293 return res; 1305 return res;
1294 } 1306 }
1295 if (port_unreliable(p_ptr)) { 1307 if (port_unreliable(p_ptr)) {
1296 /* Just calculate msg length and return */ 1308 return total_len;
1297 return tipc_msg_calc_data_size(msg_sect, num_sect);
1298 } 1309 }
1299 return -ELINKCONG; 1310 return -ELINKCONG;
1300} 1311}
diff --git a/net/tipc/port.h b/net/tipc/port.h
index 87b9424ae0ec..b9aa34195aec 100644
--- a/net/tipc/port.h
+++ b/net/tipc/port.h
@@ -205,23 +205,27 @@ int tipc_disconnect_port(struct tipc_port *tp_ptr);
205/* 205/*
206 * TIPC messaging routines 206 * TIPC messaging routines
207 */ 207 */
208int tipc_send(u32 portref, unsigned int num_sect, struct iovec const *msg_sect); 208int tipc_send(u32 portref, unsigned int num_sect, struct iovec const *msg_sect,
209 unsigned int total_len);
209 210
210int tipc_send2name(u32 portref, struct tipc_name const *name, u32 domain, 211int tipc_send2name(u32 portref, struct tipc_name const *name, u32 domain,
211 unsigned int num_sect, struct iovec const *msg_sect); 212 unsigned int num_sect, struct iovec const *msg_sect,
213 unsigned int total_len);
212 214
213int tipc_send2port(u32 portref, struct tipc_portid const *dest, 215int tipc_send2port(u32 portref, struct tipc_portid const *dest,
214 unsigned int num_sect, struct iovec const *msg_sect); 216 unsigned int num_sect, struct iovec const *msg_sect,
217 unsigned int total_len);
215 218
216int tipc_send_buf2port(u32 portref, struct tipc_portid const *dest, 219int tipc_send_buf2port(u32 portref, struct tipc_portid const *dest,
217 struct sk_buff *buf, unsigned int dsz); 220 struct sk_buff *buf, unsigned int dsz);
218 221
219int tipc_multicast(u32 portref, struct tipc_name_seq const *seq, 222int tipc_multicast(u32 portref, struct tipc_name_seq const *seq,
220 unsigned int section_count, struct iovec const *msg); 223 unsigned int section_count, struct iovec const *msg,
224 unsigned int total_len);
221 225
222int tipc_port_reject_sections(struct tipc_port *p_ptr, struct tipc_msg *hdr, 226int tipc_port_reject_sections(struct tipc_port *p_ptr, struct tipc_msg *hdr,
223 struct iovec const *msg_sect, u32 num_sect, 227 struct iovec const *msg_sect, u32 num_sect,
224 int err); 228 unsigned int total_len, int err);
225struct sk_buff *tipc_port_get_ports(void); 229struct sk_buff *tipc_port_get_ports(void);
226void tipc_port_recv_proto_msg(struct sk_buff *buf); 230void tipc_port_recv_proto_msg(struct sk_buff *buf);
227void tipc_port_recv_mcast(struct sk_buff *buf, struct port_list *dp); 231void tipc_port_recv_mcast(struct sk_buff *buf, struct port_list *dp);
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 29d94d53198d..338837396642 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -535,6 +535,9 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
535 if (unlikely((m->msg_namelen < sizeof(*dest)) || 535 if (unlikely((m->msg_namelen < sizeof(*dest)) ||
536 (dest->family != AF_TIPC))) 536 (dest->family != AF_TIPC)))
537 return -EINVAL; 537 return -EINVAL;
538 if ((total_len > TIPC_MAX_USER_MSG_SIZE) ||
539 (m->msg_iovlen > (unsigned)INT_MAX))
540 return -EMSGSIZE;
538 541
539 if (iocb) 542 if (iocb)
540 lock_sock(sk); 543 lock_sock(sk);
@@ -573,12 +576,14 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
573 &dest->addr.name.name, 576 &dest->addr.name.name,
574 dest->addr.name.domain, 577 dest->addr.name.domain,
575 m->msg_iovlen, 578 m->msg_iovlen,
576 m->msg_iov); 579 m->msg_iov,
580 total_len);
577 } else if (dest->addrtype == TIPC_ADDR_ID) { 581 } else if (dest->addrtype == TIPC_ADDR_ID) {
578 res = tipc_send2port(tport->ref, 582 res = tipc_send2port(tport->ref,
579 &dest->addr.id, 583 &dest->addr.id,
580 m->msg_iovlen, 584 m->msg_iovlen,
581 m->msg_iov); 585 m->msg_iov,
586 total_len);
582 } else if (dest->addrtype == TIPC_ADDR_MCAST) { 587 } else if (dest->addrtype == TIPC_ADDR_MCAST) {
583 if (needs_conn) { 588 if (needs_conn) {
584 res = -EOPNOTSUPP; 589 res = -EOPNOTSUPP;
@@ -590,7 +595,8 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
590 res = tipc_multicast(tport->ref, 595 res = tipc_multicast(tport->ref,
591 &dest->addr.nameseq, 596 &dest->addr.nameseq,
592 m->msg_iovlen, 597 m->msg_iovlen,
593 m->msg_iov); 598 m->msg_iov,
599 total_len);
594 } 600 }
595 if (likely(res != -ELINKCONG)) { 601 if (likely(res != -ELINKCONG)) {
596 if (needs_conn && (res >= 0)) 602 if (needs_conn && (res >= 0))
@@ -640,6 +646,10 @@ static int send_packet(struct kiocb *iocb, struct socket *sock,
640 if (unlikely(dest)) 646 if (unlikely(dest))
641 return send_msg(iocb, sock, m, total_len); 647 return send_msg(iocb, sock, m, total_len);
642 648
649 if ((total_len > TIPC_MAX_USER_MSG_SIZE) ||
650 (m->msg_iovlen > (unsigned)INT_MAX))
651 return -EMSGSIZE;
652
643 if (iocb) 653 if (iocb)
644 lock_sock(sk); 654 lock_sock(sk);
645 655
@@ -652,7 +662,8 @@ static int send_packet(struct kiocb *iocb, struct socket *sock,
652 break; 662 break;
653 } 663 }
654 664
655 res = tipc_send(tport->ref, m->msg_iovlen, m->msg_iov); 665 res = tipc_send(tport->ref, m->msg_iovlen, m->msg_iov,
666 total_len);
656 if (likely(res != -ELINKCONG)) 667 if (likely(res != -ELINKCONG))
657 break; 668 break;
658 if (m->msg_flags & MSG_DONTWAIT) { 669 if (m->msg_flags & MSG_DONTWAIT) {
@@ -723,6 +734,12 @@ static int send_stream(struct kiocb *iocb, struct socket *sock,
723 goto exit; 734 goto exit;
724 } 735 }
725 736
737 if ((total_len > (unsigned)INT_MAX) ||
738 (m->msg_iovlen > (unsigned)INT_MAX)) {
739 res = -EMSGSIZE;
740 goto exit;
741 }
742
726 /* 743 /*
727 * Send each iovec entry using one or more messages 744 * Send each iovec entry using one or more messages
728 * 745 *
@@ -753,7 +770,7 @@ static int send_stream(struct kiocb *iocb, struct socket *sock,
753 bytes_to_send = curr_left; 770 bytes_to_send = curr_left;
754 my_iov.iov_base = curr_start; 771 my_iov.iov_base = curr_start;
755 my_iov.iov_len = bytes_to_send; 772 my_iov.iov_len = bytes_to_send;
756 res = send_packet(NULL, sock, &my_msg, 0); 773 res = send_packet(NULL, sock, &my_msg, bytes_to_send);
757 if (res < 0) { 774 if (res < 0) {
758 if (bytes_sent) 775 if (bytes_sent)
759 res = bytes_sent; 776 res = bytes_sent;
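
The checks added to send_msg() and send_packet() reject oversize requests with -EMSGSIZE at the socket boundary, where tipc_msg_build() previously failed late with -EINVAL; send_stream() only bounds total_len by INT_MAX because a stream is chopped into per-packet sends, each of which now passes bytes_to_send as its own total and is re-checked. A compilable model of the guard, assuming the 66000-byte TIPC_MAX_USER_MSG_SIZE from the TIPC headers of this era:

#include <errno.h>
#include <limits.h>
#include <stdio.h>

#define TIPC_MAX_USER_MSG_SIZE 66000u   /* assumed value, per linux/tipc.h */

/* Early bounds check in the style of send_msg()/send_packet(). */
static int check_msg_bounds(unsigned long long total_len,
                            unsigned long long iovlen)
{
        if (total_len > TIPC_MAX_USER_MSG_SIZE || iovlen > (unsigned)INT_MAX)
                return -EMSGSIZE;
        return 0;
}

int main(void)
{
        printf("%d\n", check_msg_bounds(128, 1));       /* 0 */
        printf("%d\n", check_msg_bounds(1u << 20, 1));  /* -EMSGSIZE (-90 on Linux) */
        return 0;
}
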
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index aae9eae13404..6cf726863485 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -109,7 +109,7 @@ static void subscr_send_event(struct subscription *sub,
109 sub->evt.found_upper = htohl(found_upper, sub->swap); 109 sub->evt.found_upper = htohl(found_upper, sub->swap);
110 sub->evt.port.ref = htohl(port_ref, sub->swap); 110 sub->evt.port.ref = htohl(port_ref, sub->swap);
111 sub->evt.port.node = htohl(node, sub->swap); 111 sub->evt.port.node = htohl(node, sub->swap);
112 tipc_send(sub->server_ref, 1, &msg_sect); 112 tipc_send(sub->server_ref, 1, &msg_sect, msg_sect.iov_len);
113} 113}
114 114
115/** 115/**
@@ -521,7 +521,7 @@ static void subscr_named_msg_event(void *usr_handle,
521 521
522 /* Send an ACK- to complete connection handshaking */ 522 /* Send an ACK- to complete connection handshaking */
523 523
524 tipc_send(server_port_ref, 0, NULL); 524 tipc_send(server_port_ref, 0, NULL, 0);
525 525
526 /* Handle optional subscription request */ 526 /* Handle optional subscription request */
527 527
diff --git a/net/wireless/core.c b/net/wireless/core.c
index fe01de29bfe8..c22ef3492ee6 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -46,6 +46,11 @@ static struct dentry *ieee80211_debugfs_dir;
46/* for the cleanup, scan and event works */ 46/* for the cleanup, scan and event works */
47struct workqueue_struct *cfg80211_wq; 47struct workqueue_struct *cfg80211_wq;
48 48
49static bool cfg80211_disable_40mhz_24ghz;
50module_param(cfg80211_disable_40mhz_24ghz, bool, 0644);
51MODULE_PARM_DESC(cfg80211_disable_40mhz_24ghz,
52 "Disable 40MHz support in the 2.4GHz band");
53
49/* requires cfg80211_mutex to be held! */ 54/* requires cfg80211_mutex to be held! */
50struct cfg80211_registered_device *cfg80211_rdev_by_wiphy_idx(int wiphy_idx) 55struct cfg80211_registered_device *cfg80211_rdev_by_wiphy_idx(int wiphy_idx)
51{ 56{
@@ -365,7 +370,7 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
365 spin_lock_init(&rdev->bss_lock); 370 spin_lock_init(&rdev->bss_lock);
366 INIT_LIST_HEAD(&rdev->bss_list); 371 INIT_LIST_HEAD(&rdev->bss_list);
367 INIT_WORK(&rdev->scan_done_wk, __cfg80211_scan_done); 372 INIT_WORK(&rdev->scan_done_wk, __cfg80211_scan_done);
368 373 INIT_WORK(&rdev->sched_scan_results_wk, __cfg80211_sched_scan_results);
369#ifdef CONFIG_CFG80211_WEXT 374#ifdef CONFIG_CFG80211_WEXT
370 rdev->wiphy.wext = &cfg80211_wext_handler; 375 rdev->wiphy.wext = &cfg80211_wext_handler;
371#endif 376#endif
@@ -411,6 +416,67 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
411} 416}
412EXPORT_SYMBOL(wiphy_new); 417EXPORT_SYMBOL(wiphy_new);
413 418
419static int wiphy_verify_combinations(struct wiphy *wiphy)
420{
421 const struct ieee80211_iface_combination *c;
422 int i, j;
423
424 /* If we have combinations enforce them */
425 if (wiphy->n_iface_combinations)
426 wiphy->flags |= WIPHY_FLAG_ENFORCE_COMBINATIONS;
427
428 for (i = 0; i < wiphy->n_iface_combinations; i++) {
429 u32 cnt = 0;
430 u16 all_iftypes = 0;
431
432 c = &wiphy->iface_combinations[i];
433
434 /* Combinations with just one interface aren't real */
435 if (WARN_ON(c->max_interfaces < 2))
436 return -EINVAL;
437
438 /* Need at least one channel */
439 if (WARN_ON(!c->num_different_channels))
440 return -EINVAL;
441
442 if (WARN_ON(!c->n_limits))
443 return -EINVAL;
444
445 for (j = 0; j < c->n_limits; j++) {
446 u16 types = c->limits[j].types;
447
448 /*
 449 * interface types shouldn't overlap; this is
450 * used in cfg80211_can_change_interface()
451 */
452 if (WARN_ON(types & all_iftypes))
453 return -EINVAL;
454 all_iftypes |= types;
455
456 if (WARN_ON(!c->limits[j].max))
457 return -EINVAL;
458
459 /* Shouldn't list software iftypes in combinations! */
460 if (WARN_ON(wiphy->software_iftypes & types))
461 return -EINVAL;
462
463 cnt += c->limits[j].max;
464 /*
465 * Don't advertise an unsupported type
466 * in a combination.
467 */
468 if (WARN_ON((wiphy->interface_modes & types) != types))
469 return -EINVAL;
470 }
471
472 /* You can't even choose that many! */
473 if (WARN_ON(cnt < c->max_interfaces))
474 return -EINVAL;
475 }
476
477 return 0;
478}
479
414int wiphy_register(struct wiphy *wiphy) 480int wiphy_register(struct wiphy *wiphy)
415{ 481{
416 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 482 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
@@ -439,6 +505,10 @@ int wiphy_register(struct wiphy *wiphy)
439 if (WARN_ON(ifmodes != wiphy->interface_modes)) 505 if (WARN_ON(ifmodes != wiphy->interface_modes))
440 wiphy->interface_modes = ifmodes; 506 wiphy->interface_modes = ifmodes;
441 507
508 res = wiphy_verify_combinations(wiphy);
509 if (res)
510 return res;
511
442 /* sanity check supported bands/channels */ 512 /* sanity check supported bands/channels */
443 for (band = 0; band < IEEE80211_NUM_BANDS; band++) { 513 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
444 sband = wiphy->bands[band]; 514 sband = wiphy->bands[band];
@@ -451,6 +521,18 @@ int wiphy_register(struct wiphy *wiphy)
451 return -EINVAL; 521 return -EINVAL;
452 522
453 /* 523 /*
524 * Since cfg80211_disable_40mhz_24ghz is global, we can
525 * modify the sband's ht data even if the driver uses a
526 * global structure for that.
527 */
528 if (cfg80211_disable_40mhz_24ghz &&
529 band == IEEE80211_BAND_2GHZ &&
530 sband->ht_cap.ht_supported) {
531 sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
532 sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SGI_40;
533 }
534
535 /*
454 * Since we use a u32 for rate bitmaps in 536 * Since we use a u32 for rate bitmaps in
455 * ieee80211_get_response_rate, we cannot 537 * ieee80211_get_response_rate, we cannot
456 * have more than 32 legacy rates. 538 * have more than 32 legacy rates.
@@ -476,6 +558,13 @@ int wiphy_register(struct wiphy *wiphy)
476 return -EINVAL; 558 return -EINVAL;
477 } 559 }
478 560
561 if (rdev->wiphy.wowlan.n_patterns) {
562 if (WARN_ON(!rdev->wiphy.wowlan.pattern_min_len ||
563 rdev->wiphy.wowlan.pattern_min_len >
564 rdev->wiphy.wowlan.pattern_max_len))
565 return -EINVAL;
566 }
567
479 /* check and set up bitrates */ 568 /* check and set up bitrates */
480 ieee80211_set_bitrate_flags(wiphy); 569 ieee80211_set_bitrate_flags(wiphy);
481 570
@@ -614,6 +703,7 @@ void cfg80211_dev_free(struct cfg80211_registered_device *rdev)
614 mutex_destroy(&rdev->devlist_mtx); 703 mutex_destroy(&rdev->devlist_mtx);
615 list_for_each_entry_safe(scan, tmp, &rdev->bss_list, list) 704 list_for_each_entry_safe(scan, tmp, &rdev->bss_list, list)
616 cfg80211_put_bss(&scan->pub); 705 cfg80211_put_bss(&scan->pub);
706 cfg80211_rdev_free_wowlan(rdev);
617 kfree(rdev); 707 kfree(rdev);
618} 708}
619 709
@@ -647,6 +737,11 @@ static void wdev_cleanup_work(struct work_struct *work)
647 ___cfg80211_scan_done(rdev, true); 737 ___cfg80211_scan_done(rdev, true);
648 } 738 }
649 739
740 if (WARN_ON(rdev->sched_scan_req &&
741 rdev->sched_scan_req->dev == wdev->netdev)) {
742 __cfg80211_stop_sched_scan(rdev, false);
743 }
744
650 cfg80211_unlock_rdev(rdev); 745 cfg80211_unlock_rdev(rdev);
651 746
652 mutex_lock(&rdev->devlist_mtx); 747 mutex_lock(&rdev->devlist_mtx);
@@ -668,6 +763,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
668 struct net_device *dev = ndev; 763 struct net_device *dev = ndev;
669 struct wireless_dev *wdev = dev->ieee80211_ptr; 764 struct wireless_dev *wdev = dev->ieee80211_ptr;
670 struct cfg80211_registered_device *rdev; 765 struct cfg80211_registered_device *rdev;
766 int ret;
671 767
672 if (!wdev) 768 if (!wdev)
673 return NOTIFY_DONE; 769 return NOTIFY_DONE;
@@ -734,6 +830,10 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
734 break; 830 break;
735 case NL80211_IFTYPE_P2P_CLIENT: 831 case NL80211_IFTYPE_P2P_CLIENT:
736 case NL80211_IFTYPE_STATION: 832 case NL80211_IFTYPE_STATION:
833 cfg80211_lock_rdev(rdev);
834 __cfg80211_stop_sched_scan(rdev, false);
835 cfg80211_unlock_rdev(rdev);
836
737 wdev_lock(wdev); 837 wdev_lock(wdev);
738#ifdef CONFIG_CFG80211_WEXT 838#ifdef CONFIG_CFG80211_WEXT
739 kfree(wdev->wext.ie); 839 kfree(wdev->wext.ie);
@@ -752,6 +852,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
752 default: 852 default:
753 break; 853 break;
754 } 854 }
855 wdev->beacon_interval = 0;
755 break; 856 break;
756 case NETDEV_DOWN: 857 case NETDEV_DOWN:
757 dev_hold(dev); 858 dev_hold(dev);
@@ -858,6 +959,9 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
858 return notifier_from_errno(-EOPNOTSUPP); 959 return notifier_from_errno(-EOPNOTSUPP);
859 if (rfkill_blocked(rdev->rfkill)) 960 if (rfkill_blocked(rdev->rfkill))
860 return notifier_from_errno(-ERFKILL); 961 return notifier_from_errno(-ERFKILL);
962 ret = cfg80211_can_add_interface(rdev, wdev->iftype);
963 if (ret)
964 return notifier_from_errno(ret);
861 break; 965 break;
862 } 966 }
863 967
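
wiphy_verify_combinations() pins down the invariants each advertised combination must satisfy: at least two interfaces and one channel, non-empty limits whose interface-type masks never overlap, no software interface types, only types the wiphy actually supports, and per-limit maxima that can add up to max_interfaces. A condensed user-space model of the per-combination loop (the software-iftype check and the WARN_ONs are dropped, and BIT(2)/BIT(3) merely stand in for NL80211_IFTYPE_STATION/NL80211_IFTYPE_AP; the struct names are illustrative):

#include <errno.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

struct iface_limit { unsigned short max, types; };

struct iface_combination {
        const struct iface_limit *limits;
        int n_limits;
        int max_interfaces;
        int num_different_channels;
};

static int verify_combination(const struct iface_combination *c,
                              unsigned short supported)
{
        unsigned short all_types = 0;
        int i, cnt = 0;

        if (c->max_interfaces < 2 || !c->num_different_channels ||
            !c->n_limits)
                return -EINVAL;
        for (i = 0; i < c->n_limits; i++) {
                /* types must not overlap between limits, must all be
                 * supported, and each limit must allow at least one */
                if ((c->limits[i].types & all_types) || !c->limits[i].max)
                        return -EINVAL;
                if ((supported & c->limits[i].types) != c->limits[i].types)
                        return -EINVAL;
                all_types |= c->limits[i].types;
                cnt += c->limits[i].max;
        }
        return cnt < c->max_interfaces ? -EINVAL : 0;
}

int main(void)
{
        /* e.g. one station plus one AP on a single channel */
        struct iface_limit lim[] = { { 1, BIT(2) }, { 1, BIT(3) } };
        struct iface_combination c = { lim, 2, 2, 1 };

        printf("%s\n", verify_combination(&c, BIT(2) | BIT(3)) ? "bad" : "ok");
        return 0;
}
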
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 26a0a084e16b..bf0fb40e3c8b 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -60,8 +60,10 @@ struct cfg80211_registered_device {
60 struct rb_root bss_tree; 60 struct rb_root bss_tree;
61 u32 bss_generation; 61 u32 bss_generation;
62 struct cfg80211_scan_request *scan_req; /* protected by RTNL */ 62 struct cfg80211_scan_request *scan_req; /* protected by RTNL */
63 struct cfg80211_sched_scan_request *sched_scan_req;
63 unsigned long suspend_at; 64 unsigned long suspend_at;
64 struct work_struct scan_done_wk; 65 struct work_struct scan_done_wk;
66 struct work_struct sched_scan_results_wk;
65 67
66#ifdef CONFIG_NL80211_TESTMODE 68#ifdef CONFIG_NL80211_TESTMODE
67 struct genl_info *testmode_info; 69 struct genl_info *testmode_info;
@@ -70,6 +72,8 @@ struct cfg80211_registered_device {
70 struct work_struct conn_work; 72 struct work_struct conn_work;
71 struct work_struct event_work; 73 struct work_struct event_work;
72 74
75 struct cfg80211_wowlan *wowlan;
76
73 /* must be last because of the way we do wiphy_priv(), 77 /* must be last because of the way we do wiphy_priv(),
74 * and it should at least be aligned to NETDEV_ALIGN */ 78 * and it should at least be aligned to NETDEV_ALIGN */
75 struct wiphy wiphy __attribute__((__aligned__(NETDEV_ALIGN))); 79 struct wiphy wiphy __attribute__((__aligned__(NETDEV_ALIGN)));
@@ -89,6 +93,18 @@ bool wiphy_idx_valid(int wiphy_idx)
89 return wiphy_idx >= 0; 93 return wiphy_idx >= 0;
90} 94}
91 95
96static inline void
97cfg80211_rdev_free_wowlan(struct cfg80211_registered_device *rdev)
98{
99 int i;
100
101 if (!rdev->wowlan)
102 return;
103 for (i = 0; i < rdev->wowlan->n_patterns; i++)
104 kfree(rdev->wowlan->patterns[i].mask);
105 kfree(rdev->wowlan->patterns);
106 kfree(rdev->wowlan);
107}
92 108
93extern struct workqueue_struct *cfg80211_wq; 109extern struct workqueue_struct *cfg80211_wq;
94extern struct mutex cfg80211_mutex; 110extern struct mutex cfg80211_mutex;
@@ -397,12 +413,26 @@ void cfg80211_sme_rx_auth(struct net_device *dev, const u8 *buf, size_t len);
397void cfg80211_sme_disassoc(struct net_device *dev, int idx); 413void cfg80211_sme_disassoc(struct net_device *dev, int idx);
398void __cfg80211_scan_done(struct work_struct *wk); 414void __cfg80211_scan_done(struct work_struct *wk);
399void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak); 415void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak);
416void __cfg80211_sched_scan_results(struct work_struct *wk);
417int __cfg80211_stop_sched_scan(struct cfg80211_registered_device *rdev,
418 bool driver_initiated);
400void cfg80211_upload_connect_keys(struct wireless_dev *wdev); 419void cfg80211_upload_connect_keys(struct wireless_dev *wdev);
401int cfg80211_change_iface(struct cfg80211_registered_device *rdev, 420int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
402 struct net_device *dev, enum nl80211_iftype ntype, 421 struct net_device *dev, enum nl80211_iftype ntype,
403 u32 *flags, struct vif_params *params); 422 u32 *flags, struct vif_params *params);
404void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev); 423void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev);
405 424
425int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
426 struct wireless_dev *wdev,
427 enum nl80211_iftype iftype);
428
429static inline int
430cfg80211_can_add_interface(struct cfg80211_registered_device *rdev,
431 enum nl80211_iftype iftype)
432{
433 return cfg80211_can_change_interface(rdev, NULL, iftype);
434}
435
406struct ieee80211_channel * 436struct ieee80211_channel *
407rdev_freq_to_chan(struct cfg80211_registered_device *rdev, 437rdev_freq_to_chan(struct cfg80211_registered_device *rdev,
408 int freq, enum nl80211_channel_type channel_type); 438 int freq, enum nl80211_channel_type channel_type);
@@ -412,6 +442,9 @@ int cfg80211_set_freq(struct cfg80211_registered_device *rdev,
412 442
413u16 cfg80211_calculate_bitrate(struct rate_info *rate); 443u16 cfg80211_calculate_bitrate(struct rate_info *rate);
414 444
445int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
446 u32 beacon_int);
447
415#ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS 448#ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS
416#define CFG80211_DEV_WARN_ON(cond) WARN_ON(cond) 449#define CFG80211_DEV_WARN_ON(cond) WARN_ON(cond)
417#else 450#else
diff --git a/net/wireless/lib80211_crypt_wep.c b/net/wireless/lib80211_crypt_wep.c
index e2e88878ba35..2f265e033ae2 100644
--- a/net/wireless/lib80211_crypt_wep.c
+++ b/net/wireless/lib80211_crypt_wep.c
@@ -96,13 +96,12 @@ static int lib80211_wep_build_iv(struct sk_buff *skb, int hdr_len,
96 u8 *key, int keylen, void *priv) 96 u8 *key, int keylen, void *priv)
97{ 97{
98 struct lib80211_wep_data *wep = priv; 98 struct lib80211_wep_data *wep = priv;
99 u32 klen, len; 99 u32 klen;
100 u8 *pos; 100 u8 *pos;
101 101
102 if (skb_headroom(skb) < 4 || skb->len < hdr_len) 102 if (skb_headroom(skb) < 4 || skb->len < hdr_len)
103 return -1; 103 return -1;
104 104
105 len = skb->len - hdr_len;
106 pos = skb_push(skb, 4); 105 pos = skb_push(skb, 4);
107 memmove(pos, pos + 4, hdr_len); 106 memmove(pos, pos + 4, hdr_len);
108 pos += hdr_len; 107 pos += hdr_len;
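Editor's note: for context on the surviving lines, lib80211_wep_build_iv() opens a 4-byte gap for the IV by pushing the frame down and sliding the 802.11 header forward. A minimal userspace re-creation of that idiom, assuming a flat buffer with 4 bytes of headroom instead of an skb:

```c
#include <string.h>

/* Sketch of the skb_push(skb, 4) + memmove() idiom above: grow the
 * buffer at the front by 4 bytes, copy the header into the new space,
 * and return a pointer to the 4-byte gap left behind for the WEP IV.
 * 'data' must have at least 4 bytes of headroom before it. */
static unsigned char *make_iv_room(unsigned char *data, int hdr_len)
{
	unsigned char *pos = data - 4;	/* like skb_push(skb, 4) */

	memmove(pos, pos + 4, hdr_len);	/* slide header forward */
	return pos + hdr_len;		/* IV bytes go here */
}
```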
diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c
index 73e39c171ffb..5c116083eeca 100644
--- a/net/wireless/mesh.c
+++ b/net/wireless/mesh.c
@@ -1,5 +1,6 @@
1#include <linux/ieee80211.h> 1#include <linux/ieee80211.h>
2#include <net/cfg80211.h> 2#include <net/cfg80211.h>
3#include "nl80211.h"
3#include "core.h" 4#include "core.h"
4 5
5/* Default values, timeouts in ms */ 6/* Default values, timeouts in ms */
@@ -53,8 +54,9 @@ const struct mesh_config default_mesh_config = {
53const struct mesh_setup default_mesh_setup = { 54const struct mesh_setup default_mesh_setup = {
54 .path_sel_proto = IEEE80211_PATH_PROTOCOL_HWMP, 55 .path_sel_proto = IEEE80211_PATH_PROTOCOL_HWMP,
55 .path_metric = IEEE80211_PATH_METRIC_AIRTIME, 56 .path_metric = IEEE80211_PATH_METRIC_AIRTIME,
56 .vendor_ie = NULL, 57 .ie = NULL,
57 .vendor_ie_len = 0, 58 .ie_len = 0,
59 .is_secure = false,
58}; 60};
59 61
60int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev, 62int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
@@ -72,6 +74,10 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
72 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) 74 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT)
73 return -EOPNOTSUPP; 75 return -EOPNOTSUPP;
74 76
77 if (!(rdev->wiphy.flags & WIPHY_FLAG_MESH_AUTH) &&
78 setup->is_secure)
79 return -EOPNOTSUPP;
80
75 if (wdev->mesh_id_len) 81 if (wdev->mesh_id_len)
76 return -EALREADY; 82 return -EALREADY;
77 83
@@ -105,6 +111,19 @@ int cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
105 return err; 111 return err;
106} 112}
107 113
114void cfg80211_notify_new_peer_candidate(struct net_device *dev,
 115 const u8 *macaddr, const u8 *ie, u8 ie_len, gfp_t gfp)
116{
117 struct wireless_dev *wdev = dev->ieee80211_ptr;
118
119 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_MESH_POINT))
120 return;
121
122 nl80211_send_new_peer_candidate(wiphy_to_dev(wdev->wiphy), dev,
123 macaddr, ie, ie_len, gfp);
124}
125EXPORT_SYMBOL(cfg80211_notify_new_peer_candidate);
126
108static int __cfg80211_leave_mesh(struct cfg80211_registered_device *rdev, 127static int __cfg80211_leave_mesh(struct cfg80211_registered_device *rdev,
109 struct net_device *dev) 128 struct net_device *dev)
110{ 129{
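Editor's note: a hedged driver-side sketch of the new export above — a mesh driver that sees a beacon from an unknown neighbour reports it so userspace (with WIPHY_FLAG_MESH_AUTH set) can decide whether to authenticate. report_peer_candidate() and its arguments are illustrative, not from the patch:

```c
#include <net/cfg80211.h>

/* Illustrative only: forward an unknown mesh neighbour to userspace.
 * 'peer_addr' is the SA of the received beacon, 'ies' its information
 * elements; GFP_ATOMIC because beacons arrive in softirq context. */
static void report_peer_candidate(struct net_device *dev,
				  const u8 *peer_addr,
				  const u8 *ies, u8 ies_len)
{
	cfg80211_notify_new_peer_candidate(dev, peer_addr,
					   ies, ies_len, GFP_ATOMIC);
}
```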
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index aa5df8865ff7..493b939970cd 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -770,6 +770,15 @@ void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr,
770} 770}
771EXPORT_SYMBOL(cfg80211_new_sta); 771EXPORT_SYMBOL(cfg80211_new_sta);
772 772
773void cfg80211_del_sta(struct net_device *dev, const u8 *mac_addr, gfp_t gfp)
774{
775 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
776 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
777
778 nl80211_send_sta_del_event(rdev, dev, mac_addr, gfp);
779}
780EXPORT_SYMBOL(cfg80211_del_sta);
781
773struct cfg80211_mgmt_registration { 782struct cfg80211_mgmt_registration {
774 struct list_head list; 783 struct list_head list;
775 784
@@ -954,6 +963,16 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
954 if (memcmp(mgmt->bssid, dev->dev_addr, ETH_ALEN)) 963 if (memcmp(mgmt->bssid, dev->dev_addr, ETH_ALEN))
955 err = -EINVAL; 964 err = -EINVAL;
956 break; 965 break;
966 case NL80211_IFTYPE_MESH_POINT:
967 if (memcmp(mgmt->sa, mgmt->bssid, ETH_ALEN)) {
968 err = -EINVAL;
969 break;
970 }
971 /*
 972 * the check for the mesh DA must be done by the
 973 * driver, as cfg80211 doesn't track stations
974 */
975 break;
957 default: 976 default:
958 err = -EOPNOTSUPP; 977 err = -EOPNOTSUPP;
959 break; 978 break;
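Editor's note: cfg80211_del_sta() completes the pair with the existing cfg80211_new_sta(), so AP-mode drivers can tell userspace when a station goes away, not just when it appears. A sketch of the intended call sites, with hypothetical driver hook names:

```c
#include <net/cfg80211.h>

/* Hypothetical driver hooks showing the intended symmetry. */
static void demo_sta_associated(struct net_device *dev, const u8 *addr,
				struct station_info *sinfo)
{
	cfg80211_new_sta(dev, addr, sinfo, GFP_KERNEL);
}

static void demo_sta_gone(struct net_device *dev, const u8 *addr)
{
	/* emits NL80211_CMD_DEL_STATION to the mlme multicast group */
	cfg80211_del_sta(dev, addr, GFP_KERNEL);
}
```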
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 4ebce4284e9d..2222ce08ee91 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -124,6 +124,7 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
124 [NL80211_ATTR_BSS_HT_OPMODE] = { .type = NLA_U16 }, 124 [NL80211_ATTR_BSS_HT_OPMODE] = { .type = NLA_U16 },
125 125
126 [NL80211_ATTR_MESH_CONFIG] = { .type = NLA_NESTED }, 126 [NL80211_ATTR_MESH_CONFIG] = { .type = NLA_NESTED },
127 [NL80211_ATTR_SUPPORT_MESH_AUTH] = { .type = NLA_FLAG },
127 128
128 [NL80211_ATTR_HT_CAPABILITY] = { .type = NLA_BINARY, 129 [NL80211_ATTR_HT_CAPABILITY] = { .type = NLA_BINARY,
129 .len = NL80211_HT_CAPABILITY_LEN }, 130 .len = NL80211_HT_CAPABILITY_LEN },
@@ -172,6 +173,9 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
172 [NL80211_ATTR_MCAST_RATE] = { .type = NLA_U32 }, 173 [NL80211_ATTR_MCAST_RATE] = { .type = NLA_U32 },
173 [NL80211_ATTR_OFFCHANNEL_TX_OK] = { .type = NLA_FLAG }, 174 [NL80211_ATTR_OFFCHANNEL_TX_OK] = { .type = NLA_FLAG },
174 [NL80211_ATTR_KEY_DEFAULT_TYPES] = { .type = NLA_NESTED }, 175 [NL80211_ATTR_KEY_DEFAULT_TYPES] = { .type = NLA_NESTED },
176 [NL80211_ATTR_WOWLAN_TRIGGERS] = { .type = NLA_NESTED },
177 [NL80211_ATTR_STA_PLINK_STATE] = { .type = NLA_U8 },
178 [NL80211_ATTR_SCHED_SCAN_INTERVAL] = { .type = NLA_U32 },
175}; 179};
176 180
177/* policy for the key attributes */ 181/* policy for the key attributes */
@@ -193,6 +197,15 @@ nl80211_key_default_policy[NUM_NL80211_KEY_DEFAULT_TYPES] = {
193 [NL80211_KEY_DEFAULT_TYPE_MULTICAST] = { .type = NLA_FLAG }, 197 [NL80211_KEY_DEFAULT_TYPE_MULTICAST] = { .type = NLA_FLAG },
194}; 198};
195 199
200/* policy for WoWLAN attributes */
201static const struct nla_policy
202nl80211_wowlan_policy[NUM_NL80211_WOWLAN_TRIG] = {
203 [NL80211_WOWLAN_TRIG_ANY] = { .type = NLA_FLAG },
204 [NL80211_WOWLAN_TRIG_DISCONNECT] = { .type = NLA_FLAG },
205 [NL80211_WOWLAN_TRIG_MAGIC_PKT] = { .type = NLA_FLAG },
206 [NL80211_WOWLAN_TRIG_PKT_PATTERN] = { .type = NLA_NESTED },
207};
208
196/* ifidx get helper */ 209/* ifidx get helper */
197static int nl80211_get_ifidx(struct netlink_callback *cb) 210static int nl80211_get_ifidx(struct netlink_callback *cb)
198{ 211{
@@ -533,6 +546,7 @@ static int nl80211_key_allowed(struct wireless_dev *wdev)
533 case NL80211_IFTYPE_AP: 546 case NL80211_IFTYPE_AP:
534 case NL80211_IFTYPE_AP_VLAN: 547 case NL80211_IFTYPE_AP_VLAN:
535 case NL80211_IFTYPE_P2P_GO: 548 case NL80211_IFTYPE_P2P_GO:
549 case NL80211_IFTYPE_MESH_POINT:
536 break; 550 break;
537 case NL80211_IFTYPE_ADHOC: 551 case NL80211_IFTYPE_ADHOC:
538 if (!wdev->current_bss) 552 if (!wdev->current_bss)
@@ -550,6 +564,88 @@ static int nl80211_key_allowed(struct wireless_dev *wdev)
550 return 0; 564 return 0;
551} 565}
552 566
567static int nl80211_put_iftypes(struct sk_buff *msg, u32 attr, u16 ifmodes)
568{
569 struct nlattr *nl_modes = nla_nest_start(msg, attr);
570 int i;
571
572 if (!nl_modes)
573 goto nla_put_failure;
574
575 i = 0;
576 while (ifmodes) {
577 if (ifmodes & 1)
578 NLA_PUT_FLAG(msg, i);
579 ifmodes >>= 1;
580 i++;
581 }
582
583 nla_nest_end(msg, nl_modes);
584 return 0;
585
586nla_put_failure:
587 return -ENOBUFS;
588}
589
590static int nl80211_put_iface_combinations(struct wiphy *wiphy,
591 struct sk_buff *msg)
592{
593 struct nlattr *nl_combis;
594 int i, j;
595
596 nl_combis = nla_nest_start(msg,
597 NL80211_ATTR_INTERFACE_COMBINATIONS);
598 if (!nl_combis)
599 goto nla_put_failure;
600
601 for (i = 0; i < wiphy->n_iface_combinations; i++) {
602 const struct ieee80211_iface_combination *c;
603 struct nlattr *nl_combi, *nl_limits;
604
605 c = &wiphy->iface_combinations[i];
606
607 nl_combi = nla_nest_start(msg, i + 1);
608 if (!nl_combi)
609 goto nla_put_failure;
610
611 nl_limits = nla_nest_start(msg, NL80211_IFACE_COMB_LIMITS);
612 if (!nl_limits)
613 goto nla_put_failure;
614
615 for (j = 0; j < c->n_limits; j++) {
616 struct nlattr *nl_limit;
617
618 nl_limit = nla_nest_start(msg, j + 1);
619 if (!nl_limit)
620 goto nla_put_failure;
621 NLA_PUT_U32(msg, NL80211_IFACE_LIMIT_MAX,
622 c->limits[j].max);
623 if (nl80211_put_iftypes(msg, NL80211_IFACE_LIMIT_TYPES,
624 c->limits[j].types))
625 goto nla_put_failure;
626 nla_nest_end(msg, nl_limit);
627 }
628
629 nla_nest_end(msg, nl_limits);
630
631 if (c->beacon_int_infra_match)
632 NLA_PUT_FLAG(msg,
633 NL80211_IFACE_COMB_STA_AP_BI_MATCH);
634 NLA_PUT_U32(msg, NL80211_IFACE_COMB_NUM_CHANNELS,
635 c->num_different_channels);
636 NLA_PUT_U32(msg, NL80211_IFACE_COMB_MAXNUM,
637 c->max_interfaces);
638
639 nla_nest_end(msg, nl_combi);
640 }
641
642 nla_nest_end(msg, nl_combis);
643
644 return 0;
645nla_put_failure:
646 return -ENOBUFS;
647}
648
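Editor's note: what the dump code above serializes comes straight from the wiphy. A hedged sketch of the driver side, using only the field names this patch reads (limits, n_limits, max_interfaces, num_different_channels, beacon_int_infra_match); the demo_* names and values are invented:

```c
#include <net/cfg80211.h>

/* One allowed combination: a station plus up to two AP/P2P-GO
 * interfaces, all on one channel, with matching beacon intervals. */
static const struct ieee80211_iface_limit demo_limits[] = {
	{ .max = 1, .types = BIT(NL80211_IFTYPE_STATION) },
	{ .max = 2, .types = BIT(NL80211_IFTYPE_AP) |
			     BIT(NL80211_IFTYPE_P2P_GO) },
};

static const struct ieee80211_iface_combination demo_combi = {
	.limits = demo_limits,
	.n_limits = ARRAY_SIZE(demo_limits),
	.max_interfaces = 3,
	.num_different_channels = 1,
	.beacon_int_infra_match = true,
};

/* in the driver's wiphy setup:
 *	wiphy->iface_combinations = &demo_combi;
 *	wiphy->n_iface_combinations = 1;
 */
```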
553static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, 649static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
554 struct cfg80211_registered_device *dev) 650 struct cfg80211_registered_device *dev)
555{ 651{
@@ -557,13 +653,11 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
557 struct nlattr *nl_bands, *nl_band; 653 struct nlattr *nl_bands, *nl_band;
558 struct nlattr *nl_freqs, *nl_freq; 654 struct nlattr *nl_freqs, *nl_freq;
559 struct nlattr *nl_rates, *nl_rate; 655 struct nlattr *nl_rates, *nl_rate;
560 struct nlattr *nl_modes;
561 struct nlattr *nl_cmds; 656 struct nlattr *nl_cmds;
562 enum ieee80211_band band; 657 enum ieee80211_band band;
563 struct ieee80211_channel *chan; 658 struct ieee80211_channel *chan;
564 struct ieee80211_rate *rate; 659 struct ieee80211_rate *rate;
565 int i; 660 int i;
566 u16 ifmodes = dev->wiphy.interface_modes;
567 const struct ieee80211_txrx_stypes *mgmt_stypes = 661 const struct ieee80211_txrx_stypes *mgmt_stypes =
568 dev->wiphy.mgmt_stypes; 662 dev->wiphy.mgmt_stypes;
569 663
@@ -594,6 +688,8 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
594 688
595 if (dev->wiphy.flags & WIPHY_FLAG_IBSS_RSN) 689 if (dev->wiphy.flags & WIPHY_FLAG_IBSS_RSN)
596 NLA_PUT_FLAG(msg, NL80211_ATTR_SUPPORT_IBSS_RSN); 690 NLA_PUT_FLAG(msg, NL80211_ATTR_SUPPORT_IBSS_RSN);
691 if (dev->wiphy.flags & WIPHY_FLAG_MESH_AUTH)
692 NLA_PUT_FLAG(msg, NL80211_ATTR_SUPPORT_MESH_AUTH);
597 693
598 NLA_PUT(msg, NL80211_ATTR_CIPHER_SUITES, 694 NLA_PUT(msg, NL80211_ATTR_CIPHER_SUITES,
599 sizeof(u32) * dev->wiphy.n_cipher_suites, 695 sizeof(u32) * dev->wiphy.n_cipher_suites,
@@ -621,20 +717,10 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
621 } 717 }
622 } 718 }
623 719
624 nl_modes = nla_nest_start(msg, NL80211_ATTR_SUPPORTED_IFTYPES); 720 if (nl80211_put_iftypes(msg, NL80211_ATTR_SUPPORTED_IFTYPES,
625 if (!nl_modes) 721 dev->wiphy.interface_modes))
626 goto nla_put_failure; 722 goto nla_put_failure;
627 723
628 i = 0;
629 while (ifmodes) {
630 if (ifmodes & 1)
631 NLA_PUT_FLAG(msg, i);
632 ifmodes >>= 1;
633 i++;
634 }
635
636 nla_nest_end(msg, nl_modes);
637
638 nl_bands = nla_nest_start(msg, NL80211_ATTR_WIPHY_BANDS); 724 nl_bands = nla_nest_start(msg, NL80211_ATTR_WIPHY_BANDS);
639 if (!nl_bands) 725 if (!nl_bands)
640 goto nla_put_failure; 726 goto nla_put_failure;
@@ -746,6 +832,8 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
746 } 832 }
747 CMD(set_channel, SET_CHANNEL); 833 CMD(set_channel, SET_CHANNEL);
748 CMD(set_wds_peer, SET_WDS_PEER); 834 CMD(set_wds_peer, SET_WDS_PEER);
835 if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN)
836 CMD(sched_scan_start, START_SCHED_SCAN);
749 837
750#undef CMD 838#undef CMD
751 839
@@ -818,6 +906,42 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
818 nla_nest_end(msg, nl_ifs); 906 nla_nest_end(msg, nl_ifs);
819 } 907 }
820 908
909 if (dev->wiphy.wowlan.flags || dev->wiphy.wowlan.n_patterns) {
910 struct nlattr *nl_wowlan;
911
912 nl_wowlan = nla_nest_start(msg,
913 NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED);
914 if (!nl_wowlan)
915 goto nla_put_failure;
916
917 if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_ANY)
918 NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_ANY);
919 if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_DISCONNECT)
920 NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_DISCONNECT);
921 if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_MAGIC_PKT)
922 NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT);
923 if (dev->wiphy.wowlan.n_patterns) {
924 struct nl80211_wowlan_pattern_support pat = {
925 .max_patterns = dev->wiphy.wowlan.n_patterns,
926 .min_pattern_len =
927 dev->wiphy.wowlan.pattern_min_len,
928 .max_pattern_len =
929 dev->wiphy.wowlan.pattern_max_len,
930 };
931 NLA_PUT(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN,
932 sizeof(pat), &pat);
933 }
934
935 nla_nest_end(msg, nl_wowlan);
936 }
937
938 if (nl80211_put_iftypes(msg, NL80211_ATTR_SOFTWARE_IFTYPES,
939 dev->wiphy.software_iftypes))
940 goto nla_put_failure;
941
942 if (nl80211_put_iface_combinations(&dev->wiphy, msg))
943 goto nla_put_failure;
944
821 return genlmsg_end(msg, hdr); 945 return genlmsg_end(msg, hdr);
822 946
823 nla_put_failure: 947 nla_put_failure:
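Editor's note: the WoWLAN capability block above is only emitted when the driver fills in wiphy.wowlan. A hedged sketch of that driver side, using the fields this dump reads; the numbers are arbitrary examples:

```c
#include <net/cfg80211.h>

/* Illustrative capability advertisement; values are examples only. */
static void demo_setup_wowlan(struct wiphy *wiphy)
{
	wiphy->wowlan.flags = WIPHY_WOWLAN_ANY |
			      WIPHY_WOWLAN_DISCONNECT |
			      WIPHY_WOWLAN_MAGIC_PKT;
	wiphy->wowlan.n_patterns = 4;		/* up to 4 patterns */
	wiphy->wowlan.pattern_min_len = 1;	/* bytes */
	wiphy->wowlan.pattern_max_len = 128;	/* bytes */
}
```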
@@ -1679,14 +1803,6 @@ static int nl80211_set_key(struct sk_buff *skb, struct genl_info *info)
1679 if (err) 1803 if (err)
1680 goto out; 1804 goto out;
1681 1805
1682 if (!(rdev->wiphy.flags &
1683 WIPHY_FLAG_SUPPORTS_SEPARATE_DEFAULT_KEYS)) {
1684 if (!key.def_uni || !key.def_multi) {
1685 err = -EOPNOTSUPP;
1686 goto out;
1687 }
1688 }
1689
1690 err = rdev->ops->set_default_key(&rdev->wiphy, dev, key.idx, 1806 err = rdev->ops->set_default_key(&rdev->wiphy, dev, key.idx,
1691 key.def_uni, key.def_multi); 1807 key.def_uni, key.def_multi);
1692 1808
@@ -1837,8 +1953,9 @@ static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info)
1837 struct beacon_parameters *info); 1953 struct beacon_parameters *info);
1838 struct cfg80211_registered_device *rdev = info->user_ptr[0]; 1954 struct cfg80211_registered_device *rdev = info->user_ptr[0];
1839 struct net_device *dev = info->user_ptr[1]; 1955 struct net_device *dev = info->user_ptr[1];
1956 struct wireless_dev *wdev = dev->ieee80211_ptr;
1840 struct beacon_parameters params; 1957 struct beacon_parameters params;
1841 int haveinfo = 0; 1958 int haveinfo = 0, err;
1842 1959
1843 if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_BEACON_TAIL])) 1960 if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_BEACON_TAIL]))
1844 return -EINVAL; 1961 return -EINVAL;
@@ -1847,6 +1964,8 @@ static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info)
1847 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) 1964 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
1848 return -EOPNOTSUPP; 1965 return -EOPNOTSUPP;
1849 1966
1967 memset(&params, 0, sizeof(params));
1968
1850 switch (info->genlhdr->cmd) { 1969 switch (info->genlhdr->cmd) {
1851 case NL80211_CMD_NEW_BEACON: 1970 case NL80211_CMD_NEW_BEACON:
1852 /* these are required for NEW_BEACON */ 1971 /* these are required for NEW_BEACON */
@@ -1855,6 +1974,15 @@ static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info)
1855 !info->attrs[NL80211_ATTR_BEACON_HEAD]) 1974 !info->attrs[NL80211_ATTR_BEACON_HEAD])
1856 return -EINVAL; 1975 return -EINVAL;
1857 1976
1977 params.interval =
1978 nla_get_u32(info->attrs[NL80211_ATTR_BEACON_INTERVAL]);
1979 params.dtim_period =
1980 nla_get_u32(info->attrs[NL80211_ATTR_DTIM_PERIOD]);
1981
1982 err = cfg80211_validate_beacon_int(rdev, params.interval);
1983 if (err)
1984 return err;
1985
1858 call = rdev->ops->add_beacon; 1986 call = rdev->ops->add_beacon;
1859 break; 1987 break;
1860 case NL80211_CMD_SET_BEACON: 1988 case NL80211_CMD_SET_BEACON:
@@ -1868,20 +1996,6 @@ static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info)
1868 if (!call) 1996 if (!call)
1869 return -EOPNOTSUPP; 1997 return -EOPNOTSUPP;
1870 1998
1871 memset(&params, 0, sizeof(params));
1872
1873 if (info->attrs[NL80211_ATTR_BEACON_INTERVAL]) {
1874 params.interval =
1875 nla_get_u32(info->attrs[NL80211_ATTR_BEACON_INTERVAL]);
1876 haveinfo = 1;
1877 }
1878
1879 if (info->attrs[NL80211_ATTR_DTIM_PERIOD]) {
1880 params.dtim_period =
1881 nla_get_u32(info->attrs[NL80211_ATTR_DTIM_PERIOD]);
1882 haveinfo = 1;
1883 }
1884
1885 if (info->attrs[NL80211_ATTR_BEACON_HEAD]) { 1999 if (info->attrs[NL80211_ATTR_BEACON_HEAD]) {
1886 params.head = nla_data(info->attrs[NL80211_ATTR_BEACON_HEAD]); 2000 params.head = nla_data(info->attrs[NL80211_ATTR_BEACON_HEAD]);
1887 params.head_len = 2001 params.head_len =
@@ -1899,13 +2013,18 @@ static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info)
1899 if (!haveinfo) 2013 if (!haveinfo)
1900 return -EINVAL; 2014 return -EINVAL;
1901 2015
1902 return call(&rdev->wiphy, dev, &params); 2016 err = call(&rdev->wiphy, dev, &params);
2017 if (!err && params.interval)
2018 wdev->beacon_interval = params.interval;
2019 return err;
1903} 2020}
1904 2021
1905static int nl80211_del_beacon(struct sk_buff *skb, struct genl_info *info) 2022static int nl80211_del_beacon(struct sk_buff *skb, struct genl_info *info)
1906{ 2023{
1907 struct cfg80211_registered_device *rdev = info->user_ptr[0]; 2024 struct cfg80211_registered_device *rdev = info->user_ptr[0];
1908 struct net_device *dev = info->user_ptr[1]; 2025 struct net_device *dev = info->user_ptr[1];
2026 struct wireless_dev *wdev = dev->ieee80211_ptr;
2027 int err;
1909 2028
1910 if (!rdev->ops->del_beacon) 2029 if (!rdev->ops->del_beacon)
1911 return -EOPNOTSUPP; 2030 return -EOPNOTSUPP;
@@ -1914,7 +2033,10 @@ static int nl80211_del_beacon(struct sk_buff *skb, struct genl_info *info)
1914 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) 2033 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
1915 return -EOPNOTSUPP; 2034 return -EOPNOTSUPP;
1916 2035
1917 return rdev->ops->del_beacon(&rdev->wiphy, dev); 2036 err = rdev->ops->del_beacon(&rdev->wiphy, dev);
2037 if (!err)
2038 wdev->beacon_interval = 0;
2039 return err;
1918} 2040}
1919 2041
1920static const struct nla_policy sta_flags_policy[NL80211_STA_FLAG_MAX + 1] = { 2042static const struct nla_policy sta_flags_policy[NL80211_STA_FLAG_MAX + 1] = {
@@ -1922,6 +2044,7 @@ static const struct nla_policy sta_flags_policy[NL80211_STA_FLAG_MAX + 1] = {
1922 [NL80211_STA_FLAG_SHORT_PREAMBLE] = { .type = NLA_FLAG }, 2044 [NL80211_STA_FLAG_SHORT_PREAMBLE] = { .type = NLA_FLAG },
1923 [NL80211_STA_FLAG_WME] = { .type = NLA_FLAG }, 2045 [NL80211_STA_FLAG_WME] = { .type = NLA_FLAG },
1924 [NL80211_STA_FLAG_MFP] = { .type = NLA_FLAG }, 2046 [NL80211_STA_FLAG_MFP] = { .type = NLA_FLAG },
2047 [NL80211_STA_FLAG_AUTHENTICATED] = { .type = NLA_FLAG },
1925}; 2048};
1926 2049
1927static int parse_station_flags(struct genl_info *info, 2050static int parse_station_flags(struct genl_info *info,
@@ -2002,7 +2125,7 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
2002 const u8 *mac_addr, struct station_info *sinfo) 2125 const u8 *mac_addr, struct station_info *sinfo)
2003{ 2126{
2004 void *hdr; 2127 void *hdr;
2005 struct nlattr *sinfoattr; 2128 struct nlattr *sinfoattr, *bss_param;
2006 2129
2007 hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_STATION); 2130 hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_STATION);
2008 if (!hdr) 2131 if (!hdr)
@@ -2016,6 +2139,9 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
2016 sinfoattr = nla_nest_start(msg, NL80211_ATTR_STA_INFO); 2139 sinfoattr = nla_nest_start(msg, NL80211_ATTR_STA_INFO);
2017 if (!sinfoattr) 2140 if (!sinfoattr)
2018 goto nla_put_failure; 2141 goto nla_put_failure;
2142 if (sinfo->filled & STATION_INFO_CONNECTED_TIME)
2143 NLA_PUT_U32(msg, NL80211_STA_INFO_CONNECTED_TIME,
2144 sinfo->connected_time);
2019 if (sinfo->filled & STATION_INFO_INACTIVE_TIME) 2145 if (sinfo->filled & STATION_INFO_INACTIVE_TIME)
2020 NLA_PUT_U32(msg, NL80211_STA_INFO_INACTIVE_TIME, 2146 NLA_PUT_U32(msg, NL80211_STA_INFO_INACTIVE_TIME,
2021 sinfo->inactive_time); 2147 sinfo->inactive_time);
@@ -2062,6 +2188,25 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
2062 if (sinfo->filled & STATION_INFO_TX_FAILED) 2188 if (sinfo->filled & STATION_INFO_TX_FAILED)
2063 NLA_PUT_U32(msg, NL80211_STA_INFO_TX_FAILED, 2189 NLA_PUT_U32(msg, NL80211_STA_INFO_TX_FAILED,
2064 sinfo->tx_failed); 2190 sinfo->tx_failed);
2191 if (sinfo->filled & STATION_INFO_BSS_PARAM) {
2192 bss_param = nla_nest_start(msg, NL80211_STA_INFO_BSS_PARAM);
2193 if (!bss_param)
2194 goto nla_put_failure;
2195
2196 if (sinfo->bss_param.flags & BSS_PARAM_FLAGS_CTS_PROT)
2197 NLA_PUT_FLAG(msg, NL80211_STA_BSS_PARAM_CTS_PROT);
2198 if (sinfo->bss_param.flags & BSS_PARAM_FLAGS_SHORT_PREAMBLE)
2199 NLA_PUT_FLAG(msg, NL80211_STA_BSS_PARAM_SHORT_PREAMBLE);
2200 if (sinfo->bss_param.flags & BSS_PARAM_FLAGS_SHORT_SLOT_TIME)
2201 NLA_PUT_FLAG(msg,
2202 NL80211_STA_BSS_PARAM_SHORT_SLOT_TIME);
2203 NLA_PUT_U8(msg, NL80211_STA_BSS_PARAM_DTIM_PERIOD,
2204 sinfo->bss_param.dtim_period);
2205 NLA_PUT_U16(msg, NL80211_STA_BSS_PARAM_BEACON_INTERVAL,
2206 sinfo->bss_param.beacon_interval);
2207
2208 nla_nest_end(msg, bss_param);
2209 }
2065 nla_nest_end(msg, sinfoattr); 2210 nla_nest_end(msg, sinfoattr);
2066 2211
2067 return genlmsg_end(msg, hdr); 2212 return genlmsg_end(msg, hdr);
@@ -2190,6 +2335,7 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
2190 memset(&params, 0, sizeof(params)); 2335 memset(&params, 0, sizeof(params));
2191 2336
2192 params.listen_interval = -1; 2337 params.listen_interval = -1;
2338 params.plink_state = -1;
2193 2339
2194 if (info->attrs[NL80211_ATTR_STA_AID]) 2340 if (info->attrs[NL80211_ATTR_STA_AID])
2195 return -EINVAL; 2341 return -EINVAL;
@@ -2221,6 +2367,10 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
2221 params.plink_action = 2367 params.plink_action =
2222 nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_ACTION]); 2368 nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_ACTION]);
2223 2369
2370 if (info->attrs[NL80211_ATTR_STA_PLINK_STATE])
2371 params.plink_state =
2372 nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_STATE]);
2373
2224 err = get_vlan(info, rdev, &params.vlan); 2374 err = get_vlan(info, rdev, &params.vlan);
2225 if (err) 2375 if (err)
2226 goto out; 2376 goto out;
@@ -2260,9 +2410,10 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
2260 err = -EINVAL; 2410 err = -EINVAL;
2261 if (params.listen_interval >= 0) 2411 if (params.listen_interval >= 0)
2262 err = -EINVAL; 2412 err = -EINVAL;
2263 if (params.supported_rates) 2413 if (params.sta_flags_mask &
2264 err = -EINVAL; 2414 ~(BIT(NL80211_STA_FLAG_AUTHENTICATED) |
2265 if (params.sta_flags_mask) 2415 BIT(NL80211_STA_FLAG_MFP) |
2416 BIT(NL80211_STA_FLAG_AUTHORIZED)))
2266 err = -EINVAL; 2417 err = -EINVAL;
2267 break; 2418 break;
2268 default: 2419 default:
@@ -2324,11 +2475,16 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
2324 params.ht_capa = 2475 params.ht_capa =
2325 nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]); 2476 nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]);
2326 2477
2478 if (info->attrs[NL80211_ATTR_STA_PLINK_ACTION])
2479 params.plink_action =
2480 nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_ACTION]);
2481
2327 if (parse_station_flags(info, &params)) 2482 if (parse_station_flags(info, &params))
2328 return -EINVAL; 2483 return -EINVAL;
2329 2484
2330 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP && 2485 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
2331 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN && 2486 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN &&
2487 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT &&
2332 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) 2488 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
2333 return -EINVAL; 2489 return -EINVAL;
2334 2490
@@ -2804,8 +2960,10 @@ static const struct nla_policy
2804 nl80211_mesh_setup_params_policy[NL80211_MESH_SETUP_ATTR_MAX+1] = { 2960 nl80211_mesh_setup_params_policy[NL80211_MESH_SETUP_ATTR_MAX+1] = {
2805 [NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL] = { .type = NLA_U8 }, 2961 [NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL] = { .type = NLA_U8 },
2806 [NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC] = { .type = NLA_U8 }, 2962 [NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC] = { .type = NLA_U8 },
2807 [NL80211_MESH_SETUP_VENDOR_PATH_SEL_IE] = { .type = NLA_BINARY, 2963 [NL80211_MESH_SETUP_USERSPACE_AUTH] = { .type = NLA_FLAG },
2964 [NL80211_MESH_SETUP_IE] = { .type = NLA_BINARY,
2808 .len = IEEE80211_MAX_DATA_LEN }, 2965 .len = IEEE80211_MAX_DATA_LEN },
2966 [NL80211_MESH_SETUP_USERSPACE_AMPE] = { .type = NLA_FLAG },
2809}; 2967};
2810 2968
2811static int nl80211_parse_mesh_config(struct genl_info *info, 2969static int nl80211_parse_mesh_config(struct genl_info *info,
@@ -2906,14 +3064,17 @@ static int nl80211_parse_mesh_setup(struct genl_info *info,
2906 IEEE80211_PATH_METRIC_VENDOR : 3064 IEEE80211_PATH_METRIC_VENDOR :
2907 IEEE80211_PATH_METRIC_AIRTIME; 3065 IEEE80211_PATH_METRIC_AIRTIME;
2908 3066
2909 if (tb[NL80211_MESH_SETUP_VENDOR_PATH_SEL_IE]) { 3067
3068 if (tb[NL80211_MESH_SETUP_IE]) {
2910 struct nlattr *ieattr = 3069 struct nlattr *ieattr =
2911 tb[NL80211_MESH_SETUP_VENDOR_PATH_SEL_IE]; 3070 tb[NL80211_MESH_SETUP_IE];
2912 if (!is_valid_ie_attr(ieattr)) 3071 if (!is_valid_ie_attr(ieattr))
2913 return -EINVAL; 3072 return -EINVAL;
2914 setup->vendor_ie = nla_data(ieattr); 3073 setup->ie = nla_data(ieattr);
2915 setup->vendor_ie_len = nla_len(ieattr); 3074 setup->ie_len = nla_len(ieattr);
2916 } 3075 }
3076 setup->is_authenticated = nla_get_flag(tb[NL80211_MESH_SETUP_USERSPACE_AUTH]);
3077 setup->is_secure = nla_get_flag(tb[NL80211_MESH_SETUP_USERSPACE_AMPE]);
2917 3078
2918 return 0; 3079 return 0;
2919} 3080}
@@ -3282,6 +3443,188 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
3282 return err; 3443 return err;
3283} 3444}
3284 3445
3446static int nl80211_start_sched_scan(struct sk_buff *skb,
3447 struct genl_info *info)
3448{
3449 struct cfg80211_sched_scan_request *request;
3450 struct cfg80211_registered_device *rdev = info->user_ptr[0];
3451 struct net_device *dev = info->user_ptr[1];
3452 struct cfg80211_ssid *ssid;
3453 struct ieee80211_channel *channel;
3454 struct nlattr *attr;
3455 struct wiphy *wiphy;
3456 int err, tmp, n_ssids = 0, n_channels, i;
3457 u32 interval;
3458 enum ieee80211_band band;
3459 size_t ie_len;
3460
3461 if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) ||
3462 !rdev->ops->sched_scan_start)
3463 return -EOPNOTSUPP;
3464
3465 if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
3466 return -EINVAL;
3467
3468 if (rdev->sched_scan_req)
3469 return -EINPROGRESS;
3470
3471 if (!info->attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL])
3472 return -EINVAL;
3473
3474 interval = nla_get_u32(info->attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL]);
3475 if (interval == 0)
3476 return -EINVAL;
3477
3478 wiphy = &rdev->wiphy;
3479
3480 if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) {
3481 n_channels = validate_scan_freqs(
3482 info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]);
3483 if (!n_channels)
3484 return -EINVAL;
3485 } else {
3486 n_channels = 0;
3487
3488 for (band = 0; band < IEEE80211_NUM_BANDS; band++)
3489 if (wiphy->bands[band])
3490 n_channels += wiphy->bands[band]->n_channels;
3491 }
3492
3493 if (info->attrs[NL80211_ATTR_SCAN_SSIDS])
3494 nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS],
3495 tmp)
3496 n_ssids++;
3497
3498 if (n_ssids > wiphy->max_scan_ssids)
3499 return -EINVAL;
3500
3501 if (info->attrs[NL80211_ATTR_IE])
3502 ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
3503 else
3504 ie_len = 0;
3505
3506 if (ie_len > wiphy->max_scan_ie_len)
3507 return -EINVAL;
3508
3509 request = kzalloc(sizeof(*request)
3510 + sizeof(*ssid) * n_ssids
3511 + sizeof(channel) * n_channels
3512 + ie_len, GFP_KERNEL);
3513 if (!request)
3514 return -ENOMEM;
3515
3516 if (n_ssids)
3517 request->ssids = (void *)&request->channels[n_channels];
3518 request->n_ssids = n_ssids;
3519 if (ie_len) {
3520 if (request->ssids)
3521 request->ie = (void *)(request->ssids + n_ssids);
3522 else
3523 request->ie = (void *)(request->channels + n_channels);
3524 }
3525
3526 i = 0;
3527 if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) {
3528 /* user specified, bail out if channel not found */
3529 nla_for_each_nested(attr,
3530 info->attrs[NL80211_ATTR_SCAN_FREQUENCIES],
3531 tmp) {
3532 struct ieee80211_channel *chan;
3533
3534 chan = ieee80211_get_channel(wiphy, nla_get_u32(attr));
3535
3536 if (!chan) {
3537 err = -EINVAL;
3538 goto out_free;
3539 }
3540
3541 /* ignore disabled channels */
3542 if (chan->flags & IEEE80211_CHAN_DISABLED)
3543 continue;
3544
3545 request->channels[i] = chan;
3546 i++;
3547 }
3548 } else {
3549 /* all channels */
3550 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
3551 int j;
3552 if (!wiphy->bands[band])
3553 continue;
3554 for (j = 0; j < wiphy->bands[band]->n_channels; j++) {
3555 struct ieee80211_channel *chan;
3556
3557 chan = &wiphy->bands[band]->channels[j];
3558
3559 if (chan->flags & IEEE80211_CHAN_DISABLED)
3560 continue;
3561
3562 request->channels[i] = chan;
3563 i++;
3564 }
3565 }
3566 }
3567
3568 if (!i) {
3569 err = -EINVAL;
3570 goto out_free;
3571 }
3572
3573 request->n_channels = i;
3574
3575 i = 0;
3576 if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) {
3577 nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS],
3578 tmp) {
 3579 if (nla_len(attr) >
 3580 IEEE80211_MAX_SSID_LEN) {
3581 err = -EINVAL;
3582 goto out_free;
3583 }
3584 memcpy(request->ssids[i].ssid, nla_data(attr),
3585 nla_len(attr));
3586 request->ssids[i].ssid_len = nla_len(attr);
3587 i++;
3588 }
3589 }
3590
3591 if (info->attrs[NL80211_ATTR_IE]) {
3592 request->ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
3593 memcpy((void *)request->ie,
3594 nla_data(info->attrs[NL80211_ATTR_IE]),
3595 request->ie_len);
3596 }
3597
3598 request->dev = dev;
3599 request->wiphy = &rdev->wiphy;
3600 request->interval = interval;
3601
3602 err = rdev->ops->sched_scan_start(&rdev->wiphy, dev, request);
3603 if (!err) {
3604 rdev->sched_scan_req = request;
3605 nl80211_send_sched_scan(rdev, dev,
3606 NL80211_CMD_START_SCHED_SCAN);
3607 goto out;
3608 }
3609
3610out_free:
3611 kfree(request);
3612out:
3613 return err;
3614}
3615
3616static int nl80211_stop_sched_scan(struct sk_buff *skb,
3617 struct genl_info *info)
3618{
3619 struct cfg80211_registered_device *rdev = info->user_ptr[0];
3620
3621 if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) ||
3622 !rdev->ops->sched_scan_stop)
3623 return -EOPNOTSUPP;
3624
3625 return __cfg80211_stop_sched_scan(rdev, false);
3626}
3627
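Editor's note: for the two handlers above to be reachable, a driver sets WIPHY_FLAG_SUPPORTS_SCHED_SCAN and provides sched_scan_start/stop ops with the signatures used here. A hedged driver-side sketch (demo_* names are invented; the firmware programming is a placeholder comment):

```c
#include <net/cfg80211.h>

/* Hardware runs the scan on its own interval; the driver programs it
 * once and later reports result batches back to cfg80211. */
static int demo_sched_scan_start(struct wiphy *wiphy,
				 struct net_device *dev,
				 struct cfg80211_sched_scan_request *req)
{
	/* program firmware with req->interval, req->n_ssids/req->ssids,
	 * req->n_channels/req->channels and req->ie/req->ie_len here */
	return 0;
}

static int demo_sched_scan_stop(struct wiphy *wiphy,
				struct net_device *dev)
{
	/* stop firmware scanning; cfg80211 frees rdev->sched_scan_req */
	return 0;
}

/* later, from the driver's completion handler:
 *	cfg80211_sched_scan_results(wiphy);	a result batch is ready
 *	cfg80211_sched_scan_stopped(wiphy);	hardware stopped on its own
 */
```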
3285static int nl80211_send_bss(struct sk_buff *msg, u32 pid, u32 seq, int flags, 3628static int nl80211_send_bss(struct sk_buff *msg, u32 pid, u32 seq, int flags,
3286 struct cfg80211_registered_device *rdev, 3629 struct cfg80211_registered_device *rdev,
3287 struct wireless_dev *wdev, 3630 struct wireless_dev *wdev,
@@ -4780,6 +5123,194 @@ static int nl80211_leave_mesh(struct sk_buff *skb, struct genl_info *info)
4780 return cfg80211_leave_mesh(rdev, dev); 5123 return cfg80211_leave_mesh(rdev, dev);
4781} 5124}
4782 5125
5126static int nl80211_get_wowlan(struct sk_buff *skb, struct genl_info *info)
5127{
5128 struct cfg80211_registered_device *rdev = info->user_ptr[0];
5129 struct sk_buff *msg;
5130 void *hdr;
5131
5132 if (!rdev->wiphy.wowlan.flags && !rdev->wiphy.wowlan.n_patterns)
5133 return -EOPNOTSUPP;
5134
5135 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
5136 if (!msg)
5137 return -ENOMEM;
5138
5139 hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
5140 NL80211_CMD_GET_WOWLAN);
5141 if (!hdr)
5142 goto nla_put_failure;
5143
5144 if (rdev->wowlan) {
5145 struct nlattr *nl_wowlan;
5146
5147 nl_wowlan = nla_nest_start(msg, NL80211_ATTR_WOWLAN_TRIGGERS);
5148 if (!nl_wowlan)
5149 goto nla_put_failure;
5150
5151 if (rdev->wowlan->any)
5152 NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_ANY);
5153 if (rdev->wowlan->disconnect)
5154 NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_DISCONNECT);
5155 if (rdev->wowlan->magic_pkt)
5156 NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT);
5157 if (rdev->wowlan->n_patterns) {
5158 struct nlattr *nl_pats, *nl_pat;
5159 int i, pat_len;
5160
5161 nl_pats = nla_nest_start(msg,
5162 NL80211_WOWLAN_TRIG_PKT_PATTERN);
5163 if (!nl_pats)
5164 goto nla_put_failure;
5165
5166 for (i = 0; i < rdev->wowlan->n_patterns; i++) {
5167 nl_pat = nla_nest_start(msg, i + 1);
5168 if (!nl_pat)
5169 goto nla_put_failure;
5170 pat_len = rdev->wowlan->patterns[i].pattern_len;
5171 NLA_PUT(msg, NL80211_WOWLAN_PKTPAT_MASK,
5172 DIV_ROUND_UP(pat_len, 8),
5173 rdev->wowlan->patterns[i].mask);
5174 NLA_PUT(msg, NL80211_WOWLAN_PKTPAT_PATTERN,
5175 pat_len,
5176 rdev->wowlan->patterns[i].pattern);
5177 nla_nest_end(msg, nl_pat);
5178 }
5179 nla_nest_end(msg, nl_pats);
5180 }
5181
5182 nla_nest_end(msg, nl_wowlan);
5183 }
5184
5185 genlmsg_end(msg, hdr);
5186 return genlmsg_reply(msg, info);
5187
5188nla_put_failure:
5189 nlmsg_free(msg);
5190 return -ENOBUFS;
5191}
5192
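Editor's note: GET_WOWLAN above re-encodes stored patterns exactly as SET_WOWLAN (next) parses them — the raw pattern bytes plus a bitmap with one "must match" bit per byte. A hedged sketch of building such a pair on the configuring side; build_wowlan_pattern() is a hypothetical helper:

```c
#include <string.h>

/* Hypothetical helper: fill the pattern/mask pair carried in
 * NL80211_WOWLAN_PKTPAT_PATTERN / NL80211_WOWLAN_PKTPAT_MASK.
 * care[i] != 0 means byte i of the packet must equal match[i];
 * the mask must be (len + 7) / 8 bytes, low-order bit first. */
static void build_wowlan_pattern(const unsigned char *match,
				 const int *care, int len,
				 unsigned char *pattern,
				 unsigned char *mask)
{
	int i;

	memset(mask, 0, (len + 7) / 8);
	for (i = 0; i < len; i++) {
		pattern[i] = match[i];
		if (care[i])
			mask[i / 8] |= 1 << (i % 8);
	}
}
```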
5193static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
5194{
5195 struct cfg80211_registered_device *rdev = info->user_ptr[0];
5196 struct nlattr *tb[NUM_NL80211_WOWLAN_TRIG];
5197 struct cfg80211_wowlan no_triggers = {};
5198 struct cfg80211_wowlan new_triggers = {};
5199 struct wiphy_wowlan_support *wowlan = &rdev->wiphy.wowlan;
5200 int err, i;
5201
5202 if (!rdev->wiphy.wowlan.flags && !rdev->wiphy.wowlan.n_patterns)
5203 return -EOPNOTSUPP;
5204
5205 if (!info->attrs[NL80211_ATTR_WOWLAN_TRIGGERS])
5206 goto no_triggers;
5207
5208 err = nla_parse(tb, MAX_NL80211_WOWLAN_TRIG,
5209 nla_data(info->attrs[NL80211_ATTR_WOWLAN_TRIGGERS]),
5210 nla_len(info->attrs[NL80211_ATTR_WOWLAN_TRIGGERS]),
5211 nl80211_wowlan_policy);
5212 if (err)
5213 return err;
5214
5215 if (tb[NL80211_WOWLAN_TRIG_ANY]) {
5216 if (!(wowlan->flags & WIPHY_WOWLAN_ANY))
5217 return -EINVAL;
5218 new_triggers.any = true;
5219 }
5220
5221 if (tb[NL80211_WOWLAN_TRIG_DISCONNECT]) {
5222 if (!(wowlan->flags & WIPHY_WOWLAN_DISCONNECT))
5223 return -EINVAL;
5224 new_triggers.disconnect = true;
5225 }
5226
5227 if (tb[NL80211_WOWLAN_TRIG_MAGIC_PKT]) {
5228 if (!(wowlan->flags & WIPHY_WOWLAN_MAGIC_PKT))
5229 return -EINVAL;
5230 new_triggers.magic_pkt = true;
5231 }
5232
5233 if (tb[NL80211_WOWLAN_TRIG_PKT_PATTERN]) {
5234 struct nlattr *pat;
5235 int n_patterns = 0;
5236 int rem, pat_len, mask_len;
5237 struct nlattr *pat_tb[NUM_NL80211_WOWLAN_PKTPAT];
5238
5239 nla_for_each_nested(pat, tb[NL80211_WOWLAN_TRIG_PKT_PATTERN],
5240 rem)
5241 n_patterns++;
5242 if (n_patterns > wowlan->n_patterns)
5243 return -EINVAL;
5244
5245 new_triggers.patterns = kcalloc(n_patterns,
5246 sizeof(new_triggers.patterns[0]),
5247 GFP_KERNEL);
5248 if (!new_triggers.patterns)
5249 return -ENOMEM;
5250
5251 new_triggers.n_patterns = n_patterns;
5252 i = 0;
5253
5254 nla_for_each_nested(pat, tb[NL80211_WOWLAN_TRIG_PKT_PATTERN],
5255 rem) {
5256 nla_parse(pat_tb, MAX_NL80211_WOWLAN_PKTPAT,
5257 nla_data(pat), nla_len(pat), NULL);
5258 err = -EINVAL;
5259 if (!pat_tb[NL80211_WOWLAN_PKTPAT_MASK] ||
5260 !pat_tb[NL80211_WOWLAN_PKTPAT_PATTERN])
5261 goto error;
5262 pat_len = nla_len(pat_tb[NL80211_WOWLAN_PKTPAT_PATTERN]);
5263 mask_len = DIV_ROUND_UP(pat_len, 8);
5264 if (nla_len(pat_tb[NL80211_WOWLAN_PKTPAT_MASK]) !=
5265 mask_len)
5266 goto error;
5267 if (pat_len > wowlan->pattern_max_len ||
5268 pat_len < wowlan->pattern_min_len)
5269 goto error;
5270
5271 new_triggers.patterns[i].mask =
5272 kmalloc(mask_len + pat_len, GFP_KERNEL);
5273 if (!new_triggers.patterns[i].mask) {
5274 err = -ENOMEM;
5275 goto error;
5276 }
5277 new_triggers.patterns[i].pattern =
5278 new_triggers.patterns[i].mask + mask_len;
5279 memcpy(new_triggers.patterns[i].mask,
5280 nla_data(pat_tb[NL80211_WOWLAN_PKTPAT_MASK]),
5281 mask_len);
5282 new_triggers.patterns[i].pattern_len = pat_len;
5283 memcpy(new_triggers.patterns[i].pattern,
5284 nla_data(pat_tb[NL80211_WOWLAN_PKTPAT_PATTERN]),
5285 pat_len);
5286 i++;
5287 }
5288 }
5289
5290 if (memcmp(&new_triggers, &no_triggers, sizeof(new_triggers))) {
5291 struct cfg80211_wowlan *ntrig;
5292 ntrig = kmemdup(&new_triggers, sizeof(new_triggers),
5293 GFP_KERNEL);
5294 if (!ntrig) {
5295 err = -ENOMEM;
5296 goto error;
5297 }
5298 cfg80211_rdev_free_wowlan(rdev);
5299 rdev->wowlan = ntrig;
5300 } else {
5301 no_triggers:
5302 cfg80211_rdev_free_wowlan(rdev);
5303 rdev->wowlan = NULL;
5304 }
5305
5306 return 0;
5307 error:
5308 for (i = 0; i < new_triggers.n_patterns; i++)
5309 kfree(new_triggers.patterns[i].mask);
5310 kfree(new_triggers.patterns);
5311 return err;
5312}
5313
4783#define NL80211_FLAG_NEED_WIPHY 0x01 5314#define NL80211_FLAG_NEED_WIPHY 0x01
4784#define NL80211_FLAG_NEED_NETDEV 0x02 5315#define NL80211_FLAG_NEED_NETDEV 0x02
4785#define NL80211_FLAG_NEED_RTNL 0x04 5316#define NL80211_FLAG_NEED_RTNL 0x04
@@ -5064,6 +5595,22 @@ static struct genl_ops nl80211_ops[] = {
5064 .dumpit = nl80211_dump_scan, 5595 .dumpit = nl80211_dump_scan,
5065 }, 5596 },
5066 { 5597 {
5598 .cmd = NL80211_CMD_START_SCHED_SCAN,
5599 .doit = nl80211_start_sched_scan,
5600 .policy = nl80211_policy,
5601 .flags = GENL_ADMIN_PERM,
5602 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
5603 NL80211_FLAG_NEED_RTNL,
5604 },
5605 {
5606 .cmd = NL80211_CMD_STOP_SCHED_SCAN,
5607 .doit = nl80211_stop_sched_scan,
5608 .policy = nl80211_policy,
5609 .flags = GENL_ADMIN_PERM,
5610 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
5611 NL80211_FLAG_NEED_RTNL,
5612 },
5613 {
5067 .cmd = NL80211_CMD_AUTHENTICATE, 5614 .cmd = NL80211_CMD_AUTHENTICATE,
5068 .doit = nl80211_authenticate, 5615 .doit = nl80211_authenticate,
5069 .policy = nl80211_policy, 5616 .policy = nl80211_policy,
@@ -5278,6 +5825,22 @@ static struct genl_ops nl80211_ops[] = {
5278 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 5825 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
5279 NL80211_FLAG_NEED_RTNL, 5826 NL80211_FLAG_NEED_RTNL,
5280 }, 5827 },
5828 {
5829 .cmd = NL80211_CMD_GET_WOWLAN,
5830 .doit = nl80211_get_wowlan,
5831 .policy = nl80211_policy,
5832 /* can be retrieved by unprivileged users */
5833 .internal_flags = NL80211_FLAG_NEED_WIPHY |
5834 NL80211_FLAG_NEED_RTNL,
5835 },
5836 {
5837 .cmd = NL80211_CMD_SET_WOWLAN,
5838 .doit = nl80211_set_wowlan,
5839 .policy = nl80211_policy,
5840 .flags = GENL_ADMIN_PERM,
5841 .internal_flags = NL80211_FLAG_NEED_WIPHY |
5842 NL80211_FLAG_NEED_RTNL,
5843 },
5281}; 5844};
5282 5845
5283static struct genl_multicast_group nl80211_mlme_mcgrp = { 5846static struct genl_multicast_group nl80211_mlme_mcgrp = {
@@ -5373,6 +5936,28 @@ static int nl80211_send_scan_msg(struct sk_buff *msg,
5373 return -EMSGSIZE; 5936 return -EMSGSIZE;
5374} 5937}
5375 5938
5939static int
5940nl80211_send_sched_scan_msg(struct sk_buff *msg,
5941 struct cfg80211_registered_device *rdev,
5942 struct net_device *netdev,
5943 u32 pid, u32 seq, int flags, u32 cmd)
5944{
5945 void *hdr;
5946
5947 hdr = nl80211hdr_put(msg, pid, seq, flags, cmd);
5948 if (!hdr)
5949 return -1;
5950
5951 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
5952 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
5953
5954 return genlmsg_end(msg, hdr);
5955
5956 nla_put_failure:
5957 genlmsg_cancel(msg, hdr);
5958 return -EMSGSIZE;
5959}
5960
5376void nl80211_send_scan_start(struct cfg80211_registered_device *rdev, 5961void nl80211_send_scan_start(struct cfg80211_registered_device *rdev,
5377 struct net_device *netdev) 5962 struct net_device *netdev)
5378{ 5963{
@@ -5430,6 +6015,43 @@ void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev,
5430 nl80211_scan_mcgrp.id, GFP_KERNEL); 6015 nl80211_scan_mcgrp.id, GFP_KERNEL);
5431} 6016}
5432 6017
6018void nl80211_send_sched_scan_results(struct cfg80211_registered_device *rdev,
6019 struct net_device *netdev)
6020{
6021 struct sk_buff *msg;
6022
6023 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
6024 if (!msg)
6025 return;
6026
6027 if (nl80211_send_sched_scan_msg(msg, rdev, netdev, 0, 0, 0,
6028 NL80211_CMD_SCHED_SCAN_RESULTS) < 0) {
6029 nlmsg_free(msg);
6030 return;
6031 }
6032
6033 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
6034 nl80211_scan_mcgrp.id, GFP_KERNEL);
6035}
6036
6037void nl80211_send_sched_scan(struct cfg80211_registered_device *rdev,
6038 struct net_device *netdev, u32 cmd)
6039{
6040 struct sk_buff *msg;
6041
6042 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
6043 if (!msg)
6044 return;
6045
6046 if (nl80211_send_sched_scan_msg(msg, rdev, netdev, 0, 0, 0, cmd) < 0) {
6047 nlmsg_free(msg);
6048 return;
6049 }
6050
6051 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
6052 nl80211_scan_mcgrp.id, GFP_KERNEL);
6053}
6054
5433/* 6055/*
5434 * This can happen on global regulatory changes or device specific settings 6056 * This can happen on global regulatory changes or device specific settings
5435 * based on custom world regulatory domains. 6057 * based on custom world regulatory domains.
@@ -5785,6 +6407,44 @@ void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev,
5785 nlmsg_free(msg); 6407 nlmsg_free(msg);
5786} 6408}
5787 6409
6410void nl80211_send_new_peer_candidate(struct cfg80211_registered_device *rdev,
6411 struct net_device *netdev,
6412 const u8 *macaddr, const u8* ie, u8 ie_len,
6413 gfp_t gfp)
6414{
6415 struct sk_buff *msg;
6416 void *hdr;
6417
6418 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
6419 if (!msg)
6420 return;
6421
6422 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_NEW_PEER_CANDIDATE);
6423 if (!hdr) {
6424 nlmsg_free(msg);
6425 return;
6426 }
6427
6428 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
6429 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
6430 NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, macaddr);
6431 if (ie_len && ie)
 6432 NLA_PUT(msg, NL80211_ATTR_IE, ie_len, ie);
6433
6434 if (genlmsg_end(msg, hdr) < 0) {
6435 nlmsg_free(msg);
6436 return;
6437 }
6438
6439 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
6440 nl80211_mlme_mcgrp.id, gfp);
6441 return;
6442
6443 nla_put_failure:
6444 genlmsg_cancel(msg, hdr);
6445 nlmsg_free(msg);
6446}
6447
5788void nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev, 6448void nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev,
5789 struct net_device *netdev, const u8 *addr, 6449 struct net_device *netdev, const u8 *addr,
5790 enum nl80211_key_type key_type, int key_id, 6450 enum nl80211_key_type key_type, int key_id,
@@ -5966,6 +6626,40 @@ void nl80211_send_sta_event(struct cfg80211_registered_device *rdev,
5966 nl80211_mlme_mcgrp.id, gfp); 6626 nl80211_mlme_mcgrp.id, gfp);
5967} 6627}
5968 6628
6629void nl80211_send_sta_del_event(struct cfg80211_registered_device *rdev,
6630 struct net_device *dev, const u8 *mac_addr,
6631 gfp_t gfp)
6632{
6633 struct sk_buff *msg;
6634 void *hdr;
6635
6636 msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
6637 if (!msg)
6638 return;
6639
6640 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_DEL_STATION);
6641 if (!hdr) {
6642 nlmsg_free(msg);
6643 return;
6644 }
6645
6646 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
6647 NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr);
6648
6649 if (genlmsg_end(msg, hdr) < 0) {
6650 nlmsg_free(msg);
6651 return;
6652 }
6653
6654 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
6655 nl80211_mlme_mcgrp.id, gfp);
6656 return;
6657
6658 nla_put_failure:
6659 genlmsg_cancel(msg, hdr);
6660 nlmsg_free(msg);
6661}
6662
5969int nl80211_send_mgmt(struct cfg80211_registered_device *rdev, 6663int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
5970 struct net_device *netdev, u32 nlpid, 6664 struct net_device *netdev, u32 nlpid,
5971 int freq, const u8 *buf, size_t len, gfp_t gfp) 6665 int freq, const u8 *buf, size_t len, gfp_t gfp)
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index e3f7fa886966..2f1bfb87a651 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -12,6 +12,10 @@ void nl80211_send_scan_done(struct cfg80211_registered_device *rdev,
12 struct net_device *netdev); 12 struct net_device *netdev);
13void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev, 13void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev,
14 struct net_device *netdev); 14 struct net_device *netdev);
15void nl80211_send_sched_scan(struct cfg80211_registered_device *rdev,
16 struct net_device *netdev, u32 cmd);
17void nl80211_send_sched_scan_results(struct cfg80211_registered_device *rdev,
18 struct net_device *netdev);
15void nl80211_send_reg_change_event(struct regulatory_request *request); 19void nl80211_send_reg_change_event(struct regulatory_request *request);
16void nl80211_send_rx_auth(struct cfg80211_registered_device *rdev, 20void nl80211_send_rx_auth(struct cfg80211_registered_device *rdev,
17 struct net_device *netdev, 21 struct net_device *netdev,
@@ -50,6 +54,10 @@ void nl80211_send_disconnected(struct cfg80211_registered_device *rdev,
50 struct net_device *netdev, u16 reason, 54 struct net_device *netdev, u16 reason,
51 const u8 *ie, size_t ie_len, bool from_ap); 55 const u8 *ie, size_t ie_len, bool from_ap);
52 56
57void nl80211_send_new_peer_candidate(struct cfg80211_registered_device *rdev,
58 struct net_device *netdev,
 59 const u8 *macaddr, const u8 *ie, u8 ie_len,
60 gfp_t gfp);
53void 61void
54nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev, 62nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev,
55 struct net_device *netdev, const u8 *addr, 63 struct net_device *netdev, const u8 *addr,
@@ -79,6 +87,9 @@ void nl80211_send_remain_on_channel_cancel(
79void nl80211_send_sta_event(struct cfg80211_registered_device *rdev, 87void nl80211_send_sta_event(struct cfg80211_registered_device *rdev,
80 struct net_device *dev, const u8 *mac_addr, 88 struct net_device *dev, const u8 *mac_addr,
81 struct station_info *sinfo, gfp_t gfp); 89 struct station_info *sinfo, gfp_t gfp);
90void nl80211_send_sta_del_event(struct cfg80211_registered_device *rdev,
91 struct net_device *dev, const u8 *mac_addr,
92 gfp_t gfp);
82 93
83int nl80211_send_mgmt(struct cfg80211_registered_device *rdev, 94int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
84 struct net_device *netdev, u32 nlpid, int freq, 95 struct net_device *netdev, u32 nlpid, int freq,
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index ab801a1097b2..1ad0f39fe091 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -106,6 +106,9 @@ struct reg_beacon {
106static void reg_todo(struct work_struct *work); 106static void reg_todo(struct work_struct *work);
107static DECLARE_WORK(reg_work, reg_todo); 107static DECLARE_WORK(reg_work, reg_todo);
108 108
109static void reg_timeout_work(struct work_struct *work);
110static DECLARE_DELAYED_WORK(reg_timeout, reg_timeout_work);
111
109/* We keep a static world regulatory domain in case of the absence of CRDA */ 112/* We keep a static world regulatory domain in case of the absence of CRDA */
110static const struct ieee80211_regdomain world_regdom = { 113static const struct ieee80211_regdomain world_regdom = {
111 .n_reg_rules = 5, 114 .n_reg_rules = 5,
@@ -669,11 +672,9 @@ static int freq_reg_info_regd(struct wiphy *wiphy,
669 for (i = 0; i < regd->n_reg_rules; i++) { 672 for (i = 0; i < regd->n_reg_rules; i++) {
670 const struct ieee80211_reg_rule *rr; 673 const struct ieee80211_reg_rule *rr;
671 const struct ieee80211_freq_range *fr = NULL; 674 const struct ieee80211_freq_range *fr = NULL;
672 const struct ieee80211_power_rule *pr = NULL;
673 675
674 rr = &regd->reg_rules[i]; 676 rr = &regd->reg_rules[i];
675 fr = &rr->freq_range; 677 fr = &rr->freq_range;
676 pr = &rr->power_rule;
677 678
678 /* 679 /*
679 * We only need to know if one frequency rule was 680 * We only need to know if one frequency rule was
@@ -1330,6 +1331,9 @@ static void reg_set_request_processed(void)
1330 need_more_processing = true; 1331 need_more_processing = true;
1331 spin_unlock(&reg_requests_lock); 1332 spin_unlock(&reg_requests_lock);
1332 1333
1334 if (last_request->initiator == NL80211_REGDOM_SET_BY_USER)
1335 cancel_delayed_work_sync(&reg_timeout);
1336
1333 if (need_more_processing) 1337 if (need_more_processing)
1334 schedule_work(&reg_work); 1338 schedule_work(&reg_work);
1335} 1339}
@@ -1440,8 +1444,18 @@ static void reg_process_hint(struct regulatory_request *reg_request)
1440 r = __regulatory_hint(wiphy, reg_request); 1444 r = __regulatory_hint(wiphy, reg_request);
1441 /* This is required so that the orig_* parameters are saved */ 1445 /* This is required so that the orig_* parameters are saved */
1442 if (r == -EALREADY && wiphy && 1446 if (r == -EALREADY && wiphy &&
1443 wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY) 1447 wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY) {
1444 wiphy_update_regulatory(wiphy, initiator); 1448 wiphy_update_regulatory(wiphy, initiator);
1449 return;
1450 }
1451
1452 /*
1453 * We only time out user hints, given that they should be the only
1454 * source of bogus requests.
1455 */
1456 if (r != -EALREADY &&
1457 reg_request->initiator == NL80211_REGDOM_SET_BY_USER)
1458 schedule_delayed_work(&reg_timeout, msecs_to_jiffies(3142));
1445} 1459}
1446 1460
1447/* 1461/*
@@ -1744,6 +1758,8 @@ static void restore_regulatory_settings(bool reset_user)
1744{ 1758{
1745 char alpha2[2]; 1759 char alpha2[2];
1746 struct reg_beacon *reg_beacon, *btmp; 1760 struct reg_beacon *reg_beacon, *btmp;
1761 struct regulatory_request *reg_request, *tmp;
1762 LIST_HEAD(tmp_reg_req_list);
1747 1763
1748 mutex_lock(&cfg80211_mutex); 1764 mutex_lock(&cfg80211_mutex);
1749 mutex_lock(&reg_mutex); 1765 mutex_lock(&reg_mutex);
@@ -1751,6 +1767,25 @@ static void restore_regulatory_settings(bool reset_user)
1751 reset_regdomains(); 1767 reset_regdomains();
1752 restore_alpha2(alpha2, reset_user); 1768 restore_alpha2(alpha2, reset_user);
1753 1769
1770 /*
 1771 * If there are any pending requests we simply
 1772 * stash them on a temporary pending queue and
 1773 * add them back after we've restored regulatory
1774 * settings.
1775 */
1776 spin_lock(&reg_requests_lock);
1777 if (!list_empty(&reg_requests_list)) {
1778 list_for_each_entry_safe(reg_request, tmp,
1779 &reg_requests_list, list) {
1780 if (reg_request->initiator !=
1781 NL80211_REGDOM_SET_BY_USER)
1782 continue;
1783 list_del(&reg_request->list);
1784 list_add_tail(&reg_request->list, &tmp_reg_req_list);
1785 }
1786 }
1787 spin_unlock(&reg_requests_lock);
1788
1754 /* Clear beacon hints */ 1789 /* Clear beacon hints */
1755 spin_lock_bh(&reg_pending_beacons_lock); 1790 spin_lock_bh(&reg_pending_beacons_lock);
1756 if (!list_empty(&reg_pending_beacons)) { 1791 if (!list_empty(&reg_pending_beacons)) {
@@ -1785,8 +1820,31 @@ static void restore_regulatory_settings(bool reset_user)
1785 */ 1820 */
1786 if (is_an_alpha2(alpha2)) 1821 if (is_an_alpha2(alpha2))
1787 regulatory_hint_user(user_alpha2); 1822 regulatory_hint_user(user_alpha2);
1788}
1789 1823
1824 if (list_empty(&tmp_reg_req_list))
1825 return;
1826
1827 mutex_lock(&cfg80211_mutex);
1828 mutex_lock(&reg_mutex);
1829
1830 spin_lock(&reg_requests_lock);
1831 list_for_each_entry_safe(reg_request, tmp, &tmp_reg_req_list, list) {
1832 REG_DBG_PRINT("Adding request for country %c%c back "
1833 "into the queue\n",
1834 reg_request->alpha2[0],
1835 reg_request->alpha2[1]);
1836 list_del(&reg_request->list);
1837 list_add_tail(&reg_request->list, &reg_requests_list);
1838 }
1839 spin_unlock(&reg_requests_lock);
1840
1841 mutex_unlock(&reg_mutex);
1842 mutex_unlock(&cfg80211_mutex);
1843
1844 REG_DBG_PRINT("Kicking the queue\n");
1845
1846 schedule_work(&reg_work);
1847}
1790 1848
1791void regulatory_hint_disconnect(void) 1849void regulatory_hint_disconnect(void)
1792{ 1850{
@@ -2125,6 +2183,13 @@ out:
2125 mutex_unlock(&reg_mutex); 2183 mutex_unlock(&reg_mutex);
2126} 2184}
2127 2185
2186static void reg_timeout_work(struct work_struct *work)
2187{
2188 REG_DBG_PRINT("Timeout while waiting for CRDA to reply, "
 2189 "restoring regulatory settings\n");
2190 restore_regulatory_settings(true);
2191}
2192
2128int __init regulatory_init(void) 2193int __init regulatory_init(void)
2129{ 2194{
2130 int err = 0; 2195 int err = 0;
@@ -2178,6 +2243,7 @@ void /* __init_or_exit */ regulatory_exit(void)
2178 struct reg_beacon *reg_beacon, *btmp; 2243 struct reg_beacon *reg_beacon, *btmp;
2179 2244
2180 cancel_work_sync(&reg_work); 2245 cancel_work_sync(&reg_work);
2246 cancel_delayed_work_sync(&reg_timeout);
2181 2247
2182 mutex_lock(&cfg80211_mutex); 2248 mutex_lock(&cfg80211_mutex);
2183 mutex_lock(&reg_mutex); 2249 mutex_lock(&reg_mutex);
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index fbf6f33ae4d0..73a441d237b5 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -93,6 +93,69 @@ void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted)
93} 93}
94EXPORT_SYMBOL(cfg80211_scan_done); 94EXPORT_SYMBOL(cfg80211_scan_done);
95 95
96void __cfg80211_sched_scan_results(struct work_struct *wk)
97{
98 struct cfg80211_registered_device *rdev;
99
100 rdev = container_of(wk, struct cfg80211_registered_device,
101 sched_scan_results_wk);
102
103 cfg80211_lock_rdev(rdev);
104
105 /* we don't have sched_scan_req anymore if the scan is stopping */
106 if (rdev->sched_scan_req)
107 nl80211_send_sched_scan_results(rdev,
108 rdev->sched_scan_req->dev);
109
110 cfg80211_unlock_rdev(rdev);
111}
112
113void cfg80211_sched_scan_results(struct wiphy *wiphy)
114{
115 /* ignore if we're not scanning */
116 if (wiphy_to_dev(wiphy)->sched_scan_req)
117 queue_work(cfg80211_wq,
118 &wiphy_to_dev(wiphy)->sched_scan_results_wk);
119}
120EXPORT_SYMBOL(cfg80211_sched_scan_results);
121
122void cfg80211_sched_scan_stopped(struct wiphy *wiphy)
123{
124 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
125
126 cfg80211_lock_rdev(rdev);
127 __cfg80211_stop_sched_scan(rdev, true);
128 cfg80211_unlock_rdev(rdev);
129}
130EXPORT_SYMBOL(cfg80211_sched_scan_stopped);
131
132int __cfg80211_stop_sched_scan(struct cfg80211_registered_device *rdev,
133 bool driver_initiated)
134{
135 int err;
136 struct net_device *dev;
137
138 ASSERT_RDEV_LOCK(rdev);
139
140 if (!rdev->sched_scan_req)
141 return 0;
142
143 dev = rdev->sched_scan_req->dev;
144
145 if (!driver_initiated) {
146 err = rdev->ops->sched_scan_stop(&rdev->wiphy, dev);
147 if (err)
148 return err;
149 }
150
151 nl80211_send_sched_scan(rdev, dev, NL80211_CMD_SCHED_SCAN_STOPPED);
152
153 kfree(rdev->sched_scan_req);
154 rdev->sched_scan_req = NULL;
155
 156 return 0;
157}
158
96static void bss_release(struct kref *ref) 159static void bss_release(struct kref *ref)
97{ 160{
98 struct cfg80211_internal_bss *bss; 161 struct cfg80211_internal_bss *bss;
@@ -210,7 +273,7 @@ static bool is_mesh(struct cfg80211_bss *a,
210{ 273{
211 const u8 *ie; 274 const u8 *ie;
212 275
213 if (!is_zero_ether_addr(a->bssid)) 276 if (!WLAN_CAPABILITY_IS_MBSS(a->capability))
214 return false; 277 return false;
215 278
216 ie = cfg80211_find_ie(WLAN_EID_MESH_ID, 279 ie = cfg80211_find_ie(WLAN_EID_MESH_ID,
@@ -248,11 +311,7 @@ static int cmp_bss(struct cfg80211_bss *a,
248 if (a->channel != b->channel) 311 if (a->channel != b->channel)
249 return b->channel->center_freq - a->channel->center_freq; 312 return b->channel->center_freq - a->channel->center_freq;
250 313
251 r = memcmp(a->bssid, b->bssid, ETH_ALEN); 314 if (WLAN_CAPABILITY_IS_MBSS(a->capability | b->capability)) {
252 if (r)
253 return r;
254
255 if (is_zero_ether_addr(a->bssid)) {
256 r = cmp_ies(WLAN_EID_MESH_ID, 315 r = cmp_ies(WLAN_EID_MESH_ID,
257 a->information_elements, 316 a->information_elements,
258 a->len_information_elements, 317 a->len_information_elements,
@@ -267,6 +326,10 @@ static int cmp_bss(struct cfg80211_bss *a,
 			    b->len_information_elements);
 	}
 
+	r = memcmp(a->bssid, b->bssid, ETH_ALEN);
+	if (r)
+		return r;
+
 	return cmp_ies(WLAN_EID_SSID,
 		       a->information_elements,
 		       a->len_information_elements,
@@ -407,7 +470,7 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
 
 	res->ts = jiffies;
 
-	if (is_zero_ether_addr(res->pub.bssid)) {
+	if (WLAN_CAPABILITY_IS_MBSS(res->pub.capability)) {
 		/* must be mesh, verify */
 		meshid = cfg80211_find_ie(WLAN_EID_MESH_ID,
 					  res->pub.information_elements,
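Taken together, the scan.c additions give drivers two entry points for firmware-offloaded scheduled scans: cfg80211_sched_scan_results() only queues sched_scan_results_wk on cfg80211_wq, so it is usable from the driver's interrupt path, while cfg80211_sched_scan_stopped() takes the driver_initiated path of __cfg80211_stop_sched_scan() and therefore skips the ->sched_scan_stop() callback. A hedged sketch of the driver side (the mydrv_* names and event codes are invented):

	#include <net/cfg80211.h>

	enum mydrv_fw_event {			/* invented event codes */
		MYDRV_EVT_SCHED_SCAN_RESULTS,
		MYDRV_EVT_SCHED_SCAN_ABORTED,
	};

	static void mydrv_handle_fw_event(struct wiphy *wiphy, u32 event)
	{
		switch (event) {
		case MYDRV_EVT_SCHED_SCAN_RESULTS:
			/* just schedules work; callable from atomic context */
			cfg80211_sched_scan_results(wiphy);
			break;
		case MYDRV_EVT_SCHED_SCAN_ABORTED:
			/* firmware stopped by itself; cfg80211 frees
			 * sched_scan_req and notifies userspace */
			cfg80211_sched_scan_stopped(wiphy);
			break;
		}
	}
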
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index 4294fa22bb2d..c6e4ca6a7d2e 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -93,7 +93,7 @@ static int wiphy_suspend(struct device *dev, pm_message_t state)
 
 	if (rdev->ops->suspend) {
 		rtnl_lock();
-		ret = rdev->ops->suspend(&rdev->wiphy);
+		ret = rdev->ops->suspend(&rdev->wiphy, rdev->wowlan);
 		rtnl_unlock();
 	}
 
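The wiphy_suspend() change passes the configured WoWLAN triggers (rdev->wowlan, introduced elsewhere in this merge) down to the driver. A sketch of what a driver's suspend op might do with it, assuming the struct cfg80211_wowlan fields of this merge window (disconnect, magic_pkt); all mydrv_* names are placeholders:

	#include <net/cfg80211.h>

	struct mydrv_priv;				/* hypothetical driver state */
	static int mydrv_full_power_down(struct mydrv_priv *priv);
	static void mydrv_arm_wake(struct mydrv_priv *priv, int trigger);
	enum { MYDRV_WAKE_DISCONNECT, MYDRV_WAKE_MAGIC };	/* invented */

	static int mydrv_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wowlan)
	{
		struct mydrv_priv *priv = wiphy_priv(wiphy);

		if (!wowlan)				/* no wakeup configured */
			return mydrv_full_power_down(priv);

		/* arm only the triggers userspace asked for */
		if (wowlan->disconnect)
			mydrv_arm_wake(priv, MYDRV_WAKE_DISCONNECT);
		if (wowlan->magic_pkt)
			mydrv_arm_wake(priv, MYDRV_WAKE_MAGIC);

		return 0;
	}
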
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 6a750bc6bcfe..f0536d44d43c 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -544,7 +544,8 @@ EXPORT_SYMBOL(ieee80211_data_from_8023);
 
 void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
 			      const u8 *addr, enum nl80211_iftype iftype,
-			      const unsigned int extra_headroom)
+			      const unsigned int extra_headroom,
+			      bool has_80211_header)
 {
 	struct sk_buff *frame = NULL;
 	u16 ethertype;
@@ -553,14 +554,18 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
 	int remaining, err;
 	u8 dst[ETH_ALEN], src[ETH_ALEN];
 
-	err = ieee80211_data_to_8023(skb, addr, iftype);
-	if (err)
-		goto out;
+	if (has_80211_header) {
+		err = ieee80211_data_to_8023(skb, addr, iftype);
+		if (err)
+			goto out;
 
-	/* skip the wrapping header */
-	eth = (struct ethhdr *) skb_pull(skb, sizeof(struct ethhdr));
-	if (!eth)
-		goto out;
+		/* skip the wrapping header */
+		eth = (struct ethhdr *) skb_pull(skb, sizeof(struct ethhdr));
+		if (!eth)
+			goto out;
+	} else {
+		eth = (struct ethhdr *) skb->data;
+	}
 
 	while (skb != frame) {
 		u8 padding;
@@ -803,6 +808,11 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
 		return -EBUSY;
 
 	if (ntype != otype) {
+		err = cfg80211_can_change_interface(rdev, dev->ieee80211_ptr,
+						    ntype);
+		if (err)
+			return err;
+
 		dev->ieee80211_ptr->use_4addr = false;
 		dev->ieee80211_ptr->mesh_id_up_len = 0;
 
@@ -896,3 +906,103 @@ u16 cfg80211_calculate_bitrate(struct rate_info *rate)
 	/* do NOT round down here */
 	return (bitrate + 50000) / 100000;
 }
+
+int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
+				 u32 beacon_int)
+{
+	struct wireless_dev *wdev;
+	int res = 0;
+
+	if (!beacon_int)
+		return -EINVAL;
+
+	mutex_lock(&rdev->devlist_mtx);
+
+	list_for_each_entry(wdev, &rdev->netdev_list, list) {
+		if (!wdev->beacon_interval)
+			continue;
+		if (wdev->beacon_interval != beacon_int) {
+			res = -EINVAL;
+			break;
+		}
+	}
+
+	mutex_unlock(&rdev->devlist_mtx);
+
+	return res;
+}
+
+int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
+				  struct wireless_dev *wdev,
+				  enum nl80211_iftype iftype)
+{
+	struct wireless_dev *wdev_iter;
+	int num[NUM_NL80211_IFTYPES];
+	int total = 1;
+	int i, j;
+
+	ASSERT_RTNL();
+
+	/* Always allow software iftypes */
+	if (rdev->wiphy.software_iftypes & BIT(iftype))
+		return 0;
+
+	/*
+	 * Drivers will gradually all set this flag, until all
+	 * have it we only enforce for those that set it.
+	 */
+	if (!(rdev->wiphy.flags & WIPHY_FLAG_ENFORCE_COMBINATIONS))
+		return 0;
+
+	memset(num, 0, sizeof(num));
+
+	num[iftype] = 1;
+
+	mutex_lock(&rdev->devlist_mtx);
+	list_for_each_entry(wdev_iter, &rdev->netdev_list, list) {
+		if (wdev_iter == wdev)
+			continue;
+		if (!netif_running(wdev_iter->netdev))
+			continue;
+
+		if (rdev->wiphy.software_iftypes & BIT(wdev_iter->iftype))
+			continue;
+
+		num[wdev_iter->iftype]++;
+		total++;
+	}
+	mutex_unlock(&rdev->devlist_mtx);
+
+	for (i = 0; i < rdev->wiphy.n_iface_combinations; i++) {
+		const struct ieee80211_iface_combination *c;
+		struct ieee80211_iface_limit *limits;
+
+		c = &rdev->wiphy.iface_combinations[i];
+
+		limits = kmemdup(c->limits, sizeof(limits[0]) * c->n_limits,
+				 GFP_KERNEL);
+		if (!limits)
+			return -ENOMEM;
+		if (total > c->max_interfaces)
+			goto cont;
+
+		for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) {
+			if (rdev->wiphy.software_iftypes & BIT(iftype))
+				continue;
+			for (j = 0; j < c->n_limits; j++) {
+				if (!(limits[j].types & BIT(iftype)))
+					continue;
+				if (limits[j].max < num[iftype])
+					goto cont;
+				limits[j].max -= num[iftype];
+			}
+		}
+		/* yay, it fits */
+		kfree(limits);
+		return 0;
+ cont:
+		kfree(limits);
+	}
+
+	return -EBUSY;
+}
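cfg80211_can_change_interface() above works by counting the running, non-software interfaces per type, then trying each advertised combination in turn: it copies the combination's limits, checks the total against max_interfaces, and subtracts each type's count from every limit covering that type; the first combination whose limits never underflow wins, otherwise -EBUSY. The check is only armed for drivers that opt in, so a driver would advertise something like the following (the numbers are invented: up to two stations plus one AP, at most three interfaces on one channel):

	#include <net/cfg80211.h>

	static const struct ieee80211_iface_limit mydrv_limits[] = {
		{ .max = 2, .types = BIT(NL80211_IFTYPE_STATION) },
		{ .max = 1, .types = BIT(NL80211_IFTYPE_AP) },
	};

	static const struct ieee80211_iface_combination mydrv_combos[] = {
		{
			.limits = mydrv_limits,
			.n_limits = ARRAY_SIZE(mydrv_limits),
			.max_interfaces = 3,
			.num_different_channels = 1,
		},
	};

	static void mydrv_setup_wiphy(struct wiphy *wiphy)	/* hypothetical hook */
	{
		wiphy->iface_combinations = mydrv_combos;
		wiphy->n_iface_combinations = ARRAY_SIZE(mydrv_combos);
		/* without this flag the check above allows everything */
		wiphy->flags |= WIPHY_FLAG_ENFORCE_COMBINATIONS;
	}
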
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index b4d745ea8ee1..9bec2e8a838c 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1348,7 +1348,8 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
 	default:
 		BUG();
 	}
-	xdst = dst_alloc(dst_ops, 0);
+	xdst = dst_alloc(dst_ops, NULL, 0, 0, 0);
+	memset(&xdst->u.rt6.rt6i_table, 0, sizeof(*xdst) - sizeof(struct dst_entry));
 	xfrm_policy_put_afinfo(afinfo);
 
 	if (likely(xdst))
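The xfrm_policy.c hunk follows the dst_alloc() signature change running through this merge (device, initial refcount, initial obsolete state and flags are now explicit arguments), and, on the assumption that the allocator no longer zeroes the protocol-private tail for its callers, xfrm clears everything past the embedded struct dst_entry itself, using the first private field (u.rt6.rt6i_table) as the start address. The same idiom in a self-contained toy form (names invented):

	#include <stddef.h>
	#include <string.h>

	struct header { long h; };	/* stands in for struct dst_entry */
	struct object {
		struct header hdr;	/* generic part, set up by the allocator */
		int a, b;		/* private tail, no longer zeroed for us */
	};

	static void clear_private_tail(struct object *obj)
	{
		/* zero from the end of the generic header to the end of
		 * the object, exactly as the memset in the hunk does */
		memset((char *)obj + sizeof(struct header), 0,
		       sizeof(*obj) - sizeof(struct header));
	}
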
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index dd78536d40de..d70f85eb7864 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1036,15 +1036,15 @@ static struct xfrm_state *__find_acq_core(struct net *net, struct xfrm_mark *m,
 
 	case AF_INET6:
 		ipv6_addr_copy((struct in6_addr *)x->sel.daddr.a6,
-			       (struct in6_addr *)daddr);
+			       (const struct in6_addr *)daddr);
 		ipv6_addr_copy((struct in6_addr *)x->sel.saddr.a6,
-			       (struct in6_addr *)saddr);
+			       (const struct in6_addr *)saddr);
 		x->sel.prefixlen_d = 128;
 		x->sel.prefixlen_s = 128;
 		ipv6_addr_copy((struct in6_addr *)x->props.saddr.a6,
-			       (struct in6_addr *)saddr);
+			       (const struct in6_addr *)saddr);
 		ipv6_addr_copy((struct in6_addr *)x->id.daddr.a6,
-			       (struct in6_addr *)daddr);
+			       (const struct in6_addr *)daddr);
 		break;
 	}
 
@@ -2092,8 +2092,8 @@ static void xfrm_audit_helper_sainfo(struct xfrm_state *x,
 static void xfrm_audit_helper_pktinfo(struct sk_buff *skb, u16 family,
 				      struct audit_buffer *audit_buf)
 {
-	struct iphdr *iph4;
-	struct ipv6hdr *iph6;
+	const struct iphdr *iph4;
+	const struct ipv6hdr *iph6;
 
 	switch (family) {
 	case AF_INET:
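The const casts in __find_acq_core() track the constification of ipv6_addr_copy()'s source argument elsewhere in this series; the helper itself is, as of this era, a thin memcpy() (sketch below; the kernel version lives in include/net/ipv6.h, and <netinet/in.h> stands in for the kernel's struct in6_addr):

	#include <string.h>
	#include <netinet/in.h>

	static inline void ipv6_addr_copy(struct in6_addr *a1,
					  const struct in6_addr *a2)
	{
		memcpy(a1, a2, sizeof(struct in6_addr));
	}
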