Diffstat (limited to 'net')
-rw-r--r--  net/802/garp.c | 6
-rw-r--r--  net/802/psnap.c | 1
-rw-r--r--  net/8021q/vlan.c | 45
-rw-r--r--  net/8021q/vlan.h | 7
-rw-r--r--  net/8021q/vlan_core.c | 121
-rw-r--r--  net/8021q/vlan_dev.c | 236
-rw-r--r--  net/9p/Kconfig | 8
-rw-r--r--  net/9p/client.c | 39
-rw-r--r--  net/9p/mod.c | 4
-rw-r--r--  net/9p/trans_fd.c | 9
-rw-r--r--  net/9p/trans_rdma.c | 5
-rw-r--r--  net/9p/util.c | 2
-rw-r--r--  net/Kconfig | 14
-rw-r--r--  net/atm/atm_sysfs.c | 10
-rw-r--r--  net/atm/lec.c | 6
-rw-r--r--  net/atm/mpc.c | 2
-rw-r--r--  net/atm/proc.c | 4
-rw-r--r--  net/ax25/af_ax25.c | 16
-rw-r--r--  net/ax25/ax25_iface.c | 3
-rw-r--r--  net/batman-adv/aggregation.c | 31
-rw-r--r--  net/batman-adv/aggregation.h | 4
-rw-r--r--  net/batman-adv/bat_debugfs.c | 4
-rw-r--r--  net/batman-adv/bat_sysfs.c | 16
-rw-r--r--  net/batman-adv/gateway_client.c | 306
-rw-r--r--  net/batman-adv/gateway_client.h | 2
-rw-r--r--  net/batman-adv/hard-interface.c | 107
-rw-r--r--  net/batman-adv/hard-interface.h | 18
-rw-r--r--  net/batman-adv/icmp_socket.c | 37
-rw-r--r--  net/batman-adv/main.c | 20
-rw-r--r--  net/batman-adv/main.h | 44
-rw-r--r--  net/batman-adv/originator.c | 90
-rw-r--r--  net/batman-adv/originator.h | 1
-rw-r--r--  net/batman-adv/packet.h | 5
-rw-r--r--  net/batman-adv/routing.c | 572
-rw-r--r--  net/batman-adv/routing.h | 6
-rw-r--r--  net/batman-adv/send.c | 72
-rw-r--r--  net/batman-adv/send.h | 2
-rw-r--r--  net/batman-adv/soft-interface.c | 473
-rw-r--r--  net/batman-adv/translation-table.c | 474
-rw-r--r--  net/batman-adv/translation-table.h | 24
-rw-r--r--  net/batman-adv/types.h | 56
-rw-r--r--  net/batman-adv/unicast.c | 20
-rw-r--r--  net/batman-adv/vis.c | 146
-rw-r--r--  net/bluetooth/bnep/bnep.h | 148
-rw-r--r--  net/bluetooth/bnep/core.c | 71
-rw-r--r--  net/bluetooth/bnep/sock.c | 2
-rw-r--r--  net/bluetooth/cmtp/capi.c | 6
-rw-r--r--  net/bluetooth/cmtp/cmtp.h | 11
-rw-r--r--  net/bluetooth/cmtp/core.c | 28
-rw-r--r--  net/bluetooth/cmtp/sock.c | 2
-rw-r--r--  net/bluetooth/hci_conn.c | 98
-rw-r--r--  net/bluetooth/hci_core.c | 154
-rw-r--r--  net/bluetooth/hci_event.c | 285
-rw-r--r--  net/bluetooth/hci_sysfs.c | 71
-rw-r--r--  net/bluetooth/hidp/core.c | 94
-rw-r--r--  net/bluetooth/hidp/hidp.h | 7
-rw-r--r--  net/bluetooth/hidp/sock.c | 7
-rw-r--r--  net/bluetooth/l2cap_core.c | 1993
-rw-r--r--  net/bluetooth/l2cap_sock.c | 331
-rw-r--r--  net/bluetooth/mgmt.c | 612
-rw-r--r--  net/bluetooth/rfcomm/core.c | 23
-rw-r--r--  net/bluetooth/rfcomm/sock.c | 6
-rw-r--r--  net/bluetooth/sco.c | 13
-rw-r--r--  net/bridge/br.c | 1
-rw-r--r--  net/bridge/br_device.c | 105
-rw-r--r--  net/bridge/br_fdb.c | 311
-rw-r--r--  net/bridge/br_if.c | 107
-rw-r--r--  net/bridge/br_input.c | 11
-rw-r--r--  net/bridge/br_ioctl.c | 40
-rw-r--r--  net/bridge/br_multicast.c | 21
-rw-r--r--  net/bridge/br_netfilter.c | 16
-rw-r--r--  net/bridge/br_netlink.c | 60
-rw-r--r--  net/bridge/br_notify.c | 11
-rw-r--r--  net/bridge/br_private.h | 22
-rw-r--r--  net/bridge/br_private_stp.h | 13
-rw-r--r--  net/bridge/br_stp.c | 48
-rw-r--r--  net/bridge/br_stp_if.c | 21
-rw-r--r--  net/bridge/br_sysfs_br.c | 39
-rw-r--r--  net/bridge/br_sysfs_if.c | 26
-rw-r--r--  net/bridge/netfilter/ebtables.c | 6
-rw-r--r--  net/caif/Makefile | 2
-rw-r--r--  net/caif/caif_config_util.c | 99
-rw-r--r--  net/caif/caif_dev.c | 392
-rw-r--r--  net/caif/caif_socket.c | 117
-rw-r--r--  net/caif/cfcnfg.c | 531
-rw-r--r--  net/caif/cfctrl.c | 234
-rw-r--r--  net/caif/cfdgml.c | 7
-rw-r--r--  net/caif/cffrml.c | 60
-rw-r--r--  net/caif/cfmuxl.c | 197
-rw-r--r--  net/caif/cfpkt_skbuff.c | 205
-rw-r--r--  net/caif/cfrfml.c | 4
-rw-r--r--  net/caif/cfserl.c | 7
-rw-r--r--  net/caif/cfsrvl.c | 40
-rw-r--r--  net/caif/cfutill.c | 7
-rw-r--r--  net/caif/cfveil.c | 11
-rw-r--r--  net/caif/cfvidl.c | 5
-rw-r--r--  net/caif/chnl_net.c | 54
-rw-r--r--  net/can/af_can.c | 67
-rw-r--r--  net/can/bcm.c | 8
-rw-r--r--  net/can/proc.c | 7
-rw-r--r--  net/can/raw.c | 2
-rw-r--r--  net/compat.c | 16
-rw-r--r--  net/core/dev.c | 233
-rw-r--r--  net/core/dev_addr_lists.c | 12
-rw-r--r--  net/core/drop_monitor.c | 12
-rw-r--r--  net/core/dst.c | 63
-rw-r--r--  net/core/ethtool.c | 221
-rw-r--r--  net/core/fib_rules.c | 4
-rw-r--r--  net/core/filter.c | 70
-rw-r--r--  net/core/gen_estimator.c | 9
-rw-r--r--  net/core/net-sysfs.c | 83
-rw-r--r--  net/core/net_namespace.c | 97
-rw-r--r--  net/core/netpoll.c | 35
-rw-r--r--  net/core/pktgen.c | 222
-rw-r--r--  net/core/rtnetlink.c | 25
-rw-r--r--  net/core/skbuff.c | 4
-rw-r--r--  net/core/sysctl_net_core.c | 10
-rw-r--r--  net/core/utils.c | 25
-rw-r--r--  net/dccp/ipv4.c | 53
-rw-r--r--  net/dccp/ipv6.c | 10
-rw-r--r--  net/dccp/output.c | 4
-rw-r--r--  net/decnet/dn_dev.c | 17
-rw-r--r--  net/decnet/dn_route.c | 15
-rw-r--r--  net/decnet/dn_table.c | 4
-rw-r--r--  net/dns_resolver/dns_key.c | 10
-rw-r--r--  net/dsa/slave.c | 1
-rw-r--r--  net/econet/af_econet.c | 8
-rw-r--r--  net/ieee802154/nl-phy.c | 3
-rw-r--r--  net/ipv4/Makefile | 2
-rw-r--r--  net/ipv4/af_inet.c | 59
-rw-r--r--  net/ipv4/ah4.c | 7
-rw-r--r--  net/ipv4/cipso_ipv4.c | 113
-rw-r--r--  net/ipv4/datagram.c | 22
-rw-r--r--  net/ipv4/devinet.c | 4
-rw-r--r--  net/ipv4/esp4.c | 7
-rw-r--r--  net/ipv4/fib_frontend.c | 16
-rw-r--r--  net/ipv4/fib_semantics.c | 12
-rw-r--r--  net/ipv4/fib_trie.c | 118
-rw-r--r--  net/ipv4/icmp.c | 133
-rw-r--r--  net/ipv4/igmp.c | 64
-rw-r--r--  net/ipv4/inet_connection_sock.c | 61
-rw-r--r--  net/ipv4/inet_diag.c | 16
-rw-r--r--  net/ipv4/inet_lro.c | 4
-rw-r--r--  net/ipv4/inetpeer.c | 42
-rw-r--r--  net/ipv4/ip_forward.c | 2
-rw-r--r--  net/ipv4/ip_fragment.c | 58
-rw-r--r--  net/ipv4/ip_gre.c | 70
-rw-r--r--  net/ipv4/ip_input.c | 4
-rw-r--r--  net/ipv4/ip_options.c | 72
-rw-r--r--  net/ipv4/ip_output.c | 183
-rw-r--r--  net/ipv4/ip_sockglue.c | 37
-rw-r--r--  net/ipv4/ipcomp.c | 4
-rw-r--r--  net/ipv4/ipconfig.c | 35
-rw-r--r--  net/ipv4/ipip.c | 36
-rw-r--r--  net/ipv4/ipmr.c | 39
-rw-r--r--  net/ipv4/netfilter.c | 60
-rw-r--r--  net/ipv4/netfilter/arp_tables.c | 18
-rw-r--r--  net/ipv4/netfilter/ip_queue.c | 6
-rw-r--r--  net/ipv4/netfilter/ip_tables.c | 30
-rw-r--r--  net/ipv4/netfilter/ipt_CLUSTERIP.c | 6
-rw-r--r--  net/ipv4/netfilter/ipt_MASQUERADE.c | 2
-rw-r--r--  net/ipv4/netfilter/ipt_REJECT.c | 14
-rw-r--r--  net/ipv4/netfilter/ipt_ecn.c | 7
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | 6
-rw-r--r--  net/ipv4/netfilter/nf_conntrack_proto_icmp.c | 2
-rw-r--r--  net/ipv4/netfilter/nf_nat_core.c | 2
-rw-r--r--  net/ipv4/netfilter/nf_nat_helper.c | 4
-rw-r--r--  net/ipv4/netfilter/nf_nat_rule.c | 2
-rw-r--r--  net/ipv4/netfilter/nf_nat_standalone.c | 4
-rw-r--r--  net/ipv4/ping.c | 931
-rw-r--r--  net/ipv4/raw.c | 94
-rw-r--r--  net/ipv4/route.c | 468
-rw-r--r--  net/ipv4/syncookies.c | 22
-rw-r--r--  net/ipv4/sysctl_net_ipv4.c | 68
-rw-r--r--  net/ipv4/tcp.c | 17
-rw-r--r--  net/ipv4/tcp_ipv4.c | 105
-rw-r--r--  net/ipv4/tcp_output.c | 2
-rw-r--r--  net/ipv4/udp.c | 93
-rw-r--r--  net/ipv4/xfrm4_output.c | 7
-rw-r--r--  net/ipv4/xfrm4_policy.c | 38
-rw-r--r--  net/ipv4/xfrm4_state.c | 2
-rw-r--r--  net/ipv6/addrconf.c | 58
-rw-r--r--  net/ipv6/af_inet6.c | 6
-rw-r--r--  net/ipv6/anycast.c | 16
-rw-r--r--  net/ipv6/esp6.c | 5
-rw-r--r--  net/ipv6/icmp.c | 8
-rw-r--r--  net/ipv6/inet6_connection_sock.c | 2
-rw-r--r--  net/ipv6/ip6_fib.c | 20
-rw-r--r--  net/ipv6/ip6_input.c | 6
-rw-r--r--  net/ipv6/ip6_output.c | 50
-rw-r--r--  net/ipv6/ip6_tunnel.c | 46
-rw-r--r--  net/ipv6/ip6mr.c | 4
-rw-r--r--  net/ipv6/ipcomp6.c | 5
-rw-r--r--  net/ipv6/mcast.c | 44
-rw-r--r--  net/ipv6/mip6.c | 8
-rw-r--r--  net/ipv6/ndisc.c | 51
-rw-r--r--  net/ipv6/netfilter.c | 10
-rw-r--r--  net/ipv6/netfilter/ip6_queue.c | 6
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c | 21
-rw-r--r--  net/ipv6/netfilter/ip6table_mangle.c | 3
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c | 2
-rw-r--r--  net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c | 2
-rw-r--r--  net/ipv6/proc.c | 40
-rw-r--r--  net/ipv6/raw.c | 20
-rw-r--r--  net/ipv6/reassembly.c | 4
-rw-r--r--  net/ipv6/route.c | 171
-rw-r--r--  net/ipv6/sit.c | 47
-rw-r--r--  net/ipv6/syncookies.c | 13
-rw-r--r--  net/ipv6/tcp_ipv6.c | 57
-rw-r--r--  net/ipv6/udp.c | 29
-rw-r--r--  net/ipv6/xfrm6_mode_beet.c | 2
-rw-r--r--  net/ipv6/xfrm6_mode_tunnel.c | 6
-rw-r--r--  net/ipv6/xfrm6_policy.c | 2
-rw-r--r--  net/ipv6/xfrm6_tunnel.c | 12
-rw-r--r--  net/irda/ircomm/ircomm_core.c | 6
-rw-r--r--  net/irda/ircomm/ircomm_lmp.c | 5
-rw-r--r--  net/irda/ircomm/ircomm_tty.c | 14
-rw-r--r--  net/irda/iriap.c | 5
-rw-r--r--  net/irda/irlan/irlan_filter.c | 4
-rw-r--r--  net/irda/irlan/irlan_provider.c | 3
-rw-r--r--  net/irda/irlap_event.c | 3
-rw-r--r--  net/irda/irproc.c | 5
-rw-r--r--  net/iucv/af_iucv.c | 9
-rw-r--r--  net/iucv/iucv.c | 79
-rw-r--r--  net/key/af_key.c | 4
-rw-r--r--  net/l2tp/l2tp_core.c | 28
-rw-r--r--  net/l2tp/l2tp_debugfs.c | 2
-rw-r--r--  net/l2tp/l2tp_ip.c | 52
-rw-r--r--  net/l2tp/l2tp_netlink.c | 3
-rw-r--r--  net/mac80211/Kconfig | 1
-rw-r--r--  net/mac80211/aes_ccm.c | 6
-rw-r--r--  net/mac80211/agg-rx.c | 3
-rw-r--r--  net/mac80211/agg-tx.c | 67
-rw-r--r--  net/mac80211/cfg.c | 188
-rw-r--r--  net/mac80211/debugfs.c | 91
-rw-r--r--  net/mac80211/debugfs_key.c | 21
-rw-r--r--  net/mac80211/debugfs_sta.c | 26
-rw-r--r--  net/mac80211/driver-ops.h | 87
-rw-r--r--  net/mac80211/driver-trace.h | 275
-rw-r--r--  net/mac80211/ht.c | 27
-rw-r--r--  net/mac80211/ibss.c | 27
-rw-r--r--  net/mac80211/ieee80211_i.h | 48
-rw-r--r--  net/mac80211/iface.c | 7
-rw-r--r--  net/mac80211/key.c | 30
-rw-r--r--  net/mac80211/key.h | 4
-rw-r--r--  net/mac80211/main.c | 85
-rw-r--r--  net/mac80211/mesh.c | 62
-rw-r--r--  net/mac80211/mesh.h | 16
-rw-r--r--  net/mac80211/mesh_hwmp.c | 42
-rw-r--r--  net/mac80211/mesh_pathtbl.c | 308
-rw-r--r--  net/mac80211/mesh_plink.c | 112
-rw-r--r--  net/mac80211/mlme.c | 49
-rw-r--r--  net/mac80211/pm.c | 29
-rw-r--r--  net/mac80211/rc80211_minstrel.c | 4
-rw-r--r--  net/mac80211/rc80211_minstrel_ht.c | 27
-rw-r--r--  net/mac80211/rx.c | 94
-rw-r--r--  net/mac80211/scan.c | 127
-rw-r--r--  net/mac80211/sta_info.c | 52
-rw-r--r--  net/mac80211/sta_info.h | 56
-rw-r--r--  net/mac80211/status.c | 19
-rw-r--r--  net/mac80211/tkip.c | 4
-rw-r--r--  net/mac80211/tkip.h | 4
-rw-r--r--  net/mac80211/tx.c | 20
-rw-r--r--  net/mac80211/util.c | 21
-rw-r--r--  net/mac80211/wep.c | 34
-rw-r--r--  net/mac80211/wep.h | 4
-rw-r--r--  net/mac80211/work.c | 16
-rw-r--r--  net/mac80211/wpa.c | 84
-rw-r--r--  net/netfilter/ipset/ip_set_bitmap_ip.c | 2
-rw-r--r--  net/netfilter/ipset/ip_set_core.c | 4
-rw-r--r--  net/netfilter/ipset/ip_set_getport.c | 16
-rw-r--r--  net/netfilter/ipset/ip_set_hash_ipport.c | 2
-rw-r--r--  net/netfilter/ipset/ip_set_hash_ipportip.c | 2
-rw-r--r--  net/netfilter/ipset/ip_set_hash_ipportnet.c | 12
-rw-r--r--  net/netfilter/ipset/ip_set_hash_net.c | 8
-rw-r--r--  net/netfilter/ipset/ip_set_hash_netport.c | 8
-rw-r--r--  net/netfilter/ipvs/ip_vs_conn.c | 10
-rw-r--r--  net/netfilter/ipvs/ip_vs_core.c | 41
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c | 3
-rw-r--r--  net/netfilter/ipvs/ip_vs_ftp.c | 27
-rw-r--r--  net/netfilter/ipvs/ip_vs_xmit.c | 97
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 7
-rw-r--r--  net/netfilter/nf_conntrack_extend.c | 8
-rw-r--r--  net/netfilter/nf_conntrack_ftp.c | 2
-rw-r--r--  net/netfilter/nf_conntrack_h323_main.c | 10
-rw-r--r--  net/netfilter/nf_conntrack_irc.c | 3
-rw-r--r--  net/netfilter/nf_conntrack_pptp.c | 3
-rw-r--r--  net/netfilter/nf_conntrack_sane.c | 2
-rw-r--r--  net/netfilter/nf_conntrack_sip.c | 18
-rw-r--r--  net/netfilter/nf_conntrack_standalone.c | 2
-rw-r--r--  net/netfilter/nfnetlink_log.c | 5
-rw-r--r--  net/netfilter/nfnetlink_queue.c | 3
-rw-r--r--  net/netfilter/x_tables.c | 9
-rw-r--r--  net/netfilter/xt_osf.c | 11
-rw-r--r--  net/netfilter/xt_socket.c | 4
-rw-r--r--  net/netlabel/netlabel_addrlist.h | 8
-rw-r--r--  net/netlabel/netlabel_cipso_v4.c | 4
-rw-r--r--  net/netlabel/netlabel_unlabeled.c | 42
-rw-r--r--  net/netlink/af_netlink.c | 10
-rw-r--r--  net/netrom/af_netrom.c | 12
-rw-r--r--  net/packet/af_packet.c | 21
-rw-r--r--  net/phonet/pn_dev.c | 16
-rw-r--r--  net/phonet/pn_netlink.c | 4
-rw-r--r--  net/phonet/socket.c | 47
-rw-r--r--  net/rds/ib.c | 2
-rw-r--r--  net/rds/ib_cm.c | 2
-rw-r--r--  net/rds/iw.c | 2
-rw-r--r--  net/rds/iw_cm.c | 2
-rw-r--r--  net/rds/rdma_transport.c | 3
-rw-r--r--  net/rfkill/Kconfig | 20
-rw-r--r--  net/rfkill/Makefile | 2
-rw-r--r--  net/rfkill/core.c | 2
-rw-r--r--  net/rfkill/rfkill-gpio.c | 227
-rw-r--r--  net/rfkill/rfkill-regulator.c | 164
-rw-r--r--  net/rose/af_rose.c | 16
-rw-r--r--  net/rxrpc/ar-ack.c | 2
-rw-r--r--  net/rxrpc/ar-connevent.c | 3
-rw-r--r--  net/rxrpc/ar-error.c | 5
-rw-r--r--  net/rxrpc/ar-peer.c | 6
-rw-r--r--  net/rxrpc/ar-transport.c | 3
-rw-r--r--  net/sched/Kconfig | 12
-rw-r--r--  net/sched/Makefile | 1
-rw-r--r--  net/sched/act_api.c | 7
-rw-r--r--  net/sched/act_police.c | 8
-rw-r--r--  net/sched/sch_api.c | 2
-rw-r--r--  net/sched/sch_generic.c | 20
-rw-r--r--  net/sched/sch_qfq.c | 1137
-rw-r--r--  net/sched/sch_sfq.c | 24
-rw-r--r--  net/sctp/associola.c | 33
-rw-r--r--  net/sctp/bind_addr.c | 12
-rw-r--r--  net/sctp/debug.c | 1
-rw-r--r--  net/sctp/endpointola.c | 20
-rw-r--r--  net/sctp/input.c | 19
-rw-r--r--  net/sctp/ipv6.c | 187
-rw-r--r--  net/sctp/output.c | 19
-rw-r--r--  net/sctp/outqueue.c | 39
-rw-r--r--  net/sctp/proc.c | 4
-rw-r--r--  net/sctp/protocol.c | 91
-rw-r--r--  net/sctp/sm_make_chunk.c | 62
-rw-r--r--  net/sctp/sm_sideeffect.c | 32
-rw-r--r--  net/sctp/sm_statefuns.c | 135
-rw-r--r--  net/sctp/sm_statetable.c | 80
-rw-r--r--  net/sctp/socket.c | 131
-rw-r--r--  net/sctp/transport.c | 27
-rw-r--r--  net/sctp/ulpevent.c | 46
-rw-r--r--  net/socket.c | 230
-rw-r--r--  net/sunrpc/addr.c | 2
-rw-r--r--  net/sunrpc/auth.c | 4
-rw-r--r--  net/sunrpc/auth_gss/auth_gss.c | 4
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_mech.c | 3
-rw-r--r--  net/sunrpc/clnt.c | 34
-rw-r--r--  net/sunrpc/rpcb_clnt.c | 99
-rw-r--r--  net/sunrpc/sched.c | 28
-rw-r--r--  net/sunrpc/svc.c | 2
-rw-r--r--  net/sunrpc/svcsock.c | 336
-rw-r--r--  net/sunrpc/xdr.c | 19
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_transport.c | 5
-rw-r--r--  net/sunrpc/xprtrdma/verbs.c | 2
-rw-r--r--  net/sunrpc/xprtsock.c | 435
-rw-r--r--  net/tipc/addr.h | 7
-rw-r--r--  net/tipc/bcast.c | 22
-rw-r--r--  net/tipc/bearer.c | 45
-rw-r--r--  net/tipc/core.c | 3
-rw-r--r--  net/tipc/discover.c | 150
-rw-r--r--  net/tipc/discover.h | 11
-rw-r--r--  net/tipc/link.c | 104
-rw-r--r--  net/tipc/link.h | 1
-rw-r--r--  net/tipc/msg.c | 25
-rw-r--r--  net/tipc/msg.h | 161
-rw-r--r--  net/tipc/port.c | 55
-rw-r--r--  net/tipc/port.h | 14
-rw-r--r--  net/tipc/socket.c | 27
-rw-r--r--  net/tipc/subscr.c | 4
-rw-r--r--  net/unix/af_unix.c | 2
-rw-r--r--  net/wireless/core.c | 112
-rw-r--r--  net/wireless/core.h | 40
-rw-r--r--  net/wireless/lib80211_crypt_wep.c | 3
-rw-r--r--  net/wireless/mesh.c | 23
-rw-r--r--  net/wireless/mlme.c | 19
-rw-r--r--  net/wireless/nl80211.c | 812
-rw-r--r--  net/wireless/nl80211.h | 11
-rw-r--r--  net/wireless/reg.c | 74
-rw-r--r--  net/wireless/scan.c | 114
-rw-r--r--  net/wireless/sme.c | 19
-rw-r--r--  net/wireless/sysfs.c | 2
-rw-r--r--  net/wireless/util.c | 128
389 files changed, 15572 insertions, 8354 deletions
diff --git a/net/802/garp.c b/net/802/garp.c
index c1df2dad8c6b..16102951d36a 100644
--- a/net/802/garp.c
+++ b/net/802/garp.c
@@ -554,8 +554,7 @@ static void garp_release_port(struct net_device *dev)
 		return;
 	}
 	rcu_assign_pointer(dev->garp_port, NULL);
-	synchronize_rcu();
-	kfree(port);
+	kfree_rcu(port, rcu);
 }
 
 int garp_init_applicant(struct net_device *dev, struct garp_application *appl)
@@ -607,7 +606,6 @@ void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl
 	ASSERT_RTNL();
 
 	rcu_assign_pointer(port->applicants[appl->type], NULL);
-	synchronize_rcu();
 
 	/* Delete timer and generate a final TRANSMIT_PDU event to flush out
 	 * all pending messages before the applicant is gone. */
@@ -617,7 +615,7 @@ void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl
 	garp_queue_xmit(app);
 
 	dev_mc_del(dev, appl->proto.group_address);
-	kfree(app);
+	kfree_rcu(app, rcu);
 	garp_release_port(dev);
 }
 EXPORT_SYMBOL_GPL(garp_uninit_applicant);
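
[Editor's note: the two hunks above replace the synchronize_rcu()-then-kfree() sequence with kfree_rcu(), which queues the free to run after a grace period instead of blocking the caller for one. A minimal sketch of the pattern, with hypothetical structure and field names rather than the actual garp structs:

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct foo {
		int data;
		struct rcu_head rcu;	/* kfree_rcu() requires an rcu_head member */
	};

	static void foo_release(struct foo __rcu **slot)
	{
		struct foo *f = rcu_dereference_protected(*slot, 1);

		rcu_assign_pointer(*slot, NULL);	/* unpublish first */

		/* Old pattern, blocks for a full grace period:
		 *	synchronize_rcu();
		 *	kfree(f);
		 * New pattern, non-blocking; the object is freed only once
		 * all pre-existing RCU readers have finished:
		 */
		kfree_rcu(f, rcu);	/* second argument names the rcu_head field */
	}
]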
diff --git a/net/802/psnap.c b/net/802/psnap.c
index 21cde8fd5795..db6baf7cf6e9 100644
--- a/net/802/psnap.c
+++ b/net/802/psnap.c
@@ -147,7 +147,6 @@ struct datalink_proto *register_snap_client(const unsigned char *desc,
 out:
 	spin_unlock_bh(&snap_lock);
 
-	synchronize_net();
 	return proto;
 }
 
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 0eb1a886b370..917ecb93ea28 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -46,13 +46,6 @@ int vlan_net_id __read_mostly;
 
 const char vlan_fullname[] = "802.1Q VLAN Support";
 const char vlan_version[] = DRV_VERSION;
-static const char vlan_copyright[] = "Ben Greear <greearb@candelatech.com>";
-static const char vlan_buggyright[] = "David S. Miller <davem@redhat.com>";
-
-static struct packet_type vlan_packet_type __read_mostly = {
-	.type = cpu_to_be16(ETH_P_8021Q),
-	.func = vlan_skb_recv, /* VLAN receive method */
-};
 
 /* End of global variables definitions. */
 
@@ -128,9 +121,10 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
 	vlan_gvrp_request_leave(dev);
 
 	vlan_group_set_device(grp, vlan_id, NULL);
-	if (!grp->killall)
-		synchronize_net();
-
+	/* Because unregister_netdevice_queue() makes sure at least one rcu
+	 * grace period is respected before device freeing,
+	 * we dont need to call synchronize_net() here.
+	 */
 	unregister_netdevice_queue(dev, head);
 
 	/* If the group is now empty, kill off the group. */
@@ -211,7 +205,7 @@ int register_vlan_dev(struct net_device *dev)
 	grp->nr_vlans++;
 
 	if (ngrp) {
-		if (ops->ndo_vlan_rx_register)
+		if (ops->ndo_vlan_rx_register && (real_dev->features & NETIF_F_HW_VLAN_RX))
 			ops->ndo_vlan_rx_register(real_dev, ngrp);
 		rcu_assign_pointer(real_dev->vlgrp, ngrp);
 	}
@@ -330,10 +324,6 @@ static void vlan_sync_address(struct net_device *dev,
 static void vlan_transfer_features(struct net_device *dev,
 				   struct net_device *vlandev)
 {
-	u32 old_features = vlandev->features;
-
-	vlandev->features &= ~dev->vlan_features;
-	vlandev->features |= dev->features & dev->vlan_features;
 	vlandev->gso_max_size = dev->gso_max_size;
 
 	if (dev->features & NETIF_F_HW_VLAN_TX)
@@ -344,8 +334,8 @@ static void vlan_transfer_features(struct net_device *dev,
 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
 	vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid;
 #endif
-	if (old_features != vlandev->features)
-		netdev_features_change(vlandev);
+
+	netdev_update_features(vlandev);
 }
 
 static void __vlan_device_event(struct net_device *dev, unsigned long event)
@@ -490,9 +480,6 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 		if (dev->reg_state != NETREG_UNREGISTERING)
 			break;
 
-		/* Delete all VLANs for this dev. */
-		grp->killall = 1;
-
 		for (i = 0; i < VLAN_N_VID; i++) {
 			vlandev = vlan_group_get_device(grp, i);
 			if (!vlandev)
@@ -511,6 +498,18 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 	case NETDEV_PRE_TYPE_CHANGE:
 		/* Forbid underlaying device to change its type. */
 		return NOTIFY_BAD;
+
+	case NETDEV_NOTIFY_PEERS:
+	case NETDEV_BONDING_FAILOVER:
+		/* Propagate to vlan devices */
+		for (i = 0; i < VLAN_N_VID; i++) {
+			vlandev = vlan_group_get_device(grp, i);
+			if (!vlandev)
+				continue;
+
+			call_netdevice_notifiers(event, vlandev);
+		}
+		break;
 	}
 
 out:
@@ -672,8 +671,7 @@ static int __init vlan_proto_init(void)
 {
 	int err;
 
-	pr_info("%s v%s %s\n", vlan_fullname, vlan_version, vlan_copyright);
-	pr_info("All bugs added by %s\n", vlan_buggyright);
+	pr_info("%s v%s\n", vlan_fullname, vlan_version);
 
 	err = register_pernet_subsys(&vlan_net_ops);
 	if (err < 0)
@@ -691,7 +689,6 @@ static int __init vlan_proto_init(void)
 	if (err < 0)
 		goto err4;
 
-	dev_add_pack(&vlan_packet_type);
 	vlan_ioctl_set(vlan_ioctl_handler);
 	return 0;
 
@@ -712,8 +709,6 @@ static void __exit vlan_cleanup_module(void)
 
 	unregister_netdevice_notifier(&vlan_notifier_block);
 
-	dev_remove_pack(&vlan_packet_type);
-
 	unregister_pernet_subsys(&vlan_net_ops);
 	rcu_barrier(); /* Wait for completion of call_rcu()'s */
 
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index 5687c9b95f33..9da07e30d1a2 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -75,8 +75,6 @@ static inline struct vlan_dev_info *vlan_dev_info(const struct net_device *dev)
 }
 
 /* found in vlan_dev.c */
-int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
-		  struct packet_type *ptype, struct net_device *orig_dev);
 void vlan_dev_set_ingress_priority(const struct net_device *dev,
 				   u32 skb_prio, u16 vlan_prio);
 int vlan_dev_set_egress_priority(const struct net_device *dev,
@@ -120,11 +118,6 @@ extern void vlan_netlink_fini(void);
 
 extern struct rtnl_link_ops vlan_link_ops;
 
-static inline int is_vlan_dev(struct net_device *dev)
-{
-	return dev->priv_flags & IFF_802_1Q_VLAN;
-}
-
 extern int vlan_net_id;
 
 struct proc_dir_entry;
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index ce8e3ab3e7a5..fcc684678af6 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -4,7 +4,7 @@
 #include <linux/netpoll.h>
 #include "vlan.h"
 
-bool vlan_hwaccel_do_receive(struct sk_buff **skbp)
+bool vlan_do_receive(struct sk_buff **skbp)
 {
 	struct sk_buff *skb = *skbp;
 	u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
@@ -23,6 +23,31 @@ bool vlan_hwaccel_do_receive(struct sk_buff **skbp)
 		return false;
 
 	skb->dev = vlan_dev;
+	if (skb->pkt_type == PACKET_OTHERHOST) {
+		/* Our lower layer thinks this is not local, let's make sure.
+		 * This allows the VLAN to have a different MAC than the
+		 * underlying device, and still route correctly. */
+		if (!compare_ether_addr(eth_hdr(skb)->h_dest,
+					vlan_dev->dev_addr))
+			skb->pkt_type = PACKET_HOST;
+	}
+
+	if (!(vlan_dev_info(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR)) {
+		unsigned int offset = skb->data - skb_mac_header(skb);
+
+		/*
+		 * vlan_insert_tag expect skb->data pointing to mac header.
+		 * So change skb->data before calling it and change back to
+		 * original position later
+		 */
+		skb_push(skb, offset);
+		skb = *skbp = vlan_insert_tag(skb, skb->vlan_tci);
+		if (!skb)
+			return false;
+		skb_pull(skb, offset + VLAN_HLEN);
+		skb_reset_mac_len(skb);
+	}
+
 	skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
 	skb->vlan_tci = 0;
 
@@ -31,22 +56,8 @@ bool vlan_hwaccel_do_receive(struct sk_buff **skbp)
 		u64_stats_update_begin(&rx_stats->syncp);
 		rx_stats->rx_packets++;
 		rx_stats->rx_bytes += skb->len;
-
-		switch (skb->pkt_type) {
-		case PACKET_BROADCAST:
-			break;
-		case PACKET_MULTICAST:
+		if (skb->pkt_type == PACKET_MULTICAST)
 			rx_stats->rx_multicast++;
-			break;
-		case PACKET_OTHERHOST:
-			/* Our lower layer thinks this is not local, let's make sure.
-			 * This allows the VLAN to have a different MAC than the
-			 * underlying device, and still route correctly. */
-			if (!compare_ether_addr(eth_hdr(skb)->h_dest,
-						vlan_dev->dev_addr))
-				skb->pkt_type = PACKET_HOST;
-			break;
-		}
 		u64_stats_update_end(&rx_stats->syncp);
 
 		return true;
@@ -88,3 +99,81 @@ gro_result_t vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
 	return napi_gro_frags(napi);
 }
 EXPORT_SYMBOL(vlan_gro_frags);
+
+static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
+{
+	if (skb_cow(skb, skb_headroom(skb)) < 0)
+		return NULL;
+	memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
+	skb->mac_header += VLAN_HLEN;
+	skb_reset_mac_len(skb);
+	return skb;
+}
+
+static void vlan_set_encap_proto(struct sk_buff *skb, struct vlan_hdr *vhdr)
+{
+	__be16 proto;
+	unsigned char *rawp;
+
+	/*
+	 * Was a VLAN packet, grab the encapsulated protocol, which the layer
+	 * three protocols care about.
+	 */
+
+	proto = vhdr->h_vlan_encapsulated_proto;
+	if (ntohs(proto) >= 1536) {
+		skb->protocol = proto;
+		return;
+	}
+
+	rawp = skb->data;
+	if (*(unsigned short *) rawp == 0xFFFF)
+		/*
+		 * This is a magic hack to spot IPX packets. Older Novell
+		 * breaks the protocol design and runs IPX over 802.3 without
+		 * an 802.2 LLC layer. We look for FFFF which isn't a used
+		 * 802.2 SSAP/DSAP. This won't work for fault tolerant netware
+		 * but does for the rest.
+		 */
+		skb->protocol = htons(ETH_P_802_3);
+	else
+		/*
+		 * Real 802.2 LLC
+		 */
+		skb->protocol = htons(ETH_P_802_2);
+}
+
+struct sk_buff *vlan_untag(struct sk_buff *skb)
+{
+	struct vlan_hdr *vhdr;
+	u16 vlan_tci;
+
+	if (unlikely(vlan_tx_tag_present(skb))) {
+		/* vlan_tci is already set-up so leave this for another time */
+		return skb;
+	}
+
+	skb = skb_share_check(skb, GFP_ATOMIC);
+	if (unlikely(!skb))
+		goto err_free;
+
+	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
+		goto err_free;
+
+	vhdr = (struct vlan_hdr *) skb->data;
+	vlan_tci = ntohs(vhdr->h_vlan_TCI);
+	__vlan_hwaccel_put_tag(skb, vlan_tci);
+
+	skb_pull_rcsum(skb, VLAN_HLEN);
+	vlan_set_encap_proto(skb, vhdr);
+
+	skb = vlan_reorder_header(skb);
+	if (unlikely(!skb))
+		goto err_free;
+
+	return skb;
+
+err_free:
+	kfree_skb(skb);
+	return NULL;
+}
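
[Editor's note: vlan_untag() gives the core receive path a way to turn an in-band 802.1Q header into skb->vlan_tci before protocol demux, so tagged frames from NICs without hardware VLAN stripping take the same path as accelerated ones. A rough sketch of how a caller might use it; the real hook lives in the core receive path, and error handling is abbreviated:

	static int handle_frame(struct sk_buff *skb)
	{
		/* If the NIC did not strip the tag in hardware, the frame
		 * still carries protocol ETH_P_8021Q; move the tag out of
		 * the payload and into skb->vlan_tci so one code path
		 * handles both cases.
		 */
		if (skb->protocol == cpu_to_be16(ETH_P_8021Q) &&
		    !vlan_tx_tag_present(skb)) {
			skb = vlan_untag(skb);
			if (unlikely(!skb))
				return NET_RX_DROP;	/* vlan_untag() freed it */
		}

		/* ... normal protocol dispatch continues here ... */
		return NET_RX_SUCCESS;
	}
]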
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index b2ff6c8d3603..6e82148edfc8 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -65,179 +65,6 @@ static int vlan_dev_rebuild_header(struct sk_buff *skb)
 	return 0;
 }
 
-static inline struct sk_buff *vlan_check_reorder_header(struct sk_buff *skb)
-{
-	if (vlan_dev_info(skb->dev)->flags & VLAN_FLAG_REORDER_HDR) {
-		if (skb_cow(skb, skb_headroom(skb)) < 0)
-			skb = NULL;
-		if (skb) {
-			/* Lifted from Gleb's VLAN code... */
-			memmove(skb->data - ETH_HLEN,
-				skb->data - VLAN_ETH_HLEN, 12);
-			skb->mac_header += VLAN_HLEN;
-		}
-	}
-
-	return skb;
-}
-
-static inline void vlan_set_encap_proto(struct sk_buff *skb,
-					struct vlan_hdr *vhdr)
-{
-	__be16 proto;
-	unsigned char *rawp;
-
-	/*
-	 * Was a VLAN packet, grab the encapsulated protocol, which the layer
-	 * three protocols care about.
-	 */
-
-	proto = vhdr->h_vlan_encapsulated_proto;
-	if (ntohs(proto) >= 1536) {
-		skb->protocol = proto;
-		return;
-	}
-
-	rawp = skb->data;
-	if (*(unsigned short *)rawp == 0xFFFF)
-		/*
-		 * This is a magic hack to spot IPX packets. Older Novell
-		 * breaks the protocol design and runs IPX over 802.3 without
-		 * an 802.2 LLC layer. We look for FFFF which isn't a used
-		 * 802.2 SSAP/DSAP. This won't work for fault tolerant netware
-		 * but does for the rest.
-		 */
-		skb->protocol = htons(ETH_P_802_3);
-	else
-		/*
-		 * Real 802.2 LLC
-		 */
-		skb->protocol = htons(ETH_P_802_2);
-}
-
-/*
- * Determine the packet's protocol ID. The rule here is that we
- * assume 802.3 if the type field is short enough to be a length.
- * This is normal practice and works for any 'now in use' protocol.
- *
- * Also, at this point we assume that we ARE dealing exclusively with
- * VLAN packets, or packets that should be made into VLAN packets based
- * on a default VLAN ID.
- *
- * NOTE: Should be similar to ethernet/eth.c.
- *
- * SANITY NOTE: This method is called when a packet is moving up the stack
- *              towards userland. To get here, it would have already passed
- *              through the ethernet/eth.c eth_type_trans() method.
- * SANITY NOTE 2: We are referencing to the VLAN_HDR frields, which MAY be
- *                stored UNALIGNED in the memory. RISC systems don't like
- *                such cases very much...
- * SANITY NOTE 2a: According to Dave Miller & Alexey, it will always be
- *                 aligned, so there doesn't need to be any of the unaligned
- *                 stuff. It has been commented out now... --Ben
- *
- */
-int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
-		  struct packet_type *ptype, struct net_device *orig_dev)
-{
-	struct vlan_hdr *vhdr;
-	struct vlan_pcpu_stats *rx_stats;
-	struct net_device *vlan_dev;
-	u16 vlan_id;
-	u16 vlan_tci;
-
-	skb = skb_share_check(skb, GFP_ATOMIC);
-	if (skb == NULL)
-		goto err_free;
-
-	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
-		goto err_free;
-
-	vhdr = (struct vlan_hdr *)skb->data;
-	vlan_tci = ntohs(vhdr->h_vlan_TCI);
-	vlan_id = vlan_tci & VLAN_VID_MASK;
-
-	rcu_read_lock();
-	vlan_dev = vlan_find_dev(dev, vlan_id);
-
-	/* If the VLAN device is defined, we use it.
-	 * If not, and the VID is 0, it is a 802.1p packet (not
-	 * really a VLAN), so we will just netif_rx it later to the
-	 * original interface, but with the skb->proto set to the
-	 * wrapped proto: we do nothing here.
-	 */
-
-	if (!vlan_dev) {
-		if (vlan_id) {
-			pr_debug("%s: ERROR: No net_device for VID: %u on dev: %s\n",
-				 __func__, vlan_id, dev->name);
-			goto err_unlock;
-		}
-		rx_stats = NULL;
-	} else {
-		skb->dev = vlan_dev;
-
-		rx_stats = this_cpu_ptr(vlan_dev_info(skb->dev)->vlan_pcpu_stats);
-
-		u64_stats_update_begin(&rx_stats->syncp);
-		rx_stats->rx_packets++;
-		rx_stats->rx_bytes += skb->len;
-
-		skb->priority = vlan_get_ingress_priority(skb->dev, vlan_tci);
-
-		pr_debug("%s: priority: %u for TCI: %hu\n",
-			 __func__, skb->priority, vlan_tci);
-
-		switch (skb->pkt_type) {
-		case PACKET_BROADCAST:
-			/* Yeah, stats collect these together.. */
-			/* stats->broadcast ++; // no such counter :-( */
-			break;
-
-		case PACKET_MULTICAST:
-			rx_stats->rx_multicast++;
-			break;
-
-		case PACKET_OTHERHOST:
-			/* Our lower layer thinks this is not local, let's make
-			 * sure.
-			 * This allows the VLAN to have a different MAC than the
-			 * underlying device, and still route correctly.
-			 */
-			if (!compare_ether_addr(eth_hdr(skb)->h_dest,
-						skb->dev->dev_addr))
-				skb->pkt_type = PACKET_HOST;
-			break;
-		default:
-			break;
-		}
-		u64_stats_update_end(&rx_stats->syncp);
-	}
-
-	skb_pull_rcsum(skb, VLAN_HLEN);
-	vlan_set_encap_proto(skb, vhdr);
-
-	if (vlan_dev) {
-		skb = vlan_check_reorder_header(skb);
-		if (!skb) {
-			rx_stats->rx_errors++;
-			goto err_unlock;
-		}
-	}
-
-	netif_rx(skb);
-
-	rcu_read_unlock();
-	return NET_RX_SUCCESS;
-
-err_unlock:
-	rcu_read_unlock();
-err_free:
-	atomic_long_inc(&dev->rx_dropped);
-	kfree_skb(skb);
-	return NET_RX_DROP;
-}
-
 static inline u16
 vlan_dev_get_egress_qos_mask(struct net_device *dev, struct sk_buff *skb)
 {
@@ -338,7 +165,7 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
 		u64_stats_update_begin(&stats->syncp);
 		stats->tx_packets++;
 		stats->tx_bytes += len;
-		u64_stats_update_begin(&stats->syncp);
+		u64_stats_update_end(&stats->syncp);
 	} else {
 		this_cpu_inc(vlan_dev_info(dev)->vlan_pcpu_stats->tx_dropped);
 	}
@@ -701,8 +528,12 @@ static int vlan_dev_init(struct net_device *dev)
 					  (1<<__LINK_STATE_DORMANT))) |
 		      (1<<__LINK_STATE_PRESENT);
 
-	dev->features |= real_dev->features & real_dev->vlan_features;
-	dev->features |= NETIF_F_LLTX;
+	dev->hw_features = NETIF_F_ALL_CSUM | NETIF_F_SG |
+			   NETIF_F_FRAGLIST | NETIF_F_ALL_TSO |
+			   NETIF_F_HIGHDMA | NETIF_F_SCTP_CSUM |
+			   NETIF_F_ALL_FCOE;
+
+	dev->features |= real_dev->vlan_features | NETIF_F_LLTX;
 	dev->gso_max_size = real_dev->gso_max_size;
 
 	/* ipv6 shared card related stuff */
@@ -756,6 +587,24 @@ static void vlan_dev_uninit(struct net_device *dev)
 	}
 }
 
+static u32 vlan_dev_fix_features(struct net_device *dev, u32 features)
+{
+	struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+	u32 old_features = features;
+
+	features &= real_dev->features;
+	features &= real_dev->vlan_features;
+
+	if (old_features & NETIF_F_SOFT_FEATURES)
+		features |= old_features & NETIF_F_SOFT_FEATURES;
+
+	if (dev_ethtool_get_rx_csum(real_dev))
+		features |= NETIF_F_RXCSUM;
+	features |= NETIF_F_LLTX;
+
+	return features;
+}
+
 static int vlan_ethtool_get_settings(struct net_device *dev,
 				     struct ethtool_cmd *cmd)
 {
@@ -771,18 +620,6 @@ static void vlan_ethtool_get_drvinfo(struct net_device *dev,
 	strcpy(info->fw_version, "N/A");
 }
 
-static u32 vlan_ethtool_get_rx_csum(struct net_device *dev)
-{
-	const struct vlan_dev_info *vlan = vlan_dev_info(dev);
-	return dev_ethtool_get_rx_csum(vlan->real_dev);
-}
-
-static u32 vlan_ethtool_get_flags(struct net_device *dev)
-{
-	const struct vlan_dev_info *vlan = vlan_dev_info(dev);
-	return dev_ethtool_get_flags(vlan->real_dev);
-}
-
 static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
 
@@ -820,32 +657,10 @@ static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, st
 	return stats;
 }
 
-static int vlan_ethtool_set_tso(struct net_device *dev, u32 data)
-{
-	if (data) {
-		struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
-
-		/* Underlying device must support TSO for VLAN-tagged packets
-		 * and must have TSO enabled now.
-		 */
-		if (!(real_dev->vlan_features & NETIF_F_TSO))
-			return -EOPNOTSUPP;
-		if (!(real_dev->features & NETIF_F_TSO))
-			return -EINVAL;
-		dev->features |= NETIF_F_TSO;
-	} else {
-		dev->features &= ~NETIF_F_TSO;
-	}
-	return 0;
-}
-
 static const struct ethtool_ops vlan_ethtool_ops = {
 	.get_settings		= vlan_ethtool_get_settings,
 	.get_drvinfo		= vlan_ethtool_get_drvinfo,
 	.get_link		= ethtool_op_get_link,
-	.get_rx_csum		= vlan_ethtool_get_rx_csum,
-	.get_flags		= vlan_ethtool_get_flags,
-	.set_tso		= vlan_ethtool_set_tso,
 };
 
 static const struct net_device_ops vlan_netdev_ops = {
@@ -871,6 +686,7 @@ static const struct net_device_ops vlan_netdev_ops = {
 	.ndo_fcoe_get_wwn	= vlan_dev_fcoe_get_wwn,
 	.ndo_fcoe_ddp_target	= vlan_dev_fcoe_ddp_target,
 #endif
+	.ndo_fix_features	= vlan_dev_fix_features,
 };
 
 void vlan_setup(struct net_device *dev)
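
[Editor's note: the new ndo_fix_features hook lets the core ask a device to sanitize a proposed feature set before applying it; netdev_update_features(), used in vlan_transfer_features() earlier in this patch, re-runs that negotiation whenever the real device changes. A hedged sketch of the shape of such a hook for an ordinary driver, with hypothetical names and era-appropriate u32 feature masks:

	/* Called by netdev_update_features(): trim the requested features
	 * down to what the hardware can actually do right now.
	 */
	static u32 mydrv_fix_features(struct net_device *dev, u32 features)
	{
		/* e.g. hardware cannot checksum jumbo frames */
		if (dev->mtu > 1500)
			features &= ~NETIF_F_ALL_CSUM;

		/* TSO is useless without scatter-gather */
		if (!(features & NETIF_F_SG))
			features &= ~NETIF_F_ALL_TSO;

		return features;
	}

	static const struct net_device_ops mydrv_netdev_ops = {
		/* ... other ops ... */
		.ndo_fix_features	= mydrv_fix_features,
	};
]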
diff --git a/net/9p/Kconfig b/net/9p/Kconfig
index 7ed75c7bd5d1..d9ea09b11cf8 100644
--- a/net/9p/Kconfig
+++ b/net/9p/Kconfig
@@ -3,8 +3,8 @@
 #
 
 menuconfig NET_9P
-	depends on NET && EXPERIMENTAL
-	tristate "Plan 9 Resource Sharing Support (9P2000) (Experimental)"
+	depends on NET
+	tristate "Plan 9 Resource Sharing Support (9P2000)"
 	help
 	  If you say Y here, you will get experimental support for
 	  Plan 9 resource sharing via the 9P2000 protocol.
@@ -16,8 +16,8 @@ menuconfig NET_9P
 if NET_9P
 
 config NET_9P_VIRTIO
-	depends on EXPERIMENTAL && VIRTIO
-	tristate "9P Virtio Transport (Experimental)"
+	depends on VIRTIO
+	tristate "9P Virtio Transport"
 	help
 	  This builds support for a transports between
 	  guest partitions and a host partition.
diff --git a/net/9p/client.c b/net/9p/client.c
index a9aa2dd66482..9e3b0e640da1 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -92,9 +92,6 @@ static int get_protocol_version(const substring_t *name)
 	return version;
 }
 
-static struct p9_req_t *
-p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...);
-
 /**
  * parse_options - parse mount options into client structure
  * @opts: options string passed from mount
@@ -307,12 +304,13 @@ static int p9_tag_init(struct p9_client *c)
 	c->tagpool = p9_idpool_create();
 	if (IS_ERR(c->tagpool)) {
 		err = PTR_ERR(c->tagpool);
-		c->tagpool = NULL;
 		goto error;
 	}
-
-	p9_idpool_get(c->tagpool); /* reserve tag 0 */
-
+	err = p9_idpool_get(c->tagpool); /* reserve tag 0 */
+	if (err < 0) {
+		p9_idpool_destroy(c->tagpool);
+		goto error;
+	}
 	c->max_tag = 0;
 error:
 	return err;
@@ -518,12 +516,15 @@ out_err:
 	return err;
 }
 
+static struct p9_req_t *
+p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...);
+
 /**
  * p9_client_flush - flush (cancel) a request
  * @c: client state
  * @oldreq: request to cancel
  *
- * This sents a flush for a particular requests and links
+ * This sents a flush for a particular request and links
  * the flush request to the original request. The current
  * code only supports a single flush request although the protocol
  * allows for multiple flush requests to be sent for a single request.
@@ -789,11 +790,13 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
 	spin_lock_init(&clnt->lock);
 	INIT_LIST_HEAD(&clnt->fidlist);
 
-	p9_tag_init(clnt);
+	err = p9_tag_init(clnt);
+	if (err < 0)
+		goto free_client;
 
 	err = parse_opts(options, clnt);
 	if (err < 0)
-		goto free_client;
+		goto destroy_tagpool;
 
 	if (!clnt->trans_mod)
 		clnt->trans_mod = v9fs_get_default_trans();
@@ -802,13 +805,12 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
 		err = -EPROTONOSUPPORT;
 		P9_DPRINTK(P9_DEBUG_ERROR,
 			   "No transport defined or default transport\n");
-		goto free_client;
+		goto destroy_tagpool;
 	}
 
 	clnt->fidpool = p9_idpool_create();
 	if (IS_ERR(clnt->fidpool)) {
 		err = PTR_ERR(clnt->fidpool);
-		clnt->fidpool = NULL;
 		goto put_trans;
 	}
 
@@ -834,6 +836,8 @@ destroy_fidpool:
 	p9_idpool_destroy(clnt->fidpool);
 put_trans:
 	v9fs_put_trans(clnt->trans_mod);
+destroy_tagpool:
+	p9_idpool_destroy(clnt->tagpool);
 free_client:
 	kfree(clnt);
 	return ERR_PTR(err);
@@ -1281,7 +1285,7 @@ int
 p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
 	       u32 count)
 {
-	int err, rsize, total;
+	int err, rsize;
 	struct p9_client *clnt;
 	struct p9_req_t *req;
 	char *dataptr;
@@ -1290,7 +1294,6 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
 		   (long long unsigned) offset, count);
 	err = 0;
 	clnt = fid->clnt;
-	total = 0;
 
 	rsize = fid->iounit;
 	if (!rsize || rsize > clnt->msize-P9_IOHDRSZ)
@@ -1299,7 +1302,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
 	if (count < rsize)
 		rsize = count;
 
-	/* Don't bother zerocopy form small IO (< 1024) */
+	/* Don't bother zerocopy for small IO (< 1024) */
 	if (((clnt->trans_mod->pref & P9_TRANS_PREF_PAYLOAD_MASK) ==
 	     P9_TRANS_PREF_PAYLOAD_SEP) && (rsize > 1024)) {
 		req = p9_client_rpc(clnt, P9_TREAD, "dqE", fid->fid, offset,
@@ -1346,7 +1349,7 @@ int
 p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
 		u64 offset, u32 count)
 {
-	int err, rsize, total;
+	int err, rsize;
 	struct p9_client *clnt;
 	struct p9_req_t *req;
 
@@ -1354,7 +1357,6 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
 		   fid->fid, (long long unsigned) offset, count);
 	err = 0;
 	clnt = fid->clnt;
-	total = 0;
 
 	rsize = fid->iounit;
 	if (!rsize || rsize > clnt->msize-P9_IOHDRSZ)
@@ -1745,7 +1747,7 @@ EXPORT_SYMBOL_GPL(p9_client_xattrcreate);
 
 int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
 {
-	int err, rsize, total;
+	int err, rsize;
 	struct p9_client *clnt;
 	struct p9_req_t *req;
 	char *dataptr;
@@ -1755,7 +1757,6 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
 
 	err = 0;
 	clnt = fid->clnt;
-	total = 0;
 
 	rsize = fid->iounit;
 	if (!rsize || rsize > clnt->msize-P9_READDIRHDRSZ)
diff --git a/net/9p/mod.c b/net/9p/mod.c
index cf8a4128cd5c..72c398275051 100644
--- a/net/9p/mod.c
+++ b/net/9p/mod.c
@@ -139,7 +139,7 @@ void v9fs_put_trans(struct p9_trans_module *m)
 }
 
 /**
- * v9fs_init - Initialize module
+ * init_p9 - Initialize module
  *
  */
 static int __init init_p9(void)
@@ -154,7 +154,7 @@ static int __init init_p9(void)
 }
 
 /**
- * v9fs_init - shutdown module
+ * exit_p9 - shutdown module
  *
  */
 
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index aa5672b15eae..fdfdb5747f63 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -716,7 +716,6 @@ static int parse_opts(char *params, struct p9_fd_opts *opts)
 	substring_t args[MAX_OPT_ARGS];
 	int option;
 	char *options, *tmp_options;
-	int ret;
 
 	opts->port = P9_PORT;
 	opts->rfd = ~0;
@@ -744,7 +743,6 @@ static int parse_opts(char *params, struct p9_fd_opts *opts)
 			if (r < 0) {
 				P9_DPRINTK(P9_DEBUG_ERROR,
 					   "integer field, but no integer?\n");
-				ret = r;
 				continue;
 			}
 		}
@@ -918,8 +916,8 @@ p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
 	sin_server.sin_family = AF_INET;
 	sin_server.sin_addr.s_addr = in_aton(addr);
 	sin_server.sin_port = htons(opts.port);
-	err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &csocket);
-
+	err = __sock_create(read_pnet(&current->nsproxy->net_ns), PF_INET,
+			    SOCK_STREAM, IPPROTO_TCP, &csocket, 1);
 	if (err) {
 		P9_EPRINTK(KERN_ERR, "p9_trans_tcp: problem creating socket\n");
 		return err;
@@ -956,7 +954,8 @@ p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)
 
 	sun_server.sun_family = PF_UNIX;
 	strcpy(sun_server.sun_path, addr);
-	err = sock_create_kern(PF_UNIX, SOCK_STREAM, 0, &csocket);
+	err = __sock_create(read_pnet(&current->nsproxy->net_ns), PF_UNIX,
+			    SOCK_STREAM, 0, &csocket, 1);
 	if (err < 0) {
 		P9_EPRINTK(KERN_ERR, "p9_trans_unix: problem creating socket\n");
 		return err;
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 150e0c4bbf40..159c50f1c6bf 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -167,7 +167,6 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts)
 	substring_t args[MAX_OPT_ARGS];
 	int option;
 	char *options, *tmp_options;
-	int ret;
 
 	opts->port = P9_PORT;
 	opts->sq_depth = P9_RDMA_SQ_DEPTH;
@@ -195,7 +194,6 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts)
 			if (r < 0) {
 				P9_DPRINTK(P9_DEBUG_ERROR,
 					   "integer field, but no integer?\n");
-				ret = r;
 				continue;
 			}
 			switch (token) {
@@ -591,7 +589,8 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args)
 		return -ENOMEM;
 
 	/* Create the RDMA CM ID */
-	rdma->cm_id = rdma_create_id(p9_cm_event_handler, client, RDMA_PS_TCP);
+	rdma->cm_id = rdma_create_id(p9_cm_event_handler, client, RDMA_PS_TCP,
+				     IB_QPT_RC);
 	if (IS_ERR(rdma->cm_id))
 		goto error;
 
diff --git a/net/9p/util.c b/net/9p/util.c
index da6af81e59d9..9c1c9348ac35 100644
--- a/net/9p/util.c
+++ b/net/9p/util.c
@@ -93,7 +93,7 @@ int p9_idpool_get(struct p9_idpool *p)
 
 retry:
 	if (idr_pre_get(&p->pool, GFP_NOFS) == 0)
-		return 0;
+		return -1;
 
 	spin_lock_irqsave(&p->lock, flags);
 
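
[Editor's note: returning -1 instead of 0 matters because 0 is itself a valid id, so the old code made allocation failure indistinguishable from successfully allocating id 0; this is what lets p9_tag_init() above check err < 0. For context, a sketch of the classic two-step idr allocation loop this function is built around, with hypothetical names and abbreviated error handling:

	/* idr_pre_get() preallocates memory; idr_get_new() consumes it and
	 * can still return -EAGAIN under contention, hence the retry loop.
	 */
	static int alloc_id(struct idr *pool, void *ptr)
	{
		int id, err;

	retry:
		if (idr_pre_get(pool, GFP_KERNEL) == 0)
			return -1;		/* out of memory: no valid id */

		err = idr_get_new(pool, ptr, &id);
		if (err == -EAGAIN)
			goto retry;		/* raced: preallocation used up */
		if (err)
			return -1;

		return id;			/* may legitimately be 0 */
	}
]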
diff --git a/net/Kconfig b/net/Kconfig
index 79cabf1ee68b..878151c772c9 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -232,6 +232,20 @@ config XPS
 	depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
 	default y
 
+config HAVE_BPF_JIT
+	bool
+
+config BPF_JIT
+	bool "enable BPF Just In Time compiler"
+	depends on HAVE_BPF_JIT
+	depends on MODULES
+	---help---
+	  Berkeley Packet Filter filtering capabilities are normally handled
+	  by an interpreter. This option allows kernel to generate a native
+	  code when filter is loaded in memory. This should speedup
+	  packet sniffing (libpcap/tcpdump). Note : Admin should enable
+	  this feature changing /proc/sys/net/core/bpf_jit_enable
+
 menu "Network testing"
 
 config NET_PKTGEN
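
[Editor's note: the JIT compiles classic BPF programs such as the ones libpcap attaches via SO_ATTACH_FILTER; applications need no change beyond flipping the sysctl named in the help text. As an illustration only, not part of this patch, a userspace fragment attaching a trivial accept-all filter whose interpretation the JIT would replace with native code:

	#include <linux/filter.h>
	#include <sys/socket.h>
	#include <stdio.h>

	int attach_accept_all(int fd)
	{
		/* classic BPF: return 0xffff (accept the whole packet) */
		struct sock_filter insns[] = {
			{ BPF_RET | BPF_K, 0, 0, 0xffff },
		};
		struct sock_fprog prog = {
			.len	= 1,
			.filter	= insns,
		};

		/* With CONFIG_BPF_JIT=y and /proc/sys/net/core/bpf_jit_enable
		 * set to 1, the kernel translates the program to native code
		 * at attach time.
		 */
		if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
			       &prog, sizeof(prog)) < 0) {
			perror("SO_ATTACH_FILTER");
			return -1;
		}
		return 0;
	}
]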
diff --git a/net/atm/atm_sysfs.c b/net/atm/atm_sysfs.c
index f7fa67c78766..f49da5814bc3 100644
--- a/net/atm/atm_sysfs.c
+++ b/net/atm/atm_sysfs.c
@@ -59,6 +59,14 @@ static ssize_t show_atmaddress(struct device *cdev,
 	return pos - buf;
 }
 
+static ssize_t show_atmindex(struct device *cdev,
+			     struct device_attribute *attr, char *buf)
+{
+	struct atm_dev *adev = to_atm_dev(cdev);
+
+	return sprintf(buf, "%d\n", adev->number);
+}
+
 static ssize_t show_carrier(struct device *cdev,
 			    struct device_attribute *attr, char *buf)
 {
@@ -99,6 +107,7 @@ static ssize_t show_link_rate(struct device *cdev,
 
 static DEVICE_ATTR(address, S_IRUGO, show_address, NULL);
 static DEVICE_ATTR(atmaddress, S_IRUGO, show_atmaddress, NULL);
+static DEVICE_ATTR(atmindex, S_IRUGO, show_atmindex, NULL);
 static DEVICE_ATTR(carrier, S_IRUGO, show_carrier, NULL);
 static DEVICE_ATTR(type, S_IRUGO, show_type, NULL);
 static DEVICE_ATTR(link_rate, S_IRUGO, show_link_rate, NULL);
@@ -106,6 +115,7 @@ static DEVICE_ATTR(link_rate, S_IRUGO, show_link_rate, NULL);
 static struct device_attribute *atm_attrs[] = {
 	&dev_attr_atmaddress,
 	&dev_attr_address,
+	&dev_attr_atmindex,
 	&dev_attr_carrier,
 	&dev_attr_type,
 	&dev_attr_link_rate,
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 38754fdb88ba..ba48daa68c1f 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -129,7 +129,6 @@ static struct net_device *dev_lec[MAX_LEC_ITF];
 #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
 static void lec_handle_bridge(struct sk_buff *skb, struct net_device *dev)
 {
-	struct ethhdr *eth;
 	char *buff;
 	struct lec_priv *priv;
 
@@ -138,7 +137,6 @@ static void lec_handle_bridge(struct sk_buff *skb, struct net_device *dev)
 	 * LE_TOPOLOGY_REQUEST with the same value of Topology Change bit
 	 * as the Config BPDU has
 	 */
-	eth = (struct ethhdr *)skb->data;
 	buff = skb->data + skb->dev->hard_header_len;
 	if (*buff++ == 0x42 && *buff++ == 0x42 && *buff++ == 0x03) {
 		struct sock *sk;
@@ -1173,14 +1171,13 @@ static int __init lane_module_init(void)
 #endif
 
 	register_atm_ioctl(&lane_ioctl_ops);
-	pr_info("lec.c: " __DATE__ " " __TIME__ " initialized\n");
+	pr_info("lec.c: initialized\n");
 	return 0;
 }
 
 static void __exit lane_module_cleanup(void)
 {
 	int i;
-	struct lec_priv *priv;
 
 	remove_proc_entry("lec", atm_proc_root);
 
@@ -1188,7 +1185,6 @@ static void __exit lane_module_cleanup(void)
 
 	for (i = 0; i < MAX_LEC_ITF; i++) {
 		if (dev_lec[i] != NULL) {
-			priv = netdev_priv(dev_lec[i]);
 			unregister_netdev(dev_lec[i]);
 			free_netdev(dev_lec[i]);
 			dev_lec[i] = NULL;
diff --git a/net/atm/mpc.c b/net/atm/mpc.c
index 644cdf071642..3ccca42e6f90 100644
--- a/net/atm/mpc.c
+++ b/net/atm/mpc.c
@@ -1482,7 +1482,7 @@ static __init int atm_mpoa_init(void)
 	if (mpc_proc_init() != 0)
 		pr_info("failed to initialize /proc/mpoa\n");
 
-	pr_info("mpc.c: " __DATE__ " " __TIME__ " initialized\n");
+	pr_info("mpc.c: initialized\n");
 
 	return 0;
 }
diff --git a/net/atm/proc.c b/net/atm/proc.c
index f85da0779e5e..be3afdefec58 100644
--- a/net/atm/proc.c
+++ b/net/atm/proc.c
@@ -191,7 +191,7 @@ static void vcc_info(struct seq_file *seq, struct atm_vcc *vcc)
 {
 	struct sock *sk = sk_atm(vcc);
 
-	seq_printf(seq, "%p ", vcc);
+	seq_printf(seq, "%pK ", vcc);
 	if (!vcc->dev)
 		seq_printf(seq, "Unassigned ");
 	else
@@ -218,7 +218,7 @@ static void svc_info(struct seq_file *seq, struct atm_vcc *vcc)
 {
 	if (!vcc->dev)
 		seq_printf(seq, sizeof(void *) == 4 ?
-			   "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
+			   "N/A@%pK%10s" : "N/A@%pK%2s", vcc, "");
 	else
 		seq_printf(seq, "%3d %3d %5d ",
 			   vcc->dev->number, vcc->vpi, vcc->vci);
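
[Editor's note: the %p to %pK conversion ties these /proc/net/atm pointers to the kptr_restrict sysctl, so unprivileged readers no longer see raw kernel addresses. The idiom, as a sketch rather than part of the patch:

	static void show_ptr(struct seq_file *seq, struct atm_vcc *vcc)
	{
		/* %pK honours /proc/sys/kernel/kptr_restrict:
		 *   0 - behaves like %p (address printed)
		 *   1 - printed as zeroes unless the reader has CAP_SYSLOG
		 *   2 - always printed as zeroes
		 */
		seq_printf(seq, "%pK ", vcc);
	}
]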
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 6da5daeebab7..e7c69f4619ec 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1538,8 +1538,6 @@ static int ax25_sendmsg(struct kiocb *iocb, struct socket *sock,
 	}
 
 	/* Build a packet */
-	SOCK_DEBUG(sk, "AX.25: sendto: Addresses built. Building packet.\n");
-
 	/* Assume the worst case */
 	size = len + ax25->ax25_dev->dev->hard_header_len;
 
@@ -1549,8 +1547,6 @@ static int ax25_sendmsg(struct kiocb *iocb, struct socket *sock,
 
 	skb_reserve(skb, size - len);
 
-	SOCK_DEBUG(sk, "AX.25: Appending user data\n");
-
 	/* User data follows immediately after the AX.25 data */
 	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
 		err = -EFAULT;
@@ -1564,8 +1560,6 @@ static int ax25_sendmsg(struct kiocb *iocb, struct socket *sock,
 	if (!ax25->pidincl)
 		*skb_push(skb, 1) = sk->sk_protocol;
 
-	SOCK_DEBUG(sk, "AX.25: Transmitting buffer\n");
-
 	if (sk->sk_type == SOCK_SEQPACKET) {
 		/* Connected mode sockets go via the LAPB machine */
 		if (sk->sk_state != TCP_ESTABLISHED) {
@@ -1583,22 +1577,14 @@ static int ax25_sendmsg(struct kiocb *iocb, struct socket *sock,
 
 	skb_push(skb, 1 + ax25_addr_size(dp));
 
-	SOCK_DEBUG(sk, "Building AX.25 Header (dp=%p).\n", dp);
-
-	if (dp != NULL)
-		SOCK_DEBUG(sk, "Num digipeaters=%d\n", dp->ndigi);
+	/* Building AX.25 Header */
 
 	/* Build an AX.25 header */
 	lv = ax25_addr_build(skb->data, &ax25->source_addr, &sax.sax25_call,
 			     dp, AX25_COMMAND, AX25_MODULUS);
 
-	SOCK_DEBUG(sk, "Built header (%d bytes)\n",lv);
-
 	skb_set_transport_header(skb, lv);
 
-	SOCK_DEBUG(sk, "base=%p pos=%p\n",
-		   skb->data, skb_transport_header(skb));
-
 	*skb_transport_header(skb) = AX25_UI;
 
 	/* Datagram frames go straight out of the door as UI */
diff --git a/net/ax25/ax25_iface.c b/net/ax25/ax25_iface.c
index 5a0dda8df492..60b545e2822a 100644
--- a/net/ax25/ax25_iface.c
+++ b/net/ax25/ax25_iface.c
@@ -58,7 +58,7 @@ EXPORT_SYMBOL_GPL(ax25_register_pid);
 
 void ax25_protocol_release(unsigned int pid)
 {
-	struct ax25_protocol *s, *protocol;
+	struct ax25_protocol *protocol;
 
 	write_lock_bh(&protocol_list_lock);
 	protocol = protocol_list;
@@ -72,7 +72,6 @@ void ax25_protocol_release(unsigned int pid)
 
 	while (protocol != NULL && protocol->next != NULL) {
 		if (protocol->next->pid == pid) {
-			s = protocol->next;
 			protocol->next = protocol->next->next;
 			goto out;
 		}
diff --git a/net/batman-adv/aggregation.c b/net/batman-adv/aggregation.c
index af45d6b2031f..a8c32030527c 100644
--- a/net/batman-adv/aggregation.c
+++ b/net/batman-adv/aggregation.c
@@ -23,11 +23,12 @@
 #include "aggregation.h"
 #include "send.h"
 #include "routing.h"
+#include "hard-interface.h"
 
-/* calculate the size of the hna information for a given packet */
-static int hna_len(struct batman_packet *batman_packet)
+/* calculate the size of the tt information for a given packet */
+static int tt_len(struct batman_packet *batman_packet)
 {
-	return batman_packet->num_hna * ETH_ALEN;
+	return batman_packet->num_tt * ETH_ALEN;
 }
 
 /* return true if new_packet can be aggregated with forw_packet */
@@ -95,7 +96,6 @@ static bool can_aggregate_with(struct batman_packet *new_batman_packet,
 		return false;
 }
 
-#define atomic_dec_not_zero(v)	atomic_add_unless((v), -1, 0)
 /* create a new aggregated packet and add this packet to it */
 static void new_aggregated_packet(unsigned char *packet_buff, int packet_len,
 				  unsigned long send_time, bool direct_link,
@@ -106,12 +106,15 @@ static void new_aggregated_packet(unsigned char *packet_buff, int packet_len,
 	struct forw_packet *forw_packet_aggr;
 	unsigned char *skb_buff;
 
+	if (!atomic_inc_not_zero(&if_incoming->refcount))
+		return;
+
 	/* own packet should always be scheduled */
 	if (!own_packet) {
 		if (!atomic_dec_not_zero(&bat_priv->batman_queue_left)) {
 			bat_dbg(DBG_BATMAN, bat_priv,
 				"batman packet queue full\n");
-			return;
+			goto out;
 		}
 	}
 
@@ -119,7 +122,7 @@ static void new_aggregated_packet(unsigned char *packet_buff, int packet_len,
 	if (!forw_packet_aggr) {
 		if (!own_packet)
 			atomic_inc(&bat_priv->batman_queue_left);
-		return;
+		goto out;
 	}
 
 	if ((atomic_read(&bat_priv->aggregated_ogms)) &&
@@ -134,7 +137,7 @@ static void new_aggregated_packet(unsigned char *packet_buff, int packet_len,
 		if (!own_packet)
 			atomic_inc(&bat_priv->batman_queue_left);
 		kfree(forw_packet_aggr);
-		return;
+		goto out;
 	}
 	skb_reserve(forw_packet_aggr->skb, sizeof(struct ethhdr));
 
@@ -165,6 +168,10 @@ static void new_aggregated_packet(unsigned char *packet_buff, int packet_len,
 	queue_delayed_work(bat_event_workqueue,
 			   &forw_packet_aggr->delayed_work,
 			   send_time - jiffies);
+
+	return;
+out:
+	hardif_free_ref(if_incoming);
 }
 
 /* aggregate a new packet into the existing aggregation */
@@ -251,7 +258,7 @@ void receive_aggr_bat_packet(struct ethhdr *ethhdr, unsigned char *packet_buff,
 {
 	struct batman_packet *batman_packet;
 	int buff_pos = 0;
-	unsigned char *hna_buff;
+	unsigned char *tt_buff;
 
 	batman_packet = (struct batman_packet *)packet_buff;
 
@@ -260,14 +267,14 @@ void receive_aggr_bat_packet(struct ethhdr *ethhdr, unsigned char *packet_buff,
 		   orig_interval. */
 		batman_packet->seqno = ntohl(batman_packet->seqno);
 
-		hna_buff = packet_buff + buff_pos + BAT_PACKET_LEN;
+		tt_buff = packet_buff + buff_pos + BAT_PACKET_LEN;
 		receive_bat_packet(ethhdr, batman_packet,
-				   hna_buff, hna_len(batman_packet),
+				   tt_buff, tt_len(batman_packet),
 				   if_incoming);
 
-		buff_pos += BAT_PACKET_LEN + hna_len(batman_packet);
+		buff_pos += BAT_PACKET_LEN + tt_len(batman_packet);
 		batman_packet = (struct batman_packet *)
 			(packet_buff + buff_pos);
 	} while (aggregated_packet(buff_pos, packet_len,
-				   batman_packet->num_hna));
+				   batman_packet->num_tt));
 }
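
The shape of the change in new_aggregated_packet() is a standard kernel lifetime pattern: pin the object with atomic_inc_not_zero() up front, then make every early return jump to a single out: label that drops the pin, so no exit path can leak the reference or use a dead object. A compilable userspace analogue of that get/put discipline (simplified types and names, not the batman-adv structures):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct iface {
            atomic_int refcount;
            /* ... payload ... */
    };

    /* take a reference only while the object is still alive (count > 0) */
    static int iface_get(struct iface *it)
    {
            int old = atomic_load(&it->refcount);

            while (old > 0)
                    if (atomic_compare_exchange_weak(&it->refcount, &old, old + 1))
                            return 1;
            return 0;       /* already dying: caller must not touch it */
    }

    static void iface_put(struct iface *it)
    {
            if (atomic_fetch_sub(&it->refcount, 1) == 1)
                    free(it);       /* last reference dropped */
    }

    static void schedule_aggregated_packet(struct iface *it, int queue_full)
    {
            if (!iface_get(it))     /* pin before any work is queued */
                    return;

            if (queue_full)
                    goto out;       /* every failure path drops the pin */

            /* ... build the packet and queue delayed work using 'it' ... */
            return;                 /* success: the worker would drop the pin */
    out:
            iface_put(it);
    }

    int main(void)
    {
            struct iface *it = malloc(sizeof(*it));

            if (!it)
                    return 1;
            atomic_init(&it->refcount, 1);
            schedule_aggregated_packet(it, 1);      /* queue full: pin released */
            iface_put(it);                          /* drop the original reference */
            return 0;
    }
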
diff --git a/net/batman-adv/aggregation.h b/net/batman-adv/aggregation.h
index 062204289d1f..7e6d72fbf540 100644
--- a/net/batman-adv/aggregation.h
+++ b/net/batman-adv/aggregation.h
@@ -25,9 +25,9 @@
 #include "main.h"
 
 /* is there another aggregated packet here? */
-static inline int aggregated_packet(int buff_pos, int packet_len, int num_hna)
+static inline int aggregated_packet(int buff_pos, int packet_len, int num_tt)
 {
-	int next_buff_pos = buff_pos + BAT_PACKET_LEN + (num_hna * ETH_ALEN);
+	int next_buff_pos = buff_pos + BAT_PACKET_LEN + (num_tt * ETH_ALEN);
 
 	return (next_buff_pos <= packet_len) &&
 		(next_buff_pos <= MAX_AGGREGATION_BYTES);
diff --git a/net/batman-adv/bat_debugfs.c b/net/batman-adv/bat_debugfs.c
index 0e9d43509935..abaeec5f6247 100644
--- a/net/batman-adv/bat_debugfs.c
+++ b/net/batman-adv/bat_debugfs.c
@@ -241,13 +241,13 @@ static int softif_neigh_open(struct inode *inode, struct file *file)
 static int transtable_global_open(struct inode *inode, struct file *file)
 {
 	struct net_device *net_dev = (struct net_device *)inode->i_private;
-	return single_open(file, hna_global_seq_print_text, net_dev);
+	return single_open(file, tt_global_seq_print_text, net_dev);
 }
 
 static int transtable_local_open(struct inode *inode, struct file *file)
 {
 	struct net_device *net_dev = (struct net_device *)inode->i_private;
-	return single_open(file, hna_local_seq_print_text, net_dev);
+	return single_open(file, tt_local_seq_print_text, net_dev);
 }
 
 static int vis_data_open(struct inode *inode, struct file *file)
diff --git a/net/batman-adv/bat_sysfs.c b/net/batman-adv/bat_sysfs.c
index e449bf6353e0..497a0700cc3c 100644
--- a/net/batman-adv/bat_sysfs.c
+++ b/net/batman-adv/bat_sysfs.c
@@ -488,22 +488,24 @@ static ssize_t store_mesh_iface(struct kobject *kobj, struct attribute *attr,
 	    (strncmp(hard_iface->soft_iface->name, buff, IFNAMSIZ) == 0))
 		goto out;
 
+	if (!rtnl_trylock()) {
+		ret = -ERESTARTSYS;
+		goto out;
+	}
+
 	if (status_tmp == IF_NOT_IN_USE) {
-		rtnl_lock();
 		hardif_disable_interface(hard_iface);
-		rtnl_unlock();
-		goto out;
+		goto unlock;
 	}
 
 	/* if the interface already is in use */
-	if (hard_iface->if_status != IF_NOT_IN_USE) {
-		rtnl_lock();
+	if (hard_iface->if_status != IF_NOT_IN_USE)
 		hardif_disable_interface(hard_iface);
-		rtnl_unlock();
-	}
 
 	ret = hardif_enable_interface(hard_iface, buff);
 
+unlock:
+	rtnl_unlock();
 out:
 	hardif_free_ref(hard_iface);
 	return ret;
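
Using rtnl_trylock() here instead of three separate rtnl_lock()/rtnl_unlock() pairs does two things: the whole disable/enable sequence now runs under one RTNL critical section, and a contended lock makes the store return -ERESTARTSYS rather than block, which sidesteps a deadlock when the current RTNL holder is itself waiting on this sysfs attribute. A minimal trylock-or-bail sketch in portable C, with a pthread mutex standing in for the RTNL lock (names are illustrative; ERESTARTSYS is kernel-internal, so it is defined locally here):

    #include <pthread.h>

    #define ERESTARTSYS 512 /* kernel-internal "restart the syscall" code */

    static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;

    /* returns 'count' on success, -ERESTARTSYS if the lock is contended */
    static int store_mesh_iface(const char *buff, int count)
    {
            int ret = count;

            (void)buff;     /* the real handler parses the interface name */

            if (pthread_mutex_trylock(&cfg_lock) != 0)
                    return -ERESTARTSYS;    /* let the caller retry the write */

            /* ... disable and/or re-enable interfaces under the lock ... */

            pthread_mutex_unlock(&cfg_lock);
            return ret;
    }

    int main(void)
    {
            char buf[] = "bat0";

            return store_mesh_iface(buf, (int)sizeof(buf)) > 0 ? 0 : 1;
    }
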
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 3cc43558cf9c..61605a0f3f39 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -23,80 +23,88 @@
 #include "gateway_client.h"
 #include "gateway_common.h"
 #include "hard-interface.h"
+#include "originator.h"
 #include <linux/ip.h>
 #include <linux/ipv6.h>
 #include <linux/udp.h>
 #include <linux/if_vlan.h>
 
-static void gw_node_free_rcu(struct rcu_head *rcu)
-{
-	struct gw_node *gw_node;
-
-	gw_node = container_of(rcu, struct gw_node, rcu);
-	kfree(gw_node);
-}
-
 static void gw_node_free_ref(struct gw_node *gw_node)
 {
 	if (atomic_dec_and_test(&gw_node->refcount))
-		call_rcu(&gw_node->rcu, gw_node_free_rcu);
+		kfree_rcu(gw_node, rcu);
 }
 
-void *gw_get_selected(struct bat_priv *bat_priv)
+static struct gw_node *gw_get_selected_gw_node(struct bat_priv *bat_priv)
 {
-	struct gw_node *curr_gateway_tmp;
-	struct orig_node *orig_node = NULL;
+	struct gw_node *gw_node;
 
 	rcu_read_lock();
-	curr_gateway_tmp = rcu_dereference(bat_priv->curr_gw);
-	if (!curr_gateway_tmp)
+	gw_node = rcu_dereference(bat_priv->curr_gw);
+	if (!gw_node)
 		goto out;
 
-	orig_node = curr_gateway_tmp->orig_node;
-	if (!orig_node)
-		goto out;
-
-	if (!atomic_inc_not_zero(&orig_node->refcount))
-		orig_node = NULL;
+	if (!atomic_inc_not_zero(&gw_node->refcount))
+		gw_node = NULL;
 
 out:
 	rcu_read_unlock();
-	return orig_node;
+	return gw_node;
 }
 
-void gw_deselect(struct bat_priv *bat_priv)
+struct orig_node *gw_get_selected_orig(struct bat_priv *bat_priv)
 {
 	struct gw_node *gw_node;
+	struct orig_node *orig_node = NULL;
 
-	spin_lock_bh(&bat_priv->gw_list_lock);
-	gw_node = rcu_dereference(bat_priv->curr_gw);
-	rcu_assign_pointer(bat_priv->curr_gw, NULL);
-	spin_unlock_bh(&bat_priv->gw_list_lock);
+	gw_node = gw_get_selected_gw_node(bat_priv);
+	if (!gw_node)
+		goto out;
+
+	rcu_read_lock();
+	orig_node = gw_node->orig_node;
+	if (!orig_node)
+		goto unlock;
 
+	if (!atomic_inc_not_zero(&orig_node->refcount))
+		orig_node = NULL;
+
+unlock:
+	rcu_read_unlock();
+out:
 	if (gw_node)
 		gw_node_free_ref(gw_node);
+	return orig_node;
 }
 
 static void gw_select(struct bat_priv *bat_priv, struct gw_node *new_gw_node)
 {
 	struct gw_node *curr_gw_node;
 
+	spin_lock_bh(&bat_priv->gw_list_lock);
+
 	if (new_gw_node && !atomic_inc_not_zero(&new_gw_node->refcount))
 		new_gw_node = NULL;
 
-	spin_lock_bh(&bat_priv->gw_list_lock);
-	curr_gw_node = rcu_dereference(bat_priv->curr_gw);
+	curr_gw_node = bat_priv->curr_gw;
 	rcu_assign_pointer(bat_priv->curr_gw, new_gw_node);
-	spin_unlock_bh(&bat_priv->gw_list_lock);
 
 	if (curr_gw_node)
 		gw_node_free_ref(curr_gw_node);
+
+	spin_unlock_bh(&bat_priv->gw_list_lock);
+}
+
+void gw_deselect(struct bat_priv *bat_priv)
+{
+	gw_select(bat_priv, NULL);
 }
 
 void gw_election(struct bat_priv *bat_priv)
 {
 	struct hlist_node *node;
-	struct gw_node *gw_node, *curr_gw, *curr_gw_tmp = NULL;
+	struct gw_node *gw_node, *curr_gw = NULL, *curr_gw_tmp = NULL;
+	struct neigh_node *router;
 	uint8_t max_tq = 0;
 	uint32_t max_gw_factor = 0, tmp_gw_factor = 0;
 	int down, up;
@@ -110,32 +118,25 @@ void gw_election(struct bat_priv *bat_priv)
 	if (atomic_read(&bat_priv->gw_mode) != GW_MODE_CLIENT)
 		return;
 
-	rcu_read_lock();
-	curr_gw = rcu_dereference(bat_priv->curr_gw);
-	if (curr_gw) {
-		rcu_read_unlock();
-		return;
-	}
+	curr_gw = gw_get_selected_gw_node(bat_priv);
+	if (curr_gw)
+		goto out;
 
+	rcu_read_lock();
 	if (hlist_empty(&bat_priv->gw_list)) {
-
-		if (curr_gw) {
-			rcu_read_unlock();
-			bat_dbg(DBG_BATMAN, bat_priv,
-				"Removing selected gateway - "
-				"no gateway in range\n");
-			gw_deselect(bat_priv);
-		} else
-			rcu_read_unlock();
-
-		return;
+		bat_dbg(DBG_BATMAN, bat_priv,
+			"Removing selected gateway - "
+			"no gateway in range\n");
+		gw_deselect(bat_priv);
+		goto unlock;
 	}
 
 	hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
-		if (!gw_node->orig_node->router)
+		if (gw_node->deleted)
 			continue;
 
-		if (gw_node->deleted)
+		router = orig_node_get_router(gw_node->orig_node);
+		if (!router)
 			continue;
 
 		switch (atomic_read(&bat_priv->gw_sel_class)) {
@@ -143,15 +144,14 @@ void gw_election(struct bat_priv *bat_priv)
 			gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags,
 					     &down, &up);
 
-			tmp_gw_factor = (gw_node->orig_node->router->tq_avg *
-					 gw_node->orig_node->router->tq_avg *
+			tmp_gw_factor = (router->tq_avg * router->tq_avg *
 					 down * 100 * 100) /
 					(TQ_LOCAL_WINDOW_SIZE *
 					 TQ_LOCAL_WINDOW_SIZE * 64);
 
 			if ((tmp_gw_factor > max_gw_factor) ||
 			    ((tmp_gw_factor == max_gw_factor) &&
-			     (gw_node->orig_node->router->tq_avg > max_tq)))
+			     (router->tq_avg > max_tq)))
 				curr_gw_tmp = gw_node;
 			break;
 
@@ -163,19 +163,25 @@ void gw_election(struct bat_priv *bat_priv)
 			 * soon as a better gateway appears which has
 			 * $routing_class more tq points)
 			 **/
-			if (gw_node->orig_node->router->tq_avg > max_tq)
+			if (router->tq_avg > max_tq)
 				curr_gw_tmp = gw_node;
 			break;
 		}
 
-		if (gw_node->orig_node->router->tq_avg > max_tq)
-			max_tq = gw_node->orig_node->router->tq_avg;
+		if (router->tq_avg > max_tq)
+			max_tq = router->tq_avg;
 
 		if (tmp_gw_factor > max_gw_factor)
 			max_gw_factor = tmp_gw_factor;
+
+		neigh_node_free_ref(router);
 	}
 
 	if (curr_gw != curr_gw_tmp) {
+		router = orig_node_get_router(curr_gw_tmp->orig_node);
+		if (!router)
+			goto unlock;
+
 		if ((curr_gw) && (!curr_gw_tmp))
 			bat_dbg(DBG_BATMAN, bat_priv,
 				"Removing selected gateway - "
@@ -186,48 +192,50 @@ void gw_election(struct bat_priv *bat_priv)
186 "(gw_flags: %i, tq: %i)\n", 192 "(gw_flags: %i, tq: %i)\n",
187 curr_gw_tmp->orig_node->orig, 193 curr_gw_tmp->orig_node->orig,
188 curr_gw_tmp->orig_node->gw_flags, 194 curr_gw_tmp->orig_node->gw_flags,
189 curr_gw_tmp->orig_node->router->tq_avg); 195 router->tq_avg);
190 else 196 else
191 bat_dbg(DBG_BATMAN, bat_priv, 197 bat_dbg(DBG_BATMAN, bat_priv,
192 "Changing route to gateway %pM " 198 "Changing route to gateway %pM "
193 "(gw_flags: %i, tq: %i)\n", 199 "(gw_flags: %i, tq: %i)\n",
194 curr_gw_tmp->orig_node->orig, 200 curr_gw_tmp->orig_node->orig,
195 curr_gw_tmp->orig_node->gw_flags, 201 curr_gw_tmp->orig_node->gw_flags,
196 curr_gw_tmp->orig_node->router->tq_avg); 202 router->tq_avg);
197 203
204 neigh_node_free_ref(router);
198 gw_select(bat_priv, curr_gw_tmp); 205 gw_select(bat_priv, curr_gw_tmp);
199 } 206 }
200 207
208unlock:
201 rcu_read_unlock(); 209 rcu_read_unlock();
210out:
211 if (curr_gw)
212 gw_node_free_ref(curr_gw);
202} 213}
203 214
204void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node) 215void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node)
205{ 216{
206 struct gw_node *curr_gateway_tmp; 217 struct orig_node *curr_gw_orig;
218 struct neigh_node *router_gw = NULL, *router_orig = NULL;
207 uint8_t gw_tq_avg, orig_tq_avg; 219 uint8_t gw_tq_avg, orig_tq_avg;
208 220
209 rcu_read_lock(); 221 curr_gw_orig = gw_get_selected_orig(bat_priv);
210 curr_gateway_tmp = rcu_dereference(bat_priv->curr_gw); 222 if (!curr_gw_orig)
211 if (!curr_gateway_tmp) 223 goto deselect;
212 goto out_rcu;
213
214 if (!curr_gateway_tmp->orig_node)
215 goto deselect_rcu;
216 224
217 if (!curr_gateway_tmp->orig_node->router) 225 router_gw = orig_node_get_router(curr_gw_orig);
218 goto deselect_rcu; 226 if (!router_gw)
227 goto deselect;
219 228
220 /* this node already is the gateway */ 229 /* this node already is the gateway */
221 if (curr_gateway_tmp->orig_node == orig_node) 230 if (curr_gw_orig == orig_node)
222 goto out_rcu; 231 goto out;
223
224 if (!orig_node->router)
225 goto out_rcu;
226 232
227 gw_tq_avg = curr_gateway_tmp->orig_node->router->tq_avg; 233 router_orig = orig_node_get_router(orig_node);
228 rcu_read_unlock(); 234 if (!router_orig)
235 goto out;
229 236
230 orig_tq_avg = orig_node->router->tq_avg; 237 gw_tq_avg = router_gw->tq_avg;
238 orig_tq_avg = router_orig->tq_avg;
231 239
232 /* the TQ value has to be better */ 240 /* the TQ value has to be better */
233 if (orig_tq_avg < gw_tq_avg) 241 if (orig_tq_avg < gw_tq_avg)
@@ -245,16 +253,17 @@ void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node)
245 "Restarting gateway selection: better gateway found (tq curr: " 253 "Restarting gateway selection: better gateway found (tq curr: "
246 "%i, tq new: %i)\n", 254 "%i, tq new: %i)\n",
247 gw_tq_avg, orig_tq_avg); 255 gw_tq_avg, orig_tq_avg);
248 goto deselect;
249 256
250out_rcu:
251 rcu_read_unlock();
252 goto out;
253deselect_rcu:
254 rcu_read_unlock();
255deselect: 257deselect:
256 gw_deselect(bat_priv); 258 gw_deselect(bat_priv);
257out: 259out:
260 if (curr_gw_orig)
261 orig_node_free_ref(curr_gw_orig);
262 if (router_gw)
263 neigh_node_free_ref(router_gw);
264 if (router_orig)
265 neigh_node_free_ref(router_orig);
266
258 return; 267 return;
259} 268}
260 269
@@ -291,7 +300,15 @@ void gw_node_update(struct bat_priv *bat_priv,
 		    struct orig_node *orig_node, uint8_t new_gwflags)
 {
 	struct hlist_node *node;
-	struct gw_node *gw_node;
+	struct gw_node *gw_node, *curr_gw;
+
+	/**
+	 * Note: We don't need a NULL check here, since curr_gw never gets
+	 * dereferenced. If curr_gw is NULL we also should not exit as we may
+	 * have this gateway in our list (duplication check!) even though we
+	 * have no currently selected gateway.
+	 */
+	curr_gw = gw_get_selected_gw_node(bat_priv);
 
 	rcu_read_lock();
 	hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
@@ -312,22 +329,26 @@ void gw_node_update(struct bat_priv *bat_priv,
312 "Gateway %pM removed from gateway list\n", 329 "Gateway %pM removed from gateway list\n",
313 orig_node->orig); 330 orig_node->orig);
314 331
315 if (gw_node == rcu_dereference(bat_priv->curr_gw)) { 332 if (gw_node == curr_gw)
316 rcu_read_unlock(); 333 goto deselect;
317 gw_deselect(bat_priv);
318 return;
319 }
320 } 334 }
321 335
322 rcu_read_unlock(); 336 goto unlock;
323 return;
324 } 337 }
325 rcu_read_unlock();
326 338
327 if (new_gwflags == 0) 339 if (new_gwflags == 0)
328 return; 340 goto unlock;
329 341
330 gw_node_add(bat_priv, orig_node, new_gwflags); 342 gw_node_add(bat_priv, orig_node, new_gwflags);
343 goto unlock;
344
345deselect:
346 gw_deselect(bat_priv);
347unlock:
348 rcu_read_unlock();
349
350 if (curr_gw)
351 gw_node_free_ref(curr_gw);
331} 352}
332 353
333void gw_node_delete(struct bat_priv *bat_priv, struct orig_node *orig_node) 354void gw_node_delete(struct bat_priv *bat_priv, struct orig_node *orig_node)
@@ -337,9 +358,12 @@ void gw_node_delete(struct bat_priv *bat_priv, struct orig_node *orig_node)
 
 void gw_node_purge(struct bat_priv *bat_priv)
 {
-	struct gw_node *gw_node;
+	struct gw_node *gw_node, *curr_gw;
 	struct hlist_node *node, *node_tmp;
 	unsigned long timeout = 2 * PURGE_TIMEOUT * HZ;
+	char do_deselect = 0;
+
+	curr_gw = gw_get_selected_gw_node(bat_priv);
 
 	spin_lock_bh(&bat_priv->gw_list_lock);
 
@@ -350,41 +374,56 @@ void gw_node_purge(struct bat_priv *bat_priv)
 		    atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE)
 			continue;
 
-		if (rcu_dereference(bat_priv->curr_gw) == gw_node)
-			gw_deselect(bat_priv);
+		if (curr_gw == gw_node)
+			do_deselect = 1;
 
 		hlist_del_rcu(&gw_node->list);
 		gw_node_free_ref(gw_node);
 	}
 
-
 	spin_unlock_bh(&bat_priv->gw_list_lock);
+
+	/* gw_deselect() needs to acquire the gw_list_lock */
+	if (do_deselect)
+		gw_deselect(bat_priv);
+
+	if (curr_gw)
+		gw_node_free_ref(curr_gw);
 }
 
+/**
+ * fails if orig_node has no router
+ */
 static int _write_buffer_text(struct bat_priv *bat_priv,
 			      struct seq_file *seq, struct gw_node *gw_node)
 {
 	struct gw_node *curr_gw;
-	int down, up, ret;
+	struct neigh_node *router;
+	int down, up, ret = -1;
 
 	gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags, &down, &up);
 
-	rcu_read_lock();
-	curr_gw = rcu_dereference(bat_priv->curr_gw);
+	router = orig_node_get_router(gw_node->orig_node);
+	if (!router)
+		goto out;
 
-	ret = seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %3i - %i%s/%i%s\n",
-			 (curr_gw == gw_node ? "=>" : "  "),
-			 gw_node->orig_node->orig,
-			 gw_node->orig_node->router->tq_avg,
-			 gw_node->orig_node->router->addr,
-			 gw_node->orig_node->router->if_incoming->net_dev->name,
-			 gw_node->orig_node->gw_flags,
-			 (down > 2048 ? down / 1024 : down),
-			 (down > 2048 ? "MBit" : "KBit"),
-			 (up > 2048 ? up / 1024 : up),
-			 (up > 2048 ? "MBit" : "KBit"));
+	curr_gw = gw_get_selected_gw_node(bat_priv);
 
-	rcu_read_unlock();
+	ret = seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %3i - %i%s/%i%s\n",
+			 (curr_gw == gw_node ? "=>" : "  "),
+			 gw_node->orig_node->orig,
+			 router->tq_avg, router->addr,
+			 router->if_incoming->net_dev->name,
+			 gw_node->orig_node->gw_flags,
+			 (down > 2048 ? down / 1024 : down),
+			 (down > 2048 ? "MBit" : "KBit"),
+			 (up > 2048 ? up / 1024 : up),
+			 (up > 2048 ? "MBit" : "KBit"));
+
+	neigh_node_free_ref(router);
+	if (curr_gw)
+		gw_node_free_ref(curr_gw);
+out:
 	return ret;
 }
 
@@ -392,40 +431,42 @@ int gw_client_seq_print_text(struct seq_file *seq, void *offset)
 {
 	struct net_device *net_dev = (struct net_device *)seq->private;
 	struct bat_priv *bat_priv = netdev_priv(net_dev);
+	struct hard_iface *primary_if;
 	struct gw_node *gw_node;
 	struct hlist_node *node;
-	int gw_count = 0;
-
-	if (!bat_priv->primary_if) {
+	int gw_count = 0, ret = 0;
 
-		return seq_printf(seq, "BATMAN mesh %s disabled - please "
-				  "specify interfaces to enable it\n",
-				  net_dev->name);
+	primary_if = primary_if_get_selected(bat_priv);
+	if (!primary_if) {
+		ret = seq_printf(seq, "BATMAN mesh %s disabled - please "
+				 "specify interfaces to enable it\n",
+				 net_dev->name);
+		goto out;
 	}
 
-	if (bat_priv->primary_if->if_status != IF_ACTIVE) {
-
-		return seq_printf(seq, "BATMAN mesh %s disabled - "
-				  "primary interface not active\n",
-				  net_dev->name);
+	if (primary_if->if_status != IF_ACTIVE) {
+		ret = seq_printf(seq, "BATMAN mesh %s disabled - "
+				 "primary interface not active\n",
+				 net_dev->name);
+		goto out;
 	}
 
 	seq_printf(seq, "      %-12s (%s/%i) %17s [%10s]: gw_class ... "
 		   "[B.A.T.M.A.N. adv %s%s, MainIF/MAC: %s/%pM (%s)]\n",
 		   "Gateway", "#", TQ_MAX_VALUE, "Nexthop",
 		   "outgoingIF", SOURCE_VERSION, REVISION_VERSION_STR,
-		   bat_priv->primary_if->net_dev->name,
-		   bat_priv->primary_if->net_dev->dev_addr, net_dev->name);
+		   primary_if->net_dev->name,
+		   primary_if->net_dev->dev_addr, net_dev->name);
 
 	rcu_read_lock();
 	hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
 		if (gw_node->deleted)
 			continue;
 
-		if (!gw_node->orig_node->router)
+		/* fails if orig_node has no router */
+		if (_write_buffer_text(bat_priv, seq, gw_node) < 0)
 			continue;
 
-		_write_buffer_text(bat_priv, seq, gw_node);
 		gw_count++;
 	}
 	rcu_read_unlock();
@@ -433,7 +474,10 @@ int gw_client_seq_print_text(struct seq_file *seq, void *offset)
 	if (gw_count == 0)
 		seq_printf(seq, "No gateways in range ...\n");
 
-	return 0;
+out:
+	if (primary_if)
+		hardif_free_ref(primary_if);
+	return ret;
 }
 
 int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb)
@@ -442,6 +486,7 @@ int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb)
 	struct iphdr *iphdr;
 	struct ipv6hdr *ipv6hdr;
 	struct udphdr *udphdr;
+	struct gw_node *curr_gw;
 	unsigned int header_len = 0;
 
 	if (atomic_read(&bat_priv->gw_mode) == GW_MODE_OFF)
@@ -506,12 +551,11 @@ int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb)
 	if (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER)
 		return -1;
 
-	rcu_read_lock();
-	if (!rcu_dereference(bat_priv->curr_gw)) {
-		rcu_read_unlock();
+	curr_gw = gw_get_selected_gw_node(bat_priv);
+	if (!curr_gw)
 		return 0;
-	}
-	rcu_read_unlock();
 
+	if (curr_gw)
+		gw_node_free_ref(curr_gw);
 	return 1;
 }
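
Two recurring conversions run through this file. First, gw_node_free_ref() drops its hand-written RCU callback: when the callback's only job is to kfree() the enclosing object, kfree_rcu() can derive the container_of() offset from the rcu_head field name itself. Abridged from the hunk above:

    /* before: a dedicated callback whose only job is to free the object */
    static void gw_node_free_rcu(struct rcu_head *rcu)
    {
            kfree(container_of(rcu, struct gw_node, rcu));
    }
    /* ...freed via: call_rcu(&gw_node->rcu, gw_node_free_rcu); */

    /* after: one line, same grace-period-deferred kfree() */
    /* ...freed via: kfree_rcu(gw_node, rcu); */

Second, every reader of bat_priv->curr_gw now goes through gw_get_selected_gw_node(), which returns a counted reference (or NULL), so callers can keep the gateway past the RCU read section and must release it with gw_node_free_ref() on every exit path.
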
diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h
index 2aa439124ee3..1ce8c6066da1 100644
--- a/net/batman-adv/gateway_client.h
+++ b/net/batman-adv/gateway_client.h
@@ -24,7 +24,7 @@
 
 void gw_deselect(struct bat_priv *bat_priv);
 void gw_election(struct bat_priv *bat_priv);
-void *gw_get_selected(struct bat_priv *bat_priv);
+struct orig_node *gw_get_selected_orig(struct bat_priv *bat_priv);
 void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node);
 void gw_node_update(struct bat_priv *bat_priv,
 		    struct orig_node *orig_node, uint8_t new_gwflags);
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index b3058e46ee6b..dfbfccc9fe40 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -31,9 +31,6 @@
 
 #include <linux/if_arp.h>
 
-/* protect update critical side of hardif_list - but not the content */
-static DEFINE_SPINLOCK(hardif_list_lock);
-
 
 static int batman_skb_recv(struct sk_buff *skb,
 			   struct net_device *dev,
@@ -110,47 +107,57 @@ out:
 	return hard_iface;
 }
 
-static void update_primary_addr(struct bat_priv *bat_priv)
+static void primary_if_update_addr(struct bat_priv *bat_priv)
 {
 	struct vis_packet *vis_packet;
+	struct hard_iface *primary_if;
+
+	primary_if = primary_if_get_selected(bat_priv);
+	if (!primary_if)
+		goto out;
 
 	vis_packet = (struct vis_packet *)
 				bat_priv->my_vis_info->skb_packet->data;
-	memcpy(vis_packet->vis_orig,
-	       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
+	memcpy(vis_packet->vis_orig, primary_if->net_dev->dev_addr, ETH_ALEN);
 	memcpy(vis_packet->sender_orig,
-	       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
+	       primary_if->net_dev->dev_addr, ETH_ALEN);
+
+out:
+	if (primary_if)
+		hardif_free_ref(primary_if);
 }
 
-static void set_primary_if(struct bat_priv *bat_priv,
-			   struct hard_iface *hard_iface)
+static void primary_if_select(struct bat_priv *bat_priv,
+			      struct hard_iface *new_hard_iface)
 {
+	struct hard_iface *curr_hard_iface;
 	struct batman_packet *batman_packet;
-	struct hard_iface *old_if;
 
-	if (hard_iface && !atomic_inc_not_zero(&hard_iface->refcount))
-		hard_iface = NULL;
+	ASSERT_RTNL();
+
+	if (new_hard_iface && !atomic_inc_not_zero(&new_hard_iface->refcount))
+		new_hard_iface = NULL;
 
-	old_if = bat_priv->primary_if;
-	bat_priv->primary_if = hard_iface;
+	curr_hard_iface = bat_priv->primary_if;
+	rcu_assign_pointer(bat_priv->primary_if, new_hard_iface);
 
-	if (old_if)
-		hardif_free_ref(old_if);
+	if (curr_hard_iface)
+		hardif_free_ref(curr_hard_iface);
 
-	if (!bat_priv->primary_if)
+	if (!new_hard_iface)
 		return;
 
-	batman_packet = (struct batman_packet *)(hard_iface->packet_buff);
+	batman_packet = (struct batman_packet *)(new_hard_iface->packet_buff);
 	batman_packet->flags = PRIMARIES_FIRST_HOP;
 	batman_packet->ttl = TTL;
 
-	update_primary_addr(bat_priv);
+	primary_if_update_addr(bat_priv);
 
 	/***
-	 * hacky trick to make sure that we send the HNA information via
+	 * hacky trick to make sure that we send the TT information via
 	 * our new primary interface
 	 */
-	atomic_set(&bat_priv->hna_local_changed, 1);
+	atomic_set(&bat_priv->tt_local_changed, 1);
 }
 
 static bool hardif_is_iface_up(struct hard_iface *hard_iface)
@@ -236,9 +243,10 @@ void update_min_mtu(struct net_device *soft_iface)
 static void hardif_activate_interface(struct hard_iface *hard_iface)
 {
 	struct bat_priv *bat_priv;
+	struct hard_iface *primary_if = NULL;
 
 	if (hard_iface->if_status != IF_INACTIVE)
-		return;
+		goto out;
 
 	bat_priv = netdev_priv(hard_iface->soft_iface);
 
@@ -249,14 +257,18 @@ static void hardif_activate_interface(struct hard_iface *hard_iface)
 	 * the first active interface becomes our primary interface or
 	 * the next active interface after the old primay interface was removed
 	 */
-	if (!bat_priv->primary_if)
-		set_primary_if(bat_priv, hard_iface);
+	primary_if = primary_if_get_selected(bat_priv);
+	if (!primary_if)
+		primary_if_select(bat_priv, hard_iface);
 
 	bat_info(hard_iface->soft_iface, "Interface activated: %s\n",
 		 hard_iface->net_dev->name);
 
 	update_min_mtu(hard_iface->soft_iface);
-	return;
+
+out:
+	if (primary_if)
+		hardif_free_ref(primary_if);
 }
 
 static void hardif_deactivate_interface(struct hard_iface *hard_iface)
@@ -327,7 +339,7 @@ int hardif_enable_interface(struct hard_iface *hard_iface, char *iface_name)
 	batman_packet->flags = 0;
 	batman_packet->ttl = 2;
 	batman_packet->tq = TQ_MAX_VALUE;
-	batman_packet->num_hna = 0;
+	batman_packet->num_tt = 0;
 
 	hard_iface->if_num = bat_priv->num_ifaces;
 	bat_priv->num_ifaces++;
@@ -386,12 +398,13 @@ err:
 void hardif_disable_interface(struct hard_iface *hard_iface)
 {
 	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
+	struct hard_iface *primary_if = NULL;
 
 	if (hard_iface->if_status == IF_ACTIVE)
 		hardif_deactivate_interface(hard_iface);
 
 	if (hard_iface->if_status != IF_INACTIVE)
-		return;
+		goto out;
 
 	bat_info(hard_iface->soft_iface, "Removing interface: %s\n",
 		 hard_iface->net_dev->name);
@@ -400,11 +413,12 @@ void hardif_disable_interface(struct hard_iface *hard_iface)
 	bat_priv->num_ifaces--;
 	orig_hash_del_if(hard_iface, bat_priv->num_ifaces);
 
-	if (hard_iface == bat_priv->primary_if) {
+	primary_if = primary_if_get_selected(bat_priv);
+	if (hard_iface == primary_if) {
 		struct hard_iface *new_if;
 
 		new_if = hardif_get_active(hard_iface->soft_iface);
-		set_primary_if(bat_priv, new_if);
+		primary_if_select(bat_priv, new_if);
 
 		if (new_if)
 			hardif_free_ref(new_if);
@@ -425,6 +439,10 @@ void hardif_disable_interface(struct hard_iface *hard_iface)
 
 	hard_iface->soft_iface = NULL;
 	hardif_free_ref(hard_iface);
+
+out:
+	if (primary_if)
+		hardif_free_ref(primary_if);
 }
 
 static struct hard_iface *hardif_add_interface(struct net_device *net_dev)
@@ -432,6 +450,8 @@ static struct hard_iface *hardif_add_interface(struct net_device *net_dev)
 	struct hard_iface *hard_iface;
 	int ret;
 
+	ASSERT_RTNL();
+
 	ret = is_valid_iface(net_dev);
 	if (ret != 1)
 		goto out;
@@ -458,10 +478,7 @@ static struct hard_iface *hardif_add_interface(struct net_device *net_dev)
 	atomic_set(&hard_iface->refcount, 2);
 
 	check_known_mac_addr(hard_iface->net_dev);
-
-	spin_lock(&hardif_list_lock);
 	list_add_tail_rcu(&hard_iface->list, &hardif_list);
-	spin_unlock(&hardif_list_lock);
 
 	return hard_iface;
 
@@ -475,6 +492,8 @@ out:
 
 static void hardif_remove_interface(struct hard_iface *hard_iface)
 {
+	ASSERT_RTNL();
+
 	/* first deactivate interface */
 	if (hard_iface->if_status != IF_NOT_IN_USE)
 		hardif_disable_interface(hard_iface);
@@ -490,20 +509,11 @@ static void hardif_remove_interface(struct hard_iface *hard_iface)
 void hardif_remove_interfaces(void)
 {
 	struct hard_iface *hard_iface, *hard_iface_tmp;
-	struct list_head if_queue;
-
-	INIT_LIST_HEAD(&if_queue);
 
-	spin_lock(&hardif_list_lock);
+	rtnl_lock();
 	list_for_each_entry_safe(hard_iface, hard_iface_tmp,
 				 &hardif_list, list) {
 		list_del_rcu(&hard_iface->list);
-		list_add_tail(&hard_iface->list, &if_queue);
-	}
-	spin_unlock(&hardif_list_lock);
-
-	rtnl_lock();
-	list_for_each_entry_safe(hard_iface, hard_iface_tmp, &if_queue, list) {
 		hardif_remove_interface(hard_iface);
 	}
 	rtnl_unlock();
@@ -514,6 +524,7 @@ static int hard_if_event(struct notifier_block *this,
 {
 	struct net_device *net_dev = (struct net_device *)ptr;
 	struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
+	struct hard_iface *primary_if = NULL;
 	struct bat_priv *bat_priv;
 
 	if (!hard_iface && event == NETDEV_REGISTER)
@@ -531,9 +542,7 @@ static int hard_if_event(struct notifier_block *this,
 		hardif_deactivate_interface(hard_iface);
 		break;
 	case NETDEV_UNREGISTER:
-		spin_lock(&hardif_list_lock);
 		list_del_rcu(&hard_iface->list);
-		spin_unlock(&hardif_list_lock);
 
 		hardif_remove_interface(hard_iface);
 		break;
@@ -549,8 +558,12 @@ static int hard_if_event(struct notifier_block *this,
 		update_mac_addresses(hard_iface);
 
 		bat_priv = netdev_priv(hard_iface->soft_iface);
-		if (hard_iface == bat_priv->primary_if)
-			update_primary_addr(bat_priv);
+		primary_if = primary_if_get_selected(bat_priv);
+		if (!primary_if)
+			goto hardif_put;
+
+		if (hard_iface == primary_if)
+			primary_if_update_addr(bat_priv);
 		break;
 	default:
 		break;
@@ -559,6 +572,8 @@ static int hard_if_event(struct notifier_block *this,
 hardif_put:
 	hardif_free_ref(hard_iface);
 out:
+	if (primary_if)
+		hardif_free_ref(primary_if);
 	return NOTIFY_DONE;
 }
 
diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h
index a9ddf36e51c8..64265991460b 100644
--- a/net/batman-adv/hard-interface.h
+++ b/net/batman-adv/hard-interface.h
@@ -45,4 +45,22 @@ static inline void hardif_free_ref(struct hard_iface *hard_iface)
 		call_rcu(&hard_iface->rcu, hardif_free_rcu);
 }
 
+static inline struct hard_iface *primary_if_get_selected(
+				struct bat_priv *bat_priv)
+{
+	struct hard_iface *hard_iface;
+
+	rcu_read_lock();
+	hard_iface = rcu_dereference(bat_priv->primary_if);
+	if (!hard_iface)
+		goto out;
+
+	if (!atomic_inc_not_zero(&hard_iface->refcount))
+		hard_iface = NULL;
+
+out:
+	rcu_read_unlock();
+	return hard_iface;
+}
+
 #endif /* _NET_BATMAN_ADV_HARD_INTERFACE_H_ */
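
This helper is the access pattern the whole series converges on: bat_priv->primary_if becomes an RCU-managed pointer that is only published with rcu_assign_pointer() (under RTNL, per the ASSERT_RTNL() in primary_if_select()) and only read via rcu_dereference() plus atomic_inc_not_zero(), which refuses to hand out an interface whose refcount has already reached zero. Callers then follow the get/use/put shape seen throughout the .c hunks (condensed from the patch):

    	struct hard_iface *primary_if;

    	primary_if = primary_if_get_selected(bat_priv);
    	if (!primary_if)
    		goto out;

    	/* ... safe to use primary_if->net_dev here ... */

    out:
    	if (primary_if)
    		hardif_free_ref(primary_if);
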
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index 34ce56c358e5..fa22ba2bb832 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -153,6 +153,7 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
 {
 	struct socket_client *socket_client = file->private_data;
 	struct bat_priv *bat_priv = socket_client->bat_priv;
+	struct hard_iface *primary_if = NULL;
 	struct sk_buff *skb;
 	struct icmp_packet_rr *icmp_packet;
 
@@ -167,15 +168,21 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
 		return -EINVAL;
 	}
 
-	if (!bat_priv->primary_if)
-		return -EFAULT;
+	primary_if = primary_if_get_selected(bat_priv);
+
+	if (!primary_if) {
+		len = -EFAULT;
+		goto out;
+	}
 
 	if (len >= sizeof(struct icmp_packet_rr))
 		packet_len = sizeof(struct icmp_packet_rr);
 
 	skb = dev_alloc_skb(packet_len + sizeof(struct ethhdr));
-	if (!skb)
-		return -ENOMEM;
+	if (!skb) {
+		len = -ENOMEM;
+		goto out;
+	}
 
 	skb_reserve(skb, sizeof(struct ethhdr));
 	icmp_packet = (struct icmp_packet_rr *)skb_put(skb, packet_len);
@@ -218,23 +225,13 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
 	if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
 		goto dst_unreach;
 
-	rcu_read_lock();
 	orig_node = orig_hash_find(bat_priv, icmp_packet->dst);
-
 	if (!orig_node)
-		goto unlock;
-
-	neigh_node = orig_node->router;
+		goto dst_unreach;
 
+	neigh_node = orig_node_get_router(orig_node);
 	if (!neigh_node)
-		goto unlock;
-
-	if (!atomic_inc_not_zero(&neigh_node->refcount)) {
-		neigh_node = NULL;
-		goto unlock;
-	}
-
-	rcu_read_unlock();
+		goto dst_unreach;
 
 	if (!neigh_node->if_incoming)
 		goto dst_unreach;
@@ -243,7 +240,7 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
 		goto dst_unreach;
 
 	memcpy(icmp_packet->orig,
-	       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
+	       primary_if->net_dev->dev_addr, ETH_ALEN);
 
 	if (packet_len == sizeof(struct icmp_packet_rr))
 		memcpy(icmp_packet->rr,
@@ -252,14 +249,14 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
 	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
 	goto out;
 
-unlock:
-	rcu_read_unlock();
 dst_unreach:
 	icmp_packet->msg_type = DESTINATION_UNREACHABLE;
 	bat_socket_add_packet(socket_client, icmp_packet, packet_len);
 free_skb:
 	kfree_skb(skb);
 out:
+	if (primary_if)
+		hardif_free_ref(primary_if);
 	if (neigh_node)
 		neigh_node_free_ref(neigh_node);
 	if (orig_node)
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 709b33bbdf43..0a7cee0076f4 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -33,6 +33,9 @@
 #include "vis.h"
 #include "hash.h"
 
+
+/* List manipulations on hardif_list have to be rtnl_lock()'ed,
+ * list traversals just rcu-locked */
 struct list_head hardif_list;
 
 unsigned char broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
@@ -81,28 +84,29 @@ int mesh_init(struct net_device *soft_iface)
 
 	spin_lock_init(&bat_priv->forw_bat_list_lock);
 	spin_lock_init(&bat_priv->forw_bcast_list_lock);
-	spin_lock_init(&bat_priv->hna_lhash_lock);
-	spin_lock_init(&bat_priv->hna_ghash_lock);
+	spin_lock_init(&bat_priv->tt_lhash_lock);
+	spin_lock_init(&bat_priv->tt_ghash_lock);
 	spin_lock_init(&bat_priv->gw_list_lock);
 	spin_lock_init(&bat_priv->vis_hash_lock);
 	spin_lock_init(&bat_priv->vis_list_lock);
 	spin_lock_init(&bat_priv->softif_neigh_lock);
+	spin_lock_init(&bat_priv->softif_neigh_vid_lock);
 
 	INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
 	INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
 	INIT_HLIST_HEAD(&bat_priv->gw_list);
-	INIT_HLIST_HEAD(&bat_priv->softif_neigh_list);
+	INIT_HLIST_HEAD(&bat_priv->softif_neigh_vids);
 
 	if (originator_init(bat_priv) < 1)
 		goto err;
 
-	if (hna_local_init(bat_priv) < 1)
+	if (tt_local_init(bat_priv) < 1)
 		goto err;
 
-	if (hna_global_init(bat_priv) < 1)
+	if (tt_global_init(bat_priv) < 1)
 		goto err;
 
-	hna_local_add(soft_iface, soft_iface->dev_addr);
+	tt_local_add(soft_iface, soft_iface->dev_addr);
 
 	if (vis_init(bat_priv) < 1)
 		goto err;
@@ -133,8 +137,8 @@ void mesh_free(struct net_device *soft_iface)
 	gw_node_purge(bat_priv);
 	originator_free(bat_priv);
 
-	hna_local_free(bat_priv);
-	hna_global_free(bat_priv);
+	tt_local_free(bat_priv);
+	tt_global_free(bat_priv);
 
 	softif_neigh_purge(bat_priv);
 
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index dc248697de71..148b49e02642 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -34,16 +34,18 @@
 
 #define TQ_MAX_VALUE 255
 #define JITTER 20
-#define TTL 50			/* Time To Live of broadcast messages */
 
-#define PURGE_TIMEOUT 200	/* purge originators after time in seconds if no
-				 * valid packet comes in -> TODO: check
-				 * influence on TQ_LOCAL_WINDOW_SIZE */
-#define LOCAL_HNA_TIMEOUT 3600 /* in seconds */
+ /* Time To Live of broadcast messages */
+#define TTL 50
 
-#define TQ_LOCAL_WINDOW_SIZE 64	/* sliding packet range of received originator
-				 * messages in squence numbers (should be a
-				 * multiple of our word size) */
+/* purge originators after time in seconds if no valid packet comes in
+ * -> TODO: check influence on TQ_LOCAL_WINDOW_SIZE */
+#define PURGE_TIMEOUT 200
+#define TT_LOCAL_TIMEOUT 3600 /* in seconds */
+
+/* sliding packet range of received originator messages in squence numbers
+ * (should be a multiple of our word size) */
+#define TQ_LOCAL_WINDOW_SIZE 64
 #define TQ_GLOBAL_WINDOW_SIZE 5
 #define TQ_LOCAL_BIDRECT_SEND_MINIMUM 1
 #define TQ_LOCAL_BIDRECT_RECV_MINIMUM 1
@@ -55,21 +57,20 @@
 
 #define VIS_INTERVAL 5000	/* 5 seconds */
 
-/* how much worse secondary interfaces may be to
- * to be considered as bonding candidates */
-
+/* how much worse secondary interfaces may be to be considered as bonding
+ * candidates */
 #define BONDING_TQ_THRESHOLD	50
 
-#define MAX_AGGREGATION_BYTES 512 /* should not be bigger than 512 bytes or
-				   * change the size of
-				   * forw_packet->direct_link_flags */
+/* should not be bigger than 512 bytes or change the size of
+ * forw_packet->direct_link_flags */
+#define MAX_AGGREGATION_BYTES 512
 #define MAX_AGGREGATION_MS 100
 
 #define SOFTIF_NEIGH_TIMEOUT 180000 /* 3 minutes */
 
+/* don't reset again within 30 seconds */
 #define RESET_PROTECTION_MS 30000
 #define EXPECTED_SEQNO_RANGE	65536
-/* don't reset again within 30 seconds */
 
 #define MESH_INACTIVE 0
 #define MESH_ACTIVE 1
@@ -84,12 +85,13 @@
 #ifdef pr_fmt
 #undef pr_fmt
 #endif
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* Append 'batman-adv: ' before
-					     * kernel messages */
+/* Append 'batman-adv: ' before kernel messages */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-#define DBG_BATMAN 1	/* all messages related to routing / flooding /
-			 * broadcasting / etc */
-#define DBG_ROUTES 2	/* route or hna added / changed / deleted */
+/* all messages related to routing / flooding / broadcasting / etc */
+#define DBG_BATMAN 1
+/* route or tt entry added / changed / deleted */
+#define DBG_ROUTES 2
 #define DBG_ALL 3
 
 
@@ -175,4 +177,6 @@ static inline int compare_eth(void *data1, void *data2)
 	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
 }
 
+#define atomic_dec_not_zero(v)	atomic_add_unless((v), -1, 0)
+
 #endif /* _NET_BATMAN_ADV_MAIN_H_ */
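
atomic_dec_not_zero() moves into main.h because aggregation.c is no longer its only user; it decrements the counter only when it is non-zero and reports whether it did, which is exactly what the queue-credit check in new_aggregated_packet() relies on. A compilable analogue with C11 atomics (names are illustrative):

    #include <stdatomic.h>
    #include <stdio.h>

    /* decrement *v unless it is already 0; return 1 if we decremented */
    static int atomic_dec_not_zero(atomic_int *v)
    {
            int old = atomic_load(v);

            while (old != 0)
                    if (atomic_compare_exchange_weak(v, &old, old - 1))
                            return 1;
            return 0;
    }

    int main(void)
    {
            atomic_int queue_left;

            atomic_init(&queue_left, 1);
            printf("%d\n", atomic_dec_not_zero(&queue_left)); /* 1: took the slot */
            printf("%d\n", atomic_dec_not_zero(&queue_left)); /* 0: queue full   */
            return 0;
    }
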
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 0b9133022d2d..40a30bbcd147 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -19,8 +19,6 @@
  *
  */
 
-/* increase the reference counter for this originator */
-
 #include "main.h"
 #include "originator.h"
 #include "hash.h"
@@ -56,18 +54,25 @@ err:
56 return 0; 54 return 0;
57} 55}
58 56
59static void neigh_node_free_rcu(struct rcu_head *rcu) 57void neigh_node_free_ref(struct neigh_node *neigh_node)
60{ 58{
61 struct neigh_node *neigh_node; 59 if (atomic_dec_and_test(&neigh_node->refcount))
62 60 kfree_rcu(neigh_node, rcu);
63 neigh_node = container_of(rcu, struct neigh_node, rcu);
64 kfree(neigh_node);
65} 61}
66 62
67void neigh_node_free_ref(struct neigh_node *neigh_node) 63/* increases the refcounter of a found router */
64struct neigh_node *orig_node_get_router(struct orig_node *orig_node)
68{ 65{
69 if (atomic_dec_and_test(&neigh_node->refcount)) 66 struct neigh_node *router;
70 call_rcu(&neigh_node->rcu, neigh_node_free_rcu); 67
68 rcu_read_lock();
69 router = rcu_dereference(orig_node->router);
70
71 if (router && !atomic_inc_not_zero(&router->refcount))
72 router = NULL;
73
74 rcu_read_unlock();
75 return router;
71} 76}
72 77
73struct neigh_node *create_neighbor(struct orig_node *orig_node, 78struct neigh_node *create_neighbor(struct orig_node *orig_node,
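orig_node_get_router() packages the lookup pattern this patch applies everywhere: read the RCU-protected router pointer inside rcu_read_lock(), and hand it out only if atomic_inc_not_zero() succeeds, so a neighbor whose refcount already dropped to zero (and is queued for kfree_rcu()) is never resurrected. A userspace sketch of the inc-not-zero half, with the RCU grace-period machinery elided; in the kernel it is rcu_read_lock() that keeps the object's memory valid between the load and the increment.

#include <stdatomic.h>
#include <stdio.h>

struct neigh { atomic_int refcount; };

struct orig { _Atomic(struct neigh *) router; };

/* take a reference only while the object is still live; a counter
 * already at zero means the neighbor is on its way to being freed */
static int refcount_inc_not_zero(atomic_int *r)
{
	int cur = atomic_load(r);

	while (cur != 0)
		if (atomic_compare_exchange_weak(r, &cur, cur + 1))
			return 1;
	return 0;
}

/* userspace mirror of orig_node_get_router() */
static struct neigh *orig_get_router(struct orig *o)
{
	struct neigh *router = atomic_load(&o->router);

	if (router && !refcount_inc_not_zero(&router->refcount))
		router = NULL;
	return router;
}

int main(void)
{
	struct neigh n;
	struct orig o;

	atomic_init(&n.refcount, 1);
	atomic_init(&o.router, &n);
	printf("router taken: %s\n", orig_get_router(&o) ? "yes" : "no");
	return 0;
}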
@@ -87,6 +92,7 @@ struct neigh_node *create_neighbor(struct orig_node *orig_node,
87 92
88 INIT_HLIST_NODE(&neigh_node->list); 93 INIT_HLIST_NODE(&neigh_node->list);
89 INIT_LIST_HEAD(&neigh_node->bonding_list); 94 INIT_LIST_HEAD(&neigh_node->bonding_list);
95 spin_lock_init(&neigh_node->tq_lock);
90 96
91 memcpy(neigh_node->addr, neigh, ETH_ALEN); 97 memcpy(neigh_node->addr, neigh, ETH_ALEN);
92 neigh_node->orig_node = orig_neigh_node; 98 neigh_node->orig_node = orig_neigh_node;
@@ -128,7 +134,7 @@ static void orig_node_free_rcu(struct rcu_head *rcu)
128 spin_unlock_bh(&orig_node->neigh_list_lock); 134 spin_unlock_bh(&orig_node->neigh_list_lock);
129 135
130 frag_list_free(&orig_node->frag_list); 136 frag_list_free(&orig_node->frag_list);
131 hna_global_del_orig(orig_node->bat_priv, orig_node, 137 tt_global_del_orig(orig_node->bat_priv, orig_node,
132 "originator timed out"); 138 "originator timed out");
133 139
134 kfree(orig_node->bcast_own); 140 kfree(orig_node->bcast_own);
@@ -206,7 +212,7 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr)
206 orig_node->bat_priv = bat_priv; 212 orig_node->bat_priv = bat_priv;
207 memcpy(orig_node->orig, addr, ETH_ALEN); 213 memcpy(orig_node->orig, addr, ETH_ALEN);
208 orig_node->router = NULL; 214 orig_node->router = NULL;
209 orig_node->hna_buff = NULL; 215 orig_node->tt_buff = NULL;
210 orig_node->bcast_seqno_reset = jiffies - 1 216 orig_node->bcast_seqno_reset = jiffies - 1
211 - msecs_to_jiffies(RESET_PROTECTION_MS); 217 - msecs_to_jiffies(RESET_PROTECTION_MS);
212 orig_node->batman_seqno_reset = jiffies - 1 218 orig_node->batman_seqno_reset = jiffies - 1
@@ -317,8 +323,8 @@ static bool purge_orig_node(struct bat_priv *bat_priv,
317 &best_neigh_node)) { 323 &best_neigh_node)) {
318 update_routes(bat_priv, orig_node, 324 update_routes(bat_priv, orig_node,
319 best_neigh_node, 325 best_neigh_node,
320 orig_node->hna_buff, 326 orig_node->tt_buff,
321 orig_node->hna_buff_len); 327 orig_node->tt_buff_len);
322 } 328 }
323 } 329 }
324 330
@@ -389,29 +395,34 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
389 struct hashtable_t *hash = bat_priv->orig_hash; 395 struct hashtable_t *hash = bat_priv->orig_hash;
390 struct hlist_node *node, *node_tmp; 396 struct hlist_node *node, *node_tmp;
391 struct hlist_head *head; 397 struct hlist_head *head;
398 struct hard_iface *primary_if;
392 struct orig_node *orig_node; 399 struct orig_node *orig_node;
393 struct neigh_node *neigh_node; 400 struct neigh_node *neigh_node, *neigh_node_tmp;
394 int batman_count = 0; 401 int batman_count = 0;
395 int last_seen_secs; 402 int last_seen_secs;
396 int last_seen_msecs; 403 int last_seen_msecs;
397 int i; 404 int i, ret = 0;
405
406 primary_if = primary_if_get_selected(bat_priv);
398 407
399 if ((!bat_priv->primary_if) || 408 if (!primary_if) {
400 (bat_priv->primary_if->if_status != IF_ACTIVE)) { 409 ret = seq_printf(seq, "BATMAN mesh %s disabled - "
401 if (!bat_priv->primary_if) 410 "please specify interfaces to enable it\n",
402 return seq_printf(seq, "BATMAN mesh %s disabled - " 411 net_dev->name);
403 "please specify interfaces to enable it\n", 412 goto out;
404 net_dev->name); 413 }
405 414
406 return seq_printf(seq, "BATMAN mesh %s " 415 if (primary_if->if_status != IF_ACTIVE) {
407 "disabled - primary interface not active\n", 416 ret = seq_printf(seq, "BATMAN mesh %s "
408 net_dev->name); 417 "disabled - primary interface not active\n",
418 net_dev->name);
419 goto out;
409 } 420 }
410 421
411 seq_printf(seq, "[B.A.T.M.A.N. adv %s%s, MainIF/MAC: %s/%pM (%s)]\n", 422 seq_printf(seq, "[B.A.T.M.A.N. adv %s%s, MainIF/MAC: %s/%pM (%s)]\n",
412 SOURCE_VERSION, REVISION_VERSION_STR, 423 SOURCE_VERSION, REVISION_VERSION_STR,
413 bat_priv->primary_if->net_dev->name, 424 primary_if->net_dev->name,
414 bat_priv->primary_if->net_dev->dev_addr, net_dev->name); 425 primary_if->net_dev->dev_addr, net_dev->name);
415 seq_printf(seq, " %-15s %s (%s/%i) %17s [%10s]: %20s ...\n", 426 seq_printf(seq, " %-15s %s (%s/%i) %17s [%10s]: %20s ...\n",
416 "Originator", "last-seen", "#", TQ_MAX_VALUE, "Nexthop", 427 "Originator", "last-seen", "#", TQ_MAX_VALUE, "Nexthop",
417 "outgoingIF", "Potential nexthops"); 428 "outgoingIF", "Potential nexthops");
@@ -421,40 +432,47 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
421 432
422 rcu_read_lock(); 433 rcu_read_lock();
423 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { 434 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
424 if (!orig_node->router) 435 neigh_node = orig_node_get_router(orig_node);
436 if (!neigh_node)
425 continue; 437 continue;
426 438
427 if (orig_node->router->tq_avg == 0) 439 if (neigh_node->tq_avg == 0)
428 continue; 440 goto next;
429 441
430 last_seen_secs = jiffies_to_msecs(jiffies - 442 last_seen_secs = jiffies_to_msecs(jiffies -
431 orig_node->last_valid) / 1000; 443 orig_node->last_valid) / 1000;
432 last_seen_msecs = jiffies_to_msecs(jiffies - 444 last_seen_msecs = jiffies_to_msecs(jiffies -
433 orig_node->last_valid) % 1000; 445 orig_node->last_valid) % 1000;
434 446
435 neigh_node = orig_node->router;
436 seq_printf(seq, "%pM %4i.%03is (%3i) %pM [%10s]:", 447 seq_printf(seq, "%pM %4i.%03is (%3i) %pM [%10s]:",
437 orig_node->orig, last_seen_secs, 448 orig_node->orig, last_seen_secs,
438 last_seen_msecs, neigh_node->tq_avg, 449 last_seen_msecs, neigh_node->tq_avg,
439 neigh_node->addr, 450 neigh_node->addr,
440 neigh_node->if_incoming->net_dev->name); 451 neigh_node->if_incoming->net_dev->name);
441 452
442 hlist_for_each_entry_rcu(neigh_node, node_tmp, 453 hlist_for_each_entry_rcu(neigh_node_tmp, node_tmp,
443 &orig_node->neigh_list, list) { 454 &orig_node->neigh_list, list) {
444 seq_printf(seq, " %pM (%3i)", neigh_node->addr, 455 seq_printf(seq, " %pM (%3i)",
445 neigh_node->tq_avg); 456 neigh_node_tmp->addr,
457 neigh_node_tmp->tq_avg);
446 } 458 }
447 459
448 seq_printf(seq, "\n"); 460 seq_printf(seq, "\n");
449 batman_count++; 461 batman_count++;
462
463next:
464 neigh_node_free_ref(neigh_node);
450 } 465 }
451 rcu_read_unlock(); 466 rcu_read_unlock();
452 } 467 }
453 468
454 if ((batman_count == 0)) 469 if (batman_count == 0)
455 seq_printf(seq, "No batman nodes in range ...\n"); 470 seq_printf(seq, "No batman nodes in range ...\n");
456 471
457 return 0; 472out:
473 if (primary_if)
474 hardif_free_ref(primary_if);
475 return ret;
458} 476}
459 477
460static int orig_node_add_if(struct orig_node *orig_node, int max_if_num) 478static int orig_node_add_if(struct orig_node *orig_node, int max_if_num)
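orig_seq_print_text() now takes its own reference on the selected primary interface via primary_if_get_selected() and releases it on every exit path through the single out: label; inside the loop, the reference taken by orig_node_get_router() is dropped at next: even when the entry is skipped for tq_avg == 0. A compact userspace sketch of that single-exit cleanup style; resource_get()/resource_put() are illustrative stand-ins, not kernel functions.

#include <stdio.h>
#include <stdlib.h>

struct resource { int usage; };

static struct resource *resource_get(void)
{
	struct resource *r = malloc(sizeof(*r));

	if (r)
		r->usage = 1;
	return r;
}

static void resource_put(struct resource *r)
{
	if (--r->usage == 0)
		free(r);
}

static int print_status(int active)
{
	struct resource *res = NULL;
	int ret = 0;

	res = resource_get();
	if (!res) {
		ret = -1;
		goto out; /* nothing acquired yet; out: copes with NULL */
	}

	if (!active) {
		ret = -2;
		goto out; /* error path still drops the reference */
	}

	puts("resource active");
out:
	if (res)
		resource_put(res);
	return ret;
}

int main(void)
{
	printf("%d\n", print_status(1));
	printf("%d\n", print_status(0));
	return 0;
}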
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index 5cc011057da1..e1d641f27aa9 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -34,6 +34,7 @@ struct neigh_node *create_neighbor(struct orig_node *orig_node,
34 uint8_t *neigh, 34 uint8_t *neigh,
35 struct hard_iface *if_incoming); 35 struct hard_iface *if_incoming);
36void neigh_node_free_ref(struct neigh_node *neigh_node); 36void neigh_node_free_ref(struct neigh_node *neigh_node);
37struct neigh_node *orig_node_get_router(struct orig_node *orig_node);
37int orig_seq_print_text(struct seq_file *seq, void *offset); 38int orig_seq_print_text(struct seq_file *seq, void *offset);
38int orig_hash_add_if(struct hard_iface *hard_iface, int max_if_num); 39int orig_hash_add_if(struct hard_iface *hard_iface, int max_if_num);
39int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num); 40int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num);
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index e7571879af3f..eda99650e9f8 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -61,7 +61,7 @@ struct batman_packet {
61 uint8_t orig[6]; 61 uint8_t orig[6];
62 uint8_t prev_sender[6]; 62 uint8_t prev_sender[6];
63 uint8_t ttl; 63 uint8_t ttl;
64 uint8_t num_hna; 64 uint8_t num_tt;
65 uint8_t gw_flags; /* flags related to gateway class */ 65 uint8_t gw_flags; /* flags related to gateway class */
66 uint8_t align; 66 uint8_t align;
67} __packed; 67} __packed;
@@ -128,8 +128,7 @@ struct vis_packet {
128 uint8_t entries; /* number of entries behind this struct */ 128 uint8_t entries; /* number of entries behind this struct */
129 uint32_t seqno; /* sequence number */ 129 uint32_t seqno; /* sequence number */
130 uint8_t ttl; /* TTL */ 130 uint8_t ttl; /* TTL */
131 uint8_t vis_orig[6]; /* originator that informs about its 131 uint8_t vis_orig[6]; /* originator that announces its neighbors */
132 * neighbors */
133 uint8_t target_orig[6]; /* who should receive this packet */ 132 uint8_t target_orig[6]; /* who should receive this packet */
134 uint8_t sender_orig[6]; /* who sent or rebroadcast this packet */ 133
135} __packed; 134} __packed;
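The num_hna -> num_tt rename leaves the wire format untouched: struct batman_packet stays __packed, and each OGM is still followed by num_tt six-byte entries, which is exactly the stride send.c uses below to step through aggregated buffers. A userspace sketch of that walk over a byte buffer, using a deliberately trimmed header (only the two fields needed here; the real struct carries more, and __packed is the kernel's shorthand for the attribute spelled out below):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* trimmed stand-in for struct batman_packet */
struct ogm {
	uint8_t ttl;
	uint8_t num_tt; /* number of 6-byte TT entries after the header */
} __attribute__((packed));

/* walk an aggregate: each OGM is header + num_tt * ETH_ALEN bytes */
static void walk(const uint8_t *buff, size_t len)
{
	size_t pos = 0;

	while (pos + sizeof(struct ogm) <= len) {
		const struct ogm *p = (const struct ogm *)(buff + pos);
		size_t entry_bytes = (size_t)p->num_tt * ETH_ALEN;

		if (pos + sizeof(*p) + entry_bytes > len)
			break; /* truncated trailer */
		printf("OGM at %zu: ttl=%u, %u TT entries\n",
		       pos, p->ttl, p->num_tt);
		pos += sizeof(*p) + entry_bytes;
	}
}

int main(void)
{
	uint8_t buff[64];
	struct ogm a = { .ttl = 50, .num_tt = 2 };
	struct ogm b = { .ttl = 49, .num_tt = 0 };
	size_t pos = 0;

	memset(buff, 0, sizeof(buff));
	memcpy(buff + pos, &a, sizeof(a));
	pos += sizeof(a) + a.num_tt * ETH_ALEN;
	memcpy(buff + pos, &b, sizeof(b));
	pos += sizeof(b);
	walk(buff, pos);
	return 0;
}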
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index c172f5d0e05a..bb1c3ec7e3ff 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -64,80 +64,97 @@ void slide_own_bcast_window(struct hard_iface *hard_iface)
64 } 64 }
65} 65}
66 66
67static void update_HNA(struct bat_priv *bat_priv, struct orig_node *orig_node, 67static void update_TT(struct bat_priv *bat_priv, struct orig_node *orig_node,
68 unsigned char *hna_buff, int hna_buff_len) 68 unsigned char *tt_buff, int tt_buff_len)
69{ 69{
70 if ((hna_buff_len != orig_node->hna_buff_len) || 70 if ((tt_buff_len != orig_node->tt_buff_len) ||
71 ((hna_buff_len > 0) && 71 ((tt_buff_len > 0) &&
72 (orig_node->hna_buff_len > 0) && 72 (orig_node->tt_buff_len > 0) &&
73 (memcmp(orig_node->hna_buff, hna_buff, hna_buff_len) != 0))) { 73 (memcmp(orig_node->tt_buff, tt_buff, tt_buff_len) != 0))) {
74 74
75 if (orig_node->hna_buff_len > 0) 75 if (orig_node->tt_buff_len > 0)
76 hna_global_del_orig(bat_priv, orig_node, 76 tt_global_del_orig(bat_priv, orig_node,
77 "originator changed hna"); 77 "originator changed tt");
78 78
79 if ((hna_buff_len > 0) && (hna_buff)) 79 if ((tt_buff_len > 0) && (tt_buff))
80 hna_global_add_orig(bat_priv, orig_node, 80 tt_global_add_orig(bat_priv, orig_node,
81 hna_buff, hna_buff_len); 81 tt_buff, tt_buff_len);
82 } 82 }
83} 83}
84 84
85static void update_route(struct bat_priv *bat_priv, 85static void update_route(struct bat_priv *bat_priv,
86 struct orig_node *orig_node, 86 struct orig_node *orig_node,
87 struct neigh_node *neigh_node, 87 struct neigh_node *neigh_node,
88 unsigned char *hna_buff, int hna_buff_len) 88 unsigned char *tt_buff, int tt_buff_len)
89{ 89{
90 struct neigh_node *neigh_node_tmp; 90 struct neigh_node *curr_router;
91
92 curr_router = orig_node_get_router(orig_node);
91 93
92 /* route deleted */ 94 /* route deleted */
93 if ((orig_node->router) && (!neigh_node)) { 95 if ((curr_router) && (!neigh_node)) {
94 96
95 bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n", 97 bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n",
96 orig_node->orig); 98 orig_node->orig);
97 hna_global_del_orig(bat_priv, orig_node, 99 tt_global_del_orig(bat_priv, orig_node,
98 "originator timed out"); 100 "originator timed out");
99 101
100 /* route added */ 102 /* route added */
101 } else if ((!orig_node->router) && (neigh_node)) { 103 } else if ((!curr_router) && (neigh_node)) {
102 104
103 bat_dbg(DBG_ROUTES, bat_priv, 105 bat_dbg(DBG_ROUTES, bat_priv,
104 "Adding route towards: %pM (via %pM)\n", 106 "Adding route towards: %pM (via %pM)\n",
105 orig_node->orig, neigh_node->addr); 107 orig_node->orig, neigh_node->addr);
106 hna_global_add_orig(bat_priv, orig_node, 108 tt_global_add_orig(bat_priv, orig_node,
107 hna_buff, hna_buff_len); 109 tt_buff, tt_buff_len);
108 110
109 /* route changed */ 111 /* route changed */
110 } else { 112 } else {
111 bat_dbg(DBG_ROUTES, bat_priv, 113 bat_dbg(DBG_ROUTES, bat_priv,
112 "Changing route towards: %pM " 114 "Changing route towards: %pM "
113 "(now via %pM - was via %pM)\n", 115 "(now via %pM - was via %pM)\n",
114 orig_node->orig, neigh_node->addr, 116 orig_node->orig, neigh_node->addr,
115 orig_node->router->addr); 117 curr_router->addr);
116 } 118 }
117 119
120 if (curr_router)
121 neigh_node_free_ref(curr_router);
122
123 /* increase refcount of new best neighbor */
118 if (neigh_node && !atomic_inc_not_zero(&neigh_node->refcount)) 124 if (neigh_node && !atomic_inc_not_zero(&neigh_node->refcount))
119 neigh_node = NULL; 125 neigh_node = NULL;
120 neigh_node_tmp = orig_node->router; 126
121 orig_node->router = neigh_node; 127 spin_lock_bh(&orig_node->neigh_list_lock);
122 if (neigh_node_tmp) 128 rcu_assign_pointer(orig_node->router, neigh_node);
123 neigh_node_free_ref(neigh_node_tmp); 129 spin_unlock_bh(&orig_node->neigh_list_lock);
130
131 /* decrease refcount of previous best neighbor */
132 if (curr_router)
133 neigh_node_free_ref(curr_router);
124} 134}
125 135
126 136
127void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node, 137void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
128 struct neigh_node *neigh_node, unsigned char *hna_buff, 138 struct neigh_node *neigh_node, unsigned char *tt_buff,
129 int hna_buff_len) 139 int tt_buff_len)
130{ 140{
141 struct neigh_node *router = NULL;
131 142
132 if (!orig_node) 143 if (!orig_node)
133 return; 144 goto out;
145
146 router = orig_node_get_router(orig_node);
134 147
135 if (orig_node->router != neigh_node) 148 if (router != neigh_node)
136 update_route(bat_priv, orig_node, neigh_node, 149 update_route(bat_priv, orig_node, neigh_node,
137 hna_buff, hna_buff_len); 150 tt_buff, tt_buff_len);
138 /* maybe just the HNA changed */ 151 /* maybe just the TT changed */
139 else 152 else
140 update_HNA(bat_priv, orig_node, hna_buff, hna_buff_len); 153 update_TT(bat_priv, orig_node, tt_buff, tt_buff_len);
154
155out:
156 if (router)
157 neigh_node_free_ref(router);
141} 158}
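update_route() now publishes the new best neighbor with rcu_assign_pointer() under neigh_list_lock, and curr_router is legitimately released twice on the replacement path: once for the reference orig_node_get_router() took at the top of the function, and once for the reference the orig_node->router pointer itself owned. A userspace sketch of that publish-and-release ordering, with an atomic store standing in for rcu_assign_pointer() and the grace period elided:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct neigh { atomic_int refcount; int id; };

static struct neigh *neigh_new(int id)
{
	struct neigh *n = malloc(sizeof(*n));

	atomic_init(&n->refcount, 1);
	n->id = id;
	return n;
}

static int neigh_get(struct neigh *n)
{
	int cur = atomic_load(&n->refcount);

	while (cur != 0)
		if (atomic_compare_exchange_weak(&n->refcount, &cur, cur + 1))
			return 1;
	return 0;
}

static void neigh_put(struct neigh *n)
{
	if (atomic_fetch_sub(&n->refcount, 1) == 1) {
		printf("freeing neigh %d\n", n->id);
		free(n); /* the kernel defers this via kfree_rcu() */
	}
}

/* mirror of the tail of update_route(): two puts on the previous
 * router, one for our lookup reference and one for the reference
 * the router pointer itself owned */
static void set_router(_Atomic(struct neigh *) *slot, struct neigh *next)
{
	struct neigh *prev = atomic_load(slot);

	/* models the reference orig_node_get_router() takes */
	if (prev && !neigh_get(prev))
		prev = NULL;

	if (prev)
		neigh_put(prev); /* drop the lookup reference */

	/* take the reference the slot will own; refuse a dying neighbor */
	if (next && !neigh_get(next))
		next = NULL;

	atomic_store(slot, next); /* stands in for rcu_assign_pointer() */

	if (prev)
		neigh_put(prev); /* drop the reference the slot held */
}

int main(void)
{
	struct neigh *a = neigh_new(1), *b = neigh_new(2);
	_Atomic(struct neigh *) router = NULL;

	set_router(&router, a);
	set_router(&router, b); /* a's slot reference is dropped here */
	set_router(&router, NULL);
	neigh_put(a);
	neigh_put(b);
	return 0;
}

Keeping the two puts separate is deliberate: folding them into one would leak a reference whenever the function both looked up and replaced the same router.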
142 159
143static int is_bidirectional_neigh(struct orig_node *orig_node, 160static int is_bidirectional_neigh(struct orig_node *orig_node,
@@ -152,65 +169,41 @@ static int is_bidirectional_neigh(struct orig_node *orig_node,
152 uint8_t orig_eq_count, neigh_rq_count, tq_own; 169 uint8_t orig_eq_count, neigh_rq_count, tq_own;
153 int tq_asym_penalty, ret = 0; 170 int tq_asym_penalty, ret = 0;
154 171
155 if (orig_node == orig_neigh_node) { 172 /* find corresponding one hop neighbor */
156 rcu_read_lock(); 173 rcu_read_lock();
157 hlist_for_each_entry_rcu(tmp_neigh_node, node, 174 hlist_for_each_entry_rcu(tmp_neigh_node, node,
158 &orig_node->neigh_list, list) { 175 &orig_neigh_node->neigh_list, list) {
159
160 if (!compare_eth(tmp_neigh_node->addr,
161 orig_neigh_node->orig))
162 continue;
163
164 if (tmp_neigh_node->if_incoming != if_incoming)
165 continue;
166
167 if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
168 continue;
169
170 neigh_node = tmp_neigh_node;
171 }
172 rcu_read_unlock();
173 176
174 if (!neigh_node) 177 if (!compare_eth(tmp_neigh_node->addr, orig_neigh_node->orig))
175 neigh_node = create_neighbor(orig_node, 178 continue;
176 orig_neigh_node,
177 orig_neigh_node->orig,
178 if_incoming);
179 if (!neigh_node)
180 goto out;
181 179
182 neigh_node->last_valid = jiffies; 180 if (tmp_neigh_node->if_incoming != if_incoming)
183 } else { 181 continue;
184 /* find packet count of corresponding one hop neighbor */
185 rcu_read_lock();
186 hlist_for_each_entry_rcu(tmp_neigh_node, node,
187 &orig_neigh_node->neigh_list, list) {
188 182
189 if (!compare_eth(tmp_neigh_node->addr, 183 if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
190 orig_neigh_node->orig)) 184 continue;
191 continue;
192 185
193 if (tmp_neigh_node->if_incoming != if_incoming) 186 neigh_node = tmp_neigh_node;
194 continue; 187 break;
188 }
189 rcu_read_unlock();
195 190
196 if (!atomic_inc_not_zero(&tmp_neigh_node->refcount)) 191 if (!neigh_node)
197 continue; 192 neigh_node = create_neighbor(orig_neigh_node,
193 orig_neigh_node,
194 orig_neigh_node->orig,
195 if_incoming);
198 196
199 neigh_node = tmp_neigh_node; 197 if (!neigh_node)
200 } 198 goto out;
201 rcu_read_unlock();
202 199
203 if (!neigh_node) 200 /* if orig_node is a direct neighbour, update neigh_node last_valid */
204 neigh_node = create_neighbor(orig_neigh_node, 201 if (orig_node == orig_neigh_node)
205 orig_neigh_node, 202 neigh_node->last_valid = jiffies;
206 orig_neigh_node->orig,
207 if_incoming);
208 if (!neigh_node)
209 goto out;
210 }
211 203
212 orig_node->last_valid = jiffies; 204 orig_node->last_valid = jiffies;
213 205
206 /* find packet count of corresponding one hop neighbor */
214 spin_lock_bh(&orig_node->ogm_cnt_lock); 207 spin_lock_bh(&orig_node->ogm_cnt_lock);
215 orig_eq_count = orig_neigh_node->bcast_own_sum[if_incoming->if_num]; 208 orig_eq_count = orig_neigh_node->bcast_own_sum[if_incoming->if_num];
216 neigh_rq_count = neigh_node->real_packet_count; 209 neigh_rq_count = neigh_node->real_packet_count;
@@ -288,8 +281,8 @@ static void bonding_candidate_add(struct orig_node *orig_node,
288 struct neigh_node *neigh_node) 281 struct neigh_node *neigh_node)
289{ 282{
290 struct hlist_node *node; 283 struct hlist_node *node;
291 struct neigh_node *tmp_neigh_node; 284 struct neigh_node *tmp_neigh_node, *router = NULL;
292 uint8_t best_tq, interference_candidate = 0; 285 uint8_t interference_candidate = 0;
293 286
294 spin_lock_bh(&orig_node->neigh_list_lock); 287 spin_lock_bh(&orig_node->neigh_list_lock);
295 288
@@ -298,13 +291,12 @@ static void bonding_candidate_add(struct orig_node *orig_node,
298 neigh_node->orig_node->primary_addr)) 291 neigh_node->orig_node->primary_addr))
299 goto candidate_del; 292 goto candidate_del;
300 293
301 if (!orig_node->router) 294 router = orig_node_get_router(orig_node);
295 if (!router)
302 goto candidate_del; 296 goto candidate_del;
303 297
304 best_tq = orig_node->router->tq_avg;
305
306 /* ... and is good enough to be considered */ 298 /* ... and is good enough to be considered */
307 if (neigh_node->tq_avg < best_tq - BONDING_TQ_THRESHOLD) 299 if (neigh_node->tq_avg < router->tq_avg - BONDING_TQ_THRESHOLD)
308 goto candidate_del; 300 goto candidate_del;
309 301
310 /** 302 /**
@@ -350,7 +342,9 @@ candidate_del:
350 342
351out: 343out:
352 spin_unlock_bh(&orig_node->neigh_list_lock); 344 spin_unlock_bh(&orig_node->neigh_list_lock);
353 return; 345
346 if (router)
347 neigh_node_free_ref(router);
354} 348}
355 349
356/* copy primary address for bonding */ 350/* copy primary address for bonding */
@@ -369,13 +363,14 @@ static void update_orig(struct bat_priv *bat_priv,
369 struct ethhdr *ethhdr, 363 struct ethhdr *ethhdr,
370 struct batman_packet *batman_packet, 364 struct batman_packet *batman_packet,
371 struct hard_iface *if_incoming, 365 struct hard_iface *if_incoming,
372 unsigned char *hna_buff, int hna_buff_len, 366 unsigned char *tt_buff, int tt_buff_len,
373 char is_duplicate) 367 char is_duplicate)
374{ 368{
375 struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL; 369 struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
370 struct neigh_node *router = NULL;
376 struct orig_node *orig_node_tmp; 371 struct orig_node *orig_node_tmp;
377 struct hlist_node *node; 372 struct hlist_node *node;
378 int tmp_hna_buff_len; 373 int tmp_tt_buff_len;
379 uint8_t bcast_own_sum_orig, bcast_own_sum_neigh; 374 uint8_t bcast_own_sum_orig, bcast_own_sum_neigh;
380 375
381 bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): " 376 bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): "
@@ -396,10 +391,12 @@ static void update_orig(struct bat_priv *bat_priv,
396 if (is_duplicate) 391 if (is_duplicate)
397 continue; 392 continue;
398 393
394 spin_lock_bh(&tmp_neigh_node->tq_lock);
399 ring_buffer_set(tmp_neigh_node->tq_recv, 395 ring_buffer_set(tmp_neigh_node->tq_recv,
400 &tmp_neigh_node->tq_index, 0); 396 &tmp_neigh_node->tq_index, 0);
401 tmp_neigh_node->tq_avg = 397 tmp_neigh_node->tq_avg =
402 ring_buffer_avg(tmp_neigh_node->tq_recv); 398 ring_buffer_avg(tmp_neigh_node->tq_recv);
399 spin_unlock_bh(&tmp_neigh_node->tq_lock);
403 } 400 }
404 401
405 if (!neigh_node) { 402 if (!neigh_node) {
@@ -424,10 +421,12 @@ static void update_orig(struct bat_priv *bat_priv,
424 orig_node->flags = batman_packet->flags; 421 orig_node->flags = batman_packet->flags;
425 neigh_node->last_valid = jiffies; 422 neigh_node->last_valid = jiffies;
426 423
424 spin_lock_bh(&neigh_node->tq_lock);
427 ring_buffer_set(neigh_node->tq_recv, 425 ring_buffer_set(neigh_node->tq_recv,
428 &neigh_node->tq_index, 426 &neigh_node->tq_index,
429 batman_packet->tq); 427 batman_packet->tq);
430 neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv); 428 neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv);
429 spin_unlock_bh(&neigh_node->tq_lock);
431 430
432 if (!is_duplicate) { 431 if (!is_duplicate) {
433 orig_node->last_ttl = batman_packet->ttl; 432 orig_node->last_ttl = batman_packet->ttl;
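The new per-neighbor tq_lock closes a window in which one CPU could run ring_buffer_set() while another computed ring_buffer_avg(), averaging a half-updated window. A userspace sketch of a locked TQ ring follows; the window size reuses the TQ_GLOBAL_WINDOW_SIZE define from main.h, the real helpers live in ring_buffer.c and take the buffer and index separately, and the set/average pair is merged into one function here because the patch holds the lock across both calls.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define TQ_GLOBAL_WINDOW_SIZE 5 /* same define as in main.h */

struct tq_ring {
	pthread_mutex_t lock; /* stands in for neigh_node->tq_lock */
	uint8_t recv[TQ_GLOBAL_WINDOW_SIZE];
	uint8_t index;
	uint8_t avg;
};

/* store the newest TQ sample and recompute the average atomically
 * with respect to other updaters */
static void tq_update(struct tq_ring *r, uint8_t value)
{
	unsigned int sum = 0;
	int i;

	pthread_mutex_lock(&r->lock);
	r->recv[r->index] = value;
	r->index = (r->index + 1) % TQ_GLOBAL_WINDOW_SIZE;

	for (i = 0; i < TQ_GLOBAL_WINDOW_SIZE; i++)
		sum += r->recv[i];
	r->avg = sum / TQ_GLOBAL_WINDOW_SIZE;
	pthread_mutex_unlock(&r->lock);
}

int main(void)
{
	struct tq_ring r = { .lock = PTHREAD_MUTEX_INITIALIZER };

	tq_update(&r, 255);
	tq_update(&r, 200);
	printf("tq_avg = %u\n", r.avg); /* (255 + 200 + 0 + 0 + 0) / 5 = 91 */
	return 0;
}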
@@ -436,24 +435,23 @@ static void update_orig(struct bat_priv *bat_priv,
436 435
437 bonding_candidate_add(orig_node, neigh_node); 436 bonding_candidate_add(orig_node, neigh_node);
438 437
439 tmp_hna_buff_len = (hna_buff_len > batman_packet->num_hna * ETH_ALEN ? 438 tmp_tt_buff_len = (tt_buff_len > batman_packet->num_tt * ETH_ALEN ?
440 batman_packet->num_hna * ETH_ALEN : hna_buff_len); 439 batman_packet->num_tt * ETH_ALEN : tt_buff_len);
441 440
442 /* if this neighbor already is our next hop there is nothing 441 /* if this neighbor already is our next hop there is nothing
443 * to change */ 442 * to change */
444 if (orig_node->router == neigh_node) 443 router = orig_node_get_router(orig_node);
445 goto update_hna; 444 if (router == neigh_node)
445 goto update_tt;
446 446
447 /* if this neighbor does not offer a better TQ we won't consider it */ 447 /* if this neighbor does not offer a better TQ we won't consider it */
448 if ((orig_node->router) && 448 if (router && (router->tq_avg > neigh_node->tq_avg))
449 (orig_node->router->tq_avg > neigh_node->tq_avg)) 449 goto update_tt;
450 goto update_hna;
451 450
452 /* if the TQ is the same and the link not more symmetric we 451
453 * won't consider it either */ 452 * won't consider it either */
454 if ((orig_node->router) && 453 if (router && (neigh_node->tq_avg == router->tq_avg)) {
455 (neigh_node->tq_avg == orig_node->router->tq_avg)) { 454 orig_node_tmp = router->orig_node;
456 orig_node_tmp = orig_node->router->orig_node;
457 spin_lock_bh(&orig_node_tmp->ogm_cnt_lock); 455 spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
458 bcast_own_sum_orig = 456 bcast_own_sum_orig =
459 orig_node_tmp->bcast_own_sum[if_incoming->if_num]; 457 orig_node_tmp->bcast_own_sum[if_incoming->if_num];
@@ -466,16 +464,16 @@ static void update_orig(struct bat_priv *bat_priv,
466 spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock); 464 spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
467 465
468 if (bcast_own_sum_orig >= bcast_own_sum_neigh) 466 if (bcast_own_sum_orig >= bcast_own_sum_neigh)
469 goto update_hna; 467 goto update_tt;
470 } 468 }
471 469
472 update_routes(bat_priv, orig_node, neigh_node, 470 update_routes(bat_priv, orig_node, neigh_node,
473 hna_buff, tmp_hna_buff_len); 471 tt_buff, tmp_tt_buff_len);
474 goto update_gw; 472 goto update_gw;
475 473
476update_hna: 474update_tt:
477 update_routes(bat_priv, orig_node, orig_node->router, 475 update_routes(bat_priv, orig_node, router,
478 hna_buff, tmp_hna_buff_len); 476 tt_buff, tmp_tt_buff_len);
479 477
480update_gw: 478update_gw:
481 if (orig_node->gw_flags != batman_packet->gw_flags) 479 if (orig_node->gw_flags != batman_packet->gw_flags)
@@ -496,6 +494,8 @@ unlock:
496out: 494out:
497 if (neigh_node) 495 if (neigh_node)
498 neigh_node_free_ref(neigh_node); 496 neigh_node_free_ref(neigh_node);
497 if (router)
498 neigh_node_free_ref(router);
499} 499}
500 500
501/* checks whether the host restarted and is in the protection time. 501/* checks whether the host restarted and is in the protection time.
@@ -597,12 +597,14 @@ out:
597 597
598void receive_bat_packet(struct ethhdr *ethhdr, 598void receive_bat_packet(struct ethhdr *ethhdr,
599 struct batman_packet *batman_packet, 599 struct batman_packet *batman_packet,
600 unsigned char *hna_buff, int hna_buff_len, 600 unsigned char *tt_buff, int tt_buff_len,
601 struct hard_iface *if_incoming) 601 struct hard_iface *if_incoming)
602{ 602{
603 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); 603 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
604 struct hard_iface *hard_iface; 604 struct hard_iface *hard_iface;
605 struct orig_node *orig_neigh_node, *orig_node; 605 struct orig_node *orig_neigh_node, *orig_node;
606 struct neigh_node *router = NULL, *router_router = NULL;
607 struct neigh_node *orig_neigh_router = NULL;
606 char has_directlink_flag; 608 char has_directlink_flag;
607 char is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0; 609 char is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
608 char is_broadcast = 0, is_bidirectional, is_single_hop_neigh; 610 char is_broadcast = 0, is_bidirectional, is_single_hop_neigh;
@@ -747,14 +749,15 @@ void receive_bat_packet(struct ethhdr *ethhdr,
747 goto out; 749 goto out;
748 } 750 }
749 751
752 router = orig_node_get_router(orig_node);
753 if (router)
754 router_router = orig_node_get_router(router->orig_node);
755
750 /* avoid temporary routing loops */ 756 /* avoid temporary routing loops */
751 if ((orig_node->router) && 757 if (router && router_router &&
752 (orig_node->router->orig_node->router) && 758 (compare_eth(router->addr, batman_packet->prev_sender)) &&
753 (compare_eth(orig_node->router->addr,
754 batman_packet->prev_sender)) &&
755 !(compare_eth(batman_packet->orig, batman_packet->prev_sender)) && 759 !(compare_eth(batman_packet->orig, batman_packet->prev_sender)) &&
756 (compare_eth(orig_node->router->addr, 760 (compare_eth(router->addr, router_router->addr))) {
757 orig_node->router->orig_node->router->addr))) {
758 bat_dbg(DBG_BATMAN, bat_priv, 761 bat_dbg(DBG_BATMAN, bat_priv,
759 "Drop packet: ignoring all rebroadcast packets that " 762 "Drop packet: ignoring all rebroadcast packets that "
760 "may make me loop (sender: %pM)\n", ethhdr->h_source); 763 "may make me loop (sender: %pM)\n", ethhdr->h_source);
@@ -769,9 +772,11 @@ void receive_bat_packet(struct ethhdr *ethhdr,
769 if (!orig_neigh_node) 772 if (!orig_neigh_node)
770 goto out; 773 goto out;
771 774
775 orig_neigh_router = orig_node_get_router(orig_neigh_node);
776
772 /* drop packet if sender is not a direct neighbor and if we 777 /* drop packet if sender is not a direct neighbor and if we
773 * don't route towards it */ 778 * don't route towards it */
774 if (!is_single_hop_neigh && (!orig_neigh_node->router)) { 779 if (!is_single_hop_neigh && (!orig_neigh_router)) {
775 bat_dbg(DBG_BATMAN, bat_priv, 780 bat_dbg(DBG_BATMAN, bat_priv,
776 "Drop packet: OGM via unknown neighbor!\n"); 781 "Drop packet: OGM via unknown neighbor!\n");
777 goto out_neigh; 782 goto out_neigh;
@@ -789,14 +794,14 @@ void receive_bat_packet(struct ethhdr *ethhdr,
789 ((orig_node->last_real_seqno == batman_packet->seqno) && 794 ((orig_node->last_real_seqno == batman_packet->seqno) &&
790 (orig_node->last_ttl - 3 <= batman_packet->ttl)))) 795 (orig_node->last_ttl - 3 <= batman_packet->ttl))))
791 update_orig(bat_priv, orig_node, ethhdr, batman_packet, 796 update_orig(bat_priv, orig_node, ethhdr, batman_packet,
792 if_incoming, hna_buff, hna_buff_len, is_duplicate); 797 if_incoming, tt_buff, tt_buff_len, is_duplicate);
793 798
794 /* is single hop (direct) neighbor */ 799 /* is single hop (direct) neighbor */
795 if (is_single_hop_neigh) { 800 if (is_single_hop_neigh) {
796 801
797 /* mark direct link on incoming interface */ 802 /* mark direct link on incoming interface */
798 schedule_forward_packet(orig_node, ethhdr, batman_packet, 803 schedule_forward_packet(orig_node, ethhdr, batman_packet,
799 1, hna_buff_len, if_incoming); 804 1, tt_buff_len, if_incoming);
800 805
801 bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: " 806 bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: "
802 "rebroadcast neighbor packet with direct link flag\n"); 807 "rebroadcast neighbor packet with direct link flag\n");
@@ -819,12 +824,19 @@ void receive_bat_packet(struct ethhdr *ethhdr,
819 bat_dbg(DBG_BATMAN, bat_priv, 824 bat_dbg(DBG_BATMAN, bat_priv,
820 "Forwarding packet: rebroadcast originator packet\n"); 825 "Forwarding packet: rebroadcast originator packet\n");
821 schedule_forward_packet(orig_node, ethhdr, batman_packet, 826 schedule_forward_packet(orig_node, ethhdr, batman_packet,
822 0, hna_buff_len, if_incoming); 827 0, tt_buff_len, if_incoming);
823 828
824out_neigh: 829out_neigh:
825 if ((orig_neigh_node) && (!is_single_hop_neigh)) 830 if ((orig_neigh_node) && (!is_single_hop_neigh))
826 orig_node_free_ref(orig_neigh_node); 831 orig_node_free_ref(orig_neigh_node);
827out: 832out:
833 if (router)
834 neigh_node_free_ref(router);
835 if (router_router)
836 neigh_node_free_ref(router_router);
837 if (orig_neigh_router)
838 neigh_node_free_ref(orig_neigh_router);
839
828 orig_node_free_ref(orig_node); 840 orig_node_free_ref(orig_node);
829} 841}
830 842
@@ -868,8 +880,9 @@ int recv_bat_packet(struct sk_buff *skb, struct hard_iface *hard_iface)
868static int recv_my_icmp_packet(struct bat_priv *bat_priv, 880static int recv_my_icmp_packet(struct bat_priv *bat_priv,
869 struct sk_buff *skb, size_t icmp_len) 881 struct sk_buff *skb, size_t icmp_len)
870{ 882{
883 struct hard_iface *primary_if = NULL;
871 struct orig_node *orig_node = NULL; 884 struct orig_node *orig_node = NULL;
872 struct neigh_node *neigh_node = NULL; 885 struct neigh_node *router = NULL;
873 struct icmp_packet_rr *icmp_packet; 886 struct icmp_packet_rr *icmp_packet;
874 int ret = NET_RX_DROP; 887 int ret = NET_RX_DROP;
875 888
@@ -881,28 +894,19 @@ static int recv_my_icmp_packet(struct bat_priv *bat_priv,
881 goto out; 894 goto out;
882 } 895 }
883 896
884 if (!bat_priv->primary_if) 897 primary_if = primary_if_get_selected(bat_priv);
898 if (!primary_if)
885 goto out; 899 goto out;
886 900
887 /* answer echo request (ping) */ 901 /* answer echo request (ping) */
888 /* get routing information */ 902 /* get routing information */
889 rcu_read_lock();
890 orig_node = orig_hash_find(bat_priv, icmp_packet->orig); 903 orig_node = orig_hash_find(bat_priv, icmp_packet->orig);
891
892 if (!orig_node) 904 if (!orig_node)
893 goto unlock; 905 goto out;
894
895 neigh_node = orig_node->router;
896
897 if (!neigh_node)
898 goto unlock;
899
900 if (!atomic_inc_not_zero(&neigh_node->refcount)) {
901 neigh_node = NULL;
902 goto unlock;
903 }
904 906
905 rcu_read_unlock(); 907 router = orig_node_get_router(orig_node);
908 if (!router)
909 goto out;
906 910
907 /* create a copy of the skb, if needed, to modify it. */ 911 /* create a copy of the skb, if needed, to modify it. */
908 if (skb_cow(skb, sizeof(struct ethhdr)) < 0) 912 if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
@@ -911,20 +915,18 @@ static int recv_my_icmp_packet(struct bat_priv *bat_priv,
911 icmp_packet = (struct icmp_packet_rr *)skb->data; 915 icmp_packet = (struct icmp_packet_rr *)skb->data;
912 916
913 memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN); 917 memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
914 memcpy(icmp_packet->orig, 918 memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
915 bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
916 icmp_packet->msg_type = ECHO_REPLY; 919 icmp_packet->msg_type = ECHO_REPLY;
917 icmp_packet->ttl = TTL; 920 icmp_packet->ttl = TTL;
918 921
919 send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); 922 send_skb_packet(skb, router->if_incoming, router->addr);
920 ret = NET_RX_SUCCESS; 923 ret = NET_RX_SUCCESS;
921 goto out;
922 924
923unlock:
924 rcu_read_unlock();
925out: 925out:
926 if (neigh_node) 926 if (primary_if)
927 neigh_node_free_ref(neigh_node); 927 hardif_free_ref(primary_if);
928 if (router)
929 neigh_node_free_ref(router);
928 if (orig_node) 930 if (orig_node)
929 orig_node_free_ref(orig_node); 931 orig_node_free_ref(orig_node);
930 return ret; 932 return ret;
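recv_my_icmp_packet() builds the echo reply in place: dst takes the old orig, orig becomes the primary interface's MAC (now read from the refcounted primary_if instead of bat_priv->primary_if), the type flips to ECHO_REPLY, and the TTL restarts before the skb goes back out through the originator's router. A userspace sketch of that field rewrite on a trimmed-down packet; the field set is reduced and the constants are illustrative stand-ins for the ones in packet.h.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6
#define ECHO_REQUEST 8 /* illustrative values; see packet.h */
#define ECHO_REPLY 0
#define TTL 50

struct icmp_pkt {
	uint8_t msg_type;
	uint8_t ttl;
	uint8_t dst[ETH_ALEN];
	uint8_t orig[ETH_ALEN];
} __attribute__((packed));

/* turn a request addressed to us into the reply we send back */
static void make_echo_reply(struct icmp_pkt *p, const uint8_t *own_addr)
{
	memcpy(p->dst, p->orig, ETH_ALEN);   /* reply goes to the sender */
	memcpy(p->orig, own_addr, ETH_ALEN); /* we are the new originator */
	p->msg_type = ECHO_REPLY;
	p->ttl = TTL; /* restart the TTL for the return trip */
}

int main(void)
{
	uint8_t me[ETH_ALEN] = { 0x02, 0, 0, 0, 0, 1 };
	struct icmp_pkt p = { .msg_type = ECHO_REQUEST, .ttl = 3,
			      .orig = { 0x02, 0, 0, 0, 0, 2 } };

	make_echo_reply(&p, me);
	printf("type=%u ttl=%u dst=%02x:...:%02x\n",
	       p.msg_type, p.ttl, p.dst[0], p.dst[5]);
	return 0;
}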
@@ -933,8 +935,9 @@ out:
933static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv, 935static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
934 struct sk_buff *skb) 936 struct sk_buff *skb)
935{ 937{
938 struct hard_iface *primary_if = NULL;
936 struct orig_node *orig_node = NULL; 939 struct orig_node *orig_node = NULL;
937 struct neigh_node *neigh_node = NULL; 940 struct neigh_node *router = NULL;
938 struct icmp_packet *icmp_packet; 941 struct icmp_packet *icmp_packet;
939 int ret = NET_RX_DROP; 942 int ret = NET_RX_DROP;
940 943
@@ -948,27 +951,18 @@ static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
948 goto out; 951 goto out;
949 } 952 }
950 953
951 if (!bat_priv->primary_if) 954 primary_if = primary_if_get_selected(bat_priv);
955 if (!primary_if)
952 goto out; 956 goto out;
953 957
954 /* get routing information */ 958 /* get routing information */
955 rcu_read_lock();
956 orig_node = orig_hash_find(bat_priv, icmp_packet->orig); 959 orig_node = orig_hash_find(bat_priv, icmp_packet->orig);
957
958 if (!orig_node) 960 if (!orig_node)
959 goto unlock; 961 goto out;
960
961 neigh_node = orig_node->router;
962
963 if (!neigh_node)
964 goto unlock;
965
966 if (!atomic_inc_not_zero(&neigh_node->refcount)) {
967 neigh_node = NULL;
968 goto unlock;
969 }
970 962
971 rcu_read_unlock(); 963 router = orig_node_get_router(orig_node);
964 if (!router)
965 goto out;
972 966
973 /* create a copy of the skb, if needed, to modify it. */ 967 /* create a copy of the skb, if needed, to modify it. */
974 if (skb_cow(skb, sizeof(struct ethhdr)) < 0) 968 if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
@@ -977,20 +971,18 @@ static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
977 icmp_packet = (struct icmp_packet *)skb->data; 971 icmp_packet = (struct icmp_packet *)skb->data;
978 972
979 memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN); 973 memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
980 memcpy(icmp_packet->orig, 974 memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
981 bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
982 icmp_packet->msg_type = TTL_EXCEEDED; 975 icmp_packet->msg_type = TTL_EXCEEDED;
983 icmp_packet->ttl = TTL; 976 icmp_packet->ttl = TTL;
984 977
985 send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); 978 send_skb_packet(skb, router->if_incoming, router->addr);
986 ret = NET_RX_SUCCESS; 979 ret = NET_RX_SUCCESS;
987 goto out;
988 980
989unlock:
990 rcu_read_unlock();
991out: 981out:
992 if (neigh_node) 982 if (primary_if)
993 neigh_node_free_ref(neigh_node); 983 hardif_free_ref(primary_if);
984 if (router)
985 neigh_node_free_ref(router);
994 if (orig_node) 986 if (orig_node)
995 orig_node_free_ref(orig_node); 987 orig_node_free_ref(orig_node);
996 return ret; 988 return ret;
@@ -1003,7 +995,7 @@ int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if)
1003 struct icmp_packet_rr *icmp_packet; 995 struct icmp_packet_rr *icmp_packet;
1004 struct ethhdr *ethhdr; 996 struct ethhdr *ethhdr;
1005 struct orig_node *orig_node = NULL; 997 struct orig_node *orig_node = NULL;
1006 struct neigh_node *neigh_node = NULL; 998 struct neigh_node *router = NULL;
1007 int hdr_size = sizeof(struct icmp_packet); 999 int hdr_size = sizeof(struct icmp_packet);
1008 int ret = NET_RX_DROP; 1000 int ret = NET_RX_DROP;
1009 1001
@@ -1050,23 +1042,13 @@ int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if)
1050 return recv_icmp_ttl_exceeded(bat_priv, skb); 1042 return recv_icmp_ttl_exceeded(bat_priv, skb);
1051 1043
1052 /* get routing information */ 1044 /* get routing information */
1053 rcu_read_lock();
1054 orig_node = orig_hash_find(bat_priv, icmp_packet->dst); 1045 orig_node = orig_hash_find(bat_priv, icmp_packet->dst);
1055
1056 if (!orig_node) 1046 if (!orig_node)
1057 goto unlock; 1047 goto out;
1058
1059 neigh_node = orig_node->router;
1060
1061 if (!neigh_node)
1062 goto unlock;
1063
1064 if (!atomic_inc_not_zero(&neigh_node->refcount)) {
1065 neigh_node = NULL;
1066 goto unlock;
1067 }
1068 1048
1069 rcu_read_unlock(); 1049 router = orig_node_get_router(orig_node);
1050 if (!router)
1051 goto out;
1070 1052
1071 /* create a copy of the skb, if needed, to modify it. */ 1053 /* create a copy of the skb, if needed, to modify it. */
1072 if (skb_cow(skb, sizeof(struct ethhdr)) < 0) 1054 if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
@@ -1078,20 +1060,117 @@ int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if)
1078 icmp_packet->ttl--; 1060 icmp_packet->ttl--;
1079 1061
1080 /* route it */ 1062 /* route it */
1081 send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); 1063 send_skb_packet(skb, router->if_incoming, router->addr);
1082 ret = NET_RX_SUCCESS; 1064 ret = NET_RX_SUCCESS;
1083 goto out;
1084 1065
1085unlock:
1086 rcu_read_unlock();
1087out: 1066out:
1088 if (neigh_node) 1067 if (router)
1089 neigh_node_free_ref(neigh_node); 1068 neigh_node_free_ref(router);
1090 if (orig_node) 1069 if (orig_node)
1091 orig_node_free_ref(orig_node); 1070 orig_node_free_ref(orig_node);
1092 return ret; 1071 return ret;
1093} 1072}
1094 1073
1074/* In the bonding case, send the packets in a round
1075 * robin fashion over the remaining interfaces.
1076 *
1077 * This method rotates the bonding list and increases the
1078 * returned router's refcount. */
1079static struct neigh_node *find_bond_router(struct orig_node *primary_orig,
1080 struct hard_iface *recv_if)
1081{
1082 struct neigh_node *tmp_neigh_node;
1083 struct neigh_node *router = NULL, *first_candidate = NULL;
1084
1085 rcu_read_lock();
1086 list_for_each_entry_rcu(tmp_neigh_node, &primary_orig->bond_list,
1087 bonding_list) {
1088 if (!first_candidate)
1089 first_candidate = tmp_neigh_node;
1090
1091 /* recv_if == NULL on the first node. */
1092 if (tmp_neigh_node->if_incoming == recv_if)
1093 continue;
1094
1095 if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
1096 continue;
1097
1098 router = tmp_neigh_node;
1099 break;
1100 }
1101
1102 /* use the first candidate if nothing was found. */
1103 if (!router && first_candidate &&
1104 atomic_inc_not_zero(&first_candidate->refcount))
1105 router = first_candidate;
1106
1107 if (!router)
1108 goto out;
1109
1110 /* selected should point to the next element
1111 * after the current router */
1112 spin_lock_bh(&primary_orig->neigh_list_lock);
1113 /* this is a list_move(), which unfortunately
1114 * does not exist as an RCU version */
1115 list_del_rcu(&primary_orig->bond_list);
1116 list_add_rcu(&primary_orig->bond_list,
1117 &router->bonding_list);
1118 spin_unlock_bh(&primary_orig->neigh_list_lock);
1119
1120out:
1121 rcu_read_unlock();
1122 return router;
1123}
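find_bond_router() gets round robin for free from the list itself: after a candidate is picked, the list head is moved directly behind it (list_del_rcu() plus list_add_rcu(), since list_move() has no RCU variant), so the next lookup naturally starts at the following neighbor. A userspace sketch with a plain circular list, locking and RCU elided:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h)
{
	h->next = h->prev = h;
}

static void list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

static void list_add(struct list_head *e, struct list_head *after)
{
	e->next = after->next;
	e->prev = after;
	after->next->prev = e;
	after->next = e;
}

struct neigh { int id; struct list_head bonding_list; };

/* pick the candidate right after the head, then move the head behind
 * it so the next call starts at the following neighbor */
static struct neigh *round_robin(struct list_head *head)
{
	struct list_head *first = head->next;
	struct neigh *picked;

	if (first == head)
		return NULL; /* empty bond list */

	picked = (struct neigh *)((char *)first -
				  offsetof(struct neigh, bonding_list));
	list_del(head); /* the list_del_rcu() half of the move */
	list_add(head, &picked->bonding_list); /* ...the list_add_rcu() half */
	return picked;
}

int main(void)
{
	struct neigh a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };
	struct list_head head;
	int i;

	list_init(&head);
	list_add(&a.bonding_list, &head);
	list_add(&b.bonding_list, &a.bonding_list);
	list_add(&c.bonding_list, &b.bonding_list);

	for (i = 0; i < 5; i++)
		printf("picked %d\n", round_robin(&head)->id);
	return 0;
}

Storing the rotation state in the list order avoids a separate per-originator counter and keeps readers lock-free; only the rotation itself needs neigh_list_lock.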
1124
1125/* Interface Alternating: Use the best of the
1126 * remaining candidates which are not using
1127 * this interface.
1128 *
1129 * Increases the returned router's refcount */
1130static struct neigh_node *find_ifalter_router(struct orig_node *primary_orig,
1131 struct hard_iface *recv_if)
1132{
1133 struct neigh_node *tmp_neigh_node;
1134 struct neigh_node *router = NULL, *first_candidate = NULL;
1135
1136 rcu_read_lock();
1137 list_for_each_entry_rcu(tmp_neigh_node, &primary_orig->bond_list,
1138 bonding_list) {
1139 if (!first_candidate)
1140 first_candidate = tmp_neigh_node;
1141
1142 /* recv_if == NULL on the first node. */
1143 if (tmp_neigh_node->if_incoming == recv_if)
1144 continue;
1145
1146 if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
1147 continue;
1148
1149 /* if we don't have a router yet
1150 * or this one is better, choose it. */
1151 if ((!router) ||
1152 (tmp_neigh_node->tq_avg > router->tq_avg)) {
1153 /* decrement refcount of
1154 * previously selected router */
1155 if (router)
1156 neigh_node_free_ref(router);
1157
1158 router = tmp_neigh_node;
1159 atomic_inc_not_zero(&router->refcount);
1160 }
1161
1162 neigh_node_free_ref(tmp_neigh_node);
1163 }
1164
1165 /* use the first candidate if nothing was found. */
1166 if (!router && first_candidate &&
1167 atomic_inc_not_zero(&first_candidate->refcount))
1168 router = first_candidate;
1169
1170 rcu_read_unlock();
1171 return router;
1172}
1173
1095/* find a suitable router for this originator, and use 1174/* find a suitable router for this originator, and use
1096 * bonding if possible. increases the found neighbor's 1175
1097 * refcount. */ 1176
@@ -1101,15 +1180,16 @@ struct neigh_node *find_router(struct bat_priv *bat_priv,
1101{ 1180{
1102 struct orig_node *primary_orig_node; 1181 struct orig_node *primary_orig_node;
1103 struct orig_node *router_orig; 1182 struct orig_node *router_orig;
1104 struct neigh_node *router, *first_candidate, *tmp_neigh_node; 1183 struct neigh_node *router;
1105 static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0}; 1184 static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
1106 int bonding_enabled; 1185 int bonding_enabled;
1107 1186
1108 if (!orig_node) 1187 if (!orig_node)
1109 return NULL; 1188 return NULL;
1110 1189
1111 if (!orig_node->router) 1190 router = orig_node_get_router(orig_node);
1112 return NULL; 1191 if (!router)
1192 goto err;
1113 1193
1114 /* without bonding, the first node should 1194 /* without bonding, the first node should
1115 * always choose the default router. */ 1195 * always choose the default router. */
@@ -1117,12 +1197,9 @@ struct neigh_node *find_router(struct bat_priv *bat_priv,
1117 1197
1118 rcu_read_lock(); 1198 rcu_read_lock();
1119 /* select default router to output */ 1199 /* select default router to output */
1120 router = orig_node->router; 1200 router_orig = router->orig_node;
1121 router_orig = orig_node->router->orig_node; 1201 if (!router_orig)
1122 if (!router_orig || !atomic_inc_not_zero(&router->refcount)) { 1202 goto err_unlock;
1123 rcu_read_unlock();
1124 return NULL;
1125 }
1126 1203
1127 if ((!recv_if) && (!bonding_enabled)) 1204 if ((!recv_if) && (!bonding_enabled))
1128 goto return_router; 1205 goto return_router;
@@ -1151,91 +1228,26 @@ struct neigh_node *find_router(struct bat_priv *bat_priv,
1151 if (atomic_read(&primary_orig_node->bond_candidates) < 2) 1228 if (atomic_read(&primary_orig_node->bond_candidates) < 2)
1152 goto return_router; 1229 goto return_router;
1153 1230
1154
1155 /* all nodes between should choose a candidate which 1231 /* all nodes between should choose a candidate which
1156 * is is not on the interface where the packet came 1232 * is is not on the interface where the packet came
1157 * in. */ 1233 * in. */
1158 1234
1159 neigh_node_free_ref(router); 1235 neigh_node_free_ref(router);
1160 first_candidate = NULL;
1161 router = NULL;
1162
1163 if (bonding_enabled) {
1164 /* in the bonding case, send the packets in a round
1165 * robin fashion over the remaining interfaces. */
1166
1167 list_for_each_entry_rcu(tmp_neigh_node,
1168 &primary_orig_node->bond_list, bonding_list) {
1169 if (!first_candidate)
1170 first_candidate = tmp_neigh_node;
1171 /* recv_if == NULL on the first node. */
1172 if (tmp_neigh_node->if_incoming != recv_if &&
1173 atomic_inc_not_zero(&tmp_neigh_node->refcount)) {
1174 router = tmp_neigh_node;
1175 break;
1176 }
1177 }
1178
1179 /* use the first candidate if nothing was found. */
1180 if (!router && first_candidate &&
1181 atomic_inc_not_zero(&first_candidate->refcount))
1182 router = first_candidate;
1183 1236
1184 if (!router) { 1237 if (bonding_enabled)
1185 rcu_read_unlock(); 1238 router = find_bond_router(primary_orig_node, recv_if);
1186 return NULL; 1239 else
1187 } 1240 router = find_ifalter_router(primary_orig_node, recv_if);
1188
1189 /* selected should point to the next element
1190 * after the current router */
1191 spin_lock_bh(&primary_orig_node->neigh_list_lock);
1192 /* this is a list_move(), which unfortunately
1193 * does not exist as an RCU version */
1194 list_del_rcu(&primary_orig_node->bond_list);
1195 list_add_rcu(&primary_orig_node->bond_list,
1196 &router->bonding_list);
1197 spin_unlock_bh(&primary_orig_node->neigh_list_lock);
1198
1199 } else {
1200 /* if bonding is disabled, use the best of the
1201 * remaining candidates which are not using
1202 * this interface. */
1203 list_for_each_entry_rcu(tmp_neigh_node,
1204 &primary_orig_node->bond_list, bonding_list) {
1205 if (!first_candidate)
1206 first_candidate = tmp_neigh_node;
1207
1208 /* recv_if == NULL on the first node. */
1209 if (tmp_neigh_node->if_incoming == recv_if)
1210 continue;
1211
1212 if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
1213 continue;
1214
1215 /* if we don't have a router yet
1216 * or this one is better, choose it. */
1217 if ((!router) ||
1218 (tmp_neigh_node->tq_avg > router->tq_avg)) {
1219 /* decrement refcount of
1220 * previously selected router */
1221 if (router)
1222 neigh_node_free_ref(router);
1223
1224 router = tmp_neigh_node;
1225 atomic_inc_not_zero(&router->refcount);
1226 }
1227
1228 neigh_node_free_ref(tmp_neigh_node);
1229 }
1230 1241
1231 /* use the first candidate if nothing was found. */
1232 if (!router && first_candidate &&
1233 atomic_inc_not_zero(&first_candidate->refcount))
1234 router = first_candidate;
1235 }
1236return_router: 1242return_router:
1237 rcu_read_unlock(); 1243 rcu_read_unlock();
1238 return router; 1244 return router;
1245err_unlock:
1246 rcu_read_unlock();
1247err:
1248 if (router)
1249 neigh_node_free_ref(router);
1250 return NULL;
1239} 1251}
1240 1252
1241static int check_unicast_packet(struct sk_buff *skb, int hdr_size) 1253static int check_unicast_packet(struct sk_buff *skb, int hdr_size)
@@ -1284,13 +1296,10 @@ int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
1284 } 1296 }
1285 1297
1286 /* get routing information */ 1298 /* get routing information */
1287 rcu_read_lock();
1288 orig_node = orig_hash_find(bat_priv, unicast_packet->dest); 1299 orig_node = orig_hash_find(bat_priv, unicast_packet->dest);
1289 1300
1290 if (!orig_node) 1301 if (!orig_node)
1291 goto unlock; 1302 goto out;
1292
1293 rcu_read_unlock();
1294 1303
1295 /* find_router() increases neigh_nodes refcount if found. */ 1304 /* find_router() increases neigh_nodes refcount if found. */
1296 neigh_node = find_router(bat_priv, orig_node, recv_if); 1305 neigh_node = find_router(bat_priv, orig_node, recv_if);
@@ -1336,10 +1345,7 @@ int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
1336 /* route it */ 1345 /* route it */
1337 send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); 1346 send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
1338 ret = NET_RX_SUCCESS; 1347 ret = NET_RX_SUCCESS;
1339 goto out;
1340 1348
1341unlock:
1342 rcu_read_unlock();
1343out: 1349out:
1344 if (neigh_node) 1350 if (neigh_node)
1345 neigh_node_free_ref(neigh_node); 1351 neigh_node_free_ref(neigh_node);
@@ -1438,13 +1444,10 @@ int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
1438 if (bcast_packet->ttl < 2) 1444 if (bcast_packet->ttl < 2)
1439 goto out; 1445 goto out;
1440 1446
1441 rcu_read_lock();
1442 orig_node = orig_hash_find(bat_priv, bcast_packet->orig); 1447 orig_node = orig_hash_find(bat_priv, bcast_packet->orig);
1443 1448
1444 if (!orig_node) 1449 if (!orig_node)
1445 goto rcu_unlock; 1450 goto out;
1446
1447 rcu_read_unlock();
1448 1451
1449 spin_lock_bh(&orig_node->bcast_seqno_lock); 1452 spin_lock_bh(&orig_node->bcast_seqno_lock);
1450 1453
@@ -1475,9 +1478,6 @@ int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
1475 ret = NET_RX_SUCCESS; 1478 ret = NET_RX_SUCCESS;
1476 goto out; 1479 goto out;
1477 1480
1478rcu_unlock:
1479 rcu_read_unlock();
1480 goto out;
1481spin_unlock: 1481spin_unlock:
1482 spin_unlock_bh(&orig_node->bcast_seqno_lock); 1482 spin_unlock_bh(&orig_node->bcast_seqno_lock);
1483out: 1483out:
diff --git a/net/batman-adv/routing.h b/net/batman-adv/routing.h
index b5a064c88a4f..870f29842b28 100644
--- a/net/batman-adv/routing.h
+++ b/net/batman-adv/routing.h
@@ -25,11 +25,11 @@
25void slide_own_bcast_window(struct hard_iface *hard_iface); 25void slide_own_bcast_window(struct hard_iface *hard_iface);
26void receive_bat_packet(struct ethhdr *ethhdr, 26void receive_bat_packet(struct ethhdr *ethhdr,
27 struct batman_packet *batman_packet, 27 struct batman_packet *batman_packet,
28 unsigned char *hna_buff, int hna_buff_len, 28 unsigned char *tt_buff, int tt_buff_len,
29 struct hard_iface *if_incoming); 29 struct hard_iface *if_incoming);
30void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node, 30void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
31 struct neigh_node *neigh_node, unsigned char *hna_buff, 31 struct neigh_node *neigh_node, unsigned char *tt_buff,
32 int hna_buff_len); 32 int tt_buff_len);
33int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if); 33int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
34int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if); 34int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if);
35int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if); 35int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index d49e54d932af..33779278f1b2 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -121,7 +121,7 @@ static void send_packet_to_if(struct forw_packet *forw_packet,
121 /* adjust all flags and log packets */ 121 /* adjust all flags and log packets */
122 while (aggregated_packet(buff_pos, 122 while (aggregated_packet(buff_pos,
123 forw_packet->packet_len, 123 forw_packet->packet_len,
124 batman_packet->num_hna)) { 124 batman_packet->num_tt)) {
125 125
126 /* we might have aggregated direct link packets with an 126 /* we might have aggregated direct link packets with an
127 * ordinary base packet */ 127 * ordinary base packet */
@@ -146,7 +146,7 @@ static void send_packet_to_if(struct forw_packet *forw_packet,
146 hard_iface->net_dev->dev_addr); 146 hard_iface->net_dev->dev_addr);
147 147
148 buff_pos += sizeof(struct batman_packet) + 148 buff_pos += sizeof(struct batman_packet) +
149 (batman_packet->num_hna * ETH_ALEN); 149 (batman_packet->num_tt * ETH_ALEN);
150 packet_num++; 150 packet_num++;
151 batman_packet = (struct batman_packet *) 151 batman_packet = (struct batman_packet *)
152 (forw_packet->skb->data + buff_pos); 152 (forw_packet->skb->data + buff_pos);
@@ -222,7 +222,7 @@ static void rebuild_batman_packet(struct bat_priv *bat_priv,
222 struct batman_packet *batman_packet; 222 struct batman_packet *batman_packet;
223 223
224 new_len = sizeof(struct batman_packet) + 224 new_len = sizeof(struct batman_packet) +
225 (bat_priv->num_local_hna * ETH_ALEN); 225 (bat_priv->num_local_tt * ETH_ALEN);
226 new_buff = kmalloc(new_len, GFP_ATOMIC); 226 new_buff = kmalloc(new_len, GFP_ATOMIC);
227 227
228 /* keep old buffer if kmalloc should fail */ 228 /* keep old buffer if kmalloc should fail */
@@ -231,7 +231,7 @@ static void rebuild_batman_packet(struct bat_priv *bat_priv,
231 sizeof(struct batman_packet)); 231 sizeof(struct batman_packet));
232 batman_packet = (struct batman_packet *)new_buff; 232 batman_packet = (struct batman_packet *)new_buff;
233 233
234 batman_packet->num_hna = hna_local_fill_buffer(bat_priv, 234 batman_packet->num_tt = tt_local_fill_buffer(bat_priv,
235 new_buff + sizeof(struct batman_packet), 235 new_buff + sizeof(struct batman_packet),
236 new_len - sizeof(struct batman_packet)); 236 new_len - sizeof(struct batman_packet));
237 237
@@ -244,6 +244,7 @@ static void rebuild_batman_packet(struct bat_priv *bat_priv,
244void schedule_own_packet(struct hard_iface *hard_iface) 244void schedule_own_packet(struct hard_iface *hard_iface)
245{ 245{
246 struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); 246 struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
247 struct hard_iface *primary_if;
247 unsigned long send_time; 248 unsigned long send_time;
248 struct batman_packet *batman_packet; 249 struct batman_packet *batman_packet;
249 int vis_server; 250 int vis_server;
@@ -253,6 +254,7 @@ void schedule_own_packet(struct hard_iface *hard_iface)
253 return; 254 return;
254 255
255 vis_server = atomic_read(&bat_priv->vis_mode); 256 vis_server = atomic_read(&bat_priv->vis_mode);
257 primary_if = primary_if_get_selected(bat_priv);
256 258
257 /** 259 /**
258 * the interface gets activated here to avoid race conditions between 260 * the interface gets activated here to avoid race conditions between
@@ -264,9 +266,9 @@ void schedule_own_packet(struct hard_iface *hard_iface)
264 if (hard_iface->if_status == IF_TO_BE_ACTIVATED) 266 if (hard_iface->if_status == IF_TO_BE_ACTIVATED)
265 hard_iface->if_status = IF_ACTIVE; 267 hard_iface->if_status = IF_ACTIVE;
266 268
267 /* if local hna has changed and interface is a primary interface */ 269 /* if local tt has changed and interface is a primary interface */
268 if ((atomic_read(&bat_priv->hna_local_changed)) && 270 if ((atomic_read(&bat_priv->tt_local_changed)) &&
269 (hard_iface == bat_priv->primary_if)) 271 (hard_iface == primary_if))
270 rebuild_batman_packet(bat_priv, hard_iface); 272 rebuild_batman_packet(bat_priv, hard_iface);
271 273
272 /** 274 /**
@@ -284,7 +286,7 @@ void schedule_own_packet(struct hard_iface *hard_iface)
284 else 286 else
285 batman_packet->flags &= ~VIS_SERVER; 287 batman_packet->flags &= ~VIS_SERVER;
286 288
287 if ((hard_iface == bat_priv->primary_if) && 289 if ((hard_iface == primary_if) &&
288 (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER)) 290 (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER))
289 batman_packet->gw_flags = 291 batman_packet->gw_flags =
290 (uint8_t)atomic_read(&bat_priv->gw_bandwidth); 292 (uint8_t)atomic_read(&bat_priv->gw_bandwidth);
@@ -299,15 +301,19 @@ void schedule_own_packet(struct hard_iface *hard_iface)
299 hard_iface->packet_buff, 301 hard_iface->packet_buff,
300 hard_iface->packet_len, 302 hard_iface->packet_len,
301 hard_iface, 1, send_time); 303 hard_iface, 1, send_time);
304
305 if (primary_if)
306 hardif_free_ref(primary_if);
302} 307}
303 308
304void schedule_forward_packet(struct orig_node *orig_node, 309void schedule_forward_packet(struct orig_node *orig_node,
305 struct ethhdr *ethhdr, 310 struct ethhdr *ethhdr,
306 struct batman_packet *batman_packet, 311 struct batman_packet *batman_packet,
307 uint8_t directlink, int hna_buff_len, 312 uint8_t directlink, int tt_buff_len,
308 struct hard_iface *if_incoming) 313 struct hard_iface *if_incoming)
309{ 314{
310 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); 315 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
316 struct neigh_node *router;
311 unsigned char in_tq, in_ttl, tq_avg = 0; 317 unsigned char in_tq, in_ttl, tq_avg = 0;
312 unsigned long send_time; 318 unsigned long send_time;
313 319
@@ -316,6 +322,8 @@ void schedule_forward_packet(struct orig_node *orig_node,
316 return; 322 return;
317 } 323 }
318 324
325 router = orig_node_get_router(orig_node);
326
319 in_tq = batman_packet->tq; 327 in_tq = batman_packet->tq;
320 in_ttl = batman_packet->ttl; 328 in_ttl = batman_packet->ttl;
321 329
@@ -324,20 +332,22 @@ void schedule_forward_packet(struct orig_node *orig_node,
324 332
325 /* rebroadcast tq of our best ranking neighbor to ensure the rebroadcast 333 /* rebroadcast tq of our best ranking neighbor to ensure the rebroadcast
326 * of our best tq value */ 334 * of our best tq value */
327 if ((orig_node->router) && (orig_node->router->tq_avg != 0)) { 335 if (router && router->tq_avg != 0) {
328 336
329 /* rebroadcast ogm of best ranking neighbor as is */ 337 /* rebroadcast ogm of best ranking neighbor as is */
330 if (!compare_eth(orig_node->router->addr, ethhdr->h_source)) { 338 if (!compare_eth(router->addr, ethhdr->h_source)) {
331 batman_packet->tq = orig_node->router->tq_avg; 339 batman_packet->tq = router->tq_avg;
332 340
333 if (orig_node->router->last_ttl) 341 if (router->last_ttl)
334 batman_packet->ttl = orig_node->router->last_ttl 342 batman_packet->ttl = router->last_ttl - 1;
335 - 1;
336 } 343 }
337 344
338 tq_avg = orig_node->router->tq_avg; 345 tq_avg = router->tq_avg;
339 } 346 }
340 347
348 if (router)
349 neigh_node_free_ref(router);
350
341 /* apply hop penalty */ 351 /* apply hop penalty */
342 batman_packet->tq = hop_penalty(batman_packet->tq, bat_priv); 352 batman_packet->tq = hop_penalty(batman_packet->tq, bat_priv);
343 353
@@ -359,7 +369,7 @@ void schedule_forward_packet(struct orig_node *orig_node,
359 send_time = forward_send_time(); 369 send_time = forward_send_time();
360 add_bat_packet_to_list(bat_priv, 370 add_bat_packet_to_list(bat_priv,
361 (unsigned char *)batman_packet, 371 (unsigned char *)batman_packet,
362 sizeof(struct batman_packet) + hna_buff_len, 372 sizeof(struct batman_packet) + tt_buff_len,
363 if_incoming, 0, send_time); 373 if_incoming, 0, send_time);
364} 374}
365 375
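
schedule_forward_packet() gets the same treatment for the best-neighbor pointer: orig_node->router is no longer dereferenced directly. A condensed sketch of the counted-reference access, with names taken from the hunk above:

	struct neigh_node *router;

	router = orig_node_get_router(orig_node); /* NULL or counted ref */
	if (router) {
		/* tq_avg and last_ttl stay valid while the ref is held */
		batman_packet->tq = router->tq_avg;
		neigh_node_free_ref(router);
	}
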
@@ -367,6 +377,8 @@ static void forw_packet_free(struct forw_packet *forw_packet)
367{ 377{
368 if (forw_packet->skb) 378 if (forw_packet->skb)
369 kfree_skb(forw_packet->skb); 379 kfree_skb(forw_packet->skb);
380 if (forw_packet->if_incoming)
381 hardif_free_ref(forw_packet->if_incoming);
370 kfree(forw_packet); 382 kfree(forw_packet);
371} 383}
372 384
@@ -388,7 +400,6 @@ static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
388 send_time); 400 send_time);
389} 401}
390 402
391#define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
392/* add a broadcast packet to the queue and set up timers. broadcast packets 403/* add a broadcast packet to the queue and set up timers. broadcast packets
393 * are sent multiple times to increase probability of being received. 404 * are sent multiple times to increase probability of being received.
394 * 405 *
@@ -399,6 +410,7 @@ static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
399 * skb is freed. */ 410 * skb is freed. */
400int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb) 411int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb)
401{ 412{
413 struct hard_iface *primary_if = NULL;
402 struct forw_packet *forw_packet; 414 struct forw_packet *forw_packet;
403 struct bcast_packet *bcast_packet; 415 struct bcast_packet *bcast_packet;
404 416
@@ -407,8 +419,9 @@ int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb)
407 goto out; 419 goto out;
408 } 420 }
409 421
410 if (!bat_priv->primary_if) 422 primary_if = primary_if_get_selected(bat_priv);
411 goto out; 423 if (!primary_if)
424 goto out_and_inc;
412 425
413 forw_packet = kmalloc(sizeof(struct forw_packet), GFP_ATOMIC); 426 forw_packet = kmalloc(sizeof(struct forw_packet), GFP_ATOMIC);
414 427
@@ -426,7 +439,7 @@ int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb)
426 skb_reset_mac_header(skb); 439 skb_reset_mac_header(skb);
427 440
428 forw_packet->skb = skb; 441 forw_packet->skb = skb;
429 forw_packet->if_incoming = bat_priv->primary_if; 442 forw_packet->if_incoming = primary_if;
430 443
431 /* how often did we send the bcast packet ? */ 444 /* how often did we send the bcast packet ? */
432 forw_packet->num_packets = 0; 445 forw_packet->num_packets = 0;
@@ -439,6 +452,8 @@ packet_free:
439out_and_inc: 452out_and_inc:
440 atomic_inc(&bat_priv->bcast_queue_left); 453 atomic_inc(&bat_priv->bcast_queue_left);
441out: 454out:
455 if (primary_if)
456 hardif_free_ref(primary_if);
442 return NETDEV_TX_BUSY; 457 return NETDEV_TX_BUSY;
443} 458}
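
Read together with the forw_packet_free() hunk above: on the success path (not shown in this excerpt, assumed to return NETDEV_TX_OK after queueing) the reference obtained from primary_if_get_selected() is handed over to forw_packet->if_incoming and released later by forw_packet_free(); only the error paths fall through to out: and drop it immediately. A sketch of that hand-off:

	forw_packet->if_incoming = primary_if; /* ref now owned by the packet */
	/* ... queue the packet and return NETDEV_TX_OK on success ... */

	out:
		if (primary_if)
			hardif_free_ref(primary_if); /* error paths drop the ref here */
		return NETDEV_TX_BUSY;
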
444 459
@@ -526,6 +541,7 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
526{ 541{
527 struct forw_packet *forw_packet; 542 struct forw_packet *forw_packet;
528 struct hlist_node *tmp_node, *safe_tmp_node; 543 struct hlist_node *tmp_node, *safe_tmp_node;
544 bool pending;
529 545
530 if (hard_iface) 546 if (hard_iface)
531 bat_dbg(DBG_BATMAN, bat_priv, 547 bat_dbg(DBG_BATMAN, bat_priv,
@@ -554,8 +570,13 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
554 * send_outstanding_bcast_packet() will lock the list to 570 * send_outstanding_bcast_packet() will lock the list to
555 * delete the item from the list 571 * delete the item from the list
556 */ 572 */
557 cancel_delayed_work_sync(&forw_packet->delayed_work); 573 pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
558 spin_lock_bh(&bat_priv->forw_bcast_list_lock); 574 spin_lock_bh(&bat_priv->forw_bcast_list_lock);
575
576 if (pending) {
577 hlist_del(&forw_packet->list);
578 forw_packet_free(forw_packet);
579 }
559 } 580 }
560 spin_unlock_bh(&bat_priv->forw_bcast_list_lock); 581 spin_unlock_bh(&bat_priv->forw_bcast_list_lock);
561 582
@@ -578,8 +599,13 @@ void purge_outstanding_packets(struct bat_priv *bat_priv,
578 * send_outstanding_bat_packet() will lock the list to 599 * send_outstanding_bat_packet() will lock the list to
579 * delete the item from the list 600 * delete the item from the list
580 */ 601 */
581 cancel_delayed_work_sync(&forw_packet->delayed_work); 602 pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
582 spin_lock_bh(&bat_priv->forw_bat_list_lock); 603 spin_lock_bh(&bat_priv->forw_bat_list_lock);
604
605 if (pending) {
606 hlist_del(&forw_packet->list);
607 forw_packet_free(forw_packet);
608 }
583 } 609 }
584 spin_unlock_bh(&bat_priv->forw_bat_list_lock); 610 spin_unlock_bh(&bat_priv->forw_bat_list_lock);
585} 611}
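
cancel_delayed_work_sync() returns true when the work item was still pending and got cancelled before it could run, and false when it was not queued or had already begun executing. The purge hunks above use this to settle ownership: if the handler never ran, its own unlink-and-free code never ran either, so the purge routine must do both itself. Condensed:

	pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);

	if (pending) {
		/* the handler never ran, so it will not unlink or
		 * free the packet - do it here instead */
		hlist_del(&forw_packet->list);
		forw_packet_free(forw_packet);
	}
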
diff --git a/net/batman-adv/send.h b/net/batman-adv/send.h
index 7b2ff19c05e7..247172d71e4b 100644
--- a/net/batman-adv/send.h
+++ b/net/batman-adv/send.h
@@ -29,7 +29,7 @@ void schedule_own_packet(struct hard_iface *hard_iface);
29void schedule_forward_packet(struct orig_node *orig_node, 29void schedule_forward_packet(struct orig_node *orig_node,
30 struct ethhdr *ethhdr, 30 struct ethhdr *ethhdr,
31 struct batman_packet *batman_packet, 31 struct batman_packet *batman_packet,
32 uint8_t directlink, int hna_buff_len, 32 uint8_t directlink, int tt_buff_len,
33 struct hard_iface *if_outgoing); 33 struct hard_iface *if_outgoing);
34int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb); 34int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb);
35void send_outstanding_bat_packet(struct work_struct *work); 35void send_outstanding_bat_packet(struct work_struct *work);
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 824e1f6e50f2..d5aa60999e83 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -43,8 +43,6 @@ static void bat_get_drvinfo(struct net_device *dev,
43static u32 bat_get_msglevel(struct net_device *dev); 43static u32 bat_get_msglevel(struct net_device *dev);
44static void bat_set_msglevel(struct net_device *dev, u32 value); 44static void bat_set_msglevel(struct net_device *dev, u32 value);
45static u32 bat_get_link(struct net_device *dev); 45static u32 bat_get_link(struct net_device *dev);
46static u32 bat_get_rx_csum(struct net_device *dev);
47static int bat_set_rx_csum(struct net_device *dev, u32 data);
48 46
49static const struct ethtool_ops bat_ethtool_ops = { 47static const struct ethtool_ops bat_ethtool_ops = {
50 .get_settings = bat_get_settings, 48 .get_settings = bat_get_settings,
@@ -52,8 +50,6 @@ static const struct ethtool_ops bat_ethtool_ops = {
52 .get_msglevel = bat_get_msglevel, 50 .get_msglevel = bat_get_msglevel,
53 .set_msglevel = bat_set_msglevel, 51 .set_msglevel = bat_set_msglevel,
54 .get_link = bat_get_link, 52 .get_link = bat_get_link,
55 .get_rx_csum = bat_get_rx_csum,
56 .set_rx_csum = bat_set_rx_csum
57}; 53};
58 54
59int my_skb_head_push(struct sk_buff *skb, unsigned int len) 55int my_skb_head_push(struct sk_buff *skb, unsigned int len)
@@ -76,120 +72,371 @@ int my_skb_head_push(struct sk_buff *skb, unsigned int len)
76 return 0; 72 return 0;
77} 73}
78 74
79static void softif_neigh_free_rcu(struct rcu_head *rcu)
80{
81 struct softif_neigh *softif_neigh;
82
83 softif_neigh = container_of(rcu, struct softif_neigh, rcu);
84 kfree(softif_neigh);
85}
86
87static void softif_neigh_free_ref(struct softif_neigh *softif_neigh) 75static void softif_neigh_free_ref(struct softif_neigh *softif_neigh)
88{ 76{
89 if (atomic_dec_and_test(&softif_neigh->refcount)) 77 if (atomic_dec_and_test(&softif_neigh->refcount))
90 call_rcu(&softif_neigh->rcu, softif_neigh_free_rcu); 78 kfree_rcu(softif_neigh, rcu);
91} 79}
92 80
93void softif_neigh_purge(struct bat_priv *bat_priv) 81static void softif_neigh_vid_free_rcu(struct rcu_head *rcu)
94{ 82{
95 struct softif_neigh *softif_neigh, *softif_neigh_tmp; 83 struct softif_neigh_vid *softif_neigh_vid;
84 struct softif_neigh *softif_neigh;
96 struct hlist_node *node, *node_tmp; 85 struct hlist_node *node, *node_tmp;
86 struct bat_priv *bat_priv;
97 87
98 spin_lock_bh(&bat_priv->softif_neigh_lock); 88 softif_neigh_vid = container_of(rcu, struct softif_neigh_vid, rcu);
89 bat_priv = softif_neigh_vid->bat_priv;
99 90
91 spin_lock_bh(&bat_priv->softif_neigh_lock);
100 hlist_for_each_entry_safe(softif_neigh, node, node_tmp, 92 hlist_for_each_entry_safe(softif_neigh, node, node_tmp,
101 &bat_priv->softif_neigh_list, list) { 93 &softif_neigh_vid->softif_neigh_list, list) {
94 hlist_del_rcu(&softif_neigh->list);
95 softif_neigh_free_ref(softif_neigh);
96 }
97 spin_unlock_bh(&bat_priv->softif_neigh_lock);
102 98
103 if ((!time_after(jiffies, softif_neigh->last_seen + 99 kfree(softif_neigh_vid);
104 msecs_to_jiffies(SOFTIF_NEIGH_TIMEOUT))) && 100}
105 (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE))
106 continue;
107 101
108 hlist_del_rcu(&softif_neigh->list); 102static void softif_neigh_vid_free_ref(struct softif_neigh_vid *softif_neigh_vid)
103{
104 if (atomic_dec_and_test(&softif_neigh_vid->refcount))
105 call_rcu(&softif_neigh_vid->rcu, softif_neigh_vid_free_rcu);
106}
109 107
110 if (bat_priv->softif_neigh == softif_neigh) { 108static struct softif_neigh_vid *softif_neigh_vid_get(struct bat_priv *bat_priv,
111 bat_dbg(DBG_ROUTES, bat_priv, 109 short vid)
112 "Current mesh exit point '%pM' vanished " 110{
113 "(vid: %d).\n", 111 struct softif_neigh_vid *softif_neigh_vid;
114 softif_neigh->addr, softif_neigh->vid); 112 struct hlist_node *node;
115 softif_neigh_tmp = bat_priv->softif_neigh;
116 bat_priv->softif_neigh = NULL;
117 softif_neigh_free_ref(softif_neigh_tmp);
118 }
119 113
120 softif_neigh_free_ref(softif_neigh); 114 rcu_read_lock();
115 hlist_for_each_entry_rcu(softif_neigh_vid, node,
116 &bat_priv->softif_neigh_vids, list) {
117 if (softif_neigh_vid->vid != vid)
118 continue;
119
120 if (!atomic_inc_not_zero(&softif_neigh_vid->refcount))
121 continue;
122
123 goto out;
121 } 124 }
122 125
123 spin_unlock_bh(&bat_priv->softif_neigh_lock); 126 softif_neigh_vid = kzalloc(sizeof(struct softif_neigh_vid),
127 GFP_ATOMIC);
128 if (!softif_neigh_vid)
129 goto out;
130
131 softif_neigh_vid->vid = vid;
132 softif_neigh_vid->bat_priv = bat_priv;
133
134 /* initialize with 2 - caller decrements counter by one */
135 atomic_set(&softif_neigh_vid->refcount, 2);
136 INIT_HLIST_HEAD(&softif_neigh_vid->softif_neigh_list);
137 INIT_HLIST_NODE(&softif_neigh_vid->list);
138 spin_lock_bh(&bat_priv->softif_neigh_vid_lock);
139 hlist_add_head_rcu(&softif_neigh_vid->list,
140 &bat_priv->softif_neigh_vids);
141 spin_unlock_bh(&bat_priv->softif_neigh_vid_lock);
142
143out:
144 rcu_read_unlock();
145 return softif_neigh_vid;
124} 146}
125 147
126static struct softif_neigh *softif_neigh_get(struct bat_priv *bat_priv, 148static struct softif_neigh *softif_neigh_get(struct bat_priv *bat_priv,
127 uint8_t *addr, short vid) 149 uint8_t *addr, short vid)
128{ 150{
129 struct softif_neigh *softif_neigh; 151 struct softif_neigh_vid *softif_neigh_vid;
152 struct softif_neigh *softif_neigh = NULL;
130 struct hlist_node *node; 153 struct hlist_node *node;
131 154
155 softif_neigh_vid = softif_neigh_vid_get(bat_priv, vid);
156 if (!softif_neigh_vid)
157 goto out;
158
132 rcu_read_lock(); 159 rcu_read_lock();
133 hlist_for_each_entry_rcu(softif_neigh, node, 160 hlist_for_each_entry_rcu(softif_neigh, node,
134 &bat_priv->softif_neigh_list, list) { 161 &softif_neigh_vid->softif_neigh_list,
162 list) {
135 if (!compare_eth(softif_neigh->addr, addr)) 163 if (!compare_eth(softif_neigh->addr, addr))
136 continue; 164 continue;
137 165
138 if (softif_neigh->vid != vid)
139 continue;
140
141 if (!atomic_inc_not_zero(&softif_neigh->refcount)) 166 if (!atomic_inc_not_zero(&softif_neigh->refcount))
142 continue; 167 continue;
143 168
144 softif_neigh->last_seen = jiffies; 169 softif_neigh->last_seen = jiffies;
145 goto out; 170 goto unlock;
146 } 171 }
147 172
148 softif_neigh = kzalloc(sizeof(struct softif_neigh), GFP_ATOMIC); 173 softif_neigh = kzalloc(sizeof(struct softif_neigh), GFP_ATOMIC);
149 if (!softif_neigh) 174 if (!softif_neigh)
150 goto out; 175 goto unlock;
151 176
152 memcpy(softif_neigh->addr, addr, ETH_ALEN); 177 memcpy(softif_neigh->addr, addr, ETH_ALEN);
153 softif_neigh->vid = vid;
154 softif_neigh->last_seen = jiffies; 178 softif_neigh->last_seen = jiffies;
155 /* initialize with 2 - caller decrements counter by one */ 179 /* initialize with 2 - caller decrements counter by one */
156 atomic_set(&softif_neigh->refcount, 2); 180 atomic_set(&softif_neigh->refcount, 2);
157 181
158 INIT_HLIST_NODE(&softif_neigh->list); 182 INIT_HLIST_NODE(&softif_neigh->list);
159 spin_lock_bh(&bat_priv->softif_neigh_lock); 183 spin_lock_bh(&bat_priv->softif_neigh_lock);
160 hlist_add_head_rcu(&softif_neigh->list, &bat_priv->softif_neigh_list); 184 hlist_add_head_rcu(&softif_neigh->list,
185 &softif_neigh_vid->softif_neigh_list);
161 spin_unlock_bh(&bat_priv->softif_neigh_lock); 186 spin_unlock_bh(&bat_priv->softif_neigh_lock);
162 187
188unlock:
189 rcu_read_unlock();
163out: 190out:
191 if (softif_neigh_vid)
192 softif_neigh_vid_free_ref(softif_neigh_vid);
193 return softif_neigh;
194}
195
196static struct softif_neigh *softif_neigh_get_selected(
197 struct softif_neigh_vid *softif_neigh_vid)
198{
199 struct softif_neigh *softif_neigh;
200
201 rcu_read_lock();
202 softif_neigh = rcu_dereference(softif_neigh_vid->softif_neigh);
203
204 if (softif_neigh && !atomic_inc_not_zero(&softif_neigh->refcount))
205 softif_neigh = NULL;
206
164 rcu_read_unlock(); 207 rcu_read_unlock();
165 return softif_neigh; 208 return softif_neigh;
166} 209}
167 210
211static struct softif_neigh *softif_neigh_vid_get_selected(
212 struct bat_priv *bat_priv,
213 short vid)
214{
215 struct softif_neigh_vid *softif_neigh_vid;
216 struct softif_neigh *softif_neigh = NULL;
217
218 softif_neigh_vid = softif_neigh_vid_get(bat_priv, vid);
219 if (!softif_neigh_vid)
220 goto out;
221
222 softif_neigh = softif_neigh_get_selected(softif_neigh_vid);
223out:
224 if (softif_neigh_vid)
225 softif_neigh_vid_free_ref(softif_neigh_vid);
226 return softif_neigh;
227}
228
229static void softif_neigh_vid_select(struct bat_priv *bat_priv,
230 struct softif_neigh *new_neigh,
231 short vid)
232{
233 struct softif_neigh_vid *softif_neigh_vid;
234 struct softif_neigh *curr_neigh;
235
236 softif_neigh_vid = softif_neigh_vid_get(bat_priv, vid);
237 if (!softif_neigh_vid)
238 goto out;
239
240 spin_lock_bh(&bat_priv->softif_neigh_lock);
241
242 if (new_neigh && !atomic_inc_not_zero(&new_neigh->refcount))
243 new_neigh = NULL;
244
245 curr_neigh = softif_neigh_vid->softif_neigh;
246 rcu_assign_pointer(softif_neigh_vid->softif_neigh, new_neigh);
247
248 if ((curr_neigh) && (!new_neigh))
249 bat_dbg(DBG_ROUTES, bat_priv,
250 "Removing mesh exit point on vid: %d (prev: %pM).\n",
251 vid, curr_neigh->addr);
252 else if ((curr_neigh) && (new_neigh))
253 bat_dbg(DBG_ROUTES, bat_priv,
254 "Changing mesh exit point on vid: %d from %pM "
255 "to %pM.\n", vid, curr_neigh->addr, new_neigh->addr);
256 else if ((!curr_neigh) && (new_neigh))
257 bat_dbg(DBG_ROUTES, bat_priv,
258 "Setting mesh exit point on vid: %d to %pM.\n",
259 vid, new_neigh->addr);
260
261 if (curr_neigh)
262 softif_neigh_free_ref(curr_neigh);
263
264 spin_unlock_bh(&bat_priv->softif_neigh_lock);
265
266out:
267 if (softif_neigh_vid)
268 softif_neigh_vid_free_ref(softif_neigh_vid);
269}
270
271static void softif_neigh_vid_deselect(struct bat_priv *bat_priv,
272 struct softif_neigh_vid *softif_neigh_vid)
273{
274 struct softif_neigh *curr_neigh;
275 struct softif_neigh *softif_neigh = NULL, *softif_neigh_tmp;
276 struct hard_iface *primary_if = NULL;
277 struct hlist_node *node;
278
279 primary_if = primary_if_get_selected(bat_priv);
280 if (!primary_if)
281 goto out;
282
283 /* find new softif_neigh immediately to avoid temporary loops */
284 rcu_read_lock();
285 curr_neigh = rcu_dereference(softif_neigh_vid->softif_neigh);
286
287 hlist_for_each_entry_rcu(softif_neigh_tmp, node,
288 &softif_neigh_vid->softif_neigh_list,
289 list) {
290 if (softif_neigh_tmp == curr_neigh)
291 continue;
292
293 /* we got a neighbor but its mac is 'bigger' than ours */
294 if (memcmp(primary_if->net_dev->dev_addr,
295 softif_neigh_tmp->addr, ETH_ALEN) < 0)
296 continue;
297
298 if (!atomic_inc_not_zero(&softif_neigh_tmp->refcount))
299 continue;
300
301 softif_neigh = softif_neigh_tmp;
302 goto unlock;
303 }
304
305unlock:
306 rcu_read_unlock();
307out:
308 softif_neigh_vid_select(bat_priv, softif_neigh, softif_neigh_vid->vid);
309
310 if (primary_if)
311 hardif_free_ref(primary_if);
312 if (softif_neigh)
313 softif_neigh_free_ref(softif_neigh);
314}
315
168int softif_neigh_seq_print_text(struct seq_file *seq, void *offset) 316int softif_neigh_seq_print_text(struct seq_file *seq, void *offset)
169{ 317{
170 struct net_device *net_dev = (struct net_device *)seq->private; 318 struct net_device *net_dev = (struct net_device *)seq->private;
171 struct bat_priv *bat_priv = netdev_priv(net_dev); 319 struct bat_priv *bat_priv = netdev_priv(net_dev);
320 struct softif_neigh_vid *softif_neigh_vid;
172 struct softif_neigh *softif_neigh; 321 struct softif_neigh *softif_neigh;
173 struct hlist_node *node; 322 struct hard_iface *primary_if;
323 struct hlist_node *node, *node_tmp;
324 struct softif_neigh *curr_softif_neigh;
325 int ret = 0, last_seen_secs, last_seen_msecs;
326
327 primary_if = primary_if_get_selected(bat_priv);
328 if (!primary_if) {
329 ret = seq_printf(seq, "BATMAN mesh %s disabled - "
330 "please specify interfaces to enable it\n",
331 net_dev->name);
332 goto out;
333 }
174 334
175 if (!bat_priv->primary_if) { 335 if (primary_if->if_status != IF_ACTIVE) {
176 return seq_printf(seq, "BATMAN mesh %s disabled - " 336 ret = seq_printf(seq, "BATMAN mesh %s "
177 "please specify interfaces to enable it\n", 337 "disabled - primary interface not active\n",
178 net_dev->name); 338 net_dev->name);
339 goto out;
179 } 340 }
180 341
181 seq_printf(seq, "Softif neighbor list (%s)\n", net_dev->name); 342 seq_printf(seq, "Softif neighbor list (%s)\n", net_dev->name);
182 343
183 rcu_read_lock(); 344 rcu_read_lock();
184 hlist_for_each_entry_rcu(softif_neigh, node, 345 hlist_for_each_entry_rcu(softif_neigh_vid, node,
185 &bat_priv->softif_neigh_list, list) 346 &bat_priv->softif_neigh_vids, list) {
186 seq_printf(seq, "%s %pM (vid: %d)\n", 347 seq_printf(seq, " %-15s %s on vid: %d\n",
187 bat_priv->softif_neigh == softif_neigh 348 "Originator", "last-seen", softif_neigh_vid->vid);
188 ? "=>" : " ", softif_neigh->addr, 349
189 softif_neigh->vid); 350 curr_softif_neigh = softif_neigh_get_selected(softif_neigh_vid);
351
352 hlist_for_each_entry_rcu(softif_neigh, node_tmp,
353 &softif_neigh_vid->softif_neigh_list,
354 list) {
355 last_seen_secs = jiffies_to_msecs(jiffies -
356 softif_neigh->last_seen) / 1000;
357 last_seen_msecs = jiffies_to_msecs(jiffies -
358 softif_neigh->last_seen) % 1000;
359 seq_printf(seq, "%s %pM %3i.%03is\n",
360 curr_softif_neigh == softif_neigh
361 ? "=>" : " ", softif_neigh->addr,
362 last_seen_secs, last_seen_msecs);
363 }
364
365 if (curr_softif_neigh)
366 softif_neigh_free_ref(curr_softif_neigh);
367
368 seq_printf(seq, "\n");
369 }
190 rcu_read_unlock(); 370 rcu_read_unlock();
191 371
192 return 0; 372out:
373 if (primary_if)
374 hardif_free_ref(primary_if);
375 return ret;
376}
377
378void softif_neigh_purge(struct bat_priv *bat_priv)
379{
380 struct softif_neigh *softif_neigh, *curr_softif_neigh;
381 struct softif_neigh_vid *softif_neigh_vid;
382 struct hlist_node *node, *node_tmp, *node_tmp2;
383 char do_deselect;
384
385 rcu_read_lock();
386 hlist_for_each_entry_rcu(softif_neigh_vid, node,
387 &bat_priv->softif_neigh_vids, list) {
388 if (!atomic_inc_not_zero(&softif_neigh_vid->refcount))
389 continue;
390
391 curr_softif_neigh = softif_neigh_get_selected(softif_neigh_vid);
392 do_deselect = 0;
393
394 spin_lock_bh(&bat_priv->softif_neigh_lock);
395 hlist_for_each_entry_safe(softif_neigh, node_tmp, node_tmp2,
396 &softif_neigh_vid->softif_neigh_list,
397 list) {
398 if ((!time_after(jiffies, softif_neigh->last_seen +
399 msecs_to_jiffies(SOFTIF_NEIGH_TIMEOUT))) &&
400 (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE))
401 continue;
402
403 if (curr_softif_neigh == softif_neigh) {
404 bat_dbg(DBG_ROUTES, bat_priv,
405 "Current mesh exit point on vid: %d "
406 "'%pM' vanished.\n",
407 softif_neigh_vid->vid,
408 softif_neigh->addr);
409 do_deselect = 1;
410 }
411
412 hlist_del_rcu(&softif_neigh->list);
413 softif_neigh_free_ref(softif_neigh);
414 }
415 spin_unlock_bh(&bat_priv->softif_neigh_lock);
416
417 /* softif_neigh_vid_deselect() needs to acquire the
418 * softif_neigh_lock */
419 if (do_deselect)
420 softif_neigh_vid_deselect(bat_priv, softif_neigh_vid);
421
422 if (curr_softif_neigh)
423 softif_neigh_free_ref(curr_softif_neigh);
424
425 softif_neigh_vid_free_ref(softif_neigh_vid);
426 }
427 rcu_read_unlock();
428
429 spin_lock_bh(&bat_priv->softif_neigh_vid_lock);
430 hlist_for_each_entry_safe(softif_neigh_vid, node, node_tmp,
431 &bat_priv->softif_neigh_vids, list) {
432 if (!hlist_empty(&softif_neigh_vid->softif_neigh_list))
433 continue;
434
435 hlist_del_rcu(&softif_neigh_vid->list);
436 softif_neigh_vid_free_ref(softif_neigh_vid);
437 }
438 spin_unlock_bh(&bat_priv->softif_neigh_vid_lock);
439
193} 440}
194 441
195static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev, 442static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev,
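
Two RCU freeing styles meet in the hunk above: softif_neigh needs nothing beyond kfree(), so its dedicated RCU callback can be collapsed into kfree_rcu(), while softif_neigh_vid must first empty its child list and therefore keeps call_rcu() with an explicit callback. A minimal sketch of the kfree_rcu() side, with the struct reduced to the relevant fields:

	struct softif_neigh {
		struct hlist_node list;
		atomic_t refcount;
		struct rcu_head rcu;	/* field name handed to kfree_rcu() */
	};

	static void softif_neigh_free_ref(struct softif_neigh *softif_neigh)
	{
		if (atomic_dec_and_test(&softif_neigh->refcount))
			kfree_rcu(softif_neigh, rcu); /* kfree() after grace period */
	}
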
@@ -198,7 +445,9 @@ static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev,
198 struct bat_priv *bat_priv = netdev_priv(dev); 445 struct bat_priv *bat_priv = netdev_priv(dev);
199 struct ethhdr *ethhdr = (struct ethhdr *)skb->data; 446 struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
200 struct batman_packet *batman_packet; 447 struct batman_packet *batman_packet;
201 struct softif_neigh *softif_neigh, *softif_neigh_tmp; 448 struct softif_neigh *softif_neigh = NULL;
449 struct hard_iface *primary_if = NULL;
450 struct softif_neigh *curr_softif_neigh = NULL;
202 451
203 if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) 452 if (ntohs(ethhdr->h_proto) == ETH_P_8021Q)
204 batman_packet = (struct batman_packet *) 453 batman_packet = (struct batman_packet *)
@@ -207,63 +456,52 @@ static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev,
207 batman_packet = (struct batman_packet *)(skb->data + ETH_HLEN); 456 batman_packet = (struct batman_packet *)(skb->data + ETH_HLEN);
208 457
209 if (batman_packet->version != COMPAT_VERSION) 458 if (batman_packet->version != COMPAT_VERSION)
210 goto err; 459 goto out;
211 460
212 if (batman_packet->packet_type != BAT_PACKET) 461 if (batman_packet->packet_type != BAT_PACKET)
213 goto err; 462 goto out;
214 463
215 if (!(batman_packet->flags & PRIMARIES_FIRST_HOP)) 464 if (!(batman_packet->flags & PRIMARIES_FIRST_HOP))
216 goto err; 465 goto out;
217 466
218 if (is_my_mac(batman_packet->orig)) 467 if (is_my_mac(batman_packet->orig))
219 goto err; 468 goto out;
220 469
221 softif_neigh = softif_neigh_get(bat_priv, batman_packet->orig, vid); 470 softif_neigh = softif_neigh_get(bat_priv, batman_packet->orig, vid);
222
223 if (!softif_neigh) 471 if (!softif_neigh)
224 goto err; 472 goto out;
473
474 curr_softif_neigh = softif_neigh_vid_get_selected(bat_priv, vid);
475 if (curr_softif_neigh == softif_neigh)
476 goto out;
225 477
226 if (bat_priv->softif_neigh == softif_neigh) 478 primary_if = primary_if_get_selected(bat_priv);
479 if (!primary_if)
227 goto out; 480 goto out;
228 481
229 /* we got a neighbor but its mac is 'bigger' than ours */ 482 /* we got a neighbor but its mac is 'bigger' than ours */
230 if (memcmp(bat_priv->primary_if->net_dev->dev_addr, 483 if (memcmp(primary_if->net_dev->dev_addr,
231 softif_neigh->addr, ETH_ALEN) < 0) 484 softif_neigh->addr, ETH_ALEN) < 0)
232 goto out; 485 goto out;
233 486
234 /* switch to new 'smallest neighbor' */
235 if ((bat_priv->softif_neigh) &&
236 (memcmp(softif_neigh->addr, bat_priv->softif_neigh->addr,
237 ETH_ALEN) < 0)) {
238 bat_dbg(DBG_ROUTES, bat_priv,
239 "Changing mesh exit point from %pM (vid: %d) "
240 "to %pM (vid: %d).\n",
241 bat_priv->softif_neigh->addr,
242 bat_priv->softif_neigh->vid,
243 softif_neigh->addr, softif_neigh->vid);
244 softif_neigh_tmp = bat_priv->softif_neigh;
245 bat_priv->softif_neigh = softif_neigh;
246 softif_neigh_free_ref(softif_neigh_tmp);
247 /* we need to hold the additional reference */
248 goto err;
249 }
250
251 /* close own batX device and use softif_neigh as exit node */ 487 /* close own batX device and use softif_neigh as exit node */
252 if ((!bat_priv->softif_neigh) && 488 if (!curr_softif_neigh) {
253 (memcmp(softif_neigh->addr, 489 softif_neigh_vid_select(bat_priv, softif_neigh, vid);
254 bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN) < 0)) { 490 goto out;
255 bat_dbg(DBG_ROUTES, bat_priv,
256 "Setting mesh exit point to %pM (vid: %d).\n",
257 softif_neigh->addr, softif_neigh->vid);
258 bat_priv->softif_neigh = softif_neigh;
259 /* we need to hold the additional reference */
260 goto err;
261 } 491 }
262 492
493 /* switch to new 'smallest neighbor' */
494 if (memcmp(softif_neigh->addr, curr_softif_neigh->addr, ETH_ALEN) < 0)
495 softif_neigh_vid_select(bat_priv, softif_neigh, vid);
496
263out: 497out:
264 softif_neigh_free_ref(softif_neigh);
265err:
266 kfree_skb(skb); 498 kfree_skb(skb);
499 if (softif_neigh)
500 softif_neigh_free_ref(softif_neigh);
501 if (curr_softif_neigh)
502 softif_neigh_free_ref(curr_softif_neigh);
503 if (primary_if)
504 hardif_free_ref(primary_if);
267 return; 505 return;
268} 506}
269 507
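
All lookup helpers introduced in this file share one RCU read-side idiom: iterate under rcu_read_lock() and only return an object once atomic_inc_not_zero() succeeds, which skips objects whose refcount already hit zero and are merely waiting out their grace period. A generic sketch (entry, found, node and head are illustrative names):

	struct softif_neigh_vid *entry, *found = NULL;
	struct hlist_node *node;

	rcu_read_lock();
	hlist_for_each_entry_rcu(entry, node, head, list) {
		if (entry->vid != vid)
			continue;

		/* refcount of 0 means the entry is already being freed */
		if (!atomic_inc_not_zero(&entry->refcount))
			continue;

		found = entry;
		break;
	}
	rcu_read_unlock();
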
@@ -293,11 +531,11 @@ static int interface_set_mac_addr(struct net_device *dev, void *p)
293 if (!is_valid_ether_addr(addr->sa_data)) 531 if (!is_valid_ether_addr(addr->sa_data))
294 return -EADDRNOTAVAIL; 532 return -EADDRNOTAVAIL;
295 533
296 /* only modify hna-table if it has been initialised before */ 534 /* only modify transtable if it has been initialised before */
297 if (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE) { 535 if (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE) {
298 hna_local_remove(bat_priv, dev->dev_addr, 536 tt_local_remove(bat_priv, dev->dev_addr,
299 "mac address changed"); 537 "mac address changed");
300 hna_local_add(dev, addr->sa_data); 538 tt_local_add(dev, addr->sa_data);
301 } 539 }
302 540
303 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); 541 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
@@ -319,8 +557,10 @@ int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
319{ 557{
320 struct ethhdr *ethhdr = (struct ethhdr *)skb->data; 558 struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
321 struct bat_priv *bat_priv = netdev_priv(soft_iface); 559 struct bat_priv *bat_priv = netdev_priv(soft_iface);
560 struct hard_iface *primary_if = NULL;
322 struct bcast_packet *bcast_packet; 561 struct bcast_packet *bcast_packet;
323 struct vlan_ethhdr *vhdr; 562 struct vlan_ethhdr *vhdr;
563 struct softif_neigh *curr_softif_neigh = NULL;
324 int data_len = skb->len, ret; 564 int data_len = skb->len, ret;
325 short vid = -1; 565 short vid = -1;
326 bool do_bcast = false; 566 bool do_bcast = false;
@@ -348,11 +588,12 @@ int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
348 * if we have another chosen mesh exit node in range 588 * if we have another chosen mesh exit node in range
349 * it will transport the packets to the mesh 589 * it will transport the packets to the mesh
350 */ 590 */
351 if ((bat_priv->softif_neigh) && (bat_priv->softif_neigh->vid == vid)) 591 curr_softif_neigh = softif_neigh_vid_get_selected(bat_priv, vid);
592 if (curr_softif_neigh)
352 goto dropped; 593 goto dropped;
353 594
354 /* TODO: check this for locks */ 595 /* TODO: check this for locks */
355 hna_local_add(soft_iface, ethhdr->h_source); 596 tt_local_add(soft_iface, ethhdr->h_source);
356 597
357 if (is_multicast_ether_addr(ethhdr->h_dest)) { 598 if (is_multicast_ether_addr(ethhdr->h_dest)) {
358 ret = gw_is_target(bat_priv, skb); 599 ret = gw_is_target(bat_priv, skb);
@@ -366,7 +607,8 @@ int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
366 607
367 /* ethernet packet should be broadcast */ 608 /* ethernet packet should be broadcast */
368 if (do_bcast) { 609 if (do_bcast) {
369 if (!bat_priv->primary_if) 610 primary_if = primary_if_get_selected(bat_priv);
611 if (!primary_if)
370 goto dropped; 612 goto dropped;
371 613
372 if (my_skb_head_push(skb, sizeof(struct bcast_packet)) < 0) 614 if (my_skb_head_push(skb, sizeof(struct bcast_packet)) < 0)
@@ -382,7 +624,7 @@ int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
382 /* hw address of first interface is the orig mac because only 624 /* hw address of first interface is the orig mac because only
383 * this mac is known throughout the mesh */ 625 * this mac is known throughout the mesh */
384 memcpy(bcast_packet->orig, 626 memcpy(bcast_packet->orig,
385 bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN); 627 primary_if->net_dev->dev_addr, ETH_ALEN);
386 628
387 /* set broadcast sequence number */ 629 /* set broadcast sequence number */
388 bcast_packet->seqno = 630 bcast_packet->seqno =
@@ -410,6 +652,10 @@ dropped:
410dropped_freed: 652dropped_freed:
411 bat_priv->stats.tx_dropped++; 653 bat_priv->stats.tx_dropped++;
412end: 654end:
655 if (curr_softif_neigh)
656 softif_neigh_free_ref(curr_softif_neigh);
657 if (primary_if)
658 hardif_free_ref(primary_if);
413 return NETDEV_TX_OK; 659 return NETDEV_TX_OK;
414} 660}
415 661
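
interface_tx() keys the mesh-exit-point lookup on the frame's VLAN id. The hunk only shows the lookup call; the vid itself is derived from the 802.1Q header along these lines (a sketch; VLAN_VID_MASK is from <linux/if_vlan.h>):

	struct vlan_ethhdr *vhdr;
	short vid = -1;	/* -1 selects the untagged case */

	if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
		vhdr = (struct vlan_ethhdr *)skb->data;
		vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
	}
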
@@ -421,6 +667,7 @@ void interface_rx(struct net_device *soft_iface,
421 struct unicast_packet *unicast_packet; 667 struct unicast_packet *unicast_packet;
422 struct ethhdr *ethhdr; 668 struct ethhdr *ethhdr;
423 struct vlan_ethhdr *vhdr; 669 struct vlan_ethhdr *vhdr;
670 struct softif_neigh *curr_softif_neigh = NULL;
424 short vid = -1; 671 short vid = -1;
425 int ret; 672 int ret;
426 673
@@ -450,7 +697,8 @@ void interface_rx(struct net_device *soft_iface,
450 * if we have another chosen mesh exit node in range 697 * if we have another chosen mesh exit node in range
451 * it will transport the packets to the non-mesh network 698 * it will transport the packets to the non-mesh network
452 */ 699 */
453 if ((bat_priv->softif_neigh) && (bat_priv->softif_neigh->vid == vid)) { 700 curr_softif_neigh = softif_neigh_vid_get_selected(bat_priv, vid);
701 if (curr_softif_neigh) {
454 skb_push(skb, hdr_size); 702 skb_push(skb, hdr_size);
455 unicast_packet = (struct unicast_packet *)skb->data; 703 unicast_packet = (struct unicast_packet *)skb->data;
456 704
@@ -461,7 +709,7 @@ void interface_rx(struct net_device *soft_iface,
461 skb_reset_mac_header(skb); 709 skb_reset_mac_header(skb);
462 710
463 memcpy(unicast_packet->dest, 711 memcpy(unicast_packet->dest,
464 bat_priv->softif_neigh->addr, ETH_ALEN); 712 curr_softif_neigh->addr, ETH_ALEN);
465 ret = route_unicast_packet(skb, recv_if); 713 ret = route_unicast_packet(skb, recv_if);
466 if (ret == NET_RX_DROP) 714 if (ret == NET_RX_DROP)
467 goto dropped; 715 goto dropped;
@@ -486,11 +734,13 @@ void interface_rx(struct net_device *soft_iface,
486 soft_iface->last_rx = jiffies; 734 soft_iface->last_rx = jiffies;
487 735
488 netif_rx(skb); 736 netif_rx(skb);
489 return; 737 goto out;
490 738
491dropped: 739dropped:
492 kfree_skb(skb); 740 kfree_skb(skb);
493out: 741out:
742 if (curr_softif_neigh)
743 softif_neigh_free_ref(curr_softif_neigh);
494 return; 744 return;
495} 745}
496 746
@@ -524,14 +774,15 @@ static void interface_setup(struct net_device *dev)
524 dev->hard_start_xmit = interface_tx; 774 dev->hard_start_xmit = interface_tx;
525#endif 775#endif
526 dev->destructor = free_netdev; 776 dev->destructor = free_netdev;
777 dev->tx_queue_len = 0;
527 778
528 /** 779 /**
529 * can't call min_mtu, because the needed variables 780 * can't call min_mtu, because the needed variables
530 * have not been initialized yet 781 * have not been initialized yet
531 */ 782 */
532 dev->mtu = ETH_DATA_LEN; 783 dev->mtu = ETH_DATA_LEN;
533 dev->hard_header_len = BAT_HEADER_LEN; /* reserve more space in the 784 /* reserve more space in the skbuff for our header */
534 * skbuff for our header */ 785 dev->hard_header_len = BAT_HEADER_LEN;
535 786
536 /* generate random address */ 787 /* generate random address */
537 random_ether_addr(dev_addr); 788 random_ether_addr(dev_addr);
@@ -556,7 +807,7 @@ struct net_device *softif_create(char *name)
556 goto out; 807 goto out;
557 } 808 }
558 809
559 ret = register_netdev(soft_iface); 810 ret = register_netdevice(soft_iface);
560 if (ret < 0) { 811 if (ret < 0) {
561 pr_err("Unable to register the batman interface '%s': %i\n", 812 pr_err("Unable to register the batman interface '%s': %i\n",
562 name, ret); 813 name, ret);
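
Note the switch from register_netdev() to register_netdevice(): the former is a convenience wrapper that takes the RTNL lock itself, while the latter requires the caller to hold it already. The change implies softif_create() is now reached from a context that owns rtnl_lock(); a caller that does not would have to wrap it, roughly:

	rtnl_lock();
	soft_iface = softif_create("bat0"); /* ends up in register_netdevice() */
	rtnl_unlock();
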
@@ -580,11 +831,10 @@ struct net_device *softif_create(char *name)
580 831
581 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE); 832 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
582 atomic_set(&bat_priv->bcast_seqno, 1); 833 atomic_set(&bat_priv->bcast_seqno, 1);
583 atomic_set(&bat_priv->hna_local_changed, 0); 834 atomic_set(&bat_priv->tt_local_changed, 0);
584 835
585 bat_priv->primary_if = NULL; 836 bat_priv->primary_if = NULL;
586 bat_priv->num_ifaces = 0; 837 bat_priv->num_ifaces = 0;
587 bat_priv->softif_neigh = NULL;
588 838
589 ret = sysfs_add_meshif(soft_iface); 839 ret = sysfs_add_meshif(soft_iface);
590 if (ret < 0) 840 if (ret < 0)
@@ -640,7 +890,7 @@ static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
640{ 890{
641 cmd->supported = 0; 891 cmd->supported = 0;
642 cmd->advertising = 0; 892 cmd->advertising = 0;
643 cmd->speed = SPEED_10; 893 ethtool_cmd_speed_set(cmd, SPEED_10);
644 cmd->duplex = DUPLEX_FULL; 894 cmd->duplex = DUPLEX_FULL;
645 cmd->port = PORT_TP; 895 cmd->port = PORT_TP;
646 cmd->phy_address = 0; 896 cmd->phy_address = 0;
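
ethtool_cmd_speed_set() replaces the direct cmd->speed assignment because link speeds may no longer fit the 16-bit speed field alone; the helper spreads the value across speed and speed_hi. Roughly as defined in <linux/ethtool.h>:

	static inline void ethtool_cmd_speed_set(struct ethtool_cmd *ep,
						 __u32 speed)
	{
		ep->speed = (__u16)speed;
		ep->speed_hi = (__u16)(speed >> 16);
	}
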
@@ -675,12 +925,3 @@ static u32 bat_get_link(struct net_device *dev)
675 return 1; 925 return 1;
676} 926}
677 927
678static u32 bat_get_rx_csum(struct net_device *dev)
679{
680 return 0;
681}
682
683static int bat_set_rx_csum(struct net_device *dev, u32 data)
684{
685 return -EOPNOTSUPP;
686}
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 8d15b48d1692..7b729660cbfd 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -22,43 +22,44 @@
22#include "main.h" 22#include "main.h"
23#include "translation-table.h" 23#include "translation-table.h"
24#include "soft-interface.h" 24#include "soft-interface.h"
25#include "hard-interface.h"
25#include "hash.h" 26#include "hash.h"
26#include "originator.h" 27#include "originator.h"
27 28
28static void hna_local_purge(struct work_struct *work); 29static void tt_local_purge(struct work_struct *work);
29static void _hna_global_del_orig(struct bat_priv *bat_priv, 30static void _tt_global_del_orig(struct bat_priv *bat_priv,
30 struct hna_global_entry *hna_global_entry, 31 struct tt_global_entry *tt_global_entry,
31 char *message); 32 char *message);
32 33
33/* returns 1 if they are the same mac addr */ 34/* returns 1 if they are the same mac addr */
34static int compare_lhna(struct hlist_node *node, void *data2) 35static int compare_ltt(struct hlist_node *node, void *data2)
35{ 36{
36 void *data1 = container_of(node, struct hna_local_entry, hash_entry); 37 void *data1 = container_of(node, struct tt_local_entry, hash_entry);
37 38
38 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0); 39 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
39} 40}
40 41
41/* returns 1 if they are the same mac addr */ 42/* returns 1 if they are the same mac addr */
42static int compare_ghna(struct hlist_node *node, void *data2) 43static int compare_gtt(struct hlist_node *node, void *data2)
43{ 44{
44 void *data1 = container_of(node, struct hna_global_entry, hash_entry); 45 void *data1 = container_of(node, struct tt_global_entry, hash_entry);
45 46
46 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0); 47 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
47} 48}
48 49
49static void hna_local_start_timer(struct bat_priv *bat_priv) 50static void tt_local_start_timer(struct bat_priv *bat_priv)
50{ 51{
51 INIT_DELAYED_WORK(&bat_priv->hna_work, hna_local_purge); 52 INIT_DELAYED_WORK(&bat_priv->tt_work, tt_local_purge);
52 queue_delayed_work(bat_event_workqueue, &bat_priv->hna_work, 10 * HZ); 53 queue_delayed_work(bat_event_workqueue, &bat_priv->tt_work, 10 * HZ);
53} 54}
54 55
55static struct hna_local_entry *hna_local_hash_find(struct bat_priv *bat_priv, 56static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv,
56 void *data) 57 void *data)
57{ 58{
58 struct hashtable_t *hash = bat_priv->hna_local_hash; 59 struct hashtable_t *hash = bat_priv->tt_local_hash;
59 struct hlist_head *head; 60 struct hlist_head *head;
60 struct hlist_node *node; 61 struct hlist_node *node;
61 struct hna_local_entry *hna_local_entry, *hna_local_entry_tmp = NULL; 62 struct tt_local_entry *tt_local_entry, *tt_local_entry_tmp = NULL;
62 int index; 63 int index;
63 64
64 if (!hash) 65 if (!hash)
@@ -68,26 +69,26 @@ static struct hna_local_entry *hna_local_hash_find(struct bat_priv *bat_priv,
68 head = &hash->table[index]; 69 head = &hash->table[index];
69 70
70 rcu_read_lock(); 71 rcu_read_lock();
71 hlist_for_each_entry_rcu(hna_local_entry, node, head, hash_entry) { 72 hlist_for_each_entry_rcu(tt_local_entry, node, head, hash_entry) {
72 if (!compare_eth(hna_local_entry, data)) 73 if (!compare_eth(tt_local_entry, data))
73 continue; 74 continue;
74 75
75 hna_local_entry_tmp = hna_local_entry; 76 tt_local_entry_tmp = tt_local_entry;
76 break; 77 break;
77 } 78 }
78 rcu_read_unlock(); 79 rcu_read_unlock();
79 80
80 return hna_local_entry_tmp; 81 return tt_local_entry_tmp;
81} 82}
82 83
83static struct hna_global_entry *hna_global_hash_find(struct bat_priv *bat_priv, 84static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv,
84 void *data) 85 void *data)
85{ 86{
86 struct hashtable_t *hash = bat_priv->hna_global_hash; 87 struct hashtable_t *hash = bat_priv->tt_global_hash;
87 struct hlist_head *head; 88 struct hlist_head *head;
88 struct hlist_node *node; 89 struct hlist_node *node;
89 struct hna_global_entry *hna_global_entry; 90 struct tt_global_entry *tt_global_entry;
90 struct hna_global_entry *hna_global_entry_tmp = NULL; 91 struct tt_global_entry *tt_global_entry_tmp = NULL;
91 int index; 92 int index;
92 93
93 if (!hash) 94 if (!hash)
@@ -97,125 +98,125 @@ static struct hna_global_entry *hna_global_hash_find(struct bat_priv *bat_priv,
97 head = &hash->table[index]; 98 head = &hash->table[index];
98 99
99 rcu_read_lock(); 100 rcu_read_lock();
100 hlist_for_each_entry_rcu(hna_global_entry, node, head, hash_entry) { 101 hlist_for_each_entry_rcu(tt_global_entry, node, head, hash_entry) {
101 if (!compare_eth(hna_global_entry, data)) 102 if (!compare_eth(tt_global_entry, data))
102 continue; 103 continue;
103 104
104 hna_global_entry_tmp = hna_global_entry; 105 tt_global_entry_tmp = tt_global_entry;
105 break; 106 break;
106 } 107 }
107 rcu_read_unlock(); 108 rcu_read_unlock();
108 109
109 return hna_global_entry_tmp; 110 return tt_global_entry_tmp;
110} 111}
111 112
112int hna_local_init(struct bat_priv *bat_priv) 113int tt_local_init(struct bat_priv *bat_priv)
113{ 114{
114 if (bat_priv->hna_local_hash) 115 if (bat_priv->tt_local_hash)
115 return 1; 116 return 1;
116 117
117 bat_priv->hna_local_hash = hash_new(1024); 118 bat_priv->tt_local_hash = hash_new(1024);
118 119
119 if (!bat_priv->hna_local_hash) 120 if (!bat_priv->tt_local_hash)
120 return 0; 121 return 0;
121 122
122 atomic_set(&bat_priv->hna_local_changed, 0); 123 atomic_set(&bat_priv->tt_local_changed, 0);
123 hna_local_start_timer(bat_priv); 124 tt_local_start_timer(bat_priv);
124 125
125 return 1; 126 return 1;
126} 127}
127 128
128void hna_local_add(struct net_device *soft_iface, uint8_t *addr) 129void tt_local_add(struct net_device *soft_iface, uint8_t *addr)
129{ 130{
130 struct bat_priv *bat_priv = netdev_priv(soft_iface); 131 struct bat_priv *bat_priv = netdev_priv(soft_iface);
131 struct hna_local_entry *hna_local_entry; 132 struct tt_local_entry *tt_local_entry;
132 struct hna_global_entry *hna_global_entry; 133 struct tt_global_entry *tt_global_entry;
133 int required_bytes; 134 int required_bytes;
134 135
135 spin_lock_bh(&bat_priv->hna_lhash_lock); 136 spin_lock_bh(&bat_priv->tt_lhash_lock);
136 hna_local_entry = hna_local_hash_find(bat_priv, addr); 137 tt_local_entry = tt_local_hash_find(bat_priv, addr);
137 spin_unlock_bh(&bat_priv->hna_lhash_lock); 138 spin_unlock_bh(&bat_priv->tt_lhash_lock);
138 139
139 if (hna_local_entry) { 140 if (tt_local_entry) {
140 hna_local_entry->last_seen = jiffies; 141 tt_local_entry->last_seen = jiffies;
141 return; 142 return;
142 } 143 }
143 144
144 /* only announce as many hosts as possible in the batman-packet and 145 /* only announce as many hosts as possible in the batman-packet and
145 space in batman_packet->num_hna That also should give a limit to 146 space in batman_packet->num_tt That also should give a limit to
146 MAC-flooding. */ 147 MAC-flooding. */
147 required_bytes = (bat_priv->num_local_hna + 1) * ETH_ALEN; 148 required_bytes = (bat_priv->num_local_tt + 1) * ETH_ALEN;
148 required_bytes += BAT_PACKET_LEN; 149 required_bytes += BAT_PACKET_LEN;
149 150
150 if ((required_bytes > ETH_DATA_LEN) || 151 if ((required_bytes > ETH_DATA_LEN) ||
151 (atomic_read(&bat_priv->aggregated_ogms) && 152 (atomic_read(&bat_priv->aggregated_ogms) &&
152 required_bytes > MAX_AGGREGATION_BYTES) || 153 required_bytes > MAX_AGGREGATION_BYTES) ||
153 (bat_priv->num_local_hna + 1 > 255)) { 154 (bat_priv->num_local_tt + 1 > 255)) {
154 bat_dbg(DBG_ROUTES, bat_priv, 155 bat_dbg(DBG_ROUTES, bat_priv,
155 "Can't add new local hna entry (%pM): " 156 "Can't add new local tt entry (%pM): "
156 "number of local hna entries exceeds packet size\n", 157 "number of local tt entries exceeds packet size\n",
157 addr); 158 addr);
158 return; 159 return;
159 } 160 }
160 161
161 bat_dbg(DBG_ROUTES, bat_priv, 162 bat_dbg(DBG_ROUTES, bat_priv,
162 "Creating new local hna entry: %pM\n", addr); 163 "Creating new local tt entry: %pM\n", addr);
163 164
164 hna_local_entry = kmalloc(sizeof(struct hna_local_entry), GFP_ATOMIC); 165 tt_local_entry = kmalloc(sizeof(struct tt_local_entry), GFP_ATOMIC);
165 if (!hna_local_entry) 166 if (!tt_local_entry)
166 return; 167 return;
167 168
168 memcpy(hna_local_entry->addr, addr, ETH_ALEN); 169 memcpy(tt_local_entry->addr, addr, ETH_ALEN);
169 hna_local_entry->last_seen = jiffies; 170 tt_local_entry->last_seen = jiffies;
170 171
171 /* the batman interface mac address should never be purged */ 172 /* the batman interface mac address should never be purged */
172 if (compare_eth(addr, soft_iface->dev_addr)) 173 if (compare_eth(addr, soft_iface->dev_addr))
173 hna_local_entry->never_purge = 1; 174 tt_local_entry->never_purge = 1;
174 else 175 else
175 hna_local_entry->never_purge = 0; 176 tt_local_entry->never_purge = 0;
176 177
177 spin_lock_bh(&bat_priv->hna_lhash_lock); 178 spin_lock_bh(&bat_priv->tt_lhash_lock);
178 179
179 hash_add(bat_priv->hna_local_hash, compare_lhna, choose_orig, 180 hash_add(bat_priv->tt_local_hash, compare_ltt, choose_orig,
180 hna_local_entry, &hna_local_entry->hash_entry); 181 tt_local_entry, &tt_local_entry->hash_entry);
181 bat_priv->num_local_hna++; 182 bat_priv->num_local_tt++;
182 atomic_set(&bat_priv->hna_local_changed, 1); 183 atomic_set(&bat_priv->tt_local_changed, 1);
183 184
184 spin_unlock_bh(&bat_priv->hna_lhash_lock); 185 spin_unlock_bh(&bat_priv->tt_lhash_lock);
185 186
186 /* remove address from global hash if present */ 187 /* remove address from global hash if present */
187 spin_lock_bh(&bat_priv->hna_ghash_lock); 188 spin_lock_bh(&bat_priv->tt_ghash_lock);
188 189
189 hna_global_entry = hna_global_hash_find(bat_priv, addr); 190 tt_global_entry = tt_global_hash_find(bat_priv, addr);
190 191
191 if (hna_global_entry) 192 if (tt_global_entry)
192 _hna_global_del_orig(bat_priv, hna_global_entry, 193 _tt_global_del_orig(bat_priv, tt_global_entry,
193 "local hna received"); 194 "local tt received");
194 195
195 spin_unlock_bh(&bat_priv->hna_ghash_lock); 196 spin_unlock_bh(&bat_priv->tt_ghash_lock);
196} 197}
197 198
198int hna_local_fill_buffer(struct bat_priv *bat_priv, 199int tt_local_fill_buffer(struct bat_priv *bat_priv,
199 unsigned char *buff, int buff_len) 200 unsigned char *buff, int buff_len)
200{ 201{
201 struct hashtable_t *hash = bat_priv->hna_local_hash; 202 struct hashtable_t *hash = bat_priv->tt_local_hash;
202 struct hna_local_entry *hna_local_entry; 203 struct tt_local_entry *tt_local_entry;
203 struct hlist_node *node; 204 struct hlist_node *node;
204 struct hlist_head *head; 205 struct hlist_head *head;
205 int i, count = 0; 206 int i, count = 0;
206 207
207 spin_lock_bh(&bat_priv->hna_lhash_lock); 208 spin_lock_bh(&bat_priv->tt_lhash_lock);
208 209
209 for (i = 0; i < hash->size; i++) { 210 for (i = 0; i < hash->size; i++) {
210 head = &hash->table[i]; 211 head = &hash->table[i];
211 212
212 rcu_read_lock(); 213 rcu_read_lock();
213 hlist_for_each_entry_rcu(hna_local_entry, node, 214 hlist_for_each_entry_rcu(tt_local_entry, node,
214 head, hash_entry) { 215 head, hash_entry) {
215 if (buff_len < (count + 1) * ETH_ALEN) 216 if (buff_len < (count + 1) * ETH_ALEN)
216 break; 217 break;
217 218
218 memcpy(buff + (count * ETH_ALEN), hna_local_entry->addr, 219 memcpy(buff + (count * ETH_ALEN), tt_local_entry->addr,
219 ETH_ALEN); 220 ETH_ALEN);
220 221
221 count++; 222 count++;
@@ -223,37 +224,47 @@ int hna_local_fill_buffer(struct bat_priv *bat_priv,
223 rcu_read_unlock(); 224 rcu_read_unlock();
224 } 225 }
225 226
226 /* if we did not get all new local hnas see you next time ;-) */ 227 /* if we did not get all new local tts see you next time ;-) */
227 if (count == bat_priv->num_local_hna) 228 if (count == bat_priv->num_local_tt)
228 atomic_set(&bat_priv->hna_local_changed, 0); 229 atomic_set(&bat_priv->tt_local_changed, 0);
229 230
230 spin_unlock_bh(&bat_priv->hna_lhash_lock); 231 spin_unlock_bh(&bat_priv->tt_lhash_lock);
231 return count; 232 return count;
232} 233}
233 234
234int hna_local_seq_print_text(struct seq_file *seq, void *offset) 235int tt_local_seq_print_text(struct seq_file *seq, void *offset)
235{ 236{
236 struct net_device *net_dev = (struct net_device *)seq->private; 237 struct net_device *net_dev = (struct net_device *)seq->private;
237 struct bat_priv *bat_priv = netdev_priv(net_dev); 238 struct bat_priv *bat_priv = netdev_priv(net_dev);
238 struct hashtable_t *hash = bat_priv->hna_local_hash; 239 struct hashtable_t *hash = bat_priv->tt_local_hash;
239 struct hna_local_entry *hna_local_entry; 240 struct tt_local_entry *tt_local_entry;
241 struct hard_iface *primary_if;
240 struct hlist_node *node; 242 struct hlist_node *node;
241 struct hlist_head *head; 243 struct hlist_head *head;
242 size_t buf_size, pos; 244 size_t buf_size, pos;
243 char *buff; 245 char *buff;
244 int i; 246 int i, ret = 0;
245 247
246 if (!bat_priv->primary_if) { 248 primary_if = primary_if_get_selected(bat_priv);
247 return seq_printf(seq, "BATMAN mesh %s disabled - " 249 if (!primary_if) {
248 "please specify interfaces to enable it\n", 250 ret = seq_printf(seq, "BATMAN mesh %s disabled - "
249 net_dev->name); 251 "please specify interfaces to enable it\n",
252 net_dev->name);
253 goto out;
254 }
255
256 if (primary_if->if_status != IF_ACTIVE) {
257 ret = seq_printf(seq, "BATMAN mesh %s disabled - "
258 "primary interface not active\n",
259 net_dev->name);
260 goto out;
250 } 261 }
251 262
252 seq_printf(seq, "Locally retrieved addresses (from %s) " 263 seq_printf(seq, "Locally retrieved addresses (from %s) "
253 "announced via HNA:\n", 264 "announced via TT:\n",
254 net_dev->name); 265 net_dev->name);
255 266
256 spin_lock_bh(&bat_priv->hna_lhash_lock); 267 spin_lock_bh(&bat_priv->tt_lhash_lock);
257 268
258 buf_size = 1; 269 buf_size = 1;
259 /* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */ 270 /* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */
@@ -268,8 +279,9 @@ int hna_local_seq_print_text(struct seq_file *seq, void *offset)
268 279
269 buff = kmalloc(buf_size, GFP_ATOMIC); 280 buff = kmalloc(buf_size, GFP_ATOMIC);
270 if (!buff) { 281 if (!buff) {
271 spin_unlock_bh(&bat_priv->hna_lhash_lock); 282 spin_unlock_bh(&bat_priv->tt_lhash_lock);
272 return -ENOMEM; 283 ret = -ENOMEM;
284 goto out;
273 } 285 }
274 286
275 buff[0] = '\0'; 287 buff[0] = '\0';
@@ -279,211 +291,225 @@ int hna_local_seq_print_text(struct seq_file *seq, void *offset)
279 head = &hash->table[i]; 291 head = &hash->table[i];
280 292
281 rcu_read_lock(); 293 rcu_read_lock();
282 hlist_for_each_entry_rcu(hna_local_entry, node, 294 hlist_for_each_entry_rcu(tt_local_entry, node,
283 head, hash_entry) { 295 head, hash_entry) {
284 pos += snprintf(buff + pos, 22, " * %pM\n", 296 pos += snprintf(buff + pos, 22, " * %pM\n",
285 hna_local_entry->addr); 297 tt_local_entry->addr);
286 } 298 }
287 rcu_read_unlock(); 299 rcu_read_unlock();
288 } 300 }
289 301
290 spin_unlock_bh(&bat_priv->hna_lhash_lock); 302 spin_unlock_bh(&bat_priv->tt_lhash_lock);
291 303
292 seq_printf(seq, "%s", buff); 304 seq_printf(seq, "%s", buff);
293 kfree(buff); 305 kfree(buff);
294 return 0; 306out:
307 if (primary_if)
308 hardif_free_ref(primary_if);
309 return ret;
295} 310}
296 311
297static void _hna_local_del(struct hlist_node *node, void *arg) 312static void _tt_local_del(struct hlist_node *node, void *arg)
298{ 313{
299 struct bat_priv *bat_priv = (struct bat_priv *)arg; 314 struct bat_priv *bat_priv = (struct bat_priv *)arg;
300 void *data = container_of(node, struct hna_local_entry, hash_entry); 315 void *data = container_of(node, struct tt_local_entry, hash_entry);
301 316
302 kfree(data); 317 kfree(data);
303 bat_priv->num_local_hna--; 318 bat_priv->num_local_tt--;
304 atomic_set(&bat_priv->hna_local_changed, 1); 319 atomic_set(&bat_priv->tt_local_changed, 1);
305} 320}
306 321
307static void hna_local_del(struct bat_priv *bat_priv, 322static void tt_local_del(struct bat_priv *bat_priv,
308 struct hna_local_entry *hna_local_entry, 323 struct tt_local_entry *tt_local_entry,
309 char *message) 324 char *message)
310{ 325{
311 bat_dbg(DBG_ROUTES, bat_priv, "Deleting local hna entry (%pM): %s\n", 326 bat_dbg(DBG_ROUTES, bat_priv, "Deleting local tt entry (%pM): %s\n",
312 hna_local_entry->addr, message); 327 tt_local_entry->addr, message);
313 328
314 hash_remove(bat_priv->hna_local_hash, compare_lhna, choose_orig, 329 hash_remove(bat_priv->tt_local_hash, compare_ltt, choose_orig,
315 hna_local_entry->addr); 330 tt_local_entry->addr);
316 _hna_local_del(&hna_local_entry->hash_entry, bat_priv); 331 _tt_local_del(&tt_local_entry->hash_entry, bat_priv);
317} 332}
318 333
319void hna_local_remove(struct bat_priv *bat_priv, 334void tt_local_remove(struct bat_priv *bat_priv,
320 uint8_t *addr, char *message) 335 uint8_t *addr, char *message)
321{ 336{
322 struct hna_local_entry *hna_local_entry; 337 struct tt_local_entry *tt_local_entry;
323 338
324 spin_lock_bh(&bat_priv->hna_lhash_lock); 339 spin_lock_bh(&bat_priv->tt_lhash_lock);
325 340
326 hna_local_entry = hna_local_hash_find(bat_priv, addr); 341 tt_local_entry = tt_local_hash_find(bat_priv, addr);
327 342
328 if (hna_local_entry) 343 if (tt_local_entry)
329 hna_local_del(bat_priv, hna_local_entry, message); 344 tt_local_del(bat_priv, tt_local_entry, message);
330 345
331 spin_unlock_bh(&bat_priv->hna_lhash_lock); 346 spin_unlock_bh(&bat_priv->tt_lhash_lock);
332} 347}
333 348
334static void hna_local_purge(struct work_struct *work) 349static void tt_local_purge(struct work_struct *work)
335{ 350{
336 struct delayed_work *delayed_work = 351 struct delayed_work *delayed_work =
337 container_of(work, struct delayed_work, work); 352 container_of(work, struct delayed_work, work);
338 struct bat_priv *bat_priv = 353 struct bat_priv *bat_priv =
339 container_of(delayed_work, struct bat_priv, hna_work); 354 container_of(delayed_work, struct bat_priv, tt_work);
340 struct hashtable_t *hash = bat_priv->hna_local_hash; 355 struct hashtable_t *hash = bat_priv->tt_local_hash;
341 struct hna_local_entry *hna_local_entry; 356 struct tt_local_entry *tt_local_entry;
342 struct hlist_node *node, *node_tmp; 357 struct hlist_node *node, *node_tmp;
343 struct hlist_head *head; 358 struct hlist_head *head;
344 unsigned long timeout; 359 unsigned long timeout;
345 int i; 360 int i;
346 361
-	spin_lock_bh(&bat_priv->hna_lhash_lock);
+	spin_lock_bh(&bat_priv->tt_lhash_lock);

 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];

-		hlist_for_each_entry_safe(hna_local_entry, node, node_tmp,
+		hlist_for_each_entry_safe(tt_local_entry, node, node_tmp,
 					  head, hash_entry) {
-			if (hna_local_entry->never_purge)
+			if (tt_local_entry->never_purge)
 				continue;

-			timeout = hna_local_entry->last_seen;
-			timeout += LOCAL_HNA_TIMEOUT * HZ;
+			timeout = tt_local_entry->last_seen;
+			timeout += TT_LOCAL_TIMEOUT * HZ;

 			if (time_before(jiffies, timeout))
 				continue;

-			hna_local_del(bat_priv, hna_local_entry,
+			tt_local_del(bat_priv, tt_local_entry,
 				     "address timed out");
 		}
 	}

-	spin_unlock_bh(&bat_priv->hna_lhash_lock);
-	hna_local_start_timer(bat_priv);
+	spin_unlock_bh(&bat_priv->tt_lhash_lock);
+	tt_local_start_timer(bat_priv);
 }

-void hna_local_free(struct bat_priv *bat_priv)
+void tt_local_free(struct bat_priv *bat_priv)
 {
-	if (!bat_priv->hna_local_hash)
+	if (!bat_priv->tt_local_hash)
 		return;

-	cancel_delayed_work_sync(&bat_priv->hna_work);
-	hash_delete(bat_priv->hna_local_hash, _hna_local_del, bat_priv);
-	bat_priv->hna_local_hash = NULL;
+	cancel_delayed_work_sync(&bat_priv->tt_work);
+	hash_delete(bat_priv->tt_local_hash, _tt_local_del, bat_priv);
+	bat_priv->tt_local_hash = NULL;
 }

-int hna_global_init(struct bat_priv *bat_priv)
+int tt_global_init(struct bat_priv *bat_priv)
 {
-	if (bat_priv->hna_global_hash)
+	if (bat_priv->tt_global_hash)
 		return 1;

-	bat_priv->hna_global_hash = hash_new(1024);
+	bat_priv->tt_global_hash = hash_new(1024);

-	if (!bat_priv->hna_global_hash)
+	if (!bat_priv->tt_global_hash)
 		return 0;

 	return 1;
 }

-void hna_global_add_orig(struct bat_priv *bat_priv,
-			 struct orig_node *orig_node,
-			 unsigned char *hna_buff, int hna_buff_len)
+void tt_global_add_orig(struct bat_priv *bat_priv,
+			struct orig_node *orig_node,
+			unsigned char *tt_buff, int tt_buff_len)
 {
-	struct hna_global_entry *hna_global_entry;
-	struct hna_local_entry *hna_local_entry;
-	int hna_buff_count = 0;
-	unsigned char *hna_ptr;
+	struct tt_global_entry *tt_global_entry;
+	struct tt_local_entry *tt_local_entry;
+	int tt_buff_count = 0;
+	unsigned char *tt_ptr;

-	while ((hna_buff_count + 1) * ETH_ALEN <= hna_buff_len) {
-		spin_lock_bh(&bat_priv->hna_ghash_lock);
+	while ((tt_buff_count + 1) * ETH_ALEN <= tt_buff_len) {
+		spin_lock_bh(&bat_priv->tt_ghash_lock);

-		hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);
-		hna_global_entry = hna_global_hash_find(bat_priv, hna_ptr);
+		tt_ptr = tt_buff + (tt_buff_count * ETH_ALEN);
+		tt_global_entry = tt_global_hash_find(bat_priv, tt_ptr);

-		if (!hna_global_entry) {
-			spin_unlock_bh(&bat_priv->hna_ghash_lock);
+		if (!tt_global_entry) {
+			spin_unlock_bh(&bat_priv->tt_ghash_lock);

-			hna_global_entry =
-				kmalloc(sizeof(struct hna_global_entry),
+			tt_global_entry =
+				kmalloc(sizeof(struct tt_global_entry),
 					GFP_ATOMIC);

-			if (!hna_global_entry)
+			if (!tt_global_entry)
 				break;

-			memcpy(hna_global_entry->addr, hna_ptr, ETH_ALEN);
+			memcpy(tt_global_entry->addr, tt_ptr, ETH_ALEN);

 			bat_dbg(DBG_ROUTES, bat_priv,
-				"Creating new global hna entry: "
+				"Creating new global tt entry: "
 				"%pM (via %pM)\n",
-				hna_global_entry->addr, orig_node->orig);
+				tt_global_entry->addr, orig_node->orig);

-			spin_lock_bh(&bat_priv->hna_ghash_lock);
-			hash_add(bat_priv->hna_global_hash, compare_ghna,
-				 choose_orig, hna_global_entry,
-				 &hna_global_entry->hash_entry);
+			spin_lock_bh(&bat_priv->tt_ghash_lock);
+			hash_add(bat_priv->tt_global_hash, compare_gtt,
+				 choose_orig, tt_global_entry,
+				 &tt_global_entry->hash_entry);

 		}

-		hna_global_entry->orig_node = orig_node;
-		spin_unlock_bh(&bat_priv->hna_ghash_lock);
+		tt_global_entry->orig_node = orig_node;
+		spin_unlock_bh(&bat_priv->tt_ghash_lock);

 		/* remove address from local hash if present */
-		spin_lock_bh(&bat_priv->hna_lhash_lock);
+		spin_lock_bh(&bat_priv->tt_lhash_lock);

-		hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);
-		hna_local_entry = hna_local_hash_find(bat_priv, hna_ptr);
+		tt_ptr = tt_buff + (tt_buff_count * ETH_ALEN);
+		tt_local_entry = tt_local_hash_find(bat_priv, tt_ptr);

-		if (hna_local_entry)
-			hna_local_del(bat_priv, hna_local_entry,
-				      "global hna received");
+		if (tt_local_entry)
+			tt_local_del(bat_priv, tt_local_entry,
+				     "global tt received");

-		spin_unlock_bh(&bat_priv->hna_lhash_lock);
+		spin_unlock_bh(&bat_priv->tt_lhash_lock);

-		hna_buff_count++;
+		tt_buff_count++;
 	}

 	/* initialize, and overwrite if malloc succeeds */
-	orig_node->hna_buff = NULL;
-	orig_node->hna_buff_len = 0;
+	orig_node->tt_buff = NULL;
+	orig_node->tt_buff_len = 0;

-	if (hna_buff_len > 0) {
-		orig_node->hna_buff = kmalloc(hna_buff_len, GFP_ATOMIC);
-		if (orig_node->hna_buff) {
-			memcpy(orig_node->hna_buff, hna_buff, hna_buff_len);
-			orig_node->hna_buff_len = hna_buff_len;
+	if (tt_buff_len > 0) {
+		orig_node->tt_buff = kmalloc(tt_buff_len, GFP_ATOMIC);
+		if (orig_node->tt_buff) {
+			memcpy(orig_node->tt_buff, tt_buff, tt_buff_len);
+			orig_node->tt_buff_len = tt_buff_len;
 		}
 	}
 }

-int hna_global_seq_print_text(struct seq_file *seq, void *offset)
+int tt_global_seq_print_text(struct seq_file *seq, void *offset)
 {
 	struct net_device *net_dev = (struct net_device *)seq->private;
 	struct bat_priv *bat_priv = netdev_priv(net_dev);
-	struct hashtable_t *hash = bat_priv->hna_global_hash;
-	struct hna_global_entry *hna_global_entry;
+	struct hashtable_t *hash = bat_priv->tt_global_hash;
+	struct tt_global_entry *tt_global_entry;
+	struct hard_iface *primary_if;
 	struct hlist_node *node;
 	struct hlist_head *head;
 	size_t buf_size, pos;
 	char *buff;
-	int i;
+	int i, ret = 0;

-	if (!bat_priv->primary_if) {
-		return seq_printf(seq, "BATMAN mesh %s disabled - "
-				  "please specify interfaces to enable it\n",
-				  net_dev->name);
+	primary_if = primary_if_get_selected(bat_priv);
+	if (!primary_if) {
+		ret = seq_printf(seq, "BATMAN mesh %s disabled - please "
+				 "specify interfaces to enable it\n",
+				 net_dev->name);
+		goto out;
 	}

-	seq_printf(seq, "Globally announced HNAs received via the mesh %s\n",
+	if (primary_if->if_status != IF_ACTIVE) {
+		ret = seq_printf(seq, "BATMAN mesh %s disabled - "
+				 "primary interface not active\n",
+				 net_dev->name);
+		goto out;
+	}
+
+	seq_printf(seq,
+		   "Globally announced TT entries received via the mesh %s\n",
 		   net_dev->name);

-	spin_lock_bh(&bat_priv->hna_ghash_lock);
+	spin_lock_bh(&bat_priv->tt_ghash_lock);

 	buf_size = 1;
 	/* Estimate length for: " * xx:xx:xx:xx:xx:xx via xx:xx:xx:xx:xx:xx\n"*/
@@ -498,8 +524,9 @@ int hna_global_seq_print_text(struct seq_file *seq, void *offset)

 	buff = kmalloc(buf_size, GFP_ATOMIC);
 	if (!buff) {
-		spin_unlock_bh(&bat_priv->hna_ghash_lock);
-		return -ENOMEM;
+		spin_unlock_bh(&bat_priv->tt_ghash_lock);
+		ret = -ENOMEM;
+		goto out;
 	}
 	buff[0] = '\0';
 	pos = 0;
@@ -508,101 +535,104 @@ int hna_global_seq_print_text(struct seq_file *seq, void *offset)
 		head = &hash->table[i];

 		rcu_read_lock();
-		hlist_for_each_entry_rcu(hna_global_entry, node,
+		hlist_for_each_entry_rcu(tt_global_entry, node,
 					 head, hash_entry) {
 			pos += snprintf(buff + pos, 44,
 					" * %pM via %pM\n",
-					hna_global_entry->addr,
-					hna_global_entry->orig_node->orig);
+					tt_global_entry->addr,
+					tt_global_entry->orig_node->orig);
 		}
 		rcu_read_unlock();
 	}

-	spin_unlock_bh(&bat_priv->hna_ghash_lock);
+	spin_unlock_bh(&bat_priv->tt_ghash_lock);

 	seq_printf(seq, "%s", buff);
 	kfree(buff);
-	return 0;
+out:
+	if (primary_if)
+		hardif_free_ref(primary_if);
+	return ret;
 }

-static void _hna_global_del_orig(struct bat_priv *bat_priv,
-				 struct hna_global_entry *hna_global_entry,
-				 char *message)
+static void _tt_global_del_orig(struct bat_priv *bat_priv,
+				struct tt_global_entry *tt_global_entry,
+				char *message)
 {
 	bat_dbg(DBG_ROUTES, bat_priv,
-		"Deleting global hna entry %pM (via %pM): %s\n",
-		hna_global_entry->addr, hna_global_entry->orig_node->orig,
+		"Deleting global tt entry %pM (via %pM): %s\n",
+		tt_global_entry->addr, tt_global_entry->orig_node->orig,
 		message);

-	hash_remove(bat_priv->hna_global_hash, compare_ghna, choose_orig,
-		    hna_global_entry->addr);
-	kfree(hna_global_entry);
+	hash_remove(bat_priv->tt_global_hash, compare_gtt, choose_orig,
+		    tt_global_entry->addr);
+	kfree(tt_global_entry);
 }

-void hna_global_del_orig(struct bat_priv *bat_priv,
-			 struct orig_node *orig_node, char *message)
+void tt_global_del_orig(struct bat_priv *bat_priv,
+			struct orig_node *orig_node, char *message)
 {
-	struct hna_global_entry *hna_global_entry;
-	int hna_buff_count = 0;
-	unsigned char *hna_ptr;
+	struct tt_global_entry *tt_global_entry;
+	int tt_buff_count = 0;
+	unsigned char *tt_ptr;

-	if (orig_node->hna_buff_len == 0)
+	if (orig_node->tt_buff_len == 0)
 		return;

-	spin_lock_bh(&bat_priv->hna_ghash_lock);
+	spin_lock_bh(&bat_priv->tt_ghash_lock);

-	while ((hna_buff_count + 1) * ETH_ALEN <= orig_node->hna_buff_len) {
-		hna_ptr = orig_node->hna_buff + (hna_buff_count * ETH_ALEN);
-		hna_global_entry = hna_global_hash_find(bat_priv, hna_ptr);
+	while ((tt_buff_count + 1) * ETH_ALEN <= orig_node->tt_buff_len) {
+		tt_ptr = orig_node->tt_buff + (tt_buff_count * ETH_ALEN);
+		tt_global_entry = tt_global_hash_find(bat_priv, tt_ptr);

-		if ((hna_global_entry) &&
-		    (hna_global_entry->orig_node == orig_node))
-			_hna_global_del_orig(bat_priv, hna_global_entry,
-					     message);
+		if ((tt_global_entry) &&
+		    (tt_global_entry->orig_node == orig_node))
+			_tt_global_del_orig(bat_priv, tt_global_entry,
+					    message);

-		hna_buff_count++;
+		tt_buff_count++;
 	}

-	spin_unlock_bh(&bat_priv->hna_ghash_lock);
+	spin_unlock_bh(&bat_priv->tt_ghash_lock);

-	orig_node->hna_buff_len = 0;
-	kfree(orig_node->hna_buff);
-	orig_node->hna_buff = NULL;
+	orig_node->tt_buff_len = 0;
+	kfree(orig_node->tt_buff);
+	orig_node->tt_buff = NULL;
 }

-static void hna_global_del(struct hlist_node *node, void *arg)
+static void tt_global_del(struct hlist_node *node, void *arg)
 {
-	void *data = container_of(node, struct hna_global_entry, hash_entry);
+	void *data = container_of(node, struct tt_global_entry, hash_entry);

 	kfree(data);
 }

-void hna_global_free(struct bat_priv *bat_priv)
+void tt_global_free(struct bat_priv *bat_priv)
 {
-	if (!bat_priv->hna_global_hash)
+	if (!bat_priv->tt_global_hash)
 		return;

-	hash_delete(bat_priv->hna_global_hash, hna_global_del, NULL);
-	bat_priv->hna_global_hash = NULL;
+	hash_delete(bat_priv->tt_global_hash, tt_global_del, NULL);
+	bat_priv->tt_global_hash = NULL;
 }

 struct orig_node *transtable_search(struct bat_priv *bat_priv, uint8_t *addr)
 {
-	struct hna_global_entry *hna_global_entry;
+	struct tt_global_entry *tt_global_entry;
 	struct orig_node *orig_node = NULL;

-	spin_lock_bh(&bat_priv->hna_ghash_lock);
-	hna_global_entry = hna_global_hash_find(bat_priv, addr);
+	spin_lock_bh(&bat_priv->tt_ghash_lock);
+	tt_global_entry = tt_global_hash_find(bat_priv, addr);

-	if (!hna_global_entry)
+	if (!tt_global_entry)
 		goto out;

-	if (!atomic_inc_not_zero(&hna_global_entry->orig_node->refcount))
+	if (!atomic_inc_not_zero(&tt_global_entry->orig_node->refcount))
 		goto out;

-	orig_node = hna_global_entry->orig_node;
+	orig_node = tt_global_entry->orig_node;

 out:
-	spin_unlock_bh(&bat_priv->hna_ghash_lock);
+	spin_unlock_bh(&bat_priv->tt_ghash_lock);
 	return orig_node;
 }
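
A pattern worth noting before the next file: besides the hna -> tt rename, the hunks above replace every direct read of bat_priv->primary_if with a primary_if_get_selected()/hardif_free_ref() pair, so the selected interface can no longer be freed while a caller still uses it. The sketch below is a hypothetical user-space analogue of that refcounted get/put idiom; the names iface_get_selected()/iface_put() and the plain atomic counter are illustrative assumptions, not kernel API:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct iface {
	atomic_int refcount;		/* object lives while refcount > 0 */
	char name[16];
};

static struct iface *_Atomic selected;	/* stands in for bat_priv->primary_if */

/* "get": take a reference before using the object */
static struct iface *iface_get_selected(void)
{
	struct iface *p = atomic_load(&selected);

	if (!p)
		return NULL;
	/* the kernel uses atomic_inc_not_zero() under rcu_read_lock() here */
	atomic_fetch_add(&p->refcount, 1);
	return p;
}

/* "put": drop the reference; the last put frees the object */
static void iface_put(struct iface *p)
{
	if (atomic_fetch_sub(&p->refcount, 1) == 1)
		free(p);
}

int main(void)
{
	struct iface *i = calloc(1, sizeof(*i));

	if (!i)
		return 1;
	atomic_init(&i->refcount, 1);
	strcpy(i->name, "eth0");
	atomic_store(&selected, i);

	struct iface *ref = iface_get_selected();
	if (ref) {
		printf("using %s\n", ref->name);
		iface_put(ref);		/* mirrors hardif_free_ref() */
	}
	atomic_store(&selected, NULL);
	iface_put(i);			/* drop the initial reference */
	return 0;
}

The same shape -- get at the top, put on every exit path through a single out: label -- is exactly what the goto out conversions in these hunks implement.
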
diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h
index f19931ca1457..46152c38cc95 100644
--- a/net/batman-adv/translation-table.h
+++ b/net/batman-adv/translation-table.h
@@ -22,22 +22,22 @@
 #ifndef _NET_BATMAN_ADV_TRANSLATION_TABLE_H_
 #define _NET_BATMAN_ADV_TRANSLATION_TABLE_H_

-int hna_local_init(struct bat_priv *bat_priv);
-void hna_local_add(struct net_device *soft_iface, uint8_t *addr);
-void hna_local_remove(struct bat_priv *bat_priv,
-		      uint8_t *addr, char *message);
-int hna_local_fill_buffer(struct bat_priv *bat_priv,
-			  unsigned char *buff, int buff_len);
-int hna_local_seq_print_text(struct seq_file *seq, void *offset);
-void hna_local_free(struct bat_priv *bat_priv);
-int hna_global_init(struct bat_priv *bat_priv);
-void hna_global_add_orig(struct bat_priv *bat_priv,
-			 struct orig_node *orig_node,
-			 unsigned char *hna_buff, int hna_buff_len);
-int hna_global_seq_print_text(struct seq_file *seq, void *offset);
-void hna_global_del_orig(struct bat_priv *bat_priv,
-			 struct orig_node *orig_node, char *message);
-void hna_global_free(struct bat_priv *bat_priv);
+int tt_local_init(struct bat_priv *bat_priv);
+void tt_local_add(struct net_device *soft_iface, uint8_t *addr);
+void tt_local_remove(struct bat_priv *bat_priv,
+		     uint8_t *addr, char *message);
+int tt_local_fill_buffer(struct bat_priv *bat_priv,
+			 unsigned char *buff, int buff_len);
+int tt_local_seq_print_text(struct seq_file *seq, void *offset);
+void tt_local_free(struct bat_priv *bat_priv);
+int tt_global_init(struct bat_priv *bat_priv);
+void tt_global_add_orig(struct bat_priv *bat_priv,
+			struct orig_node *orig_node,
+			unsigned char *tt_buff, int tt_buff_len);
+int tt_global_seq_print_text(struct seq_file *seq, void *offset);
+void tt_global_del_orig(struct bat_priv *bat_priv,
+			struct orig_node *orig_node, char *message);
+void tt_global_free(struct bat_priv *bat_priv);
 struct orig_node *transtable_search(struct bat_priv *bat_priv, uint8_t *addr);

 #endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 83445cf0cc9f..fab70e8b16ee 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -67,7 +67,7 @@ struct hard_iface {
 struct orig_node {
 	uint8_t orig[ETH_ALEN];
 	uint8_t primary_addr[ETH_ALEN];
-	struct neigh_node *router;
+	struct neigh_node __rcu *router; /* rcu protected pointer */
 	unsigned long *bcast_own;
 	uint8_t *bcast_own_sum;
 	unsigned long last_valid;
@@ -75,25 +75,25 @@ struct orig_node {
 	unsigned long batman_seqno_reset;
 	uint8_t gw_flags;
 	uint8_t flags;
-	unsigned char *hna_buff;
-	int16_t hna_buff_len;
+	unsigned char *tt_buff;
+	int16_t tt_buff_len;
 	uint32_t last_real_seqno;
 	uint8_t last_ttl;
 	unsigned long bcast_bits[NUM_WORDS];
 	uint32_t last_bcast_seqno;
 	struct hlist_head neigh_list;
 	struct list_head frag_list;
-	spinlock_t neigh_list_lock; /* protects neighbor list */
+	spinlock_t neigh_list_lock; /* protects neigh_list and router */
 	atomic_t refcount;
 	struct rcu_head rcu;
 	struct hlist_node hash_entry;
 	struct bat_priv *bat_priv;
 	unsigned long last_frag_packet;
-	spinlock_t ogm_cnt_lock; /* protects: bcast_own, bcast_own_sum,
-				  * neigh_node->real_bits,
-				  * neigh_node->real_packet_count */
-	spinlock_t bcast_seqno_lock; /* protects bcast_bits,
-				      * last_bcast_seqno */
+	/* ogm_cnt_lock protects: bcast_own, bcast_own_sum,
+	 * neigh_node->real_bits, neigh_node->real_packet_count */
+	spinlock_t ogm_cnt_lock;
+	/* bcast_seqno_lock protects bcast_bits, last_bcast_seqno */
+	spinlock_t bcast_seqno_lock;
 	atomic_t bond_candidates;
 	struct list_head bond_list;
 };
@@ -125,6 +125,7 @@ struct neigh_node {
 	struct rcu_head rcu;
 	struct orig_node *orig_node;
 	struct hard_iface *if_incoming;
+	spinlock_t tq_lock; /* protects: tq_recv, tq_index */
 };


@@ -145,34 +146,34 @@ struct bat_priv {
 	atomic_t bcast_queue_left;
 	atomic_t batman_queue_left;
 	char num_ifaces;
-	struct hlist_head softif_neigh_list;
-	struct softif_neigh *softif_neigh;
 	struct debug_log *debug_log;
-	struct hard_iface *primary_if;
 	struct kobject *mesh_obj;
 	struct dentry *debug_dir;
 	struct hlist_head forw_bat_list;
 	struct hlist_head forw_bcast_list;
 	struct hlist_head gw_list;
+	struct hlist_head softif_neigh_vids;
 	struct list_head vis_send_list;
 	struct hashtable_t *orig_hash;
-	struct hashtable_t *hna_local_hash;
-	struct hashtable_t *hna_global_hash;
+	struct hashtable_t *tt_local_hash;
+	struct hashtable_t *tt_global_hash;
 	struct hashtable_t *vis_hash;
 	spinlock_t forw_bat_list_lock; /* protects forw_bat_list */
 	spinlock_t forw_bcast_list_lock; /* protects */
-	spinlock_t hna_lhash_lock; /* protects hna_local_hash */
-	spinlock_t hna_ghash_lock; /* protects hna_global_hash */
+	spinlock_t tt_lhash_lock; /* protects tt_local_hash */
+	spinlock_t tt_ghash_lock; /* protects tt_global_hash */
 	spinlock_t gw_list_lock; /* protects gw_list and curr_gw */
 	spinlock_t vis_hash_lock; /* protects vis_hash */
 	spinlock_t vis_list_lock; /* protects vis_info::recv_list */
 	spinlock_t softif_neigh_lock; /* protects soft-interface neigh list */
-	int16_t num_local_hna;
-	atomic_t hna_local_changed;
-	struct delayed_work hna_work;
+	spinlock_t softif_neigh_vid_lock; /* protects soft-interface vid list */
+	int16_t num_local_tt;
+	atomic_t tt_local_changed;
+	struct delayed_work tt_work;
 	struct delayed_work orig_work;
 	struct delayed_work vis_work;
 	struct gw_node __rcu *curr_gw; /* rcu protected pointer */
+	struct hard_iface __rcu *primary_if; /* rcu protected pointer */
 	struct vis_info *my_vis_info;
 };

@@ -191,14 +192,14 @@ struct socket_packet {
 	struct icmp_packet_rr icmp_packet;
 };

-struct hna_local_entry {
+struct tt_local_entry {
 	uint8_t addr[ETH_ALEN];
 	unsigned long last_seen;
 	char never_purge;
 	struct hlist_node hash_entry;
 };

-struct hna_global_entry {
+struct tt_global_entry {
 	uint8_t addr[ETH_ALEN];
 	struct orig_node *orig_node;
 	struct hlist_node hash_entry;
@@ -261,7 +262,7 @@ struct vis_info {
 struct vis_info_entry {
 	uint8_t src[ETH_ALEN];
 	uint8_t dest[ETH_ALEN];
-	uint8_t quality;	/* quality = 0 means HNA */
+	uint8_t quality;	/* quality = 0 client */
 } __packed;

 struct recvlist_node {
@@ -269,11 +270,20 @@ struct recvlist_node {
 	uint8_t mac[ETH_ALEN];
 };

+struct softif_neigh_vid {
+	struct hlist_node list;
+	struct bat_priv *bat_priv;
+	short vid;
+	atomic_t refcount;
+	struct softif_neigh __rcu *softif_neigh;
+	struct rcu_head rcu;
+	struct hlist_head softif_neigh_list;
+};
+
 struct softif_neigh {
 	struct hlist_node list;
 	uint8_t addr[ETH_ALEN];
 	unsigned long last_seen;
-	short vid;
 	atomic_t refcount;
 	struct rcu_head rcu;
 };
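
types.h now tags router, softif_neigh and primary_if with __rcu: writers publish such a pointer with rcu_assign_pointer() and readers fetch it with rcu_dereference() inside an rcu_read_lock() section. Ignoring grace periods and deferred freeing, the ordering contract can be sketched in user space with C11 release/acquire atomics; everything below is an illustrative analogue of that publish/subscribe pattern, not the kernel implementation:

#include <stdatomic.h>
#include <stdio.h>

struct gw { int id; };

static struct gw *_Atomic curr_gw;	/* plays the role of an __rcu pointer */

/* writer side, like rcu_assign_pointer(): the release barrier makes the
 * object's fields visible before the pointer itself becomes visible */
static void publish_gw(struct gw *g)
{
	atomic_store_explicit(&curr_gw, g, memory_order_release);
}

/* reader side, like rcu_dereference(): the acquire pairs with the release */
static struct gw *read_gw(void)
{
	return atomic_load_explicit(&curr_gw, memory_order_acquire);
}

int main(void)
{
	static struct gw g = { .id = 42 };

	publish_gw(&g);
	struct gw *p = read_gw();
	if (p)
		printf("current gateway: %d\n", p->id);
	return 0;
}

What this sketch cannot show is the other half of RCU: a writer must wait a grace period (or use call_rcu()) before freeing the old object, which is why the structs above all carry a struct rcu_head.
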
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index 19f84bd443af..19c3daf34ac6 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -221,15 +221,17 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
 		  struct hard_iface *hard_iface, uint8_t dstaddr[])
 {
 	struct unicast_packet tmp_uc, *unicast_packet;
+	struct hard_iface *primary_if;
 	struct sk_buff *frag_skb;
 	struct unicast_frag_packet *frag1, *frag2;
 	int uc_hdr_len = sizeof(struct unicast_packet);
 	int ucf_hdr_len = sizeof(struct unicast_frag_packet);
 	int data_len = skb->len - uc_hdr_len;
-	int large_tail = 0;
+	int large_tail = 0, ret = NET_RX_DROP;
 	uint16_t seqno;

-	if (!bat_priv->primary_if)
+	primary_if = primary_if_get_selected(bat_priv);
+	if (!primary_if)
 		goto dropped;

 	frag_skb = dev_alloc_skb(data_len - (data_len / 2) + ucf_hdr_len);
@@ -254,7 +256,7 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
 	frag1->version = COMPAT_VERSION;
 	frag1->packet_type = BAT_UNICAST_FRAG;

-	memcpy(frag1->orig, bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
+	memcpy(frag1->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
 	memcpy(frag2, frag1, sizeof(struct unicast_frag_packet));

 	if (data_len & 1)
@@ -269,13 +271,17 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,

 	send_skb_packet(skb, hard_iface, dstaddr);
 	send_skb_packet(frag_skb, hard_iface, dstaddr);
-	return NET_RX_SUCCESS;
+	ret = NET_RX_SUCCESS;
+	goto out;

 drop_frag:
 	kfree_skb(frag_skb);
 dropped:
 	kfree_skb(skb);
-	return NET_RX_DROP;
+out:
+	if (primary_if)
+		hardif_free_ref(primary_if);
+	return ret;
 }

 int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv)
@@ -289,12 +295,12 @@ int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv)

 	/* get routing information */
 	if (is_multicast_ether_addr(ethhdr->h_dest)) {
-		orig_node = (struct orig_node *)gw_get_selected(bat_priv);
+		orig_node = (struct orig_node *)gw_get_selected_orig(bat_priv);
 		if (orig_node)
 			goto find_router;
 	}

-	/* check for hna host - increases orig_node refcount */
+	/* check for tt host - increases orig_node refcount */
 	orig_node = transtable_search(bat_priv, ethhdr->h_dest);

 find_router:
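
For context on the frag_send_skb() hunks just above: the function splits one payload into two fragments, sizing the tail fragment as data_len - data_len / 2 bytes, so an odd payload leaves the tail one byte longer and a large-tail flag records the asymmetry. A minimal sketch of that arithmetic, assuming a flag value of 0x01 purely for illustration:

#include <stdio.h>

#define UNI_FRAG_LARGETAIL 0x01	/* assumed flag value, for illustration */

/* Split an n-byte payload the way frag_send_skb() does: the head keeps
 * n / 2 bytes and the tail gets the rest, so an odd n makes the tail
 * one byte longer and the flag records the asymmetry. */
static void split_payload(int n)
{
	int head_len = n / 2;
	int tail_len = n - head_len;
	int flags = (n & 1) ? UNI_FRAG_LARGETAIL : 0;

	printf("n=%d head=%d tail=%d flags=%#x\n",
	       n, head_len, tail_len, flags);
}

int main(void)
{
	split_payload(1401);	/* odd length: 700 + 701, flag set */
	split_payload(1400);	/* even length: 700 + 700, no flag */
	return 0;
}
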
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
index f90212f42082..c39f20cc1ba6 100644
--- a/net/batman-adv/vis.c
+++ b/net/batman-adv/vis.c
@@ -194,7 +194,7 @@ static ssize_t vis_data_read_entry(char *buff, struct vis_info_entry *entry,
 {
 	/* maximal length: max(4+17+2, 3+17+1+3+2) == 26 */
 	if (primary && entry->quality == 0)
-		return sprintf(buff, "HNA %pM, ", entry->dest);
+		return sprintf(buff, "TT %pM, ", entry->dest);
 	else if (compare_eth(entry->src, src))
 		return sprintf(buff, "TQ %pM %d, ", entry->dest,
 			       entry->quality);
@@ -204,6 +204,7 @@ static ssize_t vis_data_read_entry(char *buff, struct vis_info_entry *entry,

 int vis_seq_print_text(struct seq_file *seq, void *offset)
 {
+	struct hard_iface *primary_if;
 	struct hlist_node *node;
 	struct hlist_head *head;
 	struct vis_info *info;
@@ -215,15 +216,18 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
 	HLIST_HEAD(vis_if_list);
 	struct if_list_entry *entry;
 	struct hlist_node *pos, *n;
-	int i, j;
+	int i, j, ret = 0;
 	int vis_server = atomic_read(&bat_priv->vis_mode);
 	size_t buff_pos, buf_size;
 	char *buff;
 	int compare;

-	if ((!bat_priv->primary_if) ||
-	    (vis_server == VIS_TYPE_CLIENT_UPDATE))
-		return 0;
+	primary_if = primary_if_get_selected(bat_priv);
+	if (!primary_if)
+		goto out;
+
+	if (vis_server == VIS_TYPE_CLIENT_UPDATE)
+		goto out;

 	buf_size = 1;
 	/* Estimate length */
@@ -270,7 +274,8 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
 	buff = kmalloc(buf_size, GFP_ATOMIC);
 	if (!buff) {
 		spin_unlock_bh(&bat_priv->vis_hash_lock);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto out;
 	}
 	buff[0] = '\0';
 	buff_pos = 0;
@@ -328,7 +333,10 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
 	seq_printf(seq, "%s", buff);
 	kfree(buff);

-	return 0;
+out:
+	if (primary_if)
+		hardif_free_ref(primary_if);
+	return ret;
 }

 /* add the info packet to the send list, if it was not
@@ -558,6 +566,7 @@ static int find_best_vis_server(struct bat_priv *bat_priv,
 			       struct vis_info *info)
 {
 	struct hashtable_t *hash = bat_priv->orig_hash;
+	struct neigh_node *router;
 	struct hlist_node *node;
 	struct hlist_head *head;
 	struct orig_node *orig_node;
@@ -571,13 +580,17 @@ static int find_best_vis_server(struct bat_priv *bat_priv,

 		rcu_read_lock();
 		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
-			if ((orig_node) && (orig_node->router) &&
-			    (orig_node->flags & VIS_SERVER) &&
-			    (orig_node->router->tq_avg > best_tq)) {
-				best_tq = orig_node->router->tq_avg;
+			router = orig_node_get_router(orig_node);
+			if (!router)
+				continue;
+
+			if ((orig_node->flags & VIS_SERVER) &&
+			    (router->tq_avg > best_tq)) {
+				best_tq = router->tq_avg;
 				memcpy(packet->target_orig, orig_node->orig,
 				       ETH_ALEN);
 			}
+			neigh_node_free_ref(router);
 		}
 		rcu_read_unlock();
 	}
@@ -605,11 +618,11 @@ static int generate_vis_packet(struct bat_priv *bat_priv)
 	struct hlist_node *node;
 	struct hlist_head *head;
 	struct orig_node *orig_node;
-	struct neigh_node *neigh_node;
+	struct neigh_node *router;
 	struct vis_info *info = (struct vis_info *)bat_priv->my_vis_info;
 	struct vis_packet *packet = (struct vis_packet *)info->skb_packet->data;
 	struct vis_info_entry *entry;
-	struct hna_local_entry *hna_local_entry;
+	struct tt_local_entry *tt_local_entry;
 	int best_tq = -1, i;

 	info->first_seen = jiffies;
@@ -633,59 +646,61 @@ static int generate_vis_packet(struct bat_priv *bat_priv)

 		rcu_read_lock();
 		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
-			neigh_node = orig_node->router;
-
-			if (!neigh_node)
+			router = orig_node_get_router(orig_node);
+			if (!router)
 				continue;

-			if (!compare_eth(neigh_node->addr, orig_node->orig))
-				continue;
+			if (!compare_eth(router->addr, orig_node->orig))
+				goto next;

-			if (neigh_node->if_incoming->if_status != IF_ACTIVE)
-				continue;
+			if (router->if_incoming->if_status != IF_ACTIVE)
+				goto next;

-			if (neigh_node->tq_avg < 1)
-				continue;
+			if (router->tq_avg < 1)
+				goto next;

 			/* fill one entry into buffer. */
 			entry = (struct vis_info_entry *)
 				skb_put(info->skb_packet, sizeof(*entry));
 			memcpy(entry->src,
-			       neigh_node->if_incoming->net_dev->dev_addr,
+			       router->if_incoming->net_dev->dev_addr,
 			       ETH_ALEN);
 			memcpy(entry->dest, orig_node->orig, ETH_ALEN);
-			entry->quality = neigh_node->tq_avg;
+			entry->quality = router->tq_avg;
 			packet->entries++;

+next:
+			neigh_node_free_ref(router);
+
 			if (vis_packet_full(info))
 				goto unlock;
 		}
 		rcu_read_unlock();
 	}

-	hash = bat_priv->hna_local_hash;
+	hash = bat_priv->tt_local_hash;

-	spin_lock_bh(&bat_priv->hna_lhash_lock);
+	spin_lock_bh(&bat_priv->tt_lhash_lock);
 	for (i = 0; i < hash->size; i++) {
 		head = &hash->table[i];

-		hlist_for_each_entry(hna_local_entry, node, head, hash_entry) {
+		hlist_for_each_entry(tt_local_entry, node, head, hash_entry) {
 			entry = (struct vis_info_entry *)
 				skb_put(info->skb_packet,
 					sizeof(*entry));
 			memset(entry->src, 0, ETH_ALEN);
-			memcpy(entry->dest, hna_local_entry->addr, ETH_ALEN);
-			entry->quality = 0; /* 0 means HNA */
+			memcpy(entry->dest, tt_local_entry->addr, ETH_ALEN);
+			entry->quality = 0; /* 0 means TT */
 			packet->entries++;

 			if (vis_packet_full(info)) {
-				spin_unlock_bh(&bat_priv->hna_lhash_lock);
+				spin_unlock_bh(&bat_priv->tt_lhash_lock);
 				return 0;
 			}
 		}
 	}

-	spin_unlock_bh(&bat_priv->hna_lhash_lock);
+	spin_unlock_bh(&bat_priv->tt_lhash_lock);
 	return 0;

 unlock:
@@ -725,6 +740,7 @@ static void purge_vis_packets(struct bat_priv *bat_priv)
 static void broadcast_vis_packet(struct bat_priv *bat_priv,
 				 struct vis_info *info)
 {
+	struct neigh_node *router;
 	struct hashtable_t *hash = bat_priv->orig_hash;
 	struct hlist_node *node;
 	struct hlist_head *head;
@@ -745,19 +761,26 @@ static void broadcast_vis_packet(struct bat_priv *bat_priv,
 	rcu_read_lock();
 	hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
 		/* if it's a vis server and reachable, send it. */
-		if ((!orig_node) || (!orig_node->router))
-			continue;
 		if (!(orig_node->flags & VIS_SERVER))
 			continue;
+
+		router = orig_node_get_router(orig_node);
+		if (!router)
+			continue;
+
 		/* don't send it if we already received the packet from
 		 * this node. */
 		if (recv_list_is_in(bat_priv, &info->recv_list,
-				    orig_node->orig))
+				    orig_node->orig)) {
+			neigh_node_free_ref(router);
 			continue;
+		}

 		memcpy(packet->target_orig, orig_node->orig, ETH_ALEN);
-		hard_iface = orig_node->router->if_incoming;
-		memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
+		hard_iface = router->if_incoming;
+		memcpy(dstaddr, router->addr, ETH_ALEN);
+
+		neigh_node_free_ref(router);

 		skb = skb_clone(info->skb_packet, GFP_ATOMIC);
 		if (skb)
@@ -772,60 +795,48 @@ static void unicast_vis_packet(struct bat_priv *bat_priv,
 			       struct vis_info *info)
 {
 	struct orig_node *orig_node;
-	struct neigh_node *neigh_node = NULL;
+	struct neigh_node *router = NULL;
 	struct sk_buff *skb;
 	struct vis_packet *packet;

 	packet = (struct vis_packet *)info->skb_packet->data;

-	rcu_read_lock();
 	orig_node = orig_hash_find(bat_priv, packet->target_orig);
-
 	if (!orig_node)
-		goto unlock;
-
-	neigh_node = orig_node->router;
+		goto out;

-	if (!neigh_node)
-		goto unlock;
-
-	if (!atomic_inc_not_zero(&neigh_node->refcount)) {
-		neigh_node = NULL;
-		goto unlock;
-	}
-
-	rcu_read_unlock();
+	router = orig_node_get_router(orig_node);
+	if (!router)
+		goto out;

 	skb = skb_clone(info->skb_packet, GFP_ATOMIC);
 	if (skb)
-		send_skb_packet(skb, neigh_node->if_incoming,
-				neigh_node->addr);
-
-	goto out;
+		send_skb_packet(skb, router->if_incoming, router->addr);

-unlock:
-	rcu_read_unlock();
 out:
-	if (neigh_node)
-		neigh_node_free_ref(neigh_node);
+	if (router)
+		neigh_node_free_ref(router);
 	if (orig_node)
 		orig_node_free_ref(orig_node);
-	return;
 }

 /* only send one vis packet. called from send_vis_packets() */
 static void send_vis_packet(struct bat_priv *bat_priv, struct vis_info *info)
 {
+	struct hard_iface *primary_if;
 	struct vis_packet *packet;

+	primary_if = primary_if_get_selected(bat_priv);
+	if (!primary_if)
+		goto out;
+
 	packet = (struct vis_packet *)info->skb_packet->data;
 	if (packet->ttl < 2) {
 		pr_debug("Error - can't send vis packet: ttl exceeded\n");
-		return;
+		goto out;
 	}

-	memcpy(packet->sender_orig, bat_priv->primary_if->net_dev->dev_addr,
-	       ETH_ALEN);
+	memcpy(packet->sender_orig, primary_if->net_dev->dev_addr, ETH_ALEN);
 	packet->ttl--;

 	if (is_broadcast_ether_addr(packet->target_orig))
@@ -833,6 +844,10 @@ static void send_vis_packet(struct bat_priv *bat_priv, struct vis_info *info)
 	else
 		unicast_vis_packet(bat_priv, info);
 	packet->ttl++; /* restore TTL */
+
+out:
+	if (primary_if)
+		hardif_free_ref(primary_if);
 }

 /* called from timer; send (and maybe generate) vis packet. */
@@ -859,8 +874,7 @@ static void send_vis_packets(struct work_struct *work)
 	kref_get(&info->refcount);
 	spin_unlock_bh(&bat_priv->vis_hash_lock);

-	if (bat_priv->primary_if)
-		send_vis_packet(bat_priv, info);
+	send_vis_packet(bat_priv, info);

 	spin_lock_bh(&bat_priv->vis_hash_lock);
 	send_list_del(info);
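
Several of the *_seq_print_text() functions touched in this patch share one shape: estimate a worst-case buffer size while holding the lock, kmalloc() it, append entries with snprintf() at a running offset, then emit the whole buffer in one call. A self-contained user-space sketch of that estimate-then-fill pattern (the 43/44 byte figures mirror the " * %pM via %pM\n" estimate in the code above):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *macs[] = { "02:00:00:00:00:01", "02:00:00:00:00:02" };
	size_t n = sizeof(macs) / sizeof(macs[0]);
	/* " * xx:xx:xx:xx:xx:xx via xx:xx:xx:xx:xx:xx\n" is at most 43
	 * characters, plus one byte for the terminating NUL */
	size_t buf_size = 1 + n * 43;
	size_t pos = 0;
	char *buff = malloc(buf_size);

	if (!buff)
		return 1;
	buff[0] = '\0';
	/* append each entry at the running offset, capped at 44 bytes */
	for (size_t i = 0; i < n; i++)
		pos += snprintf(buff + pos, 44, " * %s via %s\n",
				macs[i], macs[0]);
	fputs(buff, stdout);
	free(buff);
	return 0;
}

The point of the two-pass design is that the hash lock only covers cheap counting and formatting into private memory; the potentially blocking seq_printf() happens after the lock is dropped.
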
diff --git a/net/bluetooth/bnep/bnep.h b/net/bluetooth/bnep/bnep.h
index 70672544db86..8e6c06158f8e 100644
--- a/net/bluetooth/bnep/bnep.h
+++ b/net/bluetooth/bnep/bnep.h
@@ -23,88 +23,88 @@
 #include <linux/crc32.h>
 #include <net/bluetooth/bluetooth.h>

-// Limits
+/* Limits */
 #define BNEP_MAX_PROTO_FILTERS		5
 #define BNEP_MAX_MULTICAST_FILTERS	20

-// UUIDs
+/* UUIDs */
 #define BNEP_BASE_UUID	0x0000000000001000800000805F9B34FB
 #define BNEP_UUID16	0x02
 #define BNEP_UUID32	0x04
 #define BNEP_UUID128	0x16

 #define BNEP_SVC_PANU	0x1115
 #define BNEP_SVC_NAP	0x1116
 #define BNEP_SVC_GN	0x1117

-// Packet types
+/* Packet types */
 #define BNEP_GENERAL			0x00
 #define BNEP_CONTROL			0x01
 #define BNEP_COMPRESSED			0x02
 #define BNEP_COMPRESSED_SRC_ONLY	0x03
 #define BNEP_COMPRESSED_DST_ONLY	0x04

-// Control types
+/* Control types */
 #define BNEP_CMD_NOT_UNDERSTOOD		0x00
 #define BNEP_SETUP_CONN_REQ		0x01
 #define BNEP_SETUP_CONN_RSP		0x02
 #define BNEP_FILTER_NET_TYPE_SET	0x03
 #define BNEP_FILTER_NET_TYPE_RSP	0x04
 #define BNEP_FILTER_MULTI_ADDR_SET	0x05
 #define BNEP_FILTER_MULTI_ADDR_RSP	0x06

-// Extension types
+/* Extension types */
 #define BNEP_EXT_CONTROL	0x00

-// Response messages
+/* Response messages */
 #define BNEP_SUCCESS	0x00

 #define BNEP_CONN_INVALID_DST	0x01
 #define BNEP_CONN_INVALID_SRC	0x02
 #define BNEP_CONN_INVALID_SVC	0x03
 #define BNEP_CONN_NOT_ALLOWED	0x04

 #define BNEP_FILTER_UNSUPPORTED_REQ	0x01
 #define BNEP_FILTER_INVALID_RANGE	0x02
 #define BNEP_FILTER_INVALID_MCADDR	0x02
 #define BNEP_FILTER_LIMIT_REACHED	0x03
 #define BNEP_FILTER_DENIED_SECURITY	0x04

-// L2CAP settings
+/* L2CAP settings */
 #define BNEP_MTU	1691
 #define BNEP_PSM	0x0f
 #define BNEP_FLUSH_TO	0xffff
 #define BNEP_CONNECT_TO	15
 #define BNEP_FILTER_TO	15

-// Headers
+/* Headers */
 #define BNEP_TYPE_MASK	0x7f
 #define BNEP_EXT_HEADER	0x80

 struct bnep_setup_conn_req {
 	__u8 type;
 	__u8 ctrl;
 	__u8 uuid_size;
 	__u8 service[0];
 } __packed;

 struct bnep_set_filter_req {
 	__u8 type;
 	__u8 ctrl;
 	__be16 len;
 	__u8 list[0];
 } __packed;

 struct bnep_control_rsp {
 	__u8 type;
 	__u8 ctrl;
 	__be16 resp;
 } __packed;

 struct bnep_ext_hdr {
 	__u8 type;
 	__u8 len;
 	__u8 data[0];
 } __packed;

 /* BNEP ioctl defines */
@@ -114,10 +114,10 @@ struct bnep_ext_hdr {
 #define BNEPGETCONNINFO	_IOR('B', 211, int)

 struct bnep_connadd_req {
-	int   sock;		// Connected socket
+	int   sock;		/* Connected socket */
 	__u32 flags;
 	__u16 role;
-	char  device[16];	// Name of the Ethernet device
+	char  device[16];	/* Name of the Ethernet device */
 };

 struct bnep_conndel_req {
@@ -148,14 +148,14 @@ int bnep_del_connection(struct bnep_conndel_req *req);
 int bnep_get_connlist(struct bnep_connlist_req *req);
 int bnep_get_conninfo(struct bnep_conninfo *ci);

-// BNEP sessions
+/* BNEP sessions */
 struct bnep_session {
 	struct list_head list;

 	unsigned int  role;
 	unsigned long state;
 	unsigned long flags;
-	atomic_t      killed;
+	struct task_struct *task;

 	struct ethhdr eh;
 	struct msghdr msg;
@@ -173,7 +173,7 @@ void bnep_sock_cleanup(void);

 static inline int bnep_mc_hash(__u8 *addr)
 {
-	return (crc32_be(~0, addr, ETH_ALEN) >> 26);
+	return crc32_be(~0, addr, ETH_ALEN) >> 26;
 }

 #endif
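
bnep_mc_hash() above maps a MAC address to one of 64 bits in the session's multicast filter by taking the top six bits of a big-endian CRC-32. The standalone sketch below reproduces that computation; the bitwise crc32_be() here is a plain-C stand-in written to match the kernel routine's MSB-first semantics (poly 0x04C11DB7), so treat it as illustrative rather than a drop-in replacement:

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

/* bitwise big-endian CRC-32, processing each byte MSB-first */
static uint32_t crc32_be(uint32_t crc, const uint8_t *p, size_t len)
{
	while (len--) {
		crc ^= (uint32_t)*p++ << 24;
		for (int i = 0; i < 8; i++)
			crc = (crc & 0x80000000) ?
			      (crc << 1) ^ 0x04C11DB7 : crc << 1;
	}
	return crc;
}

/* bnep_mc_hash(): the top six CRC bits select one of 64 filter bits */
static int bnep_mc_hash(const uint8_t *addr)
{
	return crc32_be(~0u, addr, ETH_ALEN) >> 26;
}

int main(void)
{
	uint64_t mc_filter = 0;
	uint8_t bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	/* "Always send broadcast": set the broadcast address's bit */
	mc_filter |= 1ULL << bnep_mc_hash(bcast);
	printf("filter=%#llx\n", (unsigned long long)mc_filter);
	return 0;
}
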
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 03d4d1245d58..ca39fcf010ce 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -36,6 +36,7 @@
 #include <linux/errno.h>
 #include <linux/net.h>
 #include <linux/slab.h>
+#include <linux/kthread.h>
 #include <net/sock.h>

 #include <linux/socket.h>
@@ -131,7 +132,8 @@ static int bnep_ctrl_set_netfilter(struct bnep_session *s, __be16 *data, int len
 		return -EILSEQ;

 	n = get_unaligned_be16(data);
-	data++; len -= 2;
+	data++;
+	len -= 2;

 	if (len < n)
 		return -EILSEQ;
@@ -176,7 +178,8 @@ static int bnep_ctrl_set_mcfilter(struct bnep_session *s, u8 *data, int len)
 		return -EILSEQ;

 	n = get_unaligned_be16(data);
-	data += 2; len -= 2;
+	data += 2;
+	len -= 2;

 	if (len < n)
 		return -EILSEQ;
@@ -187,6 +190,8 @@ static int bnep_ctrl_set_mcfilter(struct bnep_session *s, u8 *data, int len)
 	n /= (ETH_ALEN * 2);

 	if (n > 0) {
+		int i;
+
 		s->mc_filter = 0;

 		/* Always send broadcast */
@@ -196,18 +201,22 @@ static int bnep_ctrl_set_mcfilter(struct bnep_session *s, u8 *data, int len)
 		for (; n > 0; n--) {
 			u8 a1[6], *a2;

-			memcpy(a1, data, ETH_ALEN); data += ETH_ALEN;
-			a2 = data; data += ETH_ALEN;
+			memcpy(a1, data, ETH_ALEN);
+			data += ETH_ALEN;
+			a2 = data;
+			data += ETH_ALEN;

 			BT_DBG("mc filter %s -> %s",
 			       batostr((void *) a1), batostr((void *) a2));

-			#define INCA(a) { int i = 5; while (i >=0 && ++a[i--] == 0); }
-
 			/* Iterate from a1 to a2 */
 			set_bit(bnep_mc_hash(a1), (ulong *) &s->mc_filter);
 			while (memcmp(a1, a2, 6) < 0 && s->mc_filter != ~0LL) {
-				INCA(a1);
+				/* Increment a1 */
+				i = 5;
+				while (i >= 0 && ++a1[i--] == 0)
+					;
+
 				set_bit(bnep_mc_hash(a1), (ulong *) &s->mc_filter);
 			}
 		}
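
The hunk above open-codes the old INCA() macro: a1 is treated as a six-byte big-endian counter and incremented with carry until it reaches a2, hashing every address in between into the filter. A standalone sketch of just that iteration:

#include <stdio.h>
#include <string.h>

/* Walk every address from a1 to a2 inclusive, treating the six bytes as
 * one big-endian number - the open-coded replacement for INCA(). */
int main(void)
{
	unsigned char a1[6] = { 0, 0, 0, 0, 0, 0xfe };
	unsigned char a2[6] = { 0, 0, 0, 0, 1, 0x01 };

	for (;;) {
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		       a1[0], a1[1], a1[2], a1[3], a1[4], a1[5]);
		if (memcmp(a1, a2, 6) >= 0)
			break;
		/* increment with carry, starting at the last byte */
		int i = 5;
		while (i >= 0 && ++a1[i--] == 0)
			;
	}
	return 0;
}

Because the bytes are compared and incremented in the same big-endian order, a plain memcmp() is enough to detect when the counter has reached the end of the range.
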
@@ -227,7 +236,8 @@ static int bnep_rx_control(struct bnep_session *s, void *data, int len)
 	u8  cmd = *(u8 *)data;
 	int err = 0;

-	data++; len--;
+	data++;
+	len--;

 	switch (cmd) {
 	case BNEP_CMD_NOT_UNDERSTOOD:
@@ -302,7 +312,6 @@ static u8 __bnep_rx_hlen[] = {
 	ETH_ALEN + 2, /* BNEP_COMPRESSED_SRC_ONLY */
 	ETH_ALEN + 2  /* BNEP_COMPRESSED_DST_ONLY */
 };
-#define BNEP_RX_TYPES	(sizeof(__bnep_rx_hlen) - 1)

 static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)
 {
@@ -312,9 +321,10 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)

 	dev->stats.rx_bytes += skb->len;

-	type = *(u8 *) skb->data; skb_pull(skb, 1);
+	type = *(u8 *) skb->data;
+	skb_pull(skb, 1);

-	if ((type & BNEP_TYPE_MASK) > BNEP_RX_TYPES)
+	if ((type & BNEP_TYPE_MASK) >= sizeof(__bnep_rx_hlen))
 		goto badframe;

 	if ((type & BNEP_TYPE_MASK) == BNEP_CONTROL) {
@@ -367,14 +377,14 @@ static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb)

 	case BNEP_COMPRESSED_DST_ONLY:
 		memcpy(__skb_put(nskb, ETH_ALEN), skb_mac_header(skb),
 		       ETH_ALEN);
 		memcpy(__skb_put(nskb, ETH_ALEN + 2), s->eh.h_source,
 		       ETH_ALEN + 2);
 		break;

 	case BNEP_GENERAL:
 		memcpy(__skb_put(nskb, ETH_ALEN * 2), skb_mac_header(skb),
 		       ETH_ALEN * 2);
 		put_unaligned(s->eh.h_proto, (__be16 *) __skb_put(nskb, 2));
 		break;
 	}
@@ -470,15 +480,14 @@ static int bnep_session(void *arg)

 	BT_DBG("");

-	daemonize("kbnepd %s", dev->name);
 	set_user_nice(current, -15);

 	init_waitqueue_entry(&wait, current);
 	add_wait_queue(sk_sleep(sk), &wait);
-	while (!atomic_read(&s->killed)) {
+	while (!kthread_should_stop()) {
 		set_current_state(TASK_INTERRUPTIBLE);

-		// RX
+		/* RX */
 		while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
 			skb_orphan(skb);
 			bnep_rx_frame(s, skb);
@@ -487,7 +496,7 @@ static int bnep_session(void *arg)
 		if (sk->sk_state != BT_CONNECTED)
 			break;

-		// TX
+		/* TX */
 		while ((skb = skb_dequeue(&sk->sk_write_queue)))
 			if (bnep_tx_frame(s, skb))
 				break;
@@ -555,8 +564,8 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)

 	/* session struct allocated as private part of net_device */
 	dev = alloc_netdev(sizeof(struct bnep_session),
 			   (*req->device) ? req->device : "bnep%d",
 			   bnep_net_setup);
 	if (!dev)
 		return -ENOMEM;

@@ -571,7 +580,7 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
 	s = netdev_priv(dev);

 	/* This is rx header therefore addresses are swapped.
-	 * ie eh.h_dest is our local address. */
+	 * ie. eh.h_dest is our local address. */
 	memcpy(s->eh.h_dest, &src, ETH_ALEN);
 	memcpy(s->eh.h_source, &dst, ETH_ALEN);
 	memcpy(dev->dev_addr, s->eh.h_dest, ETH_ALEN);
@@ -597,17 +606,17 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
 	SET_NETDEV_DEVTYPE(dev, &bnep_type);

 	err = register_netdev(dev);
-	if (err) {
+	if (err)
 		goto failed;
-	}

 	__bnep_link_session(s);

-	err = kernel_thread(bnep_session, s, CLONE_KERNEL);
-	if (err < 0) {
+	s->task = kthread_run(bnep_session, s, "kbnepd %s", dev->name);
+	if (IS_ERR(s->task)) {
 		/* Session thread start failed, gotta cleanup. */
 		unregister_netdev(dev);
 		__bnep_unlink_session(s);
+		err = PTR_ERR(s->task);
 		goto failed;
 	}

@@ -631,15 +640,9 @@ int bnep_del_connection(struct bnep_conndel_req *req)
 	down_read(&bnep_session_sem);

 	s = __bnep_get_session(req->dst);
-	if (s) {
-		/* Wakeup user-space which is polling for socket errors.
-		 * This is temporary hack until we have shutdown in L2CAP */
-		s->sock->sk->sk_err = EUNATCH;
-
-		/* Kill session thread */
-		atomic_inc(&s->killed);
-		wake_up_interruptible(sk_sleep(s->sock->sk));
-	} else
+	if (s)
+		kthread_stop(s->task);
+	else
 		err = -ENOENT;

 	up_read(&bnep_session_sem);
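
The bnep/core.c changes above convert the session from kernel_thread() plus a killed flag (and the sk_err wakeup hack) to kthread_run()/kthread_stop(), which bundles stop signalling with waiting for the thread to actually exit. A rough user-space analogue using POSIX threads; the stop flag plus join stands in for kthread_should_stop()/kthread_stop(), and all names are hypothetical:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_t task;			/* like bnep_session->task */
static _Atomic bool should_stop;	/* like kthread_should_stop() */

static void *session_thread(void *arg)
{
	while (!should_stop) {
		/* ... dequeue and process RX/TX frames here ... */
	}
	return NULL;
}

int main(void)
{
	/* like kthread_run(): create and start the session thread */
	if (pthread_create(&task, NULL, session_thread, NULL))
		return 1;

	/* like kthread_stop(): flag the thread and wait for it to exit */
	should_stop = true;
	pthread_join(task, NULL);
	puts("session stopped");
	return 0;
}

The win over the old scheme is that stop and cleanup are ordered: once the stop call returns, the thread is known to be gone, so the caller can safely tear down the structures the thread was using.
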
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index d935da71ab3b..17800b1d28ea 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -39,10 +39,10 @@
 #include <linux/init.h>
 #include <linux/compat.h>
 #include <linux/gfp.h>
+#include <linux/uaccess.h>
 #include <net/sock.h>

 #include <asm/system.h>
-#include <asm/uaccess.h>

 #include "bnep.h"

diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c
index 67cff810c77d..744233cba244 100644
--- a/net/bluetooth/cmtp/capi.c
+++ b/net/bluetooth/cmtp/capi.c
@@ -35,6 +35,7 @@
 #include <linux/ioctl.h>
 #include <linux/file.h>
 #include <linux/wait.h>
+#include <linux/kthread.h>
 #include <net/sock.h>

 #include <linux/isdn/capilli.h>
@@ -143,7 +144,7 @@ static void cmtp_send_capimsg(struct cmtp_session *session, struct sk_buff *skb)

 	skb_queue_tail(&session->transmit, skb);

-	cmtp_schedule(session);
+	wake_up_interruptible(sk_sleep(session->sock->sk));
 }

 static void cmtp_send_interopmsg(struct cmtp_session *session,
@@ -386,8 +387,7 @@ static void cmtp_reset_ctr(struct capi_ctr *ctrl)

 	capi_ctr_down(ctrl);

-	atomic_inc(&session->terminate);
-	cmtp_schedule(session);
+	kthread_stop(session->task);
 }

 static void cmtp_register_appl(struct capi_ctr *ctrl, __u16 appl, capi_register_params *rp)
diff --git a/net/bluetooth/cmtp/cmtp.h b/net/bluetooth/cmtp/cmtp.h
index 785e79e953c5..db43b54ac9af 100644
--- a/net/bluetooth/cmtp/cmtp.h
+++ b/net/bluetooth/cmtp/cmtp.h
@@ -37,7 +37,7 @@
 #define CMTP_LOOPBACK	0

 struct cmtp_connadd_req {
-	int   sock;	// Connected socket
+	int   sock;	/* Connected socket */
 	__u32 flags;
 };

@@ -81,7 +81,7 @@ struct cmtp_session {

 	char name[BTNAMSIZ];

-	atomic_t terminate;
+	struct task_struct *task;

 	wait_queue_head_t wait;

@@ -121,13 +121,6 @@ void cmtp_detach_device(struct cmtp_session *session);

 void cmtp_recv_capimsg(struct cmtp_session *session, struct sk_buff *skb);

-static inline void cmtp_schedule(struct cmtp_session *session)
-{
-	struct sock *sk = session->sock->sk;
-
-	wake_up_interruptible(sk_sleep(sk));
-}
-
 /* CMTP init defines */
 int cmtp_init_sockets(void);
 void cmtp_cleanup_sockets(void);
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 964ea9126f9f..c5b11af908be 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -35,6 +35,7 @@
 #include <linux/ioctl.h>
 #include <linux/file.h>
 #include <linux/init.h>
+#include <linux/kthread.h>
 #include <net/sock.h>

 #include <linux/isdn/capilli.h>
@@ -235,9 +236,12 @@ static void cmtp_process_transmit(struct cmtp_session *session)

 		size = min_t(uint, ((tail < 258) ? (tail - 2) : (tail - 3)), skb->len);

-		if ((scb->id < 0) && ((scb->id = cmtp_alloc_block_id(session)) < 0)) {
-			skb_queue_head(&session->transmit, skb);
-			break;
+		if (scb->id < 0) {
+			scb->id = cmtp_alloc_block_id(session);
+			if (scb->id < 0) {
+				skb_queue_head(&session->transmit, skb);
+				break;
+			}
 		}

 		if (size < 256) {
@@ -284,12 +288,11 @@ static int cmtp_session(void *arg)

 	BT_DBG("session %p", session);

-	daemonize("kcmtpd_ctr_%d", session->num);
 	set_user_nice(current, -15);

 	init_waitqueue_entry(&wait, current);
 	add_wait_queue(sk_sleep(sk), &wait);
-	while (!atomic_read(&session->terminate)) {
+	while (!kthread_should_stop()) {
 		set_current_state(TASK_INTERRUPTIBLE);

 		if (sk->sk_state != BT_CONNECTED)
@@ -343,7 +346,8 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
343 346
344 bacpy(&session->bdaddr, &bt_sk(sock->sk)->dst); 347 bacpy(&session->bdaddr, &bt_sk(sock->sk)->dst);
345 348
346 session->mtu = min_t(uint, l2cap_pi(sock->sk)->omtu, l2cap_pi(sock->sk)->imtu); 349 session->mtu = min_t(uint, l2cap_pi(sock->sk)->chan->omtu,
350 l2cap_pi(sock->sk)->chan->imtu);
347 351
348 BT_DBG("mtu %d", session->mtu); 352 BT_DBG("mtu %d", session->mtu);
349 353
@@ -367,9 +371,12 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
367 371
368 __cmtp_link_session(session); 372 __cmtp_link_session(session);
369 373
370 err = kernel_thread(cmtp_session, session, CLONE_KERNEL); 374 session->task = kthread_run(cmtp_session, session, "kcmtpd_ctr_%d",
371 if (err < 0) 375 session->num);
376 if (IS_ERR(session->task)) {
377 err = PTR_ERR(session->task);
372 goto unlink; 378 goto unlink;
379 }
373 380
374 if (!(session->flags & (1 << CMTP_LOOPBACK))) { 381 if (!(session->flags & (1 << CMTP_LOOPBACK))) {
375 err = cmtp_attach_device(session); 382 err = cmtp_attach_device(session);
@@ -406,9 +413,8 @@ int cmtp_del_connection(struct cmtp_conndel_req *req)
406 /* Flush the transmit queue */ 413 /* Flush the transmit queue */
407 skb_queue_purge(&session->transmit); 414 skb_queue_purge(&session->transmit);
408 415
409 /* Kill session thread */ 416 /* Stop session thread */
410 atomic_inc(&session->terminate); 417 kthread_stop(session->task);
411 cmtp_schedule(session);
412 } else 418 } else
413 err = -ENOENT; 419 err = -ENOENT;
414 420
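The conversion above is the standard kthread lifecycle: kthread_run() creates and wakes the worker, the worker polls kthread_should_stop(), and kthread_stop() blocks until the thread function returns. A minimal sketch of that shape, assuming a kernel build environment (all names here are illustrative, not part of this patch):

#include <linux/err.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>

static struct task_struct *demo_task;

static int demo_fn(void *data)
{
	/* kthread_stop() sets the should-stop flag and wakes the thread,
	 * replacing the old atomic terminate counter plus wakeup helper. */
	while (!kthread_should_stop()) {
		/* ... drain queues, push pending frames ... */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}
	return 0;
}

static int __init demo_init(void)
{
	demo_task = kthread_run(demo_fn, NULL, "kdemo_worker");
	if (IS_ERR(demo_task))
		return PTR_ERR(demo_task);	/* no thread was created */
	return 0;
}

static void __exit demo_exit(void)
{
	kthread_stop(demo_task);	/* blocks until demo_fn() returns */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

kthread_run() returns an ERR_PTR() on failure rather than a negative pid, which is why the error path in the hunk switches from err < 0 to IS_ERR()/PTR_ERR().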
diff --git a/net/bluetooth/cmtp/sock.c b/net/bluetooth/cmtp/sock.c
index 7ea1979a8e4f..3f2dd5c25ae5 100644
--- a/net/bluetooth/cmtp/sock.c
+++ b/net/bluetooth/cmtp/sock.c
@@ -34,12 +34,12 @@
 #include <linux/file.h>
 #include <linux/compat.h>
 #include <linux/gfp.h>
+#include <linux/uaccess.h>
 #include <net/sock.h>
 
 #include <linux/isdn/capilli.h>
 
 #include <asm/system.h>
-#include <asm/uaccess.h>
 
 #include "cmtp.h"
 
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 7a6f56b2f49d..bcd158f40bb9 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -269,6 +269,19 @@ static void hci_conn_idle(unsigned long arg)
 		hci_conn_enter_sniff_mode(conn);
 }
 
+static void hci_conn_auto_accept(unsigned long arg)
+{
+	struct hci_conn *conn = (void *) arg;
+	struct hci_dev *hdev = conn->hdev;
+
+	hci_dev_lock(hdev);
+
+	hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
+							&conn->dst);
+
+	hci_dev_unlock(hdev);
+}
+
 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
 {
 	struct hci_conn *conn;
@@ -287,6 +300,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
 	conn->auth_type = HCI_AT_GENERAL_BONDING;
 	conn->io_capability = hdev->io_capability;
 	conn->remote_auth = 0xff;
+	conn->key_type = 0xff;
 
 	conn->power_save = 1;
 	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
@@ -311,6 +325,8 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
 
 	setup_timer(&conn->disc_timer, hci_conn_timeout, (unsigned long)conn);
 	setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);
+	setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept,
+							(unsigned long) conn);
 
 	atomic_set(&conn->refcnt, 0);
 
@@ -341,6 +357,8 @@ int hci_conn_del(struct hci_conn *conn)
 
 	del_timer(&conn->disc_timer);
 
+	del_timer(&conn->auto_accept_timer);
+
 	if (conn->type == ACL_LINK) {
 		struct hci_conn *sco = conn->link;
 		if (sco)
@@ -375,6 +393,9 @@ int hci_conn_del(struct hci_conn *conn)
 
 	hci_dev_put(hdev);
 
+	if (conn->handle == 0)
+		kfree(conn);
+
 	return 0;
 }
 
@@ -535,36 +556,93 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
 	return 0;
 }
 
+/* Encrypt the link */
+static void hci_conn_encrypt(struct hci_conn *conn)
+{
+	BT_DBG("conn %p", conn);
+
+	if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
+		struct hci_cp_set_conn_encrypt cp;
+		cp.handle = cpu_to_le16(conn->handle);
+		cp.encrypt = 0x01;
+		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
+									&cp);
+	}
+}
+
 /* Enable security */
 int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
 {
 	BT_DBG("conn %p", conn);
 
+	/* For SDP we don't need the link key. */
 	if (sec_level == BT_SECURITY_SDP)
 		return 1;
 
+	/* For non-2.1 devices and a low security level we don't need the
+	   link key. */
 	if (sec_level == BT_SECURITY_LOW &&
 				(!conn->ssp_mode || !conn->hdev->ssp_mode))
 		return 1;
 
-	if (conn->link_mode & HCI_LM_ENCRYPT)
-		return hci_conn_auth(conn, sec_level, auth_type);
+	/* For other security levels we need the link key. */
+	if (!(conn->link_mode & HCI_LM_AUTH))
+		goto auth;
+
+	/* An authenticated combination key has sufficient security for any
+	   security level. */
+	if (conn->key_type == HCI_LK_AUTH_COMBINATION)
+		goto encrypt;
+
+	/* An unauthenticated combination key has sufficient security for
+	   security levels 1 and 2. */
+	if (conn->key_type == HCI_LK_UNAUTH_COMBINATION &&
+			(sec_level == BT_SECURITY_MEDIUM ||
+			sec_level == BT_SECURITY_LOW))
+		goto encrypt;
+
+	/* A combination key always has sufficient security for security
+	   levels 1 and 2. A high security level additionally requires that
+	   the combination key was generated using the maximum PIN code
+	   length (16). This applies to pre-2.1 units. */
+	if (conn->key_type == HCI_LK_COMBINATION &&
+			(sec_level != BT_SECURITY_HIGH ||
+			conn->pin_length == 16))
+		goto encrypt;
+
+auth:
+	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
+		return 0;
 
-	if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
+	if (!hci_conn_auth(conn, sec_level, auth_type))
 		return 0;
 
-	if (hci_conn_auth(conn, sec_level, auth_type)) {
-		struct hci_cp_set_conn_encrypt cp;
-		cp.handle = cpu_to_le16(conn->handle);
-		cp.encrypt = 1;
-		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT,
-							sizeof(cp), &cp);
-	}
+encrypt:
+	if (conn->link_mode & HCI_LM_ENCRYPT)
+		return 1;
 
+	hci_conn_encrypt(conn);
 	return 0;
 }
 EXPORT_SYMBOL(hci_conn_security);
 
+/* Check secure link requirement */
+int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
+{
+	BT_DBG("conn %p", conn);
+
+	if (sec_level != BT_SECURITY_HIGH)
+		return 1; /* Accept if non-secure is sufficient */
+
+	if (conn->key_type == HCI_LK_AUTH_COMBINATION ||
+			(conn->key_type == HCI_LK_COMBINATION &&
+			conn->pin_length == 16))
+		return 1;
+
+	return 0; /* Reject the non-secure link */
+}
+EXPORT_SYMBOL(hci_conn_check_secure);
+
 /* Change link key */
 int hci_conn_change_link_key(struct hci_conn *conn)
 {
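The rewritten hci_conn_security() consults the stored key type before choosing between re-authentication and encryption, and hci_conn_check_secure() applies the same high-security rule on its own. A userspace sketch of that decision table, assuming the key-type constants carry the values used in this tree (the function name is hypothetical):

#include <stdio.h>

#define HCI_LK_COMBINATION        0x00
#define HCI_LK_DEBUG_COMBINATION  0x03
#define HCI_LK_UNAUTH_COMBINATION 0x04
#define HCI_LK_AUTH_COMBINATION   0x05

enum { SEC_LOW = 1, SEC_MEDIUM, SEC_HIGH };

/* Returns 1 when the stored key is already strong enough for the
 * requested level, i.e. the connection can go straight to encryption. */
static int key_sufficient(unsigned key_type, unsigned pin_len, int sec_level)
{
	if (key_type == HCI_LK_AUTH_COMBINATION)
		return 1;			/* good for any level */
	if (key_type == HCI_LK_UNAUTH_COMBINATION)
		return sec_level != SEC_HIGH;	/* levels 1 and 2 only */
	if (key_type == HCI_LK_COMBINATION)	/* pre-2.1 pairing */
		return sec_level != SEC_HIGH || pin_len == 16;
	return 0;				/* re-authenticate first */
}

int main(void)
{
	printf("%d\n", key_sufficient(HCI_LK_COMBINATION, 4, SEC_HIGH));	/* 0 */
	printf("%d\n", key_sufficient(HCI_LK_COMBINATION, 16, SEC_HIGH));	/* 1 */
	printf("%d\n", key_sufficient(HCI_LK_UNAUTH_COMBINATION, 0, SEC_MEDIUM)); /* 1 */
	return 0;
}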
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index b5a8afc2be33..815269b07f20 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -56,7 +56,6 @@
 static void hci_cmd_task(unsigned long arg);
 static void hci_rx_task(unsigned long arg);
 static void hci_tx_task(unsigned long arg);
-static void hci_notify(struct hci_dev *hdev, int event);
 
 static DEFINE_RWLOCK(hci_task_lock);
 
@@ -1021,18 +1020,54 @@ struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
 	return NULL;
 }
 
-int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
-						u8 *val, u8 type, u8 pin_len)
+static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
+						u8 key_type, u8 old_key_type)
+{
+	/* Legacy key */
+	if (key_type < 0x03)
+		return 1;
+
+	/* Debug keys are insecure so don't store them persistently */
+	if (key_type == HCI_LK_DEBUG_COMBINATION)
+		return 0;
+
+	/* Changed combination key and there's no previous one */
+	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
+		return 0;
+
+	/* Security mode 3 case */
+	if (!conn)
+		return 1;
+
+	/* Neither local nor remote side had no-bonding as requirement */
+	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
+		return 1;
+
+	/* Local side had dedicated bonding as requirement */
+	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
+		return 1;
+
+	/* Remote side had dedicated bonding as requirement */
+	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
+		return 1;
+
+	/* If none of the above criteria match, then don't store the key
+	 * persistently */
+	return 0;
+}
+
+int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
+				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
 {
 	struct link_key *key, *old_key;
-	u8 old_key_type;
+	u8 old_key_type, persistent;
 
 	old_key = hci_find_link_key(hdev, bdaddr);
 	if (old_key) {
 		old_key_type = old_key->type;
 		key = old_key;
 	} else {
-		old_key_type = 0xff;
+		old_key_type = conn ? conn->key_type : 0xff;
 		key = kzalloc(sizeof(*key), GFP_ATOMIC);
 		if (!key)
 			return -ENOMEM;
@@ -1041,16 +1076,37 @@ int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
 
 	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
 
+	/* Some buggy controller combinations generate a changed
+	 * combination key for legacy pairing even when there's no
+	 * previous key */
+	if (type == HCI_LK_CHANGED_COMBINATION &&
+					(!conn || conn->remote_auth == 0xff) &&
+					old_key_type == 0xff) {
+		type = HCI_LK_COMBINATION;
+		if (conn)
+			conn->key_type = type;
+	}
+
 	bacpy(&key->bdaddr, bdaddr);
 	memcpy(key->val, val, 16);
-	key->type = type;
 	key->pin_len = pin_len;
 
-	if (new_key)
-		mgmt_new_key(hdev->id, key, old_key_type);
-
-	if (type == 0x06)
+	if (type == HCI_LK_CHANGED_COMBINATION)
 		key->type = old_key_type;
+	else
+		key->type = type;
+
+	if (!new_key)
+		return 0;
+
+	persistent = hci_persistent_key(hdev, conn, type, old_key_type);
+
+	mgmt_new_key(hdev->id, key, persistent);
+
+	if (!persistent) {
+		list_del(&key->list);
+		kfree(key);
+	}
 
 	return 0;
 }
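hci_persistent_key() condenses the bonding rules into one verdict, which mgmt_new_key() now reports in place of the raw old key type; non-persistent keys are unlinked again right after the event. The same rule order in a standalone sketch (0x00/0x01 auth-requirement values mean no-bonding, 0x02/0x03 dedicated bonding; names are illustrative):

#include <stdio.h>

#define HCI_LK_DEBUG_COMBINATION   0x03
#define HCI_LK_CHANGED_COMBINATION 0x06

/* Same rule order as hci_persistent_key() above; conn_valid, auth_type
 * and remote_auth stand in for the hci_conn fields. */
static int persistent_key(unsigned key_type, unsigned old_key_type,
			  int conn_valid, unsigned auth_type,
			  unsigned remote_auth)
{
	if (key_type < 0x03)
		return 1;			/* legacy key: always stored */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;			/* debug keys are insecure */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;			/* change without an old key */
	if (!conn_valid)
		return 1;			/* security mode 3 */
	if (auth_type > 0x01 && remote_auth > 0x01)
		return 1;			/* nobody asked no-bonding */
	if (auth_type == 0x02 || auth_type == 0x03)
		return 1;			/* local dedicated bonding */
	if (remote_auth == 0x02 || remote_auth == 0x03)
		return 1;			/* remote dedicated bonding */
	return 0;
}

int main(void)
{
	/* Debug key from dedicated bonding: still never stored. */
	printf("%d\n", persistent_key(0x03, 0xff, 1, 0x02, 0x02)); /* 0 */
	/* Unauthenticated key, both sides no-bonding: dropped. */
	printf("%d\n", persistent_key(0x04, 0xff, 1, 0x00, 0x00)); /* 0 */
	return 0;
}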
@@ -1082,6 +1138,70 @@ static void hci_cmd_timer(unsigned long arg)
 	tasklet_schedule(&hdev->cmd_task);
 }
 
+struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
+							bdaddr_t *bdaddr)
+{
+	struct oob_data *data;
+
+	list_for_each_entry(data, &hdev->remote_oob_data, list)
+		if (bacmp(bdaddr, &data->bdaddr) == 0)
+			return data;
+
+	return NULL;
+}
+
+int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
+{
+	struct oob_data *data;
+
+	data = hci_find_remote_oob_data(hdev, bdaddr);
+	if (!data)
+		return -ENOENT;
+
+	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
+
+	list_del(&data->list);
+	kfree(data);
+
+	return 0;
+}
+
+int hci_remote_oob_data_clear(struct hci_dev *hdev)
+{
+	struct oob_data *data, *n;
+
+	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
+		list_del(&data->list);
+		kfree(data);
+	}
+
+	return 0;
+}
+
+int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
+								u8 *randomizer)
+{
+	struct oob_data *data;
+
+	data = hci_find_remote_oob_data(hdev, bdaddr);
+
+	if (!data) {
+		data = kmalloc(sizeof(*data), GFP_ATOMIC);
+		if (!data)
+			return -ENOMEM;
+
+		bacpy(&data->bdaddr, bdaddr);
+		list_add(&data->list, &hdev->remote_oob_data);
+	}
+
+	memcpy(data->hash, hash, sizeof(data->hash));
+	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
+
+	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
+
+	return 0;
+}
+
 /* Register HCI device */
 int hci_register_dev(struct hci_dev *hdev)
 {
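The remote OOB data store added above is a plain keyed list: look up by address, add-or-overwrite in place, remove, clear. A userspace model of the same shape (singly linked and string-keyed purely for brevity; the kernel code uses list_head and bdaddr_t):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct oob_entry {
	char addr[18];
	unsigned char hash[16];
	unsigned char randomizer[16];
	struct oob_entry *next;
};

static struct oob_entry *oob_list;

static struct oob_entry *oob_find(const char *addr)
{
	struct oob_entry *e;

	for (e = oob_list; e; e = e->next)
		if (strcmp(e->addr, addr) == 0)
			return e;
	return NULL;
}

/* Add-or-overwrite, mirroring hci_add_remote_oob_data() above. */
static int oob_add(const char *addr, const unsigned char *hash,
		   const unsigned char *rnd)
{
	struct oob_entry *e = oob_find(addr);

	if (!e) {
		e = calloc(1, sizeof(*e));
		if (!e)
			return -1;
		snprintf(e->addr, sizeof(e->addr), "%s", addr);
		e->next = oob_list;
		oob_list = e;
	}
	memcpy(e->hash, hash, 16);
	memcpy(e->randomizer, rnd, 16);
	return 0;
}

int main(void)
{
	unsigned char h[16] = { 0 }, r[16] = { 0 };

	oob_add("00:11:22:33:44:55", h, r);
	printf("found: %d\n", oob_find("00:11:22:33:44:55") != NULL);
	return 0;
}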
@@ -1146,6 +1266,8 @@ int hci_register_dev(struct hci_dev *hdev)
 
 	INIT_LIST_HEAD(&hdev->link_keys);
 
+	INIT_LIST_HEAD(&hdev->remote_oob_data);
+
 	INIT_WORK(&hdev->power_on, hci_power_on);
 	INIT_WORK(&hdev->power_off, hci_power_off);
 	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);
@@ -1225,6 +1347,7 @@ int hci_unregister_dev(struct hci_dev *hdev)
 	hci_blacklist_clear(hdev);
 	hci_uuids_clear(hdev);
 	hci_link_keys_clear(hdev);
+	hci_remote_oob_data_clear(hdev);
 	hci_dev_unlock_bh(hdev);
 
 	__hci_dev_put(hdev);
@@ -1274,7 +1397,7 @@ int hci_recv_frame(struct sk_buff *skb)
 EXPORT_SYMBOL(hci_recv_frame);
 
 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
-						int count, __u8 index, gfp_t gfp_mask)
+						int count, __u8 index)
 {
 	int len = 0;
 	int hlen = 0;
@@ -1304,7 +1427,7 @@ static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
 		break;
 	}
 
-	skb = bt_skb_alloc(len, gfp_mask);
+	skb = bt_skb_alloc(len, GFP_ATOMIC);
 	if (!skb)
 		return -ENOMEM;
 
@@ -1390,8 +1513,7 @@ int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
 		return -EILSEQ;
 
 	while (count) {
-		rem = hci_reassembly(hdev, type, data, count,
-						type - 1, GFP_ATOMIC);
+		rem = hci_reassembly(hdev, type, data, count, type - 1);
 		if (rem < 0)
 			return rem;
 
@@ -1425,8 +1547,8 @@ int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
 	} else
 		type = bt_cb(skb)->pkt_type;
 
-	rem = hci_reassembly(hdev, type, data,
-					count, STREAM_REASSEMBLY, GFP_ATOMIC);
+	rem = hci_reassembly(hdev, type, data, count,
+						STREAM_REASSEMBLY);
 	if (rem < 0)
 		return rem;
 
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index b2570159a044..77930aa522e3 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -56,7 +56,9 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
 	if (status)
 		return;
 
-	clear_bit(HCI_INQUIRY, &hdev->flags);
+	if (test_bit(HCI_MGMT, &hdev->flags) &&
+				test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
+		mgmt_discovering(hdev->id, 0);
 
 	hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
 
@@ -72,7 +74,9 @@ static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
 	if (status)
 		return;
 
-	clear_bit(HCI_INQUIRY, &hdev->flags);
+	if (test_bit(HCI_MGMT, &hdev->flags) &&
+				test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
+		mgmt_discovering(hdev->id, 0);
 
 	hci_conn_check_pending(hdev);
 }
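Both hunks gate the mgmt event on test_and_clear_bit(), so "discovery stopped" fires exactly once even when several completion paths race. The pattern works because the bitop returns the previous value; a compact userspace model with non-atomic stand-ins:

#include <stdio.h>

static unsigned long flags;
#define HCI_INQUIRY 0

/* Stand-ins for the kernel bitops: return the previous bit value, so
 * the test and the state change happen in one step (atomically in the
 * kernel versions). */
static int test_and_set(unsigned long *f, int bit)
{
	int old = (*f >> bit) & 1;
	*f |= 1UL << bit;
	return old;
}

static int test_and_clear(unsigned long *f, int bit)
{
	int old = (*f >> bit) & 1;
	*f &= ~(1UL << bit);
	return old;
}

int main(void)
{
	/* Duplicate "started" and "stopped" events each emit only once. */
	if (!test_and_set(&flags, HCI_INQUIRY))
		printf("mgmt_discovering(1)\n");
	if (!test_and_set(&flags, HCI_INQUIRY))
		printf("mgmt_discovering(1)\n");	/* suppressed */

	if (test_and_clear(&flags, HCI_INQUIRY))
		printf("mgmt_discovering(0)\n");
	if (test_and_clear(&flags, HCI_INQUIRY))
		printf("mgmt_discovering(0)\n");	/* suppressed */
	return 0;
}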
@@ -195,14 +199,17 @@ static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
 
 	BT_DBG("%s status 0x%x", hdev->name, status);
 
-	if (status)
-		return;
-
 	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
 	if (!sent)
 		return;
 
-	memcpy(hdev->dev_name, sent, 248);
+	if (test_bit(HCI_MGMT, &hdev->flags))
+		mgmt_set_local_name_complete(hdev->id, sent, status);
+
+	if (status)
+		return;
+
+	memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
 }
 
 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
@@ -214,7 +221,7 @@ static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
 	if (rp->status)
 		return;
 
-	memcpy(hdev->dev_name, rp->name, 248);
+	memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
 }
 
 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
@@ -470,14 +477,16 @@ static void hci_setup_event_mask(struct hci_dev *hdev)
 	 * command otherwise */
 	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
 
-	/* Events for 1.2 and newer controllers */
-	if (hdev->lmp_ver > 1) {
-		events[4] |= 0x01; /* Flow Specification Complete */
-		events[4] |= 0x02; /* Inquiry Result with RSSI */
-		events[4] |= 0x04; /* Read Remote Extended Features Complete */
-		events[5] |= 0x08; /* Synchronous Connection Complete */
-		events[5] |= 0x10; /* Synchronous Connection Changed */
-	}
+	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
+	 * any event mask for pre-1.2 devices */
+	if (hdev->lmp_ver <= 1)
+		return;
+
+	events[4] |= 0x01; /* Flow Specification Complete */
+	events[4] |= 0x02; /* Inquiry Result with RSSI */
+	events[4] |= 0x04; /* Read Remote Extended Features Complete */
+	events[5] |= 0x08; /* Synchronous Connection Complete */
+	events[5] |= 0x10; /* Synchronous Connection Changed */
 
 	if (hdev->features[3] & LMP_RSSI_INQ)
 		events[4] |= 0x04; /* Inquiry Result with RSSI */
@@ -821,16 +830,31 @@ static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
 							rp->status);
 }
 
+static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
+							struct sk_buff *skb)
+{
+	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
+
+	BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+	mgmt_read_local_oob_data_reply_complete(hdev->id, rp->hash,
+						rp->randomizer, rp->status);
+}
+
 static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
 {
 	BT_DBG("%s status 0x%x", hdev->name, status);
 
 	if (status) {
 		hci_req_complete(hdev, HCI_OP_INQUIRY, status);
-
 		hci_conn_check_pending(hdev);
-	} else
-		set_bit(HCI_INQUIRY, &hdev->flags);
+		return;
+	}
+
+	if (test_bit(HCI_MGMT, &hdev->flags) &&
+				!test_and_set_bit(HCI_INQUIRY,
+						&hdev->flags))
+		mgmt_discovering(hdev->id, 1);
 }
 
 static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
@@ -999,12 +1023,19 @@ static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
 	hci_dev_lock(hdev);
 
 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
-	if (conn && hci_outgoing_auth_needed(hdev, conn)) {
+	if (!conn)
+		goto unlock;
+
+	if (!hci_outgoing_auth_needed(hdev, conn))
+		goto unlock;
+
+	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
 		struct hci_cp_auth_requested cp;
 		cp.handle = __cpu_to_le16(conn->handle);
 		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
 	}
 
+unlock:
 	hci_dev_unlock(hdev);
 }
 
@@ -1194,7 +1225,9 @@ static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff
 
 	BT_DBG("%s status %d", hdev->name, status);
 
-	clear_bit(HCI_INQUIRY, &hdev->flags);
+	if (test_bit(HCI_MGMT, &hdev->flags) &&
+				test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
+		mgmt_discovering(hdev->id, 0);
 
 	hci_req_complete(hdev, HCI_OP_INQUIRY, status);
 
@@ -1214,7 +1247,13 @@ static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *
 
 	hci_dev_lock(hdev);
 
-	for (; num_rsp; num_rsp--) {
+	if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {
+
+		if (test_bit(HCI_MGMT, &hdev->flags))
+			mgmt_discovering(hdev->id, 1);
+	}
+
+	for (; num_rsp; num_rsp--, info++) {
 		bacpy(&data.bdaddr, &info->bdaddr);
 		data.pscan_rep_mode = info->pscan_rep_mode;
 		data.pscan_period_mode = info->pscan_period_mode;
@@ -1223,8 +1262,9 @@ static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *
 		data.clock_offset = info->clock_offset;
 		data.rssi = 0x00;
 		data.ssp_mode = 0x00;
-		info++;
 		hci_inquiry_cache_update(hdev, &data);
+		mgmt_device_found(hdev->id, &info->bdaddr, info->dev_class, 0,
+								NULL);
 	}
 
 	hci_dev_unlock(hdev);
@@ -1402,7 +1442,7 @@ static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff
 
 	conn->state = BT_CLOSED;
 
-	if (conn->type == ACL_LINK)
+	if (conn->type == ACL_LINK || conn->type == LE_LINK)
 		mgmt_disconnected(hdev->id, &conn->dst);
 
 	hci_proto_disconn_cfm(conn, ev->reason);
@@ -1428,7 +1468,6 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
 		conn->sec_level = conn->pending_sec_level;
 	} else {
 		mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
-		conn->sec_level = BT_SECURITY_LOW;
 	}
 
 	clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
@@ -1482,13 +1521,23 @@ static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb
 
 	hci_dev_lock(hdev);
 
+	if (ev->status == 0 && test_bit(HCI_MGMT, &hdev->flags))
+		mgmt_remote_name(hdev->id, &ev->bdaddr, ev->name);
+
 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
-	if (conn && hci_outgoing_auth_needed(hdev, conn)) {
+	if (!conn)
+		goto unlock;
+
+	if (!hci_outgoing_auth_needed(hdev, conn))
+		goto unlock;
+
+	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
 		struct hci_cp_auth_requested cp;
 		cp.handle = __cpu_to_le16(conn->handle);
 		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
 	}
 
+unlock:
 	hci_dev_unlock(hdev);
 }
 
@@ -1751,6 +1800,10 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
 		hci_cc_pin_code_neg_reply(hdev, skb);
 		break;
 
+	case HCI_OP_READ_LOCAL_OOB_DATA:
+		hci_cc_read_local_oob_data_reply(hdev, skb);
+		break;
+
 	case HCI_OP_LE_READ_BUFFER_SIZE:
 		hci_cc_le_read_buffer_size(hdev, skb);
 		break;
@@ -1984,9 +2037,16 @@ static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff
 	if (!test_bit(HCI_PAIRABLE, &hdev->flags))
 		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
 					sizeof(ev->bdaddr), &ev->bdaddr);
+	else if (test_bit(HCI_MGMT, &hdev->flags)) {
+		u8 secure;
 
-	if (test_bit(HCI_MGMT, &hdev->flags))
-		mgmt_pin_code_request(hdev->id, &ev->bdaddr);
+		if (conn->pending_sec_level == BT_SECURITY_HIGH)
+			secure = 1;
+		else
+			secure = 0;
+
+		mgmt_pin_code_request(hdev->id, &ev->bdaddr, secure);
+	}
 
 	hci_dev_unlock(hdev);
 }
@@ -2015,17 +2075,30 @@ static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff
 		BT_DBG("%s found key type %u for %s", hdev->name, key->type,
 							batostr(&ev->bdaddr));
 
-	if (!test_bit(HCI_DEBUG_KEYS, &hdev->flags) && key->type == 0x03) {
+	if (!test_bit(HCI_DEBUG_KEYS, &hdev->flags) &&
+				key->type == HCI_LK_DEBUG_COMBINATION) {
 		BT_DBG("%s ignoring debug key", hdev->name);
 		goto not_found;
 	}
 
 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
+	if (conn) {
+		if (key->type == HCI_LK_UNAUTH_COMBINATION &&
+				conn->auth_type != 0xff &&
+				(conn->auth_type & 0x01)) {
+			BT_DBG("%s ignoring unauthenticated key", hdev->name);
+			goto not_found;
+		}
 
-	if (key->type == 0x04 && conn && conn->auth_type != 0xff &&
-				(conn->auth_type & 0x01)) {
-		BT_DBG("%s ignoring unauthenticated key", hdev->name);
-		goto not_found;
+		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
+				conn->pending_sec_level == BT_SECURITY_HIGH) {
+			BT_DBG("%s ignoring key unauthenticated for high \
+							security", hdev->name);
+			goto not_found;
+		}
+
+		conn->key_type = key->type;
+		conn->pin_length = key->pin_len;
 	}
 
 	bacpy(&cp.bdaddr, &ev->bdaddr);
@@ -2057,11 +2130,15 @@ static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff
 		hci_conn_hold(conn);
 		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
 		pin_len = conn->pin_length;
+
+		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
+			conn->key_type = ev->key_type;
+
 		hci_conn_put(conn);
 	}
 
 	if (test_bit(HCI_LINK_KEYS, &hdev->flags))
-		hci_add_link_key(hdev, 1, &ev->bdaddr, ev->link_key,
+		hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
 							ev->key_type, pin_len);
 
 	hci_dev_unlock(hdev);
@@ -2136,11 +2213,17 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
 
 	hci_dev_lock(hdev);
 
+	if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {
+
+		if (test_bit(HCI_MGMT, &hdev->flags))
+			mgmt_discovering(hdev->id, 1);
+	}
+
 	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
 		struct inquiry_info_with_rssi_and_pscan_mode *info;
 		info = (void *) (skb->data + 1);
 
-		for (; num_rsp; num_rsp--) {
+		for (; num_rsp; num_rsp--, info++) {
 			bacpy(&data.bdaddr, &info->bdaddr);
 			data.pscan_rep_mode = info->pscan_rep_mode;
 			data.pscan_period_mode = info->pscan_period_mode;
@@ -2149,13 +2232,15 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
 			data.clock_offset = info->clock_offset;
 			data.rssi = info->rssi;
 			data.ssp_mode = 0x00;
-			info++;
 			hci_inquiry_cache_update(hdev, &data);
+			mgmt_device_found(hdev->id, &info->bdaddr,
+						info->dev_class, info->rssi,
+						NULL);
 		}
 	} else {
 		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
 
-		for (; num_rsp; num_rsp--) {
+		for (; num_rsp; num_rsp--, info++) {
 			bacpy(&data.bdaddr, &info->bdaddr);
 			data.pscan_rep_mode = info->pscan_rep_mode;
 			data.pscan_period_mode = info->pscan_period_mode;
@@ -2164,8 +2249,10 @@ static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct
 			data.clock_offset = info->clock_offset;
 			data.rssi = info->rssi;
 			data.ssp_mode = 0x00;
-			info++;
 			hci_inquiry_cache_update(hdev, &data);
+			mgmt_device_found(hdev->id, &info->bdaddr,
+						info->dev_class, info->rssi,
+						NULL);
 		}
 	}
 
@@ -2294,9 +2381,15 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
 	if (!num_rsp)
 		return;
 
+	if (!test_and_set_bit(HCI_INQUIRY, &hdev->flags)) {
+
+		if (test_bit(HCI_MGMT, &hdev->flags))
+			mgmt_discovering(hdev->id, 1);
+	}
+
 	hci_dev_lock(hdev);
 
-	for (; num_rsp; num_rsp--) {
+	for (; num_rsp; num_rsp--, info++) {
 		bacpy(&data.bdaddr, &info->bdaddr);
 		data.pscan_rep_mode = info->pscan_rep_mode;
 		data.pscan_period_mode = info->pscan_period_mode;
@@ -2305,8 +2398,9 @@ static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct
 		data.clock_offset = info->clock_offset;
 		data.rssi = info->rssi;
 		data.ssp_mode = 0x01;
-		info++;
 		hci_inquiry_cache_update(hdev, &data);
+		mgmt_device_found(hdev->id, &info->bdaddr, info->dev_class,
+						info->rssi, info->data);
 	}
 
 	hci_dev_unlock(hdev);
@@ -2326,7 +2420,7 @@ static inline u8 hci_get_auth_req(struct hci_conn *conn)
 
 	/* If remote requests no-bonding follow that lead */
 	if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
-		return 0x00;
+		return conn->remote_auth | (conn->auth_type & 0x01);
 
 	return conn->auth_type;
 }
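The changed return value keeps the local MITM requirement when following a remote no-bonding request: bit 0 of the authentication-requirements byte is the MITM flag, so OR-ing it back yields no-bonding-with-MITM instead of plain 0x00. Sketched with a hypothetical helper:

#include <stdio.h>

/* auth-requirements byte: bit 0 = MITM, upper bits = bonding type */
static unsigned auth_req(unsigned remote_auth, unsigned local_auth)
{
	if (remote_auth == 0x00 || remote_auth == 0x01)
		return remote_auth | (local_auth & 0x01);
	return local_auth;
}

int main(void)
{
	/* Remote asks no-bonding (0x00); local wants MITM (bit 0 set). */
	printf("0x%02x\n", auth_req(0x00, 0x03));	/* 0x01: no-bonding + MITM */
	return 0;
}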
@@ -2355,8 +2449,14 @@ static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff
 
 		bacpy(&cp.bdaddr, &ev->bdaddr);
 		cp.capability = conn->io_capability;
-		cp.oob_data = 0;
-		cp.authentication = hci_get_auth_req(conn);
+		conn->auth_type = hci_get_auth_req(conn);
+		cp.authentication = conn->auth_type;
+
+		if ((conn->out == 0x01 || conn->remote_oob == 0x01) &&
+				hci_find_remote_oob_data(hdev, &conn->dst))
+			cp.oob_data = 0x01;
+		else
+			cp.oob_data = 0x00;
 
 		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
 							sizeof(cp), &cp);
@@ -2364,7 +2464,7 @@ static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff
 		struct hci_cp_io_capability_neg_reply cp;
 
 		bacpy(&cp.bdaddr, &ev->bdaddr);
-		cp.reason = 0x16; /* Pairing not allowed */
+		cp.reason = 0x18; /* Pairing not allowed */
 
 		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
 							sizeof(cp), &cp);
@@ -2399,14 +2499,67 @@ static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
 						struct sk_buff *skb)
 {
 	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
+	int loc_mitm, rem_mitm, confirm_hint = 0;
+	struct hci_conn *conn;
 
 	BT_DBG("%s", hdev->name);
 
 	hci_dev_lock(hdev);
 
-	if (test_bit(HCI_MGMT, &hdev->flags))
-		mgmt_user_confirm_request(hdev->id, &ev->bdaddr, ev->passkey);
+	if (!test_bit(HCI_MGMT, &hdev->flags))
+		goto unlock;
+
+	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
+	if (!conn)
+		goto unlock;
+
+	loc_mitm = (conn->auth_type & 0x01);
+	rem_mitm = (conn->remote_auth & 0x01);
+
+	/* If we require MITM but the remote device can't provide that
+	 * (it has NoInputNoOutput) then reject the confirmation
+	 * request. The only exception is when we're dedicated bonding
+	 * initiators (connect_cfm_cb set) since then we always have the MITM
+	 * bit set. */
+	if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
+		BT_DBG("Rejecting request: remote device can't provide MITM");
+		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
+					sizeof(ev->bdaddr), &ev->bdaddr);
+		goto unlock;
+	}
+
+	/* If no side requires MITM protection, auto-accept */
+	if ((!loc_mitm || conn->remote_cap == 0x03) &&
+				(!rem_mitm || conn->io_capability == 0x03)) {
+
+		/* If we're not the initiator, request authorization to
+		 * proceed from user space (mgmt_user_confirm with
+		 * confirm_hint set to 1). */
+		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
+			BT_DBG("Confirming auto-accept as acceptor");
+			confirm_hint = 1;
+			goto confirm;
+		}
+
+		BT_DBG("Auto-accept of user confirmation with %ums delay",
+						hdev->auto_accept_delay);
+
+		if (hdev->auto_accept_delay > 0) {
+			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
+			mod_timer(&conn->auto_accept_timer, jiffies + delay);
+			goto unlock;
+		}
+
+		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
+					sizeof(ev->bdaddr), &ev->bdaddr);
+		goto unlock;
+	}
 
+confirm:
+	mgmt_user_confirm_request(hdev->id, &ev->bdaddr, ev->passkey,
+							confirm_hint);
+
+unlock:
 	hci_dev_unlock(hdev);
 }
 
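With a non-zero auto_accept_delay the reply is deferred by re-arming the per-connection timer installed in hci_conn_add() instead of being sent inline. The deferral pattern in isolation, as a hypothetical module against the timer API of this kernel generation:

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/timer.h>

static struct timer_list accept_timer;
static unsigned int accept_delay_ms = 2000;	/* cf. auto_accept_delay */

static void accept_fn(unsigned long data)
{
	/* Runs in timer (softirq) context once the delay has elapsed; the
	 * real handler sends HCI_OP_USER_CONFIRM_REPLY here. */
	pr_info("auto-accepting confirmation for conn %lx\n", data);
}

static int __init demo_init(void)
{
	setup_timer(&accept_timer, accept_fn, 0UL);
	/* Defer instead of replying immediately, as in the hunk above. */
	mod_timer(&accept_timer, jiffies + msecs_to_jiffies(accept_delay_ms));
	return 0;
}

static void __exit demo_exit(void)
{
	del_timer_sync(&accept_timer);	/* never leave a pending timer behind */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");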
@@ -2453,6 +2606,41 @@ static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_
 	hci_dev_unlock(hdev);
 }
 
+static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
+							struct sk_buff *skb)
+{
+	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
+	struct oob_data *data;
+
+	BT_DBG("%s", hdev->name);
+
+	hci_dev_lock(hdev);
+
+	if (!test_bit(HCI_MGMT, &hdev->flags))
+		goto unlock;
+
+	data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
+	if (data) {
+		struct hci_cp_remote_oob_data_reply cp;
+
+		bacpy(&cp.bdaddr, &ev->bdaddr);
+		memcpy(cp.hash, data->hash, sizeof(cp.hash));
+		memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
+
+		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
+									&cp);
+	} else {
+		struct hci_cp_remote_oob_data_neg_reply cp;
+
+		bacpy(&cp.bdaddr, &ev->bdaddr);
+		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
+									&cp);
+	}
+
+unlock:
+	hci_dev_unlock(hdev);
+}
+
 static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
@@ -2473,12 +2661,15 @@ static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff
 	}
 
 	if (ev->status) {
+		mgmt_connect_failed(hdev->id, &ev->bdaddr, ev->status);
 		hci_proto_connect_cfm(conn, ev->status);
 		conn->state = BT_CLOSED;
 		hci_conn_del(conn);
 		goto unlock;
 	}
 
+	mgmt_connected(hdev->id, &ev->bdaddr);
+
 	conn->handle = __le16_to_cpu(ev->handle);
 	conn->state = BT_CONNECTED;
 
@@ -2655,6 +2846,10 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
 		hci_le_meta_evt(hdev, skb);
 		break;
 
+	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
+		hci_remote_oob_data_request_evt(hdev, skb);
+		break;
+
 	default:
 		BT_DBG("%s event 0x%x", hdev->name, event);
 		break;
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 3c838a65a75a..a6c3aa8be1f7 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -216,13 +216,13 @@ static ssize_t show_type(struct device *dev, struct device_attribute *attr, char
 static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	struct hci_dev *hdev = dev_get_drvdata(dev);
-	char name[249];
+	char name[HCI_MAX_NAME_LENGTH + 1];
 	int i;
 
-	for (i = 0; i < 248; i++)
+	for (i = 0; i < HCI_MAX_NAME_LENGTH; i++)
 		name[i] = hdev->dev_name[i];
 
-	name[248] = '\0';
+	name[HCI_MAX_NAME_LENGTH] = '\0';
 	return sprintf(buf, "%s\n", name);
 }
 
@@ -277,10 +277,12 @@ static ssize_t show_idle_timeout(struct device *dev, struct device_attribute *at
 static ssize_t store_idle_timeout(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
 {
 	struct hci_dev *hdev = dev_get_drvdata(dev);
-	unsigned long val;
+	unsigned int val;
+	int rv;
 
-	if (strict_strtoul(buf, 0, &val) < 0)
-		return -EINVAL;
+	rv = kstrtouint(buf, 0, &val);
+	if (rv < 0)
+		return rv;
 
 	if (val != 0 && (val < 500 || val > 3600000))
 		return -EINVAL;
@@ -299,15 +301,14 @@ static ssize_t show_sniff_max_interval(struct device *dev, struct device_attribu
 static ssize_t store_sniff_max_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
 {
 	struct hci_dev *hdev = dev_get_drvdata(dev);
-	unsigned long val;
-
-	if (strict_strtoul(buf, 0, &val) < 0)
-		return -EINVAL;
+	u16 val;
+	int rv;
 
-	if (val < 0x0002 || val > 0xFFFE || val % 2)
-		return -EINVAL;
+	rv = kstrtou16(buf, 0, &val);
+	if (rv < 0)
+		return rv;
 
-	if (val < hdev->sniff_min_interval)
+	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
 		return -EINVAL;
 
 	hdev->sniff_max_interval = val;
@@ -324,15 +325,14 @@ static ssize_t show_sniff_min_interval(struct device *dev, struct device_attribu
 static ssize_t store_sniff_min_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
 {
 	struct hci_dev *hdev = dev_get_drvdata(dev);
-	unsigned long val;
+	u16 val;
+	int rv;
 
-	if (strict_strtoul(buf, 0, &val) < 0)
-		return -EINVAL;
-
-	if (val < 0x0002 || val > 0xFFFE || val % 2)
-		return -EINVAL;
+	rv = kstrtou16(buf, 0, &val);
+	if (rv < 0)
+		return rv;
 
-	if (val > hdev->sniff_max_interval)
+	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
 		return -EINVAL;
 
 	hdev->sniff_min_interval = val;
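All three store handlers move from strict_strtoul() into an unsigned long to kstrtouint()/kstrtou16() into a correctly sized variable; the old explicit 0xFFFE bound becomes redundant because an overflowing value already fails the parse, and the parser's own error code is propagated instead of a blanket -EINVAL. The resulting handler shape, with hypothetical names:

#include <linux/device.h>
#include <linux/kernel.h>

static u16 demo_interval;

/* Hypothetical store handler showing the kstrtou16() pattern adopted
 * above: parse into the right-sized type, propagate the real error
 * code, then apply only the checks the type cannot enforce. */
static ssize_t store_interval(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	u16 val;
	int rv;

	rv = kstrtou16(buf, 0, &val);	/* values > 0xffff already fail here */
	if (rv < 0)
		return rv;

	if (val == 0 || val % 2)	/* non-zero, even slots only */
		return -EINVAL;

	demo_interval = val;
	return count;
}
static DEVICE_ATTR(interval, 0200, NULL, store_interval);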
@@ -511,6 +511,35 @@ static const struct file_operations uuids_fops = {
 	.release	= single_release,
 };
 
+static int auto_accept_delay_set(void *data, u64 val)
+{
+	struct hci_dev *hdev = data;
+
+	hci_dev_lock_bh(hdev);
+
+	hdev->auto_accept_delay = val;
+
+	hci_dev_unlock_bh(hdev);
+
+	return 0;
+}
+
+static int auto_accept_delay_get(void *data, u64 *val)
+{
+	struct hci_dev *hdev = data;
+
+	hci_dev_lock_bh(hdev);
+
+	*val = hdev->auto_accept_delay;
+
+	hci_dev_unlock_bh(hdev);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
+					auto_accept_delay_set, "%llu\n");
+
 int hci_register_sysfs(struct hci_dev *hdev)
 {
 	struct device *dev = &hdev->dev;
@@ -545,6 +574,8 @@ int hci_register_sysfs(struct hci_dev *hdev)
 
 	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
 
+	debugfs_create_file("auto_accept_delay", 0444, hdev->debugfs, hdev,
+						&auto_accept_delay_fops);
 	return 0;
 }
 
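DEFINE_SIMPLE_ATTRIBUTE() generates the file_operations that read and write one u64 using the given format string, so only the two accessors are written by hand. A self-contained sketch of the same debugfs plumbing (names illustrative; a writable mode is used here so the set handler is reachable from userspace):

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>

static u64 demo_delay;

static int demo_delay_set(void *data, u64 val)
{
	*(u64 *)data = val;
	return 0;
}

static int demo_delay_get(void *data, u64 *val)
{
	*val = *(u64 *)data;
	return 0;
}

/* Expands to demo_delay_fops: open/read/write parse and print a u64
 * with "%llu\n", exactly like auto_accept_delay_fops above. */
DEFINE_SIMPLE_ATTRIBUTE(demo_delay_fops, demo_delay_get, demo_delay_set,
			"%llu\n");

static struct dentry *demo_file;

static int __init demo_init(void)
{
	demo_file = debugfs_create_file("demo_delay", 0644, NULL,
					&demo_delay, &demo_delay_fops);
	return 0;
}

static void __exit demo_exit(void)
{
	debugfs_remove(demo_file);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");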
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 5ec12971af6b..43b4c2deb7cc 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -37,6 +37,7 @@
 #include <linux/init.h>
 #include <linux/wait.h>
 #include <linux/mutex.h>
+#include <linux/kthread.h>
 #include <net/sock.h>
 
 #include <linux/input.h>
@@ -55,22 +56,24 @@ static DECLARE_RWSEM(hidp_session_sem);
 static LIST_HEAD(hidp_session_list);
 
 static unsigned char hidp_keycode[256] = {
-	 0,  0,  0,  0, 30, 48, 46, 32, 18, 33, 34, 35, 23, 36, 37, 38,
-	50, 49, 24, 25, 16, 19, 31, 20, 22, 47, 17, 45, 21, 44,  2,  3,
-	 4,  5,  6,  7,  8,  9, 10, 11, 28,  1, 14, 15, 57, 12, 13, 26,
-	27, 43, 43, 39, 40, 41, 51, 52, 53, 58, 59, 60, 61, 62, 63, 64,
-	65, 66, 67, 68, 87, 88, 99, 70,119,110,102,104,111,107,109,106,
-	105,108,103, 69, 98, 55, 74, 78, 96, 79, 80, 81, 75, 76, 77, 71,
-	72, 73, 82, 83, 86,127,116,117,183,184,185,186,187,188,189,190,
-	191,192,193,194,134,138,130,132,128,129,131,137,133,135,136,113,
-	115,114,  0,  0,  0,121,  0, 89, 93,124, 92, 94, 95,  0,  0,  0,
-	122,123, 90, 91, 85,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
-	 0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
-	 0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
-	 0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
-	 0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
-	29, 42, 56,125, 97, 54,100,126,164,166,165,163,161,115,114,113,
-	150,158,159,128,136,177,178,176,142,152,173,140
+	  0,   0,   0,   0,  30,  48,  46,  32,  18,  33,  34,  35,  23,  36,
+	 37,  38,  50,  49,  24,  25,  16,  19,  31,  20,  22,  47,  17,  45,
+	 21,  44,   2,   3,   4,   5,   6,   7,   8,   9,  10,  11,  28,   1,
+	 14,  15,  57,  12,  13,  26,  27,  43,  43,  39,  40,  41,  51,  52,
+	 53,  58,  59,  60,  61,  62,  63,  64,  65,  66,  67,  68,  87,  88,
+	 99,  70, 119, 110, 102, 104, 111, 107, 109, 106, 105, 108, 103,  69,
+	 98,  55,  74,  78,  96,  79,  80,  81,  75,  76,  77,  71,  72,  73,
+	 82,  83,  86, 127, 116, 117, 183, 184, 185, 186, 187, 188, 189, 190,
+	191, 192, 193, 194, 134, 138, 130, 132, 128, 129, 131, 137, 133, 135,
+	136, 113, 115, 114,   0,   0,   0, 121,   0,  89,  93, 124,  92,  94,
+	 95,   0,   0,   0, 122, 123,  90,  91,  85,   0,   0,   0,   0,   0,
+	  0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
+	  0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
+	  0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
+	  0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
+	  0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
+	 29,  42,  56, 125,  97,  54, 100, 126, 164, 166, 165, 163, 161, 115,
+	114, 113, 150, 158, 159, 128, 136, 177, 178, 176, 142, 152, 173, 140
 };
 
 static unsigned char hidp_mkeyspat[] = { 0x01, 0x01, 0x01, 0x01, 0x01, 0x01 };
@@ -462,7 +465,7 @@ static void hidp_idle_timeout(unsigned long arg)
 	struct hidp_session *session = (struct hidp_session *) arg;
 
 	atomic_inc(&session->terminate);
-	hidp_schedule(session);
+	wake_up_process(session->task);
 }
 
 static void hidp_set_timer(struct hidp_session *session)
@@ -533,9 +536,8 @@ static void hidp_process_hid_control(struct hidp_session *session,
 		skb_queue_purge(&session->ctrl_transmit);
 		skb_queue_purge(&session->intr_transmit);
 
-		/* Kill session thread */
 		atomic_inc(&session->terminate);
-		hidp_schedule(session);
+		wake_up_process(current);
 	}
 }
 
@@ -694,22 +696,10 @@ static int hidp_session(void *arg)
 	struct sock *ctrl_sk = session->ctrl_sock->sk;
 	struct sock *intr_sk = session->intr_sock->sk;
 	struct sk_buff *skb;
-	int vendor = 0x0000, product = 0x0000;
 	wait_queue_t ctrl_wait, intr_wait;
 
 	BT_DBG("session %p", session);
 
-	if (session->input) {
-		vendor = session->input->id.vendor;
-		product = session->input->id.product;
-	}
-
-	if (session->hid) {
-		vendor = session->hid->vendor;
-		product = session->hid->product;
-	}
-
-	daemonize("khidpd_%04x%04x", vendor, product);
 	set_user_nice(current, -15);
 
 	init_waitqueue_entry(&ctrl_wait, current);
@@ -718,10 +708,10 @@ static int hidp_session(void *arg)
 	add_wait_queue(sk_sleep(intr_sk), &intr_wait);
 	session->waiting_for_startup = 0;
 	wake_up_interruptible(&session->startup_queue);
+	set_current_state(TASK_INTERRUPTIBLE);
 	while (!atomic_read(&session->terminate)) {
-		set_current_state(TASK_INTERRUPTIBLE);
-
-		if (ctrl_sk->sk_state != BT_CONNECTED || intr_sk->sk_state != BT_CONNECTED)
+		if (ctrl_sk->sk_state != BT_CONNECTED ||
+				intr_sk->sk_state != BT_CONNECTED)
 			break;
 
 		while ((skb = skb_dequeue(&ctrl_sk->sk_receive_queue))) {
@@ -737,6 +727,7 @@ static int hidp_session(void *arg)
 		hidp_process_transmit(session);
 
 		schedule();
+		set_current_state(TASK_INTERRUPTIBLE);
 	}
 	set_current_state(TASK_RUNNING);
 	remove_wait_queue(sk_sleep(intr_sk), &intr_wait);
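The reordering in hidp_session() marks the task TASK_INTERRUPTIBLE before re-testing the loop conditions, closing the window in which a wake_up_process() arriving between the test and schedule() would be lost. The wakeup-safe loop shape, reduced to a sketch (a fragment, assumed to run as a kthread):

#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/sched.h>

/* The task is marked sleeping *before* the condition is tested, so a
 * wakeup arriving after the test merely turns the later schedule()
 * into a no-op instead of being missed. */
static int wait_loop(void *arg)
{
	atomic_t *terminate = arg;

	set_current_state(TASK_INTERRUPTIBLE);
	while (!atomic_read(terminate)) {
		/* ... drain receive queues, transmit pending frames ... */

		schedule();			/* sleeps unless already woken */
		set_current_state(TASK_INTERRUPTIBLE);
	}
	set_current_state(TASK_RUNNING);
	return 0;
}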
@@ -965,6 +956,7 @@ fault:
 int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, struct socket *intr_sock)
 {
 	struct hidp_session *session, *s;
+	int vendor, product;
 	int err;
 
 	BT_DBG("");
@@ -989,8 +981,10 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
 
 	bacpy(&session->bdaddr, &bt_sk(ctrl_sock->sk)->dst);
 
-	session->ctrl_mtu = min_t(uint, l2cap_pi(ctrl_sock->sk)->omtu, l2cap_pi(ctrl_sock->sk)->imtu);
-	session->intr_mtu = min_t(uint, l2cap_pi(intr_sock->sk)->omtu, l2cap_pi(intr_sock->sk)->imtu);
+	session->ctrl_mtu = min_t(uint, l2cap_pi(ctrl_sock->sk)->chan->omtu,
+					l2cap_pi(ctrl_sock->sk)->chan->imtu);
+	session->intr_mtu = min_t(uint, l2cap_pi(intr_sock->sk)->chan->omtu,
+					l2cap_pi(intr_sock->sk)->chan->imtu);
 
 	BT_DBG("ctrl mtu %d intr mtu %d", session->ctrl_mtu, session->intr_mtu);
 
@@ -1026,9 +1020,24 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
 
 	hidp_set_timer(session);
 
-	err = kernel_thread(hidp_session, session, CLONE_KERNEL);
-	if (err < 0)
+	if (session->hid) {
+		vendor  = session->hid->vendor;
+		product = session->hid->product;
+	} else if (session->input) {
+		vendor  = session->input->id.vendor;
+		product = session->input->id.product;
+	} else {
+		vendor  = 0x0000;
+		product = 0x0000;
+	}
+
+	session->task = kthread_run(hidp_session, session, "khidpd_%04x%04x",
+							vendor, product);
+	if (IS_ERR(session->task)) {
+		err = PTR_ERR(session->task);
 		goto unlink;
+	}
+
 	while (session->waiting_for_startup) {
 		wait_event_interruptible(session->startup_queue,
 			!session->waiting_for_startup);
@@ -1054,7 +1063,7 @@ err_add_device:
 	hid_destroy_device(session->hid);
 	session->hid = NULL;
 	atomic_inc(&session->terminate);
-	hidp_schedule(session);
+	wake_up_process(session->task);
 
 unlink:
 	hidp_del_timer(session);
@@ -1105,13 +1114,8 @@ int hidp_del_connection(struct hidp_conndel_req *req)
 			skb_queue_purge(&session->ctrl_transmit);
 			skb_queue_purge(&session->intr_transmit);
 
-			/* Wakeup user-space polling for socket errors */
-			session->intr_sock->sk->sk_err = EUNATCH;
-			session->ctrl_sock->sk->sk_err = EUNATCH;
-
-			/* Kill session thread */
 			atomic_inc(&session->terminate);
-			hidp_schedule(session);
+			wake_up_process(session->task);
 		}
 	} else
 		err = -ENOENT;
diff --git a/net/bluetooth/hidp/hidp.h b/net/bluetooth/hidp/hidp.h
index 13de5fa03480..af1bcc823f26 100644
--- a/net/bluetooth/hidp/hidp.h
+++ b/net/bluetooth/hidp/hidp.h
@@ -80,12 +80,12 @@
 #define HIDP_VIRTUAL_CABLE_UNPLUG	0
 #define HIDP_BOOT_PROTOCOL_MODE		1
 #define HIDP_BLUETOOTH_VENDOR_ID	9
 #define HIDP_WAITING_FOR_RETURN		10
 #define HIDP_WAITING_FOR_SEND_ACK	11
 
 struct hidp_connadd_req {
-	int ctrl_sock;	// Connected control socket
-	int intr_sock;	// Connteted interrupt socket
+	int   ctrl_sock;	/* Connected control socket */
+	int   intr_sock;	/* Connected interrupt socket */
 	__u16 parser;
 	__u16 rd_size;
 	__u8 __user *rd_data;
@@ -143,6 +143,7 @@ struct hidp_session {
 	uint intr_mtu;
 
 	atomic_t terminate;
+	struct task_struct *task;
 
 	unsigned char keys[8];
 	unsigned char leds;
diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c
index 250dfd46237d..178ac7f127ad 100644
--- a/net/bluetooth/hidp/sock.c
+++ b/net/bluetooth/hidp/sock.c
@@ -85,7 +85,8 @@ static int hidp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
 			return err;
 		}
 
-		if (csock->sk->sk_state != BT_CONNECTED || isock->sk->sk_state != BT_CONNECTED) {
+		if (csock->sk->sk_state != BT_CONNECTED ||
+				isock->sk->sk_state != BT_CONNECTED) {
 			sockfd_put(csock);
 			sockfd_put(isock);
 			return -EBADFD;
@@ -140,8 +141,8 @@ static int hidp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
 
 #ifdef CONFIG_COMPAT
 struct compat_hidp_connadd_req {
-	int ctrl_sock;		// Connected control socket
-	int intr_sock;		// Connteted interrupt socket
+	int ctrl_sock;		/* Connected control socket */
+	int intr_sock;		/* Connected interrupt socket */
 	__u16 parser;
 	__u16 rd_size;
 	compat_uptr_t rd_data;
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 2c8dd4494c63..7705e26e699f 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -62,168 +62,233 @@ static u8 l2cap_fixed_chan[8] = { 0x02, };
 
 static struct workqueue_struct *_busy_wq;
 
-struct bt_sock_list l2cap_sk_list = {
-	.lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
-};
+LIST_HEAD(chan_list);
+DEFINE_RWLOCK(chan_list_lock);
 
 static void l2cap_busy_work(struct work_struct *work);
 
 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
 				u8 code, u8 ident, u16 dlen, void *data);
+static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
 
 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
 
 /* ---- L2CAP channels ---- */
-static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
+static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
 {
-	struct sock *s;
-	for (s = l->head; s; s = l2cap_pi(s)->next_c) {
-		if (l2cap_pi(s)->dcid == cid)
-			break;
+	struct l2cap_chan *c;
+
+	list_for_each_entry(c, &conn->chan_l, list) {
+		if (c->dcid == cid)
+			return c;
 	}
-	return s;
+	return NULL;
+
 }
 
-static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
+static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
 {
-	struct sock *s;
-	for (s = l->head; s; s = l2cap_pi(s)->next_c) {
-		if (l2cap_pi(s)->scid == cid)
-			break;
+	struct l2cap_chan *c;
+
+	list_for_each_entry(c, &conn->chan_l, list) {
+		if (c->scid == cid)
+			return c;
 	}
-	return s;
+	return NULL;
 }
 
 /* Find channel with given SCID.
  * Returns locked socket */
-static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
+static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
 {
-	struct sock *s;
-	read_lock(&l->lock);
-	s = __l2cap_get_chan_by_scid(l, cid);
-	if (s)
-		bh_lock_sock(s);
-	read_unlock(&l->lock);
-	return s;
+	struct l2cap_chan *c;
+
+	read_lock(&conn->chan_lock);
+	c = __l2cap_get_chan_by_scid(conn, cid);
+	if (c)
+		bh_lock_sock(c->sk);
+	read_unlock(&conn->chan_lock);
+	return c;
 }
 
-static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
+static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
 {
-	struct sock *s;
-	for (s = l->head; s; s = l2cap_pi(s)->next_c) {
-		if (l2cap_pi(s)->ident == ident)
-			break;
+	struct l2cap_chan *c;
+
+	list_for_each_entry(c, &conn->chan_l, list) {
+		if (c->ident == ident)
+			return c;
+	}
+	return NULL;
+}
+
+static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
+{
+	struct l2cap_chan *c;
+
+	read_lock(&conn->chan_lock);
+	c = __l2cap_get_chan_by_ident(conn, ident);
+	if (c)
+		bh_lock_sock(c->sk);
+	read_unlock(&conn->chan_lock);
+	return c;
+}
+
+static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
+{
+	struct l2cap_chan *c;
+
+	list_for_each_entry(c, &chan_list, global_l) {
+		if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
+			goto found;
+	}
+
+	c = NULL;
+found:
+	return c;
+}
+
+int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
+{
+	int err;
+
+	write_lock_bh(&chan_list_lock);
+
+	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
+		err = -EADDRINUSE;
+		goto done;
+	}
+
+	if (psm) {
+		chan->psm = psm;
+		chan->sport = psm;
+		err = 0;
+	} else {
+		u16 p;
+
+		err = -EINVAL;
+		for (p = 0x1001; p < 0x1100; p += 2)
+			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
+				chan->psm = cpu_to_le16(p);
+				chan->sport = cpu_to_le16(p);
+				err = 0;
+				break;
+			}
 	}
-	return s;
+
+done:
+	write_unlock_bh(&chan_list_lock);
+	return err;
 }
 
-static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
+int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
 {
-	struct sock *s;
-	read_lock(&l->lock);
-	s = __l2cap_get_chan_by_ident(l, ident);
-	if (s)
-		bh_lock_sock(s);
-	read_unlock(&l->lock);
-	return s;
+	write_lock_bh(&chan_list_lock);
+
+	chan->scid = scid;
+
+	write_unlock_bh(&chan_list_lock);
+
+	return 0;
 }
 
-static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
+static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
 {
 	u16 cid = L2CAP_CID_DYN_START;
 
 	for (; cid < L2CAP_CID_DYN_END; cid++) {
-		if (!__l2cap_get_chan_by_scid(l, cid))
+		if (!__l2cap_get_chan_by_scid(conn, cid))
 			return cid;
 	}
 
 	return 0;
 }
 
-static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
+struct l2cap_chan *l2cap_chan_create(struct sock *sk)
 {
-	sock_hold(sk);
+	struct l2cap_chan *chan;
 
-	if (l->head)
-		l2cap_pi(l->head)->prev_c = sk;
+	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
+	if (!chan)
+		return NULL;
 
-	l2cap_pi(sk)->next_c = l->head;
-	l2cap_pi(sk)->prev_c = NULL;
-	l->head = sk;
-}
+	chan->sk = sk;
 
-static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
-{
-	struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
+	write_lock_bh(&chan_list_lock);
+	list_add(&chan->global_l, &chan_list);
+	write_unlock_bh(&chan_list_lock);
 
-	write_lock_bh(&l->lock);
-	if (sk == l->head)
-		l->head = next;
+	return chan;
+}
 
-	if (next)
-		l2cap_pi(next)->prev_c = prev;
-	if (prev)
-		l2cap_pi(prev)->next_c = next;
-	write_unlock_bh(&l->lock);
+void l2cap_chan_destroy(struct l2cap_chan *chan)
+{
+	write_lock_bh(&chan_list_lock);
+	list_del(&chan->global_l);
+	write_unlock_bh(&chan_list_lock);
 
-	__sock_put(sk);
+	kfree(chan);
 }
 
-static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
+static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
 {
-	struct l2cap_chan_list *l = &conn->chan_list;
+	struct sock *sk = chan->sk;
 
 	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
-			l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
+			chan->psm, chan->dcid);
 
 	conn->disc_reason = 0x13;
 
-	l2cap_pi(sk)->conn = conn;
+	chan->conn = conn;
 
 	if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
 		if (conn->hcon->type == LE_LINK) {
 			/* LE connection */
-			l2cap_pi(sk)->omtu = L2CAP_LE_DEFAULT_MTU;
-			l2cap_pi(sk)->scid = L2CAP_CID_LE_DATA;
-			l2cap_pi(sk)->dcid = L2CAP_CID_LE_DATA;
+			chan->omtu = L2CAP_LE_DEFAULT_MTU;
+			chan->scid = L2CAP_CID_LE_DATA;
+			chan->dcid = L2CAP_CID_LE_DATA;
 		} else {
 			/* Alloc CID for connection-oriented socket */
-			l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
-			l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
+			chan->scid = l2cap_alloc_cid(conn);
+			chan->omtu = L2CAP_DEFAULT_MTU;
 		}
 	} else if (sk->sk_type == SOCK_DGRAM) {
 		/* Connectionless socket */
-		l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
-		l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
-		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
+		chan->scid = L2CAP_CID_CONN_LESS;
+		chan->dcid = L2CAP_CID_CONN_LESS;
+		chan->omtu = L2CAP_DEFAULT_MTU;
 	} else {
 		/* Raw socket can send/recv signalling messages only */
-		l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
-		l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
-		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
+		chan->scid = L2CAP_CID_SIGNALING;
+		chan->dcid = L2CAP_CID_SIGNALING;
+		chan->omtu = L2CAP_DEFAULT_MTU;
 	}
 
-	__l2cap_chan_link(l, sk);
+	sock_hold(sk);
 
-	if (parent)
-		bt_accept_enqueue(parent, sk);
+	list_add(&chan->list, &conn->chan_l);
 }
 
 /* Delete channel.
  * Must be called on the locked socket. */
-void l2cap_chan_del(struct sock *sk, int err)
+void l2cap_chan_del(struct l2cap_chan *chan, int err)
 {
-	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
+	struct sock *sk = chan->sk;
+	struct l2cap_conn *conn = chan->conn;
 	struct sock *parent = bt_sk(sk)->parent;
 
 	l2cap_sock_clear_timer(sk);
 
-	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
+	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
 
 	if (conn) {
-		/* Unlink from channel list */
-		l2cap_chan_unlink(&conn->chan_list, sk);
-		l2cap_pi(sk)->conn = NULL;
+		/* Delete from channel list */
+		write_lock_bh(&conn->chan_lock);
+		list_del(&chan->list);
+		write_unlock_bh(&conn->chan_lock);
+		__sock_put(sk);
+
+		chan->conn = NULL;
 		hci_conn_put(conn->hcon);
 	}
 
@@ -239,29 +304,35 @@ void l2cap_chan_del(struct sock *sk, int err)
 	} else
 		sk->sk_state_change(sk);
 
-	skb_queue_purge(TX_QUEUE(sk));
+	if (!(chan->conf_state & L2CAP_CONF_OUTPUT_DONE &&
+			chan->conf_state & L2CAP_CONF_INPUT_DONE))
+		return;
+
+	skb_queue_purge(&chan->tx_q);
 
-	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
+	if (chan->mode == L2CAP_MODE_ERTM) {
 		struct srej_list *l, *tmp;
 
-		del_timer(&l2cap_pi(sk)->retrans_timer);
-		del_timer(&l2cap_pi(sk)->monitor_timer);
-		del_timer(&l2cap_pi(sk)->ack_timer);
+		del_timer(&chan->retrans_timer);
+		del_timer(&chan->monitor_timer);
+		del_timer(&chan->ack_timer);
 
-		skb_queue_purge(SREJ_QUEUE(sk));
-		skb_queue_purge(BUSY_QUEUE(sk));
+		skb_queue_purge(&chan->srej_q);
+		skb_queue_purge(&chan->busy_q);
 
-		list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
+		list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
 			list_del(&l->list);
 			kfree(l);
 		}
 	}
 }
 
-static inline u8 l2cap_get_auth_type(struct sock *sk)
+static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
 {
+	struct sock *sk = chan->sk;
+
 	if (sk->sk_type == SOCK_RAW) {
-		switch (l2cap_pi(sk)->sec_level) {
+		switch (chan->sec_level) {
 		case BT_SECURITY_HIGH:
 			return HCI_AT_DEDICATED_BONDING_MITM;
 		case BT_SECURITY_MEDIUM:
@@ -269,16 +340,16 @@ static inline u8 l2cap_get_auth_type(struct sock *sk)
 		default:
 			return HCI_AT_NO_BONDING;
 		}
-	} else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
-		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
-			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
+	} else if (chan->psm == cpu_to_le16(0x0001)) {
+		if (chan->sec_level == BT_SECURITY_LOW)
+			chan->sec_level = BT_SECURITY_SDP;
 
-		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
+		if (chan->sec_level == BT_SECURITY_HIGH)
 			return HCI_AT_NO_BONDING_MITM;
 		else
 			return HCI_AT_NO_BONDING;
 	} else {
-		switch (l2cap_pi(sk)->sec_level) {
+		switch (chan->sec_level) {
 		case BT_SECURITY_HIGH:
 			return HCI_AT_GENERAL_BONDING_MITM;
 		case BT_SECURITY_MEDIUM:
@@ -290,15 +361,14 @@ static inline u8 l2cap_get_auth_type(struct sock *sk)
 }
 
 /* Service level security */
-static inline int l2cap_check_security(struct sock *sk)
+static inline int l2cap_check_security(struct l2cap_chan *chan)
 {
-	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
+	struct l2cap_conn *conn = chan->conn;
 	__u8 auth_type;
 
-	auth_type = l2cap_get_auth_type(sk);
+	auth_type = l2cap_get_auth_type(chan);
 
-	return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
-								auth_type);
+	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
 }
 
 u8 l2cap_get_ident(struct l2cap_conn *conn)
@@ -341,11 +411,12 @@ void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *d
 	hci_send_acl(conn->hcon, skb, flags);
 }
 
-static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
+static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
 {
 	struct sk_buff *skb;
 	struct l2cap_hdr *lh;
-	struct l2cap_conn *conn = pi->conn;
+	struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
+	struct l2cap_conn *conn = chan->conn;
 	struct sock *sk = (struct sock *)pi;
 	int count, hlen = L2CAP_HDR_SIZE + 2;
 	u8 flags;
@@ -353,22 +424,22 @@ static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
 	if (sk->sk_state != BT_CONNECTED)
 		return;
 
-	if (pi->fcs == L2CAP_FCS_CRC16)
+	if (chan->fcs == L2CAP_FCS_CRC16)
 		hlen += 2;
 
-	BT_DBG("pi %p, control 0x%2.2x", pi, control);
+	BT_DBG("chan %p, control 0x%2.2x", chan, control);
 
 	count = min_t(unsigned int, conn->mtu, hlen);
 	control |= L2CAP_CTRL_FRAME_TYPE;
 
-	if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
+	if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
 		control |= L2CAP_CTRL_FINAL;
-		pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
+		chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
 	}
 
-	if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
+	if (chan->conn_state & L2CAP_CONN_SEND_PBIT) {
 		control |= L2CAP_CTRL_POLL;
-		pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
+		chan->conn_state &= ~L2CAP_CONN_SEND_PBIT;
 	}
 
 	skb = bt_skb_alloc(count, GFP_ATOMIC);
@@ -377,10 +448,10 @@ static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
 
 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
 	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
-	lh->cid = cpu_to_le16(pi->dcid);
+	lh->cid = cpu_to_le16(chan->dcid);
 	put_unaligned_le16(control, skb_put(skb, 2));
 
-	if (pi->fcs == L2CAP_FCS_CRC16) {
+	if (chan->fcs == L2CAP_FCS_CRC16) {
 		u16 fcs = crc16(0, (u8 *)lh, count - 2);
 		put_unaligned_le16(fcs, skb_put(skb, 2));
 	}
@@ -390,45 +461,46 @@ static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
 	else
 		flags = ACL_START;
 
-	hci_send_acl(pi->conn->hcon, skb, flags);
+	hci_send_acl(chan->conn->hcon, skb, flags);
 }
 
-static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
+static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
 {
-	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
+	if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
 		control |= L2CAP_SUPER_RCV_NOT_READY;
-		pi->conn_state |= L2CAP_CONN_RNR_SENT;
+		chan->conn_state |= L2CAP_CONN_RNR_SENT;
 	} else
 		control |= L2CAP_SUPER_RCV_READY;
 
-	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
 
-	l2cap_send_sframe(pi, control);
+	l2cap_send_sframe(chan, control);
 }
 
-static inline int __l2cap_no_conn_pending(struct sock *sk)
+static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
 {
-	return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
+	return !(chan->conf_state & L2CAP_CONF_CONNECT_PEND);
 }
 
-static void l2cap_do_start(struct sock *sk)
+static void l2cap_do_start(struct l2cap_chan *chan)
 {
-	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
+	struct l2cap_conn *conn = chan->conn;
 
 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
 		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
 			return;
 
-		if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
+		if (l2cap_check_security(chan) &&
+				__l2cap_no_conn_pending(chan)) {
 			struct l2cap_conn_req req;
-			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
-			req.psm = l2cap_pi(sk)->psm;
+			req.scid = cpu_to_le16(chan->scid);
+			req.psm = chan->psm;
 
-			l2cap_pi(sk)->ident = l2cap_get_ident(conn);
-			l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
+			chan->ident = l2cap_get_ident(conn);
+			chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
 
-			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
-					L2CAP_CONN_REQ, sizeof(req), &req);
+			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
+							sizeof(req), &req);
 		}
 	} else {
 		struct l2cap_info_req req;
@@ -461,23 +533,24 @@ static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
 	}
 }
 
-void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
+void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
 {
+	struct sock *sk;
 	struct l2cap_disconn_req req;
 
 	if (!conn)
 		return;
 
-	skb_queue_purge(TX_QUEUE(sk));
+	sk = chan->sk;
 
-	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
-		del_timer(&l2cap_pi(sk)->retrans_timer);
-		del_timer(&l2cap_pi(sk)->monitor_timer);
-		del_timer(&l2cap_pi(sk)->ack_timer);
+	if (chan->mode == L2CAP_MODE_ERTM) {
+		del_timer(&chan->retrans_timer);
+		del_timer(&chan->monitor_timer);
+		del_timer(&chan->ack_timer);
 	}
 
-	req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
-	req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
+	req.dcid = cpu_to_le16(chan->dcid);
+	req.scid = cpu_to_le16(chan->scid);
 	l2cap_send_cmd(conn, l2cap_get_ident(conn),
 			L2CAP_DISCONN_REQ, sizeof(req), &req);
 
@@ -488,17 +561,15 @@ void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
 /* ---- L2CAP connections ---- */
 static void l2cap_conn_start(struct l2cap_conn *conn)
 {
-	struct l2cap_chan_list *l = &conn->chan_list;
-	struct sock_del_list del, *tmp1, *tmp2;
-	struct sock *sk;
+	struct l2cap_chan *chan, *tmp;
 
 	BT_DBG("conn %p", conn);
 
-	INIT_LIST_HEAD(&del.list);
-
-	read_lock(&l->lock);
+	read_lock(&conn->chan_lock);
+
+	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
+		struct sock *sk = chan->sk;
 
-	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
 		bh_lock_sock(sk);
 
 		if (sk->sk_type != SOCK_SEQPACKET &&
@@ -510,45 +581,47 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
 		if (sk->sk_state == BT_CONNECT) {
 			struct l2cap_conn_req req;
 
-			if (!l2cap_check_security(sk) ||
-					!__l2cap_no_conn_pending(sk)) {
+			if (!l2cap_check_security(chan) ||
+					!__l2cap_no_conn_pending(chan)) {
 				bh_unlock_sock(sk);
 				continue;
 			}
 
-			if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
+			if (!l2cap_mode_supported(chan->mode,
 					conn->feat_mask)
-					&& l2cap_pi(sk)->conf_state &
+					&& chan->conf_state &
 					L2CAP_CONF_STATE2_DEVICE) {
-				tmp1 = kzalloc(sizeof(struct sock_del_list),
-						GFP_ATOMIC);
-				tmp1->sk = sk;
-				list_add_tail(&tmp1->list, &del.list);
+				/* __l2cap_sock_close() calls list_del(chan)
+				 * so release the lock */
+				read_unlock_bh(&conn->chan_lock);
+				__l2cap_sock_close(sk, ECONNRESET);
+				read_lock_bh(&conn->chan_lock);
 				bh_unlock_sock(sk);
 				continue;
 			}
 
-			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
-			req.psm = l2cap_pi(sk)->psm;
+			req.scid = cpu_to_le16(chan->scid);
+			req.psm = chan->psm;
 
-			l2cap_pi(sk)->ident = l2cap_get_ident(conn);
-			l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
+			chan->ident = l2cap_get_ident(conn);
+			chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
 
-			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
-					L2CAP_CONN_REQ, sizeof(req), &req);
+			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
+							sizeof(req), &req);
 
 		} else if (sk->sk_state == BT_CONNECT2) {
 			struct l2cap_conn_rsp rsp;
 			char buf[128];
-			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
-			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
+			rsp.scid = cpu_to_le16(chan->dcid);
+			rsp.dcid = cpu_to_le16(chan->scid);
 
-			if (l2cap_check_security(sk)) {
+			if (l2cap_check_security(chan)) {
 				if (bt_sk(sk)->defer_setup) {
 					struct sock *parent = bt_sk(sk)->parent;
 					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
 					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
-					parent->sk_data_ready(parent, 0);
+					if (parent)
+						parent->sk_data_ready(parent, 0);
 
 				} else {
 					sk->sk_state = BT_CONFIG;
@@ -560,80 +633,77 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
 				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
 			}
 
-			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
-					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
+			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
+							sizeof(rsp), &rsp);
 
-			if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
+			if (chan->conf_state & L2CAP_CONF_REQ_SENT ||
 					rsp.result != L2CAP_CR_SUCCESS) {
 				bh_unlock_sock(sk);
 				continue;
 			}
 
-			l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
+			chan->conf_state |= L2CAP_CONF_REQ_SENT;
 			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
-						l2cap_build_conf_req(sk, buf), buf);
-			l2cap_pi(sk)->num_conf_req++;
+						l2cap_build_conf_req(chan, buf), buf);
+			chan->num_conf_req++;
 		}
 
 		bh_unlock_sock(sk);
 	}
 
-	read_unlock(&l->lock);
-
-	list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
-		bh_lock_sock(tmp1->sk);
-		__l2cap_sock_close(tmp1->sk, ECONNRESET);
-		bh_unlock_sock(tmp1->sk);
-		list_del(&tmp1->list);
-		kfree(tmp1);
-	}
+	read_unlock(&conn->chan_lock);
 }
 
 /* Find socket with cid and source bdaddr.
  * Returns closest match, locked.
  */
-static struct sock *l2cap_get_sock_by_scid(int state, __le16 cid, bdaddr_t *src)
+static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
 {
-	struct sock *s, *sk = NULL, *sk1 = NULL;
-	struct hlist_node *node;
+	struct l2cap_chan *c, *c1 = NULL;
+
+	read_lock(&chan_list_lock);
 
-	read_lock(&l2cap_sk_list.lock);
+	list_for_each_entry(c, &chan_list, global_l) {
+		struct sock *sk = c->sk;
 
-	sk_for_each(sk, node, &l2cap_sk_list.head) {
 		if (state && sk->sk_state != state)
 			continue;
 
-		if (l2cap_pi(sk)->scid == cid) {
+		if (c->scid == cid) {
 			/* Exact match. */
-			if (!bacmp(&bt_sk(sk)->src, src))
-				break;
+			if (!bacmp(&bt_sk(sk)->src, src)) {
+				read_unlock(&chan_list_lock);
+				return c;
+			}
 
 			/* Closest match */
 			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
-				sk1 = sk;
+				c1 = c;
 		}
 	}
-	s = node ? sk : sk1;
-	if (s)
-		bh_lock_sock(s);
-	read_unlock(&l2cap_sk_list.lock);
 
-	return s;
+	read_unlock(&chan_list_lock);
+
+	return c1;
 }
 
 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
 {
-	struct l2cap_chan_list *list = &conn->chan_list;
-	struct sock *parent, *uninitialized_var(sk);
+	struct sock *parent, *sk;
+	struct l2cap_chan *chan, *pchan;
 
 	BT_DBG("");
 
 	/* Check if we have socket listening on cid */
-	parent = l2cap_get_sock_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
+	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
 							conn->src);
-	if (!parent)
+	if (!pchan)
 		return;
 
+	parent = pchan->sk;
+
+	bh_lock_sock(parent);
+
 	/* Check for backlog size */
 	if (sk_acceptq_is_full(parent)) {
 		BT_DBG("backlog full %d", parent->sk_ack_backlog);
@@ -644,22 +714,33 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
 	if (!sk)
 		goto clean;
 
-	write_lock_bh(&list->lock);
+	chan = l2cap_chan_create(sk);
+	if (!chan) {
+		l2cap_sock_kill(sk);
+		goto clean;
+	}
+
+	l2cap_pi(sk)->chan = chan;
+
+	write_lock_bh(&conn->chan_lock);
 
 	hci_conn_hold(conn->hcon);
 
 	l2cap_sock_init(sk, parent);
+
 	bacpy(&bt_sk(sk)->src, conn->src);
 	bacpy(&bt_sk(sk)->dst, conn->dst);
 
-	__l2cap_chan_add(conn, sk, parent);
+	bt_accept_enqueue(parent, sk);
+
+	__l2cap_chan_add(conn, chan);
 
 	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
 
 	sk->sk_state = BT_CONNECTED;
 	parent->sk_data_ready(parent, 0);
 
-	write_unlock_bh(&list->lock);
+	write_unlock_bh(&conn->chan_lock);
 
 clean:
 	bh_unlock_sock(parent);
@@ -667,17 +748,18 @@ clean:
 
 static void l2cap_conn_ready(struct l2cap_conn *conn)
 {
-	struct l2cap_chan_list *l = &conn->chan_list;
-	struct sock *sk;
+	struct l2cap_chan *chan;
 
 	BT_DBG("conn %p", conn);
 
 	if (!conn->hcon->out && conn->hcon->type == LE_LINK)
 		l2cap_le_conn_ready(conn);
 
-	read_lock(&l->lock);
+	read_lock(&conn->chan_lock);
+
+	list_for_each_entry(chan, &conn->chan_l, list) {
+		struct sock *sk = chan->sk;
 
-	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
 		bh_lock_sock(sk);
 
 		if (conn->hcon->type == LE_LINK) {
@@ -692,30 +774,31 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
 			sk->sk_state = BT_CONNECTED;
 			sk->sk_state_change(sk);
 		} else if (sk->sk_state == BT_CONNECT)
-			l2cap_do_start(sk);
+			l2cap_do_start(chan);
 
 		bh_unlock_sock(sk);
 	}
 
-	read_unlock(&l->lock);
+	read_unlock(&conn->chan_lock);
 }
 
 /* Notify sockets that we cannot guaranty reliability anymore */
 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
 {
-	struct l2cap_chan_list *l = &conn->chan_list;
-	struct sock *sk;
+	struct l2cap_chan *chan;
 
 	BT_DBG("conn %p", conn);
 
-	read_lock(&l->lock);
+	read_lock(&conn->chan_lock);
 
-	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
-		if (l2cap_pi(sk)->force_reliable)
+	list_for_each_entry(chan, &conn->chan_l, list) {
+		struct sock *sk = chan->sk;
+
+		if (chan->force_reliable)
 			sk->sk_err = err;
 	}
 
-	read_unlock(&l->lock);
+	read_unlock(&conn->chan_lock);
 }
 
 static void l2cap_info_timeout(unsigned long arg)
@@ -755,7 +838,9 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
 	conn->feat_mask = 0;
 
 	spin_lock_init(&conn->lock);
-	rwlock_init(&conn->chan_list.lock);
+	rwlock_init(&conn->chan_lock);
+
+	INIT_LIST_HEAD(&conn->chan_l);
 
 	if (hcon->type != LE_LINK)
 		setup_timer(&conn->info_timer, l2cap_info_timeout,
@@ -769,6 +854,7 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
 static void l2cap_conn_del(struct hci_conn *hcon, int err)
 {
 	struct l2cap_conn *conn = hcon->l2cap_data;
+	struct l2cap_chan *chan, *l;
 	struct sock *sk;
 
 	if (!conn)
@@ -779,9 +865,10 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
 	kfree_skb(conn->rx_skb);
 
 	/* Kill channels */
-	while ((sk = conn->chan_list.head)) {
+	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
+		sk = chan->sk;
 		bh_lock_sock(sk);
-		l2cap_chan_del(sk, err);
+		l2cap_chan_del(chan, err);
 		bh_unlock_sock(sk);
 		l2cap_sock_kill(sk);
 	}
@@ -793,12 +880,11 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
 	kfree(conn);
 }
 
-static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
+static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
 {
-	struct l2cap_chan_list *l = &conn->chan_list;
-	write_lock_bh(&l->lock);
-	__l2cap_chan_add(conn, sk, parent);
-	write_unlock_bh(&l->lock);
+	write_lock_bh(&conn->chan_lock);
+	__l2cap_chan_add(conn, chan);
+	write_unlock_bh(&conn->chan_lock);
 }
 
 /* ---- Socket interface ---- */
@@ -806,35 +892,39 @@ static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, stru
 /* Find socket with psm and source bdaddr.
  * Returns closest match.
  */
-static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
+static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
 {
-	struct sock *sk = NULL, *sk1 = NULL;
-	struct hlist_node *node;
+	struct l2cap_chan *c, *c1 = NULL;
+
+	read_lock(&chan_list_lock);
 
-	read_lock(&l2cap_sk_list.lock);
+	list_for_each_entry(c, &chan_list, global_l) {
+		struct sock *sk = c->sk;
 
-	sk_for_each(sk, node, &l2cap_sk_list.head) {
 		if (state && sk->sk_state != state)
 			continue;
 
-		if (l2cap_pi(sk)->psm == psm) {
+		if (c->psm == psm) {
 			/* Exact match. */
-			if (!bacmp(&bt_sk(sk)->src, src))
-				break;
+			if (!bacmp(&bt_sk(sk)->src, src)) {
+				read_unlock(&chan_list_lock);
+				return c;
+			}
 
 			/* Closest match */
 			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
-				sk1 = sk;
+				c1 = c;
 		}
 	}
 
-	read_unlock(&l2cap_sk_list.lock);
+	read_unlock(&chan_list_lock);
 
-	return node ? sk : sk1;
+	return c1;
 }
 
-int l2cap_do_connect(struct sock *sk)
+int l2cap_chan_connect(struct l2cap_chan *chan)
 {
+	struct sock *sk = chan->sk;
 	bdaddr_t *src = &bt_sk(sk)->src;
 	bdaddr_t *dst = &bt_sk(sk)->dst;
 	struct l2cap_conn *conn;
@@ -844,7 +934,7 @@ int l2cap_do_connect(struct sock *sk)
 	int err;
 
 	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
-							l2cap_pi(sk)->psm);
+							chan->psm);
 
 	hdev = hci_get_route(dst, src);
 	if (!hdev)
@@ -852,14 +942,14 @@ int l2cap_do_connect(struct sock *sk)
 
 	hci_dev_lock_bh(hdev);
 
-	auth_type = l2cap_get_auth_type(sk);
+	auth_type = l2cap_get_auth_type(chan);
 
-	if (l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA)
+	if (chan->dcid == L2CAP_CID_LE_DATA)
 		hcon = hci_connect(hdev, LE_LINK, dst,
-					l2cap_pi(sk)->sec_level, auth_type);
+					chan->sec_level, auth_type);
 	else
 		hcon = hci_connect(hdev, ACL_LINK, dst,
-					l2cap_pi(sk)->sec_level, auth_type);
+					chan->sec_level, auth_type);
 
 	if (IS_ERR(hcon)) {
 		err = PTR_ERR(hcon);
@@ -876,7 +966,7 @@ int l2cap_do_connect(struct sock *sk)
 	/* Update source addr of the socket */
 	bacpy(src, conn->src);
 
-	l2cap_chan_add(conn, sk, NULL);
+	l2cap_chan_add(conn, chan);
 
 	sk->sk_state = BT_CONNECT;
 	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
@@ -885,10 +975,10 @@ int l2cap_do_connect(struct sock *sk)
 		if (sk->sk_type != SOCK_SEQPACKET &&
 				sk->sk_type != SOCK_STREAM) {
 			l2cap_sock_clear_timer(sk);
-			if (l2cap_check_security(sk))
+			if (l2cap_check_security(chan))
 				sk->sk_state = BT_CONNECTED;
 		} else
-			l2cap_do_start(sk);
+			l2cap_do_start(chan);
 	}
 
 	err = 0;
@@ -901,12 +991,13 @@ done:
 
 int __l2cap_wait_ack(struct sock *sk)
 {
+	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
 	DECLARE_WAITQUEUE(wait, current);
 	int err = 0;
 	int timeo = HZ/5;
 
 	add_wait_queue(sk_sleep(sk), &wait);
-	while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
+	while ((chan->unacked_frames > 0 && chan->conn)) {
 		set_current_state(TASK_INTERRUPTIBLE);
 
 		if (!timeo)
@@ -932,68 +1023,69 @@ int __l2cap_wait_ack(struct sock *sk)
 
 static void l2cap_monitor_timeout(unsigned long arg)
 {
-	struct sock *sk = (void *) arg;
+	struct l2cap_chan *chan = (void *) arg;
+	struct sock *sk = chan->sk;
 
-	BT_DBG("sk %p", sk);
+	BT_DBG("chan %p", chan);
 
 	bh_lock_sock(sk);
-	if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
-		l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
+	if (chan->retry_count >= chan->remote_max_tx) {
+		l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
 		bh_unlock_sock(sk);
 		return;
 	}
 
-	l2cap_pi(sk)->retry_count++;
+	chan->retry_count++;
 	__mod_monitor_timer();
 
-	l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
+	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
 	bh_unlock_sock(sk);
 }
 
 static void l2cap_retrans_timeout(unsigned long arg)
 {
-	struct sock *sk = (void *) arg;
+	struct l2cap_chan *chan = (void *) arg;
+	struct sock *sk = chan->sk;
 
-	BT_DBG("sk %p", sk);
+	BT_DBG("chan %p", chan);
 
 	bh_lock_sock(sk);
-	l2cap_pi(sk)->retry_count = 1;
+	chan->retry_count = 1;
 	__mod_monitor_timer();
 
-	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
+	chan->conn_state |= L2CAP_CONN_WAIT_F;
 
-	l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
+	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
 	bh_unlock_sock(sk);
 }
 
-static void l2cap_drop_acked_frames(struct sock *sk)
+static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
 {
 	struct sk_buff *skb;
 
-	while ((skb = skb_peek(TX_QUEUE(sk))) &&
-			l2cap_pi(sk)->unacked_frames) {
-		if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
+	while ((skb = skb_peek(&chan->tx_q)) &&
+			chan->unacked_frames) {
+		if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
 			break;
 
-		skb = skb_dequeue(TX_QUEUE(sk));
+		skb = skb_dequeue(&chan->tx_q);
 		kfree_skb(skb);
 
-		l2cap_pi(sk)->unacked_frames--;
+		chan->unacked_frames--;
 	}
 
-	if (!l2cap_pi(sk)->unacked_frames)
-		del_timer(&l2cap_pi(sk)->retrans_timer);
+	if (!chan->unacked_frames)
+		del_timer(&chan->retrans_timer);
 }
 
-void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
+void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
 {
-	struct l2cap_pinfo *pi = l2cap_pi(sk);
-	struct hci_conn *hcon = pi->conn->hcon;
+	struct hci_conn *hcon = chan->conn->hcon;
 	u16 flags;
 
-	BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
+	BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len);
 
-	if (!pi->flushable && lmp_no_flush_capable(hcon->hdev))
+	if (!chan->flushable && lmp_no_flush_capable(hcon->hdev))
 		flags = ACL_START_NO_FLUSH;
 	else
 		flags = ACL_START;
@@ -1001,35 +1093,33 @@ void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
 	hci_send_acl(hcon, skb, flags);
 }
 
-void l2cap_streaming_send(struct sock *sk)
+void l2cap_streaming_send(struct l2cap_chan *chan)
 {
 	struct sk_buff *skb;
-	struct l2cap_pinfo *pi = l2cap_pi(sk);
 	u16 control, fcs;
 
-	while ((skb = skb_dequeue(TX_QUEUE(sk)))) {
+	while ((skb = skb_dequeue(&chan->tx_q))) {
 		control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
-		control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
+		control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
 		put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
 
-		if (pi->fcs == L2CAP_FCS_CRC16) {
+		if (chan->fcs == L2CAP_FCS_CRC16) {
 			fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
 			put_unaligned_le16(fcs, skb->data + skb->len - 2);
 		}
 
-		l2cap_do_send(sk, skb);
+		l2cap_do_send(chan, skb);
 
-		pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
+		chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
 	}
 }
 
-static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
+static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
 {
-	struct l2cap_pinfo *pi = l2cap_pi(sk);
 	struct sk_buff *skb, *tx_skb;
 	u16 control, fcs;
 
-	skb = skb_peek(TX_QUEUE(sk));
+	skb = skb_peek(&chan->tx_q);
 	if (!skb)
 		return;
 
@@ -1037,14 +1127,14 @@ static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
 		if (bt_cb(skb)->tx_seq == tx_seq)
 			break;
 
-		if (skb_queue_is_last(TX_QUEUE(sk), skb))
+		if (skb_queue_is_last(&chan->tx_q, skb))
 			return;
 
-	} while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
+	} while ((skb = skb_queue_next(&chan->tx_q, skb)));
 
-	if (pi->remote_max_tx &&
-			bt_cb(skb)->retries == pi->remote_max_tx) {
-		l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
+	if (chan->remote_max_tx &&
+			bt_cb(skb)->retries == chan->remote_max_tx) {
+		l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
 		return;
 	}
 
@@ -1053,39 +1143,39 @@ static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
 	control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
 	control &= L2CAP_CTRL_SAR;
 
-	if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
+	if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
 		control |= L2CAP_CTRL_FINAL;
-		pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
+		chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
 	}
 
-	control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
+	control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
 			| (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
 
 	put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
 
-	if (pi->fcs == L2CAP_FCS_CRC16) {
+	if (chan->fcs == L2CAP_FCS_CRC16) {
 		fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
 		put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
 	}
 
-	l2cap_do_send(sk, tx_skb);
+	l2cap_do_send(chan, tx_skb);
 }
 
-int l2cap_ertm_send(struct sock *sk)
+int l2cap_ertm_send(struct l2cap_chan *chan)
 {
 	struct sk_buff *skb, *tx_skb;
-	struct l2cap_pinfo *pi = l2cap_pi(sk);
+	struct sock *sk = chan->sk;
 	u16 control, fcs;
 	int nsent = 0;
 
 	if (sk->sk_state != BT_CONNECTED)
 		return -ENOTCONN;
 
-	while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
+	while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
 
-		if (pi->remote_max_tx &&
-				bt_cb(skb)->retries == pi->remote_max_tx) {
-			l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
+		if (chan->remote_max_tx &&
+				bt_cb(skb)->retries == chan->remote_max_tx) {
+			l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
 			break;
 		}
 
@@ -1096,36 +1186,36 @@ int l2cap_ertm_send(struct sock *sk)
 		control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
 		control &= L2CAP_CTRL_SAR;
 
-		if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
+		if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
 			control |= L2CAP_CTRL_FINAL;
-			pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
+			chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
 		}
-		control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
-				| (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
+		control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
+				| (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
 		put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
 
 
-		if (pi->fcs == L2CAP_FCS_CRC16) {
+		if (chan->fcs == L2CAP_FCS_CRC16) {
 			fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
 			put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
 		}
 
-		l2cap_do_send(sk, tx_skb);
+		l2cap_do_send(chan, tx_skb);
 
 		__mod_retrans_timer();
 
-		bt_cb(skb)->tx_seq = pi->next_tx_seq;
-		pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
+		bt_cb(skb)->tx_seq = chan->next_tx_seq;
+		chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
 
 		if (bt_cb(skb)->retries == 1)
-			pi->unacked_frames++;
+			chan->unacked_frames++;
 
-		pi->frames_sent++;
+		chan->frames_sent++;
 
-		if (skb_queue_is_last(TX_QUEUE(sk), skb))
-			sk->sk_send_head = NULL;
+		if (skb_queue_is_last(&chan->tx_q, skb))
+			chan->tx_send_head = NULL;
 		else
-			sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
+			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
 
 		nsent++;
 	}
@@ -1133,41 +1223,39 @@ int l2cap_ertm_send(struct sock *sk)
 	return nsent;
 }
 
-static int l2cap_retransmit_frames(struct sock *sk)
+static int l2cap_retransmit_frames(struct l2cap_chan *chan)
 {
-	struct l2cap_pinfo *pi = l2cap_pi(sk);
 	int ret;
 
-	if (!skb_queue_empty(TX_QUEUE(sk)))
-		sk->sk_send_head = TX_QUEUE(sk)->next;
+	if (!skb_queue_empty(&chan->tx_q))
+		chan->tx_send_head = chan->tx_q.next;
 
-	pi->next_tx_seq = pi->expected_ack_seq;
-	ret = l2cap_ertm_send(sk);
+	chan->next_tx_seq = chan->expected_ack_seq;
+	ret = l2cap_ertm_send(chan);
 	return ret;
 }
 
-static void l2cap_send_ack(struct l2cap_pinfo *pi)
+static void l2cap_send_ack(struct l2cap_chan *chan)
 {
-	struct sock *sk = (struct sock *)pi;
 	u16 control = 0;
 
-	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
 
-	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
+	if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
 		control |= L2CAP_SUPER_RCV_NOT_READY;
-		pi->conn_state |= L2CAP_CONN_RNR_SENT;
-		l2cap_send_sframe(pi, control);
+		chan->conn_state |= L2CAP_CONN_RNR_SENT;
+		l2cap_send_sframe(chan, control);
 		return;
 	}
 
-	if (l2cap_ertm_send(sk) > 0)
+	if (l2cap_ertm_send(chan) > 0)
 		return;
 
 	control |= L2CAP_SUPER_RCV_READY;
-	l2cap_send_sframe(pi, control);
+	l2cap_send_sframe(chan, control);
 }
 
-static void l2cap_send_srejtail(struct sock *sk)
+static void l2cap_send_srejtail(struct l2cap_chan *chan)
 {
 	struct srej_list *tail;
 	u16 control;
@@ -1175,15 +1263,15 @@ static void l2cap_send_srejtail(struct sock *sk)
 	control = L2CAP_SUPER_SELECT_REJECT;
 	control |= L2CAP_CTRL_FINAL;
 
-	tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
+	tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
 	control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
 
-	l2cap_send_sframe(l2cap_pi(sk), control);
+	l2cap_send_sframe(chan, control);
 }
 
 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
 {
-	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
+	struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
 	struct sk_buff **frag;
 	int err, sent = 0;
 
@@ -1213,9 +1301,10 @@ static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, in
 	return sent;
 }
 
-struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
+struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
 {
-	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
+	struct sock *sk = chan->sk;
+	struct l2cap_conn *conn = chan->conn;
 	struct sk_buff *skb;
 	int err, count, hlen = L2CAP_HDR_SIZE + 2;
 	struct l2cap_hdr *lh;
@@ -1230,9 +1319,9 @@ struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, s
 
 	/* Create L2CAP header */
 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
-	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
+	lh->cid = cpu_to_le16(chan->dcid);
 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
-	put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
+	put_unaligned_le16(chan->psm, skb_put(skb, 2));
 
 	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
 	if (unlikely(err < 0)) {
@@ -1242,9 +1331,10 @@ struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, s
 	return skb;
 }
 
-struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
+struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
 {
-	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
+	struct sock *sk = chan->sk;
+	struct l2cap_conn *conn = chan->conn;
 	struct sk_buff *skb;
 	int err, count, hlen = L2CAP_HDR_SIZE;
 	struct l2cap_hdr *lh;
@@ -1259,7 +1349,7 @@ struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size
 
 	/* Create L2CAP header */
 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
-	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
+	lh->cid = cpu_to_le16(chan->dcid);
 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
 
 	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
@@ -1270,9 +1360,10 @@ struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size
 	return skb;
 }
 
-struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
+struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
 {
-	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
+	struct sock *sk = chan->sk;
+	struct l2cap_conn *conn = chan->conn;
 	struct sk_buff *skb;
 	int err, count, hlen = L2CAP_HDR_SIZE + 2;
 	struct l2cap_hdr *lh;
@@ -1285,7 +1376,7 @@ struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, siz
 	if (sdulen)
 		hlen += 2;
 
-	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
+	if (chan->fcs == L2CAP_FCS_CRC16)
 		hlen += 2;
 
 	count = min_t(unsigned int, (conn->mtu - hlen), len);
@@ -1296,7 +1387,7 @@ struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, siz
 
 	/* Create L2CAP header */
 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
-	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
+	lh->cid = cpu_to_le16(chan->dcid);
 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
 	put_unaligned_le16(control, skb_put(skb, 2));
 	if (sdulen)
@@ -1308,16 +1399,15 @@ struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, siz
1308 return ERR_PTR(err); 1399 return ERR_PTR(err);
1309 } 1400 }
1310 1401
1311 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) 1402 if (chan->fcs == L2CAP_FCS_CRC16)
1312 put_unaligned_le16(0, skb_put(skb, 2)); 1403 put_unaligned_le16(0, skb_put(skb, 2));
1313 1404
1314 bt_cb(skb)->retries = 0; 1405 bt_cb(skb)->retries = 0;
1315 return skb; 1406 return skb;
1316} 1407}
1317 1408
1318int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len) 1409int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1319{ 1410{
1320 struct l2cap_pinfo *pi = l2cap_pi(sk);
1321 struct sk_buff *skb; 1411 struct sk_buff *skb;
1322 struct sk_buff_head sar_queue; 1412 struct sk_buff_head sar_queue;
1323 u16 control; 1413 u16 control;
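Nearly every hunk in this file follows the same mechanical pattern: state that used to be fetched through l2cap_pi(sk) on the socket now lives in struct l2cap_chan, and helpers receive the channel directly, grabbing chan->sk only where a socket-level call is still needed. A standalone model of the shape of that refactor — field and type names here are illustrative, not the kernel definitions:

#include <stdint.h>
#include <stdio.h>

struct sock_model { int state; };

/* Per-channel state split out of the socket's private data. */
struct chan_model {
	struct sock_model *sk;	/* back-pointer kept for socket ops */
	uint16_t dcid;
	uint16_t psm;
};

/* Helpers now receive the channel and no longer need the
 * l2cap_pi(sk)-style lookup on the socket. */
static void describe(const struct chan_model *chan)
{
	printf("dcid 0x%04x psm 0x%04x\n", chan->dcid, chan->psm);
}

int main(void)
{
	struct sock_model sk = { .state = 1 };
	struct chan_model chan = { .sk = &sk, .dcid = 0x0040, .psm = 0x0001 };

	describe(&chan);
	return 0;
}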
@@ -1325,26 +1415,26 @@ int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
 
 	skb_queue_head_init(&sar_queue);
 	control = L2CAP_SDU_START;
-	skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
+	skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
 	if (IS_ERR(skb))
 		return PTR_ERR(skb);
 
 	__skb_queue_tail(&sar_queue, skb);
-	len -= pi->remote_mps;
-	size += pi->remote_mps;
+	len -= chan->remote_mps;
+	size += chan->remote_mps;
 
 	while (len > 0) {
 		size_t buflen;
 
-		if (len > pi->remote_mps) {
+		if (len > chan->remote_mps) {
 			control = L2CAP_SDU_CONTINUE;
-			buflen = pi->remote_mps;
+			buflen = chan->remote_mps;
 		} else {
 			control = L2CAP_SDU_END;
 			buflen = len;
 		}
 
-		skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
+		skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
 		if (IS_ERR(skb)) {
 			skb_queue_purge(&sar_queue);
 			return PTR_ERR(skb);
@@ -1354,9 +1444,9 @@ int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
 		len -= buflen;
 		size += buflen;
 	}
-	skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
-	if (sk->sk_send_head == NULL)
-		sk->sk_send_head = sar_queue.next;
+	skb_queue_splice_tail(&sar_queue, &chan->tx_q);
+	if (chan->tx_send_head == NULL)
+		chan->tx_send_head = sar_queue.next;
 
 	return size;
 }
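The loop above cuts one SDU into I-frames of at most remote_mps payload bytes: the first fragment is tagged SDU_START and carries the total SDU length, middle fragments are SDU_CONTINUE, and the final one SDU_END. A runnable model of just the fragment-counting arithmetic, assuming (as the kernel caller does) that this path is only entered when len exceeds remote_mps:

#include <stdio.h>
#include <stddef.h>

/* Counts how many PDUs an SDU of sdu_len bytes becomes when split
 * into fragments of at most remote_mps bytes, mirroring the loop in
 * l2cap_sar_segment_sdu(). */
static int segment_sdu(size_t sdu_len, size_t remote_mps)
{
	size_t len = sdu_len;
	int pdus = 1;		/* first fragment: SDU_START (+ SDU length) */

	len -= remote_mps < len ? remote_mps : len;

	while (len > 0) {
		/* len > remote_mps -> SDU_CONTINUE, otherwise SDU_END */
		size_t buflen = len > remote_mps ? remote_mps : len;

		pdus++;
		len -= buflen;
	}
	return pdus;
}

int main(void)
{
	printf("%d PDUs\n", segment_sdu(1000, 339));	/* 3 fragments */
	return 0;
}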
@@ -1364,10 +1454,11 @@ int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
 static void l2cap_chan_ready(struct sock *sk)
 {
 	struct sock *parent = bt_sk(sk)->parent;
+	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
 
 	BT_DBG("sk %p, parent %p", sk, parent);
 
-	l2cap_pi(sk)->conf_state = 0;
+	chan->conf_state = 0;
 	l2cap_sock_clear_timer(sk);
 
 	if (!parent) {
@@ -1387,14 +1478,14 @@ static void l2cap_chan_ready(struct sock *sk)
 /* Copy frame to all raw sockets on that connection */
 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
 {
-	struct l2cap_chan_list *l = &conn->chan_list;
 	struct sk_buff *nskb;
-	struct sock *sk;
+	struct l2cap_chan *chan;
 
 	BT_DBG("conn %p", conn);
 
-	read_lock(&l->lock);
-	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
+	read_lock(&conn->chan_lock);
+	list_for_each_entry(chan, &conn->chan_l, list) {
+		struct sock *sk = chan->sk;
 		if (sk->sk_type != SOCK_RAW)
 			continue;
 
@@ -1408,7 +1499,7 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
 		if (sock_queue_rcv_skb(sk, nskb))
 			kfree_skb(nskb);
 	}
-	read_unlock(&l->lock);
+	read_unlock(&conn->chan_lock);
 }
 
 /* ---- L2CAP signalling commands ---- */
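The raw-socket walk above replaces a hand-rolled singly-linked list (l->head / next_c) with a kernel-style intrusive list iterated by list_for_each_entry() under conn->chan_lock. A self-contained userspace sketch of what that macro expands to — simplified, since the real kernel macro infers the entry type with typeof:

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct chan {
	int scid;
	struct list_head list;	/* linkage inside conn->chan_l */
};

/* Hard-coded to struct chan for the sketch. */
#define list_for_each_entry(pos, head, member)				\
	for (pos = container_of((head)->next, struct chan, member);	\
	     &pos->member != (head);					\
	     pos = container_of(pos->member.next, struct chan, member))

/* Minimal list_add_tail(). */
static void add_tail(struct list_head *n, struct list_head *head)
{
	n->next = head;
	n->prev = head->prev;
	head->prev->next = n;
	head->prev = n;
}

int main(void)
{
	struct list_head chan_l = { &chan_l, &chan_l };
	struct chan a = { 0x0040 }, b = { 0x0041 };
	struct chan *pos;

	add_tail(&a.list, &chan_l);
	add_tail(&b.list, &chan_l);

	list_for_each_entry(pos, &chan_l, list)
		printf("scid 0x%04x\n", pos->scid);
	return 0;
}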
@@ -1540,32 +1631,35 @@ static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
 
 static void l2cap_ack_timeout(unsigned long arg)
 {
-	struct sock *sk = (void *) arg;
+	struct l2cap_chan *chan = (void *) arg;
 
-	bh_lock_sock(sk);
-	l2cap_send_ack(l2cap_pi(sk));
-	bh_unlock_sock(sk);
+	bh_lock_sock(chan->sk);
+	l2cap_send_ack(chan);
+	bh_unlock_sock(chan->sk);
 }
 
-static inline void l2cap_ertm_init(struct sock *sk)
+static inline void l2cap_ertm_init(struct l2cap_chan *chan)
 {
-	l2cap_pi(sk)->expected_ack_seq = 0;
-	l2cap_pi(sk)->unacked_frames = 0;
-	l2cap_pi(sk)->buffer_seq = 0;
-	l2cap_pi(sk)->num_acked = 0;
-	l2cap_pi(sk)->frames_sent = 0;
+	struct sock *sk = chan->sk;
+
+	chan->expected_ack_seq = 0;
+	chan->unacked_frames = 0;
+	chan->buffer_seq = 0;
+	chan->num_acked = 0;
+	chan->frames_sent = 0;
+
+	setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
+						(unsigned long) chan);
+	setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
+						(unsigned long) chan);
+	setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
 
-	setup_timer(&l2cap_pi(sk)->retrans_timer,
-			l2cap_retrans_timeout, (unsigned long) sk);
-	setup_timer(&l2cap_pi(sk)->monitor_timer,
-			l2cap_monitor_timeout, (unsigned long) sk);
-	setup_timer(&l2cap_pi(sk)->ack_timer,
-			l2cap_ack_timeout, (unsigned long) sk);
+	skb_queue_head_init(&chan->srej_q);
+	skb_queue_head_init(&chan->busy_q);
 
-	__skb_queue_head_init(SREJ_QUEUE(sk));
-	__skb_queue_head_init(BUSY_QUEUE(sk));
+	INIT_LIST_HEAD(&chan->srej_l);
 
-	INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
+	INIT_WORK(&chan->busy_work, l2cap_busy_work);
 
 	sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
 }
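l2cap_ertm_init() now registers the three ERTM timers with the channel pointer as the callback argument, so handlers such as l2cap_ack_timeout() above start by casting the unsigned long back to a struct l2cap_chan *. A userspace stand-in for that pattern, assuming pointers round-trip through unsigned long as they do on the platforms this code targets:

#include <stdio.h>

struct timer {
	void (*fn)(unsigned long);
	unsigned long data;
};

struct chan { int retries; struct timer ack_timer; };

/* Same contract as the kernel's setup_timer(): remember callback
 * and opaque argument; arming/expiry is out of scope here. */
static void setup_timer(struct timer *t, void (*fn)(unsigned long),
			unsigned long data)
{
	t->fn = fn;
	t->data = data;
}

static void ack_timeout(unsigned long arg)
{
	/* Cast back, as l2cap_ack_timeout() does. */
	struct chan *chan = (struct chan *) arg;

	printf("ack timeout, retries %d\n", chan->retries);
}

int main(void)
{
	struct chan c = { .retries = 0 };

	setup_timer(&c.ack_timer, ack_timeout, (unsigned long) &c);
	c.ack_timer.fn(c.ack_timer.data);	/* simulate expiry */
	return 0;
}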
@@ -1583,38 +1677,37 @@ static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
 	}
 }
 
-int l2cap_build_conf_req(struct sock *sk, void *data)
+static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
 {
-	struct l2cap_pinfo *pi = l2cap_pi(sk);
 	struct l2cap_conf_req *req = data;
-	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
+	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
 	void *ptr = req->data;
 
-	BT_DBG("sk %p", sk);
+	BT_DBG("chan %p", chan);
 
-	if (pi->num_conf_req || pi->num_conf_rsp)
+	if (chan->num_conf_req || chan->num_conf_rsp)
 		goto done;
 
-	switch (pi->mode) {
+	switch (chan->mode) {
 	case L2CAP_MODE_STREAMING:
 	case L2CAP_MODE_ERTM:
-		if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
+		if (chan->conf_state & L2CAP_CONF_STATE2_DEVICE)
 			break;
 
 		/* fall through */
 	default:
-		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
+		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
 		break;
 	}
 
 done:
-	if (pi->imtu != L2CAP_DEFAULT_MTU)
-		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
+	if (chan->imtu != L2CAP_DEFAULT_MTU)
+		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
 
-	switch (pi->mode) {
+	switch (chan->mode) {
 	case L2CAP_MODE_BASIC:
-		if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
-				!(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
+		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
+				!(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
 			break;
 
 		rfc.mode = L2CAP_MODE_BASIC;
@@ -1630,24 +1723,24 @@ done:
 
 	case L2CAP_MODE_ERTM:
 		rfc.mode = L2CAP_MODE_ERTM;
-		rfc.txwin_size = pi->tx_win;
-		rfc.max_transmit = pi->max_tx;
+		rfc.txwin_size = chan->tx_win;
+		rfc.max_transmit = chan->max_tx;
 		rfc.retrans_timeout = 0;
 		rfc.monitor_timeout = 0;
 		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
-		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
-			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
+		if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
+			rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
 
 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
 						(unsigned long) &rfc);
 
-		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
+		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
 			break;
 
-		if (pi->fcs == L2CAP_FCS_NONE ||
-				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
-			pi->fcs = L2CAP_FCS_NONE;
-			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
+		if (chan->fcs == L2CAP_FCS_NONE ||
+				chan->conf_state & L2CAP_CONF_NO_FCS_RECV) {
+			chan->fcs = L2CAP_FCS_NONE;
+			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
 		}
 		break;
 
@@ -1658,43 +1751,42 @@ done:
 		rfc.retrans_timeout = 0;
 		rfc.monitor_timeout = 0;
 		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
-		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
-			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
+		if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
+			rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
 
 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
 						(unsigned long) &rfc);
 
-		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
+		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
 			break;
 
-		if (pi->fcs == L2CAP_FCS_NONE ||
-				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
-			pi->fcs = L2CAP_FCS_NONE;
-			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
+		if (chan->fcs == L2CAP_FCS_NONE ||
+				chan->conf_state & L2CAP_CONF_NO_FCS_RECV) {
+			chan->fcs = L2CAP_FCS_NONE;
+			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
 		}
 		break;
 	}
 
-	req->dcid = cpu_to_le16(pi->dcid);
+	req->dcid = cpu_to_le16(chan->dcid);
 	req->flags = cpu_to_le16(0);
 
 	return ptr - data;
 }
 
-static int l2cap_parse_conf_req(struct sock *sk, void *data)
+static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
 {
-	struct l2cap_pinfo *pi = l2cap_pi(sk);
 	struct l2cap_conf_rsp *rsp = data;
 	void *ptr = rsp->data;
-	void *req = pi->conf_req;
-	int len = pi->conf_len;
+	void *req = chan->conf_req;
+	int len = chan->conf_len;
 	int type, hint, olen;
 	unsigned long val;
 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
 	u16 mtu = L2CAP_DEFAULT_MTU;
 	u16 result = L2CAP_CONF_SUCCESS;
 
-	BT_DBG("sk %p", sk);
+	BT_DBG("chan %p", chan);
 
 	while (len >= L2CAP_CONF_OPT_SIZE) {
 		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
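Each option that l2cap_add_conf_opt() appends to the configure request, and that the l2cap_get_conf_opt() loop above consumes, is a small type-length-value record. A standalone sketch of that encoding — simplified, since the kernel helper takes the value as an unsigned long, switches on the length, and stores multi-byte values little-endian:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Append one TLV option: 1 byte type, 1 byte length, then the value. */
static void add_conf_opt(uint8_t **ptr, uint8_t type, uint8_t len,
			 const void *val)
{
	uint8_t *p = *ptr;

	*p++ = type;
	*p++ = len;
	memcpy(p, val, len);
	*ptr = p + len;
}

int main(void)
{
	uint8_t buf[64], *ptr = buf;
	uint16_t mtu = 672;	/* L2CAP_DEFAULT_MTU */

	add_conf_opt(&ptr, 0x01 /* MTU */, sizeof(mtu), &mtu);
	printf("encoded %zu bytes\n", (size_t)(ptr - buf));	/* 4 */
	return 0;
}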
@@ -1708,7 +1800,7 @@ static int l2cap_parse_conf_req(struct sock *sk, void *data)
 			break;
 
 		case L2CAP_CONF_FLUSH_TO:
-			pi->flush_to = val;
+			chan->flush_to = val;
 			break;
 
 		case L2CAP_CONF_QOS:
@@ -1721,7 +1813,7 @@ static int l2cap_parse_conf_req(struct sock *sk, void *data)
 
 		case L2CAP_CONF_FCS:
 			if (val == L2CAP_FCS_NONE)
-				pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
+				chan->conf_state |= L2CAP_CONF_NO_FCS_RECV;
 
 			break;
 
@@ -1735,30 +1827,30 @@ static int l2cap_parse_conf_req(struct sock *sk, void *data)
 		}
 	}
 
-	if (pi->num_conf_rsp || pi->num_conf_req > 1)
+	if (chan->num_conf_rsp || chan->num_conf_req > 1)
 		goto done;
 
-	switch (pi->mode) {
+	switch (chan->mode) {
 	case L2CAP_MODE_STREAMING:
 	case L2CAP_MODE_ERTM:
-		if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
-			pi->mode = l2cap_select_mode(rfc.mode,
-					pi->conn->feat_mask);
+		if (!(chan->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
+			chan->mode = l2cap_select_mode(rfc.mode,
+					chan->conn->feat_mask);
 			break;
 		}
 
-		if (pi->mode != rfc.mode)
+		if (chan->mode != rfc.mode)
 			return -ECONNREFUSED;
 
 		break;
 	}
 
 done:
-	if (pi->mode != rfc.mode) {
+	if (chan->mode != rfc.mode) {
 		result = L2CAP_CONF_UNACCEPT;
-		rfc.mode = pi->mode;
+		rfc.mode = chan->mode;
 
-		if (pi->num_conf_rsp == 1)
+		if (chan->num_conf_rsp == 1)
 			return -ECONNREFUSED;
 
 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
@@ -1773,32 +1865,32 @@ done:
 	if (mtu < L2CAP_DEFAULT_MIN_MTU)
 		result = L2CAP_CONF_UNACCEPT;
 	else {
-		pi->omtu = mtu;
-		pi->conf_state |= L2CAP_CONF_MTU_DONE;
+		chan->omtu = mtu;
+		chan->conf_state |= L2CAP_CONF_MTU_DONE;
 	}
-	l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
+	l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
 
 	switch (rfc.mode) {
 	case L2CAP_MODE_BASIC:
-		pi->fcs = L2CAP_FCS_NONE;
-		pi->conf_state |= L2CAP_CONF_MODE_DONE;
+		chan->fcs = L2CAP_FCS_NONE;
+		chan->conf_state |= L2CAP_CONF_MODE_DONE;
 		break;
 
 	case L2CAP_MODE_ERTM:
-		pi->remote_tx_win = rfc.txwin_size;
-		pi->remote_max_tx = rfc.max_transmit;
+		chan->remote_tx_win = rfc.txwin_size;
+		chan->remote_max_tx = rfc.max_transmit;
 
-		if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
-			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
+		if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
+			rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
 
-		pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
+		chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
 
 		rfc.retrans_timeout =
 			le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
 		rfc.monitor_timeout =
 			le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
 
-		pi->conf_state |= L2CAP_CONF_MODE_DONE;
+		chan->conf_state |= L2CAP_CONF_MODE_DONE;
 
 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
 					sizeof(rfc), (unsigned long) &rfc);
@@ -1806,12 +1898,12 @@ done:
 		break;
 
 	case L2CAP_MODE_STREAMING:
-		if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
-			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
+		if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
+			rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
 
-		pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
+		chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
 
-		pi->conf_state |= L2CAP_CONF_MODE_DONE;
+		chan->conf_state |= L2CAP_CONF_MODE_DONE;
 
 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
 					sizeof(rfc), (unsigned long) &rfc);
@@ -1822,29 +1914,28 @@ done:
 		result = L2CAP_CONF_UNACCEPT;
 
 		memset(&rfc, 0, sizeof(rfc));
-		rfc.mode = pi->mode;
+		rfc.mode = chan->mode;
 	}
 
 	if (result == L2CAP_CONF_SUCCESS)
-		pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
+		chan->conf_state |= L2CAP_CONF_OUTPUT_DONE;
 	}
-	rsp->scid = cpu_to_le16(pi->dcid);
+	rsp->scid = cpu_to_le16(chan->dcid);
 	rsp->result = cpu_to_le16(result);
 	rsp->flags = cpu_to_le16(0x0000);
 
 	return ptr - data;
 }
 
-static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
+static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
 {
-	struct l2cap_pinfo *pi = l2cap_pi(sk);
 	struct l2cap_conf_req *req = data;
 	void *ptr = req->data;
 	int type, olen;
 	unsigned long val;
 	struct l2cap_conf_rfc rfc;
 
-	BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
+	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
 
 	while (len >= L2CAP_CONF_OPT_SIZE) {
 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
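The repeated clamp to conn->mtu - 10 keeps the negotiated max_pdu_size small enough that a worst-case ERTM frame still fits the link MTU: 4 bytes of basic header, 2 of control, 2 of SDU length on start fragments and 2 of FCS, matching the hlen arithmetic in l2cap_create_iframe_pdu(). A trivial sketch of the clamp:

#include <stdint.h>
#include <stdio.h>

/* Worst-case ERTM framing overhead around the payload. */
#define ERTM_MAX_OVERHEAD 10	/* 4 hdr + 2 ctrl + 2 sdulen + 2 fcs */

static uint16_t clamp_mps(uint16_t default_mps, uint16_t conn_mtu)
{
	if (default_mps > conn_mtu - ERTM_MAX_OVERHEAD)
		return conn_mtu - ERTM_MAX_OVERHEAD;
	return default_mps;
}

int main(void)
{
	printf("%u\n", clamp_mps(672, 339));	/* clamped to 329 */
	return 0;
}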
@@ -1853,27 +1944,27 @@ static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data,
 		case L2CAP_CONF_MTU:
 			if (val < L2CAP_DEFAULT_MIN_MTU) {
 				*result = L2CAP_CONF_UNACCEPT;
-				pi->imtu = L2CAP_DEFAULT_MIN_MTU;
+				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
 			} else
-				pi->imtu = val;
-			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
+				chan->imtu = val;
+			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
 			break;
 
 		case L2CAP_CONF_FLUSH_TO:
-			pi->flush_to = val;
+			chan->flush_to = val;
 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
-							2, pi->flush_to);
+							2, chan->flush_to);
 			break;
 
 		case L2CAP_CONF_RFC:
 			if (olen == sizeof(rfc))
 				memcpy(&rfc, (void *)val, olen);
 
-			if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
-							rfc.mode != pi->mode)
+			if ((chan->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
+							rfc.mode != chan->mode)
 				return -ECONNREFUSED;
 
-			pi->fcs = 0;
+			chan->fcs = 0;
 
 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
 					sizeof(rfc), (unsigned long) &rfc);
@@ -1881,53 +1972,74 @@ static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data,
 		}
 	}
 
-	if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
+	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
 		return -ECONNREFUSED;
 
-	pi->mode = rfc.mode;
+	chan->mode = rfc.mode;
 
 	if (*result == L2CAP_CONF_SUCCESS) {
 		switch (rfc.mode) {
 		case L2CAP_MODE_ERTM:
-			pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
-			pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
-			pi->mps = le16_to_cpu(rfc.max_pdu_size);
+			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
+			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
+			chan->mps = le16_to_cpu(rfc.max_pdu_size);
 			break;
 		case L2CAP_MODE_STREAMING:
-			pi->mps = le16_to_cpu(rfc.max_pdu_size);
+			chan->mps = le16_to_cpu(rfc.max_pdu_size);
 		}
 	}
 
-	req->dcid = cpu_to_le16(pi->dcid);
+	req->dcid = cpu_to_le16(chan->dcid);
 	req->flags = cpu_to_le16(0x0000);
 
 	return ptr - data;
 }
 
-static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
+static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
 {
 	struct l2cap_conf_rsp *rsp = data;
 	void *ptr = rsp->data;
 
-	BT_DBG("sk %p", sk);
+	BT_DBG("chan %p", chan);
 
-	rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
+	rsp->scid = cpu_to_le16(chan->dcid);
 	rsp->result = cpu_to_le16(result);
 	rsp->flags = cpu_to_le16(flags);
 
 	return ptr - data;
 }
 
-static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
+void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
+{
+	struct l2cap_conn_rsp rsp;
+	struct l2cap_conn *conn = chan->conn;
+	u8 buf[128];
+
+	rsp.scid = cpu_to_le16(chan->dcid);
+	rsp.dcid = cpu_to_le16(chan->scid);
+	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
+	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
+	l2cap_send_cmd(conn, chan->ident,
+				L2CAP_CONN_RSP, sizeof(rsp), &rsp);
+
+	if (chan->conf_state & L2CAP_CONF_REQ_SENT)
+		return;
+
+	chan->conf_state |= L2CAP_CONF_REQ_SENT;
+	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
+			l2cap_build_conf_req(chan, buf), buf);
+	chan->num_conf_req++;
+}
+
+static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
 {
-	struct l2cap_pinfo *pi = l2cap_pi(sk);
 	int type, olen;
 	unsigned long val;
 	struct l2cap_conf_rfc rfc;
 
-	BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
+	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
 
-	if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
+	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
 		return;
 
 	while (len >= L2CAP_CONF_OPT_SIZE) {
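The new __l2cap_connect_rsp_defer() lets a deferred (security-pending) accept send the success response later and still kick off configuration exactly once, guarded by the L2CAP_CONF_REQ_SENT bit. A reduced, runnable model of that guard, with illustrative constants and fields:

#include <stdio.h>

#define CONF_REQ_SENT 0x01	/* stands in for L2CAP_CONF_REQ_SENT */

struct chan { unsigned conf_state; int num_conf_req; };

/* The first configure request must go out exactly once per channel,
 * whether triggered by the deferred-accept path or the normal one. */
static void send_first_conf_req(struct chan *chan)
{
	if (chan->conf_state & CONF_REQ_SENT)
		return;			/* already on the wire */

	chan->conf_state |= CONF_REQ_SENT;
	chan->num_conf_req++;
	printf("CONF_REQ sent (total %d)\n", chan->num_conf_req);
}

int main(void)
{
	struct chan c = { 0, 0 };

	send_first_conf_req(&c);	/* sends */
	send_first_conf_req(&c);	/* suppressed */
	return 0;
}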
@@ -1944,12 +2056,12 @@ static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
 done:
 	switch (rfc.mode) {
 	case L2CAP_MODE_ERTM:
-		pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
-		pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
-		pi->mps = le16_to_cpu(rfc.max_pdu_size);
+		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
+		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
+		chan->mps = le16_to_cpu(rfc.max_pdu_size);
 		break;
 	case L2CAP_MODE_STREAMING:
-		pi->mps = le16_to_cpu(rfc.max_pdu_size);
+		chan->mps = le16_to_cpu(rfc.max_pdu_size);
 	}
 }
 
@@ -1975,9 +2087,9 @@ static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hd
 
 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
 {
-	struct l2cap_chan_list *list = &conn->chan_list;
 	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
 	struct l2cap_conn_rsp rsp;
+	struct l2cap_chan *chan = NULL, *pchan;
 	struct sock *parent, *sk = NULL;
 	int result, status = L2CAP_CS_NO_INFO;
 
@@ -1987,12 +2099,14 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
 	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
 
 	/* Check if we have socket listening on psm */
-	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
-	if (!parent) {
+	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
+	if (!pchan) {
 		result = L2CAP_CR_BAD_PSM;
 		goto sendresp;
 	}
 
+	parent = pchan->sk;
+
 	bh_lock_sock(parent);
 
 	/* Check if the ACL is secure enough (if not SDP) */
@@ -2015,11 +2129,19 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
 	if (!sk)
 		goto response;
 
-	write_lock_bh(&list->lock);
+	chan = l2cap_chan_create(sk);
+	if (!chan) {
+		l2cap_sock_kill(sk);
+		goto response;
+	}
+
+	l2cap_pi(sk)->chan = chan;
+
+	write_lock_bh(&conn->chan_lock);
 
 	/* Check if we already have channel with that dcid */
-	if (__l2cap_get_chan_by_dcid(list, scid)) {
-		write_unlock_bh(&list->lock);
+	if (__l2cap_get_chan_by_dcid(conn, scid)) {
+		write_unlock_bh(&conn->chan_lock);
 		sock_set_flag(sk, SOCK_ZAPPED);
 		l2cap_sock_kill(sk);
 		goto response;
@@ -2030,18 +2152,21 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
 	l2cap_sock_init(sk, parent);
 	bacpy(&bt_sk(sk)->src, conn->src);
 	bacpy(&bt_sk(sk)->dst, conn->dst);
-	l2cap_pi(sk)->psm = psm;
-	l2cap_pi(sk)->dcid = scid;
+	chan->psm = psm;
+	chan->dcid = scid;
+
+	bt_accept_enqueue(parent, sk);
+
+	__l2cap_chan_add(conn, chan);
 
-	__l2cap_chan_add(conn, sk, parent);
-	dcid = l2cap_pi(sk)->scid;
+	dcid = chan->scid;
 
 	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
 
-	l2cap_pi(sk)->ident = cmd->ident;
+	chan->ident = cmd->ident;
 
 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
-		if (l2cap_check_security(sk)) {
+		if (l2cap_check_security(chan)) {
 			if (bt_sk(sk)->defer_setup) {
 				sk->sk_state = BT_CONNECT2;
 				result = L2CAP_CR_PEND;
@@ -2063,7 +2188,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
 		status = L2CAP_CS_NO_INFO;
 	}
 
-	write_unlock_bh(&list->lock);
+	write_unlock_bh(&conn->chan_lock);
 
 response:
 	bh_unlock_sock(parent);
@@ -2089,13 +2214,13 @@ sendresp:
 					L2CAP_INFO_REQ, sizeof(info), &info);
 	}
 
-	if (sk && !(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
+	if (chan && !(chan->conf_state & L2CAP_CONF_REQ_SENT) &&
 				result == L2CAP_CR_SUCCESS) {
 		u8 buf[128];
-		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
+		chan->conf_state |= L2CAP_CONF_REQ_SENT;
 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
-					l2cap_build_conf_req(sk, buf), buf);
-		l2cap_pi(sk)->num_conf_req++;
+					l2cap_build_conf_req(chan, buf), buf);
+		chan->num_conf_req++;
 	}
 
 	return 0;
@@ -2105,6 +2230,7 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
 {
 	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
 	u16 scid, dcid, result, status;
+	struct l2cap_chan *chan;
 	struct sock *sk;
 	u8 req[128];
 
@@ -2116,34 +2242,36 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
 
 	if (scid) {
-		sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
-		if (!sk)
+		chan = l2cap_get_chan_by_scid(conn, scid);
+		if (!chan)
 			return -EFAULT;
 	} else {
-		sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
-		if (!sk)
+		chan = l2cap_get_chan_by_ident(conn, cmd->ident);
+		if (!chan)
 			return -EFAULT;
 	}
 
+	sk = chan->sk;
+
 	switch (result) {
 	case L2CAP_CR_SUCCESS:
 		sk->sk_state = BT_CONFIG;
-		l2cap_pi(sk)->ident = 0;
-		l2cap_pi(sk)->dcid = dcid;
-		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
+		chan->ident = 0;
+		chan->dcid = dcid;
+		chan->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
 
-		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
+		if (chan->conf_state & L2CAP_CONF_REQ_SENT)
 			break;
 
-		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
+		chan->conf_state |= L2CAP_CONF_REQ_SENT;
 
 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
-					l2cap_build_conf_req(sk, req), req);
-		l2cap_pi(sk)->num_conf_req++;
+					l2cap_build_conf_req(chan, req), req);
+		chan->num_conf_req++;
 		break;
 
 	case L2CAP_CR_PEND:
-		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
+		chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
 		break;
 
 	default:
@@ -2155,7 +2283,7 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
 			break;
 		}
 
-		l2cap_chan_del(sk, ECONNREFUSED);
+		l2cap_chan_del(chan, ECONNREFUSED);
 		break;
 	}
 
@@ -2163,15 +2291,17 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
 	return 0;
 }
 
-static inline void set_default_fcs(struct l2cap_pinfo *pi)
+static inline void set_default_fcs(struct l2cap_chan *chan)
 {
+	struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
+
 	/* FCS is enabled only in ERTM or streaming mode, if one or both
 	 * sides request it.
 	 */
-	if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
-		pi->fcs = L2CAP_FCS_NONE;
-	else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
-		pi->fcs = L2CAP_FCS_CRC16;
+	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
+		chan->fcs = L2CAP_FCS_NONE;
+	else if (!(pi->chan->conf_state & L2CAP_CONF_NO_FCS_RECV))
+		chan->fcs = L2CAP_FCS_CRC16;
 }
 
 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
@@ -2179,6 +2309,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
 	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
 	u16 dcid, flags;
 	u8 rsp[64];
+	struct l2cap_chan *chan;
 	struct sock *sk;
 	int len;
 
@@ -2187,11 +2318,13 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
 
 	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
 
-	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
-	if (!sk)
+	chan = l2cap_get_chan_by_scid(conn, dcid);
+	if (!chan)
 		return -ENOENT;
 
-	if (sk->sk_state != BT_CONFIG) {
+	sk = chan->sk;
+
+	if (sk->sk_state != BT_CONFIG && sk->sk_state != BT_CONNECT2) {
 		struct l2cap_cmd_rej rej;
 
 		rej.reason = cpu_to_le16(0x0002);
@@ -2202,62 +2335,62 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
 
 	/* Reject if config buffer is too small. */
 	len = cmd_len - sizeof(*req);
-	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
+	if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
-				l2cap_build_conf_rsp(sk, rsp,
+				l2cap_build_conf_rsp(chan, rsp,
 					L2CAP_CONF_REJECT, flags), rsp);
 		goto unlock;
 	}
 
 	/* Store config. */
-	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
-	l2cap_pi(sk)->conf_len += len;
+	memcpy(chan->conf_req + chan->conf_len, req->data, len);
+	chan->conf_len += len;
 
 	if (flags & 0x0001) {
 		/* Incomplete config. Send empty response. */
 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
-				l2cap_build_conf_rsp(sk, rsp,
+				l2cap_build_conf_rsp(chan, rsp,
 					L2CAP_CONF_SUCCESS, 0x0001), rsp);
 		goto unlock;
 	}
 
 	/* Complete config. */
-	len = l2cap_parse_conf_req(sk, rsp);
+	len = l2cap_parse_conf_req(chan, rsp);
 	if (len < 0) {
-		l2cap_send_disconn_req(conn, sk, ECONNRESET);
+		l2cap_send_disconn_req(conn, chan, ECONNRESET);
 		goto unlock;
 	}
 
 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
-	l2cap_pi(sk)->num_conf_rsp++;
+	chan->num_conf_rsp++;
 
 	/* Reset config buffer. */
-	l2cap_pi(sk)->conf_len = 0;
+	chan->conf_len = 0;
 
-	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
+	if (!(chan->conf_state & L2CAP_CONF_OUTPUT_DONE))
 		goto unlock;
 
-	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
-		set_default_fcs(l2cap_pi(sk));
+	if (chan->conf_state & L2CAP_CONF_INPUT_DONE) {
+		set_default_fcs(chan);
 
 		sk->sk_state = BT_CONNECTED;
 
-		l2cap_pi(sk)->next_tx_seq = 0;
-		l2cap_pi(sk)->expected_tx_seq = 0;
-		__skb_queue_head_init(TX_QUEUE(sk));
-		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
-			l2cap_ertm_init(sk);
+		chan->next_tx_seq = 0;
+		chan->expected_tx_seq = 0;
+		skb_queue_head_init(&chan->tx_q);
+		if (chan->mode == L2CAP_MODE_ERTM)
+			l2cap_ertm_init(chan);
 
 		l2cap_chan_ready(sk);
 		goto unlock;
 	}
 
-	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
+	if (!(chan->conf_state & L2CAP_CONF_REQ_SENT)) {
 		u8 buf[64];
-		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
+		chan->conf_state |= L2CAP_CONF_REQ_SENT;
 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
-					l2cap_build_conf_req(sk, buf), buf);
-		l2cap_pi(sk)->num_conf_req++;
+					l2cap_build_conf_req(chan, buf), buf);
+		chan->num_conf_req++;
 	}
 
 unlock:
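Note the strengthened bounds check at the top of this hunk: len is cmd_len - sizeof(*req) and can go negative on a truncated command, so "len < 0" must be rejected before the signed sum with conf_len is compared against the buffer size. A standalone model of the accumulation step:

#include <stdio.h>
#include <string.h>

#define CONF_REQ_BUF 64

struct chan { char conf_req[CONF_REQ_BUF]; int conf_len; };

/* Fragments of a configure request are appended to a fixed buffer;
 * a negative length (truncated command) is rejected up front. */
static int store_conf(struct chan *chan, const char *data, int len)
{
	if (len < 0 || chan->conf_len + len > (int)sizeof(chan->conf_req))
		return -1;		/* reject: would under/overflow */

	memcpy(chan->conf_req + chan->conf_len, data, len);
	chan->conf_len += len;
	return 0;
}

int main(void)
{
	struct chan c = { .conf_len = 0 };
	char opt[8] = { 0 };

	printf("%d\n", store_conf(&c, opt, sizeof(opt)));	/* 0: stored */
	printf("%d\n", store_conf(&c, opt, -4));		/* -1: rejected */
	return 0;
}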
@@ -2269,6 +2402,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
 {
 	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
 	u16 scid, flags, result;
+	struct l2cap_chan *chan;
 	struct sock *sk;
 	int len = cmd->len - sizeof(*rsp);
 
@@ -2279,36 +2413,38 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
 	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
 			scid, flags, result);
 
-	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
-	if (!sk)
+	chan = l2cap_get_chan_by_scid(conn, scid);
+	if (!chan)
 		return 0;
 
+	sk = chan->sk;
+
 	switch (result) {
 	case L2CAP_CONF_SUCCESS:
-		l2cap_conf_rfc_get(sk, rsp->data, len);
+		l2cap_conf_rfc_get(chan, rsp->data, len);
 		break;
 
 	case L2CAP_CONF_UNACCEPT:
-		if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
+		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
 			char req[64];
 
 			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
-				l2cap_send_disconn_req(conn, sk, ECONNRESET);
+				l2cap_send_disconn_req(conn, chan, ECONNRESET);
 				goto done;
 			}
 
 			/* throw out any old stored conf requests */
 			result = L2CAP_CONF_SUCCESS;
-			len = l2cap_parse_conf_rsp(sk, rsp->data,
-							len, req, &result);
+			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
+							req, &result);
 			if (len < 0) {
-				l2cap_send_disconn_req(conn, sk, ECONNRESET);
+				l2cap_send_disconn_req(conn, chan, ECONNRESET);
 				goto done;
 			}
 
 			l2cap_send_cmd(conn, l2cap_get_ident(conn),
 						L2CAP_CONF_REQ, len, req);
-			l2cap_pi(sk)->num_conf_req++;
+			chan->num_conf_req++;
 			if (result != L2CAP_CONF_SUCCESS)
 				goto done;
 			break;
@@ -2317,24 +2453,24 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
 	default:
 		sk->sk_err = ECONNRESET;
 		l2cap_sock_set_timer(sk, HZ * 5);
-		l2cap_send_disconn_req(conn, sk, ECONNRESET);
+		l2cap_send_disconn_req(conn, chan, ECONNRESET);
 		goto done;
 	}
 
 	if (flags & 0x01)
 		goto done;
 
-	l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
+	chan->conf_state |= L2CAP_CONF_INPUT_DONE;
 
-	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
-		set_default_fcs(l2cap_pi(sk));
+	if (chan->conf_state & L2CAP_CONF_OUTPUT_DONE) {
+		set_default_fcs(chan);
 
 		sk->sk_state = BT_CONNECTED;
-		l2cap_pi(sk)->next_tx_seq = 0;
-		l2cap_pi(sk)->expected_tx_seq = 0;
-		__skb_queue_head_init(TX_QUEUE(sk));
-		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
-			l2cap_ertm_init(sk);
+		chan->next_tx_seq = 0;
+		chan->expected_tx_seq = 0;
+		skb_queue_head_init(&chan->tx_q);
+		if (chan->mode == L2CAP_MODE_ERTM)
+			l2cap_ertm_init(chan);
 
 		l2cap_chan_ready(sk);
 	}
@@ -2349,6 +2485,7 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd
 	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
 	struct l2cap_disconn_rsp rsp;
 	u16 dcid, scid;
+	struct l2cap_chan *chan;
 	struct sock *sk;
 
 	scid = __le16_to_cpu(req->scid);
@@ -2356,12 +2493,14 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd
 
 	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
 
-	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
-	if (!sk)
+	chan = l2cap_get_chan_by_scid(conn, dcid);
+	if (!chan)
 		return 0;
 
-	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
-	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
+	sk = chan->sk;
+
+	rsp.dcid = cpu_to_le16(chan->scid);
+	rsp.scid = cpu_to_le16(chan->dcid);
 	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
 
 	sk->sk_shutdown = SHUTDOWN_MASK;
@@ -2375,7 +2514,7 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd
 		return 0;
 	}
 
-	l2cap_chan_del(sk, ECONNRESET);
+	l2cap_chan_del(chan, ECONNRESET);
 	bh_unlock_sock(sk);
 
 	l2cap_sock_kill(sk);
@@ -2386,6 +2525,7 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd
 {
 	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
 	u16 dcid, scid;
+	struct l2cap_chan *chan;
 	struct sock *sk;
 
 	scid = __le16_to_cpu(rsp->scid);
@@ -2393,10 +2533,12 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd
 
 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
 
-	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
-	if (!sk)
+	chan = l2cap_get_chan_by_scid(conn, scid);
+	if (!chan)
 		return 0;
 
+	sk = chan->sk;
+
 	/* don't delete l2cap channel if sk is owned by user */
 	if (sock_owned_by_user(sk)) {
 		sk->sk_state = BT_DISCONN;
@@ -2406,7 +2548,7 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd
 		return 0;
 	}
 
-	l2cap_chan_del(sk, 0);
+	l2cap_chan_del(chan, 0);
 	bh_unlock_sock(sk);
 
 	l2cap_sock_kill(sk);
@@ -2463,6 +2605,11 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cm
 
 	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
 
+	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
+	if (cmd->ident != conn->info_ident ||
+			conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
+		return 0;
+
 	del_timer(&conn->info_timer);
 
 	if (result != L2CAP_IR_SUCCESS) {
@@ -2673,7 +2820,8 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn,
 
 		if (err) {
 			struct l2cap_cmd_rej rej;
-			BT_DBG("error %d", err);
+
+			BT_ERR("Wrong link type (%d)", err);
 
 			/* FIXME: Map err to a valid reason */
 			rej.reason = cpu_to_le16(0);
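The added identifier check in the information-response hunk drops responses that do not answer the outstanding request, or that arrive after the feature-mask exchange already completed; since these commands are not bound to any channel, the connection-level ident is the only correlation available. A reduced model of the guard:

#include <stdio.h>

#define INFO_FEAT_MASK_REQ_DONE 0x01

struct conn { unsigned char info_ident; unsigned info_state; };

/* Accept an info response only if it matches the pending request
 * and the exchange has not already finished. */
static int info_rsp_valid(const struct conn *conn, unsigned char ident)
{
	if (ident != conn->info_ident ||
			(conn->info_state & INFO_FEAT_MASK_REQ_DONE))
		return 0;		/* stale or duplicate: ignore */
	return 1;
}

int main(void)
{
	struct conn c = { .info_ident = 0x42, .info_state = 0 };

	printf("%d\n", info_rsp_valid(&c, 0x42));	/* 1 */
	printf("%d\n", info_rsp_valid(&c, 0x13));	/* 0 */
	return 0;
}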
@@ -2687,12 +2835,12 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn,
 	kfree_skb(skb);
 }
 
-static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
+static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
 {
 	u16 our_fcs, rcv_fcs;
 	int hdr_size = L2CAP_HDR_SIZE + 2;
 
-	if (pi->fcs == L2CAP_FCS_CRC16) {
+	if (chan->fcs == L2CAP_FCS_CRC16) {
 		skb_trim(skb, skb->len - 2);
 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
@@ -2703,49 +2851,47 @@ static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
 	return 0;
 }
 
-static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
+static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
 {
-	struct l2cap_pinfo *pi = l2cap_pi(sk);
 	u16 control = 0;
 
-	pi->frames_sent = 0;
+	chan->frames_sent = 0;
 
-	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
 
-	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
+	if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
 		control |= L2CAP_SUPER_RCV_NOT_READY;
-		l2cap_send_sframe(pi, control);
-		pi->conn_state |= L2CAP_CONN_RNR_SENT;
+		l2cap_send_sframe(chan, control);
+		chan->conn_state |= L2CAP_CONN_RNR_SENT;
 	}
 
-	if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
-		l2cap_retransmit_frames(sk);
+	if (chan->conn_state & L2CAP_CONN_REMOTE_BUSY)
+		l2cap_retransmit_frames(chan);
 
-	l2cap_ertm_send(sk);
+	l2cap_ertm_send(chan);
 
-	if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
-			pi->frames_sent == 0) {
+	if (!(chan->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
+			chan->frames_sent == 0) {
 		control |= L2CAP_SUPER_RCV_READY;
-		l2cap_send_sframe(pi, control);
+		l2cap_send_sframe(chan, control);
 	}
 }
 
-static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
+static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar)
 {
 	struct sk_buff *next_skb;
-	struct l2cap_pinfo *pi = l2cap_pi(sk);
 	int tx_seq_offset, next_tx_seq_offset;
 
 	bt_cb(skb)->tx_seq = tx_seq;
 	bt_cb(skb)->sar = sar;
 
-	next_skb = skb_peek(SREJ_QUEUE(sk));
+	next_skb = skb_peek(&chan->srej_q);
 	if (!next_skb) {
-		__skb_queue_tail(SREJ_QUEUE(sk), skb);
+		__skb_queue_tail(&chan->srej_q, skb);
 		return 0;
 	}
 
-	tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
+	tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
 	if (tx_seq_offset < 0)
 		tx_seq_offset += 64;
 
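With CRC16 FCS negotiated, the last two bytes of each frame carry a checksum over everything before them, header included; l2cap_check_fcs() trims them, recomputes, and compares. A self-contained sketch using the same reflected 0x8005 (0xA001) polynomial as the kernel's lib/crc16.c, written bit-by-bit rather than table-driven:

#include <stdint.h>
#include <stdio.h>

static uint16_t crc16(uint16_t crc, const uint8_t *buf, int len)
{
	while (len--) {
		crc ^= *buf++;
		for (int i = 0; i < 8; i++)
			crc = (crc & 1) ? (crc >> 1) ^ 0xA001 : crc >> 1;
	}
	return crc;
}

/* Receiver side, as in the hunk above: the little-endian FCS trails
 * the frame and covers everything before it. */
static int fcs_ok(const uint8_t *frame, int len)
{
	uint16_t rcv = frame[len - 2] | (frame[len - 1] << 8);

	return crc16(0, frame, len - 2) == rcv;
}

int main(void)
{
	uint8_t frame[] = { 0x04, 0x00, 0x40, 0x00, 0xde, 0xad, 0, 0 };
	uint16_t fcs = crc16(0, frame, 6);

	frame[6] = fcs & 0xff;		/* append FCS little-endian */
	frame[7] = fcs >> 8;
	printf("fcs ok: %d\n", fcs_ok(frame, sizeof(frame)));	/* 1 */
	return 0;
}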
@@ -2754,53 +2900,52 @@ static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_s
 			return -EINVAL;
 
 		next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
-						pi->buffer_seq) % 64;
+						chan->buffer_seq) % 64;
 		if (next_tx_seq_offset < 0)
 			next_tx_seq_offset += 64;
 
 		if (next_tx_seq_offset > tx_seq_offset) {
-			__skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
+			__skb_queue_before(&chan->srej_q, next_skb, skb);
 			return 0;
 		}
 
-		if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
+		if (skb_queue_is_last(&chan->srej_q, next_skb))
 			break;
 
-	} while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
+	} while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));
 
-	__skb_queue_tail(SREJ_QUEUE(sk), skb);
+	__skb_queue_tail(&chan->srej_q, skb);
 
 	return 0;
 }
 
-static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
+static int l2cap_ertm_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
 {
-	struct l2cap_pinfo *pi = l2cap_pi(sk);
 	struct sk_buff *_skb;
 	int err;
 
 	switch (control & L2CAP_CTRL_SAR) {
 	case L2CAP_SDU_UNSEGMENTED:
-		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
+		if (chan->conn_state & L2CAP_CONN_SAR_SDU)
 			goto drop;
 
-		err = sock_queue_rcv_skb(sk, skb);
+		err = sock_queue_rcv_skb(chan->sk, skb);
 		if (!err)
 			return err;
 
 		break;
 
 	case L2CAP_SDU_START:
-		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
+		if (chan->conn_state & L2CAP_CONN_SAR_SDU)
 			goto disconnect;
 
-		pi->sdu_len = get_unaligned_le16(skb->data);
+		chan->sdu_len = get_unaligned_le16(skb->data);
 
-		if (pi->sdu_len > pi->imtu)
+		if (chan->sdu_len > chan->imtu)
 			goto disconnect;
 
-		pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
-		if (!pi->sdu)
+		chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
+		if (!chan->sdu)
 			return -ENOMEM;
 
 		/* pull sdu_len bytes only after alloc, because of Local Busy
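TxSeq is a 6-bit counter, so the SREJ queue above is kept ordered by offsets from buffer_seq taken modulo 64, with C's negative remainders folded back into the window:

#include <stdio.h>

/* Distance of tx_seq from the start of the receive window, exactly
 * as computed in l2cap_add_to_srej_queue(). */
static int seq_offset(int tx_seq, int buffer_seq)
{
	int off = (tx_seq - buffer_seq) % 64;

	if (off < 0)
		off += 64;	/* fold C's negative remainder back */
	return off;
}

int main(void)
{
	/* 2 comes "after" 62 when the window starts at 60 */
	printf("%d %d\n", seq_offset(62, 60), seq_offset(2, 60)); /* 2 6 */
	return 0;
}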
@@ -2808,63 +2953,63 @@ static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 c
 		 * only once, i.e., when alloc does not fail */
 		skb_pull(skb, 2);
 
-		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
+		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
 
-		pi->conn_state |= L2CAP_CONN_SAR_SDU;
-		pi->partial_sdu_len = skb->len;
+		chan->conn_state |= L2CAP_CONN_SAR_SDU;
+		chan->partial_sdu_len = skb->len;
 		break;
 
 	case L2CAP_SDU_CONTINUE:
-		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
+		if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
 			goto disconnect;
 
-		if (!pi->sdu)
+		if (!chan->sdu)
 			goto disconnect;
 
-		pi->partial_sdu_len += skb->len;
-		if (pi->partial_sdu_len > pi->sdu_len)
+		chan->partial_sdu_len += skb->len;
+		if (chan->partial_sdu_len > chan->sdu_len)
 			goto drop;
 
-		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
+		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
 
 		break;
 
 	case L2CAP_SDU_END:
-		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
+		if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
 			goto disconnect;
 
-		if (!pi->sdu)
+		if (!chan->sdu)
 			goto disconnect;
 
-		if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
-			pi->partial_sdu_len += skb->len;
+		if (!(chan->conn_state & L2CAP_CONN_SAR_RETRY)) {
+			chan->partial_sdu_len += skb->len;
 
-			if (pi->partial_sdu_len > pi->imtu)
+			if (chan->partial_sdu_len > chan->imtu)
 				goto drop;
 
-			if (pi->partial_sdu_len != pi->sdu_len)
+			if (chan->partial_sdu_len != chan->sdu_len)
 				goto drop;
 
-			memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
+			memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
 		}
 
-		_skb = skb_clone(pi->sdu, GFP_ATOMIC);
+		_skb = skb_clone(chan->sdu, GFP_ATOMIC);
 		if (!_skb) {
-			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
+			chan->conn_state |= L2CAP_CONN_SAR_RETRY;
 			return -ENOMEM;
 		}
 
-		err = sock_queue_rcv_skb(sk, _skb);
+		err = sock_queue_rcv_skb(chan->sk, _skb);
 		if (err < 0) {
 			kfree_skb(_skb);
-			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
+			chan->conn_state |= L2CAP_CONN_SAR_RETRY;
 			return err;
 		}
 
-		pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
-		pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
+		chan->conn_state &= ~L2CAP_CONN_SAR_RETRY;
+		chan->conn_state &= ~L2CAP_CONN_SAR_SDU;
 
-		kfree_skb(pi->sdu);
+		kfree_skb(chan->sdu);
 		break;
 	}
 
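The switch above is a small state machine: START may only arrive with no SDU in progress, CONTINUE and END only with one, and END must land exactly on the announced SDU length. A runnable reduction of those transitions, with buffer management and the Local-Busy retry path omitted:

#include <stdio.h>

enum sar { UNSEGMENTED, START, END, CONTINUE };

struct reasm { int in_progress; int sdu_len; int partial; };

/* Returns 1 when an SDU completes, 0 while in progress, -1 on a
 * protocol violation (which the kernel answers with a disconnect). */
static int rx_fragment(struct reasm *r, enum sar sar, int len, int sdu_len)
{
	switch (sar) {
	case START:
		if (r->in_progress)
			return -1;
		r->in_progress = 1;
		r->sdu_len = sdu_len;
		r->partial = len;
		return 0;
	case CONTINUE:
		if (!r->in_progress || r->partial + len > r->sdu_len)
			return -1;
		r->partial += len;
		return 0;
	case END:
		if (!r->in_progress || r->partial + len != r->sdu_len)
			return -1;
		r->in_progress = 0;
		return 1;		/* SDU complete */
	default:			/* UNSEGMENTED */
		return r->in_progress ? -1 : 1;
	}
}

int main(void)
{
	struct reasm r = { 0 };

	rx_fragment(&r, START, 100, 250);
	rx_fragment(&r, CONTINUE, 100, 0);
	printf("%d\n", rx_fragment(&r, END, 50, 0));	/* 1: complete */
	return 0;
}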
@@ -2872,51 +3017,50 @@ static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 c
 	return 0;
 
drop:
-	kfree_skb(pi->sdu);
-	pi->sdu = NULL;
+	kfree_skb(chan->sdu);
+	chan->sdu = NULL;
 
disconnect:
-	l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
+	l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
 	kfree_skb(skb);
 	return 0;
 }
 
-static int l2cap_try_push_rx_skb(struct sock *sk)
+static int l2cap_try_push_rx_skb(struct l2cap_chan *chan)
 {
-	struct l2cap_pinfo *pi = l2cap_pi(sk);
 	struct sk_buff *skb;
 	u16 control;
 	int err;
 
-	while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
+	while ((skb = skb_dequeue(&chan->busy_q))) {
 		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
-		err = l2cap_ertm_reassembly_sdu(sk, skb, control);
+		err = l2cap_ertm_reassembly_sdu(chan, skb, control);
 		if (err < 0) {
-			skb_queue_head(BUSY_QUEUE(sk), skb);
+			skb_queue_head(&chan->busy_q, skb);
 			return -EBUSY;
 		}
 
-		pi->buffer_seq = (pi->buffer_seq + 1) % 64;
+		chan->buffer_seq = (chan->buffer_seq + 1) % 64;
 	}
 
-	if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
+	if (!(chan->conn_state & L2CAP_CONN_RNR_SENT))
 		goto done;
 
-	control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+	control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
 	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
-	l2cap_send_sframe(pi, control);
-	l2cap_pi(sk)->retry_count = 1;
+	l2cap_send_sframe(chan, control);
+	chan->retry_count = 1;
 
-	del_timer(&pi->retrans_timer);
+	del_timer(&chan->retrans_timer);
 	__mod_monitor_timer();
 
-	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
+	chan->conn_state |= L2CAP_CONN_WAIT_F;
 
done:
-	pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
-	pi->conn_state &= ~L2CAP_CONN_RNR_SENT;
+	chan->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
+	chan->conn_state &= ~L2CAP_CONN_RNR_SENT;
 
-	BT_DBG("sk %p, Exit local busy", sk);
+	BT_DBG("chan %p, Exit local busy", chan);
 
 	return 0;
 }
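
A note on the sequence arithmetic that recurs throughout these hunks: ERTM sequence numbers are 6-bit values that wrap at 64, so every distance (tx_seq relative to buffer_seq, req_seq relative to expected_ack_seq) is reduced modulo 64 and clamped non-negative before being compared against the transmit window. A minimal stand-alone sketch of that check (hypothetical helper for illustration; the driver open-codes it each time):

	static int ertm_seq_in_window(u8 tx_seq, u8 buffer_seq, u8 tx_win)
	{
		/* C's % keeps the sign, so a wrapped distance can be negative */
		int offset = (tx_seq - buffer_seq) % 64;

		if (offset < 0)
			offset += 64;

		/* frames landing outside the window trigger a disconnect */
		return offset < tx_win;
	}
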
@@ -2924,21 +3068,21 @@ done:
static void l2cap_busy_work(struct work_struct *work)
 {
 	DECLARE_WAITQUEUE(wait, current);
-	struct l2cap_pinfo *pi =
-		container_of(work, struct l2cap_pinfo, busy_work);
-	struct sock *sk = (struct sock *)pi;
+	struct l2cap_chan *chan =
+		container_of(work, struct l2cap_chan, busy_work);
+	struct sock *sk = chan->sk;
 	int n_tries = 0, timeo = HZ/5, err;
 	struct sk_buff *skb;
 
 	lock_sock(sk);
 
 	add_wait_queue(sk_sleep(sk), &wait);
-	while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
+	while ((skb = skb_peek(&chan->busy_q))) {
 		set_current_state(TASK_INTERRUPTIBLE);
 
 		if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
 			err = -EBUSY;
-			l2cap_send_disconn_req(pi->conn, sk, EBUSY);
+			l2cap_send_disconn_req(chan->conn, chan, EBUSY);
 			break;
 		}
 
@@ -2958,7 +3102,7 @@ static void l2cap_busy_work(struct work_struct *work)
 		if (err)
 			break;
 
-		if (l2cap_try_push_rx_skb(sk) == 0)
+		if (l2cap_try_push_rx_skb(chan) == 0)
 			break;
 	}
 
@@ -2968,48 +3112,46 @@ static void l2cap_busy_work(struct work_struct *work)
 	release_sock(sk);
 }
 
-static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
+static int l2cap_push_rx_skb(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
 {
-	struct l2cap_pinfo *pi = l2cap_pi(sk);
 	int sctrl, err;
 
-	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
+	if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
 		bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
-		__skb_queue_tail(BUSY_QUEUE(sk), skb);
-		return l2cap_try_push_rx_skb(sk);
+		__skb_queue_tail(&chan->busy_q, skb);
+		return l2cap_try_push_rx_skb(chan);
 
 
 	}
 
-	err = l2cap_ertm_reassembly_sdu(sk, skb, control);
+	err = l2cap_ertm_reassembly_sdu(chan, skb, control);
 	if (err >= 0) {
-		pi->buffer_seq = (pi->buffer_seq + 1) % 64;
+		chan->buffer_seq = (chan->buffer_seq + 1) % 64;
 		return err;
 	}
 
 	/* Busy Condition */
-	BT_DBG("sk %p, Enter local busy", sk);
+	BT_DBG("chan %p, Enter local busy", chan);
 
-	pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
+	chan->conn_state |= L2CAP_CONN_LOCAL_BUSY;
 	bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
-	__skb_queue_tail(BUSY_QUEUE(sk), skb);
+	__skb_queue_tail(&chan->busy_q, skb);
 
-	sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+	sctrl = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
 	sctrl |= L2CAP_SUPER_RCV_NOT_READY;
-	l2cap_send_sframe(pi, sctrl);
+	l2cap_send_sframe(chan, sctrl);
 
-	pi->conn_state |= L2CAP_CONN_RNR_SENT;
+	chan->conn_state |= L2CAP_CONN_RNR_SENT;
 
-	del_timer(&pi->ack_timer);
+	del_timer(&chan->ack_timer);
 
-	queue_work(_busy_wq, &pi->busy_work);
+	queue_work(_busy_wq, &chan->busy_work);
 
 	return err;
 }
 
-static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
+static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
 {
-	struct l2cap_pinfo *pi = l2cap_pi(sk);
 	struct sk_buff *_skb;
 	int err = -EINVAL;
 
@@ -3020,80 +3162,80 @@ static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb,
 
 	switch (control & L2CAP_CTRL_SAR) {
 	case L2CAP_SDU_UNSEGMENTED:
-		if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
-			kfree_skb(pi->sdu);
+		if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
+			kfree_skb(chan->sdu);
 			break;
 		}
 
-		err = sock_queue_rcv_skb(sk, skb);
+		err = sock_queue_rcv_skb(chan->sk, skb);
 		if (!err)
 			return 0;
 
 		break;
 
 	case L2CAP_SDU_START:
-		if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
-			kfree_skb(pi->sdu);
+		if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
+			kfree_skb(chan->sdu);
 			break;
 		}
 
-		pi->sdu_len = get_unaligned_le16(skb->data);
+		chan->sdu_len = get_unaligned_le16(skb->data);
 		skb_pull(skb, 2);
 
-		if (pi->sdu_len > pi->imtu) {
+		if (chan->sdu_len > chan->imtu) {
 			err = -EMSGSIZE;
 			break;
 		}
 
-		pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
-		if (!pi->sdu) {
+		chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
+		if (!chan->sdu) {
 			err = -ENOMEM;
 			break;
 		}
 
-		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
+		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
 
-		pi->conn_state |= L2CAP_CONN_SAR_SDU;
-		pi->partial_sdu_len = skb->len;
+		chan->conn_state |= L2CAP_CONN_SAR_SDU;
+		chan->partial_sdu_len = skb->len;
 		err = 0;
 		break;
 
 	case L2CAP_SDU_CONTINUE:
-		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
+		if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
 			break;
 
-		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
+		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
 
-		pi->partial_sdu_len += skb->len;
-		if (pi->partial_sdu_len > pi->sdu_len)
-			kfree_skb(pi->sdu);
+		chan->partial_sdu_len += skb->len;
+		if (chan->partial_sdu_len > chan->sdu_len)
+			kfree_skb(chan->sdu);
 		else
 			err = 0;
 
 		break;
 
 	case L2CAP_SDU_END:
-		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
+		if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
 			break;
 
-		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
+		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
 
-		pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
-		pi->partial_sdu_len += skb->len;
+		chan->conn_state &= ~L2CAP_CONN_SAR_SDU;
+		chan->partial_sdu_len += skb->len;
 
-		if (pi->partial_sdu_len > pi->imtu)
+		if (chan->partial_sdu_len > chan->imtu)
 			goto drop;
 
-		if (pi->partial_sdu_len == pi->sdu_len) {
-			_skb = skb_clone(pi->sdu, GFP_ATOMIC);
-			err = sock_queue_rcv_skb(sk, _skb);
+		if (chan->partial_sdu_len == chan->sdu_len) {
+			_skb = skb_clone(chan->sdu, GFP_ATOMIC);
+			err = sock_queue_rcv_skb(chan->sk, _skb);
 			if (err < 0)
 				kfree_skb(_skb);
 		}
 		err = 0;
 
drop:
-		kfree_skb(pi->sdu);
+		kfree_skb(chan->sdu);
 		break;
 	}
 
@@ -3101,31 +3243,30 @@ drop:
 	return err;
 }
 
-static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
+static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
 {
 	struct sk_buff *skb;
 	u16 control;
 
-	while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
+	while ((skb = skb_peek(&chan->srej_q))) {
 		if (bt_cb(skb)->tx_seq != tx_seq)
 			break;
 
-		skb = skb_dequeue(SREJ_QUEUE(sk));
+		skb = skb_dequeue(&chan->srej_q);
 		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
-		l2cap_ertm_reassembly_sdu(sk, skb, control);
-		l2cap_pi(sk)->buffer_seq_srej =
-			(l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
+		l2cap_ertm_reassembly_sdu(chan, skb, control);
+		chan->buffer_seq_srej =
+			(chan->buffer_seq_srej + 1) % 64;
 		tx_seq = (tx_seq + 1) % 64;
 	}
 }
 
-static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
+static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
 {
-	struct l2cap_pinfo *pi = l2cap_pi(sk);
 	struct srej_list *l, *tmp;
 	u16 control;
 
-	list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
+	list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
 		if (l->tx_seq == tx_seq) {
 			list_del(&l->list);
 			kfree(l);
@@ -3133,107 +3274,105 @@ static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
 		}
 		control = L2CAP_SUPER_SELECT_REJECT;
 		control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
-		l2cap_send_sframe(pi, control);
+		l2cap_send_sframe(chan, control);
 		list_del(&l->list);
-		list_add_tail(&l->list, SREJ_LIST(sk));
+		list_add_tail(&l->list, &chan->srej_l);
 	}
 }
 
-static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
+static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq)
 {
-	struct l2cap_pinfo *pi = l2cap_pi(sk);
 	struct srej_list *new;
 	u16 control;
 
-	while (tx_seq != pi->expected_tx_seq) {
+	while (tx_seq != chan->expected_tx_seq) {
 		control = L2CAP_SUPER_SELECT_REJECT;
-		control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
-		l2cap_send_sframe(pi, control);
+		control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+		l2cap_send_sframe(chan, control);
 
 		new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
-		new->tx_seq = pi->expected_tx_seq;
-		pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
-		list_add_tail(&new->list, SREJ_LIST(sk));
+		new->tx_seq = chan->expected_tx_seq;
+		chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
+		list_add_tail(&new->list, &chan->srej_l);
 	}
-	pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
+	chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
 }
 
-static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
+static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
 {
-	struct l2cap_pinfo *pi = l2cap_pi(sk);
 	u8 tx_seq = __get_txseq(rx_control);
 	u8 req_seq = __get_reqseq(rx_control);
 	u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
 	int tx_seq_offset, expected_tx_seq_offset;
-	int num_to_ack = (pi->tx_win/6) + 1;
+	int num_to_ack = (chan->tx_win/6) + 1;
 	int err = 0;
 
-	BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,
-								rx_control);
+	BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
+							tx_seq, rx_control);
 
 	if (L2CAP_CTRL_FINAL & rx_control &&
-			l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
-		del_timer(&pi->monitor_timer);
-		if (pi->unacked_frames > 0)
+			chan->conn_state & L2CAP_CONN_WAIT_F) {
+		del_timer(&chan->monitor_timer);
+		if (chan->unacked_frames > 0)
 			__mod_retrans_timer();
-		pi->conn_state &= ~L2CAP_CONN_WAIT_F;
+		chan->conn_state &= ~L2CAP_CONN_WAIT_F;
 	}
 
-	pi->expected_ack_seq = req_seq;
-	l2cap_drop_acked_frames(sk);
+	chan->expected_ack_seq = req_seq;
+	l2cap_drop_acked_frames(chan);
 
-	if (tx_seq == pi->expected_tx_seq)
+	if (tx_seq == chan->expected_tx_seq)
 		goto expected;
 
-	tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
+	tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
 	if (tx_seq_offset < 0)
 		tx_seq_offset += 64;
 
 	/* invalid tx_seq */
-	if (tx_seq_offset >= pi->tx_win) {
-		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
+	if (tx_seq_offset >= chan->tx_win) {
+		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
 		goto drop;
 	}
 
-	if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
+	if (chan->conn_state == L2CAP_CONN_LOCAL_BUSY)
 		goto drop;
 
-	if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
+	if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
 		struct srej_list *first;
 
-		first = list_first_entry(SREJ_LIST(sk),
+		first = list_first_entry(&chan->srej_l,
 				struct srej_list, list);
 		if (tx_seq == first->tx_seq) {
-			l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
-			l2cap_check_srej_gap(sk, tx_seq);
+			l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
+			l2cap_check_srej_gap(chan, tx_seq);
 
 			list_del(&first->list);
 			kfree(first);
 
-			if (list_empty(SREJ_LIST(sk))) {
-				pi->buffer_seq = pi->buffer_seq_srej;
-				pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
-				l2cap_send_ack(pi);
-				BT_DBG("sk %p, Exit SREJ_SENT", sk);
+			if (list_empty(&chan->srej_l)) {
+				chan->buffer_seq = chan->buffer_seq_srej;
+				chan->conn_state &= ~L2CAP_CONN_SREJ_SENT;
+				l2cap_send_ack(chan);
+				BT_DBG("chan %p, Exit SREJ_SENT", chan);
 			}
 		} else {
 			struct srej_list *l;
 
 			/* duplicated tx_seq */
-			if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
+			if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
 				goto drop;
 
-			list_for_each_entry(l, SREJ_LIST(sk), list) {
+			list_for_each_entry(l, &chan->srej_l, list) {
 				if (l->tx_seq == tx_seq) {
-					l2cap_resend_srejframe(sk, tx_seq);
+					l2cap_resend_srejframe(chan, tx_seq);
 					return 0;
 				}
 			}
-			l2cap_send_srejframe(sk, tx_seq);
+			l2cap_send_srejframe(chan, tx_seq);
 		}
 	} else {
 		expected_tx_seq_offset =
-			(pi->expected_tx_seq - pi->buffer_seq) % 64;
+			(chan->expected_tx_seq - chan->buffer_seq) % 64;
 		if (expected_tx_seq_offset < 0)
 			expected_tx_seq_offset += 64;
 
@@ -3241,51 +3380,51 @@ static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, str
 		if (tx_seq_offset < expected_tx_seq_offset)
 			goto drop;
 
-		pi->conn_state |= L2CAP_CONN_SREJ_SENT;
+		chan->conn_state |= L2CAP_CONN_SREJ_SENT;
 
-		BT_DBG("sk %p, Enter SREJ", sk);
+		BT_DBG("chan %p, Enter SREJ", chan);
 
-		INIT_LIST_HEAD(SREJ_LIST(sk));
-		pi->buffer_seq_srej = pi->buffer_seq;
+		INIT_LIST_HEAD(&chan->srej_l);
+		chan->buffer_seq_srej = chan->buffer_seq;
 
-		__skb_queue_head_init(SREJ_QUEUE(sk));
-		__skb_queue_head_init(BUSY_QUEUE(sk));
-		l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
+		__skb_queue_head_init(&chan->srej_q);
+		__skb_queue_head_init(&chan->busy_q);
+		l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
 
-		pi->conn_state |= L2CAP_CONN_SEND_PBIT;
+		chan->conn_state |= L2CAP_CONN_SEND_PBIT;
 
-		l2cap_send_srejframe(sk, tx_seq);
+		l2cap_send_srejframe(chan, tx_seq);
 
-		del_timer(&pi->ack_timer);
+		del_timer(&chan->ack_timer);
 	}
 	return 0;
 
expected:
-	pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
+	chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
 
-	if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
+	if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
 		bt_cb(skb)->tx_seq = tx_seq;
 		bt_cb(skb)->sar = sar;
-		__skb_queue_tail(SREJ_QUEUE(sk), skb);
+		__skb_queue_tail(&chan->srej_q, skb);
 		return 0;
 	}
 
-	err = l2cap_push_rx_skb(sk, skb, rx_control);
+	err = l2cap_push_rx_skb(chan, skb, rx_control);
 	if (err < 0)
 		return 0;
 
 	if (rx_control & L2CAP_CTRL_FINAL) {
-		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
-			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
+		if (chan->conn_state & L2CAP_CONN_REJ_ACT)
+			chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
 		else
-			l2cap_retransmit_frames(sk);
+			l2cap_retransmit_frames(chan);
 	}
 
 	__mod_ack_timer();
 
-	pi->num_acked = (pi->num_acked + 1) % num_to_ack;
-	if (pi->num_acked == num_to_ack - 1)
-		l2cap_send_ack(pi);
+	chan->num_acked = (chan->num_acked + 1) % num_to_ack;
+	if (chan->num_acked == num_to_ack - 1)
+		l2cap_send_ack(chan);
 
 	return 0;
 
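
The tail of this hunk also shows the receiver's acknowledgment pacing: rather than acking every I-frame, an RR is sent once per num_to_ack = (tx_win / 6) + 1 in-sequence frames. A condensed sketch of that counter (hypothetical helper; the logic is inlined above):

	static bool ertm_should_send_ack(struct l2cap_chan *chan)
	{
		int num_to_ack = (chan->tx_win / 6) + 1;

		chan->num_acked = (chan->num_acked + 1) % num_to_ack;

		/* ack once every num_to_ack in-sequence frames received */
		return chan->num_acked == num_to_ack - 1;
	}
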
@@ -3294,165 +3433,160 @@ drop:
 	return 0;
 }
 
-static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
+static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
 {
-	struct l2cap_pinfo *pi = l2cap_pi(sk);
-
-	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
+	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, __get_reqseq(rx_control),
 						rx_control);
 
-	pi->expected_ack_seq = __get_reqseq(rx_control);
-	l2cap_drop_acked_frames(sk);
+	chan->expected_ack_seq = __get_reqseq(rx_control);
+	l2cap_drop_acked_frames(chan);
 
 	if (rx_control & L2CAP_CTRL_POLL) {
-		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
-		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
-			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
-					(pi->unacked_frames > 0))
+		chan->conn_state |= L2CAP_CONN_SEND_FBIT;
+		if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
+			if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
+					(chan->unacked_frames > 0))
 				__mod_retrans_timer();
 
-			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
-			l2cap_send_srejtail(sk);
+			chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+			l2cap_send_srejtail(chan);
 		} else {
-			l2cap_send_i_or_rr_or_rnr(sk);
+			l2cap_send_i_or_rr_or_rnr(chan);
 		}
 
 	} else if (rx_control & L2CAP_CTRL_FINAL) {
-		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+		chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
 
-		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
-			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
+		if (chan->conn_state & L2CAP_CONN_REJ_ACT)
+			chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
 		else
-			l2cap_retransmit_frames(sk);
+			l2cap_retransmit_frames(chan);
 
 	} else {
-		if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
-				(pi->unacked_frames > 0))
+		if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
+				(chan->unacked_frames > 0))
 			__mod_retrans_timer();
 
-		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
-		if (pi->conn_state & L2CAP_CONN_SREJ_SENT)
-			l2cap_send_ack(pi);
+		chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+		if (chan->conn_state & L2CAP_CONN_SREJ_SENT)
+			l2cap_send_ack(chan);
 		else
-			l2cap_ertm_send(sk);
+			l2cap_ertm_send(chan);
 	}
 }
 
-static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
+static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
 {
-	struct l2cap_pinfo *pi = l2cap_pi(sk);
 	u8 tx_seq = __get_reqseq(rx_control);
 
-	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
+	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
 
-	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+	chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
 
-	pi->expected_ack_seq = tx_seq;
-	l2cap_drop_acked_frames(sk);
+	chan->expected_ack_seq = tx_seq;
+	l2cap_drop_acked_frames(chan);
 
 	if (rx_control & L2CAP_CTRL_FINAL) {
-		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
-			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
+		if (chan->conn_state & L2CAP_CONN_REJ_ACT)
+			chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
 		else
-			l2cap_retransmit_frames(sk);
+			l2cap_retransmit_frames(chan);
 	} else {
-		l2cap_retransmit_frames(sk);
+		l2cap_retransmit_frames(chan);
 
-		if (pi->conn_state & L2CAP_CONN_WAIT_F)
-			pi->conn_state |= L2CAP_CONN_REJ_ACT;
+		if (chan->conn_state & L2CAP_CONN_WAIT_F)
+			chan->conn_state |= L2CAP_CONN_REJ_ACT;
 	}
 }
-static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
+static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
 {
-	struct l2cap_pinfo *pi = l2cap_pi(sk);
 	u8 tx_seq = __get_reqseq(rx_control);
 
-	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
+	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
 
-	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+	chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
 
 	if (rx_control & L2CAP_CTRL_POLL) {
-		pi->expected_ack_seq = tx_seq;
-		l2cap_drop_acked_frames(sk);
+		chan->expected_ack_seq = tx_seq;
+		l2cap_drop_acked_frames(chan);
 
-		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
-		l2cap_retransmit_one_frame(sk, tx_seq);
+		chan->conn_state |= L2CAP_CONN_SEND_FBIT;
+		l2cap_retransmit_one_frame(chan, tx_seq);
 
-		l2cap_ertm_send(sk);
+		l2cap_ertm_send(chan);
 
-		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
-			pi->srej_save_reqseq = tx_seq;
-			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
+		if (chan->conn_state & L2CAP_CONN_WAIT_F) {
+			chan->srej_save_reqseq = tx_seq;
+			chan->conn_state |= L2CAP_CONN_SREJ_ACT;
 		}
 	} else if (rx_control & L2CAP_CTRL_FINAL) {
-		if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
-				pi->srej_save_reqseq == tx_seq)
-			pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
+		if ((chan->conn_state & L2CAP_CONN_SREJ_ACT) &&
+				chan->srej_save_reqseq == tx_seq)
+			chan->conn_state &= ~L2CAP_CONN_SREJ_ACT;
 		else
-			l2cap_retransmit_one_frame(sk, tx_seq);
+			l2cap_retransmit_one_frame(chan, tx_seq);
 	} else {
-		l2cap_retransmit_one_frame(sk, tx_seq);
-		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
-			pi->srej_save_reqseq = tx_seq;
-			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
+		l2cap_retransmit_one_frame(chan, tx_seq);
+		if (chan->conn_state & L2CAP_CONN_WAIT_F) {
+			chan->srej_save_reqseq = tx_seq;
+			chan->conn_state |= L2CAP_CONN_SREJ_ACT;
 		}
 	}
 }
 
-static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
+static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
 {
-	struct l2cap_pinfo *pi = l2cap_pi(sk);
 	u8 tx_seq = __get_reqseq(rx_control);
 
-	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
+	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
 
-	pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
-	pi->expected_ack_seq = tx_seq;
-	l2cap_drop_acked_frames(sk);
+	chan->conn_state |= L2CAP_CONN_REMOTE_BUSY;
+	chan->expected_ack_seq = tx_seq;
+	l2cap_drop_acked_frames(chan);
 
 	if (rx_control & L2CAP_CTRL_POLL)
-		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
+		chan->conn_state |= L2CAP_CONN_SEND_FBIT;
 
-	if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
-		del_timer(&pi->retrans_timer);
+	if (!(chan->conn_state & L2CAP_CONN_SREJ_SENT)) {
+		del_timer(&chan->retrans_timer);
 		if (rx_control & L2CAP_CTRL_POLL)
-			l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
+			l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
 		return;
 	}
 
 	if (rx_control & L2CAP_CTRL_POLL)
-		l2cap_send_srejtail(sk);
+		l2cap_send_srejtail(chan);
 	else
-		l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
+		l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY);
 }
 
-static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
+static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
 {
-	BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
+	BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);
 
 	if (L2CAP_CTRL_FINAL & rx_control &&
-			l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
-		del_timer(&l2cap_pi(sk)->monitor_timer);
-		if (l2cap_pi(sk)->unacked_frames > 0)
+			chan->conn_state & L2CAP_CONN_WAIT_F) {
+		del_timer(&chan->monitor_timer);
+		if (chan->unacked_frames > 0)
 			__mod_retrans_timer();
-		l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
+		chan->conn_state &= ~L2CAP_CONN_WAIT_F;
 	}
 
 	switch (rx_control & L2CAP_CTRL_SUPERVISE) {
 	case L2CAP_SUPER_RCV_READY:
-		l2cap_data_channel_rrframe(sk, rx_control);
+		l2cap_data_channel_rrframe(chan, rx_control);
 		break;
 
 	case L2CAP_SUPER_REJECT:
-		l2cap_data_channel_rejframe(sk, rx_control);
+		l2cap_data_channel_rejframe(chan, rx_control);
 		break;
 
 	case L2CAP_SUPER_SELECT_REJECT:
-		l2cap_data_channel_srejframe(sk, rx_control);
+		l2cap_data_channel_srejframe(chan, rx_control);
 		break;
 
 	case L2CAP_SUPER_RCV_NOT_READY:
-		l2cap_data_channel_rnrframe(sk, rx_control);
+		l2cap_data_channel_rnrframe(chan, rx_control);
 		break;
 	}
 
@@ -3462,7 +3596,7 @@ static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, str
 
 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
 {
-	struct l2cap_pinfo *pi = l2cap_pi(sk);
+	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
 	u16 control;
 	u8 req_seq;
 	int len, next_tx_seq_offset, req_seq_offset;
@@ -3476,51 +3610,51 @@ static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
 	 * Receiver will miss it and start proper recovery
 	 * procedures and ask retransmission.
 	 */
-	if (l2cap_check_fcs(pi, skb))
+	if (l2cap_check_fcs(chan, skb))
 		goto drop;
 
 	if (__is_sar_start(control) && __is_iframe(control))
 		len -= 2;
 
-	if (pi->fcs == L2CAP_FCS_CRC16)
+	if (chan->fcs == L2CAP_FCS_CRC16)
 		len -= 2;
 
-	if (len > pi->mps) {
-		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
+	if (len > chan->mps) {
+		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
 		goto drop;
 	}
 
 	req_seq = __get_reqseq(control);
-	req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
+	req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
 	if (req_seq_offset < 0)
 		req_seq_offset += 64;
 
 	next_tx_seq_offset =
-		(pi->next_tx_seq - pi->expected_ack_seq) % 64;
+		(chan->next_tx_seq - chan->expected_ack_seq) % 64;
 	if (next_tx_seq_offset < 0)
 		next_tx_seq_offset += 64;
 
 	/* check for invalid req-seq */
 	if (req_seq_offset > next_tx_seq_offset) {
-		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
+		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
 		goto drop;
 	}
 
 	if (__is_iframe(control)) {
 		if (len < 0) {
-			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
+			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
 			goto drop;
 		}
 
-		l2cap_data_channel_iframe(sk, control, skb);
+		l2cap_data_channel_iframe(chan, control, skb);
 	} else {
 		if (len != 0) {
 			BT_ERR("%d", len);
-			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
+			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
 			goto drop;
 		}
 
-		l2cap_data_channel_sframe(sk, control, skb);
+		l2cap_data_channel_sframe(chan, control, skb);
 	}
 
 	return 0;
@@ -3532,33 +3666,35 @@ drop:
 
 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
 {
-	struct sock *sk;
+	struct l2cap_chan *chan;
+	struct sock *sk = NULL;
 	struct l2cap_pinfo *pi;
 	u16 control;
 	u8 tx_seq;
 	int len;
 
-	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
-	if (!sk) {
+	chan = l2cap_get_chan_by_scid(conn, cid);
+	if (!chan) {
 		BT_DBG("unknown cid 0x%4.4x", cid);
 		goto drop;
 	}
 
+	sk = chan->sk;
 	pi = l2cap_pi(sk);
 
-	BT_DBG("sk %p, len %d", sk, skb->len);
+	BT_DBG("chan %p, len %d", chan, skb->len);
 
 	if (sk->sk_state != BT_CONNECTED)
 		goto drop;
 
-	switch (pi->mode) {
+	switch (chan->mode) {
 	case L2CAP_MODE_BASIC:
 		/* If socket recv buffers overflows we drop data here
 		 * which is *bad* because L2CAP has to be reliable.
 		 * But we don't have any other choice. L2CAP doesn't
 		 * provide flow control mechanism. */
 
-		if (pi->imtu < skb->len)
+		if (chan->imtu < skb->len)
 			goto drop;
 
 		if (!sock_queue_rcv_skb(sk, skb))
@@ -3580,31 +3716,31 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
 		skb_pull(skb, 2);
 		len = skb->len;
 
-		if (l2cap_check_fcs(pi, skb))
+		if (l2cap_check_fcs(chan, skb))
 			goto drop;
 
 		if (__is_sar_start(control))
 			len -= 2;
 
-		if (pi->fcs == L2CAP_FCS_CRC16)
+		if (chan->fcs == L2CAP_FCS_CRC16)
 			len -= 2;
 
-		if (len > pi->mps || len < 0 || __is_sframe(control))
+		if (len > chan->mps || len < 0 || __is_sframe(control))
 			goto drop;
 
 		tx_seq = __get_txseq(control);
 
-		if (pi->expected_tx_seq == tx_seq)
-			pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
+		if (chan->expected_tx_seq == tx_seq)
+			chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
 		else
-			pi->expected_tx_seq = (tx_seq + 1) % 64;
+			chan->expected_tx_seq = (tx_seq + 1) % 64;
 
-		l2cap_streaming_reassembly_sdu(sk, skb, control);
+		l2cap_streaming_reassembly_sdu(chan, skb, control);
 
 		goto done;
 
 	default:
-		BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
+		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
 		break;
 	}
 
@@ -3620,12 +3756,15 @@ done:
 
 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
 {
-	struct sock *sk;
+	struct sock *sk = NULL;
+	struct l2cap_chan *chan;
 
-	sk = l2cap_get_sock_by_psm(0, psm, conn->src);
-	if (!sk)
+	chan = l2cap_global_chan_by_psm(0, psm, conn->src);
+	if (!chan)
 		goto drop;
 
+	sk = chan->sk;
+
 	bh_lock_sock(sk);
 
 	BT_DBG("sk %p, len %d", sk, skb->len);
@@ -3633,7 +3772,40 @@ static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, str
 	if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
 		goto drop;
 
-	if (l2cap_pi(sk)->imtu < skb->len)
+	if (l2cap_pi(sk)->chan->imtu < skb->len)
+		goto drop;
+
+	if (!sock_queue_rcv_skb(sk, skb))
+		goto done;
+
+drop:
+	kfree_skb(skb);
+
+done:
+	if (sk)
+		bh_unlock_sock(sk);
+	return 0;
+}
+
+static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
+{
+	struct sock *sk = NULL;
+	struct l2cap_chan *chan;
+
+	chan = l2cap_global_chan_by_scid(0, cid, conn->src);
+	if (!chan)
+		goto drop;
+
+	sk = chan->sk;
+
+	bh_lock_sock(sk);
+
+	BT_DBG("sk %p, len %d", sk, skb->len);
+
+	if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
+		goto drop;
+
+	if (l2cap_pi(sk)->chan->imtu < skb->len)
 		goto drop;
 
 	if (!sock_queue_rcv_skb(sk, skb))
@@ -3677,6 +3849,10 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
 		l2cap_conless_channel(conn, psm, skb);
 		break;
 
+	case L2CAP_CID_LE_DATA:
+		l2cap_att_channel(conn, cid, skb);
+		break;
+
 	default:
 		l2cap_data_channel(conn, cid, skb);
 		break;
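
With the L2CAP_CID_LE_DATA case added above, frame dispatch in l2cap_recv_frame keys purely on the CID: fixed CIDs go to dedicated handlers and everything else is treated as a dynamic data channel. A condensed sketch of the resulting switch (assuming the fixed-CID constants and l2cap_sig_channel handler from this tree's include/net/bluetooth/l2cap.h and l2cap_core.c):

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* connectionless frames carry the PSM in the payload */
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, 2);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_DATA:
		l2cap_att_channel(conn, cid, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
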
@@ -3688,8 +3864,7 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
 {
 	int exact = 0, lm1 = 0, lm2 = 0;
-	register struct sock *sk;
-	struct hlist_node *node;
+	struct l2cap_chan *c;
 
 	if (type != ACL_LINK)
 		return -EINVAL;
@@ -3697,23 +3872,25 @@ static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
 	BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
 
 	/* Find listening sockets and check their link_mode */
-	read_lock(&l2cap_sk_list.lock);
-	sk_for_each(sk, node, &l2cap_sk_list.head) {
+	read_lock(&chan_list_lock);
+	list_for_each_entry(c, &chan_list, global_l) {
+		struct sock *sk = c->sk;
+
 		if (sk->sk_state != BT_LISTEN)
 			continue;
 
 		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
 			lm1 |= HCI_LM_ACCEPT;
-			if (l2cap_pi(sk)->role_switch)
+			if (c->role_switch)
 				lm1 |= HCI_LM_MASTER;
 			exact++;
 		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
 			lm2 |= HCI_LM_ACCEPT;
-			if (l2cap_pi(sk)->role_switch)
+			if (c->role_switch)
 				lm2 |= HCI_LM_MASTER;
 		}
 	}
-	read_unlock(&l2cap_sk_list.lock);
+	read_unlock(&chan_list_lock);
 
 	return exact ? lm1 : lm2;
 }
@@ -3761,49 +3938,50 @@ static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
 	return 0;
 }
 
-static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
+static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
 {
+	struct sock *sk = chan->sk;
+
 	if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
 		return;
 
 	if (encrypt == 0x00) {
-		if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
+		if (chan->sec_level == BT_SECURITY_MEDIUM) {
 			l2cap_sock_clear_timer(sk);
 			l2cap_sock_set_timer(sk, HZ * 5);
-		} else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
+		} else if (chan->sec_level == BT_SECURITY_HIGH)
 			__l2cap_sock_close(sk, ECONNREFUSED);
 	} else {
-		if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
+		if (chan->sec_level == BT_SECURITY_MEDIUM)
 			l2cap_sock_clear_timer(sk);
 	}
 }
 
 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
 {
-	struct l2cap_chan_list *l;
 	struct l2cap_conn *conn = hcon->l2cap_data;
-	struct sock *sk;
+	struct l2cap_chan *chan;
 
 	if (!conn)
 		return 0;
 
-	l = &conn->chan_list;
-
 	BT_DBG("conn %p", conn);
 
-	read_lock(&l->lock);
+	read_lock(&conn->chan_lock);
+
+	list_for_each_entry(chan, &conn->chan_l, list) {
+		struct sock *sk = chan->sk;
 
-	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
 		bh_lock_sock(sk);
 
-		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
+		if (chan->conf_state & L2CAP_CONF_CONNECT_PEND) {
 			bh_unlock_sock(sk);
 			continue;
 		}
 
 		if (!status && (sk->sk_state == BT_CONNECTED ||
 						sk->sk_state == BT_CONFIG)) {
-			l2cap_check_encryption(sk, encrypt);
+			l2cap_check_encryption(chan, encrypt);
 			bh_unlock_sock(sk);
 			continue;
 		}
@@ -3811,13 +3989,13 @@ static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
 		if (sk->sk_state == BT_CONNECT) {
 			if (!status) {
 				struct l2cap_conn_req req;
-				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
-				req.psm = l2cap_pi(sk)->psm;
+				req.scid = cpu_to_le16(chan->scid);
+				req.psm = chan->psm;
 
-				l2cap_pi(sk)->ident = l2cap_get_ident(conn);
-				l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
+				chan->ident = l2cap_get_ident(conn);
+				chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
 
-				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
+				l2cap_send_cmd(conn, chan->ident,
 					L2CAP_CONN_REQ, sizeof(req), &req);
 			} else {
 				l2cap_sock_clear_timer(sk);
@@ -3825,29 +4003,39 @@ static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
 			}
 		} else if (sk->sk_state == BT_CONNECT2) {
 			struct l2cap_conn_rsp rsp;
-			__u16 result;
+			__u16 res, stat;
 
 			if (!status) {
-				sk->sk_state = BT_CONFIG;
-				result = L2CAP_CR_SUCCESS;
+				if (bt_sk(sk)->defer_setup) {
+					struct sock *parent = bt_sk(sk)->parent;
+					res = L2CAP_CR_PEND;
+					stat = L2CAP_CS_AUTHOR_PEND;
+					if (parent)
+						parent->sk_data_ready(parent, 0);
+				} else {
+					sk->sk_state = BT_CONFIG;
+					res = L2CAP_CR_SUCCESS;
+					stat = L2CAP_CS_NO_INFO;
+				}
 			} else {
 				sk->sk_state = BT_DISCONN;
 				l2cap_sock_set_timer(sk, HZ / 10);
-				result = L2CAP_CR_SEC_BLOCK;
+				res = L2CAP_CR_SEC_BLOCK;
+				stat = L2CAP_CS_NO_INFO;
 			}
 
-			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
-			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
-			rsp.result = cpu_to_le16(result);
-			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
-			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
-					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
+			rsp.scid = cpu_to_le16(chan->dcid);
+			rsp.dcid = cpu_to_le16(chan->scid);
+			rsp.result = cpu_to_le16(res);
+			rsp.status = cpu_to_le16(stat);
+			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
+							sizeof(rsp), &rsp);
 		}
 
 		bh_unlock_sock(sk);
 	}
 
-	read_unlock(&l->lock);
+	read_unlock(&conn->chan_lock);
 
 	return 0;
 }
@@ -3866,7 +4054,7 @@ static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 fl
 
 	if (!(flags & ACL_CONT)) {
 		struct l2cap_hdr *hdr;
-		struct sock *sk;
+		struct l2cap_chan *chan;
 		u16 cid;
 		int len;
 
@@ -3904,18 +4092,21 @@ static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 fl
 			goto drop;
 		}
 
-		sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
+		chan = l2cap_get_chan_by_scid(conn, cid);
 
-		if (sk && l2cap_pi(sk)->imtu < len - L2CAP_HDR_SIZE) {
-			BT_ERR("Frame exceeding recv MTU (len %d, MTU %d)",
-					len, l2cap_pi(sk)->imtu);
-			bh_unlock_sock(sk);
-			l2cap_conn_unreliable(conn, ECOMM);
-			goto drop;
-		}
+		if (chan && chan->sk) {
+			struct sock *sk = chan->sk;
 
-		if (sk)
+			if (chan->imtu < len - L2CAP_HDR_SIZE) {
+				BT_ERR("Frame exceeding recv MTU (len %d, "
+						"MTU %d)", len,
+						chan->imtu);
+				bh_unlock_sock(sk);
+				l2cap_conn_unreliable(conn, ECOMM);
+				goto drop;
+			}
 			bh_unlock_sock(sk);
+		}
 
 		/* Allocate skb for the complete frame (with header) */
 		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
@@ -3962,24 +4153,22 @@ drop:
 
 static int l2cap_debugfs_show(struct seq_file *f, void *p)
 {
-	struct sock *sk;
-	struct hlist_node *node;
+	struct l2cap_chan *c;
 
-	read_lock_bh(&l2cap_sk_list.lock);
+	read_lock_bh(&chan_list_lock);
 
-	sk_for_each(sk, node, &l2cap_sk_list.head) {
-		struct l2cap_pinfo *pi = l2cap_pi(sk);
+	list_for_each_entry(c, &chan_list, global_l) {
+		struct sock *sk = c->sk;
 
 		seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
 					batostr(&bt_sk(sk)->src),
 					batostr(&bt_sk(sk)->dst),
-					sk->sk_state, __le16_to_cpu(pi->psm),
-					pi->scid, pi->dcid,
-					pi->imtu, pi->omtu, pi->sec_level,
-					pi->mode);
+					sk->sk_state, __le16_to_cpu(c->psm),
+					c->scid, c->dcid, c->imtu, c->omtu,
+					c->sec_level, c->mode);
 	}
 
-	read_unlock_bh(&l2cap_sk_list.lock);
+	read_unlock_bh(&chan_list_lock);
 
 	return 0;
 }
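
Taken together, the l2cap_core.c hunks above move per-channel state out of the protocol-private socket data (struct l2cap_pinfo, reached via l2cap_pi(sk)) and into struct l2cap_chan, which keeps a back-pointer to its socket. A minimal sketch of the access pattern before and after (illustrative helpers only, not part of the patch):

	/* before: channel state hangs off the socket */
	static u16 chan_imtu_old(struct sock *sk)
	{
		return l2cap_pi(sk)->imtu;
	}

	/* after: state lives in the channel; the socket is reached via
	 * chan->sk, and the channel via l2cap_pi(sk)->chan */
	static u16 chan_imtu_new(struct l2cap_chan *chan)
	{
		return chan->imtu;
	}
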
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 299fe56a9668..8248303f44e8 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -30,6 +30,8 @@
 #include <net/bluetooth/hci_core.h>
 #include <net/bluetooth/l2cap.h>
 
+static const struct proto_ops l2cap_sock_ops;
+
 /* ---- L2CAP timers ---- */
 static void l2cap_sock_timeout(unsigned long arg)
 {
@@ -51,7 +53,7 @@ static void l2cap_sock_timeout(unsigned long arg)
 	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
 		reason = ECONNREFUSED;
 	else if (sk->sk_state == BT_CONNECT &&
-				l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
+			l2cap_pi(sk)->chan->sec_level != BT_SECURITY_SDP)
 		reason = ECONNREFUSED;
 	else
 		reason = ETIMEDOUT;
@@ -76,21 +78,10 @@ void l2cap_sock_clear_timer(struct sock *sk)
 	sk_stop_timer(sk, &sk->sk_timer);
 }
 
-static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
-{
-	struct sock *sk;
-	struct hlist_node *node;
-	sk_for_each(sk, node, &l2cap_sk_list.head)
-		if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
-			goto found;
-	sk = NULL;
-found:
-	return sk;
-}
-
 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
 {
 	struct sock *sk = sock->sk;
+	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
 	struct sockaddr_l2 la;
 	int len, err = 0;
 
@@ -129,26 +120,20 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
 		}
 	}
 
-	write_lock_bh(&l2cap_sk_list.lock);
+	if (la.l2_cid)
+		err = l2cap_add_scid(chan, la.l2_cid);
+	else
+		err = l2cap_add_psm(chan, &la.l2_bdaddr, la.l2_psm);
 
-	if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
-		err = -EADDRINUSE;
-	} else {
-		/* Save source address */
-		bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
-		l2cap_pi(sk)->psm = la.l2_psm;
-		l2cap_pi(sk)->sport = la.l2_psm;
-		sk->sk_state = BT_BOUND;
-
-		if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
-					__le16_to_cpu(la.l2_psm) == 0x0003)
-			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
-	}
+	if (err < 0)
+		goto done;
 
-	if (la.l2_cid)
-		l2cap_pi(sk)->scid = la.l2_cid;
+	if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
+				__le16_to_cpu(la.l2_psm) == 0x0003)
+		chan->sec_level = BT_SECURITY_SDP;
 
-	write_unlock_bh(&l2cap_sk_list.lock);
+	bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
+	sk->sk_state = BT_BOUND;
 
done:
 	release_sock(sk);
@@ -158,6 +143,7 @@ done:
static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
 {
 	struct sock *sk = sock->sk;
+	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
 	struct sockaddr_l2 la;
 	int len, err = 0;
 
@@ -182,7 +168,7 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al
 		goto done;
 	}
 
-	switch (l2cap_pi(sk)->mode) {
+	switch (chan->mode) {
 	case L2CAP_MODE_BASIC:
 		break;
 	case L2CAP_MODE_ERTM:
@@ -226,10 +212,10 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al
 
 	/* Set destination address and psm */
 	bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
-	l2cap_pi(sk)->psm = la.l2_psm;
-	l2cap_pi(sk)->dcid = la.l2_cid;
+	chan->psm = la.l2_psm;
+	chan->dcid = la.l2_cid;
 
-	err = l2cap_do_connect(sk);
+	err = l2cap_chan_connect(l2cap_pi(sk)->chan);
 	if (err)
 		goto done;
 
@@ -244,6 +230,7 @@ done:
static int l2cap_sock_listen(struct socket *sock, int backlog)
 {
 	struct sock *sk = sock->sk;
+	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
 	int err = 0;
 
 	BT_DBG("sk %p backlog %d", sk, backlog);
@@ -256,7 +243,7 @@ static int l2cap_sock_listen(struct socket *sock, int backlog)
 		goto done;
 	}
 
-	switch (l2cap_pi(sk)->mode) {
+	switch (chan->mode) {
 	case L2CAP_MODE_BASIC:
 		break;
 	case L2CAP_MODE_ERTM:
@@ -269,28 +256,6 @@ static int l2cap_sock_listen(struct socket *sock, int backlog)
 		goto done;
 	}
 
-	if (!l2cap_pi(sk)->psm && !l2cap_pi(sk)->dcid) {
-		bdaddr_t *src = &bt_sk(sk)->src;
-		u16 psm;
-
-		err = -EINVAL;
-
-		write_lock_bh(&l2cap_sk_list.lock);
-
-		for (psm = 0x1001; psm < 0x1100; psm += 2)
-			if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
-				l2cap_pi(sk)->psm = cpu_to_le16(psm);
-				l2cap_pi(sk)->sport = cpu_to_le16(psm);
-				err = 0;
-				break;
-			}
-
-		write_unlock_bh(&l2cap_sk_list.lock);
-
-		if (err < 0)
-			goto done;
-	}
-
 	sk->sk_max_ack_backlog = backlog;
 	sk->sk_ack_backlog = 0;
 	sk->sk_state = BT_LISTEN;
@@ -360,6 +325,7 @@ static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *l
 {
 	struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
 	struct sock *sk = sock->sk;
+	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
 
 	BT_DBG("sock %p, sk %p", sock, sk);
 
@@ -367,13 +333,13 @@ static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *l
 	*len = sizeof(struct sockaddr_l2);
 
 	if (peer) {
-		la->l2_psm = l2cap_pi(sk)->psm;
+		la->l2_psm = chan->psm;
 		bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
-		la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
+		la->l2_cid = cpu_to_le16(chan->dcid);
 	} else {
-		la->l2_psm = l2cap_pi(sk)->sport;
+		la->l2_psm = chan->sport;
 		bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
-		la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
+		la->l2_cid = cpu_to_le16(chan->scid);
 	}
 
 	return 0;
@@ -382,6 +348,7 @@ static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *l
static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
 {
 	struct sock *sk = sock->sk;
+	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
 	struct l2cap_options opts;
 	struct l2cap_conninfo cinfo;
 	int len, err = 0;
@@ -397,13 +364,13 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us
397 switch (optname) { 364 switch (optname) {
398 case L2CAP_OPTIONS: 365 case L2CAP_OPTIONS:
399 memset(&opts, 0, sizeof(opts)); 366 memset(&opts, 0, sizeof(opts));
400 opts.imtu = l2cap_pi(sk)->imtu; 367 opts.imtu = chan->imtu;
401 opts.omtu = l2cap_pi(sk)->omtu; 368 opts.omtu = chan->omtu;
402 opts.flush_to = l2cap_pi(sk)->flush_to; 369 opts.flush_to = chan->flush_to;
403 opts.mode = l2cap_pi(sk)->mode; 370 opts.mode = chan->mode;
404 opts.fcs = l2cap_pi(sk)->fcs; 371 opts.fcs = chan->fcs;
405 opts.max_tx = l2cap_pi(sk)->max_tx; 372 opts.max_tx = chan->max_tx;
406 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win; 373 opts.txwin_size = (__u16)chan->tx_win;
407 374
408 len = min_t(unsigned int, len, sizeof(opts)); 375 len = min_t(unsigned int, len, sizeof(opts));
409 if (copy_to_user(optval, (char *) &opts, len)) 376 if (copy_to_user(optval, (char *) &opts, len))
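Every field copied into struct l2cap_options above remains reachable from userspace through the long-standing SOL_L2CAP interface; the patch only changes which structure backs it. A read-back sketch, assuming BlueZ headers and that fd is an L2CAP socket:

#include <stdio.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/l2cap.h>

static void dump_l2cap_options(int fd)
{
	struct l2cap_options opts;
	socklen_t optlen = sizeof(opts);

	if (getsockopt(fd, SOL_L2CAP, L2CAP_OPTIONS, &opts, &optlen) < 0) {
		perror("getsockopt");
		return;
	}

	printf("imtu %u omtu %u mode %u\n", opts.imtu, opts.omtu, opts.mode);
}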
@@ -412,7 +379,7 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us
 			break;
 
 	case L2CAP_LM:
-		switch (l2cap_pi(sk)->sec_level) {
+		switch (chan->sec_level) {
 		case BT_SECURITY_LOW:
 			opt = L2CAP_LM_AUTH;
 			break;
@@ -428,10 +395,10 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us
 			break;
 		}
 
-		if (l2cap_pi(sk)->role_switch)
+		if (chan->role_switch)
 			opt |= L2CAP_LM_MASTER;
 
-		if (l2cap_pi(sk)->force_reliable)
+		if (chan->force_reliable)
 			opt |= L2CAP_LM_RELIABLE;
 
 		if (put_user(opt, (u32 __user *) optval))
@@ -446,8 +413,9 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us
 			break;
 		}
 
-		cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
-		memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
+		memset(&cinfo, 0, sizeof(cinfo));
+		cinfo.hci_handle = chan->conn->hcon->handle;
+		memcpy(cinfo.dev_class, chan->conn->hcon->dev_class, 3);
 
 		len = min_t(unsigned int, len, sizeof(cinfo));
 		if (copy_to_user(optval, (char *) &cinfo, len))
@@ -467,6 +435,7 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us
 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
 {
 	struct sock *sk = sock->sk;
+	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
 	struct bt_security sec;
 	int len, err = 0;
 
@@ -491,7 +460,7 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
 		break;
 	}
 
-	sec.level = l2cap_pi(sk)->sec_level;
+	sec.level = chan->sec_level;
 
 	len = min_t(unsigned int, len, sizeof(sec));
 	if (copy_to_user(optval, (char *) &sec, len))
@@ -511,7 +480,7 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
 		break;
 
 	case BT_FLUSHABLE:
-		if (put_user(l2cap_pi(sk)->flushable, (u32 __user *) optval))
+		if (put_user(chan->flushable, (u32 __user *) optval))
 			err = -EFAULT;
 
 		break;
@@ -528,6 +497,7 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
 {
 	struct sock *sk = sock->sk;
+	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
 	struct l2cap_options opts;
 	int len, err = 0;
 	u32 opt;
@@ -543,13 +513,13 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
 		break;
 	}
 
-	opts.imtu = l2cap_pi(sk)->imtu;
-	opts.omtu = l2cap_pi(sk)->omtu;
-	opts.flush_to = l2cap_pi(sk)->flush_to;
-	opts.mode = l2cap_pi(sk)->mode;
-	opts.fcs = l2cap_pi(sk)->fcs;
-	opts.max_tx = l2cap_pi(sk)->max_tx;
-	opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
+	opts.imtu = chan->imtu;
+	opts.omtu = chan->omtu;
+	opts.flush_to = chan->flush_to;
+	opts.mode = chan->mode;
+	opts.fcs = chan->fcs;
+	opts.max_tx = chan->max_tx;
+	opts.txwin_size = (__u16)chan->tx_win;
 
 	len = min_t(unsigned int, sizeof(opts), optlen);
 	if (copy_from_user((char *) &opts, optval, len)) {
@@ -562,10 +532,10 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
 		break;
 	}
 
-	l2cap_pi(sk)->mode = opts.mode;
-	switch (l2cap_pi(sk)->mode) {
+	chan->mode = opts.mode;
+	switch (chan->mode) {
 	case L2CAP_MODE_BASIC:
-		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
+		chan->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
 		break;
 	case L2CAP_MODE_ERTM:
 	case L2CAP_MODE_STREAMING:
@@ -577,11 +547,11 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
 		break;
 	}
 
-	l2cap_pi(sk)->imtu = opts.imtu;
-	l2cap_pi(sk)->omtu = opts.omtu;
-	l2cap_pi(sk)->fcs = opts.fcs;
-	l2cap_pi(sk)->max_tx = opts.max_tx;
-	l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
+	chan->imtu = opts.imtu;
+	chan->omtu = opts.omtu;
+	chan->fcs = opts.fcs;
+	chan->max_tx = opts.max_tx;
+	chan->tx_win = (__u8)opts.txwin_size;
 	break;
 
 	case L2CAP_LM:
@@ -591,14 +561,14 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
 	}
 
 	if (opt & L2CAP_LM_AUTH)
-		l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
+		chan->sec_level = BT_SECURITY_LOW;
 	if (opt & L2CAP_LM_ENCRYPT)
-		l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
+		chan->sec_level = BT_SECURITY_MEDIUM;
 	if (opt & L2CAP_LM_SECURE)
-		l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
+		chan->sec_level = BT_SECURITY_HIGH;
 
-	l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
-	l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
+	chan->role_switch = (opt & L2CAP_LM_MASTER);
+	chan->force_reliable = (opt & L2CAP_LM_RELIABLE);
 	break;
 
 	default:
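The L2CAP_LM handling above maps link-mode flags to a single security level, with later checks winning: AUTH yields BT_SECURITY_LOW, ENCRYPT yields BT_SECURITY_MEDIUM, and SECURE yields BT_SECURITY_HIGH. From userspace that looks like this sketch (fd is assumed to be a not-yet-connected L2CAP socket):

#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/l2cap.h>

/* Request encryption plus role switch; per the mapping above this
 * lands the channel at sec_level BT_SECURITY_MEDIUM. */
static int request_encryption(int fd)
{
	unsigned int opt = L2CAP_LM_ENCRYPT | L2CAP_LM_MASTER;

	return setsockopt(fd, SOL_L2CAP, L2CAP_LM, &opt, sizeof(opt));
}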
@@ -613,6 +583,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
 {
 	struct sock *sk = sock->sk;
+	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
 	struct bt_security sec;
 	int len, err = 0;
 	u32 opt;
@@ -649,7 +620,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
 			break;
 		}
 
-		l2cap_pi(sk)->sec_level = sec.level;
+		chan->sec_level = sec.level;
 		break;
 
 	case BT_DEFER_SETUP:
@@ -678,7 +649,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
 		}
 
 		if (opt == BT_FLUSHABLE_OFF) {
-			struct l2cap_conn *conn = l2cap_pi(sk)->conn;
+			struct l2cap_conn *conn = chan->conn;
 			/* proceed further only when we have l2cap_conn and
 			   No Flush support in the LM */
 			if (!conn || !lmp_no_flush_capable(conn->hcon->hdev)) {
@@ -687,7 +658,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
 			}
 		}
 
-		l2cap_pi(sk)->flushable = opt;
+		chan->flushable = opt;
 		break;
 
 	default:
@@ -702,7 +673,7 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
 {
 	struct sock *sk = sock->sk;
-	struct l2cap_pinfo *pi = l2cap_pi(sk);
+	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
 	struct sk_buff *skb;
 	u16 control;
 	int err;
@@ -725,74 +696,77 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
 
 	/* Connectionless channel */
 	if (sk->sk_type == SOCK_DGRAM) {
-		skb = l2cap_create_connless_pdu(sk, msg, len);
+		skb = l2cap_create_connless_pdu(chan, msg, len);
 		if (IS_ERR(skb)) {
 			err = PTR_ERR(skb);
 		} else {
-			l2cap_do_send(sk, skb);
+			l2cap_do_send(chan, skb);
 			err = len;
 		}
 		goto done;
 	}
 
-	switch (pi->mode) {
+	switch (chan->mode) {
 	case L2CAP_MODE_BASIC:
 		/* Check outgoing MTU */
-		if (len > pi->omtu) {
+		if (len > chan->omtu) {
 			err = -EMSGSIZE;
 			goto done;
 		}
 
 		/* Create a basic PDU */
-		skb = l2cap_create_basic_pdu(sk, msg, len);
+		skb = l2cap_create_basic_pdu(chan, msg, len);
 		if (IS_ERR(skb)) {
 			err = PTR_ERR(skb);
 			goto done;
 		}
 
-		l2cap_do_send(sk, skb);
+		l2cap_do_send(chan, skb);
 		err = len;
 		break;
 
 	case L2CAP_MODE_ERTM:
 	case L2CAP_MODE_STREAMING:
 		/* Entire SDU fits into one PDU */
-		if (len <= pi->remote_mps) {
+		if (len <= chan->remote_mps) {
 			control = L2CAP_SDU_UNSEGMENTED;
-			skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
+			skb = l2cap_create_iframe_pdu(chan, msg, len, control,
+									0);
 			if (IS_ERR(skb)) {
 				err = PTR_ERR(skb);
 				goto done;
 			}
-			__skb_queue_tail(TX_QUEUE(sk), skb);
+			__skb_queue_tail(&chan->tx_q, skb);
 
-			if (sk->sk_send_head == NULL)
-				sk->sk_send_head = skb;
+			if (chan->tx_send_head == NULL)
+				chan->tx_send_head = skb;
 
 		} else {
 			/* Segment SDU into multiples PDUs */
-			err = l2cap_sar_segment_sdu(sk, msg, len);
+			err = l2cap_sar_segment_sdu(chan, msg, len);
 			if (err < 0)
 				goto done;
 		}
 
-		if (pi->mode == L2CAP_MODE_STREAMING) {
-			l2cap_streaming_send(sk);
-		} else {
-			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
-					(pi->conn_state & L2CAP_CONN_WAIT_F)) {
-				err = len;
-				break;
-			}
-			err = l2cap_ertm_send(sk);
+		if (chan->mode == L2CAP_MODE_STREAMING) {
+			l2cap_streaming_send(chan);
+			err = len;
+			break;
+		}
+
+		if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
+				(chan->conn_state & L2CAP_CONN_WAIT_F)) {
+			err = len;
+			break;
 		}
+		err = l2cap_ertm_send(chan);
 
 		if (err >= 0)
 			err = len;
 		break;
 
 	default:
-		BT_DBG("bad state %1.1x", pi->mode);
+		BT_DBG("bad state %1.1x", chan->mode);
 		err = -EBADFD;
 	}
 
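As rewritten, l2cap_sock_sendmsg() branches purely on chan->mode: basic mode takes the single-PDU path, while ERTM and streaming queue onto chan->tx_q and either stream immediately or defer to l2cap_ertm_send(). The mode itself is still chosen through L2CAP_OPTIONS before connecting. A hedged userspace sketch; the L2CAP_MODE_ERTM value is taken from the kernel header and guarded in case the userspace header lacks it:

#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/l2cap.h>

#ifndef L2CAP_MODE_ERTM
#define L2CAP_MODE_ERTM 0x03	/* assumption: value from the kernel header */
#endif

/* Switch a not-yet-connected socket to ERTM so that send() lands in
 * the ERTM branch of l2cap_sock_sendmsg() shown above. */
static int enable_ertm(int fd)
{
	struct l2cap_options opts;
	socklen_t optlen = sizeof(opts);

	if (getsockopt(fd, SOL_L2CAP, L2CAP_OPTIONS, &opts, &optlen) < 0)
		return -1;

	opts.mode = L2CAP_MODE_ERTM;

	return setsockopt(fd, SOL_L2CAP, L2CAP_OPTIONS, &opts, sizeof(opts));
}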
@@ -808,29 +782,9 @@ static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct ms
 	lock_sock(sk);
 
 	if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
-		struct l2cap_conn_rsp rsp;
-		struct l2cap_conn *conn = l2cap_pi(sk)->conn;
-		u8 buf[128];
-
 		sk->sk_state = BT_CONFIG;
 
-		rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
-		rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
-		rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
-		rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
-		l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
-					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
-
-		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) {
-			release_sock(sk);
-			return 0;
-		}
-
-		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
-		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
-				l2cap_build_conf_req(sk, buf), buf);
-		l2cap_pi(sk)->num_conf_req++;
-
+		__l2cap_connect_rsp_defer(l2cap_pi(sk)->chan);
 		release_sock(sk);
 		return 0;
 	}
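__l2cap_connect_rsp_defer() now encapsulates the connect response and configuration request that this function used to assemble inline. The userspace semantics of BT_DEFER_SETUP are untouched: an accepted socket sits in BT_CONNECT2 until the server's first read authorizes it. Enabling it is one setsockopt on the listening socket (sketch, assuming BlueZ headers):

#include <sys/socket.h>
#include <bluetooth/bluetooth.h>

/* Defer connection setup on a listening Bluetooth socket: the accepted
 * socket completes channel setup only when the server first recv()s. */
static int defer_setup(int listen_fd)
{
	int on = 1;

	return setsockopt(listen_fd, SOL_BLUETOOTH, BT_DEFER_SETUP,
							&on, sizeof(on));
}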
@@ -854,7 +808,8 @@ void l2cap_sock_kill(struct sock *sk)
 	BT_DBG("sk %p state %d", sk, sk->sk_state);
 
 	/* Kill poor orphan */
-	bt_sock_unlink(&l2cap_sk_list, sk);
+
+	l2cap_chan_destroy(l2cap_pi(sk)->chan);
 	sock_set_flag(sk, SOCK_DEAD);
 	sock_put(sk);
 }
@@ -885,7 +840,8 @@ static void l2cap_sock_cleanup_listen(struct sock *parent)
 
 void __l2cap_sock_close(struct sock *sk, int reason)
 {
-	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
+	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
+	struct l2cap_conn *conn = chan->conn;
 
 	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
 
@@ -900,9 +856,9 @@ void __l2cap_sock_close(struct sock *sk, int reason)
 				sk->sk_type == SOCK_STREAM) &&
 				conn->hcon->type == ACL_LINK) {
 			l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
-			l2cap_send_disconn_req(conn, sk, reason);
+			l2cap_send_disconn_req(conn, chan, reason);
 		} else
-			l2cap_chan_del(sk, reason);
+			l2cap_chan_del(chan, reason);
 		break;
 
 	case BT_CONNECT2:
@@ -917,20 +873,20 @@ void __l2cap_sock_close(struct sock *sk, int reason)
 			else
 				result = L2CAP_CR_BAD_PSM;
 
-			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
-			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
+			rsp.scid = cpu_to_le16(chan->dcid);
+			rsp.dcid = cpu_to_le16(chan->scid);
 			rsp.result = cpu_to_le16(result);
 			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
-			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
-					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
+			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
+							sizeof(rsp), &rsp);
 		}
 
-		l2cap_chan_del(sk, reason);
+		l2cap_chan_del(chan, reason);
 		break;
 
 	case BT_CONNECT:
 	case BT_DISCONN:
-		l2cap_chan_del(sk, reason);
+		l2cap_chan_del(chan, reason);
 		break;
 
 	default:
@@ -942,6 +898,7 @@ void __l2cap_sock_close(struct sock *sk, int reason)
 static int l2cap_sock_shutdown(struct socket *sock, int how)
 {
 	struct sock *sk = sock->sk;
+	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
 	int err = 0;
 
 	BT_DBG("sock %p, sk %p", sock, sk);
@@ -951,7 +908,7 @@ static int l2cap_sock_shutdown(struct socket *sock, int how)
 
 	lock_sock(sk);
 	if (!sk->sk_shutdown) {
-		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
+		if (chan->mode == L2CAP_MODE_ERTM)
 			err = __l2cap_wait_ack(sk);
 
 		sk->sk_shutdown = SHUTDOWN_MASK;
@@ -998,49 +955,47 @@ static void l2cap_sock_destruct(struct sock *sk)
 void l2cap_sock_init(struct sock *sk, struct sock *parent)
 {
 	struct l2cap_pinfo *pi = l2cap_pi(sk);
+	struct l2cap_chan *chan = pi->chan;
 
 	BT_DBG("sk %p", sk);
 
 	if (parent) {
+		struct l2cap_chan *pchan = l2cap_pi(parent)->chan;
+
 		sk->sk_type = parent->sk_type;
 		bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
 
-		pi->imtu = l2cap_pi(parent)->imtu;
-		pi->omtu = l2cap_pi(parent)->omtu;
-		pi->conf_state = l2cap_pi(parent)->conf_state;
-		pi->mode = l2cap_pi(parent)->mode;
-		pi->fcs = l2cap_pi(parent)->fcs;
-		pi->max_tx = l2cap_pi(parent)->max_tx;
-		pi->tx_win = l2cap_pi(parent)->tx_win;
-		pi->sec_level = l2cap_pi(parent)->sec_level;
-		pi->role_switch = l2cap_pi(parent)->role_switch;
-		pi->force_reliable = l2cap_pi(parent)->force_reliable;
-		pi->flushable = l2cap_pi(parent)->flushable;
+		chan->imtu = pchan->imtu;
+		chan->omtu = pchan->omtu;
+		chan->conf_state = pchan->conf_state;
+		chan->mode = pchan->mode;
+		chan->fcs = pchan->fcs;
+		chan->max_tx = pchan->max_tx;
+		chan->tx_win = pchan->tx_win;
+		chan->sec_level = pchan->sec_level;
+		chan->role_switch = pchan->role_switch;
+		chan->force_reliable = pchan->force_reliable;
+		chan->flushable = pchan->flushable;
 	} else {
-		pi->imtu = L2CAP_DEFAULT_MTU;
-		pi->omtu = 0;
+		chan->imtu = L2CAP_DEFAULT_MTU;
+		chan->omtu = 0;
 		if (!disable_ertm && sk->sk_type == SOCK_STREAM) {
-			pi->mode = L2CAP_MODE_ERTM;
-			pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
+			chan->mode = L2CAP_MODE_ERTM;
+			chan->conf_state |= L2CAP_CONF_STATE2_DEVICE;
 		} else {
-			pi->mode = L2CAP_MODE_BASIC;
+			chan->mode = L2CAP_MODE_BASIC;
 		}
-		pi->max_tx = L2CAP_DEFAULT_MAX_TX;
-		pi->fcs = L2CAP_FCS_CRC16;
-		pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
-		pi->sec_level = BT_SECURITY_LOW;
-		pi->role_switch = 0;
-		pi->force_reliable = 0;
-		pi->flushable = BT_FLUSHABLE_OFF;
+		chan->max_tx = L2CAP_DEFAULT_MAX_TX;
+		chan->fcs = L2CAP_FCS_CRC16;
+		chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
+		chan->sec_level = BT_SECURITY_LOW;
+		chan->role_switch = 0;
+		chan->force_reliable = 0;
+		chan->flushable = BT_FLUSHABLE_OFF;
 	}
 
 	/* Default config options */
-	pi->conf_len = 0;
-	pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
-	skb_queue_head_init(TX_QUEUE(sk));
-	skb_queue_head_init(SREJ_QUEUE(sk));
-	skb_queue_head_init(BUSY_QUEUE(sk));
-	INIT_LIST_HEAD(SREJ_LIST(sk));
+	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
 }
 
 static struct proto l2cap_proto = {
@@ -1070,7 +1025,6 @@ struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, g
 
 	setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
 
-	bt_sock_link(&l2cap_sk_list, sk);
 	return sk;
 }
@@ -1078,6 +1032,7 @@ static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
 								int kern)
 {
 	struct sock *sk;
+	struct l2cap_chan *chan;
 
 	BT_DBG("sock %p", sock);
 
@@ -1096,11 +1051,19 @@ static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
 	if (!sk)
 		return -ENOMEM;
 
+	chan = l2cap_chan_create(sk);
+	if (!chan) {
+		l2cap_sock_kill(sk);
+		return -ENOMEM;
+	}
+
+	l2cap_pi(sk)->chan = chan;
+
 	l2cap_sock_init(sk, NULL);
 	return 0;
 }
 
-const struct proto_ops l2cap_sock_ops = {
+static const struct proto_ops l2cap_sock_ops = {
 	.family		= PF_BLUETOOTH,
 	.owner		= THIS_MODULE,
 	.release	= l2cap_sock_release,
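Seen as a whole, the l2cap_sock.c changes repeat one move: every protocol field migrates from l2cap_pinfo into struct l2cap_chan, and the socket keeps a single pointer to it. A condensed restatement of the ownership pattern, paraphrasing the hunks above rather than introducing new API:

/* Condensed sketch: the socket front-end allocates exactly one channel
 * and thereafter touches protocol state only through it. */
static int sock_attach_chan(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_chan_create(sk);

	if (!chan)
		return -ENOMEM;	/* caller tears the half-built socket down */

	l2cap_pi(sk)->chan = chan;
	return 0;
}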
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 4476d8e3c0f2..dae382ce7020 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -36,7 +36,7 @@ struct pending_cmd {
 	struct list_head list;
 	__u16 opcode;
 	int index;
-	void *cmd;
+	void *param;
 	struct sock *sk;
 	void *user_data;
 };
@@ -179,10 +179,12 @@ static int read_controller_info(struct sock *sk, u16 index)
 
 	hci_del_off_timer(hdev);
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 
 	set_bit(HCI_MGMT, &hdev->flags);
 
+	memset(&rp, 0, sizeof(rp));
+
 	rp.type = hdev->dev_type;
 
 	rp.powered = test_bit(HCI_UP, &hdev->flags);
@@ -204,7 +206,9 @@ static int read_controller_info(struct sock *sk, u16 index)
 	rp.hci_ver = hdev->hci_ver;
 	put_unaligned_le16(hdev->hci_rev, &rp.hci_rev);
 
-	hci_dev_unlock_bh(hdev);
+	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
+
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);
 
 	return cmd_complete(sk, index, MGMT_OP_READ_INFO, &rp, sizeof(rp));
@@ -213,7 +217,7 @@ static int read_controller_info(struct sock *sk, u16 index)
 static void mgmt_pending_free(struct pending_cmd *cmd)
 {
 	sock_put(cmd->sk);
-	kfree(cmd->cmd);
+	kfree(cmd->param);
 	kfree(cmd);
 }
 
@@ -229,13 +233,14 @@ static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
 	cmd->opcode = opcode;
 	cmd->index = index;
 
-	cmd->cmd = kmalloc(len, GFP_ATOMIC);
-	if (!cmd->cmd) {
+	cmd->param = kmalloc(len, GFP_ATOMIC);
+	if (!cmd->param) {
 		kfree(cmd);
 		return NULL;
 	}
 
-	memcpy(cmd->cmd, data, len);
+	if (data)
+		memcpy(cmd->param, data, len);
 
 	cmd->sk = sk;
 	sock_hold(sk);
@@ -311,7 +316,7 @@ static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len)
 	if (!hdev)
 		return cmd_status(sk, index, MGMT_OP_SET_POWERED, ENODEV);
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 
 	up = test_bit(HCI_UP, &hdev->flags);
 	if ((cp->val && up) || (!cp->val && !up)) {
@@ -338,7 +343,7 @@ static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len)
 	err = 0;
 
 failed:
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);
 	return err;
 }
@@ -363,7 +368,7 @@ static int set_discoverable(struct sock *sk, u16 index, unsigned char *data,
 	if (!hdev)
 		return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENODEV);
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 
 	if (!test_bit(HCI_UP, &hdev->flags)) {
 		err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENETDOWN);
@@ -398,7 +403,7 @@ static int set_discoverable(struct sock *sk, u16 index, unsigned char *data,
 		mgmt_pending_remove(cmd);
 
 failed:
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);
 
 	return err;
@@ -424,7 +429,7 @@ static int set_connectable(struct sock *sk, u16 index, unsigned char *data,
 	if (!hdev)
 		return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENODEV);
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 
 	if (!test_bit(HCI_UP, &hdev->flags)) {
 		err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENETDOWN);
@@ -458,7 +463,7 @@ static int set_connectable(struct sock *sk, u16 index, unsigned char *data,
 		mgmt_pending_remove(cmd);
 
 failed:
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);
 
 	return err;
@@ -517,7 +522,7 @@ static int set_pairable(struct sock *sk, u16 index, unsigned char *data,
 	if (!hdev)
 		return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, ENODEV);
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 
 	if (cp->val)
 		set_bit(HCI_PAIRABLE, &hdev->flags);
@@ -533,12 +538,156 @@ static int set_pairable(struct sock *sk, u16 index, unsigned char *data,
 	err = mgmt_event(MGMT_EV_PAIRABLE, index, &ev, sizeof(ev), sk);
 
 failed:
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);
 
 	return err;
 }
 
+#define EIR_FLAGS		0x01 /* flags */
+#define EIR_UUID16_SOME		0x02 /* 16-bit UUID, more available */
+#define EIR_UUID16_ALL		0x03 /* 16-bit UUID, all listed */
+#define EIR_UUID32_SOME		0x04 /* 32-bit UUID, more available */
+#define EIR_UUID32_ALL		0x05 /* 32-bit UUID, all listed */
+#define EIR_UUID128_SOME	0x06 /* 128-bit UUID, more available */
+#define EIR_UUID128_ALL		0x07 /* 128-bit UUID, all listed */
+#define EIR_NAME_SHORT		0x08 /* shortened local name */
+#define EIR_NAME_COMPLETE	0x09 /* complete local name */
+#define EIR_TX_POWER		0x0A /* transmit power level */
+#define EIR_DEVICE_ID		0x10 /* device ID */
+
+#define PNP_INFO_SVCLASS_ID	0x1200
+
+static u8 bluetooth_base_uuid[] = {
+			0xFB, 0x34, 0x9B, 0x5F, 0x80, 0x00, 0x00, 0x80,
+			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+};
+
+static u16 get_uuid16(u8 *uuid128)
+{
+	u32 val;
+	int i;
+
+	for (i = 0; i < 12; i++) {
+		if (bluetooth_base_uuid[i] != uuid128[i])
+			return 0;
+	}
+
+	memcpy(&val, &uuid128[12], 4);
+
+	val = le32_to_cpu(val);
+	if (val > 0xffff)
+		return 0;
+
+	return (u16) val;
+}
+
+static void create_eir(struct hci_dev *hdev, u8 *data)
+{
+	u8 *ptr = data;
+	u16 eir_len = 0;
+	u16 uuid16_list[HCI_MAX_EIR_LENGTH / sizeof(u16)];
+	int i, truncated = 0;
+	struct list_head *p;
+	size_t name_len;
+
+	name_len = strlen(hdev->dev_name);
+
+	if (name_len > 0) {
+		/* EIR Data type */
+		if (name_len > 48) {
+			name_len = 48;
+			ptr[1] = EIR_NAME_SHORT;
+		} else
+			ptr[1] = EIR_NAME_COMPLETE;
+
+		/* EIR Data length */
+		ptr[0] = name_len + 1;
+
+		memcpy(ptr + 2, hdev->dev_name, name_len);
+
+		eir_len += (name_len + 2);
+		ptr += (name_len + 2);
+	}
+
+	memset(uuid16_list, 0, sizeof(uuid16_list));
+
+	/* Group all UUID16 types */
+	list_for_each(p, &hdev->uuids) {
+		struct bt_uuid *uuid = list_entry(p, struct bt_uuid, list);
+		u16 uuid16;
+
+		uuid16 = get_uuid16(uuid->uuid);
+		if (uuid16 == 0)
+			return;
+
+		if (uuid16 < 0x1100)
+			continue;
+
+		if (uuid16 == PNP_INFO_SVCLASS_ID)
+			continue;
+
+		/* Stop if not enough space to put next UUID */
+		if (eir_len + 2 + sizeof(u16) > HCI_MAX_EIR_LENGTH) {
+			truncated = 1;
+			break;
+		}
+
+		/* Check for duplicates */
+		for (i = 0; uuid16_list[i] != 0; i++)
+			if (uuid16_list[i] == uuid16)
+				break;
+
+		if (uuid16_list[i] == 0) {
+			uuid16_list[i] = uuid16;
+			eir_len += sizeof(u16);
+		}
+	}
+
+	if (uuid16_list[0] != 0) {
+		u8 *length = ptr;
+
+		/* EIR Data type */
+		ptr[1] = truncated ? EIR_UUID16_SOME : EIR_UUID16_ALL;
+
+		ptr += 2;
+		eir_len += 2;
+
+		for (i = 0; uuid16_list[i] != 0; i++) {
+			*ptr++ = (uuid16_list[i] & 0x00ff);
+			*ptr++ = (uuid16_list[i] & 0xff00) >> 8;
+		}
+
+		/* EIR Data length */
+		*length = (i * sizeof(u16)) + 1;
+	}
+}
+
+static int update_eir(struct hci_dev *hdev)
+{
+	struct hci_cp_write_eir cp;
+
+	if (!(hdev->features[6] & LMP_EXT_INQ))
+		return 0;
+
+	if (hdev->ssp_mode == 0)
+		return 0;
+
+	if (test_bit(HCI_SERVICE_CACHE, &hdev->flags))
+		return 0;
+
+	memset(&cp, 0, sizeof(cp));
+
+	create_eir(hdev, cp.data);
+
+	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
+		return 0;
+
+	memcpy(hdev->eir, cp.data, sizeof(cp.data));
+
+	return hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
+}
+
 static u8 get_service_classes(struct hci_dev *hdev)
 {
 	struct list_head *p;
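create_eir() emits standard Extended Inquiry Response structures: one length octet covering the type octet plus data, the type octet, then the data, with 16-bit UUIDs in little-endian order; get_uuid16() only folds UUIDs built on the Bluetooth Base UUID back to their 16-bit form. A worked example of the resulting layout, for a hypothetical device named "node0" advertising the Serial Port (0x1101) and Handsfree (0x111e) service classes:

/* Hypothetical EIR payload for name "node0" plus two 16-bit UUIDs.
 * Each structure: len (= type + data), type, data. */
static const unsigned char eir_example[] = {
	0x06, 0x09, 'n', 'o', 'd', 'e', '0',	/* 0x09 = EIR_NAME_COMPLETE */
	0x05, 0x03, 0x01, 0x11, 0x1e, 0x11,	/* 0x03 = EIR_UUID16_ALL */
};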
@@ -590,7 +739,7 @@ static int add_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
 	if (!hdev)
 		return cmd_status(sk, index, MGMT_OP_ADD_UUID, ENODEV);
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 
 	uuid = kmalloc(sizeof(*uuid), GFP_ATOMIC);
 	if (!uuid) {
@@ -607,10 +756,14 @@ static int add_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
 	if (err < 0)
 		goto failed;
 
+	err = update_eir(hdev);
+	if (err < 0)
+		goto failed;
+
 	err = cmd_complete(sk, index, MGMT_OP_ADD_UUID, NULL, 0);
 
 failed:
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);
 
 	return err;
@@ -635,7 +788,7 @@ static int remove_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
 	if (!hdev)
 		return cmd_status(sk, index, MGMT_OP_REMOVE_UUID, ENODEV);
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 
 	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
 		err = hci_uuids_clear(hdev);
@@ -663,10 +816,14 @@ static int remove_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
 	if (err < 0)
 		goto unlock;
 
+	err = update_eir(hdev);
+	if (err < 0)
+		goto unlock;
+
 	err = cmd_complete(sk, index, MGMT_OP_REMOVE_UUID, NULL, 0);
 
 unlock:
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);
 
 	return err;
@@ -690,7 +847,7 @@ static int set_dev_class(struct sock *sk, u16 index, unsigned char *data,
 	if (!hdev)
 		return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS, ENODEV);
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 
 	hdev->major_class = cp->major;
 	hdev->minor_class = cp->minor;
@@ -700,7 +857,7 @@ static int set_dev_class(struct sock *sk, u16 index, unsigned char *data,
 	if (err == 0)
 		err = cmd_complete(sk, index, MGMT_OP_SET_DEV_CLASS, NULL, 0);
 
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);
 
 	return err;
@@ -722,7 +879,7 @@ static int set_service_cache(struct sock *sk, u16 index, unsigned char *data,
 	if (!hdev)
 		return cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE, ENODEV);
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 
 	BT_DBG("hci%u enable %d", index, cp->enable);
 
@@ -732,13 +889,15 @@ static int set_service_cache(struct sock *sk, u16 index, unsigned char *data,
 	} else {
 		clear_bit(HCI_SERVICE_CACHE, &hdev->flags);
 		err = update_class(hdev);
+		if (err == 0)
+			err = update_eir(hdev);
 	}
 
 	if (err == 0)
 		err = cmd_complete(sk, index, MGMT_OP_SET_SERVICE_CACHE, NULL,
 									0);
 
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);
 
 	return err;
@@ -772,7 +931,7 @@ static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len)
 	BT_DBG("hci%u debug_keys %u key_count %u", index, cp->debug_keys,
 								key_count);
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 
 	hci_link_keys_clear(hdev);
 
@@ -786,11 +945,11 @@ static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len)
 	for (i = 0; i < key_count; i++) {
 		struct mgmt_key_info *key = &cp->keys[i];
 
-		hci_add_link_key(hdev, 0, &key->bdaddr, key->val, key->type,
+		hci_add_link_key(hdev, NULL, 0, &key->bdaddr, key->val, key->type,
 								key->pin_len);
 	}
 
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);
 
 	return 0;
@@ -812,7 +971,7 @@ static int remove_key(struct sock *sk, u16 index, unsigned char *data, u16 len)
 	if (!hdev)
 		return cmd_status(sk, index, MGMT_OP_REMOVE_KEY, ENODEV);
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 
 	err = hci_remove_link_key(hdev, &cp->bdaddr);
 	if (err < 0) {
@@ -835,7 +994,7 @@ static int remove_key(struct sock *sk, u16 index, unsigned char *data, u16 len)
 	}
 
 unlock:
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);
 
 	return err;
@@ -861,7 +1020,7 @@ static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len)
 	if (!hdev)
 		return cmd_status(sk, index, MGMT_OP_DISCONNECT, ENODEV);
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 
 	if (!test_bit(HCI_UP, &hdev->flags)) {
 		err = cmd_status(sk, index, MGMT_OP_DISCONNECT, ENETDOWN);
@@ -874,6 +1033,9 @@ static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len)
 	}
 
 	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
+	if (!conn)
+		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->bdaddr);
+
 	if (!conn) {
 		err = cmd_status(sk, index, MGMT_OP_DISCONNECT, ENOTCONN);
 		goto failed;
@@ -893,7 +1055,7 @@ static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len)
 	mgmt_pending_remove(cmd);
 
 failed:
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);
 
 	return err;
@@ -914,7 +1076,7 @@ static int get_connections(struct sock *sk, u16 index)
 	if (!hdev)
 		return cmd_status(sk, index, MGMT_OP_GET_CONNECTIONS, ENODEV);
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 
 	count = 0;
 	list_for_each(p, &hdev->conn_hash.list) {
@@ -945,7 +1107,7 @@ static int get_connections(struct sock *sk, u16 index)
 
 unlock:
 	kfree(rp);
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);
 	return err;
 }
@@ -970,7 +1132,7 @@ static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data,
 	if (!hdev)
 		return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENODEV);
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 
 	if (!test_bit(HCI_UP, &hdev->flags)) {
 		err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENETDOWN);
@@ -992,7 +1154,7 @@ static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data,
 	mgmt_pending_remove(cmd);
 
 failed:
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);
 
 	return err;
@@ -1019,7 +1181,7 @@ static int pin_code_neg_reply(struct sock *sk, u16 index, unsigned char *data,
 		return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
 								ENODEV);
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 
 	if (!test_bit(HCI_UP, &hdev->flags)) {
 		err = cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
@@ -1040,7 +1202,7 @@ static int pin_code_neg_reply(struct sock *sk, u16 index, unsigned char *data,
 	mgmt_pending_remove(cmd);
 
 failed:
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);
 
 	return err;
@@ -1063,14 +1225,14 @@ static int set_io_capability(struct sock *sk, u16 index, unsigned char *data,
 	if (!hdev)
 		return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY, ENODEV);
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 
 	hdev->io_capability = cp->io_capability;
 
 	BT_DBG("%s IO capability set to 0x%02x", hdev->name,
 							hdev->io_capability);
 
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);
 
 	return cmd_complete(sk, index, MGMT_OP_SET_IO_CAPABILITY, NULL, 0);
@@ -1156,7 +1318,7 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
 	if (!hdev)
 		return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, ENODEV);
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 
 	if (cp->io_cap == 0x03) {
 		sec_level = BT_SECURITY_MEDIUM;
@@ -1198,7 +1360,7 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
 	err = 0;
 
 unlock:
-	hci_dev_unlock_bh(hdev);
+	hci_dev_unlock(hdev);
 	hci_dev_put(hdev);
 
 	return err;
@@ -1230,7 +1392,7 @@ static int user_confirm_reply(struct sock *sk, u16 index, unsigned char *data,
 	if (!hdev)
 		return cmd_status(sk, index, mgmt_op, ENODEV);
 
-	hci_dev_lock_bh(hdev);
+	hci_dev_lock(hdev);
 
 	if (!test_bit(HCI_UP, &hdev->flags)) {
 		err = cmd_status(sk, index, mgmt_op, ENETDOWN);
@@ -1248,6 +1410,231 @@ static int user_confirm_reply(struct sock *sk, u16 index, unsigned char *data,
 	mgmt_pending_remove(cmd);
 
 failed:
+	hci_dev_unlock(hdev);
+	hci_dev_put(hdev);
+
+	return err;
+}
+
+static int set_local_name(struct sock *sk, u16 index, unsigned char *data,
+								u16 len)
+{
+	struct mgmt_cp_set_local_name *mgmt_cp = (void *) data;
+	struct hci_cp_write_local_name hci_cp;
+	struct hci_dev *hdev;
+	struct pending_cmd *cmd;
+	int err;
+
+	BT_DBG("");
+
+	if (len != sizeof(*mgmt_cp))
+		return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME, EINVAL);
+
+	hdev = hci_dev_get(index);
+	if (!hdev)
+		return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME, ENODEV);
+
+	hci_dev_lock(hdev);
+
+	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, index, data, len);
+	if (!cmd) {
+		err = -ENOMEM;
+		goto failed;
+	}
+
+	memcpy(hci_cp.name, mgmt_cp->name, sizeof(hci_cp.name));
+	err = hci_send_cmd(hdev, HCI_OP_WRITE_LOCAL_NAME, sizeof(hci_cp),
+								&hci_cp);
+	if (err < 0)
+		mgmt_pending_remove(cmd);
+
+failed:
+	hci_dev_unlock(hdev);
+	hci_dev_put(hdev);
+
+	return err;
+}
+
+static int read_local_oob_data(struct sock *sk, u16 index)
+{
+	struct hci_dev *hdev;
+	struct pending_cmd *cmd;
+	int err;
+
+	BT_DBG("hci%u", index);
+
+	hdev = hci_dev_get(index);
+	if (!hdev)
+		return cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
+								ENODEV);
+
+	hci_dev_lock(hdev);
+
+	if (!test_bit(HCI_UP, &hdev->flags)) {
+		err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
+								ENETDOWN);
+		goto unlock;
+	}
+
+	if (!(hdev->features[6] & LMP_SIMPLE_PAIR)) {
+		err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
+								EOPNOTSUPP);
+		goto unlock;
+	}
+
+	if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, index)) {
+		err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, EBUSY);
+		goto unlock;
+	}
+
+	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, index, NULL, 0);
+	if (!cmd) {
+		err = -ENOMEM;
+		goto unlock;
+	}
+
+	err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
+	if (err < 0)
+		mgmt_pending_remove(cmd);
+
+unlock:
+	hci_dev_unlock(hdev);
+	hci_dev_put(hdev);
+
+	return err;
+}
+
+static int add_remote_oob_data(struct sock *sk, u16 index, unsigned char *data,
+								u16 len)
+{
+	struct hci_dev *hdev;
+	struct mgmt_cp_add_remote_oob_data *cp = (void *) data;
+	int err;
+
+	BT_DBG("hci%u ", index);
+
+	if (len != sizeof(*cp))
+		return cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA,
+								EINVAL);
+
+	hdev = hci_dev_get(index);
+	if (!hdev)
+		return cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA,
+								ENODEV);
+
+	hci_dev_lock(hdev);
+
+	err = hci_add_remote_oob_data(hdev, &cp->bdaddr, cp->hash,
+							cp->randomizer);
+	if (err < 0)
+		err = cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, -err);
+	else
+		err = cmd_complete(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, NULL,
+									0);
+
+	hci_dev_unlock(hdev);
+	hci_dev_put(hdev);
+
+	return err;
+}
+
+static int remove_remote_oob_data(struct sock *sk, u16 index,
+						unsigned char *data, u16 len)
+{
+	struct hci_dev *hdev;
+	struct mgmt_cp_remove_remote_oob_data *cp = (void *) data;
+	int err;
+
+	BT_DBG("hci%u ", index);
+
+	if (len != sizeof(*cp))
+		return cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
+								EINVAL);
+
+	hdev = hci_dev_get(index);
+	if (!hdev)
+		return cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
+								ENODEV);
+
+	hci_dev_lock(hdev);
+
+	err = hci_remove_remote_oob_data(hdev, &cp->bdaddr);
+	if (err < 0)
+		err = cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
+								-err);
+	else
+		err = cmd_complete(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
+								NULL, 0);
+
+	hci_dev_unlock(hdev);
+	hci_dev_put(hdev);
+
+	return err;
+}
+
+static int start_discovery(struct sock *sk, u16 index)
+{
+	u8 lap[3] = { 0x33, 0x8b, 0x9e };
+	struct hci_cp_inquiry cp;
+	struct pending_cmd *cmd;
+	struct hci_dev *hdev;
+	int err;
+
+	BT_DBG("hci%u", index);
+
+	hdev = hci_dev_get(index);
+	if (!hdev)
+		return cmd_status(sk, index, MGMT_OP_START_DISCOVERY, ENODEV);
+
+	hci_dev_lock_bh(hdev);
+
+	cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, index, NULL, 0);
+	if (!cmd) {
+		err = -ENOMEM;
+		goto failed;
+	}
+
+	memset(&cp, 0, sizeof(cp));
+	memcpy(&cp.lap, lap, 3);
+	cp.length = 0x08;
+	cp.num_rsp = 0x00;
+
+	err = hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
+	if (err < 0)
+		mgmt_pending_remove(cmd);
+
+failed:
+	hci_dev_unlock_bh(hdev);
+	hci_dev_put(hdev);
+
+	return err;
+}
+
+static int stop_discovery(struct sock *sk, u16 index)
+{
+	struct hci_dev *hdev;
+	struct pending_cmd *cmd;
+	int err;
+
+	BT_DBG("hci%u", index);
+
+	hdev = hci_dev_get(index);
+	if (!hdev)
+		return cmd_status(sk, index, MGMT_OP_STOP_DISCOVERY, ENODEV);
+
+	hci_dev_lock_bh(hdev);
+
+	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, index, NULL, 0);
+	if (!cmd) {
+		err = -ENOMEM;
+		goto failed;
+	}
+
+	err = hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
+	if (err < 0)
+		mgmt_pending_remove(cmd);
+
+failed:
 	hci_dev_unlock_bh(hdev);
 	hci_dev_put(hdev);
 
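All the handlers added above share the same skeleton around the pending_cmd list: queue the request, fire the HCI command, and let the matching HCI event handler resolve the entry later via mgmt_pending_find(). Schematically (condensed from the functions above; MGMT_OP_EXAMPLE and HCI_OP_EXAMPLE are placeholders, not real opcodes):

/* Skeleton shared by set_local_name(), read_local_oob_data(),
 * start_discovery() and friends. */
static int mgmt_op_skeleton(struct sock *sk, u16 index)
{
	struct hci_dev *hdev = hci_dev_get(index);
	struct pending_cmd *cmd;
	int err;

	if (!hdev)
		return cmd_status(sk, index, MGMT_OP_EXAMPLE, ENODEV);

	hci_dev_lock(hdev);

	cmd = mgmt_pending_add(sk, MGMT_OP_EXAMPLE, index, NULL, 0);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_EXAMPLE, 0, NULL);
	if (err < 0)
		mgmt_pending_remove(cmd);	/* reply will never arrive */

	/* on success the cmd stays queued until the matching HCI event
	 * calls mgmt_pending_find() and cmd_complete()/cmd_status() */
failed:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);

	return err;
}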
@@ -1266,7 +1653,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
1266 if (msglen < sizeof(*hdr)) 1653 if (msglen < sizeof(*hdr))
1267 return -EINVAL; 1654 return -EINVAL;
1268 1655
1269 buf = kmalloc(msglen, GFP_ATOMIC); 1656 buf = kmalloc(msglen, GFP_KERNEL);
1270 if (!buf) 1657 if (!buf)
1271 return -ENOMEM; 1658 return -ENOMEM;
1272 1659
@@ -1349,6 +1736,25 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
1349 case MGMT_OP_USER_CONFIRM_NEG_REPLY: 1736 case MGMT_OP_USER_CONFIRM_NEG_REPLY:
1350 err = user_confirm_reply(sk, index, buf + sizeof(*hdr), len, 0); 1737 err = user_confirm_reply(sk, index, buf + sizeof(*hdr), len, 0);
1351 break; 1738 break;
1739 case MGMT_OP_SET_LOCAL_NAME:
1740 err = set_local_name(sk, index, buf + sizeof(*hdr), len);
1741 break;
1742 case MGMT_OP_READ_LOCAL_OOB_DATA:
1743 err = read_local_oob_data(sk, index);
1744 break;
1745 case MGMT_OP_ADD_REMOTE_OOB_DATA:
1746 err = add_remote_oob_data(sk, index, buf + sizeof(*hdr), len);
1747 break;
1748 case MGMT_OP_REMOVE_REMOTE_OOB_DATA:
1749 err = remove_remote_oob_data(sk, index, buf + sizeof(*hdr),
1750 len);
1751 break;
1752 case MGMT_OP_START_DISCOVERY:
1753 err = start_discovery(sk, index);
1754 break;
1755 case MGMT_OP_STOP_DISCOVERY:
1756 err = stop_discovery(sk, index);
1757 break;
1352 default: 1758 default:
1353 BT_DBG("Unknown op %u", opcode); 1759 BT_DBG("Unknown op %u", opcode);
1354 err = cmd_status(sk, index, opcode, 0x01); 1760 err = cmd_status(sk, index, opcode, 0x01);
@@ -1382,7 +1788,7 @@ struct cmd_lookup {
1382 1788
1383static void mode_rsp(struct pending_cmd *cmd, void *data) 1789static void mode_rsp(struct pending_cmd *cmd, void *data)
1384{ 1790{
1385 struct mgmt_mode *cp = cmd->cmd; 1791 struct mgmt_mode *cp = cmd->param;
1386 struct cmd_lookup *match = data; 1792 struct cmd_lookup *match = data;
1387 1793
1388 if (cp->val != match->val) 1794 if (cp->val != match->val)
@@ -1455,17 +1861,17 @@ int mgmt_connectable(u16 index, u8 connectable)
1455 return ret; 1861 return ret;
1456} 1862}
1457 1863
1458int mgmt_new_key(u16 index, struct link_key *key, u8 old_key_type) 1864int mgmt_new_key(u16 index, struct link_key *key, u8 persistent)
1459{ 1865{
1460 struct mgmt_ev_new_key ev; 1866 struct mgmt_ev_new_key ev;
1461 1867
1462 memset(&ev, 0, sizeof(ev)); 1868 memset(&ev, 0, sizeof(ev));
1463 1869
1870 ev.store_hint = persistent;
1464 bacpy(&ev.key.bdaddr, &key->bdaddr); 1871 bacpy(&ev.key.bdaddr, &key->bdaddr);
1465 ev.key.type = key->type; 1872 ev.key.type = key->type;
1466 memcpy(ev.key.val, key->val, 16); 1873 memcpy(ev.key.val, key->val, 16);
1467 ev.key.pin_len = key->pin_len; 1874 ev.key.pin_len = key->pin_len;
1468 ev.old_key_type = old_key_type;
1469 1875
1470 return mgmt_event(MGMT_EV_NEW_KEY, index, &ev, sizeof(ev), NULL); 1876 return mgmt_event(MGMT_EV_NEW_KEY, index, &ev, sizeof(ev), NULL);
1471} 1877}
@@ -1481,7 +1887,7 @@ int mgmt_connected(u16 index, bdaddr_t *bdaddr)
1481 1887
1482static void disconnect_rsp(struct pending_cmd *cmd, void *data) 1888static void disconnect_rsp(struct pending_cmd *cmd, void *data)
1483{ 1889{
1484 struct mgmt_cp_disconnect *cp = cmd->cmd; 1890 struct mgmt_cp_disconnect *cp = cmd->param;
1485 struct sock **sk = data; 1891 struct sock **sk = data;
1486 struct mgmt_rp_disconnect rp; 1892 struct mgmt_rp_disconnect rp;
1487 1893
@@ -1539,11 +1945,12 @@ int mgmt_connect_failed(u16 index, bdaddr_t *bdaddr, u8 status)
1539 return mgmt_event(MGMT_EV_CONNECT_FAILED, index, &ev, sizeof(ev), NULL); 1945 return mgmt_event(MGMT_EV_CONNECT_FAILED, index, &ev, sizeof(ev), NULL);
1540} 1946}
1541 1947
1542int mgmt_pin_code_request(u16 index, bdaddr_t *bdaddr) 1948int mgmt_pin_code_request(u16 index, bdaddr_t *bdaddr, u8 secure)
1543{ 1949{
1544 struct mgmt_ev_pin_code_request ev; 1950 struct mgmt_ev_pin_code_request ev;
1545 1951
1546 bacpy(&ev.bdaddr, bdaddr); 1952 bacpy(&ev.bdaddr, bdaddr);
1953 ev.secure = secure;
1547 1954
1548 return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, index, &ev, sizeof(ev), 1955 return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, index, &ev, sizeof(ev),
1549 NULL); 1956 NULL);
@@ -1591,13 +1998,15 @@ int mgmt_pin_code_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status)
1591 return err; 1998 return err;
1592} 1999}
1593 2000
1594int mgmt_user_confirm_request(u16 index, bdaddr_t *bdaddr, __le32 value) 2001int mgmt_user_confirm_request(u16 index, bdaddr_t *bdaddr, __le32 value,
2002 u8 confirm_hint)
1595{ 2003{
1596 struct mgmt_ev_user_confirm_request ev; 2004 struct mgmt_ev_user_confirm_request ev;
1597 2005
1598 BT_DBG("hci%u", index); 2006 BT_DBG("hci%u", index);
1599 2007
1600 bacpy(&ev.bdaddr, bdaddr); 2008 bacpy(&ev.bdaddr, bdaddr);
2009 ev.confirm_hint = confirm_hint;
1601 put_unaligned_le32(value, &ev.value); 2010 put_unaligned_le32(value, &ev.value);
1602 2011
1603 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, index, &ev, sizeof(ev), 2012 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, index, &ev, sizeof(ev),
@@ -1645,3 +2054,110 @@ int mgmt_auth_failed(u16 index, bdaddr_t *bdaddr, u8 status)
1645 2054
1646 return mgmt_event(MGMT_EV_AUTH_FAILED, index, &ev, sizeof(ev), NULL); 2055 return mgmt_event(MGMT_EV_AUTH_FAILED, index, &ev, sizeof(ev), NULL);
1647} 2056}
2057
2058int mgmt_set_local_name_complete(u16 index, u8 *name, u8 status)
2059{
2060 struct pending_cmd *cmd;
2061 struct hci_dev *hdev;
2062 struct mgmt_cp_set_local_name ev;
2063 int err;
2064
2065 memset(&ev, 0, sizeof(ev));
2066 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
2067
2068 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, index);
2069 if (!cmd)
2070 goto send_event;
2071
2072 if (status) {
2073 err = cmd_status(cmd->sk, index, MGMT_OP_SET_LOCAL_NAME, EIO);
2074 goto failed;
2075 }
2076
2077 hdev = hci_dev_get(index);
2078 if (hdev) {
2079 hci_dev_lock_bh(hdev);
2080 update_eir(hdev);
2081 hci_dev_unlock_bh(hdev);
2082 hci_dev_put(hdev);
2083 }
2084
2085 err = cmd_complete(cmd->sk, index, MGMT_OP_SET_LOCAL_NAME, &ev,
2086 sizeof(ev));
2087 if (err < 0)
2088 goto failed;
2089
2090send_event:
2091 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, index, &ev, sizeof(ev),
2092 cmd ? cmd->sk : NULL);
2093
2094failed:
2095 if (cmd)
2096 mgmt_pending_remove(cmd);
2097 return err;
2098}
2099
2100int mgmt_read_local_oob_data_reply_complete(u16 index, u8 *hash, u8 *randomizer,
2101 u8 status)
2102{
2103 struct pending_cmd *cmd;
2104 int err;
2105
2106 BT_DBG("hci%u status %u", index, status);
2107
2108 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, index);
2109 if (!cmd)
2110 return -ENOENT;
2111
2112 if (status) {
2113 err = cmd_status(cmd->sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
2114 EIO);
2115 } else {
2116 struct mgmt_rp_read_local_oob_data rp;
2117
2118 memcpy(rp.hash, hash, sizeof(rp.hash));
2119 memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));
2120
2121 err = cmd_complete(cmd->sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
2122 &rp, sizeof(rp));
2123 }
2124
2125 mgmt_pending_remove(cmd);
2126
2127 return err;
2128}
2129
2130int mgmt_device_found(u16 index, bdaddr_t *bdaddr, u8 *dev_class, s8 rssi,
2131 u8 *eir)
2132{
2133 struct mgmt_ev_device_found ev;
2134
2135 memset(&ev, 0, sizeof(ev));
2136
2137 bacpy(&ev.bdaddr, bdaddr);
2138 memcpy(ev.dev_class, dev_class, sizeof(ev.dev_class));
2139 ev.rssi = rssi;
2140
2141 if (eir)
2142 memcpy(ev.eir, eir, sizeof(ev.eir));
2143
2144 return mgmt_event(MGMT_EV_DEVICE_FOUND, index, &ev, sizeof(ev), NULL);
2145}
2146
2147int mgmt_remote_name(u16 index, bdaddr_t *bdaddr, u8 *name)
2148{
2149 struct mgmt_ev_remote_name ev;
2150
2151 memset(&ev, 0, sizeof(ev));
2152
2153 bacpy(&ev.bdaddr, bdaddr);
2154 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
2155
2156 return mgmt_event(MGMT_EV_REMOTE_NAME, index, &ev, sizeof(ev), NULL);
2157}
2158
2159int mgmt_discovering(u16 index, u8 discovering)
2160{
2161 return mgmt_event(MGMT_EV_DISCOVERING, index, &discovering,
2162 sizeof(discovering), NULL);
2163}
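
The mgmt changes above all follow one event pattern: zero a fixed-layout event struct, copy the controller address with bacpy(), store multi-byte fields explicitly little-endian (put_unaligned_le32), and hand the buffer to mgmt_event() for delivery to management sockets. A minimal userspace sketch of that wire-struct discipline, assuming illustrative field names; put_le32() stands in for the kernel's put_unaligned_le32():

/*
 * Sketch only: the struct layout and values are illustrative, not the
 * kernel's mgmt_ev_user_confirm_request.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct user_confirm_ev {
	uint8_t bdaddr[6];
	uint8_t confirm_hint;	/* new field carried by the event above */
	uint8_t value[4];	/* __le32 stored byte-wise, alignment-free */
};

static void put_le32(uint32_t v, uint8_t *p)
{
	p[0] = v;
	p[1] = v >> 8;
	p[2] = v >> 16;
	p[3] = v >> 24;
}

int main(void)
{
	static const uint8_t addr[6] = { 0x13, 0x71, 0xda, 0x7d, 0x1a, 0x00 };
	struct user_confirm_ev ev;

	memset(&ev, 0, sizeof(ev));		/* nothing undefined on the wire */
	memcpy(ev.bdaddr, addr, sizeof(ev.bdaddr));
	ev.confirm_hint = 1;			/* e.g. no passkey to compare */
	put_le32(123456, ev.value);

	printf("emitting %zu-byte event\n", sizeof(ev));
	return 0;
}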
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index c9973932456f..5759bb7054f7 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -232,6 +232,8 @@ static int rfcomm_l2sock_create(struct socket **sock)
232static inline int rfcomm_check_security(struct rfcomm_dlc *d) 232static inline int rfcomm_check_security(struct rfcomm_dlc *d)
233{ 233{
234 struct sock *sk = d->session->sock->sk; 234 struct sock *sk = d->session->sock->sk;
235 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
236
235 __u8 auth_type; 237 __u8 auth_type;
236 238
237 switch (d->sec_level) { 239 switch (d->sec_level) {
@@ -246,8 +248,7 @@ static inline int rfcomm_check_security(struct rfcomm_dlc *d)
246 break; 248 break;
247 } 249 }
248 250
249 return hci_conn_security(l2cap_pi(sk)->conn->hcon, d->sec_level, 251 return hci_conn_security(conn->hcon, d->sec_level, auth_type);
250 auth_type);
251} 252}
252 253
253static void rfcomm_session_timeout(unsigned long arg) 254static void rfcomm_session_timeout(unsigned long arg)
@@ -710,10 +711,10 @@ static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src,
710 /* Set L2CAP options */ 711 /* Set L2CAP options */
711 sk = sock->sk; 712 sk = sock->sk;
712 lock_sock(sk); 713 lock_sock(sk);
713 l2cap_pi(sk)->imtu = l2cap_mtu; 714 l2cap_pi(sk)->chan->imtu = l2cap_mtu;
714 l2cap_pi(sk)->sec_level = sec_level; 715 l2cap_pi(sk)->chan->sec_level = sec_level;
715 if (l2cap_ertm) 716 if (l2cap_ertm)
716 l2cap_pi(sk)->mode = L2CAP_MODE_ERTM; 717 l2cap_pi(sk)->chan->mode = L2CAP_MODE_ERTM;
717 release_sock(sk); 718 release_sock(sk);
718 719
719 s = rfcomm_session_add(sock, BT_BOUND); 720 s = rfcomm_session_add(sock, BT_BOUND);
@@ -1241,6 +1242,7 @@ static int rfcomm_recv_disc(struct rfcomm_session *s, u8 dlci)
1241void rfcomm_dlc_accept(struct rfcomm_dlc *d) 1242void rfcomm_dlc_accept(struct rfcomm_dlc *d)
1242{ 1243{
1243 struct sock *sk = d->session->sock->sk; 1244 struct sock *sk = d->session->sock->sk;
1245 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
1244 1246
1245 BT_DBG("dlc %p", d); 1247 BT_DBG("dlc %p", d);
1246 1248
@@ -1254,7 +1256,7 @@ void rfcomm_dlc_accept(struct rfcomm_dlc *d)
1254 rfcomm_dlc_unlock(d); 1256 rfcomm_dlc_unlock(d);
1255 1257
1256 if (d->role_switch) 1258 if (d->role_switch)
1257 hci_conn_switch_role(l2cap_pi(sk)->conn->hcon, 0x00); 1259 hci_conn_switch_role(conn->hcon, 0x00);
1258 1260
1259 rfcomm_send_msc(d->session, 1, d->dlci, d->v24_sig); 1261 rfcomm_send_msc(d->session, 1, d->dlci, d->v24_sig);
1260} 1262}
@@ -1890,7 +1892,8 @@ static inline void rfcomm_accept_connection(struct rfcomm_session *s)
1890 1892
1891 /* We should adjust MTU on incoming sessions. 1893 /* We should adjust MTU on incoming sessions.
1892 * L2CAP MTU minus UIH header and FCS. */ 1894 * L2CAP MTU minus UIH header and FCS. */
1893 s->mtu = min(l2cap_pi(nsock->sk)->omtu, l2cap_pi(nsock->sk)->imtu) - 5; 1895 s->mtu = min(l2cap_pi(nsock->sk)->chan->omtu,
1896 l2cap_pi(nsock->sk)->chan->imtu) - 5;
1894 1897
1895 rfcomm_schedule(); 1898 rfcomm_schedule();
1896 } else 1899 } else
@@ -1909,7 +1912,7 @@ static inline void rfcomm_check_connection(struct rfcomm_session *s)
1909 1912
1910 /* We can adjust MTU on outgoing sessions. 1913 /* We can adjust MTU on outgoing sessions.
1911 * L2CAP MTU minus UIH header and FCS. */ 1914 * L2CAP MTU minus UIH header and FCS. */
1912 s->mtu = min(l2cap_pi(sk)->omtu, l2cap_pi(sk)->imtu) - 5; 1915 s->mtu = min(l2cap_pi(sk)->chan->omtu, l2cap_pi(sk)->chan->imtu) - 5;
1913 1916
1914 rfcomm_send_sabm(s, 0); 1917 rfcomm_send_sabm(s, 0);
1915 break; 1918 break;
@@ -1992,7 +1995,7 @@ static int rfcomm_add_listener(bdaddr_t *ba)
1992 /* Set L2CAP options */ 1995 /* Set L2CAP options */
1993 sk = sock->sk; 1996 sk = sock->sk;
1994 lock_sock(sk); 1997 lock_sock(sk);
1995 l2cap_pi(sk)->imtu = l2cap_mtu; 1998 l2cap_pi(sk)->chan->imtu = l2cap_mtu;
1996 release_sock(sk); 1999 release_sock(sk);
1997 2000
1998 /* Start listening on the socket */ 2001 /* Start listening on the socket */
@@ -2093,7 +2096,7 @@ static void rfcomm_security_cfm(struct hci_conn *conn, u8 status, u8 encrypt)
2093 if (!test_and_clear_bit(RFCOMM_AUTH_PENDING, &d->flags)) 2096 if (!test_and_clear_bit(RFCOMM_AUTH_PENDING, &d->flags))
2094 continue; 2097 continue;
2095 2098
2096 if (!status) 2099 if (!status && hci_conn_check_secure(conn, d->sec_level))
2097 set_bit(RFCOMM_AUTH_ACCEPT, &d->flags); 2100 set_bit(RFCOMM_AUTH_ACCEPT, &d->flags);
2098 else 2101 else
2099 set_bit(RFCOMM_AUTH_REJECT, &d->flags); 2102 set_bit(RFCOMM_AUTH_REJECT, &d->flags);
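
The rfcomm changes above are mechanical fallout from splitting per-socket L2CAP state into a separate struct l2cap_chan: everything previously reached as l2cap_pi(sk)->field now goes through l2cap_pi(sk)->chan, and the connection is cached in a local once per function. A small userspace model of the new indirection, with stand-in types (the real structs carry far more state):

/* Sketch only: sk -> chan -> conn with toy struct definitions. */
#include <stdio.h>

struct l2cap_conn { int handle; };
struct l2cap_chan { struct l2cap_conn *conn; int imtu, omtu; };
struct l2cap_sock { struct l2cap_chan *chan; };

/* L2CAP MTU minus UIH header and FCS, as in the rfcomm hunks above */
static int rfcomm_session_mtu(const struct l2cap_sock *sk)
{
	const struct l2cap_chan *chan = sk->chan;	/* cache the extra hop */
	int mtu = chan->omtu < chan->imtu ? chan->omtu : chan->imtu;

	return mtu - 5;
}

int main(void)
{
	struct l2cap_conn conn = { .handle = 0x2a };
	struct l2cap_chan chan = { .conn = &conn, .imtu = 672, .omtu = 1013 };
	struct l2cap_sock sk = { .chan = &chan };

	printf("hcon handle %#x, rfcomm mtu %d\n",
	       sk.chan->conn->handle, rfcomm_session_mtu(&sk));
	return 0;
}

Note also the behavioral fix in rfcomm_security_cfm(): with hci_conn_check_secure(), a DLC is only accepted when the link actually satisfies its required security level, rather than on a zero status alone.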
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 66cc1f0c3df8..1b10727ce523 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -743,6 +743,7 @@ static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __u
743 struct sock *sk = sock->sk; 743 struct sock *sk = sock->sk;
744 struct sock *l2cap_sk; 744 struct sock *l2cap_sk;
745 struct rfcomm_conninfo cinfo; 745 struct rfcomm_conninfo cinfo;
746 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
746 int len, err = 0; 747 int len, err = 0;
747 u32 opt; 748 u32 opt;
748 749
@@ -787,8 +788,9 @@ static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __u
787 788
788 l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk; 789 l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;
789 790
790 cinfo.hci_handle = l2cap_pi(l2cap_sk)->conn->hcon->handle; 791 memset(&cinfo, 0, sizeof(cinfo));
791 memcpy(cinfo.dev_class, l2cap_pi(l2cap_sk)->conn->hcon->dev_class, 3); 792 cinfo.hci_handle = conn->hcon->handle;
793 memcpy(cinfo.dev_class, conn->hcon->dev_class, 3);
792 794
793 len = min_t(unsigned int, len, sizeof(cinfo)); 795 len = min_t(unsigned int, len, sizeof(cinfo));
794 if (copy_to_user(optval, (char *) &cinfo, len)) 796 if (copy_to_user(optval, (char *) &cinfo, len))
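
Beyond the chan indirection, the getsockopt hunk gains a memset() of cinfo before it is filled and copied out; without it, compiler padding in the struct would leak stale kernel stack bytes through copy_to_user(). A userspace model of the idiom, with an illustrative struct layout (not the real rfcomm_conninfo):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct conninfo {
	uint16_t hci_handle;
	uint8_t dev_class[3];
	/* sizeof is 6 on common ABIs: one trailing padding byte */
};

static void fill_conninfo(struct conninfo *ci, uint16_t handle,
			  const uint8_t *cls)
{
	memset(ci, 0, sizeof(*ci));	/* zeroes the padding byte too */
	ci->hci_handle = handle;
	memcpy(ci->dev_class, cls, 3);
}

int main(void)
{
	static const uint8_t cls[3] = { 0x0c, 0x02, 0x5a };
	struct conninfo ci;

	fill_conninfo(&ci, 0x002a, cls);
	printf("%zu bytes, every one defined\n", sizeof(ci));
	return 0;
}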
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 42fdffd1d76c..cb4fb7837e5c 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -369,6 +369,15 @@ static void __sco_sock_close(struct sock *sk)
369 369
370 case BT_CONNECTED: 370 case BT_CONNECTED:
371 case BT_CONFIG: 371 case BT_CONFIG:
372 if (sco_pi(sk)->conn) {
373 sk->sk_state = BT_DISCONN;
374 sco_sock_set_timer(sk, SCO_DISCONN_TIMEOUT);
375 hci_conn_put(sco_pi(sk)->conn->hcon);
376 sco_pi(sk)->conn->hcon = NULL;
377 } else
378 sco_chan_del(sk, ECONNRESET);
379 break;
380
372 case BT_CONNECT: 381 case BT_CONNECT:
373 case BT_DISCONN: 382 case BT_DISCONN:
374 sco_chan_del(sk, ECONNRESET); 383 sco_chan_del(sk, ECONNRESET);
@@ -819,7 +828,9 @@ static void sco_chan_del(struct sock *sk, int err)
819 conn->sk = NULL; 828 conn->sk = NULL;
820 sco_pi(sk)->conn = NULL; 829 sco_pi(sk)->conn = NULL;
821 sco_conn_unlock(conn); 830 sco_conn_unlock(conn);
822 hci_conn_put(conn->hcon); 831
832 if (conn->hcon)
833 hci_conn_put(conn->hcon);
823 } 834 }
824 835
825 sk->sk_state = BT_CLOSED; 836 sk->sk_state = BT_CLOSED;
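
The sco.c change splits teardown into two paths: __sco_sock_close() now drops the hci_conn reference itself and clears conn->hcon while the socket waits out SCO_DISCONN_TIMEOUT in BT_DISCONN, so sco_chan_del() must tolerate a NULL hcon when it runs later. A simplified userspace model of why the put must be conditional (refcounting reduced to a bare counter; not the kernel's implementation):

#include <stdio.h>
#include <stdlib.h>

struct hci_conn { int refcnt; };
struct sco_conn { struct hci_conn *hcon; };

static void hci_conn_put(struct hci_conn *c)
{
	if (--c->refcnt == 0)
		free(c);
}

static void sco_chan_del(struct sco_conn *conn)
{
	if (conn->hcon)		/* may already be NULL after __sco_sock_close() */
		hci_conn_put(conn->hcon);
	conn->hcon = NULL;
}

int main(void)
{
	struct sco_conn conn = { .hcon = calloc(1, sizeof(struct hci_conn)) };

	if (!conn.hcon)
		return 1;
	conn.hcon->refcnt = 1;

	sco_chan_del(&conn);	/* drops the last reference */
	sco_chan_del(&conn);	/* second teardown path: harmless no-op */
	puts("no double put");
	return 0;
}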
diff --git a/net/bridge/br.c b/net/bridge/br.c
index 84bbb82599b2..f20c4fd915a8 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -104,3 +104,4 @@ module_init(br_init)
104module_exit(br_deinit) 104module_exit(br_deinit)
105MODULE_LICENSE("GPL"); 105MODULE_LICENSE("GPL");
106MODULE_VERSION(BR_VERSION); 106MODULE_VERSION(BR_VERSION);
107MODULE_ALIAS_RTNL_LINK("bridge");
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 21e5901186ea..32b8f9f7f79e 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -49,7 +49,9 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
49 skb_pull(skb, ETH_HLEN); 49 skb_pull(skb, ETH_HLEN);
50 50
51 rcu_read_lock(); 51 rcu_read_lock();
52 if (is_multicast_ether_addr(dest)) { 52 if (is_broadcast_ether_addr(dest))
53 br_flood_deliver(br, skb);
54 else if (is_multicast_ether_addr(dest)) {
53 if (unlikely(netpoll_tx_running(dev))) { 55 if (unlikely(netpoll_tx_running(dev))) {
54 br_flood_deliver(br, skb); 56 br_flood_deliver(br, skb);
55 goto out; 57 goto out;
@@ -74,13 +76,23 @@ out:
74 return NETDEV_TX_OK; 76 return NETDEV_TX_OK;
75} 77}
76 78
79static int br_dev_init(struct net_device *dev)
80{
81 struct net_bridge *br = netdev_priv(dev);
82
83 br->stats = alloc_percpu(struct br_cpu_netstats);
84 if (!br->stats)
85 return -ENOMEM;
86
87 return 0;
88}
89
77static int br_dev_open(struct net_device *dev) 90static int br_dev_open(struct net_device *dev)
78{ 91{
79 struct net_bridge *br = netdev_priv(dev); 92 struct net_bridge *br = netdev_priv(dev);
80 93
81 netif_carrier_off(dev); 94 netif_carrier_off(dev);
82 95 netdev_update_features(dev);
83 br_features_recompute(br);
84 netif_start_queue(dev); 96 netif_start_queue(dev);
85 br_stp_enable_bridge(br); 97 br_stp_enable_bridge(br);
86 br_multicast_open(br); 98 br_multicast_open(br);
@@ -177,48 +189,11 @@ static void br_getinfo(struct net_device *dev, struct ethtool_drvinfo *info)
177 strcpy(info->bus_info, "N/A"); 189 strcpy(info->bus_info, "N/A");
178} 190}
179 191
180static int br_set_sg(struct net_device *dev, u32 data) 192static u32 br_fix_features(struct net_device *dev, u32 features)
181{ 193{
182 struct net_bridge *br = netdev_priv(dev); 194 struct net_bridge *br = netdev_priv(dev);
183 195
184 if (data) 196 return br_features_recompute(br, features);
185 br->feature_mask |= NETIF_F_SG;
186 else
187 br->feature_mask &= ~NETIF_F_SG;
188
189 br_features_recompute(br);
190 return 0;
191}
192
193static int br_set_tso(struct net_device *dev, u32 data)
194{
195 struct net_bridge *br = netdev_priv(dev);
196
197 if (data)
198 br->feature_mask |= NETIF_F_TSO;
199 else
200 br->feature_mask &= ~NETIF_F_TSO;
201
202 br_features_recompute(br);
203 return 0;
204}
205
206static int br_set_tx_csum(struct net_device *dev, u32 data)
207{
208 struct net_bridge *br = netdev_priv(dev);
209
210 if (data)
211 br->feature_mask |= NETIF_F_NO_CSUM;
212 else
213 br->feature_mask &= ~NETIF_F_ALL_CSUM;
214
215 br_features_recompute(br);
216 return 0;
217}
218
219static int br_set_flags(struct net_device *netdev, u32 data)
220{
221 return ethtool_op_set_flags(netdev, data, ETH_FLAG_TXVLAN);
222} 197}
223 198
224#ifdef CONFIG_NET_POLL_CONTROLLER 199#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -270,6 +245,7 @@ int br_netpoll_enable(struct net_bridge_port *p)
270 goto out; 245 goto out;
271 246
272 np->dev = p->dev; 247 np->dev = p->dev;
248 strlcpy(np->dev_name, p->dev->name, IFNAMSIZ);
273 249
274 err = __netpoll_setup(np); 250 err = __netpoll_setup(np);
275 if (err) { 251 if (err) {
@@ -319,21 +295,12 @@ static int br_del_slave(struct net_device *dev, struct net_device *slave_dev)
319static const struct ethtool_ops br_ethtool_ops = { 295static const struct ethtool_ops br_ethtool_ops = {
320 .get_drvinfo = br_getinfo, 296 .get_drvinfo = br_getinfo,
321 .get_link = ethtool_op_get_link, 297 .get_link = ethtool_op_get_link,
322 .get_tx_csum = ethtool_op_get_tx_csum,
323 .set_tx_csum = br_set_tx_csum,
324 .get_sg = ethtool_op_get_sg,
325 .set_sg = br_set_sg,
326 .get_tso = ethtool_op_get_tso,
327 .set_tso = br_set_tso,
328 .get_ufo = ethtool_op_get_ufo,
329 .set_ufo = ethtool_op_set_ufo,
330 .get_flags = ethtool_op_get_flags,
331 .set_flags = br_set_flags,
332}; 298};
333 299
334static const struct net_device_ops br_netdev_ops = { 300static const struct net_device_ops br_netdev_ops = {
335 .ndo_open = br_dev_open, 301 .ndo_open = br_dev_open,
336 .ndo_stop = br_dev_stop, 302 .ndo_stop = br_dev_stop,
303 .ndo_init = br_dev_init,
337 .ndo_start_xmit = br_dev_xmit, 304 .ndo_start_xmit = br_dev_xmit,
338 .ndo_get_stats64 = br_get_stats64, 305 .ndo_get_stats64 = br_get_stats64,
339 .ndo_set_mac_address = br_set_mac_address, 306 .ndo_set_mac_address = br_set_mac_address,
@@ -347,6 +314,7 @@ static const struct net_device_ops br_netdev_ops = {
347#endif 314#endif
348 .ndo_add_slave = br_add_slave, 315 .ndo_add_slave = br_add_slave,
349 .ndo_del_slave = br_del_slave, 316 .ndo_del_slave = br_del_slave,
317 .ndo_fix_features = br_fix_features,
350}; 318};
351 319
352static void br_dev_free(struct net_device *dev) 320static void br_dev_free(struct net_device *dev)
@@ -357,18 +325,49 @@ static void br_dev_free(struct net_device *dev)
357 free_netdev(dev); 325 free_netdev(dev);
358} 326}
359 327
328static struct device_type br_type = {
329 .name = "bridge",
330};
331
360void br_dev_setup(struct net_device *dev) 332void br_dev_setup(struct net_device *dev)
361{ 333{
334 struct net_bridge *br = netdev_priv(dev);
335
362 random_ether_addr(dev->dev_addr); 336 random_ether_addr(dev->dev_addr);
363 ether_setup(dev); 337 ether_setup(dev);
364 338
365 dev->netdev_ops = &br_netdev_ops; 339 dev->netdev_ops = &br_netdev_ops;
366 dev->destructor = br_dev_free; 340 dev->destructor = br_dev_free;
367 SET_ETHTOOL_OPS(dev, &br_ethtool_ops); 341 SET_ETHTOOL_OPS(dev, &br_ethtool_ops);
342 SET_NETDEV_DEVTYPE(dev, &br_type);
368 dev->tx_queue_len = 0; 343 dev->tx_queue_len = 0;
369 dev->priv_flags = IFF_EBRIDGE; 344 dev->priv_flags = IFF_EBRIDGE;
370 345
371 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | 346 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
372 NETIF_F_GSO_MASK | NETIF_F_NO_CSUM | NETIF_F_LLTX | 347 NETIF_F_GSO_MASK | NETIF_F_NO_CSUM | NETIF_F_LLTX |
373 NETIF_F_NETNS_LOCAL | NETIF_F_GSO | NETIF_F_HW_VLAN_TX; 348 NETIF_F_NETNS_LOCAL | NETIF_F_HW_VLAN_TX;
349 dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
350 NETIF_F_GSO_MASK | NETIF_F_NO_CSUM |
351 NETIF_F_HW_VLAN_TX;
352
353 br->dev = dev;
354 spin_lock_init(&br->lock);
355 INIT_LIST_HEAD(&br->port_list);
356 spin_lock_init(&br->hash_lock);
357
358 br->bridge_id.prio[0] = 0x80;
359 br->bridge_id.prio[1] = 0x00;
360
361 memcpy(br->group_addr, br_group_address, ETH_ALEN);
362
363 br->stp_enabled = BR_NO_STP;
364 br->designated_root = br->bridge_id;
365 br->bridge_max_age = br->max_age = 20 * HZ;
366 br->bridge_hello_time = br->hello_time = 2 * HZ;
367 br->bridge_forward_delay = br->forward_delay = 15 * HZ;
368 br->ageing_time = 300 * HZ;
369
370 br_netfilter_rtable_init(br);
371 br_stp_timer_init(br);
372 br_multicast_init(br);
374} 373}
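
br_device.c moves to the netdev_update_features()/ndo_fix_features model: instead of ethtool set_* callbacks mutating a private feature_mask and calling br_features_recompute() by hand, the core now asks br_fix_features() what the bridge can offer, and the recompute reduces the offered set across all ports (NETIF_F_ONE_FOR_ALL semantics). A toy model of that reduction; flag values are made up, and the real netdev_increment_features() also special-cases the checksum bits:

#include <stdint.h>
#include <stdio.h>

#define F_SG	0x1u
#define F_TSO	0x2u
#define F_CSUM	0x4u

static uint32_t features_recompute(uint32_t features,
				   const uint32_t *port, int nports)
{
	int i;

	if (nports == 0)		/* empty bridge keeps everything */
		return features;
	for (i = 0; i < nports; i++)
		features &= port[i];	/* every port must support it */
	return features;
}

int main(void)
{
	uint32_t ports[] = { F_SG | F_TSO | F_CSUM, F_SG | F_CSUM };
	uint32_t offered = F_SG | F_TSO | F_CSUM;

	/* second port lacks TSO, so the bridge loses it: prints 0x5 */
	printf("bridge features %#x\n",
	       (unsigned)features_recompute(offered, ports, 2));
	return 0;
}

The setup that used to live in br_if.c's new_bridge_dev() (timers, STP defaults, the per-cpu stats now allocated from ndo_init) moves into br_dev_setup()/br_dev_init(), so that rtnl_link_ops can create a fully formed bridge from .setup alone.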
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index cc4d3c5ab1c6..e0dfbc151dd7 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -28,6 +28,7 @@
28static struct kmem_cache *br_fdb_cache __read_mostly; 28static struct kmem_cache *br_fdb_cache __read_mostly;
29static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source, 29static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
30 const unsigned char *addr); 30 const unsigned char *addr);
31static void fdb_notify(const struct net_bridge_fdb_entry *, int);
31 32
32static u32 fdb_salt __read_mostly; 33static u32 fdb_salt __read_mostly;
33 34
@@ -62,7 +63,7 @@ static inline int has_expired(const struct net_bridge *br,
62 const struct net_bridge_fdb_entry *fdb) 63 const struct net_bridge_fdb_entry *fdb)
63{ 64{
64 return !fdb->is_static && 65 return !fdb->is_static &&
65 time_before_eq(fdb->ageing_timer + hold_time(br), jiffies); 66 time_before_eq(fdb->updated + hold_time(br), jiffies);
66} 67}
67 68
68static inline int br_mac_hash(const unsigned char *mac) 69static inline int br_mac_hash(const unsigned char *mac)
@@ -81,6 +82,7 @@ static void fdb_rcu_free(struct rcu_head *head)
81 82
82static inline void fdb_delete(struct net_bridge_fdb_entry *f) 83static inline void fdb_delete(struct net_bridge_fdb_entry *f)
83{ 84{
85 fdb_notify(f, RTM_DELNEIGH);
84 hlist_del_rcu(&f->hlist); 86 hlist_del_rcu(&f->hlist);
85 call_rcu(&f->rcu, fdb_rcu_free); 87 call_rcu(&f->rcu, fdb_rcu_free);
86} 88}
@@ -140,7 +142,7 @@ void br_fdb_cleanup(unsigned long _data)
140 unsigned long this_timer; 142 unsigned long this_timer;
141 if (f->is_static) 143 if (f->is_static)
142 continue; 144 continue;
143 this_timer = f->ageing_timer + delay; 145 this_timer = f->updated + delay;
144 if (time_before_eq(this_timer, jiffies)) 146 if (time_before_eq(this_timer, jiffies))
145 fdb_delete(f); 147 fdb_delete(f);
146 else if (time_before(this_timer, next_timer)) 148 else if (time_before(this_timer, next_timer))
@@ -293,7 +295,7 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf,
293 295
294 fe->is_local = f->is_local; 296 fe->is_local = f->is_local;
295 if (!f->is_static) 297 if (!f->is_static)
296 fe->ageing_timer_value = jiffies_to_clock_t(jiffies - f->ageing_timer); 298 fe->ageing_timer_value = jiffies_to_clock_t(jiffies - f->updated);
297 ++fe; 299 ++fe;
298 ++num; 300 ++num;
299 } 301 }
@@ -305,8 +307,21 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf,
305 return num; 307 return num;
306} 308}
307 309
308static inline struct net_bridge_fdb_entry *fdb_find(struct hlist_head *head, 310static struct net_bridge_fdb_entry *fdb_find(struct hlist_head *head,
309 const unsigned char *addr) 311 const unsigned char *addr)
312{
313 struct hlist_node *h;
314 struct net_bridge_fdb_entry *fdb;
315
316 hlist_for_each_entry(fdb, h, head, hlist) {
317 if (!compare_ether_addr(fdb->addr.addr, addr))
318 return fdb;
319 }
320 return NULL;
321}
322
323static struct net_bridge_fdb_entry *fdb_find_rcu(struct hlist_head *head,
324 const unsigned char *addr)
310{ 325{
311 struct hlist_node *h; 326 struct hlist_node *h;
312 struct net_bridge_fdb_entry *fdb; 327 struct net_bridge_fdb_entry *fdb;
@@ -320,8 +335,7 @@ static inline struct net_bridge_fdb_entry *fdb_find(struct hlist_head *head,
320 335
321static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head, 336static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head,
322 struct net_bridge_port *source, 337 struct net_bridge_port *source,
323 const unsigned char *addr, 338 const unsigned char *addr)
324 int is_local)
325{ 339{
326 struct net_bridge_fdb_entry *fdb; 340 struct net_bridge_fdb_entry *fdb;
327 341
@@ -329,11 +343,11 @@ static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head,
329 if (fdb) { 343 if (fdb) {
330 memcpy(fdb->addr.addr, addr, ETH_ALEN); 344 memcpy(fdb->addr.addr, addr, ETH_ALEN);
331 fdb->dst = source; 345 fdb->dst = source;
332 fdb->is_local = is_local; 346 fdb->is_local = 0;
333 fdb->is_static = is_local; 347 fdb->is_static = 0;
334 fdb->ageing_timer = jiffies; 348 fdb->updated = fdb->used = jiffies;
335
336 hlist_add_head_rcu(&fdb->hlist, head); 349 hlist_add_head_rcu(&fdb->hlist, head);
350 fdb_notify(fdb, RTM_NEWNEIGH);
337 } 351 }
338 return fdb; 352 return fdb;
339} 353}
@@ -360,12 +374,15 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
360 fdb_delete(fdb); 374 fdb_delete(fdb);
361 } 375 }
362 376
363 if (!fdb_create(head, source, addr, 1)) 377 fdb = fdb_create(head, source, addr);
378 if (!fdb)
364 return -ENOMEM; 379 return -ENOMEM;
365 380
381 fdb->is_local = fdb->is_static = 1;
366 return 0; 382 return 0;
367} 383}
368 384
385/* Add entry for local address of interface */
369int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source, 386int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
370 const unsigned char *addr) 387 const unsigned char *addr)
371{ 388{
@@ -392,7 +409,7 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
392 source->state == BR_STATE_FORWARDING)) 409 source->state == BR_STATE_FORWARDING))
393 return; 410 return;
394 411
395 fdb = fdb_find(head, addr); 412 fdb = fdb_find_rcu(head, addr);
396 if (likely(fdb)) { 413 if (likely(fdb)) {
397 /* attempt to update an entry for a local interface */ 414 /* attempt to update an entry for a local interface */
398 if (unlikely(fdb->is_local)) { 415 if (unlikely(fdb->is_local)) {
@@ -403,15 +420,277 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
403 } else { 420 } else {
404 /* fastpath: update of existing entry */ 421 /* fastpath: update of existing entry */
405 fdb->dst = source; 422 fdb->dst = source;
406 fdb->ageing_timer = jiffies; 423 fdb->updated = jiffies;
407 } 424 }
408 } else { 425 } else {
409 spin_lock(&br->hash_lock); 426 spin_lock(&br->hash_lock);
410 if (!fdb_find(head, addr)) 427 if (likely(!fdb_find(head, addr)))
411 fdb_create(head, source, addr, 0); 428 fdb_create(head, source, addr);
429
412 /* else we lose race and someone else inserts 430 /* else we lose race and someone else inserts
413 * it first, don't bother updating 431 * it first, don't bother updating
414 */ 432 */
415 spin_unlock(&br->hash_lock); 433 spin_unlock(&br->hash_lock);
416 } 434 }
417} 435}
436
437static int fdb_to_nud(const struct net_bridge_fdb_entry *fdb)
438{
439 if (fdb->is_local)
440 return NUD_PERMANENT;
441 else if (fdb->is_static)
442 return NUD_NOARP;
443 else if (has_expired(fdb->dst->br, fdb))
444 return NUD_STALE;
445 else
446 return NUD_REACHABLE;
447}
448
449static int fdb_fill_info(struct sk_buff *skb,
450 const struct net_bridge_fdb_entry *fdb,
451 u32 pid, u32 seq, int type, unsigned int flags)
452{
453 unsigned long now = jiffies;
454 struct nda_cacheinfo ci;
455 struct nlmsghdr *nlh;
456 struct ndmsg *ndm;
457
458 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
459 if (nlh == NULL)
460 return -EMSGSIZE;
461
462
463 ndm = nlmsg_data(nlh);
464 ndm->ndm_family = AF_BRIDGE;
465 ndm->ndm_pad1 = 0;
466 ndm->ndm_pad2 = 0;
467 ndm->ndm_flags = 0;
468 ndm->ndm_type = 0;
469 ndm->ndm_ifindex = fdb->dst->dev->ifindex;
470 ndm->ndm_state = fdb_to_nud(fdb);
471
472 NLA_PUT(skb, NDA_LLADDR, ETH_ALEN, &fdb->addr);
473
474 ci.ndm_used = jiffies_to_clock_t(now - fdb->used);
475 ci.ndm_confirmed = 0;
476 ci.ndm_updated = jiffies_to_clock_t(now - fdb->updated);
477 ci.ndm_refcnt = 0;
478 NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
479
480 return nlmsg_end(skb, nlh);
481
482nla_put_failure:
483 nlmsg_cancel(skb, nlh);
484 return -EMSGSIZE;
485}
486
487static inline size_t fdb_nlmsg_size(void)
488{
489 return NLMSG_ALIGN(sizeof(struct ndmsg))
490 + nla_total_size(ETH_ALEN) /* NDA_LLADDR */
491 + nla_total_size(sizeof(struct nda_cacheinfo));
492}
493
494static void fdb_notify(const struct net_bridge_fdb_entry *fdb, int type)
495{
496 struct net *net = dev_net(fdb->dst->dev);
497 struct sk_buff *skb;
498 int err = -ENOBUFS;
499
500 skb = nlmsg_new(fdb_nlmsg_size(), GFP_ATOMIC);
501 if (skb == NULL)
502 goto errout;
503
504 err = fdb_fill_info(skb, fdb, 0, 0, type, 0);
505 if (err < 0) {
506 /* -EMSGSIZE implies BUG in fdb_nlmsg_size() */
507 WARN_ON(err == -EMSGSIZE);
508 kfree_skb(skb);
509 goto errout;
510 }
511 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
512 return;
513errout:
514 if (err < 0)
515 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
516}
517
518/* Dump information about entries, in response to GETNEIGH */
519int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
520{
521 struct net *net = sock_net(skb->sk);
522 struct net_device *dev;
523 int idx = 0;
524
525 rcu_read_lock();
526 for_each_netdev_rcu(net, dev) {
527 struct net_bridge *br = netdev_priv(dev);
528 int i;
529
530 if (!(dev->priv_flags & IFF_EBRIDGE))
531 continue;
532
533 for (i = 0; i < BR_HASH_SIZE; i++) {
534 struct hlist_node *h;
535 struct net_bridge_fdb_entry *f;
536
537 hlist_for_each_entry_rcu(f, h, &br->hash[i], hlist) {
538 if (idx < cb->args[0])
539 goto skip;
540
541 if (fdb_fill_info(skb, f,
542 NETLINK_CB(cb->skb).pid,
543 cb->nlh->nlmsg_seq,
544 RTM_NEWNEIGH,
545 NLM_F_MULTI) < 0)
546 break;
547skip:
548 ++idx;
549 }
550 }
551 }
552 rcu_read_unlock();
553
554 cb->args[0] = idx;
555
556 return skb->len;
557}
558
559/* Create new static fdb entry */
560static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
561 __u16 state)
562{
563 struct net_bridge *br = source->br;
564 struct hlist_head *head = &br->hash[br_mac_hash(addr)];
565 struct net_bridge_fdb_entry *fdb;
566
567 fdb = fdb_find(head, addr);
568 if (fdb)
569 return -EEXIST;
570
571 fdb = fdb_create(head, source, addr);
572 if (!fdb)
573 return -ENOMEM;
574
575 if (state & NUD_PERMANENT)
576 fdb->is_local = fdb->is_static = 1;
577 else if (state & NUD_NOARP)
578 fdb->is_static = 1;
579 return 0;
580}
581
582/* Add new permanent fdb entry with RTM_NEWNEIGH */
583int br_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
584{
585 struct net *net = sock_net(skb->sk);
586 struct ndmsg *ndm;
587 struct nlattr *tb[NDA_MAX+1];
588 struct net_device *dev;
589 struct net_bridge_port *p;
590 const __u8 *addr;
591 int err;
592
593 ASSERT_RTNL();
594 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
595 if (err < 0)
596 return err;
597
598 ndm = nlmsg_data(nlh);
599 if (ndm->ndm_ifindex == 0) {
600 pr_info("bridge: RTM_NEWNEIGH with invalid ifindex\n");
601 return -EINVAL;
602 }
603
604 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
605 if (dev == NULL) {
606 pr_info("bridge: RTM_NEWNEIGH with unknown ifindex\n");
607 return -ENODEV;
608 }
609
610 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
611 pr_info("bridge: RTM_NEWNEIGH with invalid address\n");
612 return -EINVAL;
613 }
614
615 addr = nla_data(tb[NDA_LLADDR]);
616 if (!is_valid_ether_addr(addr)) {
617 pr_info("bridge: RTM_NEWNEIGH with invalid ether address\n");
618 return -EINVAL;
619 }
620
621 p = br_port_get_rtnl(dev);
622 if (p == NULL) {
623 pr_info("bridge: RTM_NEWNEIGH %s not a bridge port\n",
624 dev->name);
625 return -EINVAL;
626 }
627
628 spin_lock_bh(&p->br->hash_lock);
629 err = fdb_add_entry(p, addr, ndm->ndm_state);
630 spin_unlock_bh(&p->br->hash_lock);
631
632 return err;
633}
634
635static int fdb_delete_by_addr(struct net_bridge_port *p, const u8 *addr)
636{
637 struct net_bridge *br = p->br;
638 struct hlist_head *head = &br->hash[br_mac_hash(addr)];
639 struct net_bridge_fdb_entry *fdb;
640
641 fdb = fdb_find(head, addr);
642 if (!fdb)
643 return -ENOENT;
644
645 fdb_delete(fdb);
646 return 0;
647}
648
649/* Remove neighbor entry with RTM_DELNEIGH */
650int br_fdb_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
651{
652 struct net *net = sock_net(skb->sk);
653 struct ndmsg *ndm;
654 struct net_bridge_port *p;
655 struct nlattr *llattr;
656 const __u8 *addr;
657 struct net_device *dev;
658 int err;
659
660 ASSERT_RTNL();
661 if (nlmsg_len(nlh) < sizeof(*ndm))
662 return -EINVAL;
663
664 ndm = nlmsg_data(nlh);
665 if (ndm->ndm_ifindex == 0) {
666 pr_info("bridge: RTM_DELNEIGH with invalid ifindex\n");
667 return -EINVAL;
668 }
669
670 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
671 if (dev == NULL) {
672 pr_info("bridge: RTM_DELNEIGH with unknown ifindex\n");
673 return -ENODEV;
674 }
675
676 llattr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_LLADDR);
677 if (llattr == NULL || nla_len(llattr) != ETH_ALEN) {
678 pr_info("bridge: RTM_DELNEIGH with invalid address\n");
679 return -EINVAL;
680 }
681
682 addr = nla_data(llattr);
683
684 p = br_port_get_rtnl(dev);
685 if (p == NULL) {
686 pr_info("bridge: RTM_DELNEIGH %s not a bridge port\n",
687 dev->name);
688 return -EINVAL;
689 }
690
691 spin_lock_bh(&p->br->hash_lock);
692 err = fdb_delete_by_addr(p, addr);
693 spin_unlock_bh(&p->br->hash_lock);
694
695 return err;
696}
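
The large br_fdb.c addition exposes the forwarding database over rtnetlink as AF_BRIDGE neighbours: fdb_create()/fdb_delete() emit RTM_NEWNEIGH/RTM_DELNEIGH notifications, br_fdb_dump() answers RTM_GETNEIGH, and br_fdb_add()/br_fdb_delete() let userspace install or remove static entries, the interface consumed by tools such as `bridge fdb`. The flag-to-state mapping is the interesting part; a userspace model of fdb_to_nud(), with illustrative constants rather than the uapi values:

#include <stdio.h>

enum nud { NUD_PERMANENT, NUD_NOARP, NUD_STALE, NUD_REACHABLE };

struct fdb_entry { int is_local, is_static, expired; };

static enum nud fdb_to_nud(const struct fdb_entry *f)
{
	if (f->is_local)
		return NUD_PERMANENT;	/* an address of the bridge itself */
	if (f->is_static)
		return NUD_NOARP;	/* user-installed, never ages out */
	if (f->expired)
		return NUD_STALE;	/* past hold_time, not yet reaped */
	return NUD_REACHABLE;		/* freshly learned */
}

int main(void)
{
	struct fdb_entry learned = { 0, 0, 0 }, aged = { 0, 0, 1 };

	printf("learned=%d aged=%d\n", fdb_to_nud(&learned), fdb_to_nud(&aged));
	return 0;
}

This is also why ageing_timer becomes the updated/used pair: updated drives ageing as before, while used feeds the nda_cacheinfo reported to userspace.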
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 718b60366dfe..1bacca4cb676 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -36,8 +36,8 @@ static int port_cost(struct net_device *dev)
36 if (dev->ethtool_ops && dev->ethtool_ops->get_settings) { 36 if (dev->ethtool_ops && dev->ethtool_ops->get_settings) {
37 struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET, }; 37 struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET, };
38 38
39 if (!dev->ethtool_ops->get_settings(dev, &ecmd)) { 39 if (!dev_ethtool_get_settings(dev, &ecmd)) {
40 switch(ecmd.speed) { 40 switch (ethtool_cmd_speed(&ecmd)) {
41 case SPEED_10000: 41 case SPEED_10000:
42 return 2; 42 return 2;
43 case SPEED_1000: 43 case SPEED_1000:
@@ -147,6 +147,7 @@ static void del_nbp(struct net_bridge_port *p)
147 dev->priv_flags &= ~IFF_BRIDGE_PORT; 147 dev->priv_flags &= ~IFF_BRIDGE_PORT;
148 148
149 netdev_rx_handler_unregister(dev); 149 netdev_rx_handler_unregister(dev);
150 synchronize_net();
150 151
151 netdev_set_master(dev, NULL); 152 netdev_set_master(dev, NULL);
152 153
@@ -175,56 +176,6 @@ static void del_br(struct net_bridge *br, struct list_head *head)
175 unregister_netdevice_queue(br->dev, head); 176 unregister_netdevice_queue(br->dev, head);
176} 177}
177 178
178static struct net_device *new_bridge_dev(struct net *net, const char *name)
179{
180 struct net_bridge *br;
181 struct net_device *dev;
182
183 dev = alloc_netdev(sizeof(struct net_bridge), name,
184 br_dev_setup);
185
186 if (!dev)
187 return NULL;
188 dev_net_set(dev, net);
189
190 br = netdev_priv(dev);
191 br->dev = dev;
192
193 br->stats = alloc_percpu(struct br_cpu_netstats);
194 if (!br->stats) {
195 free_netdev(dev);
196 return NULL;
197 }
198
199 spin_lock_init(&br->lock);
200 INIT_LIST_HEAD(&br->port_list);
201 spin_lock_init(&br->hash_lock);
202
203 br->bridge_id.prio[0] = 0x80;
204 br->bridge_id.prio[1] = 0x00;
205
206 memcpy(br->group_addr, br_group_address, ETH_ALEN);
207
208 br->feature_mask = dev->features;
209 br->stp_enabled = BR_NO_STP;
210 br->designated_root = br->bridge_id;
211 br->root_path_cost = 0;
212 br->root_port = 0;
213 br->bridge_max_age = br->max_age = 20 * HZ;
214 br->bridge_hello_time = br->hello_time = 2 * HZ;
215 br->bridge_forward_delay = br->forward_delay = 15 * HZ;
216 br->topology_change = 0;
217 br->topology_change_detected = 0;
218 br->ageing_time = 300 * HZ;
219
220 br_netfilter_rtable_init(br);
221
222 br_stp_timer_init(br);
223 br_multicast_init(br);
224
225 return dev;
226}
227
228/* find an available port number */ 179/* find an available port number */
229static int find_portno(struct net_bridge *br) 180static int find_portno(struct net_bridge *br)
230{ 181{
@@ -277,42 +228,19 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br,
277 return p; 228 return p;
278} 229}
279 230
280static struct device_type br_type = {
281 .name = "bridge",
282};
283
284int br_add_bridge(struct net *net, const char *name) 231int br_add_bridge(struct net *net, const char *name)
285{ 232{
286 struct net_device *dev; 233 struct net_device *dev;
287 int ret;
288 234
289 dev = new_bridge_dev(net, name); 235 dev = alloc_netdev(sizeof(struct net_bridge), name,
236 br_dev_setup);
237
290 if (!dev) 238 if (!dev)
291 return -ENOMEM; 239 return -ENOMEM;
292 240
293 rtnl_lock(); 241 dev_net_set(dev, net);
294 if (strchr(dev->name, '%')) {
295 ret = dev_alloc_name(dev, dev->name);
296 if (ret < 0)
297 goto out_free;
298 }
299
300 SET_NETDEV_DEVTYPE(dev, &br_type);
301
302 ret = register_netdevice(dev);
303 if (ret)
304 goto out_free;
305
306 ret = br_sysfs_addbr(dev);
307 if (ret)
308 unregister_netdevice(dev);
309 out:
310 rtnl_unlock();
311 return ret;
312 242
313out_free: 243 return register_netdev(dev);
314 free_netdev(dev);
315 goto out;
316} 244}
317 245
318int br_del_bridge(struct net *net, const char *name) 246int br_del_bridge(struct net *net, const char *name)
@@ -364,15 +292,15 @@ int br_min_mtu(const struct net_bridge *br)
364/* 292/*
 365 * Recomputes features using slaves' features 293 * Recomputes features using slaves' features
366 */ 294 */
367void br_features_recompute(struct net_bridge *br) 295u32 br_features_recompute(struct net_bridge *br, u32 features)
368{ 296{
369 struct net_bridge_port *p; 297 struct net_bridge_port *p;
370 u32 features, mask; 298 u32 mask;
371 299
372 features = mask = br->feature_mask;
373 if (list_empty(&br->port_list)) 300 if (list_empty(&br->port_list))
374 goto done; 301 return features;
375 302
303 mask = features;
376 features &= ~NETIF_F_ONE_FOR_ALL; 304 features &= ~NETIF_F_ONE_FOR_ALL;
377 305
378 list_for_each_entry(p, &br->port_list, list) { 306 list_for_each_entry(p, &br->port_list, list) {
@@ -380,8 +308,7 @@ void br_features_recompute(struct net_bridge *br)
380 p->dev->features, mask); 308 p->dev->features, mask);
381 } 309 }
382 310
383done: 311 return features;
384 br->dev->features = netdev_fix_features(br->dev, features);
385} 312}
386 313
387/* called with RTNL */ 314/* called with RTNL */
@@ -412,6 +339,8 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
412 if (IS_ERR(p)) 339 if (IS_ERR(p))
413 return PTR_ERR(p); 340 return PTR_ERR(p);
414 341
342 call_netdevice_notifiers(NETDEV_JOIN, dev);
343
415 err = dev_set_promiscuity(dev, 1); 344 err = dev_set_promiscuity(dev, 1);
416 if (err) 345 if (err)
417 goto put_back; 346 goto put_back;
@@ -446,9 +375,10 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
446 375
447 list_add_rcu(&p->list, &br->port_list); 376 list_add_rcu(&p->list, &br->port_list);
448 377
378 netdev_update_features(br->dev);
379
449 spin_lock_bh(&br->lock); 380 spin_lock_bh(&br->lock);
450 changed_addr = br_stp_recalculate_bridge_id(br); 381 changed_addr = br_stp_recalculate_bridge_id(br);
451 br_features_recompute(br);
452 382
453 if ((dev->flags & IFF_UP) && netif_carrier_ok(dev) && 383 if ((dev->flags & IFF_UP) && netif_carrier_ok(dev) &&
454 (br->dev->flags & IFF_UP)) 384 (br->dev->flags & IFF_UP))
@@ -496,9 +426,10 @@ int br_del_if(struct net_bridge *br, struct net_device *dev)
496 426
497 spin_lock_bh(&br->lock); 427 spin_lock_bh(&br->lock);
498 br_stp_recalculate_bridge_id(br); 428 br_stp_recalculate_bridge_id(br);
499 br_features_recompute(br);
500 spin_unlock_bh(&br->lock); 429 spin_unlock_bh(&br->lock);
501 430
431 netdev_update_features(br->dev);
432
502 return 0; 433 return 0;
503} 434}
504 435
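
In br_if.c, br_features_recompute() becomes a pure function of the offered feature set for use by ndo_fix_features, bridge allocation collapses into alloc_netdev() plus register_netdev(), and del_nbp() gains a synchronize_net() so no RCU reader still holds the old rx handler when teardown proceeds. The port_cost() hunk only switches to the dev_ethtool_get_settings()/ethtool_cmd_speed() accessors; the speed-to-cost table is unchanged. A sketch of that table as a plain function (the 4/19/100 values for slower links are the kernel's long-standing 802.1D-style defaults, not visible in this hunk):

#include <stdio.h>

static int port_cost(int speed_mbps)
{
	switch (speed_mbps) {
	case 10000: return 2;
	case 1000:  return 4;
	case 100:   return 19;
	case 10:    return 100;
	default:    return 100;	/* unknown speed: assume it is slow */
	}
}

int main(void)
{
	printf("1G cost %d, 10G cost %d\n", port_cost(1000), port_cost(10000));
	return 0;
}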
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 0c7badad62af..f06ee39c73fd 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -60,7 +60,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
60 br = p->br; 60 br = p->br;
61 br_fdb_update(br, p, eth_hdr(skb)->h_source); 61 br_fdb_update(br, p, eth_hdr(skb)->h_source);
62 62
63 if (is_multicast_ether_addr(dest) && 63 if (!is_broadcast_ether_addr(dest) && is_multicast_ether_addr(dest) &&
64 br_multicast_rcv(br, p, skb)) 64 br_multicast_rcv(br, p, skb))
65 goto drop; 65 goto drop;
66 66
@@ -77,7 +77,9 @@ int br_handle_frame_finish(struct sk_buff *skb)
77 77
78 dst = NULL; 78 dst = NULL;
79 79
80 if (is_multicast_ether_addr(dest)) { 80 if (is_broadcast_ether_addr(dest))
81 skb2 = skb;
82 else if (is_multicast_ether_addr(dest)) {
81 mdst = br_mdb_get(br, skb); 83 mdst = br_mdb_get(br, skb);
82 if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) { 84 if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) {
83 if ((mdst && mdst->mglist) || 85 if ((mdst && mdst->mglist) ||
@@ -98,9 +100,10 @@ int br_handle_frame_finish(struct sk_buff *skb)
98 } 100 }
99 101
100 if (skb) { 102 if (skb) {
101 if (dst) 103 if (dst) {
104 dst->used = jiffies;
102 br_forward(dst->dst, skb, skb2); 105 br_forward(dst->dst, skb, skb2);
103 else 106 } else
104 br_flood_forward(br, skb, skb2); 107 br_flood_forward(br, skb, skb2);
105 } 108 }
106 109
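
br_input.c adds an explicit broadcast fast path: ff:ff:ff:ff:ff:ff is flooded before the multicast checks run, so IGMP/MLD snooping only ever sees true group traffic, and unicast hits now refresh dst->used for the netlink dump added in br_fdb.c. A userspace model of the new dispatch order; broadcast also carries the multicast group bit, which is why it must be tested first:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool is_broadcast(const unsigned char *a)
{
	static const unsigned char bc[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	return memcmp(a, bc, 6) == 0;
}

static bool is_multicast(const unsigned char *a)
{
	return a[0] & 0x01;	/* I/G bit: set for multicast and broadcast */
}

static const char *classify(const unsigned char *dest)
{
	if (is_broadcast(dest))
		return "flood";		/* never enters snooping */
	if (is_multicast(dest))
		return "snoop";		/* IGMP/MLD handling */
	return "fdb lookup";		/* unicast forwarding */
}

int main(void)
{
	const unsigned char bc[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	const unsigned char mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	const unsigned char uc[6] = { 0x52, 0x54, 0x00, 0x12, 0x34, 0x56 };

	printf("%s / %s / %s\n", classify(bc), classify(mc), classify(uc));
	return 0;
}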
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
index 3d9fca0e3370..7222fe1d5460 100644
--- a/net/bridge/br_ioctl.c
+++ b/net/bridge/br_ioctl.c
@@ -181,40 +181,19 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
181 if (!capable(CAP_NET_ADMIN)) 181 if (!capable(CAP_NET_ADMIN))
182 return -EPERM; 182 return -EPERM;
183 183
184 spin_lock_bh(&br->lock); 184 return br_set_forward_delay(br, args[1]);
185 br->bridge_forward_delay = clock_t_to_jiffies(args[1]);
186 if (br_is_root_bridge(br))
187 br->forward_delay = br->bridge_forward_delay;
188 spin_unlock_bh(&br->lock);
189 return 0;
190 185
191 case BRCTL_SET_BRIDGE_HELLO_TIME: 186 case BRCTL_SET_BRIDGE_HELLO_TIME:
192 {
193 unsigned long t = clock_t_to_jiffies(args[1]);
194 if (!capable(CAP_NET_ADMIN)) 187 if (!capable(CAP_NET_ADMIN))
195 return -EPERM; 188 return -EPERM;
196 189
197 if (t < HZ) 190 return br_set_hello_time(br, args[1]);
198 return -EINVAL;
199
200 spin_lock_bh(&br->lock);
201 br->bridge_hello_time = t;
202 if (br_is_root_bridge(br))
203 br->hello_time = br->bridge_hello_time;
204 spin_unlock_bh(&br->lock);
205 return 0;
206 }
207 191
208 case BRCTL_SET_BRIDGE_MAX_AGE: 192 case BRCTL_SET_BRIDGE_MAX_AGE:
209 if (!capable(CAP_NET_ADMIN)) 193 if (!capable(CAP_NET_ADMIN))
210 return -EPERM; 194 return -EPERM;
211 195
212 spin_lock_bh(&br->lock); 196 return br_set_max_age(br, args[1]);
213 br->bridge_max_age = clock_t_to_jiffies(args[1]);
214 if (br_is_root_bridge(br))
215 br->max_age = br->bridge_max_age;
216 spin_unlock_bh(&br->lock);
217 return 0;
218 197
219 case BRCTL_SET_AGEING_TIME: 198 case BRCTL_SET_AGEING_TIME:
220 if (!capable(CAP_NET_ADMIN)) 199 if (!capable(CAP_NET_ADMIN))
@@ -275,19 +254,16 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
275 case BRCTL_SET_PORT_PRIORITY: 254 case BRCTL_SET_PORT_PRIORITY:
276 { 255 {
277 struct net_bridge_port *p; 256 struct net_bridge_port *p;
278 int ret = 0; 257 int ret;
279 258
280 if (!capable(CAP_NET_ADMIN)) 259 if (!capable(CAP_NET_ADMIN))
281 return -EPERM; 260 return -EPERM;
282 261
283 if (args[2] >= (1<<(16-BR_PORT_BITS)))
284 return -ERANGE;
285
286 spin_lock_bh(&br->lock); 262 spin_lock_bh(&br->lock);
287 if ((p = br_get_port(br, args[1])) == NULL) 263 if ((p = br_get_port(br, args[1])) == NULL)
288 ret = -EINVAL; 264 ret = -EINVAL;
289 else 265 else
290 br_stp_set_port_priority(p, args[2]); 266 ret = br_stp_set_port_priority(p, args[2]);
291 spin_unlock_bh(&br->lock); 267 spin_unlock_bh(&br->lock);
292 return ret; 268 return ret;
293 } 269 }
@@ -295,15 +271,17 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
295 case BRCTL_SET_PATH_COST: 271 case BRCTL_SET_PATH_COST:
296 { 272 {
297 struct net_bridge_port *p; 273 struct net_bridge_port *p;
298 int ret = 0; 274 int ret;
299 275
300 if (!capable(CAP_NET_ADMIN)) 276 if (!capable(CAP_NET_ADMIN))
301 return -EPERM; 277 return -EPERM;
302 278
279 spin_lock_bh(&br->lock);
303 if ((p = br_get_port(br, args[1])) == NULL) 280 if ((p = br_get_port(br, args[1])) == NULL)
304 ret = -EINVAL; 281 ret = -EINVAL;
305 else 282 else
306 br_stp_set_path_cost(p, args[2]); 283 ret = br_stp_set_path_cost(p, args[2]);
284 spin_unlock_bh(&br->lock);
307 285
308 return ret; 286 return ret;
309 } 287 }
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 59660c909a7c..2d85ca7111d3 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -413,7 +413,7 @@ out:
413 413
414#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 414#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
415static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br, 415static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
416 struct in6_addr *group) 416 const struct in6_addr *group)
417{ 417{
418 struct sk_buff *skb; 418 struct sk_buff *skb;
419 struct ipv6hdr *ip6h; 419 struct ipv6hdr *ip6h;
@@ -1115,7 +1115,7 @@ static int br_ip4_multicast_query(struct net_bridge *br,
1115 struct net_bridge_port *port, 1115 struct net_bridge_port *port,
1116 struct sk_buff *skb) 1116 struct sk_buff *skb)
1117{ 1117{
1118 struct iphdr *iph = ip_hdr(skb); 1118 const struct iphdr *iph = ip_hdr(skb);
1119 struct igmphdr *ih = igmp_hdr(skb); 1119 struct igmphdr *ih = igmp_hdr(skb);
1120 struct net_bridge_mdb_entry *mp; 1120 struct net_bridge_mdb_entry *mp;
1121 struct igmpv3_query *ih3; 1121 struct igmpv3_query *ih3;
@@ -1190,7 +1190,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
1190 struct net_bridge_port *port, 1190 struct net_bridge_port *port,
1191 struct sk_buff *skb) 1191 struct sk_buff *skb)
1192{ 1192{
1193 struct ipv6hdr *ip6h = ipv6_hdr(skb); 1193 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
1194 struct mld_msg *mld = (struct mld_msg *) icmp6_hdr(skb); 1194 struct mld_msg *mld = (struct mld_msg *) icmp6_hdr(skb);
1195 struct net_bridge_mdb_entry *mp; 1195 struct net_bridge_mdb_entry *mp;
1196 struct mld2_query *mld2q; 1196 struct mld2_query *mld2q;
@@ -1198,7 +1198,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
1198 struct net_bridge_port_group __rcu **pp; 1198 struct net_bridge_port_group __rcu **pp;
1199 unsigned long max_delay; 1199 unsigned long max_delay;
1200 unsigned long now = jiffies; 1200 unsigned long now = jiffies;
1201 struct in6_addr *group = NULL; 1201 const struct in6_addr *group = NULL;
1202 int err = 0; 1202 int err = 0;
1203 1203
1204 spin_lock(&br->multicast_lock); 1204 spin_lock(&br->multicast_lock);
@@ -1356,7 +1356,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
1356 struct sk_buff *skb) 1356 struct sk_buff *skb)
1357{ 1357{
1358 struct sk_buff *skb2 = skb; 1358 struct sk_buff *skb2 = skb;
1359 struct iphdr *iph; 1359 const struct iphdr *iph;
1360 struct igmphdr *ih; 1360 struct igmphdr *ih;
1361 unsigned len; 1361 unsigned len;
1362 unsigned offset; 1362 unsigned offset;
@@ -1379,8 +1379,11 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
1379 if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl))) 1379 if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
1380 return -EINVAL; 1380 return -EINVAL;
1381 1381
1382 if (iph->protocol != IPPROTO_IGMP) 1382 if (iph->protocol != IPPROTO_IGMP) {
1383 if ((iph->daddr & IGMP_LOCAL_GROUP_MASK) != IGMP_LOCAL_GROUP)
1384 BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
1383 return 0; 1385 return 0;
1386 }
1384 1387
1385 len = ntohs(iph->tot_len); 1388 len = ntohs(iph->tot_len);
1386 if (skb->len < len || len < ip_hdrlen(skb)) 1389 if (skb->len < len || len < ip_hdrlen(skb))
@@ -1424,7 +1427,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
1424 switch (ih->type) { 1427 switch (ih->type) {
1425 case IGMP_HOST_MEMBERSHIP_REPORT: 1428 case IGMP_HOST_MEMBERSHIP_REPORT:
1426 case IGMPV2_HOST_MEMBERSHIP_REPORT: 1429 case IGMPV2_HOST_MEMBERSHIP_REPORT:
1427 BR_INPUT_SKB_CB(skb2)->mrouters_only = 1; 1430 BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
1428 err = br_ip4_multicast_add_group(br, port, ih->group); 1431 err = br_ip4_multicast_add_group(br, port, ih->group);
1429 break; 1432 break;
1430 case IGMPV3_HOST_MEMBERSHIP_REPORT: 1433 case IGMPV3_HOST_MEMBERSHIP_REPORT:
@@ -1452,7 +1455,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
1452 struct sk_buff *skb) 1455 struct sk_buff *skb)
1453{ 1456{
1454 struct sk_buff *skb2; 1457 struct sk_buff *skb2;
1455 struct ipv6hdr *ip6h; 1458 const struct ipv6hdr *ip6h;
1456 struct icmp6hdr *icmp6h; 1459 struct icmp6hdr *icmp6h;
1457 u8 nexthdr; 1460 u8 nexthdr;
1458 unsigned len; 1461 unsigned len;
@@ -1543,7 +1546,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
1543 goto out; 1546 goto out;
1544 } 1547 }
1545 mld = (struct mld_msg *)skb_transport_header(skb2); 1548 mld = (struct mld_msg *)skb_transport_header(skb2);
1546 BR_INPUT_SKB_CB(skb2)->mrouters_only = 1; 1549 BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
1547 err = br_ip6_multicast_add_group(br, port, &mld->mld_mca); 1550 err = br_ip6_multicast_add_group(br, port, &mld->mld_mca);
1548 break; 1551 break;
1549 } 1552 }
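
Two fixes land in br_multicast.c: mrouters_only must be set in the control block of the skb that continues through br_handle_frame_finish(), not the clone skb2 that the snooping code parses, and non-IGMP multicast outside the link-local 224.0.0.0/24 range is now marked mrouters_only, so unregistered groups are forwarded toward multicast routers instead of being flooded. A userspace model of the range test, treating IPv4 addresses as host-order integers for readability:

#include <stdint.h>
#include <stdio.h>

#define IGMP_LOCAL_GROUP	0xe0000000u	/* 224.0.0.0 */
#define IGMP_LOCAL_GROUP_MASK	0xffffff00u	/* the 224.0.0.0/24 range */

/* Non-IGMP multicast outside 224.0.0.x: deliver to router ports only
 * unless snooping has learned members for the group. */
static int mark_mrouters_only(uint32_t daddr)
{
	return (daddr & IGMP_LOCAL_GROUP_MASK) != IGMP_LOCAL_GROUP;
}

int main(void)
{
	printf("224.0.0.1 -> %d (always flooded)\n",
	       mark_mrouters_only(0xe0000001u));
	printf("239.1.1.1 -> %d (routers only if unregistered)\n",
	       mark_mrouters_only(0xef010101u));
	return 0;
}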
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 74ef4d4846a4..56149ec36d7f 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -104,10 +104,16 @@ static void fake_update_pmtu(struct dst_entry *dst, u32 mtu)
104{ 104{
105} 105}
106 106
107static u32 *fake_cow_metrics(struct dst_entry *dst, unsigned long old)
108{
109 return NULL;
110}
111
107static struct dst_ops fake_dst_ops = { 112static struct dst_ops fake_dst_ops = {
108 .family = AF_INET, 113 .family = AF_INET,
109 .protocol = cpu_to_be16(ETH_P_IP), 114 .protocol = cpu_to_be16(ETH_P_IP),
110 .update_pmtu = fake_update_pmtu, 115 .update_pmtu = fake_update_pmtu,
116 .cow_metrics = fake_cow_metrics,
111}; 117};
112 118
113/* 119/*
@@ -117,6 +123,10 @@ static struct dst_ops fake_dst_ops = {
117 * ipt_REJECT needs it. Future netfilter modules might 123 * ipt_REJECT needs it. Future netfilter modules might
118 * require us to fill additional fields. 124 * require us to fill additional fields.
119 */ 125 */
126static const u32 br_dst_default_metrics[RTAX_MAX] = {
127 [RTAX_MTU - 1] = 1500,
128};
129
120void br_netfilter_rtable_init(struct net_bridge *br) 130void br_netfilter_rtable_init(struct net_bridge *br)
121{ 131{
122 struct rtable *rt = &br->fake_rtable; 132 struct rtable *rt = &br->fake_rtable;
@@ -124,7 +134,7 @@ void br_netfilter_rtable_init(struct net_bridge *br)
124 atomic_set(&rt->dst.__refcnt, 1); 134 atomic_set(&rt->dst.__refcnt, 1);
125 rt->dst.dev = br->dev; 135 rt->dst.dev = br->dev;
126 rt->dst.path = &rt->dst; 136 rt->dst.path = &rt->dst;
127 dst_metric_set(&rt->dst, RTAX_MTU, 1500); 137 dst_init_metrics(&rt->dst, br_dst_default_metrics, true);
128 rt->dst.flags = DST_NOXFRM; 138 rt->dst.flags = DST_NOXFRM;
129 rt->dst.ops = &fake_dst_ops; 139 rt->dst.ops = &fake_dst_ops;
130} 140}
@@ -219,7 +229,7 @@ static inline void nf_bridge_update_protocol(struct sk_buff *skb)
219static int br_parse_ip_options(struct sk_buff *skb) 229static int br_parse_ip_options(struct sk_buff *skb)
220{ 230{
221 struct ip_options *opt; 231 struct ip_options *opt;
222 struct iphdr *iph; 232 const struct iphdr *iph;
223 struct net_device *dev = skb->dev; 233 struct net_device *dev = skb->dev;
224 u32 len; 234 u32 len;
225 235
@@ -554,7 +564,7 @@ static unsigned int br_nf_pre_routing_ipv6(unsigned int hook,
554 const struct net_device *out, 564 const struct net_device *out,
555 int (*okfn)(struct sk_buff *)) 565 int (*okfn)(struct sk_buff *))
556{ 566{
557 struct ipv6hdr *hdr; 567 const struct ipv6hdr *hdr;
558 u32 pkt_len; 568 u32 pkt_len;
559 569
560 if (skb->len < sizeof(struct ipv6hdr)) 570 if (skb->len < sizeof(struct ipv6hdr))
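
The fake route that br_netfilter hands to iptables now points at a shared const metrics array via dst_init_metrics(..., true), and its dst_ops grows a cow_metrics() stub returning NULL, so nothing can ever obtain a writable copy of those metrics. A userspace model of the read-only-metrics contract, with drastically simplified types:

#include <stdint.h>
#include <stdio.h>

#define RTAX_MTU 1
#define RTAX_MAX 16

static const uint32_t br_dst_default_metrics[RTAX_MAX] = {
	[RTAX_MTU - 1] = 1500,
};

struct fake_dst {
	const uint32_t *metrics;	/* shared by every bridge, never freed */
};

static uint32_t *cow_metrics(struct fake_dst *dst)
{
	(void)dst;
	return NULL;			/* refuse to hand out writable metrics */
}

int main(void)
{
	struct fake_dst d = { .metrics = br_dst_default_metrics };

	printf("mtu %u, writable copy: %s\n",
	       (unsigned)d.metrics[RTAX_MTU - 1],
	       cow_metrics(&d) ? "granted" : "denied");
	return 0;
}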
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index f8bf4c7f842c..ffb0dc4cc0e8 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -12,9 +12,11 @@
12 12
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/slab.h> 14#include <linux/slab.h>
15#include <linux/etherdevice.h>
15#include <net/rtnetlink.h> 16#include <net/rtnetlink.h>
16#include <net/net_namespace.h> 17#include <net/net_namespace.h>
17#include <net/sock.h> 18#include <net/sock.h>
19
18#include "br_private.h" 20#include "br_private.h"
19 21
20static inline size_t br_nlmsg_size(void) 22static inline size_t br_nlmsg_size(void)
@@ -118,8 +120,9 @@ static int br_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
118 int idx; 120 int idx;
119 121
120 idx = 0; 122 idx = 0;
121 for_each_netdev(net, dev) { 123 rcu_read_lock();
122 struct net_bridge_port *port = br_port_get_rtnl(dev); 124 for_each_netdev_rcu(net, dev) {
125 struct net_bridge_port *port = br_port_get_rcu(dev);
123 126
124 /* not a bridge port */ 127 /* not a bridge port */
125 if (!port || idx < cb->args[0]) 128 if (!port || idx < cb->args[0])
@@ -133,7 +136,7 @@ static int br_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
133skip: 136skip:
134 ++idx; 137 ++idx;
135 } 138 }
136 139 rcu_read_unlock();
137 cb->args[0] = idx; 140 cb->args[0] = idx;
138 141
139 return skb->len; 142 return skb->len;
@@ -188,20 +191,61 @@ static int br_rtm_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
188 return 0; 191 return 0;
189} 192}
190 193
194static int br_validate(struct nlattr *tb[], struct nlattr *data[])
195{
196 if (tb[IFLA_ADDRESS]) {
197 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
198 return -EINVAL;
199 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
200 return -EADDRNOTAVAIL;
201 }
202
203 return 0;
204}
205
206static struct rtnl_link_ops br_link_ops __read_mostly = {
207 .kind = "bridge",
208 .priv_size = sizeof(struct net_bridge),
209 .setup = br_dev_setup,
210 .validate = br_validate,
211};
191 212
192int __init br_netlink_init(void) 213int __init br_netlink_init(void)
193{ 214{
194 if (__rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, br_dump_ifinfo)) 215 int err;
195 return -ENOBUFS;
196 216
197 /* Only the first call to __rtnl_register can fail */ 217 err = rtnl_link_register(&br_link_ops);
198 __rtnl_register(PF_BRIDGE, RTM_SETLINK, br_rtm_setlink, NULL); 218 if (err < 0)
219 goto err1;
220
221 err = __rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, br_dump_ifinfo);
222 if (err)
223 goto err2;
224 err = __rtnl_register(PF_BRIDGE, RTM_SETLINK, br_rtm_setlink, NULL);
225 if (err)
226 goto err3;
227 err = __rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, br_fdb_add, NULL);
228 if (err)
229 goto err3;
230 err = __rtnl_register(PF_BRIDGE, RTM_DELNEIGH, br_fdb_delete, NULL);
231 if (err)
232 goto err3;
233 err = __rtnl_register(PF_BRIDGE, RTM_GETNEIGH, NULL, br_fdb_dump);
234 if (err)
235 goto err3;
199 236
200 return 0; 237 return 0;
238
239err3:
240 rtnl_unregister_all(PF_BRIDGE);
241err2:
242 rtnl_link_unregister(&br_link_ops);
243err1:
244 return err;
201} 245}
202 246
203void __exit br_netlink_fini(void) 247void __exit br_netlink_fini(void)
204{ 248{
249 rtnl_link_unregister(&br_link_ops);
205 rtnl_unregister_all(PF_BRIDGE); 250 rtnl_unregister_all(PF_BRIDGE);
206} 251}
207
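
br_netlink.c registers a real rtnl_link_ops, so bridges can be created with `ip link add ... type bridge`, plus the three fdb message handlers, and every __rtnl_register() call is now error-checked with a classic goto unwind: a failure at any step unregisters everything set up before it, in reverse. A compact userspace model of that ladder (step names are illustrative):

#include <stdio.h>

static int do_register(const char *name, int fail)
{
	printf("register %s\n", name);
	return fail ? -1 : 0;
}

static int netlink_init(int fail_at)
{
	int err;

	err = do_register("link ops", fail_at == 1);
	if (err)
		goto err1;
	err = do_register("GETLINK handler", fail_at == 2);
	if (err)
		goto err2;
	err = do_register("SETLINK handler", fail_at == 3);
	if (err)
		goto err3;
	return 0;

err3:
	puts("unregister all PF_BRIDGE handlers");	/* falls through */
err2:
	puts("unregister link ops");
err1:
	return err;
}

int main(void)
{
	netlink_init(3);	/* demo: failure at step 3 unwinds 2, then 1 */
	return 0;
}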
diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c
index 7d337c9b6082..6545ee9591d1 100644
--- a/net/bridge/br_notify.c
+++ b/net/bridge/br_notify.c
@@ -36,6 +36,12 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
36 struct net_bridge *br; 36 struct net_bridge *br;
37 int err; 37 int err;
38 38
 39 /* bridge registration completed, add sysfs entries */
40 if ((dev->priv_flags & IFF_EBRIDGE) && event == NETDEV_REGISTER) {
41 br_sysfs_addbr(dev);
42 return NOTIFY_DONE;
43 }
44
39 /* not a port of a bridge */ 45 /* not a port of a bridge */
40 p = br_port_get_rtnl(dev); 46 p = br_port_get_rtnl(dev);
41 if (!p) 47 if (!p)
@@ -60,10 +66,7 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
60 break; 66 break;
61 67
62 case NETDEV_FEAT_CHANGE: 68 case NETDEV_FEAT_CHANGE:
63 spin_lock_bh(&br->lock); 69 netdev_update_features(br->dev);
64 if (netif_running(br->dev))
65 br_features_recompute(br);
66 spin_unlock_bh(&br->lock);
67 break; 70 break;
68 71
69 case NETDEV_DOWN: 72 case NETDEV_DOWN:
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 387013d33745..54578f274d85 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -64,7 +64,8 @@ struct net_bridge_fdb_entry
64 struct net_bridge_port *dst; 64 struct net_bridge_port *dst;
65 65
66 struct rcu_head rcu; 66 struct rcu_head rcu;
67 unsigned long ageing_timer; 67 unsigned long updated;
68 unsigned long used;
68 mac_addr addr; 69 mac_addr addr;
69 unsigned char is_local; 70 unsigned char is_local;
70 unsigned char is_static; 71 unsigned char is_static;
@@ -182,7 +183,6 @@ struct net_bridge
182 struct br_cpu_netstats __percpu *stats; 183 struct br_cpu_netstats __percpu *stats;
183 spinlock_t hash_lock; 184 spinlock_t hash_lock;
184 struct hlist_head hash[BR_HASH_SIZE]; 185 struct hlist_head hash[BR_HASH_SIZE];
185 u32 feature_mask;
186#ifdef CONFIG_BRIDGE_NETFILTER 186#ifdef CONFIG_BRIDGE_NETFILTER
187 struct rtable fake_rtable; 187 struct rtable fake_rtable;
188 bool nf_call_iptables; 188 bool nf_call_iptables;
@@ -353,6 +353,9 @@ extern int br_fdb_insert(struct net_bridge *br,
353extern void br_fdb_update(struct net_bridge *br, 353extern void br_fdb_update(struct net_bridge *br,
354 struct net_bridge_port *source, 354 struct net_bridge_port *source,
355 const unsigned char *addr); 355 const unsigned char *addr);
356extern int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb);
357extern int br_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
358extern int br_fdb_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
356 359
357/* br_forward.c */ 360/* br_forward.c */
358extern void br_deliver(const struct net_bridge_port *to, 361extern void br_deliver(const struct net_bridge_port *to,
@@ -375,7 +378,7 @@ extern int br_add_if(struct net_bridge *br,
375extern int br_del_if(struct net_bridge *br, 378extern int br_del_if(struct net_bridge *br,
376 struct net_device *dev); 379 struct net_device *dev);
377extern int br_min_mtu(const struct net_bridge *br); 380extern int br_min_mtu(const struct net_bridge *br);
378extern void br_features_recompute(struct net_bridge *br); 381extern u32 br_features_recompute(struct net_bridge *br, u32 features);
379 382
380/* br_input.c */ 383/* br_input.c */
381extern int br_handle_frame_finish(struct sk_buff *skb); 384extern int br_handle_frame_finish(struct sk_buff *skb);
@@ -491,6 +494,11 @@ extern struct net_bridge_port *br_get_port(struct net_bridge *br,
491extern void br_init_port(struct net_bridge_port *p); 494extern void br_init_port(struct net_bridge_port *p);
492extern void br_become_designated_port(struct net_bridge_port *p); 495extern void br_become_designated_port(struct net_bridge_port *p);
493 496
497extern int br_set_forward_delay(struct net_bridge *br, unsigned long x);
498extern int br_set_hello_time(struct net_bridge *br, unsigned long x);
499extern int br_set_max_age(struct net_bridge *br, unsigned long x);
500
501
494/* br_stp_if.c */ 502/* br_stp_if.c */
495extern void br_stp_enable_bridge(struct net_bridge *br); 503extern void br_stp_enable_bridge(struct net_bridge *br);
496extern void br_stp_disable_bridge(struct net_bridge *br); 504extern void br_stp_disable_bridge(struct net_bridge *br);
@@ -501,10 +509,10 @@ extern bool br_stp_recalculate_bridge_id(struct net_bridge *br);
501extern void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *a); 509extern void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *a);
502extern void br_stp_set_bridge_priority(struct net_bridge *br, 510extern void br_stp_set_bridge_priority(struct net_bridge *br,
503 u16 newprio); 511 u16 newprio);
504extern void br_stp_set_port_priority(struct net_bridge_port *p, 512extern int br_stp_set_port_priority(struct net_bridge_port *p,
505 u8 newprio); 513 unsigned long newprio);
506extern void br_stp_set_path_cost(struct net_bridge_port *p, 514extern int br_stp_set_path_cost(struct net_bridge_port *p,
507 u32 path_cost); 515 unsigned long path_cost);
508extern ssize_t br_show_bridge_id(char *buf, const struct bridge_id *id); 516extern ssize_t br_show_bridge_id(char *buf, const struct bridge_id *id);
509 517
510/* br_stp_bpdu.c */ 518/* br_stp_bpdu.c */
diff --git a/net/bridge/br_private_stp.h b/net/bridge/br_private_stp.h
index 8b650f7fbfa0..642ef47a867e 100644
--- a/net/bridge/br_private_stp.h
+++ b/net/bridge/br_private_stp.h
@@ -16,6 +16,19 @@
16#define BPDU_TYPE_CONFIG 0 16#define BPDU_TYPE_CONFIG 0
17#define BPDU_TYPE_TCN 0x80 17#define BPDU_TYPE_TCN 0x80
18 18
19/* IEEE 802.1D-1998 timer values */
20#define BR_MIN_HELLO_TIME (1*HZ)
21#define BR_MAX_HELLO_TIME (10*HZ)
22
23#define BR_MIN_FORWARD_DELAY (2*HZ)
24#define BR_MAX_FORWARD_DELAY (30*HZ)
25
26#define BR_MIN_MAX_AGE (6*HZ)
27#define BR_MAX_MAX_AGE (40*HZ)
28
29#define BR_MIN_PATH_COST 1
30#define BR_MAX_PATH_COST 65535
31
19struct br_config_bpdu 32struct br_config_bpdu
20{ 33{
21 unsigned topology_change:1; 34 unsigned topology_change:1;
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index 7370d14f634d..bb4383e84de9 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -484,3 +484,51 @@ void br_received_tcn_bpdu(struct net_bridge_port *p)
484 br_topology_change_acknowledge(p); 484 br_topology_change_acknowledge(p);
485 } 485 }
486} 486}
487
488/* Change bridge STP parameter */
489int br_set_hello_time(struct net_bridge *br, unsigned long val)
490{
491 unsigned long t = clock_t_to_jiffies(val);
492
493 if (t < BR_MIN_HELLO_TIME || t > BR_MAX_HELLO_TIME)
494 return -ERANGE;
495
496 spin_lock_bh(&br->lock);
497 br->bridge_hello_time = t;
498 if (br_is_root_bridge(br))
499 br->hello_time = br->bridge_hello_time;
500 spin_unlock_bh(&br->lock);
501 return 0;
502}
503
504int br_set_max_age(struct net_bridge *br, unsigned long val)
505{
506 unsigned long t = clock_t_to_jiffies(val);
507
508 if (t < BR_MIN_MAX_AGE || t > BR_MAX_MAX_AGE)
509 return -ERANGE;
510
511 spin_lock_bh(&br->lock);
512 br->bridge_max_age = t;
513 if (br_is_root_bridge(br))
514 br->max_age = br->bridge_max_age;
515 spin_unlock_bh(&br->lock);
516 return 0;
517
518}
519
520int br_set_forward_delay(struct net_bridge *br, unsigned long val)
521{
522 unsigned long t = clock_t_to_jiffies(val);
523
524 if (br->stp_enabled != BR_NO_STP &&
525 (t < BR_MIN_FORWARD_DELAY || t > BR_MAX_FORWARD_DELAY))
526 return -ERANGE;
527
528 spin_lock_bh(&br->lock);
529 br->bridge_forward_delay = t;
530 if (br_is_root_bridge(br))
531 br->forward_delay = br->bridge_forward_delay;
532 spin_unlock_bh(&br->lock);
533 return 0;
534}
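
The new br_set_hello_time()/br_set_max_age()/br_set_forward_delay() helpers centralize what the ioctl and sysfs paths used to open-code: convert the caller's clock_t value to jiffies, range-check it against the 802.1D-1998 limits from br_private_stp.h, and update the bridge (and, if it is the root bridge, the operational value) under br->lock. A userspace model of the setter shape, with HZ and USER_HZ pinned to 100 so one clock_t tick is one jiffy:

#include <errno.h>
#include <stdio.h>

#define HZ 100
#define USER_HZ 100
#define BR_MIN_HELLO_TIME (1 * HZ)
#define BR_MAX_HELLO_TIME (10 * HZ)

struct bridge { long bridge_hello_time, hello_time; int is_root; };

static long clock_t_to_jiffies(long x)
{
	return x * HZ / USER_HZ;
}

static int br_set_hello_time(struct bridge *br, long val)
{
	long t = clock_t_to_jiffies(val);

	if (t < BR_MIN_HELLO_TIME || t > BR_MAX_HELLO_TIME)
		return -ERANGE;	/* outside the 802.1D-1998 window */

	/* the kernel takes br->lock around this update */
	br->bridge_hello_time = t;
	if (br->is_root)	/* root bridge runs on its own parameters */
		br->hello_time = t;
	return 0;
}

int main(void)
{
	struct bridge br = { .is_root = 1 };

	/* 2 s is accepted, 50 s is rejected: prints "0 -34" on Linux */
	printf("%d %d\n", br_set_hello_time(&br, 200),
	       br_set_hello_time(&br, 5000));
	return 0;
}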
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 9b61d09de9b9..6f615b8192f4 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -20,7 +20,7 @@
 
 
 /* Port id is composed of priority and port number.
- * NB: least significant bits of priority are dropped to
+ * NB: some bits of priority are dropped to
  * make room for more ports.
  */
 static inline port_id br_make_port_id(__u8 priority, __u16 port_no)
@@ -29,6 +29,8 @@ static inline port_id br_make_port_id(__u8 priority, __u16 port_no)
 		| (port_no & ((1<<BR_PORT_BITS)-1));
 }
 
+#define BR_MAX_PORT_PRIORITY ((u16)~0 >> BR_PORT_BITS)
+
 /* called under bridge lock */
 void br_init_port(struct net_bridge_port *p)
 {
@@ -255,10 +257,14 @@ void br_stp_set_bridge_priority(struct net_bridge *br, u16 newprio)
 }
 
 /* called under bridge lock */
-void br_stp_set_port_priority(struct net_bridge_port *p, u8 newprio)
+int br_stp_set_port_priority(struct net_bridge_port *p, unsigned long newprio)
 {
-	port_id new_port_id = br_make_port_id(newprio, p->port_no);
+	port_id new_port_id;
+
+	if (newprio > BR_MAX_PORT_PRIORITY)
+		return -ERANGE;
 
+	new_port_id = br_make_port_id(newprio, p->port_no);
 	if (br_is_designated_port(p))
 		p->designated_port = new_port_id;
 
@@ -269,14 +275,21 @@ void br_stp_set_port_priority(struct net_bridge_port *p, u8 newprio)
 		br_become_designated_port(p);
 		br_port_state_selection(p->br);
 	}
+
+	return 0;
 }
 
 /* called under bridge lock */
-void br_stp_set_path_cost(struct net_bridge_port *p, u32 path_cost)
+int br_stp_set_path_cost(struct net_bridge_port *p, unsigned long path_cost)
 {
+	if (path_cost < BR_MIN_PATH_COST ||
+	    path_cost > BR_MAX_PATH_COST)
+		return -ERANGE;
+
 	p->path_cost = path_cost;
 	br_configuration_update(p->br);
 	br_port_state_selection(p->br);
+	return 0;
 }
 
 ssize_t br_show_bridge_id(char *buf, const struct bridge_id *id)
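
BR_MAX_PORT_PRIORITY packs the priority into whatever bits remain after BR_PORT_BITS of port number. A small arithmetic check, assuming the in-tree value BR_PORT_BITS == 10, showing it admits exactly the 0..63 range the old sysfs check enforced:

    #include <stdio.h>
    #include <stdint.h>

    #define BR_PORT_BITS 10		/* in-tree value from br_private.h */
    #define BR_MAX_PORT_PRIORITY ((uint16_t)~0 >> BR_PORT_BITS)

    int main(void)
    {
        /* 0xffff >> 10 == 63, so priorities 0..63 pass; the old sysfs
         * test rejected v >= (1 << (16 - BR_PORT_BITS)) == 64, i.e.
         * both checks admit the same range. */
        printf("max = %u, old limit = %d\n",
               (unsigned)BR_MAX_PORT_PRIORITY, 1 << (16 - BR_PORT_BITS));
        return 0;
    }
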
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index 5c1e5559ebba..68b893ea8c3a 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -43,9 +43,7 @@ static ssize_t store_bridge_parm(struct device *d,
 	if (endp == buf)
 		return -EINVAL;
 
-	spin_lock_bh(&br->lock);
 	err = (*set)(br, val);
-	spin_unlock_bh(&br->lock);
 	return err ? err : len;
 }
 
@@ -57,20 +55,11 @@ static ssize_t show_forward_delay(struct device *d,
 	return sprintf(buf, "%lu\n", jiffies_to_clock_t(br->forward_delay));
 }
 
-static int set_forward_delay(struct net_bridge *br, unsigned long val)
-{
-	unsigned long delay = clock_t_to_jiffies(val);
-	br->forward_delay = delay;
-	if (br_is_root_bridge(br))
-		br->bridge_forward_delay = delay;
-	return 0;
-}
-
 static ssize_t store_forward_delay(struct device *d,
 				   struct device_attribute *attr,
 				   const char *buf, size_t len)
 {
-	return store_bridge_parm(d, buf, len, set_forward_delay);
+	return store_bridge_parm(d, buf, len, br_set_forward_delay);
 }
 static DEVICE_ATTR(forward_delay, S_IRUGO | S_IWUSR,
 		   show_forward_delay, store_forward_delay);
@@ -82,24 +71,11 @@ static ssize_t show_hello_time(struct device *d, struct device_attribute *attr,
 		       jiffies_to_clock_t(to_bridge(d)->hello_time));
 }
 
-static int set_hello_time(struct net_bridge *br, unsigned long val)
-{
-	unsigned long t = clock_t_to_jiffies(val);
-
-	if (t < HZ)
-		return -EINVAL;
-
-	br->hello_time = t;
-	if (br_is_root_bridge(br))
-		br->bridge_hello_time = t;
-	return 0;
-}
-
 static ssize_t store_hello_time(struct device *d,
 				struct device_attribute *attr, const char *buf,
 				size_t len)
 {
-	return store_bridge_parm(d, buf, len, set_hello_time);
+	return store_bridge_parm(d, buf, len, br_set_hello_time);
 }
 static DEVICE_ATTR(hello_time, S_IRUGO | S_IWUSR, show_hello_time,
 		   store_hello_time);
@@ -111,19 +87,10 @@ static ssize_t show_max_age(struct device *d, struct device_attribute *attr,
 		       jiffies_to_clock_t(to_bridge(d)->max_age));
 }
 
-static int set_max_age(struct net_bridge *br, unsigned long val)
-{
-	unsigned long t = clock_t_to_jiffies(val);
-	br->max_age = t;
-	if (br_is_root_bridge(br))
-		br->bridge_max_age = t;
-	return 0;
-}
-
 static ssize_t store_max_age(struct device *d, struct device_attribute *attr,
 			     const char *buf, size_t len)
 {
-	return store_bridge_parm(d, buf, len, set_max_age);
+	return store_bridge_parm(d, buf, len, br_set_max_age);
 }
 static DEVICE_ATTR(max_age, S_IRUGO | S_IWUSR, show_max_age, store_max_age);
 
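
store_bridge_parm() no longer takes br->lock around the callback: the br_set_*() helpers lock internally, so the wrapper only parses and dispatches. A compact userspace model of the resulting contract (names here are illustrative stand-ins, not the kernel structures):

    #include <stdio.h>
    #include <stdlib.h>

    /* Sketch: the setter owns validation and (in the kernel) locking;
     * the store wrapper just parses and forwards. */
    struct bridge { long hello_time; };

    typedef int (*set_fn)(struct bridge *br, unsigned long val);

    static int set_hello(struct bridge *br, unsigned long val)
    {
        if (val < 100 || val > 1000)	/* setter owns the range check */
            return -34;			/* -ERANGE */
        br->hello_time = (long)val;
        return 0;
    }

    static long store_parm(struct bridge *br, const char *buf, set_fn set)
    {
        char *endp;
        unsigned long val = strtoul(buf, &endp, 0);
        int err;

        if (endp == buf)
            return -22;			/* -EINVAL */
        err = set(br, val);		/* no lock taken here any more */
        return err ? err : (long)(endp - buf);
    }

    int main(void)
    {
        struct bridge br = { 0 };
        printf("%ld\n", store_parm(&br, "200", set_hello));	/* 3   */
        printf("%ld\n", store_parm(&br, "5", set_hello));	/* -34 */
        return 0;
    }
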
diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
index fd5799c9bc8d..6229b62749e8 100644
--- a/net/bridge/br_sysfs_if.c
+++ b/net/bridge/br_sysfs_if.c
@@ -23,7 +23,7 @@
 struct brport_attribute {
 	struct attribute	attr;
 	ssize_t (*show)(struct net_bridge_port *, char *);
-	ssize_t (*store)(struct net_bridge_port *, unsigned long);
+	int (*store)(struct net_bridge_port *, unsigned long);
 };
 
 #define BRPORT_ATTR(_name,_mode,_show,_store)		\
@@ -38,27 +38,17 @@ static ssize_t show_path_cost(struct net_bridge_port *p, char *buf)
 {
 	return sprintf(buf, "%d\n", p->path_cost);
 }
-static ssize_t store_path_cost(struct net_bridge_port *p, unsigned long v)
-{
-	br_stp_set_path_cost(p, v);
-	return 0;
-}
+
 static BRPORT_ATTR(path_cost, S_IRUGO | S_IWUSR,
-		   show_path_cost, store_path_cost);
+		   show_path_cost, br_stp_set_path_cost);
 
 static ssize_t show_priority(struct net_bridge_port *p, char *buf)
 {
 	return sprintf(buf, "%d\n", p->priority);
 }
-static ssize_t store_priority(struct net_bridge_port *p, unsigned long v)
-{
-	if (v >= (1<<(16-BR_PORT_BITS)))
-		return -ERANGE;
-	br_stp_set_port_priority(p, v);
-	return 0;
-}
+
 static BRPORT_ATTR(priority, S_IRUGO | S_IWUSR,
-		   show_priority, store_priority);
+		   show_priority, br_stp_set_port_priority);
 
 static ssize_t show_designated_root(struct net_bridge_port *p, char *buf)
 {
@@ -136,7 +126,7 @@ static ssize_t show_hold_timer(struct net_bridge_port *p,
 }
 static BRPORT_ATTR(hold_timer, S_IRUGO, show_hold_timer, NULL);
 
-static ssize_t store_flush(struct net_bridge_port *p, unsigned long v)
+static int store_flush(struct net_bridge_port *p, unsigned long v)
 {
 	br_fdb_delete_by_port(p->br, p, 0); // Don't delete local entry
 	return 0;
@@ -148,7 +138,7 @@ static ssize_t show_hairpin_mode(struct net_bridge_port *p, char *buf)
 	int hairpin_mode = (p->flags & BR_HAIRPIN_MODE) ? 1 : 0;
 	return sprintf(buf, "%d\n", hairpin_mode);
 }
-static ssize_t store_hairpin_mode(struct net_bridge_port *p, unsigned long v)
+static int store_hairpin_mode(struct net_bridge_port *p, unsigned long v)
 {
 	if (v)
 		p->flags |= BR_HAIRPIN_MODE;
@@ -165,7 +155,7 @@ static ssize_t show_multicast_router(struct net_bridge_port *p, char *buf)
 	return sprintf(buf, "%d\n", p->multicast_router);
 }
 
-static ssize_t store_multicast_router(struct net_bridge_port *p,
+static int store_multicast_router(struct net_bridge_port *p,
 				      unsigned long v)
 {
 	return br_multicast_set_port_router(p, v);
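
Narrowing the brport store hook from ssize_t to int is what allows br_stp_set_port_priority() and br_stp_set_path_cost() to be wired into BRPORT_ATTR() directly. A toy dispatch table showing the idea (struct port is a stand-in for struct net_bridge_port):

    #include <stdio.h>

    /* Sketch: an attribute table whose store hook returns 0/-errno,
     * so a validating setter can be used as the hook verbatim. */
    struct port { unsigned long priority; };

    struct brport_attribute {
        const char *name;
        int (*store)(struct port *p, unsigned long v);
    };

    static int set_priority(struct port *p, unsigned long v)
    {
        if (v > 63)
            return -34;			/* -ERANGE */
        p->priority = v;
        return 0;
    }

    static struct brport_attribute attr_priority = { "priority", set_priority };

    int main(void)
    {
        struct port p = { 0 };
        printf("%d\n", attr_priority.store(&p, 32));	/* 0   */
        printf("%d\n", attr_priority.store(&p, 99));	/* -34 */
        return 0;
    }
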
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 1a92b369c820..2b5ca1a0054d 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1883,14 +1883,13 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
 	struct xt_target *wt;
 	void *dst = NULL;
 	int off, pad = 0;
-	unsigned int size_kern, entry_offset, match_size = mwt->match_size;
+	unsigned int size_kern, match_size = mwt->match_size;
 
 	strlcpy(name, mwt->u.name, sizeof(name));
 
 	if (state->buf_kern_start)
 		dst = state->buf_kern_start + state->buf_kern_offset;
 
-	entry_offset = (unsigned char *) mwt - base;
 	switch (compat_mwt) {
 	case EBT_COMPAT_MATCH:
 		match = try_then_request_module(xt_find_match(NFPROTO_BRIDGE,
@@ -1933,6 +1932,9 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
 		size_kern = wt->targetsize;
 		module_put(wt->me);
 		break;
+
+	default:
+		return -EINVAL;
 	}
 
 	state->buf_kern_offset += match_size + off;
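
Without the new default: arm, an unexpected compat_mwt value would fall through the switch with size_kern never assigned before the size accounting below uses it. A small sketch of the hazard the -EINVAL return closes:

    #include <stdio.h>

    /* Sketch: with no default arm, an unexpected selector would leave
     * *size_kern unassigned for the caller to consume. */
    enum { COMPAT_MATCH, COMPAT_WATCHER, COMPAT_TARGET };

    static int size_for(int kind, unsigned int *size_kern)
    {
        switch (kind) {
        case COMPAT_MATCH:   *size_kern = 16; break;
        case COMPAT_WATCHER: *size_kern = 24; break;
        case COMPAT_TARGET:  *size_kern = 32; break;
        default:
            return -22;	/* -EINVAL: reject rather than use garbage */
        }
        return 0;
    }

    int main(void)
    {
        unsigned int sz;
        if (size_for(42, &sz) < 0)
            printf("rejected unexpected kind\n");
        return 0;
    }
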
diff --git a/net/caif/Makefile b/net/caif/Makefile
index 9d38e406e4a4..ebcd4e7e6f47 100644
--- a/net/caif/Makefile
+++ b/net/caif/Makefile
@@ -5,7 +5,7 @@ caif-y := caif_dev.o \
 	cffrml.o cfveil.o cfdbgl.o\
 	cfserl.o cfdgml.o  \
 	cfrfml.o cfvidl.o cfutill.o \
-	cfsrvl.o cfpkt_skbuff.o caif_config_util.o
+	cfsrvl.o cfpkt_skbuff.o
 
 obj-$(CONFIG_CAIF) += caif.o
 obj-$(CONFIG_CAIF_NETDEV) += chnl_net.o
diff --git a/net/caif/caif_config_util.c b/net/caif/caif_config_util.c
deleted file mode 100644
index d522d8c1703e..000000000000
--- a/net/caif/caif_config_util.c
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Copyright (C) ST-Ericsson AB 2010
- * Author:	Sjur Brendeland sjur.brandeland@stericsson.com
- * License terms: GNU General Public License (GPL) version 2
- */
-
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <net/caif/cfctrl.h>
-#include <net/caif/cfcnfg.h>
-#include <net/caif/caif_dev.h>
-
-int connect_req_to_link_param(struct cfcnfg *cnfg,
-			      struct caif_connect_request *s,
-			      struct cfctrl_link_param *l)
-{
-	struct dev_info *dev_info;
-	enum cfcnfg_phy_preference pref;
-	int res;
-
-	memset(l, 0, sizeof(*l));
-	/* In caif protocol low value is high priority */
-	l->priority = CAIF_PRIO_MAX - s->priority + 1;
-
-	if (s->ifindex != 0){
-		res = cfcnfg_get_id_from_ifi(cnfg, s->ifindex);
-		if (res < 0)
-			return res;
-		l->phyid = res;
-	}
-	else {
-		switch (s->link_selector) {
-		case CAIF_LINK_HIGH_BANDW:
-			pref = CFPHYPREF_HIGH_BW;
-			break;
-		case CAIF_LINK_LOW_LATENCY:
-			pref = CFPHYPREF_LOW_LAT;
-			break;
-		default:
-			return -EINVAL;
-		}
-		dev_info = cfcnfg_get_phyid(cnfg, pref);
-		if (dev_info == NULL)
-			return -ENODEV;
-		l->phyid = dev_info->id;
-	}
-	switch (s->protocol) {
-	case CAIFPROTO_AT:
-		l->linktype = CFCTRL_SRV_VEI;
-		if (s->sockaddr.u.at.type == CAIF_ATTYPE_PLAIN)
-			l->chtype = 0x02;
-		else
-			l->chtype = s->sockaddr.u.at.type;
-		l->endpoint = 0x00;
-		break;
-	case CAIFPROTO_DATAGRAM:
-		l->linktype = CFCTRL_SRV_DATAGRAM;
-		l->chtype = 0x00;
-		l->u.datagram.connid = s->sockaddr.u.dgm.connection_id;
-		break;
-	case CAIFPROTO_DATAGRAM_LOOP:
-		l->linktype = CFCTRL_SRV_DATAGRAM;
-		l->chtype = 0x03;
-		l->endpoint = 0x00;
-		l->u.datagram.connid = s->sockaddr.u.dgm.connection_id;
-		break;
-	case CAIFPROTO_RFM:
-		l->linktype = CFCTRL_SRV_RFM;
-		l->u.datagram.connid = s->sockaddr.u.rfm.connection_id;
-		strncpy(l->u.rfm.volume, s->sockaddr.u.rfm.volume,
-			sizeof(l->u.rfm.volume)-1);
-		l->u.rfm.volume[sizeof(l->u.rfm.volume)-1] = 0;
-		break;
-	case CAIFPROTO_UTIL:
-		l->linktype = CFCTRL_SRV_UTIL;
-		l->endpoint = 0x00;
-		l->chtype = 0x00;
-		strncpy(l->u.utility.name, s->sockaddr.u.util.service,
-			sizeof(l->u.utility.name)-1);
-		l->u.utility.name[sizeof(l->u.utility.name)-1] = 0;
-		caif_assert(sizeof(l->u.utility.name) > 10);
-		l->u.utility.paramlen = s->param.size;
-		if (l->u.utility.paramlen > sizeof(l->u.utility.params))
-			l->u.utility.paramlen = sizeof(l->u.utility.params);
-
-		memcpy(l->u.utility.params, s->param.data,
-			l->u.utility.paramlen);
-
-		break;
-	case CAIFPROTO_DEBUG:
-		l->linktype = CFCTRL_SRV_DBG;
-		l->endpoint = s->sockaddr.u.dbg.service;
-		l->chtype = s->sockaddr.u.dbg.type;
-		break;
-	default:
-		return -EINVAL;
-	}
-	return 0;
-}
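
One detail worth noting from the deleted helper (it reappears inside cfcnfg.c later in this diff): CAIF treats numerically low priorities as most urgent, hence l->priority = CAIF_PRIO_MAX - s->priority + 1. A quick check of that inversion, assuming the uapi value CAIF_PRIO_MAX == 0x1F:

    #include <stdio.h>

    /* Sketch of the priority inversion; CAIF_PRIO_MAX == 0x1F is assumed
     * here for illustration. */
    #define CAIF_PRIO_MAX 0x1F

    int main(void)
    {
        for (int s = 0; s <= CAIF_PRIO_MAX; s += CAIF_PRIO_MAX) {
            /* socket prio 0 (lowest) -> CAIF 32; 31 (highest) -> 1 */
            printf("sock %2d -> caif %2d\n", s, CAIF_PRIO_MAX - s + 1);
        }
        return 0;
    }
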
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index a42a408306e4..682c0fedf360 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -12,49 +12,51 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
 
 #include <linux/version.h>
-#include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/if_arp.h>
 #include <linux/net.h>
 #include <linux/netdevice.h>
-#include <linux/skbuff.h>
-#include <linux/sched.h>
-#include <linux/wait.h>
+#include <linux/mutex.h>
 #include <net/netns/generic.h>
 #include <net/net_namespace.h>
 #include <net/pkt_sched.h>
 #include <net/caif/caif_device.h>
-#include <net/caif/caif_dev.h>
 #include <net/caif/caif_layer.h>
 #include <net/caif/cfpkt.h>
 #include <net/caif/cfcnfg.h>
 
 MODULE_LICENSE("GPL");
-#define TIMEOUT (HZ*5)
 
 /* Used for local tracking of the CAIF net devices */
 struct caif_device_entry {
 	struct cflayer layer;
 	struct list_head list;
-	atomic_t in_use;
-	atomic_t state;
-	u16 phyid;
 	struct net_device *netdev;
-	wait_queue_head_t event;
+	int __percpu *pcpu_refcnt;
 };
 
 struct caif_device_entry_list {
 	struct list_head list;
 	/* Protects simulanous deletes in list */
-	spinlock_t lock;
+	struct mutex lock;
 };
 
 struct caif_net {
+	struct cfcnfg *cfg;
 	struct caif_device_entry_list caifdevs;
 };
 
 static int caif_net_id;
-static struct cfcnfg *cfg;
+
+struct cfcnfg *get_cfcnfg(struct net *net)
+{
+	struct caif_net *caifn;
+	BUG_ON(!net);
+	caifn = net_generic(net, caif_net_id);
+	BUG_ON(!caifn);
+	return caifn->cfg;
+}
+EXPORT_SYMBOL(get_cfcnfg);
 
 static struct caif_device_entry_list *caif_device_list(struct net *net)
 {
@@ -65,19 +67,39 @@ static struct caif_device_entry_list *caif_device_list(struct net *net)
 	return &caifn->caifdevs;
 }
 
+static void caifd_put(struct caif_device_entry *e)
+{
+	irqsafe_cpu_dec(*e->pcpu_refcnt);
+}
+
+static void caifd_hold(struct caif_device_entry *e)
+{
+	irqsafe_cpu_inc(*e->pcpu_refcnt);
+}
+
+static int caifd_refcnt_read(struct caif_device_entry *e)
+{
+	int i, refcnt = 0;
+	for_each_possible_cpu(i)
+		refcnt += *per_cpu_ptr(e->pcpu_refcnt, i);
+	return refcnt;
+}
+
 /* Allocate new CAIF device. */
 static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
 {
 	struct caif_device_entry_list *caifdevs;
 	struct caif_device_entry *caifd;
+
 	caifdevs = caif_device_list(dev_net(dev));
 	BUG_ON(!caifdevs);
+
 	caifd = kzalloc(sizeof(*caifd), GFP_ATOMIC);
 	if (!caifd)
 		return NULL;
+	caifd->pcpu_refcnt = alloc_percpu(int);
 	caifd->netdev = dev;
-	list_add(&caifd->list, &caifdevs->list);
-	init_waitqueue_head(&caifd->event);
+	dev_hold(dev);
 	return caifd;
 }
 
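
caifd_hold()/caifd_put() bump a per-CPU counter instead of a shared atomic, so the hot receive path never bounces a cacheline between CPUs; caifd_refcnt_read() sums all CPUs and is only meaningful once the entry has been unpublished from the RCU list. A plain-C model of the split counter (NCPU and the plain ints stand in for alloc_percpu() and the irqsafe_cpu_* helpers):

    #include <stdio.h>

    /* Sketch: split per-CPU reference counting. */
    #define NCPU 4
    static int refcnt[NCPU];

    static void hold(int cpu) { refcnt[cpu]++; }   /* no shared cacheline */
    static void put(int cpu)  { refcnt[cpu]--; }

    static int refcnt_read(void)
    {
        int i, sum = 0;
        for (i = 0; i < NCPU; i++)     /* only meaningful once the entry  */
            sum += refcnt[i];          /* is unpublished (list_del_rcu)   */
        return sum;
    }

    int main(void)
    {
        hold(0); hold(2); put(0);      /* grab and drop on different CPUs */
        printf("outstanding refs = %d\n", refcnt_read());   /* 1 */
        return 0;
    }
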
@@ -87,98 +109,65 @@ static struct caif_device_entry *caif_get(struct net_device *dev)
 	    caif_device_list(dev_net(dev));
 	struct caif_device_entry *caifd;
 	BUG_ON(!caifdevs);
-	list_for_each_entry(caifd, &caifdevs->list, list) {
+	list_for_each_entry_rcu(caifd, &caifdevs->list, list) {
 		if (caifd->netdev == dev)
 			return caifd;
 	}
 	return NULL;
 }
 
-static void caif_device_destroy(struct net_device *dev)
-{
-	struct caif_device_entry_list *caifdevs =
-	    caif_device_list(dev_net(dev));
-	struct caif_device_entry *caifd;
-	ASSERT_RTNL();
-	if (dev->type != ARPHRD_CAIF)
-		return;
-
-	spin_lock_bh(&caifdevs->lock);
-	caifd = caif_get(dev);
-	if (caifd == NULL) {
-		spin_unlock_bh(&caifdevs->lock);
-		return;
-	}
-
-	list_del(&caifd->list);
-	spin_unlock_bh(&caifdevs->lock);
-
-	kfree(caifd);
-}
-
 static int transmit(struct cflayer *layer, struct cfpkt *pkt)
 {
+	int err;
 	struct caif_device_entry *caifd =
 	    container_of(layer, struct caif_device_entry, layer);
-	struct sk_buff *skb, *skb2;
-	int ret = -EINVAL;
+	struct sk_buff *skb;
+
 	skb = cfpkt_tonative(pkt);
 	skb->dev = caifd->netdev;
-	/*
-	 * Don't allow SKB to be destroyed upon error, but signal resend
-	 * notification to clients. We can't rely on the return value as
-	 * congestion (NET_XMIT_CN) sometimes drops the packet, sometimes don't.
-	 */
-	if (netif_queue_stopped(caifd->netdev))
-		return -EAGAIN;
-	skb2 = skb_get(skb);
-
-	ret = dev_queue_xmit(skb2);
-
-	if (!ret)
-		kfree_skb(skb);
-	else
-		return -EAGAIN;
 
-	return 0;
-}
+	err = dev_queue_xmit(skb);
+	if (err > 0)
+		err = -EIO;
 
-static int modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
-{
-	struct caif_device_entry *caifd;
-	struct caif_dev_common *caifdev;
-	caifd = container_of(layr, struct caif_device_entry, layer);
-	caifdev = netdev_priv(caifd->netdev);
-	if (ctrl == _CAIF_MODEMCMD_PHYIF_USEFULL) {
-		atomic_set(&caifd->in_use, 1);
-		wake_up_interruptible(&caifd->event);
-
-	} else if (ctrl == _CAIF_MODEMCMD_PHYIF_USELESS) {
-		atomic_set(&caifd->in_use, 0);
-		wake_up_interruptible(&caifd->event);
-	}
-	return 0;
+	return err;
 }
 
 /*
- * Stuff received packets to associated sockets.
+ * Stuff received packets into the CAIF stack.
  * On error, returns non-zero and releases the skb.
  */
 static int receive(struct sk_buff *skb, struct net_device *dev,
 		   struct packet_type *pkttype, struct net_device *orig_dev)
 {
-	struct net *net;
 	struct cfpkt *pkt;
 	struct caif_device_entry *caifd;
-	net = dev_net(dev);
+	int err;
+
 	pkt = cfpkt_fromnative(CAIF_DIR_IN, skb);
+
+	rcu_read_lock();
 	caifd = caif_get(dev);
-	if (!caifd || !caifd->layer.up || !caifd->layer.up->receive)
-		return NET_RX_DROP;
 
-	if (caifd->layer.up->receive(caifd->layer.up, pkt))
+	if (!caifd || !caifd->layer.up || !caifd->layer.up->receive ||
+	    !netif_oper_up(caifd->netdev)) {
+		rcu_read_unlock();
+		kfree_skb(skb);
 		return NET_RX_DROP;
+	}
+
+	/* Hold reference to netdevice while using CAIF stack */
+	caifd_hold(caifd);
+	rcu_read_unlock();
+
+	err = caifd->layer.up->receive(caifd->layer.up, pkt);
 
+	/* For -EILSEQ the packet is not freed so so it now */
+	if (err == -EILSEQ)
+		cfpkt_destroy(pkt);
+
+	/* Release reference to stack upwards */
+	caifd_put(caifd);
 	return 0;
 }
 
@@ -189,15 +178,25 @@ static struct packet_type caif_packet_type __read_mostly = {
 
 static void dev_flowctrl(struct net_device *dev, int on)
 {
-	struct caif_device_entry *caifd = caif_get(dev);
-	if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd)
+	struct caif_device_entry *caifd;
+
+	rcu_read_lock();
+
+	caifd = caif_get(dev);
+	if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
+		rcu_read_unlock();
 		return;
+	}
+
+	caifd_hold(caifd);
+	rcu_read_unlock();
 
 	caifd->layer.up->ctrlcmd(caifd->layer.up,
 				 on ?
 				 _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND :
 				 _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
 				 caifd->layer.id);
+	caifd_put(caifd);
 }
 
 /* notify Caif of device events */
@@ -208,37 +207,28 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
 	struct caif_device_entry *caifd = NULL;
 	struct caif_dev_common *caifdev;
 	enum cfcnfg_phy_preference pref;
-	int res = -EINVAL;
 	enum cfcnfg_phy_type phy_type;
+	struct cfcnfg *cfg;
+	struct caif_device_entry_list *caifdevs =
+		caif_device_list(dev_net(dev));
 
 	if (dev->type != ARPHRD_CAIF)
 		return 0;
 
+	cfg = get_cfcnfg(dev_net(dev));
+	if (cfg == NULL)
+		return 0;
+
 	switch (what) {
 	case NETDEV_REGISTER:
-		netdev_info(dev, "register\n");
 		caifd = caif_device_alloc(dev);
-		if (caifd == NULL)
-			break;
+		if (!caifd)
+			return 0;
+
 		caifdev = netdev_priv(dev);
 		caifdev->flowctrl = dev_flowctrl;
-		atomic_set(&caifd->state, what);
-		res = 0;
-		break;
 
-	case NETDEV_UP:
-		netdev_info(dev, "up\n");
-		caifd = caif_get(dev);
-		if (caifd == NULL)
-			break;
-		caifdev = netdev_priv(dev);
-		if (atomic_read(&caifd->state) == NETDEV_UP) {
-			netdev_info(dev, "already up\n");
-			break;
-		}
-		atomic_set(&caifd->state, what);
 		caifd->layer.transmit = transmit;
-		caifd->layer.modemcmd = modemcmd;
 
 		if (caifdev->use_frag)
 			phy_type = CFPHYTYPE_FRAG;
@@ -256,62 +246,94 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
 			pref = CFPHYPREF_HIGH_BW;
 			break;
 		}
-		dev_hold(dev);
-		cfcnfg_add_phy_layer(get_caif_conf(),
+		strncpy(caifd->layer.name, dev->name,
+			sizeof(caifd->layer.name) - 1);
+		caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;
+
+		mutex_lock(&caifdevs->lock);
+		list_add_rcu(&caifd->list, &caifdevs->list);
+
+		cfcnfg_add_phy_layer(cfg,
 				     phy_type,
 				     dev,
 				     &caifd->layer,
-				     &caifd->phyid,
 				     pref,
 				     caifdev->use_fcs,
 				     caifdev->use_stx);
-		strncpy(caifd->layer.name, dev->name,
-			sizeof(caifd->layer.name) - 1);
-		caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;
+		mutex_unlock(&caifdevs->lock);
 		break;
 
-	case NETDEV_GOING_DOWN:
+	case NETDEV_UP:
+		rcu_read_lock();
+
 		caifd = caif_get(dev);
-		if (caifd == NULL)
+		if (caifd == NULL) {
+			rcu_read_unlock();
 			break;
-		netdev_info(dev, "going down\n");
+		}
 
-		if (atomic_read(&caifd->state) == NETDEV_GOING_DOWN ||
-			atomic_read(&caifd->state) == NETDEV_DOWN)
-			break;
+		cfcnfg_set_phy_state(cfg, &caifd->layer, true);
+		rcu_read_unlock();
 
-		atomic_set(&caifd->state, what);
-		if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd)
-			return -EINVAL;
-		caifd->layer.up->ctrlcmd(caifd->layer.up,
-					 _CAIF_CTRLCMD_PHYIF_DOWN_IND,
-					 caifd->layer.id);
-		might_sleep();
-		res = wait_event_interruptible_timeout(caifd->event,
-					atomic_read(&caifd->in_use) == 0,
-					TIMEOUT);
 		break;
 
 	case NETDEV_DOWN:
+		rcu_read_lock();
+
 		caifd = caif_get(dev);
-		if (caifd == NULL)
-			break;
-		netdev_info(dev, "down\n");
-		if (atomic_read(&caifd->in_use))
-			netdev_warn(dev,
-				    "Unregistering an active CAIF device\n");
-		cfcnfg_del_phy_layer(get_caif_conf(), &caifd->layer);
-		dev_put(dev);
-		atomic_set(&caifd->state, what);
+		if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
+			rcu_read_unlock();
+			return -EINVAL;
+		}
+
+		cfcnfg_set_phy_state(cfg, &caifd->layer, false);
+		caifd_hold(caifd);
+		rcu_read_unlock();
+
+		caifd->layer.up->ctrlcmd(caifd->layer.up,
+					 _CAIF_CTRLCMD_PHYIF_DOWN_IND,
+					 caifd->layer.id);
+		caifd_put(caifd);
 		break;
 
 	case NETDEV_UNREGISTER:
+		mutex_lock(&caifdevs->lock);
+
 		caifd = caif_get(dev);
-		if (caifd == NULL)
+		if (caifd == NULL) {
+			mutex_unlock(&caifdevs->lock);
+			break;
+		}
+		list_del_rcu(&caifd->list);
+
+		/*
+		 * NETDEV_UNREGISTER is called repeatedly until all reference
+		 * counts for the net-device are released. If references to
+		 * caifd is taken, simply ignore NETDEV_UNREGISTER and wait for
+		 * the next call to NETDEV_UNREGISTER.
+		 *
+		 * If any packets are in flight down the CAIF Stack,
+		 * cfcnfg_del_phy_layer will return nonzero.
+		 * If no packets are in flight, the CAIF Stack associated
+		 * with the net-device un-registering is freed.
+		 */
+
+		if (caifd_refcnt_read(caifd) != 0 ||
+			cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0) {
+
+			pr_info("Wait for device inuse\n");
+			/* Enrole device if CAIF Stack is still in use */
+			list_add_rcu(&caifd->list, &caifdevs->list);
+			mutex_unlock(&caifdevs->lock);
 			break;
-		netdev_info(dev, "unregister\n");
-		atomic_set(&caifd->state, what);
-		caif_device_destroy(dev);
+		}
+
+		synchronize_rcu();
+		dev_put(caifd->netdev);
+		free_percpu(caifd->pcpu_refcnt);
+		kfree(caifd);
+
+		mutex_unlock(&caifdevs->lock);
 		break;
 	}
 	return 0;
@@ -322,61 +344,60 @@ static struct notifier_block caif_device_notifier = {
 	.priority = 0,
 };
 
-
-struct cfcnfg *get_caif_conf(void)
-{
-	return cfg;
-}
-EXPORT_SYMBOL(get_caif_conf);
-
-int caif_connect_client(struct caif_connect_request *conn_req,
-			struct cflayer *client_layer, int *ifindex,
-			int *headroom, int *tailroom)
-{
-	struct cfctrl_link_param param;
-	int ret;
-	ret = connect_req_to_link_param(get_caif_conf(), conn_req, &param);
-	if (ret)
-		return ret;
-	/* Hook up the adaptation layer. */
-	return cfcnfg_add_adaptation_layer(get_caif_conf(), &param,
-					client_layer, ifindex,
-					headroom, tailroom);
-}
-EXPORT_SYMBOL(caif_connect_client);
-
-int caif_disconnect_client(struct cflayer *adap_layer)
-{
-	return cfcnfg_disconn_adapt_layer(get_caif_conf(), adap_layer);
-}
-EXPORT_SYMBOL(caif_disconnect_client);
-
-void caif_release_client(struct cflayer *adap_layer)
-{
-	cfcnfg_release_adap_layer(adap_layer);
-}
-EXPORT_SYMBOL(caif_release_client);
-
 /* Per-namespace Caif devices handling */
 static int caif_init_net(struct net *net)
 {
 	struct caif_net *caifn = net_generic(net, caif_net_id);
+	BUG_ON(!caifn);
 	INIT_LIST_HEAD(&caifn->caifdevs.list);
-	spin_lock_init(&caifn->caifdevs.lock);
+	mutex_init(&caifn->caifdevs.lock);
+
+	caifn->cfg = cfcnfg_create();
+	if (!caifn->cfg) {
+		pr_warn("can't create cfcnfg\n");
+		return -ENOMEM;
+	}
+
 	return 0;
 }
 
 static void caif_exit_net(struct net *net)
 {
-	struct net_device *dev;
-	int res;
+	struct caif_device_entry *caifd, *tmp;
+	struct caif_device_entry_list *caifdevs =
+		caif_device_list(net);
+	struct cfcnfg *cfg;
+
 	rtnl_lock();
-	for_each_netdev(net, dev) {
-		if (dev->type != ARPHRD_CAIF)
-			continue;
-		res = dev_close(dev);
-		caif_device_destroy(dev);
+	mutex_lock(&caifdevs->lock);
+
+	cfg = get_cfcnfg(net);
+	if (cfg == NULL) {
+		mutex_unlock(&caifdevs->lock);
+		return;
 	}
+
+	list_for_each_entry_safe(caifd, tmp, &caifdevs->list, list) {
+		int i = 0;
+		list_del_rcu(&caifd->list);
+		cfcnfg_set_phy_state(cfg, &caifd->layer, false);
+
+		while (i < 10 &&
+			(caifd_refcnt_read(caifd) != 0 ||
+			cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0)) {
+
+			pr_info("Wait for device inuse\n");
+			msleep(250);
+			i++;
+		}
+		synchronize_rcu();
+		dev_put(caifd->netdev);
+		free_percpu(caifd->pcpu_refcnt);
+		kfree(caifd);
+	}
+	cfcnfg_remove(cfg);
+
+	mutex_unlock(&caifdevs->lock);
 	rtnl_unlock();
 }
 
@@ -391,32 +412,23 @@ static struct pernet_operations caif_net_ops = {
 static int __init caif_device_init(void)
 {
 	int result;
-	cfg = cfcnfg_create();
-	if (!cfg) {
-		pr_warn("can't create cfcnfg\n");
-		goto err_cfcnfg_create_failed;
-	}
+
 	result = register_pernet_device(&caif_net_ops);
 
-	if (result) {
-		kfree(cfg);
-		cfg = NULL;
+	if (result)
 		return result;
-	}
-	dev_add_pack(&caif_packet_type);
+
 	register_netdevice_notifier(&caif_device_notifier);
+	dev_add_pack(&caif_packet_type);
 
 	return result;
-err_cfcnfg_create_failed:
-	return -ENODEV;
 }
 
 static void __exit caif_device_exit(void)
 {
-	dev_remove_pack(&caif_packet_type);
 	unregister_pernet_device(&caif_net_ops);
 	unregister_netdevice_notifier(&caif_device_notifier);
-	cfcnfg_remove(cfg);
+	dev_remove_pack(&caif_packet_type);
 }
 
 module_init(caif_device_init);
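
The NETDEV_UNREGISTER comment earlier in this file describes a retry protocol: unpublish the entry, and if references or in-flight packets remain, re-enrole it and wait for the next notifier call. A userspace model of that decision, with 'published' standing in for RCU list membership and 'refs' for caifd_refcnt_read():

    #include <stdio.h>

    /* Sketch of the deferred-free protocol, modelled without RCU. */
    struct entry { int published; int refs; };

    static int try_unregister(struct entry *e)
    {
        e->published = 0;              /* list_del_rcu() */
        if (e->refs != 0) {            /* packets still in flight */
            e->published = 1;          /* re-enrole; retry on next event */
            return 0;
        }
        /* synchronize_rcu(); dev_put(); free_percpu(); kfree(); */
        return 1;
    }

    int main(void)
    {
        struct entry e = { 1, 1 };
        printf("freed=%d\n", try_unregister(&e));   /* 0: still in use */
        e.refs = 0;
        printf("freed=%d\n", try_unregister(&e));   /* 1 */
        return 0;
    }
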
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 37a4034dfc29..a98628086452 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -19,7 +19,7 @@
 #include <linux/uaccess.h>
 #include <linux/debugfs.h>
 #include <linux/caif/caif_socket.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 #include <net/sock.h>
 #include <net/tcp_states.h>
 #include <net/caif/caif_layer.h>
@@ -48,6 +48,7 @@ static struct dentry *debugfsdir;
 #ifdef CONFIG_DEBUG_FS
 struct debug_fs_counter {
 	atomic_t caif_nr_socks;
+	atomic_t caif_sock_create;
 	atomic_t num_connect_req;
 	atomic_t num_connect_resp;
 	atomic_t num_connect_fail_resp;
@@ -59,11 +60,11 @@ struct debug_fs_counter {
 	atomic_t num_rx_flow_on;
 };
 static struct debug_fs_counter cnt;
-#define	dbfs_atomic_inc(v) atomic_inc(v)
-#define	dbfs_atomic_dec(v) atomic_dec(v)
+#define	dbfs_atomic_inc(v) atomic_inc_return(v)
+#define	dbfs_atomic_dec(v) atomic_dec_return(v)
 #else
-#define	dbfs_atomic_inc(v)
-#define	dbfs_atomic_dec(v)
+#define	dbfs_atomic_inc(v) 0
+#define	dbfs_atomic_dec(v) 0
 #endif
 
 struct caifsock {
@@ -155,9 +156,10 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 
 	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
 		(unsigned)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) {
-		pr_debug("sending flow OFF (queue len = %d %d)\n",
-			atomic_read(&cf_sk->sk.sk_rmem_alloc),
-			sk_rcvbuf_lowwater(cf_sk));
+		if (net_ratelimit())
+			pr_debug("sending flow OFF (queue len = %d %d)\n",
+					atomic_read(&cf_sk->sk.sk_rmem_alloc),
+					sk_rcvbuf_lowwater(cf_sk));
 		set_rx_flow_off(cf_sk);
 		dbfs_atomic_inc(&cnt.num_rx_flow_off);
 		caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
@@ -168,7 +170,8 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 		return err;
 	if (!sk_rmem_schedule(sk, skb->truesize) && rx_flow_is_on(cf_sk)) {
 		set_rx_flow_off(cf_sk);
-		pr_debug("sending flow OFF due to rmem_schedule\n");
+		if (net_ratelimit())
+			pr_debug("sending flow OFF due to rmem_schedule\n");
 		dbfs_atomic_inc(&cnt.num_rx_flow_off);
 		caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
 	}
@@ -202,13 +205,25 @@ static int caif_sktrecv_cb(struct cflayer *layr, struct cfpkt *pkt)
 	skb = cfpkt_tonative(pkt);
 
 	if (unlikely(cf_sk->sk.sk_state != CAIF_CONNECTED)) {
-		cfpkt_destroy(pkt);
+		kfree_skb(skb);
 		return 0;
 	}
 	caif_queue_rcv_skb(&cf_sk->sk, skb);
 	return 0;
 }
 
+static void cfsk_hold(struct cflayer *layr)
+{
+	struct caifsock *cf_sk = container_of(layr, struct caifsock, layer);
+	sock_hold(&cf_sk->sk);
+}
+
+static void cfsk_put(struct cflayer *layr)
+{
+	struct caifsock *cf_sk = container_of(layr, struct caifsock, layer);
+	sock_put(&cf_sk->sk);
+}
+
 /* Packet Control Callback function called from CAIF */
 static void caif_ctrl_cb(struct cflayer *layr,
 			 enum caif_ctrlcmd flow,
@@ -232,6 +247,8 @@ static void caif_ctrl_cb(struct cflayer *layr,
 
 	case CAIF_CTRLCMD_INIT_RSP:
 		/* We're now connected */
+		caif_client_register_refcnt(&cf_sk->layer,
+						cfsk_hold, cfsk_put);
 		dbfs_atomic_inc(&cnt.num_connect_resp);
 		cf_sk->sk.sk_state = CAIF_CONNECTED;
 		set_tx_flow_on(cf_sk);
@@ -242,7 +259,6 @@ static void caif_ctrl_cb(struct cflayer *layr,
 		/* We're now disconnected */
 		cf_sk->sk.sk_state = CAIF_DISCONNECTED;
 		cf_sk->sk.sk_state_change(&cf_sk->sk);
-		cfcnfg_release_adap_layer(&cf_sk->layer);
 		break;
 
 	case CAIF_CTRLCMD_INIT_FAIL_RSP:
@@ -519,43 +535,14 @@ static int transmit_skb(struct sk_buff *skb, struct caifsock *cf_sk,
 			int noblock, long timeo)
 {
 	struct cfpkt *pkt;
-	int ret, loopcnt = 0;
 
 	pkt = cfpkt_fromnative(CAIF_DIR_OUT, skb);
-	memset(cfpkt_info(pkt), 0, sizeof(struct caif_payload_info));
-	do {
+	memset(skb->cb, 0, sizeof(struct caif_payload_info));
 
-		ret = -ETIMEDOUT;
+	if (cf_sk->layer.dn == NULL)
+		return -EINVAL;
 
-		/* Slight paranoia, probably not needed. */
-		if (unlikely(loopcnt++ > 1000)) {
-			pr_warn("transmit retries failed, error = %d\n", ret);
-			break;
-		}
-
-		if (cf_sk->layer.dn != NULL)
-			ret = cf_sk->layer.dn->transmit(cf_sk->layer.dn, pkt);
-		if (likely(ret >= 0))
-			break;
-		/* if transmit return -EAGAIN, then retry */
-		if (noblock && ret == -EAGAIN)
-			break;
-		timeo = caif_wait_for_flow_on(cf_sk, 0, timeo, &ret);
-		if (signal_pending(current)) {
-			ret = sock_intr_errno(timeo);
-			break;
-		}
-		if (ret)
-			break;
-		if (cf_sk->sk.sk_state != CAIF_CONNECTED ||
-			sock_flag(&cf_sk->sk, SOCK_DEAD) ||
-			(cf_sk->sk.sk_shutdown & RCV_SHUTDOWN)) {
-			ret = -EPIPE;
-			cf_sk->sk.sk_err = EPIPE;
-			break;
-		}
-	} while (ret == -EAGAIN);
-	return ret;
+	return cf_sk->layer.dn->transmit(cf_sk->layer.dn, pkt);
 }
 
 /* Copied from af_unix:unix_dgram_sendmsg, and adapted to CAIF */
@@ -620,7 +607,9 @@ static int caif_seqpkt_sendmsg(struct kiocb *kiocb, struct socket *sock,
 		goto err;
 	ret = transmit_skb(skb, cf_sk, noblock, timeo);
 	if (ret < 0)
-		goto err;
+		/* skb is already freed */
+		return ret;
+
 	return len;
 err:
 	kfree_skb(skb);
@@ -826,7 +815,8 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
 			sk->sk_state == CAIF_DISCONNECTED);
 	if (sk->sk_shutdown & SHUTDOWN_MASK) {
 		/* Allow re-connect after SHUTDOWN_IND */
-		caif_disconnect_client(&cf_sk->layer);
+		caif_disconnect_client(sock_net(sk), &cf_sk->layer);
+		caif_free_client(&cf_sk->layer);
 		break;
 	}
 	/* No reconnect on a seqpacket socket */
@@ -852,7 +842,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
 	sock->state = SS_CONNECTING;
 	sk->sk_state = CAIF_CONNECTING;
 
-	/* Check priority value coming from socket */
+	/* Check priority value comming from socket */
 	/* if priority value is out of range it will be ajusted */
 	if (cf_sk->sk.sk_priority > CAIF_PRIO_MAX)
 		cf_sk->conn_req.priority = CAIF_PRIO_MAX;
@@ -866,8 +856,10 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
 
 	dbfs_atomic_inc(&cnt.num_connect_req);
 	cf_sk->layer.receive = caif_sktrecv_cb;
-	err = caif_connect_client(&cf_sk->conn_req,
+
+	err = caif_connect_client(sock_net(sk), &cf_sk->conn_req,
 				&cf_sk->layer, &ifindex, &headroom, &tailroom);
+
 	if (err < 0) {
 		cf_sk->sk.sk_socket->state = SS_UNCONNECTED;
 		cf_sk->sk.sk_state = CAIF_DISCONNECTED;
@@ -935,7 +927,6 @@ static int caif_release(struct socket *sock)
 {
 	struct sock *sk = sock->sk;
 	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
-	int res = 0;
 
 	if (!sk)
 		return 0;
@@ -947,13 +938,14 @@ static int caif_release(struct socket *sock)
 	 * caif_queue_rcv_skb checks SOCK_DEAD holding the queue lock,
 	 * this ensures no packets when sock is dead.
 	 */
-	spin_lock(&sk->sk_receive_queue.lock);
+	spin_lock_bh(&sk->sk_receive_queue.lock);
 	sock_set_flag(sk, SOCK_DEAD);
-	spin_unlock(&sk->sk_receive_queue.lock);
+	spin_unlock_bh(&sk->sk_receive_queue.lock);
 	sock->sk = NULL;
 
 	dbfs_atomic_inc(&cnt.num_disconnect);
 
+	WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
 	if (cf_sk->debugfs_socket_dir != NULL)
 		debugfs_remove_recursive(cf_sk->debugfs_socket_dir);
 
@@ -961,19 +953,15 @@ static int caif_release(struct socket *sock)
 	sk->sk_state = CAIF_DISCONNECTED;
 	sk->sk_shutdown = SHUTDOWN_MASK;
 
-	if (cf_sk->sk.sk_socket->state == SS_CONNECTED ||
-		cf_sk->sk.sk_socket->state == SS_CONNECTING)
-		res = caif_disconnect_client(&cf_sk->layer);
-
+	caif_disconnect_client(sock_net(sk), &cf_sk->layer);
 	cf_sk->sk.sk_socket->state = SS_DISCONNECTING;
 	wake_up_interruptible_poll(sk_sleep(sk), POLLERR|POLLHUP);
 
 	sock_orphan(sk);
-	cf_sk->layer.dn = NULL;
 	sk_stream_kill_queues(&cf_sk->sk);
 	release_sock(sk);
 	sock_put(sk);
-	return res;
+	return 0;
 }
 
 /* Copied from af_unix.c:unix_poll(), added CAIF tx_flow handling */
@@ -1060,16 +1048,18 @@ static void caif_sock_destructor(struct sock *sk)
 	caif_assert(sk_unhashed(sk));
 	caif_assert(!sk->sk_socket);
 	if (!sock_flag(sk, SOCK_DEAD)) {
-		pr_info("Attempt to release alive CAIF socket: %p\n", sk);
+		pr_debug("Attempt to release alive CAIF socket: %p\n", sk);
 		return;
 	}
 	sk_stream_kill_queues(&cf_sk->sk);
 	dbfs_atomic_dec(&cnt.caif_nr_socks);
+	caif_free_client(&cf_sk->layer);
 }
 
 static int caif_create(struct net *net, struct socket *sock, int protocol,
 		       int kern)
 {
+	int num;
 	struct sock *sk = NULL;
 	struct caifsock *cf_sk = NULL;
 	static struct proto prot = {.name = "PF_CAIF",
@@ -1127,19 +1117,21 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
 	set_rx_flow_on(cf_sk);
 
 	/* Set default options on configuration */
-	cf_sk->sk.sk_priority= CAIF_PRIO_NORMAL;
+	cf_sk->sk.sk_priority = CAIF_PRIO_NORMAL;
 	cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY;
 	cf_sk->conn_req.protocol = protocol;
 	/* Increase the number of sockets created. */
 	dbfs_atomic_inc(&cnt.caif_nr_socks);
+	num = dbfs_atomic_inc(&cnt.caif_sock_create);
 #ifdef CONFIG_DEBUG_FS
 	if (!IS_ERR(debugfsdir)) {
+
 		/* Fill in some information concerning the misc socket. */
-		snprintf(cf_sk->name, sizeof(cf_sk->name), "cfsk%d",
-				atomic_read(&cnt.caif_nr_socks));
+		snprintf(cf_sk->name, sizeof(cf_sk->name), "cfsk%d", num);
 
 		cf_sk->debugfs_socket_dir =
 			debugfs_create_dir(cf_sk->name, debugfsdir);
+
 		debugfs_create_u32("sk_state", S_IRUSR | S_IWUSR,
 				cf_sk->debugfs_socket_dir,
 				(u32 *) &cf_sk->sk.sk_state);
@@ -1183,6 +1175,9 @@ static int __init caif_sktinit_module(void)
1183 debugfs_create_u32("num_sockets", S_IRUSR | S_IWUSR, 1175 debugfs_create_u32("num_sockets", S_IRUSR | S_IWUSR,
1184 debugfsdir, 1176 debugfsdir,
1185 (u32 *) &cnt.caif_nr_socks); 1177 (u32 *) &cnt.caif_nr_socks);
1178 debugfs_create_u32("num_create", S_IRUSR | S_IWUSR,
1179 debugfsdir,
1180 (u32 *) &cnt.caif_sock_create);
1186 debugfs_create_u32("num_connect_req", S_IRUSR | S_IWUSR, 1181 debugfs_create_u32("num_connect_req", S_IRUSR | S_IWUSR,
1187 debugfsdir, 1182 debugfsdir,
1188 (u32 *) &cnt.num_connect_req); 1183 (u32 *) &cnt.num_connect_req);
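
dbfs_atomic_inc() now returns the post-increment value, so caif_create() can name its debugfs directory from a monotonic creation counter; naming from the live socket count could reuse a name after a socket is destroyed. A tiny illustration:

    #include <stdio.h>

    /* Sketch: live count vs. monotonic create counter for naming. */
    static int nr_socks, sock_create;

    static int create(void)   { nr_socks++; return ++sock_create; }
    static void destroy(void) { nr_socks--; }

    int main(void)
    {
        char name[16];
        create();				/* would be cfsk1 */
        destroy();
        snprintf(name, sizeof(name), "cfsk%d", create());
        printf("%s\n", name);		/* cfsk2, not a reused cfsk1 */
        return 0;
    }
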
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index f1f98d967d8a..52fe33bee029 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
@@ -10,6 +10,7 @@
 #include <linux/stddef.h>
 #include <linux/slab.h>
 #include <linux/netdevice.h>
+#include <linux/module.h>
 #include <net/caif/caif_layer.h>
 #include <net/caif/cfpkt.h>
 #include <net/caif/cfcnfg.h>
@@ -18,11 +19,7 @@
 #include <net/caif/cffrml.h>
 #include <net/caif/cfserl.h>
 #include <net/caif/cfsrvl.h>
-
-#include <linux/module.h>
-#include <asm/atomic.h>
-
-#define MAX_PHY_LAYERS 7
+#include <net/caif/caif_dev.h>
 
 #define container_obj(layr) container_of(layr, struct cfcnfg, layer)
 
@@ -30,6 +27,9 @@
  * to manage physical interfaces
  */
 struct cfcnfg_phyinfo {
+	struct list_head node;
+	bool up;
+
 	/* Pointer to the layer below the MUX (framing layer) */
 	struct cflayer *frm_layer;
 	/* Pointer to the lowest actual physical layer */
@@ -39,9 +39,6 @@ struct cfcnfg_phyinfo {
 	/* Preference of the physical in interface */
 	enum cfcnfg_phy_preference pref;
 
-	/* Reference count, number of channels using the device */
-	int phy_ref_count;
-
 	/* Information about the physical device */
 	struct dev_info dev_info;
 
@@ -59,8 +56,8 @@ struct cfcnfg {
 	struct cflayer layer;
 	struct cflayer *ctrl;
 	struct cflayer *mux;
-	u8 last_phyid;
-	struct cfcnfg_phyinfo phy_layers[MAX_PHY_LAYERS];
+	struct list_head phys;
+	struct mutex lock;
 };
 
 static void cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id,
@@ -76,6 +73,9 @@ struct cfcnfg *cfcnfg_create(void)
 {
 	struct cfcnfg *this;
 	struct cfctrl_rsp *resp;
+
+	might_sleep();
+
 	/* Initiate this layer */
 	this = kzalloc(sizeof(struct cfcnfg), GFP_ATOMIC);
 	if (!this) {
@@ -99,27 +99,33 @@ struct cfcnfg *cfcnfg_create(void)
 	resp->radioset_rsp = cfctrl_resp_func;
 	resp->linksetup_rsp = cfcnfg_linkup_rsp;
 	resp->reject_rsp = cfcnfg_reject_rsp;
-
-	this->last_phyid = 1;
+	INIT_LIST_HEAD(&this->phys);
 
 	cfmuxl_set_uplayer(this->mux, this->ctrl, 0);
 	layer_set_dn(this->ctrl, this->mux);
 	layer_set_up(this->ctrl, this);
+	mutex_init(&this->lock);
+
 	return this;
 out_of_mem:
 	pr_warn("Out of memory\n");
+
+	synchronize_rcu();
+
 	kfree(this->mux);
 	kfree(this->ctrl);
 	kfree(this);
 	return NULL;
 }
-EXPORT_SYMBOL(cfcnfg_create);
 
 void cfcnfg_remove(struct cfcnfg *cfg)
 {
+	might_sleep();
 	if (cfg) {
+		synchronize_rcu();
+
 		kfree(cfg->mux);
-		kfree(cfg->ctrl);
+		cfctrl_remove(cfg->ctrl);
 		kfree(cfg);
 	}
 }
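
The hunks below replace the fixed phy_layers[MAX_PHY_LAYERS] array with an RCU-protected list: lookups traverse with list_for_each_entry_rcu() inside a read-side section, while writers serialize on cnfg->lock and defer frees past a grace period. A kernel-style sketch of that reader/writer split, assuming the usual <linux/rculist.h> primitives (function names here are illustrative, not the patch's):

    /* Reader: lockless traversal inside an RCU read-side section;
     * the result may only be used within that section (or after
     * taking a reference). */
    static struct cfcnfg_phyinfo *lookup_phy(struct cfcnfg *cnfg, u8 phyid)
    {
        struct cfcnfg_phyinfo *phy;

        list_for_each_entry_rcu(phy, &cnfg->phys, node)
            if (phy->id == phyid)
                return phy;
        return NULL;
    }

    /* Writer: serialize on cnfg->lock, unpublish, then wait for all
     * pre-existing readers before freeing. */
    static void remove_phy(struct cfcnfg *cnfg, struct cfcnfg_phyinfo *phy)
    {
        mutex_lock(&cnfg->lock);
        list_del_rcu(&phy->node);
        mutex_unlock(&cnfg->lock);

        synchronize_rcu();
        kfree(phy);
    }
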
@@ -128,132 +134,83 @@ static void cfctrl_resp_func(void)
 {
 }
 
+static struct cfcnfg_phyinfo *cfcnfg_get_phyinfo_rcu(struct cfcnfg *cnfg,
+						     u8 phyid)
+{
+	struct cfcnfg_phyinfo *phy;
+
+	list_for_each_entry_rcu(phy, &cnfg->phys, node)
+		if (phy->id == phyid)
+			return phy;
+	return NULL;
+}
+
 static void cfctrl_enum_resp(void)
 {
 }
 
-struct dev_info *cfcnfg_get_phyid(struct cfcnfg *cnfg,
+static struct dev_info *cfcnfg_get_phyid(struct cfcnfg *cnfg,
 				  enum cfcnfg_phy_preference phy_pref)
 {
-	u16 i;
-
 	/* Try to match with specified preference */
-	for (i = 1; i < MAX_PHY_LAYERS; i++) {
-		if (cnfg->phy_layers[i].id == i &&
-		     cnfg->phy_layers[i].pref == phy_pref &&
-		     cnfg->phy_layers[i].frm_layer != NULL) {
-			caif_assert(cnfg->phy_layers != NULL);
-			caif_assert(cnfg->phy_layers[i].id == i);
-			return &cnfg->phy_layers[i].dev_info;
-		}
+	struct cfcnfg_phyinfo *phy;
+
+	list_for_each_entry_rcu(phy, &cnfg->phys, node) {
+		if (phy->up && phy->pref == phy_pref &&
+				phy->frm_layer != NULL)
+
+			return &phy->dev_info;
 	}
+
 	/* Otherwise just return something */
-	for (i = 1; i < MAX_PHY_LAYERS; i++) {
-		if (cnfg->phy_layers[i].id == i) {
-			caif_assert(cnfg->phy_layers != NULL);
-			caif_assert(cnfg->phy_layers[i].id == i);
-			return &cnfg->phy_layers[i].dev_info;
-		}
-	}
+	list_for_each_entry_rcu(phy, &cnfg->phys, node)
+		if (phy->up)
+			return &phy->dev_info;
 
 	return NULL;
 }
 
-static struct cfcnfg_phyinfo *cfcnfg_get_phyinfo(struct cfcnfg *cnfg,
-						 u8 phyid)
+static int cfcnfg_get_id_from_ifi(struct cfcnfg *cnfg, int ifi)
 {
-	int i;
-	/* Try to match with specified preference */
-	for (i = 0; i < MAX_PHY_LAYERS; i++)
-		if (cnfg->phy_layers[i].frm_layer != NULL &&
-				cnfg->phy_layers[i].id == phyid)
-			return &cnfg->phy_layers[i];
-	return NULL;
-}
+	struct cfcnfg_phyinfo *phy;
 
-
-int cfcnfg_get_id_from_ifi(struct cfcnfg *cnfg, int ifi)
-{
-	int i;
-	for (i = 0; i < MAX_PHY_LAYERS; i++)
-		if (cnfg->phy_layers[i].frm_layer != NULL &&
-				cnfg->phy_layers[i].ifindex == ifi)
-			return i;
+	list_for_each_entry_rcu(phy, &cnfg->phys, node)
+		if (phy->ifindex == ifi && phy->up)
+			return phy->id;
 	return -ENODEV;
 }
 
-int cfcnfg_disconn_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer)
+int caif_disconnect_client(struct net *net, struct cflayer *adap_layer)
 {
-	u8 channel_id = 0;
-	int ret = 0;
-	struct cflayer *servl = NULL;
-	struct cfcnfg_phyinfo *phyinfo = NULL;
-	u8 phyid = 0;
+	u8 channel_id;
+	struct cfcnfg *cfg = get_cfcnfg(net);
 
 	caif_assert(adap_layer != NULL);
+	cfctrl_cancel_req(cfg->ctrl, adap_layer);
 	channel_id = adap_layer->id;
-	if (adap_layer->dn == NULL || channel_id == 0) {
-		pr_err("adap_layer->dn == NULL or adap_layer->id is 0\n");
-		ret = -ENOTCONN;
-		goto end;
-	}
-	servl = cfmuxl_remove_uplayer(cnfg->mux, channel_id);
-	if (servl == NULL) {
-		pr_err("PROTOCOL ERROR - Error removing service_layer Channel_Id(%d)",
-		       channel_id);
-		ret = -EINVAL;
-		goto end;
-	}
-	layer_set_up(servl, NULL);
-	ret = cfctrl_linkdown_req(cnfg->ctrl, channel_id, adap_layer);
-	if (ret)
-		goto end;
-	caif_assert(channel_id == servl->id);
-	if (adap_layer->dn != NULL) {
-		phyid = cfsrvl_getphyid(adap_layer->dn);
-
-		phyinfo = cfcnfg_get_phyinfo(cnfg, phyid);
-		if (phyinfo == NULL) {
-			pr_warn("No interface to send disconnect to\n");
-			ret = -ENODEV;
-			goto end;
-		}
-		if (phyinfo->id != phyid ||
-			phyinfo->phy_layer->id != phyid ||
-			phyinfo->frm_layer->id != phyid) {
-			pr_err("Inconsistency in phy registration\n");
-			ret = -EINVAL;
-			goto end;
-		}
-	}
-	if (phyinfo != NULL && --phyinfo->phy_ref_count == 0 &&
-		phyinfo->phy_layer != NULL &&
-		phyinfo->phy_layer->modemcmd != NULL) {
-		phyinfo->phy_layer->modemcmd(phyinfo->phy_layer,
-					     _CAIF_MODEMCMD_PHYIF_USELESS);
-	}
-end:
-	cfsrvl_put(servl);
-	cfctrl_cancel_req(cnfg->ctrl, adap_layer);
+	if (channel_id != 0) {
+		struct cflayer *servl;
+		servl = cfmuxl_remove_uplayer(cfg->mux, channel_id);
+		if (servl != NULL)
+			layer_set_up(servl, NULL);
+	} else
+		pr_debug("nothing to disconnect\n");
+	cfctrl_linkdown_req(cfg->ctrl, channel_id, adap_layer);
+
+	/* Do RCU sync before initiating cleanup */
+	synchronize_rcu();
 	if (adap_layer->ctrlcmd != NULL)
 		adap_layer->ctrlcmd(adap_layer, CAIF_CTRLCMD_DEINIT_RSP, 0);
-	return ret;
-
-}
-EXPORT_SYMBOL(cfcnfg_disconn_adapt_layer);
+	return 0;
 
-void cfcnfg_release_adap_layer(struct cflayer *adap_layer)
-{
-	if (adap_layer->dn)
-		cfsrvl_put(adap_layer->dn);
 }
-EXPORT_SYMBOL(cfcnfg_release_adap_layer);
+EXPORT_SYMBOL(caif_disconnect_client);
 
 static void cfcnfg_linkdestroy_rsp(struct cflayer *layer, u8 channel_id)
 {
 }
 
-int protohead[CFCTRL_SRV_MASK] = {
+static const int protohead[CFCTRL_SRV_MASK] = {
 	[CFCTRL_SRV_VEI] = 4,
 	[CFCTRL_SRV_DATAGRAM] = 7,
 	[CFCTRL_SRV_UTIL] = 4,
@@ -261,49 +218,157 @@ int protohead[CFCTRL_SRV_MASK] = {
261 [CFCTRL_SRV_DBG] = 3, 218 [CFCTRL_SRV_DBG] = 3,
262}; 219};
263 220
264int cfcnfg_add_adaptation_layer(struct cfcnfg *cnfg, 221
265 struct cfctrl_link_param *param, 222static int caif_connect_req_to_link_param(struct cfcnfg *cnfg,
266 struct cflayer *adap_layer, 223 struct caif_connect_request *s,
267 int *ifindex, 224 struct cfctrl_link_param *l)
225{
226 struct dev_info *dev_info;
227 enum cfcnfg_phy_preference pref;
228 int res;
229
230 memset(l, 0, sizeof(*l));
231	/* In the CAIF protocol, a low value means high priority */
232 l->priority = CAIF_PRIO_MAX - s->priority + 1;
233
234 if (s->ifindex != 0) {
235 res = cfcnfg_get_id_from_ifi(cnfg, s->ifindex);
236 if (res < 0)
237 return res;
238 l->phyid = res;
239 } else {
240 switch (s->link_selector) {
241 case CAIF_LINK_HIGH_BANDW:
242 pref = CFPHYPREF_HIGH_BW;
243 break;
244 case CAIF_LINK_LOW_LATENCY:
245 pref = CFPHYPREF_LOW_LAT;
246 break;
247 default:
248 return -EINVAL;
249 }
250 dev_info = cfcnfg_get_phyid(cnfg, pref);
251 if (dev_info == NULL)
252 return -ENODEV;
253 l->phyid = dev_info->id;
254 }
255 switch (s->protocol) {
256 case CAIFPROTO_AT:
257 l->linktype = CFCTRL_SRV_VEI;
258 l->endpoint = (s->sockaddr.u.at.type >> 2) & 0x3;
259 l->chtype = s->sockaddr.u.at.type & 0x3;
260 break;
261 case CAIFPROTO_DATAGRAM:
262 l->linktype = CFCTRL_SRV_DATAGRAM;
263 l->chtype = 0x00;
264 l->u.datagram.connid = s->sockaddr.u.dgm.connection_id;
265 break;
266 case CAIFPROTO_DATAGRAM_LOOP:
267 l->linktype = CFCTRL_SRV_DATAGRAM;
268 l->chtype = 0x03;
269 l->endpoint = 0x00;
270 l->u.datagram.connid = s->sockaddr.u.dgm.connection_id;
271 break;
272 case CAIFPROTO_RFM:
273 l->linktype = CFCTRL_SRV_RFM;
274 l->u.datagram.connid = s->sockaddr.u.rfm.connection_id;
275 strncpy(l->u.rfm.volume, s->sockaddr.u.rfm.volume,
276 sizeof(l->u.rfm.volume)-1);
277 l->u.rfm.volume[sizeof(l->u.rfm.volume)-1] = 0;
278 break;
279 case CAIFPROTO_UTIL:
280 l->linktype = CFCTRL_SRV_UTIL;
281 l->endpoint = 0x00;
282 l->chtype = 0x00;
283 strncpy(l->u.utility.name, s->sockaddr.u.util.service,
284 sizeof(l->u.utility.name)-1);
285 l->u.utility.name[sizeof(l->u.utility.name)-1] = 0;
286 caif_assert(sizeof(l->u.utility.name) > 10);
287 l->u.utility.paramlen = s->param.size;
288 if (l->u.utility.paramlen > sizeof(l->u.utility.params))
289 l->u.utility.paramlen = sizeof(l->u.utility.params);
290
291 memcpy(l->u.utility.params, s->param.data,
292 l->u.utility.paramlen);
293
294 break;
295 case CAIFPROTO_DEBUG:
296 l->linktype = CFCTRL_SRV_DBG;
297 l->endpoint = s->sockaddr.u.dbg.service;
298 l->chtype = s->sockaddr.u.dbg.type;
299 break;
300 default:
301 return -EINVAL;
302 }
303 return 0;
304}
305
306int caif_connect_client(struct net *net, struct caif_connect_request *conn_req,
307 struct cflayer *adap_layer, int *ifindex,
268 int *proto_head, 308 int *proto_head,
269 int *proto_tail) 309 int *proto_tail)
270{ 310{
271 struct cflayer *frml; 311 struct cflayer *frml;
312 struct cfcnfg_phyinfo *phy;
313 int err;
314 struct cfctrl_link_param param;
315 struct cfcnfg *cfg = get_cfcnfg(net);
316 caif_assert(cfg != NULL);
317
318 rcu_read_lock();
319 err = caif_connect_req_to_link_param(cfg, conn_req, &param);
320 if (err)
321 goto unlock;
322
323 phy = cfcnfg_get_phyinfo_rcu(cfg, param.phyid);
324 if (!phy) {
325 err = -ENODEV;
326 goto unlock;
327 }
328 err = -EINVAL;
329
272 if (adap_layer == NULL) { 330 if (adap_layer == NULL) {
273 pr_err("adap_layer is zero\n"); 331 pr_err("adap_layer is zero\n");
274 return -EINVAL; 332 goto unlock;
275 } 333 }
276 if (adap_layer->receive == NULL) { 334 if (adap_layer->receive == NULL) {
277 pr_err("adap_layer->receive is NULL\n"); 335 pr_err("adap_layer->receive is NULL\n");
278 return -EINVAL; 336 goto unlock;
279 } 337 }
280 if (adap_layer->ctrlcmd == NULL) { 338 if (adap_layer->ctrlcmd == NULL) {
281 pr_err("adap_layer->ctrlcmd == NULL\n"); 339 pr_err("adap_layer->ctrlcmd == NULL\n");
282 return -EINVAL; 340 goto unlock;
283 } 341 }
284 frml = cnfg->phy_layers[param->phyid].frm_layer; 342
343 err = -ENODEV;
344 frml = phy->frm_layer;
285 if (frml == NULL) { 345 if (frml == NULL) {
286 pr_err("Specified PHY type does not exist!\n"); 346 pr_err("Specified PHY type does not exist!\n");
287 return -ENODEV; 347 goto unlock;
288 } 348 }
289 caif_assert(param->phyid == cnfg->phy_layers[param->phyid].id); 349 caif_assert(param.phyid == phy->id);
290 caif_assert(cnfg->phy_layers[param->phyid].frm_layer->id == 350 caif_assert(phy->frm_layer->id ==
291 param->phyid); 351 param.phyid);
292 caif_assert(cnfg->phy_layers[param->phyid].phy_layer->id == 352 caif_assert(phy->phy_layer->id ==
293 param->phyid); 353 param.phyid);
294 354
295 *ifindex = cnfg->phy_layers[param->phyid].ifindex; 355 *ifindex = phy->ifindex;
356 *proto_tail = 2;
296 *proto_head = 357 *proto_head =
297 protohead[param->linktype]+
298 (cnfg->phy_layers[param->phyid].use_stx ? 1 : 0);
299 358
300 *proto_tail = 2; 359 protohead[param.linktype] + (phy->use_stx ? 1 : 0);
360
361 rcu_read_unlock();
301 362
302 /* FIXME: ENUMERATE INITIALLY WHEN ACTIVATING PHYSICAL INTERFACE */ 363 /* FIXME: ENUMERATE INITIALLY WHEN ACTIVATING PHYSICAL INTERFACE */
303 cfctrl_enum_req(cnfg->ctrl, param->phyid); 364 cfctrl_enum_req(cfg->ctrl, param.phyid);
304 return cfctrl_linkup_request(cnfg->ctrl, param, adap_layer); 365 return cfctrl_linkup_request(cfg->ctrl, &param, adap_layer);
366
367unlock:
368 rcu_read_unlock();
369 return err;
305} 370}
306EXPORT_SYMBOL(cfcnfg_add_adaptation_layer); 371EXPORT_SYMBOL(caif_connect_client);
307 372
308static void cfcnfg_reject_rsp(struct cflayer *layer, u8 channel_id, 373static void cfcnfg_reject_rsp(struct cflayer *layer, u8 channel_id,
309 struct cflayer *adapt_layer) 374 struct cflayer *adapt_layer)
@@ -315,32 +380,45 @@ static void cfcnfg_reject_rsp(struct cflayer *layer, u8 channel_id,
315 380
316static void 381static void
317cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv, 382cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv,
318 u8 phyid, struct cflayer *adapt_layer) 383 u8 phyid, struct cflayer *adapt_layer)
319{ 384{
320 struct cfcnfg *cnfg = container_obj(layer); 385 struct cfcnfg *cnfg = container_obj(layer);
321 struct cflayer *servicel = NULL; 386 struct cflayer *servicel = NULL;
322 struct cfcnfg_phyinfo *phyinfo; 387 struct cfcnfg_phyinfo *phyinfo;
323 struct net_device *netdev; 388 struct net_device *netdev;
324 389
390 if (channel_id == 0) {
391 pr_warn("received channel_id zero\n");
392 if (adapt_layer != NULL && adapt_layer->ctrlcmd != NULL)
393 adapt_layer->ctrlcmd(adapt_layer,
394 CAIF_CTRLCMD_INIT_FAIL_RSP, 0);
395 return;
396 }
397
398 rcu_read_lock();
399
325 if (adapt_layer == NULL) { 400 if (adapt_layer == NULL) {
326 		pr_debug("link setup response but no client exist, send linkdown back\n");	401 		pr_debug("link setup response but no client exists, "
	402 			"send linkdown back\n");
327 cfctrl_linkdown_req(cnfg->ctrl, channel_id, NULL); 403 cfctrl_linkdown_req(cnfg->ctrl, channel_id, NULL);
328 return; 404 goto unlock;
329 } 405 }
330 406
331 caif_assert(cnfg != NULL); 407 caif_assert(cnfg != NULL);
332 caif_assert(phyid != 0); 408 caif_assert(phyid != 0);
333 phyinfo = &cnfg->phy_layers[phyid]; 409
410 phyinfo = cfcnfg_get_phyinfo_rcu(cnfg, phyid);
411 if (phyinfo == NULL) {
	412 		pr_err("ERROR: Link Layer Device disappeared "
	413 			"while connecting\n");
414 goto unlock;
415 }
416
417 caif_assert(phyinfo != NULL);
334 caif_assert(phyinfo->id == phyid); 418 caif_assert(phyinfo->id == phyid);
335 caif_assert(phyinfo->phy_layer != NULL); 419 caif_assert(phyinfo->phy_layer != NULL);
336 caif_assert(phyinfo->phy_layer->id == phyid); 420 caif_assert(phyinfo->phy_layer->id == phyid);
337 421
338 phyinfo->phy_ref_count++;
339 if (phyinfo->phy_ref_count == 1 &&
340 phyinfo->phy_layer->modemcmd != NULL) {
341 phyinfo->phy_layer->modemcmd(phyinfo->phy_layer,
342 _CAIF_MODEMCMD_PHYIF_USEFULL);
343 }
344 adapt_layer->id = channel_id; 422 adapt_layer->id = channel_id;
345 423
346 switch (serv) { 424 switch (serv) {
@@ -348,7 +426,8 @@ cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv,
348 servicel = cfvei_create(channel_id, &phyinfo->dev_info); 426 servicel = cfvei_create(channel_id, &phyinfo->dev_info);
349 break; 427 break;
350 case CFCTRL_SRV_DATAGRAM: 428 case CFCTRL_SRV_DATAGRAM:
351 servicel = cfdgml_create(channel_id, &phyinfo->dev_info); 429 servicel = cfdgml_create(channel_id,
430 &phyinfo->dev_info);
352 break; 431 break;
353 case CFCTRL_SRV_RFM: 432 case CFCTRL_SRV_RFM:
354 netdev = phyinfo->dev_info.dev; 433 netdev = phyinfo->dev_info.dev;
@@ -365,94 +444,92 @@ cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv,
365 servicel = cfdbgl_create(channel_id, &phyinfo->dev_info); 444 servicel = cfdbgl_create(channel_id, &phyinfo->dev_info);
366 break; 445 break;
367 default: 446 default:
368 pr_err("Protocol error. Link setup response - unknown channel type\n"); 447 pr_err("Protocol error. Link setup response "
369 return; 448 "- unknown channel type\n");
449 goto unlock;
370 } 450 }
371 if (!servicel) { 451 if (!servicel) {
372 pr_warn("Out of memory\n"); 452 pr_warn("Out of memory\n");
373 return; 453 goto unlock;
374 } 454 }
375 layer_set_dn(servicel, cnfg->mux); 455 layer_set_dn(servicel, cnfg->mux);
376 cfmuxl_set_uplayer(cnfg->mux, servicel, channel_id); 456 cfmuxl_set_uplayer(cnfg->mux, servicel, channel_id);
377 layer_set_up(servicel, adapt_layer); 457 layer_set_up(servicel, adapt_layer);
378 layer_set_dn(adapt_layer, servicel); 458 layer_set_dn(adapt_layer, servicel);
379 cfsrvl_get(servicel); 459
460 rcu_read_unlock();
461
380 servicel->ctrlcmd(servicel, CAIF_CTRLCMD_INIT_RSP, 0); 462 servicel->ctrlcmd(servicel, CAIF_CTRLCMD_INIT_RSP, 0);
463 return;
464unlock:
465 rcu_read_unlock();
381} 466}
382 467
383void 468void
384cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type, 469cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
385 struct net_device *dev, struct cflayer *phy_layer, 470 struct net_device *dev, struct cflayer *phy_layer,
386 u16 *phyid, enum cfcnfg_phy_preference pref, 471 enum cfcnfg_phy_preference pref,
387 bool fcs, bool stx) 472 bool fcs, bool stx)
388{ 473{
389 struct cflayer *frml; 474 struct cflayer *frml;
390 struct cflayer *phy_driver = NULL; 475 struct cflayer *phy_driver = NULL;
476 struct cfcnfg_phyinfo *phyinfo;
391 int i; 477 int i;
478 u8 phyid;
392 479
480 mutex_lock(&cnfg->lock);
393 481
394 	if (cnfg->phy_layers[cnfg->last_phyid].frm_layer == NULL) {	482 	/* CAIF protocol allows a maximum of 6 link layers */
395 *phyid = cnfg->last_phyid; 483 for (i = 0; i < 7; i++) {
396 484 phyid = (dev->ifindex + i) & 0x7;
397 /* range: * 1..(MAX_PHY_LAYERS-1) */ 485 if (phyid == 0)
398 cnfg->last_phyid = 486 continue;
399 (cnfg->last_phyid % (MAX_PHY_LAYERS - 1)) + 1; 487 if (cfcnfg_get_phyinfo_rcu(cnfg, phyid) == NULL)
400 } else { 488 goto got_phyid;
401 *phyid = 0;
402 for (i = 1; i < MAX_PHY_LAYERS; i++) {
403 if (cnfg->phy_layers[i].frm_layer == NULL) {
404 *phyid = i;
405 break;
406 }
407 }
408 }
409 if (*phyid == 0) {
410 pr_err("No Available PHY ID\n");
411 return;
412 } 489 }
490 pr_warn("Too many CAIF Link Layers (max 6)\n");
491 goto out;
492
493got_phyid:
494 phyinfo = kzalloc(sizeof(struct cfcnfg_phyinfo), GFP_ATOMIC);
413 495
414 switch (phy_type) { 496 switch (phy_type) {
415 case CFPHYTYPE_FRAG: 497 case CFPHYTYPE_FRAG:
416 phy_driver = 498 phy_driver =
417 cfserl_create(CFPHYTYPE_FRAG, *phyid, stx); 499 cfserl_create(CFPHYTYPE_FRAG, phyid, stx);
418 if (!phy_driver) { 500 if (!phy_driver) {
419 pr_warn("Out of memory\n"); 501 pr_warn("Out of memory\n");
420 return; 502 goto out;
421 } 503 }
422
423 break; 504 break;
424 case CFPHYTYPE_CAIF: 505 case CFPHYTYPE_CAIF:
425 phy_driver = NULL; 506 phy_driver = NULL;
426 break; 507 break;
427 default: 508 default:
428 pr_err("%d\n", phy_type); 509 goto out;
429 return;
430 break;
431 } 510 }
511 phy_layer->id = phyid;
512 phyinfo->pref = pref;
513 phyinfo->id = phyid;
514 phyinfo->dev_info.id = phyid;
515 phyinfo->dev_info.dev = dev;
516 phyinfo->phy_layer = phy_layer;
517 phyinfo->ifindex = dev->ifindex;
518 phyinfo->use_stx = stx;
519 phyinfo->use_fcs = fcs;
520
521 frml = cffrml_create(phyid, fcs);
432 522
433 phy_layer->id = *phyid;
434 cnfg->phy_layers[*phyid].pref = pref;
435 cnfg->phy_layers[*phyid].id = *phyid;
436 cnfg->phy_layers[*phyid].dev_info.id = *phyid;
437 cnfg->phy_layers[*phyid].dev_info.dev = dev;
438 cnfg->phy_layers[*phyid].phy_layer = phy_layer;
439 cnfg->phy_layers[*phyid].phy_ref_count = 0;
440 cnfg->phy_layers[*phyid].ifindex = dev->ifindex;
441 cnfg->phy_layers[*phyid].use_stx = stx;
442 cnfg->phy_layers[*phyid].use_fcs = fcs;
443
444 phy_layer->type = phy_type;
445 frml = cffrml_create(*phyid, fcs);
446 if (!frml) { 523 if (!frml) {
447 pr_warn("Out of memory\n"); 524 pr_warn("Out of memory\n");
448 return; 525 kfree(phyinfo);
526 goto out;
449 } 527 }
450 cnfg->phy_layers[*phyid].frm_layer = frml; 528 phyinfo->frm_layer = frml;
451 cfmuxl_set_dnlayer(cnfg->mux, frml, *phyid);
452 layer_set_up(frml, cnfg->mux); 529 layer_set_up(frml, cnfg->mux);
453 530
454 if (phy_driver != NULL) { 531 if (phy_driver != NULL) {
455 phy_driver->id = *phyid; 532 phy_driver->id = phyid;
456 layer_set_dn(frml, phy_driver); 533 layer_set_dn(frml, phy_driver);
457 layer_set_up(phy_driver, frml); 534 layer_set_up(phy_driver, frml);
458 layer_set_dn(phy_driver, phy_layer); 535 layer_set_dn(phy_driver, phy_layer);
@@ -461,33 +538,95 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
461 layer_set_dn(frml, phy_layer); 538 layer_set_dn(frml, phy_layer);
462 layer_set_up(phy_layer, frml); 539 layer_set_up(phy_layer, frml);
463 } 540 }
541
542 list_add_rcu(&phyinfo->node, &cnfg->phys);
543out:
544 mutex_unlock(&cnfg->lock);
464} 545}
465EXPORT_SYMBOL(cfcnfg_add_phy_layer); 546EXPORT_SYMBOL(cfcnfg_add_phy_layer);
466 547
548int cfcnfg_set_phy_state(struct cfcnfg *cnfg, struct cflayer *phy_layer,
549 bool up)
550{
551 struct cfcnfg_phyinfo *phyinfo;
552
553 rcu_read_lock();
554 phyinfo = cfcnfg_get_phyinfo_rcu(cnfg, phy_layer->id);
555 if (phyinfo == NULL) {
556 rcu_read_unlock();
557 return -ENODEV;
558 }
559
560 if (phyinfo->up == up) {
561 rcu_read_unlock();
562 return 0;
563 }
564 phyinfo->up = up;
565
566 if (up) {
567 cffrml_hold(phyinfo->frm_layer);
568 cfmuxl_set_dnlayer(cnfg->mux, phyinfo->frm_layer,
569 phy_layer->id);
570 } else {
571 cfmuxl_remove_dnlayer(cnfg->mux, phy_layer->id);
572 cffrml_put(phyinfo->frm_layer);
573 }
574
575 rcu_read_unlock();
576 return 0;
577}
578EXPORT_SYMBOL(cfcnfg_set_phy_state);
579
467int cfcnfg_del_phy_layer(struct cfcnfg *cnfg, struct cflayer *phy_layer) 580int cfcnfg_del_phy_layer(struct cfcnfg *cnfg, struct cflayer *phy_layer)
468{ 581{
469 struct cflayer *frml, *frml_dn; 582 struct cflayer *frml, *frml_dn;
470 u16 phyid; 583 u16 phyid;
584 struct cfcnfg_phyinfo *phyinfo;
585
586 might_sleep();
587
588 mutex_lock(&cnfg->lock);
589
471 phyid = phy_layer->id; 590 phyid = phy_layer->id;
472 caif_assert(phyid == cnfg->phy_layers[phyid].id); 591 phyinfo = cfcnfg_get_phyinfo_rcu(cnfg, phyid);
473 caif_assert(phy_layer == cnfg->phy_layers[phyid].phy_layer); 592
593 if (phyinfo == NULL) {
594 mutex_unlock(&cnfg->lock);
595 return 0;
596 }
597 caif_assert(phyid == phyinfo->id);
598 caif_assert(phy_layer == phyinfo->phy_layer);
474 caif_assert(phy_layer->id == phyid); 599 caif_assert(phy_layer->id == phyid);
475 caif_assert(cnfg->phy_layers[phyid].frm_layer->id == phyid); 600 caif_assert(phyinfo->frm_layer->id == phyid);
601
602 list_del_rcu(&phyinfo->node);
603 synchronize_rcu();
476 604
477 memset(&cnfg->phy_layers[phy_layer->id], 0, 605 /* Fail if reference count is not zero */
478 sizeof(struct cfcnfg_phyinfo)); 606 if (cffrml_refcnt_read(phyinfo->frm_layer) != 0) {
479 	frml = cfmuxl_remove_dnlayer(cnfg->mux, phy_layer->id);	607 		pr_info("Waiting for in-use device\n");
608 list_add_rcu(&phyinfo->node, &cnfg->phys);
609 mutex_unlock(&cnfg->lock);
610 return -EAGAIN;
611 }
612
613 frml = phyinfo->frm_layer;
480 frml_dn = frml->dn; 614 frml_dn = frml->dn;
481 cffrml_set_uplayer(frml, NULL); 615 cffrml_set_uplayer(frml, NULL);
482 cffrml_set_dnlayer(frml, NULL); 616 cffrml_set_dnlayer(frml, NULL);
483 kfree(frml);
484
485 if (phy_layer != frml_dn) { 617 if (phy_layer != frml_dn) {
486 layer_set_up(frml_dn, NULL); 618 layer_set_up(frml_dn, NULL);
487 layer_set_dn(frml_dn, NULL); 619 layer_set_dn(frml_dn, NULL);
488 kfree(frml_dn);
489 } 620 }
490 layer_set_up(phy_layer, NULL); 621 layer_set_up(phy_layer, NULL);
622
623 if (phyinfo->phy_layer != frml_dn)
624 kfree(frml_dn);
625
626 cffrml_free(frml);
627 kfree(phyinfo);
628 mutex_unlock(&cnfg->lock);
629
491 return 0; 630 return 0;
492} 631}
493EXPORT_SYMBOL(cfcnfg_del_phy_layer); 632EXPORT_SYMBOL(cfcnfg_del_phy_layer);
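
The cfcnfg.c changes above swap the fixed phy_layers[] array for an RCU-protected list: readers walk it with list_for_each_entry_rcu() under rcu_read_lock(), while writers serialize on cnfg->lock and call synchronize_rcu() before freeing. A minimal, self-contained sketch of that reader/writer pattern, using simplified stand-in names rather than the real cfcnfg types:

    #include <linux/types.h>
    #include <linux/rculist.h>
    #include <linux/mutex.h>
    #include <linux/slab.h>

    /* Simplified stand-in for struct cfcnfg_phyinfo. */
    struct phyinfo {
    	struct list_head node;
    	u8 id;
    	bool up;
    };

    static LIST_HEAD(phys);
    static DEFINE_MUTEX(phys_lock);		/* writers only */

    /* Reader side: call under rcu_read_lock(), like cfcnfg_get_phyinfo_rcu(). */
    static struct phyinfo *phy_lookup_rcu(u8 id)
    {
    	struct phyinfo *phy;

    	list_for_each_entry_rcu(phy, &phys, node)
    		if (phy->id == id)
    			return phy;
    	return NULL;
    }

    /* Writer side: mirrors the list_del_rcu() + synchronize_rcu() sequence
     * in cfcnfg_del_phy_layer() above. */
    static int phy_del(u8 id)
    {
    	struct phyinfo *phy, *found = NULL;

    	mutex_lock(&phys_lock);
    	list_for_each_entry(phy, &phys, node) {
    		if (phy->id == id) {
    			found = phy;
    			break;
    		}
    	}
    	if (found)
    		list_del_rcu(&found->node);
    	mutex_unlock(&phys_lock);
    	if (!found)
    		return -ENODEV;
    	synchronize_rcu();	/* wait out readers before freeing */
    	kfree(found);
    	return 0;
    }
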
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index 3cd8f978e309..e22671bed669 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -17,7 +17,6 @@
17#define UTILITY_NAME_LENGTH 16 17#define UTILITY_NAME_LENGTH 16
18#define CFPKT_CTRL_PKT_LEN 20 18#define CFPKT_CTRL_PKT_LEN 20
19 19
20
21#ifdef CAIF_NO_LOOP 20#ifdef CAIF_NO_LOOP
22static int handle_loop(struct cfctrl *ctrl, 21static int handle_loop(struct cfctrl *ctrl,
23 int cmd, struct cfpkt *pkt){ 22 int cmd, struct cfpkt *pkt){
@@ -51,14 +50,31 @@ struct cflayer *cfctrl_create(void)
51 this->serv.layer.receive = cfctrl_recv; 50 this->serv.layer.receive = cfctrl_recv;
52 sprintf(this->serv.layer.name, "ctrl"); 51 sprintf(this->serv.layer.name, "ctrl");
53 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd; 52 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
53#ifndef CAIF_NO_LOOP
54 spin_lock_init(&this->loop_linkid_lock); 54 spin_lock_init(&this->loop_linkid_lock);
55 this->loop_linkid = 1;
56#endif
55 spin_lock_init(&this->info_list_lock); 57 spin_lock_init(&this->info_list_lock);
56 INIT_LIST_HEAD(&this->list); 58 INIT_LIST_HEAD(&this->list);
57 this->loop_linkid = 1;
58 return &this->serv.layer; 59 return &this->serv.layer;
59} 60}
60 61
61static bool param_eq(struct cfctrl_link_param *p1, struct cfctrl_link_param *p2) 62void cfctrl_remove(struct cflayer *layer)
63{
64 struct cfctrl_request_info *p, *tmp;
65 struct cfctrl *ctrl = container_obj(layer);
66
67 spin_lock_bh(&ctrl->info_list_lock);
68 list_for_each_entry_safe(p, tmp, &ctrl->list, list) {
69 list_del(&p->list);
70 kfree(p);
71 }
72 spin_unlock_bh(&ctrl->info_list_lock);
73 kfree(layer);
74}
75
76static bool param_eq(const struct cfctrl_link_param *p1,
77 const struct cfctrl_link_param *p2)
62{ 78{
63 bool eq = 79 bool eq =
64 p1->linktype == p2->linktype && 80 p1->linktype == p2->linktype &&
@@ -100,8 +116,8 @@ static bool param_eq(struct cfctrl_link_param *p1, struct cfctrl_link_param *p2)
100 return false; 116 return false;
101} 117}
102 118
103bool cfctrl_req_eq(struct cfctrl_request_info *r1, 119static bool cfctrl_req_eq(const struct cfctrl_request_info *r1,
104 struct cfctrl_request_info *r2) 120 const struct cfctrl_request_info *r2)
105{ 121{
106 if (r1->cmd != r2->cmd) 122 if (r1->cmd != r2->cmd)
107 return false; 123 return false;
@@ -112,23 +128,22 @@ bool cfctrl_req_eq(struct cfctrl_request_info *r1,
112} 128}
113 129
114/* Insert request at the end */ 130/* Insert request at the end */
115void cfctrl_insert_req(struct cfctrl *ctrl, 131static void cfctrl_insert_req(struct cfctrl *ctrl,
116 struct cfctrl_request_info *req) 132 struct cfctrl_request_info *req)
117{ 133{
118 spin_lock(&ctrl->info_list_lock); 134 spin_lock_bh(&ctrl->info_list_lock);
119 atomic_inc(&ctrl->req_seq_no); 135 atomic_inc(&ctrl->req_seq_no);
120 req->sequence_no = atomic_read(&ctrl->req_seq_no); 136 req->sequence_no = atomic_read(&ctrl->req_seq_no);
121 list_add_tail(&req->list, &ctrl->list); 137 list_add_tail(&req->list, &ctrl->list);
122 spin_unlock(&ctrl->info_list_lock); 138 spin_unlock_bh(&ctrl->info_list_lock);
123} 139}
124 140
125/* Compare and remove request */ 141/* Compare and remove request */
126struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl, 142static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
127 struct cfctrl_request_info *req) 143 struct cfctrl_request_info *req)
128{ 144{
129 struct cfctrl_request_info *p, *tmp, *first; 145 struct cfctrl_request_info *p, *tmp, *first;
130 146
131 spin_lock(&ctrl->info_list_lock);
132 first = list_first_entry(&ctrl->list, struct cfctrl_request_info, list); 147 first = list_first_entry(&ctrl->list, struct cfctrl_request_info, list);
133 148
134 list_for_each_entry_safe(p, tmp, &ctrl->list, list) { 149 list_for_each_entry_safe(p, tmp, &ctrl->list, list) {
@@ -144,7 +159,6 @@ struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
144 } 159 }
145 p = NULL; 160 p = NULL;
146out: 161out:
147 spin_unlock(&ctrl->info_list_lock);
148 return p; 162 return p;
149} 163}
150 164
@@ -154,16 +168,6 @@ struct cfctrl_rsp *cfctrl_get_respfuncs(struct cflayer *layer)
154 return &this->res; 168 return &this->res;
155} 169}
156 170
157void cfctrl_set_dnlayer(struct cflayer *this, struct cflayer *dn)
158{
159 this->dn = dn;
160}
161
162void cfctrl_set_uplayer(struct cflayer *this, struct cflayer *up)
163{
164 this->up = up;
165}
166
167static void init_info(struct caif_payload_info *info, struct cfctrl *cfctrl) 171static void init_info(struct caif_payload_info *info, struct cfctrl *cfctrl)
168{ 172{
169 info->hdr_len = 0; 173 info->hdr_len = 0;
@@ -174,24 +178,23 @@ static void init_info(struct caif_payload_info *info, struct cfctrl *cfctrl)
174void cfctrl_enum_req(struct cflayer *layer, u8 physlinkid) 178void cfctrl_enum_req(struct cflayer *layer, u8 physlinkid)
175{ 179{
176 struct cfctrl *cfctrl = container_obj(layer); 180 struct cfctrl *cfctrl = container_obj(layer);
177 int ret;
178 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN); 181 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
182 struct cflayer *dn = cfctrl->serv.layer.dn;
179 if (!pkt) { 183 if (!pkt) {
180 pr_warn("Out of memory\n"); 184 pr_warn("Out of memory\n");
181 return; 185 return;
182 } 186 }
187 if (!dn) {
188 pr_debug("not able to send enum request\n");
189 return;
190 }
183 caif_assert(offsetof(struct cfctrl, serv.layer) == 0); 191 caif_assert(offsetof(struct cfctrl, serv.layer) == 0);
184 init_info(cfpkt_info(pkt), cfctrl); 192 init_info(cfpkt_info(pkt), cfctrl);
185 cfpkt_info(pkt)->dev_info->id = physlinkid; 193 cfpkt_info(pkt)->dev_info->id = physlinkid;
186 cfctrl->serv.dev_info.id = physlinkid; 194 cfctrl->serv.dev_info.id = physlinkid;
187 cfpkt_addbdy(pkt, CFCTRL_CMD_ENUM); 195 cfpkt_addbdy(pkt, CFCTRL_CMD_ENUM);
188 cfpkt_addbdy(pkt, physlinkid); 196 cfpkt_addbdy(pkt, physlinkid);
189 ret = 197 dn->transmit(dn, pkt);
190 cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
191 if (ret < 0) {
192 pr_err("Could not transmit enum message\n");
193 cfpkt_destroy(pkt);
194 }
195} 198}
196 199
197int cfctrl_linkup_request(struct cflayer *layer, 200int cfctrl_linkup_request(struct cflayer *layer,
@@ -205,14 +208,29 @@ int cfctrl_linkup_request(struct cflayer *layer,
205 struct cfctrl_request_info *req; 208 struct cfctrl_request_info *req;
206 int ret; 209 int ret;
207 char utility_name[16]; 210 char utility_name[16];
208 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN); 211 struct cfpkt *pkt;
212 struct cflayer *dn = cfctrl->serv.layer.dn;
213
214 if (!dn) {
215 pr_debug("not able to send linkup request\n");
216 return -ENODEV;
217 }
218
219 if (cfctrl_cancel_req(layer, user_layer) > 0) {
	220 	/* Slight paranoia: check if already connecting */
221 pr_err("Duplicate connect request for same client\n");
222 WARN_ON(1);
223 return -EALREADY;
224 }
225
226 pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
209 if (!pkt) { 227 if (!pkt) {
210 pr_warn("Out of memory\n"); 228 pr_warn("Out of memory\n");
211 return -ENOMEM; 229 return -ENOMEM;
212 } 230 }
213 cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_SETUP); 231 cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_SETUP);
214 cfpkt_addbdy(pkt, (param->chtype << 4) + param->linktype); 232 cfpkt_addbdy(pkt, (param->chtype << 4) | param->linktype);
215 cfpkt_addbdy(pkt, (param->priority << 3) + param->phyid); 233 cfpkt_addbdy(pkt, (param->priority << 3) | param->phyid);
216 cfpkt_addbdy(pkt, param->endpoint & 0x03); 234 cfpkt_addbdy(pkt, param->endpoint & 0x03);
217 235
218 switch (param->linktype) { 236 switch (param->linktype) {
@@ -273,11 +291,15 @@ int cfctrl_linkup_request(struct cflayer *layer,
273 */ 291 */
274 cfpkt_info(pkt)->dev_info->id = param->phyid; 292 cfpkt_info(pkt)->dev_info->id = param->phyid;
275 ret = 293 ret =
276 cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt); 294 dn->transmit(dn, pkt);
277 if (ret < 0) { 295 if (ret < 0) {
278 pr_err("Could not transmit linksetup request\n"); 296 int count;
279 cfpkt_destroy(pkt); 297
280 return -ENODEV; 298 count = cfctrl_cancel_req(&cfctrl->serv.layer,
299 user_layer);
300 if (count != 1)
301 pr_err("Could not remove request (%d)", count);
302 return -ENODEV;
281 } 303 }
282 return 0; 304 return 0;
283} 305}
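
In the cfctrl_linkup_request() hunk just above, the header bytes switch from '+' to '|' when the fields are packed. The field widths implied by the shifts are a 4-bit linktype under a 4-bit chtype, and a 3-bit phyid under the priority; the values below are made-up examples, not real CAIF constants:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	uint8_t chtype = 0x1, linktype = 0x3;	/* hypothetical values */
    	uint8_t priority = 0x4, phyid = 0x2;

    	uint8_t b0 = (chtype << 4) | linktype;	/* 0x13 */
    	uint8_t b1 = (priority << 3) | phyid;	/* 0x22 */

    	/* '|' makes the non-overlap of the fields explicit; with '+',
    	 * a linktype wider than 4 bits would have carried into the
    	 * chtype nibble and silently corrupted both fields. */
    	printf("%02x %02x\n", b0, b1);
    	return 0;
    }
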
@@ -288,89 +310,46 @@ int cfctrl_linkdown_req(struct cflayer *layer, u8 channelid,
288 int ret; 310 int ret;
289 struct cfctrl *cfctrl = container_obj(layer); 311 struct cfctrl *cfctrl = container_obj(layer);
290 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN); 312 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
291 if (!pkt) { 313 struct cflayer *dn = cfctrl->serv.layer.dn;
292 pr_warn("Out of memory\n");
293 return -ENOMEM;
294 }
295 cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_DESTROY);
296 cfpkt_addbdy(pkt, channelid);
297 init_info(cfpkt_info(pkt), cfctrl);
298 ret =
299 cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
300 if (ret < 0) {
301 pr_err("Could not transmit link-down request\n");
302 cfpkt_destroy(pkt);
303 }
304 return ret;
305}
306 314
307void cfctrl_sleep_req(struct cflayer *layer)
308{
309 int ret;
310 struct cfctrl *cfctrl = container_obj(layer);
311 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
312 if (!pkt) { 315 if (!pkt) {
313 pr_warn("Out of memory\n"); 316 pr_warn("Out of memory\n");
314 return; 317 return -ENOMEM;
315 } 318 }
316 cfpkt_addbdy(pkt, CFCTRL_CMD_SLEEP);
317 init_info(cfpkt_info(pkt), cfctrl);
318 ret =
319 cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
320 if (ret < 0)
321 cfpkt_destroy(pkt);
322}
323 319
324void cfctrl_wake_req(struct cflayer *layer) 320 if (!dn) {
325{ 321 pr_debug("not able to send link-down request\n");
326 int ret; 322 return -ENODEV;
327 struct cfctrl *cfctrl = container_obj(layer);
328 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
329 if (!pkt) {
330 pr_warn("Out of memory\n");
331 return;
332 } 323 }
333 cfpkt_addbdy(pkt, CFCTRL_CMD_WAKE);
334 init_info(cfpkt_info(pkt), cfctrl);
335 ret =
336 cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
337 if (ret < 0)
338 cfpkt_destroy(pkt);
339}
340 324
341void cfctrl_getstartreason_req(struct cflayer *layer) 325 cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_DESTROY);
342{ 326 cfpkt_addbdy(pkt, channelid);
343 int ret;
344 struct cfctrl *cfctrl = container_obj(layer);
345 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
346 if (!pkt) {
347 pr_warn("Out of memory\n");
348 return;
349 }
350 cfpkt_addbdy(pkt, CFCTRL_CMD_START_REASON);
351 init_info(cfpkt_info(pkt), cfctrl); 327 init_info(cfpkt_info(pkt), cfctrl);
352 ret = 328 ret =
353 cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt); 329 dn->transmit(dn, pkt);
354 if (ret < 0) 330#ifndef CAIF_NO_LOOP
355 cfpkt_destroy(pkt); 331 cfctrl->loop_linkused[channelid] = 0;
332#endif
333 return ret;
356} 334}
357 335
358 336int cfctrl_cancel_req(struct cflayer *layr, struct cflayer *adap_layer)
359void cfctrl_cancel_req(struct cflayer *layr, struct cflayer *adap_layer)
360{ 337{
361 struct cfctrl_request_info *p, *tmp; 338 struct cfctrl_request_info *p, *tmp;
362 struct cfctrl *ctrl = container_obj(layr); 339 struct cfctrl *ctrl = container_obj(layr);
363 spin_lock(&ctrl->info_list_lock); 340 int found = 0;
341 spin_lock_bh(&ctrl->info_list_lock);
364 342
365 list_for_each_entry_safe(p, tmp, &ctrl->list, list) { 343 list_for_each_entry_safe(p, tmp, &ctrl->list, list) {
366 if (p->client_layer == adap_layer) { 344 if (p->client_layer == adap_layer) {
367 pr_debug("cancel req :%d\n", p->sequence_no);
368 list_del(&p->list); 345 list_del(&p->list);
369 kfree(p); 346 kfree(p);
347 found++;
370 } 348 }
371 } 349 }
372 350
373 spin_unlock(&ctrl->info_list_lock); 351 spin_unlock_bh(&ctrl->info_list_lock);
352 return found;
374} 353}
375 354
376static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt) 355static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
@@ -389,7 +368,8 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
389 cfpkt_extr_head(pkt, &cmdrsp, 1); 368 cfpkt_extr_head(pkt, &cmdrsp, 1);
390 cmd = cmdrsp & CFCTRL_CMD_MASK; 369 cmd = cmdrsp & CFCTRL_CMD_MASK;
391 if (cmd != CFCTRL_CMD_LINK_ERR 370 if (cmd != CFCTRL_CMD_LINK_ERR
392 && CFCTRL_RSP_BIT != (CFCTRL_RSP_BIT & cmdrsp)) { 371 && CFCTRL_RSP_BIT != (CFCTRL_RSP_BIT & cmdrsp)
372 && CFCTRL_ERR_BIT != (CFCTRL_ERR_BIT & cmdrsp)) {
393 if (handle_loop(cfctrl, cmd, pkt) != 0) 373 if (handle_loop(cfctrl, cmd, pkt) != 0)
394 cmdrsp |= CFCTRL_ERR_BIT; 374 cmdrsp |= CFCTRL_ERR_BIT;
395 } 375 }
@@ -515,18 +495,20 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
515 cfpkt_extr_head(pkt, &param, len); 495 cfpkt_extr_head(pkt, &param, len);
516 break; 496 break;
517 default: 497 default:
518 pr_warn("Request setup - invalid link type (%d)\n", 498 pr_warn("Request setup, invalid type (%d)\n",
519 serv); 499 serv);
520 goto error; 500 goto error;
521 } 501 }
522 502
523 rsp.cmd = cmd; 503 rsp.cmd = cmd;
524 rsp.param = linkparam; 504 rsp.param = linkparam;
505 spin_lock_bh(&cfctrl->info_list_lock);
525 req = cfctrl_remove_req(cfctrl, &rsp); 506 req = cfctrl_remove_req(cfctrl, &rsp);
526 507
527 if (CFCTRL_ERR_BIT == (CFCTRL_ERR_BIT & cmdrsp) || 508 if (CFCTRL_ERR_BIT == (CFCTRL_ERR_BIT & cmdrsp) ||
528 cfpkt_erroneous(pkt)) { 509 cfpkt_erroneous(pkt)) {
529 pr_err("Invalid O/E bit or parse error on CAIF control channel\n"); 510 pr_err("Invalid O/E bit or parse error "
511 "on CAIF control channel\n");
530 cfctrl->res.reject_rsp(cfctrl->serv.layer.up, 512 cfctrl->res.reject_rsp(cfctrl->serv.layer.up,
531 0, 513 0,
532 req ? req->client_layer 514 req ? req->client_layer
@@ -541,6 +523,8 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
541 523
542 if (req != NULL) 524 if (req != NULL)
543 kfree(req); 525 kfree(req);
526
527 spin_unlock_bh(&cfctrl->info_list_lock);
544 } 528 }
545 break; 529 break;
546 case CFCTRL_CMD_LINK_DESTROY: 530 case CFCTRL_CMD_LINK_DESTROY:
@@ -584,12 +568,28 @@ static void cfctrl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
584 switch (ctrl) { 568 switch (ctrl) {
585 case _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND: 569 case _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND:
586 case CAIF_CTRLCMD_FLOW_OFF_IND: 570 case CAIF_CTRLCMD_FLOW_OFF_IND:
587 spin_lock(&this->info_list_lock); 571 spin_lock_bh(&this->info_list_lock);
588 if (!list_empty(&this->list)) { 572 if (!list_empty(&this->list))
589 pr_debug("Received flow off in control layer\n"); 573 pr_debug("Received flow off in control layer\n");
574 spin_unlock_bh(&this->info_list_lock);
575 break;
576 case _CAIF_CTRLCMD_PHYIF_DOWN_IND: {
577 struct cfctrl_request_info *p, *tmp;
578
579 /* Find all connect request and report failure */
580 spin_lock_bh(&this->info_list_lock);
581 list_for_each_entry_safe(p, tmp, &this->list, list) {
582 if (p->param.phyid == phyid) {
583 list_del(&p->list);
584 p->client_layer->ctrlcmd(p->client_layer,
585 CAIF_CTRLCMD_INIT_FAIL_RSP,
586 phyid);
587 kfree(p);
588 }
590 } 589 }
591 spin_unlock(&this->info_list_lock); 590 spin_unlock_bh(&this->info_list_lock);
592 break; 591 break;
592 }
593 default: 593 default:
594 break; 594 break;
595 } 595 }
@@ -599,27 +599,33 @@ static void cfctrl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
599static int handle_loop(struct cfctrl *ctrl, int cmd, struct cfpkt *pkt) 599static int handle_loop(struct cfctrl *ctrl, int cmd, struct cfpkt *pkt)
600{ 600{
601 static int last_linkid; 601 static int last_linkid;
602 static int dec;
602 u8 linkid, linktype, tmp; 603 u8 linkid, linktype, tmp;
603 switch (cmd) { 604 switch (cmd) {
604 case CFCTRL_CMD_LINK_SETUP: 605 case CFCTRL_CMD_LINK_SETUP:
605 spin_lock(&ctrl->loop_linkid_lock); 606 spin_lock_bh(&ctrl->loop_linkid_lock);
606 for (linkid = last_linkid + 1; linkid < 255; linkid++) 607 if (!dec) {
607 if (!ctrl->loop_linkused[linkid]) 608 for (linkid = last_linkid + 1; linkid < 254; linkid++)
608 goto found; 609 if (!ctrl->loop_linkused[linkid])
609 for (linkid = last_linkid - 1; linkid > 0; linkid--) 610 goto found;
611 }
612 dec = 1;
613 for (linkid = last_linkid - 1; linkid > 1; linkid--)
610 if (!ctrl->loop_linkused[linkid]) 614 if (!ctrl->loop_linkused[linkid])
611 goto found; 615 goto found;
612 spin_unlock(&ctrl->loop_linkid_lock); 616 spin_unlock_bh(&ctrl->loop_linkid_lock);
613 pr_err("Out of link-ids\n"); 617 return -1;
614 return -EINVAL;
615found: 618found:
619 if (linkid < 10)
620 dec = 0;
621
616 if (!ctrl->loop_linkused[linkid]) 622 if (!ctrl->loop_linkused[linkid])
617 ctrl->loop_linkused[linkid] = 1; 623 ctrl->loop_linkused[linkid] = 1;
618 624
619 last_linkid = linkid; 625 last_linkid = linkid;
620 626
621 cfpkt_add_trail(pkt, &linkid, 1); 627 cfpkt_add_trail(pkt, &linkid, 1);
622 spin_unlock(&ctrl->loop_linkid_lock); 628 spin_unlock_bh(&ctrl->loop_linkid_lock);
623 cfpkt_peek_head(pkt, &linktype, 1); 629 cfpkt_peek_head(pkt, &linktype, 1);
624 if (linktype == CFCTRL_SRV_UTIL) { 630 if (linktype == CFCTRL_SRV_UTIL) {
625 tmp = 0x01; 631 tmp = 0x01;
@@ -629,10 +635,10 @@ found:
629 break; 635 break;
630 636
631 case CFCTRL_CMD_LINK_DESTROY: 637 case CFCTRL_CMD_LINK_DESTROY:
632 spin_lock(&ctrl->loop_linkid_lock); 638 spin_lock_bh(&ctrl->loop_linkid_lock);
633 cfpkt_peek_head(pkt, &linkid, 1); 639 cfpkt_peek_head(pkt, &linkid, 1);
634 ctrl->loop_linkused[linkid] = 0; 640 ctrl->loop_linkused[linkid] = 0;
635 spin_unlock(&ctrl->loop_linkid_lock); 641 spin_unlock_bh(&ctrl->loop_linkid_lock);
636 break; 642 break;
637 default: 643 default:
638 break; 644 break;
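
One detail worth noting in the new cfctrl_enum_req() above: the packet is allocated before the !dn check, and that early return does not free it, so pkt leaks on the no-downstream path. A leak-free ordering, sketched with the same calls the patch already uses (an editorial sketch, not part of the patch itself):

    static void cfctrl_enum_req_sketch(struct cflayer *layer, u8 physlinkid)
    {
    	struct cfctrl *cfctrl = container_obj(layer);
    	struct cflayer *dn = cfctrl->serv.layer.dn;
    	struct cfpkt *pkt;

    	/* Check the downstream layer before allocating anything. */
    	if (!dn) {
    		pr_debug("not able to send enum request\n");
    		return;
    	}
    	pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
    	if (!pkt) {
    		pr_warn("Out of memory\n");
    		return;
    	}
    	init_info(cfpkt_info(pkt), cfctrl);
    	cfpkt_info(pkt)->dev_info->id = physlinkid;
    	cfctrl->serv.dev_info.id = physlinkid;
    	cfpkt_addbdy(pkt, CFCTRL_CMD_ENUM);
    	cfpkt_addbdy(pkt, physlinkid);
    	dn->transmit(dn, pkt);
    }
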
diff --git a/net/caif/cfdgml.c b/net/caif/cfdgml.c
index 054fdb5aeb88..0382dec84fdc 100644
--- a/net/caif/cfdgml.c
+++ b/net/caif/cfdgml.c
@@ -108,10 +108,5 @@ static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt)
108 */ 108 */
109 info->hdr_len = 4; 109 info->hdr_len = 4;
110 info->dev_info = &service->dev_info; 110 info->dev_info = &service->dev_info;
111 ret = layr->dn->transmit(layr->dn, pkt); 111 return layr->dn->transmit(layr->dn, pkt);
112 if (ret < 0) {
113 u32 tmp32;
114 cfpkt_extr_head(pkt, &tmp32, 4);
115 }
116 return ret;
117} 112}
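
The cfdgml.c change above can drop the header rollback because the transmit path now follows a stricter ownership rule (see the cffrml.c and cfmuxl.c hunks below, which destroy the packet on their error paths): transmit() always consumes the packet, whether it succeeds or fails. A caller sketch under that assumption:

    /* New ownership rule: after transmit() the packet belongs to the
     * callee in every case, so callers must not touch pkt afterwards,
     * not even to roll back headers on error. */
    static int send_down(struct cflayer *layr, struct cfpkt *pkt)
    {
    	return layr->dn->transmit(layr->dn, pkt);
    }
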
diff --git a/net/caif/cffrml.c b/net/caif/cffrml.c
index a445043931ae..04204b202718 100644
--- a/net/caif/cffrml.c
+++ b/net/caif/cffrml.c
@@ -12,6 +12,7 @@
12#include <linux/spinlock.h> 12#include <linux/spinlock.h>
13#include <linux/slab.h> 13#include <linux/slab.h>
14#include <linux/crc-ccitt.h> 14#include <linux/crc-ccitt.h>
15#include <linux/netdevice.h>
15#include <net/caif/caif_layer.h> 16#include <net/caif/caif_layer.h>
16#include <net/caif/cfpkt.h> 17#include <net/caif/cfpkt.h>
17#include <net/caif/cffrml.h> 18#include <net/caif/cffrml.h>
@@ -21,6 +22,7 @@
21struct cffrml { 22struct cffrml {
22 struct cflayer layer; 23 struct cflayer layer;
23 bool dofcs; /* !< FCS active */ 24 bool dofcs; /* !< FCS active */
25 int __percpu *pcpu_refcnt;
24}; 26};
25 27
26static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt); 28static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt);
@@ -37,6 +39,12 @@ struct cflayer *cffrml_create(u16 phyid, bool use_fcs)
37 pr_warn("Out of memory\n"); 39 pr_warn("Out of memory\n");
38 return NULL; 40 return NULL;
39 } 41 }
42 this->pcpu_refcnt = alloc_percpu(int);
43 if (this->pcpu_refcnt == NULL) {
44 kfree(this);
45 return NULL;
46 }
47
40 caif_assert(offsetof(struct cffrml, layer) == 0); 48 caif_assert(offsetof(struct cffrml, layer) == 0);
41 49
42 memset(this, 0, sizeof(struct cflayer)); 50 memset(this, 0, sizeof(struct cflayer));
@@ -49,6 +57,13 @@ struct cflayer *cffrml_create(u16 phyid, bool use_fcs)
49 return (struct cflayer *) this; 57 return (struct cflayer *) this;
50} 58}
51 59
60void cffrml_free(struct cflayer *layer)
61{
62 struct cffrml *this = container_obj(layer);
63 free_percpu(this->pcpu_refcnt);
64 kfree(layer);
65}
66
52void cffrml_set_uplayer(struct cflayer *this, struct cflayer *up) 67void cffrml_set_uplayer(struct cflayer *this, struct cflayer *up)
53{ 68{
54 this->up = up; 69 this->up = up;
@@ -112,6 +127,13 @@ static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt)
112 cfpkt_destroy(pkt); 127 cfpkt_destroy(pkt);
113 return -EPROTO; 128 return -EPROTO;
114 } 129 }
130
131 if (layr->up == NULL) {
	132 		pr_err("Up layer is missing!\n");
133 cfpkt_destroy(pkt);
134 return -EINVAL;
135 }
136
115 return layr->up->receive(layr->up, pkt); 137 return layr->up->receive(layr->up, pkt);
116} 138}
117 139
@@ -120,7 +142,6 @@ static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt)
120 int tmp; 142 int tmp;
121 u16 chks; 143 u16 chks;
122 u16 len; 144 u16 len;
123 int ret;
124 struct cffrml *this = container_obj(layr); 145 struct cffrml *this = container_obj(layr);
125 if (this->dofcs) { 146 if (this->dofcs) {
126 chks = cfpkt_iterate(pkt, cffrml_checksum, 0xffff); 147 chks = cfpkt_iterate(pkt, cffrml_checksum, 0xffff);
@@ -135,19 +156,44 @@ static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt)
135 cfpkt_info(pkt)->hdr_len += 2; 156 cfpkt_info(pkt)->hdr_len += 2;
136 if (cfpkt_erroneous(pkt)) { 157 if (cfpkt_erroneous(pkt)) {
137 pr_err("Packet is erroneous!\n"); 158 pr_err("Packet is erroneous!\n");
159 cfpkt_destroy(pkt);
138 return -EPROTO; 160 return -EPROTO;
139 } 161 }
140 ret = layr->dn->transmit(layr->dn, pkt); 162
141 if (ret < 0) { 163 if (layr->dn == NULL) {
142 /* Remove header on faulty packet. */ 164 cfpkt_destroy(pkt);
143 cfpkt_extr_head(pkt, &tmp, 2); 165 return -ENODEV;
166
144 } 167 }
145 return ret; 168 return layr->dn->transmit(layr->dn, pkt);
146} 169}
147 170
148static void cffrml_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, 171static void cffrml_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
149 int phyid) 172 int phyid)
150{ 173{
151 if (layr->up->ctrlcmd) 174 if (layr->up && layr->up->ctrlcmd)
152 layr->up->ctrlcmd(layr->up, ctrl, layr->id); 175 layr->up->ctrlcmd(layr->up, ctrl, layr->id);
153} 176}
177
178void cffrml_put(struct cflayer *layr)
179{
180 struct cffrml *this = container_obj(layr);
181 if (layr != NULL && this->pcpu_refcnt != NULL)
182 irqsafe_cpu_dec(*this->pcpu_refcnt);
183}
184
185void cffrml_hold(struct cflayer *layr)
186{
187 struct cffrml *this = container_obj(layr);
188 if (layr != NULL && this->pcpu_refcnt != NULL)
189 irqsafe_cpu_inc(*this->pcpu_refcnt);
190}
191
192int cffrml_refcnt_read(struct cflayer *layr)
193{
194 int i, refcnt = 0;
195 struct cffrml *this = container_obj(layr);
196 for_each_possible_cpu(i)
197 refcnt += *per_cpu_ptr(this->pcpu_refcnt, i);
198 return refcnt;
199}
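
cffrml now carries a per-CPU reference count so that cfcnfg_del_phy_layer() can tell whether the framing layer is still in use before tearing it down. A minimal sketch of the same idea; this_cpu_inc()/this_cpu_dec() stand in here for the irqsafe_cpu_inc()/irqsafe_cpu_dec() calls used in the patch:

    #include <linux/percpu.h>
    #include <linux/cpumask.h>

    struct obj {
    	int __percpu *pcpu_refcnt;	/* from alloc_percpu(int) */
    };

    static void obj_hold(struct obj *o)
    {
    	this_cpu_inc(*o->pcpu_refcnt);
    }

    static void obj_put(struct obj *o)
    {
    	this_cpu_dec(*o->pcpu_refcnt);
    }

    /* Sum over all CPUs. The total is only meaningful once new
     * references can no longer be taken, e.g. after the object has
     * been unpublished with list_del_rcu() + synchronize_rcu(). */
    static int obj_refcnt_read(struct obj *o)
    {
    	int cpu, refcnt = 0;

    	for_each_possible_cpu(cpu)
    		refcnt += *per_cpu_ptr(o->pcpu_refcnt, cpu);
    	return refcnt;
    }
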
diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c
index 24f1ffa74b06..c23979e79dfa 100644
--- a/net/caif/cfmuxl.c
+++ b/net/caif/cfmuxl.c
@@ -9,6 +9,7 @@
9#include <linux/stddef.h> 9#include <linux/stddef.h>
10#include <linux/spinlock.h> 10#include <linux/spinlock.h>
11#include <linux/slab.h> 11#include <linux/slab.h>
12#include <linux/rculist.h>
12#include <net/caif/cfpkt.h> 13#include <net/caif/cfpkt.h>
13#include <net/caif/cfmuxl.h> 14#include <net/caif/cfmuxl.h>
14#include <net/caif/cfsrvl.h> 15#include <net/caif/cfsrvl.h>
@@ -61,111 +62,88 @@ struct cflayer *cfmuxl_create(void)
61 return &this->layer; 62 return &this->layer;
62} 63}
63 64
64int cfmuxl_set_uplayer(struct cflayer *layr, struct cflayer *up, u8 linkid) 65int cfmuxl_set_dnlayer(struct cflayer *layr, struct cflayer *dn, u8 phyid)
65{ 66{
66 struct cfmuxl *muxl = container_obj(layr); 67 struct cfmuxl *muxl = (struct cfmuxl *) layr;
67 spin_lock(&muxl->receive_lock); 68
68 cfsrvl_get(up); 69 spin_lock_bh(&muxl->transmit_lock);
69 list_add(&up->node, &muxl->srvl_list); 70 list_add_rcu(&dn->node, &muxl->frml_list);
70 spin_unlock(&muxl->receive_lock); 71 spin_unlock_bh(&muxl->transmit_lock);
71 return 0; 72 return 0;
72} 73}
73 74
74bool cfmuxl_is_phy_inuse(struct cflayer *layr, u8 phyid) 75static struct cflayer *get_from_id(struct list_head *list, u16 id)
75{ 76{
76 struct list_head *node; 77 struct cflayer *lyr;
77 struct cflayer *layer; 78 list_for_each_entry_rcu(lyr, list, node) {
78 struct cfmuxl *muxl = container_obj(layr); 79 if (lyr->id == id)
79 bool match = false; 80 return lyr;
80 spin_lock(&muxl->receive_lock);
81
82 list_for_each(node, &muxl->srvl_list) {
83 layer = list_entry(node, struct cflayer, node);
84 if (cfsrvl_phyid_match(layer, phyid)) {
85 match = true;
86 break;
87 }
88
89 } 81 }
90 spin_unlock(&muxl->receive_lock); 82
91 return match; 83 return NULL;
92} 84}
93 85
94u8 cfmuxl_get_phyid(struct cflayer *layr, u8 channel_id) 86int cfmuxl_set_uplayer(struct cflayer *layr, struct cflayer *up, u8 linkid)
95{ 87{
96 struct cflayer *up;
97 int phyid;
98 struct cfmuxl *muxl = container_obj(layr); 88 struct cfmuxl *muxl = container_obj(layr);
99 spin_lock(&muxl->receive_lock); 89 struct cflayer *old;
100 up = get_up(muxl, channel_id);
101 if (up != NULL)
102 phyid = cfsrvl_getphyid(up);
103 else
104 phyid = 0;
105 spin_unlock(&muxl->receive_lock);
106 return phyid;
107}
108 90
109int cfmuxl_set_dnlayer(struct cflayer *layr, struct cflayer *dn, u8 phyid) 91 spin_lock_bh(&muxl->receive_lock);
110{
111 struct cfmuxl *muxl = (struct cfmuxl *) layr;
112 spin_lock(&muxl->transmit_lock);
113 list_add(&dn->node, &muxl->frml_list);
114 spin_unlock(&muxl->transmit_lock);
115 return 0;
116}
117 92
118static struct cflayer *get_from_id(struct list_head *list, u16 id) 93 /* Two entries with same id is wrong, so remove old layer from mux */
119{ 94 old = get_from_id(&muxl->srvl_list, linkid);
120 struct list_head *node; 95 if (old != NULL)
121 struct cflayer *layer; 96 list_del_rcu(&old->node);
122 list_for_each(node, list) { 97
123 layer = list_entry(node, struct cflayer, node); 98 list_add_rcu(&up->node, &muxl->srvl_list);
124 if (layer->id == id) 99 spin_unlock_bh(&muxl->receive_lock);
125 return layer; 100
126 } 101 return 0;
127 return NULL;
128} 102}
129 103
130struct cflayer *cfmuxl_remove_dnlayer(struct cflayer *layr, u8 phyid) 104struct cflayer *cfmuxl_remove_dnlayer(struct cflayer *layr, u8 phyid)
131{ 105{
132 struct cfmuxl *muxl = container_obj(layr); 106 struct cfmuxl *muxl = container_obj(layr);
133 struct cflayer *dn; 107 struct cflayer *dn;
134 spin_lock(&muxl->transmit_lock); 108 int idx = phyid % DN_CACHE_SIZE;
135 memset(muxl->dn_cache, 0, sizeof(muxl->dn_cache)); 109
110 spin_lock_bh(&muxl->transmit_lock);
111 rcu_assign_pointer(muxl->dn_cache[idx], NULL);
136 dn = get_from_id(&muxl->frml_list, phyid); 112 dn = get_from_id(&muxl->frml_list, phyid);
137 if (dn == NULL) { 113 if (dn == NULL)
138 spin_unlock(&muxl->transmit_lock); 114 goto out;
139 return NULL; 115
140 } 116 list_del_rcu(&dn->node);
141 list_del(&dn->node);
142 caif_assert(dn != NULL); 117 caif_assert(dn != NULL);
143 spin_unlock(&muxl->transmit_lock); 118out:
119 spin_unlock_bh(&muxl->transmit_lock);
144 return dn; 120 return dn;
145} 121}
146 122
147/* Invariant: lock is taken */
148static struct cflayer *get_up(struct cfmuxl *muxl, u16 id) 123static struct cflayer *get_up(struct cfmuxl *muxl, u16 id)
149{ 124{
150 struct cflayer *up; 125 struct cflayer *up;
151 int idx = id % UP_CACHE_SIZE; 126 int idx = id % UP_CACHE_SIZE;
152 up = muxl->up_cache[idx]; 127 up = rcu_dereference(muxl->up_cache[idx]);
153 if (up == NULL || up->id != id) { 128 if (up == NULL || up->id != id) {
129 spin_lock_bh(&muxl->receive_lock);
154 up = get_from_id(&muxl->srvl_list, id); 130 up = get_from_id(&muxl->srvl_list, id);
155 muxl->up_cache[idx] = up; 131 rcu_assign_pointer(muxl->up_cache[idx], up);
132 spin_unlock_bh(&muxl->receive_lock);
156 } 133 }
157 return up; 134 return up;
158} 135}
159 136
160/* Invariant: lock is taken */
161static struct cflayer *get_dn(struct cfmuxl *muxl, struct dev_info *dev_info) 137static struct cflayer *get_dn(struct cfmuxl *muxl, struct dev_info *dev_info)
162{ 138{
163 struct cflayer *dn; 139 struct cflayer *dn;
164 int idx = dev_info->id % DN_CACHE_SIZE; 140 int idx = dev_info->id % DN_CACHE_SIZE;
165 dn = muxl->dn_cache[idx]; 141 dn = rcu_dereference(muxl->dn_cache[idx]);
166 if (dn == NULL || dn->id != dev_info->id) { 142 if (dn == NULL || dn->id != dev_info->id) {
143 spin_lock_bh(&muxl->transmit_lock);
167 dn = get_from_id(&muxl->frml_list, dev_info->id); 144 dn = get_from_id(&muxl->frml_list, dev_info->id);
168 muxl->dn_cache[idx] = dn; 145 rcu_assign_pointer(muxl->dn_cache[idx], dn);
146 spin_unlock_bh(&muxl->transmit_lock);
169 } 147 }
170 return dn; 148 return dn;
171} 149}
@@ -174,15 +152,22 @@ struct cflayer *cfmuxl_remove_uplayer(struct cflayer *layr, u8 id)
174{ 152{
175 struct cflayer *up; 153 struct cflayer *up;
176 struct cfmuxl *muxl = container_obj(layr); 154 struct cfmuxl *muxl = container_obj(layr);
177 spin_lock(&muxl->receive_lock); 155 int idx = id % UP_CACHE_SIZE;
178 up = get_up(muxl, id); 156
157 if (id == 0) {
158 pr_warn("Trying to remove control layer\n");
159 return NULL;
160 }
161
162 spin_lock_bh(&muxl->receive_lock);
163 up = get_from_id(&muxl->srvl_list, id);
179 if (up == NULL) 164 if (up == NULL)
180 goto out; 165 goto out;
181 memset(muxl->up_cache, 0, sizeof(muxl->up_cache)); 166
182 list_del(&up->node); 167 rcu_assign_pointer(muxl->up_cache[idx], NULL);
183 cfsrvl_put(up); 168 list_del_rcu(&up->node);
184out: 169out:
185 spin_unlock(&muxl->receive_lock); 170 spin_unlock_bh(&muxl->receive_lock);
186 return up; 171 return up;
187} 172}
188 173
@@ -197,58 +182,92 @@ static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt)
197 cfpkt_destroy(pkt); 182 cfpkt_destroy(pkt);
198 return -EPROTO; 183 return -EPROTO;
199 } 184 }
200 185 rcu_read_lock();
201 spin_lock(&muxl->receive_lock);
202 up = get_up(muxl, id); 186 up = get_up(muxl, id);
203 spin_unlock(&muxl->receive_lock); 187
204 if (up == NULL) { 188 if (up == NULL) {
205 		pr_info("Received data on unknown link ID = %d (0x%x) up == NULL",	189 		pr_debug("Received data on unknown link ID = %d (0x%x)"
206 			id, id);	190 			" up == NULL\n", id, id);
207 cfpkt_destroy(pkt); 191 cfpkt_destroy(pkt);
208 /* 192 /*
209 * Don't return ERROR, since modem misbehaves and sends out 193 * Don't return ERROR, since modem misbehaves and sends out
210 * flow on before linksetup response. 194 * flow on before linksetup response.
211 */ 195 */
196
197 rcu_read_unlock();
212 return /* CFGLU_EPROT; */ 0; 198 return /* CFGLU_EPROT; */ 0;
213 } 199 }
200
201 /* We can't hold rcu_lock during receive, so take a ref count instead */
214 cfsrvl_get(up); 202 cfsrvl_get(up);
203 rcu_read_unlock();
204
215 ret = up->receive(up, pkt); 205 ret = up->receive(up, pkt);
206
216 cfsrvl_put(up); 207 cfsrvl_put(up);
217 return ret; 208 return ret;
218} 209}
219 210
220static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt) 211static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt)
221{ 212{
222 int ret;
223 struct cfmuxl *muxl = container_obj(layr); 213 struct cfmuxl *muxl = container_obj(layr);
214 int err;
224 u8 linkid; 215 u8 linkid;
225 struct cflayer *dn; 216 struct cflayer *dn;
226 struct caif_payload_info *info = cfpkt_info(pkt); 217 struct caif_payload_info *info = cfpkt_info(pkt);
227 dn = get_dn(muxl, cfpkt_info(pkt)->dev_info); 218 BUG_ON(!info);
219
220 rcu_read_lock();
221
222 dn = get_dn(muxl, info->dev_info);
228 if (dn == NULL) { 223 if (dn == NULL) {
229 pr_warn("Send data on unknown phy ID = %d (0x%x)\n", 224 pr_debug("Send data on unknown phy ID = %d (0x%x)\n",
230 info->dev_info->id, info->dev_info->id); 225 info->dev_info->id, info->dev_info->id);
226 rcu_read_unlock();
227 cfpkt_destroy(pkt);
231 return -ENOTCONN; 228 return -ENOTCONN;
232 } 229 }
230
233 info->hdr_len += 1; 231 info->hdr_len += 1;
234 linkid = info->channel_id; 232 linkid = info->channel_id;
235 cfpkt_add_head(pkt, &linkid, 1); 233 cfpkt_add_head(pkt, &linkid, 1);
236 ret = dn->transmit(dn, pkt); 234
237 	/* Remove MUX protocol header upon error. */	235 	/* We can't hold rcu_lock during transmit, so take a ref count instead */
238 if (ret < 0) 236 cffrml_hold(dn);
239 cfpkt_extr_head(pkt, &linkid, 1); 237
240 return ret; 238 rcu_read_unlock();
239
240 err = dn->transmit(dn, pkt);
241
242 cffrml_put(dn);
243 return err;
241} 244}
242 245
243static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, 246static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
244 int phyid) 247 int phyid)
245{ 248{
246 struct cfmuxl *muxl = container_obj(layr); 249 struct cfmuxl *muxl = container_obj(layr);
247 struct list_head *node, *next;
248 struct cflayer *layer; 250 struct cflayer *layer;
249 list_for_each_safe(node, next, &muxl->srvl_list) { 251 int idx;
250 layer = list_entry(node, struct cflayer, node); 252
251 if (cfsrvl_phyid_match(layer, phyid)) 253 rcu_read_lock();
254 list_for_each_entry_rcu(layer, &muxl->srvl_list, node) {
255
256 if (cfsrvl_phyid_match(layer, phyid) && layer->ctrlcmd) {
257
258 if ((ctrl == _CAIF_CTRLCMD_PHYIF_DOWN_IND ||
259 ctrl == CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND) &&
260 layer->id != 0) {
261
262 idx = layer->id % UP_CACHE_SIZE;
263 spin_lock_bh(&muxl->receive_lock);
264 rcu_assign_pointer(muxl->up_cache[idx], NULL);
265 list_del_rcu(&layer->node);
266 spin_unlock_bh(&muxl->receive_lock);
267 }
268 /* NOTE: ctrlcmd is not allowed to block */
252 layer->ctrlcmd(layer, ctrl, phyid); 269 layer->ctrlcmd(layer, ctrl, phyid);
270 }
253 } 271 }
272 rcu_read_unlock();
254} 273}
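
The cfmuxl.c rework replaces the lock-protected lists with RCU plus a small pointer cache: get_up()/get_dn() first try an RCU-published cache slot and only fall back to the list walk under the spinlock on a miss, republishing the result with rcu_assign_pointer(). A simplified stand-in sketch of that lookup-cache pattern (names and the cache size are illustrative, not the real cfmuxl types):

    #include <linux/types.h>
    #include <linux/rculist.h>
    #include <linux/spinlock.h>

    #define CACHE_SIZE 8	/* stand-in for UP_CACHE_SIZE / DN_CACHE_SIZE */

    struct layer {
    	struct list_head node;
    	u16 id;
    };

    static struct layer __rcu *cache[CACHE_SIZE];
    static LIST_HEAD(layers);
    static DEFINE_SPINLOCK(layers_lock);

    /* Call under rcu_read_lock(), as cfmuxl_receive()/cfmuxl_transmit() do. */
    static struct layer *lookup_rcu(u16 id)
    {
    	struct layer *l, *it;
    	int idx = id % CACHE_SIZE;

    	l = rcu_dereference(cache[idx]);
    	if (l == NULL || l->id != id) {
    		/* Cache miss: walk the list and republish the slot. */
    		l = NULL;
    		spin_lock_bh(&layers_lock);
    		list_for_each_entry_rcu(it, &layers, node) {
    			if (it->id == id) {
    				l = it;
    				break;
    			}
    		}
    		rcu_assign_pointer(cache[idx], l);
    		spin_unlock_bh(&layers_lock);
    	}
    	return l;
    }

Removal paths must clear the corresponding cache slot (as cfmuxl_remove_uplayer()/cfmuxl_remove_dnlayer() do with rcu_assign_pointer(..., NULL)) before deleting the list entry, so a stale cached pointer can never outlive the object.
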
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index d7e865e2ff65..75d4bfae1a78 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -42,22 +42,22 @@ struct cfpkt_priv_data {
42 bool erronous; 42 bool erronous;
43}; 43};
44 44
45inline struct cfpkt_priv_data *cfpkt_priv(struct cfpkt *pkt) 45static inline struct cfpkt_priv_data *cfpkt_priv(struct cfpkt *pkt)
46{ 46{
47 return (struct cfpkt_priv_data *) pkt->skb.cb; 47 return (struct cfpkt_priv_data *) pkt->skb.cb;
48} 48}
49 49
50inline bool is_erronous(struct cfpkt *pkt) 50static inline bool is_erronous(struct cfpkt *pkt)
51{ 51{
52 return cfpkt_priv(pkt)->erronous; 52 return cfpkt_priv(pkt)->erronous;
53} 53}
54 54
55inline struct sk_buff *pkt_to_skb(struct cfpkt *pkt) 55static inline struct sk_buff *pkt_to_skb(struct cfpkt *pkt)
56{ 56{
57 return &pkt->skb; 57 return &pkt->skb;
58} 58}
59 59
60inline struct cfpkt *skb_to_pkt(struct sk_buff *skb) 60static inline struct cfpkt *skb_to_pkt(struct sk_buff *skb)
61{ 61{
62 return (struct cfpkt *) skb; 62 return (struct cfpkt *) skb;
63} 63}
@@ -97,21 +97,20 @@ inline struct cfpkt *cfpkt_create(u16 len)
97{ 97{
98 return cfpkt_create_pfx(len + PKT_POSTFIX, PKT_PREFIX); 98 return cfpkt_create_pfx(len + PKT_POSTFIX, PKT_PREFIX);
99} 99}
100EXPORT_SYMBOL(cfpkt_create);
101 100
102void cfpkt_destroy(struct cfpkt *pkt) 101void cfpkt_destroy(struct cfpkt *pkt)
103{ 102{
104 struct sk_buff *skb = pkt_to_skb(pkt); 103 struct sk_buff *skb = pkt_to_skb(pkt);
105 kfree_skb(skb); 104 kfree_skb(skb);
106} 105}
107EXPORT_SYMBOL(cfpkt_destroy); 106
108 107
109inline bool cfpkt_more(struct cfpkt *pkt) 108inline bool cfpkt_more(struct cfpkt *pkt)
110{ 109{
111 struct sk_buff *skb = pkt_to_skb(pkt); 110 struct sk_buff *skb = pkt_to_skb(pkt);
112 return skb->len > 0; 111 return skb->len > 0;
113} 112}
114EXPORT_SYMBOL(cfpkt_more); 113
115 114
116int cfpkt_peek_head(struct cfpkt *pkt, void *data, u16 len) 115int cfpkt_peek_head(struct cfpkt *pkt, void *data, u16 len)
117{ 116{
@@ -123,7 +122,6 @@ int cfpkt_peek_head(struct cfpkt *pkt, void *data, u16 len)
123 return !cfpkt_extr_head(pkt, data, len) && 122 return !cfpkt_extr_head(pkt, data, len) &&
124 !cfpkt_add_head(pkt, data, len); 123 !cfpkt_add_head(pkt, data, len);
125} 124}
126EXPORT_SYMBOL(cfpkt_peek_head);
127 125
128int cfpkt_extr_head(struct cfpkt *pkt, void *data, u16 len) 126int cfpkt_extr_head(struct cfpkt *pkt, void *data, u16 len)
129{ 127{
@@ -148,7 +146,6 @@ int cfpkt_extr_head(struct cfpkt *pkt, void *data, u16 len)
148 memcpy(data, from, len); 146 memcpy(data, from, len);
149 return 0; 147 return 0;
150} 148}
151EXPORT_SYMBOL(cfpkt_extr_head);
152 149
153int cfpkt_extr_trail(struct cfpkt *pkt, void *dta, u16 len) 150int cfpkt_extr_trail(struct cfpkt *pkt, void *dta, u16 len)
154{ 151{
@@ -171,13 +168,13 @@ int cfpkt_extr_trail(struct cfpkt *pkt, void *dta, u16 len)
171 memcpy(data, from, len); 168 memcpy(data, from, len);
172 return 0; 169 return 0;
173} 170}
174EXPORT_SYMBOL(cfpkt_extr_trail); 171
175 172
176int cfpkt_pad_trail(struct cfpkt *pkt, u16 len) 173int cfpkt_pad_trail(struct cfpkt *pkt, u16 len)
177{ 174{
178 return cfpkt_add_body(pkt, NULL, len); 175 return cfpkt_add_body(pkt, NULL, len);
179} 176}
180EXPORT_SYMBOL(cfpkt_pad_trail); 177
181 178
182int cfpkt_add_body(struct cfpkt *pkt, const void *data, u16 len) 179int cfpkt_add_body(struct cfpkt *pkt, const void *data, u16 len)
183{ 180{
@@ -226,13 +223,11 @@ int cfpkt_add_body(struct cfpkt *pkt, const void *data, u16 len)
226 memcpy(to, data, len); 223 memcpy(to, data, len);
227 return 0; 224 return 0;
228} 225}
229EXPORT_SYMBOL(cfpkt_add_body);
230 226
231inline int cfpkt_addbdy(struct cfpkt *pkt, u8 data) 227inline int cfpkt_addbdy(struct cfpkt *pkt, u8 data)
232{ 228{
233 return cfpkt_add_body(pkt, &data, 1); 229 return cfpkt_add_body(pkt, &data, 1);
234} 230}
235EXPORT_SYMBOL(cfpkt_addbdy);
236 231
237int cfpkt_add_head(struct cfpkt *pkt, const void *data2, u16 len) 232int cfpkt_add_head(struct cfpkt *pkt, const void *data2, u16 len)
238{ 233{
@@ -259,20 +254,20 @@ int cfpkt_add_head(struct cfpkt *pkt, const void *data2, u16 len)
259 memcpy(to, data, len); 254 memcpy(to, data, len);
260 return 0; 255 return 0;
261} 256}
262EXPORT_SYMBOL(cfpkt_add_head); 257
263 258
264inline int cfpkt_add_trail(struct cfpkt *pkt, const void *data, u16 len) 259inline int cfpkt_add_trail(struct cfpkt *pkt, const void *data, u16 len)
265{ 260{
266 return cfpkt_add_body(pkt, data, len); 261 return cfpkt_add_body(pkt, data, len);
267} 262}
268EXPORT_SYMBOL(cfpkt_add_trail); 263
269 264
270inline u16 cfpkt_getlen(struct cfpkt *pkt) 265inline u16 cfpkt_getlen(struct cfpkt *pkt)
271{ 266{
272 struct sk_buff *skb = pkt_to_skb(pkt); 267 struct sk_buff *skb = pkt_to_skb(pkt);
273 return skb->len; 268 return skb->len;
274} 269}
275EXPORT_SYMBOL(cfpkt_getlen); 270
276 271
277inline u16 cfpkt_iterate(struct cfpkt *pkt, 272inline u16 cfpkt_iterate(struct cfpkt *pkt,
278 u16 (*iter_func)(u16, void *, u16), 273 u16 (*iter_func)(u16, void *, u16),
@@ -290,7 +285,7 @@ inline u16 cfpkt_iterate(struct cfpkt *pkt,
290 } 285 }
291 return iter_func(data, pkt->skb.data, cfpkt_getlen(pkt)); 286 return iter_func(data, pkt->skb.data, cfpkt_getlen(pkt));
292} 287}
293EXPORT_SYMBOL(cfpkt_iterate); 288
294 289
295int cfpkt_setlen(struct cfpkt *pkt, u16 len) 290int cfpkt_setlen(struct cfpkt *pkt, u16 len)
296{ 291{
@@ -315,18 +310,6 @@ int cfpkt_setlen(struct cfpkt *pkt, u16 len)
315 310
316 return cfpkt_getlen(pkt); 311 return cfpkt_getlen(pkt);
317} 312}
318EXPORT_SYMBOL(cfpkt_setlen);
319
320struct cfpkt *cfpkt_create_uplink(const unsigned char *data, unsigned int len)
321{
322 struct cfpkt *pkt = cfpkt_create_pfx(len + PKT_POSTFIX, PKT_PREFIX);
323 if (!pkt)
324 return NULL;
325 if (unlikely(data != NULL))
326 cfpkt_add_body(pkt, data, len);
327 return pkt;
328}
329EXPORT_SYMBOL(cfpkt_create_uplink);
330 313
331struct cfpkt *cfpkt_append(struct cfpkt *dstpkt, 314struct cfpkt *cfpkt_append(struct cfpkt *dstpkt,
332 struct cfpkt *addpkt, 315 struct cfpkt *addpkt,
@@ -368,7 +351,6 @@ struct cfpkt *cfpkt_append(struct cfpkt *dstpkt,
368 dst->len += addlen; 351 dst->len += addlen;
369 return skb_to_pkt(dst); 352 return skb_to_pkt(dst);
370} 353}
371EXPORT_SYMBOL(cfpkt_append);
372 354
373struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos) 355struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos)
374{ 356{
@@ -406,174 +388,13 @@ struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos)
406 skb2->len += len2nd; 388 skb2->len += len2nd;
407 return skb_to_pkt(skb2); 389 return skb_to_pkt(skb2);
408} 390}
409EXPORT_SYMBOL(cfpkt_split);
410
411char *cfpkt_log_pkt(struct cfpkt *pkt, char *buf, int buflen)
412{
413 struct sk_buff *skb = pkt_to_skb(pkt);
414 char *p = buf;
415 int i;
416
417 /*
418 * Sanity check buffer length, it needs to be at least as large as
419 * the header info: ~=50+ bytes
420 */
421 if (buflen < 50)
422 return NULL;
423
424 snprintf(buf, buflen, "%s: pkt:%p len:%ld(%ld+%ld) {%ld,%ld} data: [",
425 is_erronous(pkt) ? "ERRONOUS-SKB" :
426 (skb->data_len != 0 ? "COMPLEX-SKB" : "SKB"),
427 skb,
428 (long) skb->len,
429 (long) (skb_tail_pointer(skb) - skb->data),
430 (long) skb->data_len,
431 (long) (skb->data - skb->head),
432 (long) (skb_tail_pointer(skb) - skb->head));
433 p = buf + strlen(buf);
434
435 for (i = 0; i < skb_tail_pointer(skb) - skb->data && i < 300; i++) {
436 if (p > buf + buflen - 10) {
437 sprintf(p, "...");
438 p = buf + strlen(buf);
439 break;
440 }
441 sprintf(p, "%02x,", skb->data[i]);
442 p = buf + strlen(buf);
443 }
444 sprintf(p, "]\n");
445 return buf;
446}
447EXPORT_SYMBOL(cfpkt_log_pkt);
448
449int cfpkt_raw_append(struct cfpkt *pkt, void **buf, unsigned int buflen)
450{
451 struct sk_buff *skb = pkt_to_skb(pkt);
452 struct sk_buff *lastskb;
453
454 caif_assert(buf != NULL);
455 if (unlikely(is_erronous(pkt)))
456 return -EPROTO;
457 /* Make sure SKB is writable */
458 if (unlikely(skb_cow_data(skb, 0, &lastskb) < 0)) {
459 PKT_ERROR(pkt, "skb_cow_data failed\n");
460 return -EPROTO;
461 }
462
463 if (unlikely(skb_linearize(skb) != 0)) {
464 PKT_ERROR(pkt, "linearize failed\n");
465 return -EPROTO;
466 }
467
468 if (unlikely(skb_tailroom(skb) < buflen)) {
469 PKT_ERROR(pkt, "buffer too short - failed\n");
470 return -EPROTO;
471 }
472
473 *buf = skb_put(skb, buflen);
474 return 1;
475}
476EXPORT_SYMBOL(cfpkt_raw_append);
477 391
478int cfpkt_raw_extract(struct cfpkt *pkt, void **buf, unsigned int buflen) 392bool cfpkt_erroneous(struct cfpkt *pkt)
479{
480 struct sk_buff *skb = pkt_to_skb(pkt);
481
482 caif_assert(buf != NULL);
483 if (unlikely(is_erronous(pkt)))
484 return -EPROTO;
485
486 if (unlikely(buflen > skb->len)) {
487 PKT_ERROR(pkt, "buflen too large - failed\n");
488 return -EPROTO;
489 }
490
491 if (unlikely(buflen > skb_headlen(skb))) {
492 if (unlikely(skb_linearize(skb) != 0)) {
493 PKT_ERROR(pkt, "linearize failed\n");
494 return -EPROTO;
495 }
496 }
497
498 *buf = skb->data;
499 skb_pull(skb, buflen);
500
501 return 1;
502}
503EXPORT_SYMBOL(cfpkt_raw_extract);
504
505inline bool cfpkt_erroneous(struct cfpkt *pkt)
506{ 393{
507 return cfpkt_priv(pkt)->erronous; 394 return cfpkt_priv(pkt)->erronous;
508} 395}
509EXPORT_SYMBOL(cfpkt_erroneous);
510
511struct cfpktq *cfpktq_create(void)
512{
513 struct cfpktq *q = kmalloc(sizeof(struct cfpktq), GFP_ATOMIC);
514 if (!q)
515 return NULL;
516 skb_queue_head_init(&q->head);
517 atomic_set(&q->count, 0);
518 spin_lock_init(&q->lock);
519 return q;
520}
521EXPORT_SYMBOL(cfpktq_create);
522
523void cfpkt_queue(struct cfpktq *pktq, struct cfpkt *pkt, unsigned short prio)
524{
525 atomic_inc(&pktq->count);
526 spin_lock(&pktq->lock);
527 skb_queue_tail(&pktq->head, pkt_to_skb(pkt));
528 spin_unlock(&pktq->lock);
529
530}
531EXPORT_SYMBOL(cfpkt_queue);
532
533struct cfpkt *cfpkt_qpeek(struct cfpktq *pktq)
534{
535 struct cfpkt *tmp;
536 spin_lock(&pktq->lock);
537 tmp = skb_to_pkt(skb_peek(&pktq->head));
538 spin_unlock(&pktq->lock);
539 return tmp;
540}
541EXPORT_SYMBOL(cfpkt_qpeek);
542
543struct cfpkt *cfpkt_dequeue(struct cfpktq *pktq)
544{
545 struct cfpkt *pkt;
546 spin_lock(&pktq->lock);
547 pkt = skb_to_pkt(skb_dequeue(&pktq->head));
548 if (pkt) {
549 atomic_dec(&pktq->count);
550 caif_assert(atomic_read(&pktq->count) >= 0);
551 }
552 spin_unlock(&pktq->lock);
553 return pkt;
554}
555EXPORT_SYMBOL(cfpkt_dequeue);
556
557int cfpkt_qcount(struct cfpktq *pktq)
558{
559 return atomic_read(&pktq->count);
560}
561EXPORT_SYMBOL(cfpkt_qcount);
562
563struct cfpkt *cfpkt_clone_release(struct cfpkt *pkt)
564{
565 struct cfpkt *clone;
566 clone = skb_to_pkt(skb_clone(pkt_to_skb(pkt), GFP_ATOMIC));
567 /* Free original packet. */
568 cfpkt_destroy(pkt);
569 if (!clone)
570 return NULL;
571 return clone;
572}
573EXPORT_SYMBOL(cfpkt_clone_release);
574 396
575struct caif_payload_info *cfpkt_info(struct cfpkt *pkt) 397struct caif_payload_info *cfpkt_info(struct cfpkt *pkt)
576{ 398{
577 return (struct caif_payload_info *)&pkt_to_skb(pkt)->cb; 399 return (struct caif_payload_info *)&pkt_to_skb(pkt)->cb;
578} 400}
579EXPORT_SYMBOL(cfpkt_info);
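cfpkt_info() above is an instance of the usual kernel idiom of keeping per-packet, per-layer metadata in the 48-byte sk_buff control buffer (skb->cb) rather than in a separately allocated side structure. A minimal sketch of the idiom, with struct my_pkt_info as a hypothetical stand-in for caif_payload_info:

#include <linux/bug.h>
#include <linux/skbuff.h>

struct my_pkt_info {
        u16 channel_id;
        u16 hdr_len;
};

static inline struct my_pkt_info *my_pkt_info(struct sk_buff *skb)
{
        /* cb is 48 bytes; refuse to build if the state outgrows it */
        BUILD_BUG_ON(sizeof(struct my_pkt_info) > sizeof(skb->cb));
        return (struct my_pkt_info *)skb->cb;
}

The cb area is owned by whichever layer currently holds the skb, which is why the CAIF service layers (re)fill it immediately before handing a packet down to the MUX layer.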
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index e2fb5fa75795..0deabb440051 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -31,9 +31,9 @@ struct cfrfml {
31 spinlock_t sync; 31 spinlock_t sync;
32}; 32};
33 33
34static void cfrfml_release(struct kref *kref) 34static void cfrfml_release(struct cflayer *layer)
35{ 35{
36 struct cfsrvl *srvl = container_of(kref, struct cfsrvl, ref); 36 struct cfsrvl *srvl = container_of(layer, struct cfsrvl, layer);
37 struct cfrfml *rfml = container_obj(&srvl->layer); 37 struct cfrfml *rfml = container_obj(&srvl->layer);
38 38
39 if (rfml->incomplete_frm) 39 if (rfml->incomplete_frm)
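The cfrfml_release() change is representative of the whole series: the release hook now receives the cflayer itself rather than a kref, and container_of() maps the embedded layer back to its enclosing service object. The idiom in isolation, sketched with hypothetical types:

#include <linux/kernel.h>
#include <linux/slab.h>

struct my_layer { int id; };

struct my_service {
        int state;
        struct my_layer layer;          /* embedded member, not a pointer */
};

static void my_service_release(struct my_layer *l)
{
        /* recover the container from the address of its member */
        struct my_service *s = container_of(l, struct my_service, layer);

        kfree(s);
}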
diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c
index 8303fe3ebf89..2715c84cfa87 100644
--- a/net/caif/cfserl.c
+++ b/net/caif/cfserl.c
@@ -179,15 +179,10 @@ static int cfserl_receive(struct cflayer *l, struct cfpkt *newpkt)
179static int cfserl_transmit(struct cflayer *layer, struct cfpkt *newpkt) 179static int cfserl_transmit(struct cflayer *layer, struct cfpkt *newpkt)
180{ 180{
181 struct cfserl *layr = container_obj(layer); 181 struct cfserl *layr = container_obj(layer);
182 int ret;
183 u8 tmp8 = CFSERL_STX; 182 u8 tmp8 = CFSERL_STX;
184 if (layr->usestx) 183 if (layr->usestx)
185 cfpkt_add_head(newpkt, &tmp8, 1); 184 cfpkt_add_head(newpkt, &tmp8, 1);
186 ret = layer->dn->transmit(layer->dn, newpkt); 185 return layer->dn->transmit(layer->dn, newpkt);
187 if (ret < 0)
188 cfpkt_extr_head(newpkt, &tmp8, 1);
189
190 return ret;
191} 186}
192 187
193static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, 188static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c
index ab5e542526bf..535a1e72b366 100644
--- a/net/caif/cfsrvl.c
+++ b/net/caif/cfsrvl.c
@@ -10,6 +10,7 @@
10#include <linux/types.h> 10#include <linux/types.h>
11#include <linux/errno.h> 11#include <linux/errno.h>
12#include <linux/slab.h> 12#include <linux/slab.h>
13#include <linux/module.h>
13#include <net/caif/caif_layer.h> 14#include <net/caif/caif_layer.h>
14#include <net/caif/cfsrvl.h> 15#include <net/caif/cfsrvl.h>
15#include <net/caif/cfpkt.h> 16#include <net/caif/cfpkt.h>
@@ -27,8 +28,8 @@ static void cfservl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
27{ 28{
28 struct cfsrvl *service = container_obj(layr); 29 struct cfsrvl *service = container_obj(layr);
29 30
30 caif_assert(layr->up != NULL); 31 if (layr->up == NULL || layr->up->ctrlcmd == NULL)
31 caif_assert(layr->up->ctrlcmd != NULL); 32 return;
32 33
33 switch (ctrl) { 34 switch (ctrl) {
34 case CAIF_CTRLCMD_INIT_RSP: 35 case CAIF_CTRLCMD_INIT_RSP:
@@ -151,14 +152,9 @@ static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
151 return -EINVAL; 152 return -EINVAL;
152} 153}
153 154
154void cfservl_destroy(struct cflayer *layer) 155static void cfsrvl_release(struct cflayer *layer)
155{ 156{
156 kfree(layer); 157 struct cfsrvl *service = container_of(layer, struct cfsrvl, layer);
157}
158
159void cfsrvl_release(struct kref *kref)
160{
161 struct cfsrvl *service = container_of(kref, struct cfsrvl, ref);
162 kfree(service); 158 kfree(service);
163} 159}
164 160
@@ -178,10 +174,8 @@ void cfsrvl_init(struct cfsrvl *service,
178 service->dev_info = *dev_info; 174 service->dev_info = *dev_info;
179 service->supports_flowctrl = supports_flowctrl; 175 service->supports_flowctrl = supports_flowctrl;
180 service->release = cfsrvl_release; 176 service->release = cfsrvl_release;
181 kref_init(&service->ref);
182} 177}
183 178
184
185bool cfsrvl_ready(struct cfsrvl *service, int *err) 179bool cfsrvl_ready(struct cfsrvl *service, int *err)
186{ 180{
187 if (service->open && service->modem_flow_on && service->phy_flow_on) 181 if (service->open && service->modem_flow_on && service->phy_flow_on)
@@ -194,6 +188,7 @@ bool cfsrvl_ready(struct cfsrvl *service, int *err)
194 *err = -EAGAIN; 188 *err = -EAGAIN;
195 return false; 189 return false;
196} 190}
191
197u8 cfsrvl_getphyid(struct cflayer *layer) 192u8 cfsrvl_getphyid(struct cflayer *layer)
198{ 193{
199 struct cfsrvl *servl = container_obj(layer); 194 struct cfsrvl *servl = container_obj(layer);
@@ -205,3 +200,26 @@ bool cfsrvl_phyid_match(struct cflayer *layer, int phyid)
205 struct cfsrvl *servl = container_obj(layer); 200 struct cfsrvl *servl = container_obj(layer);
206 return servl->dev_info.id == phyid; 201 return servl->dev_info.id == phyid;
207} 202}
203
204void caif_free_client(struct cflayer *adap_layer)
205{
206 struct cfsrvl *servl;
207 if (adap_layer == NULL || adap_layer->dn == NULL)
208 return;
209 servl = container_obj(adap_layer->dn);
210 servl->release(&servl->layer);
211}
212EXPORT_SYMBOL(caif_free_client);
213
214void caif_client_register_refcnt(struct cflayer *adapt_layer,
215 void (*hold)(struct cflayer *lyr),
216 void (*put)(struct cflayer *lyr))
217{
218 struct cfsrvl *service;
219 service = container_of(adapt_layer->dn, struct cfsrvl, layer);
220
221 WARN_ON(adapt_layer == NULL || adapt_layer->dn == NULL);
222 service->hold = hold;
223 service->put = put;
224}
225EXPORT_SYMBOL(caif_client_register_refcnt);
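caif_free_client() and caif_client_register_refcnt() together spell out the new client-side contract: a client supplies hold/put callbacks so the CAIF core can pin the client's backing object while packets are in flight, and tears the service layer down through its own ->release(). A sketch of a hypothetical client (my_hold()/my_put() stand in for whatever actually pins the client):

static void my_hold(struct cflayer *lyr)
{
        /* e.g. dev_hold() or kref_get() on the owning object */
}

static void my_put(struct cflayer *lyr)
{
        /* the matching release */
}

static void my_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd cmd, int phyid)
{
        if (cmd == CAIF_CTRLCMD_INIT_RSP)
                /* link is up: let the core refcount us from now on */
                caif_client_register_refcnt(layr, my_hold, my_put);
}

static void my_teardown(struct cflayer *adap_layer)
{
        caif_free_client(adap_layer);   /* frees via servl->release() */
}

chnl_net.c below is converted to exactly this pattern.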
diff --git a/net/caif/cfutill.c b/net/caif/cfutill.c
index 315c0d601368..98e027db18ed 100644
--- a/net/caif/cfutill.c
+++ b/net/caif/cfutill.c
@@ -100,10 +100,5 @@ static int cfutill_transmit(struct cflayer *layr, struct cfpkt *pkt)
100 */ 100 */
101 info->hdr_len = 1; 101 info->hdr_len = 1;
102 info->dev_info = &service->dev_info; 102 info->dev_info = &service->dev_info;
103 ret = layr->dn->transmit(layr->dn, pkt); 103 return layr->dn->transmit(layr->dn, pkt);
104 if (ret < 0) {
105 u32 tmp32;
106 cfpkt_extr_head(pkt, &tmp32, 4);
107 }
108 return ret;
109} 104}
diff --git a/net/caif/cfveil.c b/net/caif/cfveil.c
index c3b1dec4acf6..3ec83fbc2887 100644
--- a/net/caif/cfveil.c
+++ b/net/caif/cfveil.c
@@ -82,13 +82,14 @@ static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt)
82 int ret; 82 int ret;
83 struct cfsrvl *service = container_obj(layr); 83 struct cfsrvl *service = container_obj(layr);
84 if (!cfsrvl_ready(service, &ret)) 84 if (!cfsrvl_ready(service, &ret))
85 return ret; 85 goto err;
86 caif_assert(layr->dn != NULL); 86 caif_assert(layr->dn != NULL);
87 caif_assert(layr->dn->transmit != NULL); 87 caif_assert(layr->dn->transmit != NULL);
88 88
89 if (cfpkt_add_head(pkt, &tmp, 1) < 0) { 89 if (cfpkt_add_head(pkt, &tmp, 1) < 0) {
90 pr_err("Packet is erroneous!\n"); 90 pr_err("Packet is erroneous!\n");
91 return -EPROTO; 91 ret = -EPROTO;
92 goto err;
92 } 93 }
93 94
94 /* Add info-> for MUX-layer to route the packet out. */ 95 /* Add info-> for MUX-layer to route the packet out. */
@@ -96,8 +97,8 @@ static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt)
96 info->channel_id = service->layer.id; 97 info->channel_id = service->layer.id;
97 info->hdr_len = 1; 98 info->hdr_len = 1;
98 info->dev_info = &service->dev_info; 99 info->dev_info = &service->dev_info;
99 ret = layr->dn->transmit(layr->dn, pkt); 100 return layr->dn->transmit(layr->dn, pkt);
100 if (ret < 0) 101err:
101 cfpkt_extr_head(pkt, &tmp, 1); 102 cfpkt_destroy(pkt);
102 return ret; 103 return ret;
103} 104}
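The cfutill.c hunk above, this cfveil.c hunk, and cfvidl.c just below all drop the old rollback, where a failed downstream transmit made the caller peel its freshly added header back off. Under the new convention a packet handed to transmit is always consumed: on a local error the layer that still owns the packet destroys it, and a downstream failure is simply propagated. The shape of the convention, sketched with hypothetical names:

static int my_transmit(struct cflayer *layr, struct cfpkt *pkt)
{
        int ret;
        u8 hdr = 0x42;                  /* hypothetical protocol header */

        if (cfpkt_add_head(pkt, &hdr, 1) < 0) {
                ret = -EPROTO;
                goto err;
        }
        /* ownership passes downstream, on success and on failure */
        return layr->dn->transmit(layr->dn, pkt);
err:
        cfpkt_destroy(pkt);             /* we still own it, so we free it */
        return ret;
}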
diff --git a/net/caif/cfvidl.c b/net/caif/cfvidl.c
index bf6fef2a0eff..b2f5989ad455 100644
--- a/net/caif/cfvidl.c
+++ b/net/caif/cfvidl.c
@@ -60,8 +60,5 @@ static int cfvidl_transmit(struct cflayer *layr, struct cfpkt *pkt)
60 info = cfpkt_info(pkt); 60 info = cfpkt_info(pkt);
61 info->channel_id = service->layer.id; 61 info->channel_id = service->layer.id;
62 info->dev_info = &service->dev_info; 62 info->dev_info = &service->dev_info;
63 ret = layr->dn->transmit(layr->dn, pkt); 63 return layr->dn->transmit(layr->dn, pkt);
64 if (ret < 0)
65 cfpkt_extr_head(pkt, &videoheader, 4);
66 return ret;
67} 64}
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 6008d6dc18a0..adbb424403d4 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -20,7 +20,6 @@
20#include <linux/caif/if_caif.h> 20#include <linux/caif/if_caif.h>
21#include <net/rtnetlink.h> 21#include <net/rtnetlink.h>
22#include <net/caif/caif_layer.h> 22#include <net/caif/caif_layer.h>
23#include <net/caif/cfcnfg.h>
24#include <net/caif/cfpkt.h> 23#include <net/caif/cfpkt.h>
25#include <net/caif/caif_dev.h> 24#include <net/caif/caif_dev.h>
26 25
@@ -84,10 +83,11 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
84 if (!priv) 83 if (!priv)
85 return -EINVAL; 84 return -EINVAL;
86 85
86 skb = (struct sk_buff *) cfpkt_tonative(pkt);
87
87 /* Get length of CAIF packet. */ 88 /* Get length of CAIF packet. */
88 pktlen = cfpkt_getlen(pkt); 89 pktlen = skb->len;
89 90
90 skb = (struct sk_buff *) cfpkt_tonative(pkt);
91 /* Pass some minimum information and 91 /* Pass some minimum information and
92 * send the packet to the net stack. 92 * send the packet to the net stack.
93 */ 93 */
@@ -139,20 +139,29 @@ static void close_work(struct work_struct *work)
139 struct chnl_net *dev = NULL; 139 struct chnl_net *dev = NULL;
140 struct list_head *list_node; 140 struct list_head *list_node;
141 struct list_head *_tmp; 141 struct list_head *_tmp;
142 /* May be called with or without RTNL lock held */ 142
143 int islocked = rtnl_is_locked(); 143 rtnl_lock();
144 if (!islocked)
145 rtnl_lock();
146 list_for_each_safe(list_node, _tmp, &chnl_net_list) { 144 list_for_each_safe(list_node, _tmp, &chnl_net_list) {
147 dev = list_entry(list_node, struct chnl_net, list_field); 145 dev = list_entry(list_node, struct chnl_net, list_field);
148 if (dev->state == CAIF_SHUTDOWN) 146 if (dev->state == CAIF_SHUTDOWN)
149 dev_close(dev->netdev); 147 dev_close(dev->netdev);
150 } 148 }
151 if (!islocked) 149 rtnl_unlock();
152 rtnl_unlock();
153} 150}
154static DECLARE_WORK(close_worker, close_work); 151static DECLARE_WORK(close_worker, close_work);
155 152
153static void chnl_hold(struct cflayer *lyr)
154{
155 struct chnl_net *priv = container_of(lyr, struct chnl_net, chnl);
156 dev_hold(priv->netdev);
157}
158
159static void chnl_put(struct cflayer *lyr)
160{
161 struct chnl_net *priv = container_of(lyr, struct chnl_net, chnl);
162 dev_put(priv->netdev);
163}
164
156static void chnl_flowctrl_cb(struct cflayer *layr, enum caif_ctrlcmd flow, 165static void chnl_flowctrl_cb(struct cflayer *layr, enum caif_ctrlcmd flow,
157 int phyid) 166 int phyid)
158{ 167{
@@ -190,6 +199,7 @@ static void chnl_flowctrl_cb(struct cflayer *layr, enum caif_ctrlcmd flow,
190 netif_wake_queue(priv->netdev); 199 netif_wake_queue(priv->netdev);
191 break; 200 break;
192 case CAIF_CTRLCMD_INIT_RSP: 201 case CAIF_CTRLCMD_INIT_RSP:
202 caif_client_register_refcnt(&priv->chnl, chnl_hold, chnl_put);
193 priv->state = CAIF_CONNECTED; 203 priv->state = CAIF_CONNECTED;
194 priv->flowenabled = true; 204 priv->flowenabled = true;
195 netif_wake_queue(priv->netdev); 205 netif_wake_queue(priv->netdev);
@@ -257,8 +267,9 @@ static int chnl_net_open(struct net_device *dev)
257 267
258 if (priv->state != CAIF_CONNECTING) { 268 if (priv->state != CAIF_CONNECTING) {
259 priv->state = CAIF_CONNECTING; 269 priv->state = CAIF_CONNECTING;
260 result = caif_connect_client(&priv->conn_req, &priv->chnl, 270 result = caif_connect_client(dev_net(dev), &priv->conn_req,
261 &llifindex, &headroom, &tailroom); 271 &priv->chnl, &llifindex,
272 &headroom, &tailroom);
262 if (result != 0) { 273 if (result != 0) {
263 pr_debug("err: " 274 pr_debug("err: "
264 "Unable to register and open device," 275 "Unable to register and open device,"
@@ -314,7 +325,7 @@ static int chnl_net_open(struct net_device *dev)
314 325
315 if (result == 0) { 326 if (result == 0) {
316 pr_debug("connect timeout\n"); 327 pr_debug("connect timeout\n");
317 caif_disconnect_client(&priv->chnl); 328 caif_disconnect_client(dev_net(dev), &priv->chnl);
318 priv->state = CAIF_DISCONNECTED; 329 priv->state = CAIF_DISCONNECTED;
319 pr_debug("state disconnected\n"); 330 pr_debug("state disconnected\n");
320 result = -ETIMEDOUT; 331 result = -ETIMEDOUT;
@@ -330,7 +341,7 @@ static int chnl_net_open(struct net_device *dev)
330 return 0; 341 return 0;
331 342
332error: 343error:
333 caif_disconnect_client(&priv->chnl); 344 caif_disconnect_client(dev_net(dev), &priv->chnl);
334 priv->state = CAIF_DISCONNECTED; 345 priv->state = CAIF_DISCONNECTED;
335 pr_debug("state disconnected\n"); 346 pr_debug("state disconnected\n");
336 return result; 347 return result;
@@ -344,7 +355,7 @@ static int chnl_net_stop(struct net_device *dev)
344 ASSERT_RTNL(); 355 ASSERT_RTNL();
345 priv = netdev_priv(dev); 356 priv = netdev_priv(dev);
346 priv->state = CAIF_DISCONNECTED; 357 priv->state = CAIF_DISCONNECTED;
347 caif_disconnect_client(&priv->chnl); 358 caif_disconnect_client(dev_net(dev), &priv->chnl);
348 return 0; 359 return 0;
349} 360}
350 361
@@ -373,11 +384,18 @@ static const struct net_device_ops netdev_ops = {
373 .ndo_start_xmit = chnl_net_start_xmit, 384 .ndo_start_xmit = chnl_net_start_xmit,
374}; 385};
375 386
387static void chnl_net_destructor(struct net_device *dev)
388{
389 struct chnl_net *priv = netdev_priv(dev);
390 caif_free_client(&priv->chnl);
391 free_netdev(dev);
392}
393
376static void ipcaif_net_setup(struct net_device *dev) 394static void ipcaif_net_setup(struct net_device *dev)
377{ 395{
378 struct chnl_net *priv; 396 struct chnl_net *priv;
379 dev->netdev_ops = &netdev_ops; 397 dev->netdev_ops = &netdev_ops;
380 dev->destructor = free_netdev; 398 dev->destructor = chnl_net_destructor;
381 dev->flags |= IFF_NOARP; 399 dev->flags |= IFF_NOARP;
382 dev->flags |= IFF_POINTOPOINT; 400 dev->flags |= IFF_POINTOPOINT;
383 dev->mtu = GPRS_PDP_MTU; 401 dev->mtu = GPRS_PDP_MTU;
@@ -391,7 +409,7 @@ static void ipcaif_net_setup(struct net_device *dev)
391 priv->conn_req.link_selector = CAIF_LINK_HIGH_BANDW; 409 priv->conn_req.link_selector = CAIF_LINK_HIGH_BANDW;
392 priv->conn_req.priority = CAIF_PRIO_LOW; 410 priv->conn_req.priority = CAIF_PRIO_LOW;
393 /* Insert illegal value */ 411 /* Insert illegal value */
394 priv->conn_req.sockaddr.u.dgm.connection_id = -1; 412 priv->conn_req.sockaddr.u.dgm.connection_id = 0;
395 priv->flowenabled = false; 413 priv->flowenabled = false;
396 414
397 init_waitqueue_head(&priv->netmgmt_wq); 415 init_waitqueue_head(&priv->netmgmt_wq);
@@ -453,6 +471,10 @@ static int ipcaif_newlink(struct net *src_net, struct net_device *dev,
453 pr_warn("device rtml registration failed\n"); 471 pr_warn("device rtml registration failed\n");
454 else 472 else
455 list_add(&caifdev->list_field, &chnl_net_list); 473 list_add(&caifdev->list_field, &chnl_net_list);
474
475 /* Use the ifindex as connection-id if none was given */

476 if (caifdev->conn_req.sockaddr.u.dgm.connection_id == 0)
477 caifdev->conn_req.sockaddr.u.dgm.connection_id = dev->ifindex;
456 return ret; 478 return ret;
457} 479}
458 480
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 733d66f1b05a..094fc5332d42 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -84,8 +84,8 @@ static DEFINE_SPINLOCK(can_rcvlists_lock);
84static struct kmem_cache *rcv_cache __read_mostly; 84static struct kmem_cache *rcv_cache __read_mostly;
85 85
86/* table of registered CAN protocols */ 86/* table of registered CAN protocols */
87static struct can_proto *proto_tab[CAN_NPROTO] __read_mostly; 87static const struct can_proto *proto_tab[CAN_NPROTO] __read_mostly;
88static DEFINE_SPINLOCK(proto_tab_lock); 88static DEFINE_MUTEX(proto_tab_lock);
89 89
90struct timer_list can_stattimer; /* timer for statistics update */ 90struct timer_list can_stattimer; /* timer for statistics update */
91struct s_stats can_stats; /* packet statistics */ 91struct s_stats can_stats; /* packet statistics */
@@ -115,11 +115,29 @@ static void can_sock_destruct(struct sock *sk)
115 skb_queue_purge(&sk->sk_receive_queue); 115 skb_queue_purge(&sk->sk_receive_queue);
116} 116}
117 117
118static const struct can_proto *can_get_proto(int protocol)
119{
120 const struct can_proto *cp;
121
122 rcu_read_lock();
123 cp = rcu_dereference(proto_tab[protocol]);
124 if (cp && !try_module_get(cp->prot->owner))
125 cp = NULL;
126 rcu_read_unlock();
127
128 return cp;
129}
130
131static inline void can_put_proto(const struct can_proto *cp)
132{
133 module_put(cp->prot->owner);
134}
135
118static int can_create(struct net *net, struct socket *sock, int protocol, 136static int can_create(struct net *net, struct socket *sock, int protocol,
119 int kern) 137 int kern)
120{ 138{
121 struct sock *sk; 139 struct sock *sk;
122 struct can_proto *cp; 140 const struct can_proto *cp;
123 int err = 0; 141 int err = 0;
124 142
125 sock->state = SS_UNCONNECTED; 143 sock->state = SS_UNCONNECTED;
@@ -130,9 +148,12 @@ static int can_create(struct net *net, struct socket *sock, int protocol,
130 if (!net_eq(net, &init_net)) 148 if (!net_eq(net, &init_net))
131 return -EAFNOSUPPORT; 149 return -EAFNOSUPPORT;
132 150
151 cp = can_get_proto(protocol);
152
133#ifdef CONFIG_MODULES 153#ifdef CONFIG_MODULES
134 /* try to load protocol module kernel is modular */ 154 if (!cp) {
135 if (!proto_tab[protocol]) { 155 /* try to load protocol module if kernel is modular */
156
136 err = request_module("can-proto-%d", protocol); 157 err = request_module("can-proto-%d", protocol);
137 158
138 /* 159 /*
@@ -143,22 +164,18 @@ static int can_create(struct net *net, struct socket *sock, int protocol,
143 if (err && printk_ratelimit()) 164 if (err && printk_ratelimit())
144 printk(KERN_ERR "can: request_module " 165 printk(KERN_ERR "can: request_module "
145 "(can-proto-%d) failed.\n", protocol); 166 "(can-proto-%d) failed.\n", protocol);
167
168 cp = can_get_proto(protocol);
146 } 169 }
147#endif 170#endif
148 171
149 spin_lock(&proto_tab_lock);
150 cp = proto_tab[protocol];
151 if (cp && !try_module_get(cp->prot->owner))
152 cp = NULL;
153 spin_unlock(&proto_tab_lock);
154
155 /* check for available protocol and correct usage */ 172 /* check for available protocol and correct usage */
156 173
157 if (!cp) 174 if (!cp)
158 return -EPROTONOSUPPORT; 175 return -EPROTONOSUPPORT;
159 176
160 if (cp->type != sock->type) { 177 if (cp->type != sock->type) {
161 err = -EPROTONOSUPPORT; 178 err = -EPROTOTYPE;
162 goto errout; 179 goto errout;
163 } 180 }
164 181
@@ -183,7 +200,7 @@ static int can_create(struct net *net, struct socket *sock, int protocol,
183 } 200 }
184 201
185 errout: 202 errout:
186 module_put(cp->prot->owner); 203 can_put_proto(cp);
187 return err; 204 return err;
188} 205}
189 206
@@ -679,7 +696,7 @@ drop:
679 * -EBUSY protocol already in use 696 * -EBUSY protocol already in use
680 * -ENOBUF if proto_register() fails 697 * -ENOBUF if proto_register() fails
681 */ 698 */
682int can_proto_register(struct can_proto *cp) 699int can_proto_register(const struct can_proto *cp)
683{ 700{
684 int proto = cp->protocol; 701 int proto = cp->protocol;
685 int err = 0; 702 int err = 0;
@@ -694,15 +711,16 @@ int can_proto_register(struct can_proto *cp)
694 if (err < 0) 711 if (err < 0)
695 return err; 712 return err;
696 713
697 spin_lock(&proto_tab_lock); 714 mutex_lock(&proto_tab_lock);
715
698 if (proto_tab[proto]) { 716 if (proto_tab[proto]) {
699 printk(KERN_ERR "can: protocol %d already registered\n", 717 printk(KERN_ERR "can: protocol %d already registered\n",
700 proto); 718 proto);
701 err = -EBUSY; 719 err = -EBUSY;
702 } else 720 } else
703 proto_tab[proto] = cp; 721 rcu_assign_pointer(proto_tab[proto], cp);
704 722
705 spin_unlock(&proto_tab_lock); 723 mutex_unlock(&proto_tab_lock);
706 724
707 if (err < 0) 725 if (err < 0)
708 proto_unregister(cp->prot); 726 proto_unregister(cp->prot);
@@ -715,17 +733,16 @@ EXPORT_SYMBOL(can_proto_register);
715 * can_proto_unregister - unregister CAN transport protocol 733 * can_proto_unregister - unregister CAN transport protocol
716 * @cp: pointer to CAN protocol structure 734 * @cp: pointer to CAN protocol structure
717 */ 735 */
718void can_proto_unregister(struct can_proto *cp) 736void can_proto_unregister(const struct can_proto *cp)
719{ 737{
720 int proto = cp->protocol; 738 int proto = cp->protocol;
721 739
722 spin_lock(&proto_tab_lock); 740 mutex_lock(&proto_tab_lock);
723 if (!proto_tab[proto]) { 741 BUG_ON(proto_tab[proto] != cp);
724 printk(KERN_ERR "BUG: can: protocol %d is not registered\n", 742 rcu_assign_pointer(proto_tab[proto], NULL);
725 proto); 743 mutex_unlock(&proto_tab_lock);
726 } 744
727 proto_tab[proto] = NULL; 745 synchronize_rcu();
728 spin_unlock(&proto_tab_lock);
729 746
730 proto_unregister(cp->prot); 747 proto_unregister(cp->prot);
731} 748}
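The af_can.c conversion is a textbook lockless-reader pattern: readers find the protocol under rcu_read_lock() and take a module reference before leaving the read-side critical section, while writers publish and retract entries under a mutex and wait out a grace period before the slot may be reused. Condensed to its essentials (my_proto/my_tab hypothetical):

#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>

struct my_proto {
        int protocol;
        struct module *owner;
};

#define MY_NPROTO 8

static const struct my_proto *my_tab[MY_NPROTO];        /* RCU-protected */
static DEFINE_MUTEX(my_tab_lock);                       /* writers only */

static const struct my_proto *my_get(int n)
{
        const struct my_proto *p;

        rcu_read_lock();
        p = rcu_dereference(my_tab[n]);
        if (p && !try_module_get(p->owner))     /* owner may be unloading */
                p = NULL;
        rcu_read_unlock();
        return p;
}

static void my_unregister(const struct my_proto *p)
{
        mutex_lock(&my_tab_lock);
        rcu_assign_pointer(my_tab[p->protocol], NULL);
        mutex_unlock(&my_tab_lock);
        synchronize_rcu();      /* all readers that saw p are now done */
}

Note the ordering in can_create(): the lookup is retried after request_module(), since a freshly loaded protocol module registers itself during its init.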
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 8a6a05e7c3c8..184a6572b67e 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -165,9 +165,9 @@ static int bcm_proc_show(struct seq_file *m, void *v)
165 struct bcm_sock *bo = bcm_sk(sk); 165 struct bcm_sock *bo = bcm_sk(sk);
166 struct bcm_op *op; 166 struct bcm_op *op;
167 167
168 seq_printf(m, ">>> socket %p", sk->sk_socket); 168 seq_printf(m, ">>> socket %pK", sk->sk_socket);
169 seq_printf(m, " / sk %p", sk); 169 seq_printf(m, " / sk %pK", sk);
170 seq_printf(m, " / bo %p", bo); 170 seq_printf(m, " / bo %pK", bo);
171 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs); 171 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
172 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex)); 172 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
173 seq_printf(m, " <<<\n"); 173 seq_printf(m, " <<<\n");
@@ -1601,7 +1601,7 @@ static struct proto bcm_proto __read_mostly = {
1601 .init = bcm_init, 1601 .init = bcm_init,
1602}; 1602};
1603 1603
1604static struct can_proto bcm_can_proto __read_mostly = { 1604static const struct can_proto bcm_can_proto = {
1605 .type = SOCK_DGRAM, 1605 .type = SOCK_DGRAM,
1606 .protocol = CAN_BCM, 1606 .protocol = CAN_BCM,
1607 .ops = &bcm_ops, 1607 .ops = &bcm_ops,
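The %p to %pK switch makes the printed socket addresses subject to the kptr_restrict sysctl: with kptr_restrict=1, readers without CAP_SYSLOG see all zeros instead of real kernel pointers, and with kptr_restrict=2 everyone does, which keeps this procfs output from leaking kernel addresses. Usage is just a format-string change:

/* unprivileged readers see 0000000000000000 when restricted */
seq_printf(m, ">>> socket %pK", sk->sk_socket);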
diff --git a/net/can/proc.c b/net/can/proc.c
index f4265cc9c3fb..0016f7339699 100644
--- a/net/can/proc.c
+++ b/net/can/proc.c
@@ -204,12 +204,11 @@ static void can_print_rcvlist(struct seq_file *m, struct hlist_head *rx_list,
204 204
205 hlist_for_each_entry_rcu(r, n, rx_list, list) { 205 hlist_for_each_entry_rcu(r, n, rx_list, list) {
206 char *fmt = (r->can_id & CAN_EFF_FLAG)? 206 char *fmt = (r->can_id & CAN_EFF_FLAG)?
207 " %-5s %08X %08x %08x %08x %8ld %s\n" : 207 " %-5s %08x %08x %pK %pK %8ld %s\n" :
208 " %-5s %03X %08x %08lx %08lx %8ld %s\n"; 208 " %-5s %03x %08x %pK %pK %8ld %s\n";
209 209
210 seq_printf(m, fmt, DNAME(dev), r->can_id, r->mask, 210 seq_printf(m, fmt, DNAME(dev), r->can_id, r->mask,
211 (unsigned long)r->func, (unsigned long)r->data, 211 r->func, r->data, r->matches, r->ident);
212 r->matches, r->ident);
213 } 212 }
214} 213}
215 214
diff --git a/net/can/raw.c b/net/can/raw.c
index 0eb39a7fdf64..dea99a6e596c 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -774,7 +774,7 @@ static struct proto raw_proto __read_mostly = {
774 .init = raw_init, 774 .init = raw_init,
775}; 775};
776 776
777static struct can_proto raw_can_proto __read_mostly = { 777static const struct can_proto raw_can_proto = {
778 .type = SOCK_RAW, 778 .type = SOCK_RAW,
779 .protocol = CAN_RAW, 779 .protocol = CAN_RAW,
780 .ops = &raw_ops, 780 .ops = &raw_ops,
diff --git a/net/compat.c b/net/compat.c
index 3649d5895361..c578d9382e19 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -722,11 +722,11 @@ EXPORT_SYMBOL(compat_mc_getsockopt);
722 722
723/* Argument list sizes for compat_sys_socketcall */ 723/* Argument list sizes for compat_sys_socketcall */
724#define AL(x) ((x) * sizeof(u32)) 724#define AL(x) ((x) * sizeof(u32))
725static unsigned char nas[20] = { 725static unsigned char nas[21] = {
726 AL(0), AL(3), AL(3), AL(3), AL(2), AL(3), 726 AL(0), AL(3), AL(3), AL(3), AL(2), AL(3),
727 AL(3), AL(3), AL(4), AL(4), AL(4), AL(6), 727 AL(3), AL(3), AL(4), AL(4), AL(4), AL(6),
728 AL(6), AL(2), AL(5), AL(5), AL(3), AL(3), 728 AL(6), AL(2), AL(5), AL(5), AL(3), AL(3),
729 AL(4), AL(5) 729 AL(4), AL(5), AL(4)
730}; 730};
731#undef AL 731#undef AL
732 732
@@ -735,6 +735,13 @@ asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, uns
735 return sys_sendmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT); 735 return sys_sendmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
736} 736}
737 737
738asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg,
739 unsigned vlen, unsigned int flags)
740{
741 return __sys_sendmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
742 flags | MSG_CMSG_COMPAT);
743}
744
738asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg, unsigned int flags) 745asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg, unsigned int flags)
739{ 746{
740 return sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT); 747 return sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
@@ -780,7 +787,7 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
780 u32 a[6]; 787 u32 a[6];
781 u32 a0, a1; 788 u32 a0, a1;
782 789
783 if (call < SYS_SOCKET || call > SYS_RECVMMSG) 790 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
784 return -EINVAL; 791 return -EINVAL;
785 if (copy_from_user(a, args, nas[call])) 792 if (copy_from_user(a, args, nas[call]))
786 return -EFAULT; 793 return -EFAULT;
@@ -839,6 +846,9 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
839 case SYS_SENDMSG: 846 case SYS_SENDMSG:
840 ret = compat_sys_sendmsg(a0, compat_ptr(a1), a[2]); 847 ret = compat_sys_sendmsg(a0, compat_ptr(a1), a[2]);
841 break; 848 break;
849 case SYS_SENDMMSG:
850 ret = compat_sys_sendmmsg(a0, compat_ptr(a1), a[2], a[3]);
851 break;
842 case SYS_RECVMSG: 852 case SYS_RECVMSG:
843 ret = compat_sys_recvmsg(a0, compat_ptr(a1), a[2]); 853 ret = compat_sys_recvmsg(a0, compat_ptr(a1), a[2]);
844 break; 854 break;
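nas[] holds, per socketcall number, how many bytes of u32 argument slots to copy in from userspace, so wiring up SYS_SENDMMSG means both raising the range check and appending AL(4): sixteen bytes for the four arguments (fd, mmsg, vlen, flags). A condensed view of the demux path for the new call, assuming the table entry above:

u32 a[6];

if (copy_from_user(a, args, nas[SYS_SENDMMSG]))   /* AL(4) == 16 bytes */
        return -EFAULT;
return compat_sys_sendmmsg(a[0], compat_ptr(a[1]), a[2], a[3]);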
diff --git a/net/core/dev.c b/net/core/dev.c
index b624fe4d9bd7..9c58c1ec41a9 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -948,7 +948,7 @@ int dev_alloc_name(struct net_device *dev, const char *name)
948} 948}
949EXPORT_SYMBOL(dev_alloc_name); 949EXPORT_SYMBOL(dev_alloc_name);
950 950
951static int dev_get_valid_name(struct net_device *dev, const char *name, bool fmt) 951static int dev_get_valid_name(struct net_device *dev, const char *name)
952{ 952{
953 struct net *net; 953 struct net *net;
954 954
@@ -958,7 +958,7 @@ static int dev_get_valid_name(struct net_device *dev, const char *name, bool fmt
958 if (!dev_valid_name(name)) 958 if (!dev_valid_name(name))
959 return -EINVAL; 959 return -EINVAL;
960 960
961 if (fmt && strchr(name, '%')) 961 if (strchr(name, '%'))
962 return dev_alloc_name(dev, name); 962 return dev_alloc_name(dev, name);
963 else if (__dev_get_by_name(net, name)) 963 else if (__dev_get_by_name(net, name))
964 return -EEXIST; 964 return -EEXIST;
@@ -995,7 +995,7 @@ int dev_change_name(struct net_device *dev, const char *newname)
995 995
996 memcpy(oldname, dev->name, IFNAMSIZ); 996 memcpy(oldname, dev->name, IFNAMSIZ);
997 997
998 err = dev_get_valid_name(dev, newname, 1); 998 err = dev_get_valid_name(dev, newname);
999 if (err < 0) 999 if (err < 0)
1000 return err; 1000 return err;
1001 1001
@@ -1007,7 +1007,7 @@ rollback:
1007 } 1007 }
1008 1008
1009 write_lock_bh(&dev_base_lock); 1009 write_lock_bh(&dev_base_lock);
1010 hlist_del(&dev->name_hlist); 1010 hlist_del_rcu(&dev->name_hlist);
1011 write_unlock_bh(&dev_base_lock); 1011 write_unlock_bh(&dev_base_lock);
1012 1012
1013 synchronize_rcu(); 1013 synchronize_rcu();
@@ -1308,6 +1308,13 @@ void dev_disable_lro(struct net_device *dev)
1308{ 1308{
1309 u32 flags; 1309 u32 flags;
1310 1310
1311 /*
1312 * If we're trying to disable LRO on a VLAN device,
1313 * use the underlying physical device instead
1314 */
1315 if (is_vlan_dev(dev))
1316 dev = vlan_dev_real_dev(dev);
1317
1311 if (dev->ethtool_ops && dev->ethtool_ops->get_flags) 1318 if (dev->ethtool_ops && dev->ethtool_ops->get_flags)
1312 flags = dev->ethtool_ops->get_flags(dev); 1319 flags = dev->ethtool_ops->get_flags(dev);
1313 else 1320 else
@@ -1317,7 +1324,8 @@ void dev_disable_lro(struct net_device *dev)
1317 return; 1324 return;
1318 1325
1319 __ethtool_set_flags(dev, flags & ~ETH_FLAG_LRO); 1326 __ethtool_set_flags(dev, flags & ~ETH_FLAG_LRO);
1320 WARN_ON(dev->features & NETIF_F_LRO); 1327 if (unlikely(dev->features & NETIF_F_LRO))
1328 netdev_WARN(dev, "failed to disable LRO!\n");
1321} 1329}
1322EXPORT_SYMBOL(dev_disable_lro); 1330EXPORT_SYMBOL(dev_disable_lro);
1323 1331
@@ -2088,6 +2096,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2088{ 2096{
2089 const struct net_device_ops *ops = dev->netdev_ops; 2097 const struct net_device_ops *ops = dev->netdev_ops;
2090 int rc = NETDEV_TX_OK; 2098 int rc = NETDEV_TX_OK;
2099 unsigned int skb_len;
2091 2100
2092 if (likely(!skb->next)) { 2101 if (likely(!skb->next)) {
2093 u32 features; 2102 u32 features;
@@ -2138,8 +2147,9 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2138 } 2147 }
2139 } 2148 }
2140 2149
2150 skb_len = skb->len;
2141 rc = ops->ndo_start_xmit(skb, dev); 2151 rc = ops->ndo_start_xmit(skb, dev);
2142 trace_net_dev_xmit(skb, rc); 2152 trace_net_dev_xmit(skb, rc, dev, skb_len);
2143 if (rc == NETDEV_TX_OK) 2153 if (rc == NETDEV_TX_OK)
2144 txq_trans_update(txq); 2154 txq_trans_update(txq);
2145 return rc; 2155 return rc;
@@ -2159,8 +2169,9 @@ gso:
2159 if (dev->priv_flags & IFF_XMIT_DST_RELEASE) 2169 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2160 skb_dst_drop(nskb); 2170 skb_dst_drop(nskb);
2161 2171
2172 skb_len = nskb->len;
2162 rc = ops->ndo_start_xmit(nskb, dev); 2173 rc = ops->ndo_start_xmit(nskb, dev);
2163 trace_net_dev_xmit(nskb, rc); 2174 trace_net_dev_xmit(nskb, rc, dev, skb_len);
2164 if (unlikely(rc != NETDEV_TX_OK)) { 2175 if (unlikely(rc != NETDEV_TX_OK)) {
2165 if (rc & ~NETDEV_TX_MASK) 2176 if (rc & ~NETDEV_TX_MASK)
2166 goto out_kfree_gso_skb; 2177 goto out_kfree_gso_skb;
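The skb_len temporaries close a use-after-free in the tracepoint: ndo_start_xmit() may free the skb before returning (a driver can complete transmission synchronously), so any skb field the tracepoint wants to record must be copied out while the stack still owns the buffer. The rule in miniature:

unsigned int len = skb->len;    /* sample while we still own the skb */
rc = ops->ndo_start_xmit(skb, dev);
/* skb may already be freed here: pass the saved length, and treat
 * the skb argument as an opaque cookie, never dereference it */
trace_net_dev_xmit(skb, rc, dev, len);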
@@ -2504,8 +2515,8 @@ static inline void ____napi_schedule(struct softnet_data *sd,
2504__u32 __skb_get_rxhash(struct sk_buff *skb) 2515__u32 __skb_get_rxhash(struct sk_buff *skb)
2505{ 2516{
2506 int nhoff, hash = 0, poff; 2517 int nhoff, hash = 0, poff;
2507 struct ipv6hdr *ip6; 2518 const struct ipv6hdr *ip6;
2508 struct iphdr *ip; 2519 const struct iphdr *ip;
2509 u8 ip_proto; 2520 u8 ip_proto;
2510 u32 addr1, addr2, ihl; 2521 u32 addr1, addr2, ihl;
2511 union { 2522 union {
@@ -2520,7 +2531,7 @@ __u32 __skb_get_rxhash(struct sk_buff *skb)
2520 if (!pskb_may_pull(skb, sizeof(*ip) + nhoff)) 2531 if (!pskb_may_pull(skb, sizeof(*ip) + nhoff))
2521 goto done; 2532 goto done;
2522 2533
2523 ip = (struct iphdr *) (skb->data + nhoff); 2534 ip = (const struct iphdr *) (skb->data + nhoff);
2524 if (ip->frag_off & htons(IP_MF | IP_OFFSET)) 2535 if (ip->frag_off & htons(IP_MF | IP_OFFSET))
2525 ip_proto = 0; 2536 ip_proto = 0;
2526 else 2537 else
@@ -2533,7 +2544,7 @@ __u32 __skb_get_rxhash(struct sk_buff *skb)
2533 if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff)) 2544 if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff))
2534 goto done; 2545 goto done;
2535 2546
2536 ip6 = (struct ipv6hdr *) (skb->data + nhoff); 2547 ip6 = (const struct ipv6hdr *) (skb->data + nhoff);
2537 ip_proto = ip6->nexthdr; 2548 ip_proto = ip6->nexthdr;
2538 addr1 = (__force u32) ip6->saddr.s6_addr32[3]; 2549 addr1 = (__force u32) ip6->saddr.s6_addr32[3];
2539 addr2 = (__force u32) ip6->daddr.s6_addr32[3]; 2550 addr2 = (__force u32) ip6->daddr.s6_addr32[3];
@@ -3078,25 +3089,6 @@ void netdev_rx_handler_unregister(struct net_device *dev)
3078} 3089}
3079EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister); 3090EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
3080 3091
3081static void vlan_on_bond_hook(struct sk_buff *skb)
3082{
3083 /*
3084 * Make sure ARP frames received on VLAN interfaces stacked on
3085 * bonding interfaces still make their way to any base bonding
3086 * device that may have registered for a specific ptype.
3087 */
3088 if (skb->dev->priv_flags & IFF_802_1Q_VLAN &&
3089 vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING &&
3090 skb->protocol == htons(ETH_P_ARP)) {
3091 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
3092
3093 if (!skb2)
3094 return;
3095 skb2->dev = vlan_dev_real_dev(skb->dev);
3096 netif_rx(skb2);
3097 }
3098}
3099
3100static int __netif_receive_skb(struct sk_buff *skb) 3092static int __netif_receive_skb(struct sk_buff *skb)
3101{ 3093{
3102 struct packet_type *ptype, *pt_prev; 3094 struct packet_type *ptype, *pt_prev;
@@ -3122,7 +3114,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
3122 3114
3123 skb_reset_network_header(skb); 3115 skb_reset_network_header(skb);
3124 skb_reset_transport_header(skb); 3116 skb_reset_transport_header(skb);
3125 skb->mac_len = skb->network_header - skb->mac_header; 3117 skb_reset_mac_len(skb);
3126 3118
3127 pt_prev = NULL; 3119 pt_prev = NULL;
3128 3120
@@ -3132,6 +3124,12 @@ another_round:
3132 3124
3133 __this_cpu_inc(softnet_data.processed); 3125 __this_cpu_inc(softnet_data.processed);
3134 3126
3127 if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
3128 skb = vlan_untag(skb);
3129 if (unlikely(!skb))
3130 goto out;
3131 }
3132
3135#ifdef CONFIG_NET_CLS_ACT 3133#ifdef CONFIG_NET_CLS_ACT
3136 if (skb->tc_verd & TC_NCLS) { 3134 if (skb->tc_verd & TC_NCLS) {
3137 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd); 3135 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
@@ -3179,15 +3177,13 @@ ncls:
3179 ret = deliver_skb(skb, pt_prev, orig_dev); 3177 ret = deliver_skb(skb, pt_prev, orig_dev);
3180 pt_prev = NULL; 3178 pt_prev = NULL;
3181 } 3179 }
3182 if (vlan_hwaccel_do_receive(&skb)) { 3180 if (vlan_do_receive(&skb)) {
3183 ret = __netif_receive_skb(skb); 3181 ret = __netif_receive_skb(skb);
3184 goto out; 3182 goto out;
3185 } else if (unlikely(!skb)) 3183 } else if (unlikely(!skb))
3186 goto out; 3184 goto out;
3187 } 3185 }
3188 3186
3189 vlan_on_bond_hook(skb);
3190
3191 /* deliver only exact match when indicated */ 3187 /* deliver only exact match when indicated */
3192 null_or_dev = deliver_exact ? skb->dev : NULL; 3188 null_or_dev = deliver_exact ? skb->dev : NULL;
3193 3189
@@ -4308,10 +4304,8 @@ int netdev_set_master(struct net_device *slave, struct net_device *master)
4308 4304
4309 slave->master = master; 4305 slave->master = master;
4310 4306
4311 if (old) { 4307 if (old)
4312 synchronize_net();
4313 dev_put(old); 4308 dev_put(old);
4314 }
4315 return 0; 4309 return 0;
4316} 4310}
4317EXPORT_SYMBOL(netdev_set_master); 4311EXPORT_SYMBOL(netdev_set_master);
@@ -4512,6 +4506,30 @@ void dev_set_rx_mode(struct net_device *dev)
4512} 4506}
4513 4507
4514/** 4508/**
4509 * dev_ethtool_get_settings - call device's ethtool_ops::get_settings()
4510 * @dev: device
4511 * @cmd: memory area for ethtool_ops::get_settings() result
4512 *
4513 * The cmd arg is initialized properly (cleared and
4514 * ethtool_cmd::cmd field set to ETHTOOL_GSET).
4515 *
4516 * Return device's ethtool_ops::get_settings() result value or
4517 * -EOPNOTSUPP when device doesn't expose
4518 * ethtool_ops::get_settings() operation.
4519 */
4520int dev_ethtool_get_settings(struct net_device *dev,
4521 struct ethtool_cmd *cmd)
4522{
4523 if (!dev->ethtool_ops || !dev->ethtool_ops->get_settings)
4524 return -EOPNOTSUPP;
4525
4526 memset(cmd, 0, sizeof(struct ethtool_cmd));
4527 cmd->cmd = ETHTOOL_GSET;
4528 return dev->ethtool_ops->get_settings(dev, cmd);
4529}
4530EXPORT_SYMBOL(dev_ethtool_get_settings);
4531
4532/**
4515 * dev_get_flags - get flags reported to userspace 4533 * dev_get_flags - get flags reported to userspace
4516 * @dev: device 4534 * @dev: device
4517 * 4535 *
@@ -5116,7 +5134,7 @@ static void rollback_registered_many(struct list_head *head)
5116 list_del(&dev->unreg_list); 5134 list_del(&dev->unreg_list);
5117 continue; 5135 continue;
5118 } 5136 }
5119 5137 dev->dismantle = true;
5120 BUG_ON(dev->reg_state != NETREG_REGISTERED); 5138 BUG_ON(dev->reg_state != NETREG_REGISTERED);
5121 } 5139 }
5122 5140
@@ -5242,11 +5260,13 @@ u32 netdev_fix_features(struct net_device *dev, u32 features)
5242} 5260}
5243EXPORT_SYMBOL(netdev_fix_features); 5261EXPORT_SYMBOL(netdev_fix_features);
5244 5262
5245void netdev_update_features(struct net_device *dev) 5263int __netdev_update_features(struct net_device *dev)
5246{ 5264{
5247 u32 features; 5265 u32 features;
5248 int err = 0; 5266 int err = 0;
5249 5267
5268 ASSERT_RTNL();
5269
5250 features = netdev_get_wanted_features(dev); 5270 features = netdev_get_wanted_features(dev);
5251 5271
5252 if (dev->netdev_ops->ndo_fix_features) 5272 if (dev->netdev_ops->ndo_fix_features)
@@ -5256,24 +5276,60 @@ void netdev_update_features(struct net_device *dev)
5256 features = netdev_fix_features(dev, features); 5276 features = netdev_fix_features(dev, features);
5257 5277
5258 if (dev->features == features) 5278 if (dev->features == features)
5259 return; 5279 return 0;
5260 5280
5261 netdev_info(dev, "Features changed: 0x%08x -> 0x%08x\n", 5281 netdev_dbg(dev, "Features changed: 0x%08x -> 0x%08x\n",
5262 dev->features, features); 5282 dev->features, features);
5263 5283
5264 if (dev->netdev_ops->ndo_set_features) 5284 if (dev->netdev_ops->ndo_set_features)
5265 err = dev->netdev_ops->ndo_set_features(dev, features); 5285 err = dev->netdev_ops->ndo_set_features(dev, features);
5266 5286
5267 if (!err) 5287 if (unlikely(err < 0)) {
5268 dev->features = features;
5269 else if (err < 0)
5270 netdev_err(dev, 5288 netdev_err(dev,
5271 "set_features() failed (%d); wanted 0x%08x, left 0x%08x\n", 5289 "set_features() failed (%d); wanted 0x%08x, left 0x%08x\n",
5272 err, features, dev->features); 5290 err, features, dev->features);
5291 return -1;
5292 }
5293
5294 if (!err)
5295 dev->features = features;
5296
5297 return 1;
5298}
5299
5300/**
5301 * netdev_update_features - recalculate device features
5302 * @dev: the device to check
5303 *
5304 * Recalculate the dev->features set and send notifications if it
5305 * has changed. Should be called whenever driver- or hardware-dependent
5306 * conditions that influence the feature set may have changed.
5307 */
5308void netdev_update_features(struct net_device *dev)
5309{
5310 if (__netdev_update_features(dev))
5311 netdev_features_change(dev);
5273} 5312}
5274EXPORT_SYMBOL(netdev_update_features); 5313EXPORT_SYMBOL(netdev_update_features);
5275 5314
5276/** 5315/**
5316 * netdev_change_features - recalculate device features
5317 * @dev: the device to check
5318 *
5319 * Recalculate the dev->features set and send notifications even
5320 * if the features have not changed. Should be called instead of
5321 * netdev_update_features() when dev->vlan_features might also
5322 * have changed, so that the changes are propagated to stacked
5323 * VLAN devices.
5324 */
5325void netdev_change_features(struct net_device *dev)
5326{
5327 __netdev_update_features(dev);
5328 netdev_features_change(dev);
5329}
5330EXPORT_SYMBOL(netdev_change_features);
5331
5332/**
5277 * netif_stacked_transfer_operstate - transfer operstate 5333 * netif_stacked_transfer_operstate - transfer operstate
5278 * @rootdev: the root or lower level device to transfer state from 5334 * @rootdev: the root or lower level device to transfer state from
5279 * @dev: the device to transfer operstate to 5335 * @dev: the device to transfer operstate to
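After the split, __netdev_update_features() reports whether dev->features actually changed, and the two exported wrappers above differ only in when the feature-change notification fires. A driver that changes an input of its ndo_fix_features() callback would typically do, under RTNL (my_priv/hw_csum_ok hypothetical):

struct my_priv {
        bool hw_csum_ok;        /* consulted by ndo_fix_features() */
};

static void my_toggle_hw_csum(struct net_device *dev, bool on)
{
        struct my_priv *priv = netdev_priv(dev);

        ASSERT_RTNL();
        priv->hw_csum_ok = on;
        netdev_update_features(dev);    /* re-run fix-ups, notify if changed */
}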
@@ -5389,6 +5445,10 @@ int register_netdevice(struct net_device *dev)
5389 5445
5390 dev->iflink = -1; 5446 dev->iflink = -1;
5391 5447
5448 ret = dev_get_valid_name(dev, dev->name);
5449 if (ret < 0)
5450 goto out;
5451
5392 /* Init, if this function is available */ 5452 /* Init, if this function is available */
5393 if (dev->netdev_ops->ndo_init) { 5453 if (dev->netdev_ops->ndo_init) {
5394 ret = dev->netdev_ops->ndo_init(dev); 5454 ret = dev->netdev_ops->ndo_init(dev);
@@ -5399,10 +5459,6 @@ int register_netdevice(struct net_device *dev)
5399 } 5459 }
5400 } 5460 }
5401 5461
5402 ret = dev_get_valid_name(dev, dev->name, 0);
5403 if (ret)
5404 goto err_uninit;
5405
5406 dev->ifindex = dev_new_index(net); 5462 dev->ifindex = dev_new_index(net);
5407 if (dev->iflink == -1) 5463 if (dev->iflink == -1)
5408 dev->iflink = dev->ifindex; 5464 dev->iflink = dev->ifindex;
@@ -5414,6 +5470,14 @@ int register_netdevice(struct net_device *dev)
5414 dev->features |= NETIF_F_SOFT_FEATURES; 5470 dev->features |= NETIF_F_SOFT_FEATURES;
5415 dev->wanted_features = dev->features & dev->hw_features; 5471 dev->wanted_features = dev->features & dev->hw_features;
5416 5472
5473 /* Turn on no cache copy if HW is doing checksum */
5474 dev->hw_features |= NETIF_F_NOCACHE_COPY;
5475 if ((dev->features & NETIF_F_ALL_CSUM) &&
5476 !(dev->features & NETIF_F_NO_CSUM)) {
5477 dev->wanted_features |= NETIF_F_NOCACHE_COPY;
5478 dev->features |= NETIF_F_NOCACHE_COPY;
5479 }
5480
5417 /* Enable GRO and NETIF_F_HIGHDMA for vlans by default, 5481 /* Enable GRO and NETIF_F_HIGHDMA for vlans by default,
5418 * vlan_dev_init() will do the dev->features check, so these features 5482 * vlan_dev_init() will do the dev->features check, so these features
5419 * are enabled only if supported by underlying device. 5483 * are enabled only if supported by underlying device.
@@ -5430,7 +5494,7 @@ int register_netdevice(struct net_device *dev)
5430 goto err_uninit; 5494 goto err_uninit;
5431 dev->reg_state = NETREG_REGISTERED; 5495 dev->reg_state = NETREG_REGISTERED;
5432 5496
5433 netdev_update_features(dev); 5497 __netdev_update_features(dev);
5434 5498
5435 /* 5499 /*
5436 * Default initial state at registry is that the 5500 * Default initial state at registry is that the
@@ -5527,19 +5591,7 @@ int register_netdev(struct net_device *dev)
5527 int err; 5591 int err;
5528 5592
5529 rtnl_lock(); 5593 rtnl_lock();
5530
5531 /*
5532 * If the name is a format string the caller wants us to do a
5533 * name allocation.
5534 */
5535 if (strchr(dev->name, '%')) {
5536 err = dev_alloc_name(dev, dev->name);
5537 if (err < 0)
5538 goto out;
5539 }
5540
5541 err = register_netdevice(dev); 5594 err = register_netdevice(dev);
5542out:
5543 rtnl_unlock(); 5595 rtnl_unlock();
5544 return err; 5596 return err;
5545} 5597}
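register_netdev() no longer special-cases format-string names because dev_get_valid_name(), now called early in register_netdevice(), resolves them itself. So a caller can still hand over a template (my_setup hypothetical):

struct net_device *dev = alloc_netdev(0, "myif%d", my_setup);
int err;

if (!dev)
        return -ENOMEM;
/* the %d is resolved inside register_netdevice(), under RTNL */
err = register_netdev(dev);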
@@ -5912,7 +5964,10 @@ EXPORT_SYMBOL(free_netdev);
5912void synchronize_net(void) 5964void synchronize_net(void)
5913{ 5965{
5914 might_sleep(); 5966 might_sleep();
5915 synchronize_rcu(); 5967 if (rtnl_is_locked())
5968 synchronize_rcu_expedited();
5969 else
5970 synchronize_rcu();
5916} 5971}
5917EXPORT_SYMBOL(synchronize_net); 5972EXPORT_SYMBOL(synchronize_net);
5918 5973
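synchronize_net() is very often called with RTNL held, where a full grace period stalls every other RTNL user; synchronize_rcu_expedited() trades some system-wide IPI cost for a much shorter lock hold time. Callers are unchanged; a typical one looks like this (entry and its list are hypothetical):

rtnl_lock();
list_del_rcu(&entry->list);
synchronize_net();      /* expedited while RTNL is held */
kfree(entry);
rtnl_unlock();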
@@ -6021,7 +6076,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
6021 /* We get here if we can't use the current device name */ 6076 /* We get here if we can't use the current device name */
6022 if (!pat) 6077 if (!pat)
6023 goto out; 6078 goto out;
6024 if (dev_get_valid_name(dev, pat, 1)) 6079 if (dev_get_valid_name(dev, pat) < 0)
6025 goto out; 6080 goto out;
6026 } 6081 }
6027 6082
@@ -6123,6 +6178,11 @@ static int dev_cpu_callback(struct notifier_block *nfb,
6123 oldsd->output_queue = NULL; 6178 oldsd->output_queue = NULL;
6124 oldsd->output_queue_tailp = &oldsd->output_queue; 6179 oldsd->output_queue_tailp = &oldsd->output_queue;
6125 } 6180 }
6181 /* Append NAPI poll list from offline CPU. */
6182 if (!list_empty(&oldsd->poll_list)) {
6183 list_splice_init(&oldsd->poll_list, &sd->poll_list);
6184 raise_softirq_irqoff(NET_RX_SOFTIRQ);
6185 }
6126 6186
6127 raise_softirq_irqoff(NET_TX_SOFTIRQ); 6187 raise_softirq_irqoff(NET_TX_SOFTIRQ);
6128 local_irq_enable(); 6188 local_irq_enable();
@@ -6153,29 +6213,20 @@ static int dev_cpu_callback(struct notifier_block *nfb,
6153 */ 6213 */
6154u32 netdev_increment_features(u32 all, u32 one, u32 mask) 6214u32 netdev_increment_features(u32 all, u32 one, u32 mask)
6155{ 6215{
6156 /* If device needs checksumming, downgrade to it. */ 6216 if (mask & NETIF_F_GEN_CSUM)
6157 if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM)) 6217 mask |= NETIF_F_ALL_CSUM;
6158 all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM); 6218 mask |= NETIF_F_VLAN_CHALLENGED;
6159 else if (mask & NETIF_F_ALL_CSUM) {
6160 /* If one device supports v4/v6 checksumming, set for all. */
6161 if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
6162 !(all & NETIF_F_GEN_CSUM)) {
6163 all &= ~NETIF_F_ALL_CSUM;
6164 all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
6165 }
6166 6219
6167 /* If one device supports hw checksumming, set for all. */ 6220 all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
6168 if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) { 6221 all &= one | ~NETIF_F_ALL_FOR_ALL;
6169 all &= ~NETIF_F_ALL_CSUM;
6170 all |= NETIF_F_HW_CSUM;
6171 }
6172 }
6173 6222
6174 one |= NETIF_F_ALL_CSUM; 6223 /* If device needs checksumming, downgrade to it. */
6224 if (all & (NETIF_F_ALL_CSUM & ~NETIF_F_NO_CSUM))
6225 all &= ~NETIF_F_NO_CSUM;
6175 6226
6176 one |= all & NETIF_F_ONE_FOR_ALL; 6227 /* If one device supports hw checksumming, set for all. */
6177 all &= one | NETIF_F_LLTX | NETIF_F_GSO | NETIF_F_UFO; 6228 if (all & NETIF_F_GEN_CSUM)
6178 all |= one & mask & NETIF_F_ONE_FOR_ALL; 6229 all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
6179 6230
6180 return all; 6231 return all;
6181} 6232}
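netdev_increment_features() is folded over all lower devices to compute a master's feature set; the rewrite re-derives the old checksum special cases from the NETIF_F_ONE_FOR_ALL/NETIF_F_ALL_FOR_ALL groups plus the two downgrade rules at the end. Roughly how a bonding-style master might consume it (MY_MASTER_FEATURES and the slave list are hypothetical):

u32 features = MY_MASTER_FEATURES;
struct my_slave *s;

list_for_each_entry(s, &master->slaves, list)
        features = netdev_increment_features(features,
                                             s->dev->features,
                                             MY_MASTER_FEATURES);
/* then publish, e.g. via netdev_change_features(master->dev) */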
@@ -6218,29 +6269,23 @@ err_name:
6218/** 6269/**
6219 * netdev_drivername - network driver for the device 6270 * netdev_drivername - network driver for the device
6220 * @dev: network device 6271 * @dev: network device
6221 * @buffer: buffer for resulting name
6222 * @len: size of buffer
6223 * 6272 *
6224 * Determine network driver for device. 6273 * Determine network driver for device.
6225 */ 6274 */
6226char *netdev_drivername(const struct net_device *dev, char *buffer, int len) 6275const char *netdev_drivername(const struct net_device *dev)
6227{ 6276{
6228 const struct device_driver *driver; 6277 const struct device_driver *driver;
6229 const struct device *parent; 6278 const struct device *parent;
6230 6279 const char *empty = "";
6231 if (len <= 0 || !buffer)
6232 return buffer;
6233 buffer[0] = 0;
6234 6280
6235 parent = dev->dev.parent; 6281 parent = dev->dev.parent;
6236
6237 if (!parent) 6282 if (!parent)
6238 return buffer; 6283 return empty;
6239 6284
6240 driver = parent->driver; 6285 driver = parent->driver;
6241 if (driver && driver->name) 6286 if (driver && driver->name)
6242 strlcpy(buffer, driver->name, len); 6287 return driver->name;
6243 return buffer; 6288 return empty;
6244} 6289}
6245 6290
6246static int __netdev_printk(const char *level, const struct net_device *dev, 6291static int __netdev_printk(const char *level, const struct net_device *dev,
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index 7b39f3ed2fda..e2e66939ed00 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -68,14 +68,6 @@ static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr,
68 return __hw_addr_add_ex(list, addr, addr_len, addr_type, false); 68 return __hw_addr_add_ex(list, addr, addr_len, addr_type, false);
69} 69}
70 70
71static void ha_rcu_free(struct rcu_head *head)
72{
73 struct netdev_hw_addr *ha;
74
75 ha = container_of(head, struct netdev_hw_addr, rcu_head);
76 kfree(ha);
77}
78
79static int __hw_addr_del_ex(struct netdev_hw_addr_list *list, 71static int __hw_addr_del_ex(struct netdev_hw_addr_list *list,
80 unsigned char *addr, int addr_len, 72 unsigned char *addr, int addr_len,
81 unsigned char addr_type, bool global) 73 unsigned char addr_type, bool global)
@@ -94,7 +86,7 @@ static int __hw_addr_del_ex(struct netdev_hw_addr_list *list,
94 if (--ha->refcount) 86 if (--ha->refcount)
95 return 0; 87 return 0;
96 list_del_rcu(&ha->list); 88 list_del_rcu(&ha->list);
97 call_rcu(&ha->rcu_head, ha_rcu_free); 89 kfree_rcu(ha, rcu_head);
98 list->count--; 90 list->count--;
99 return 0; 91 return 0;
100 } 92 }
@@ -197,7 +189,7 @@ void __hw_addr_flush(struct netdev_hw_addr_list *list)
197 189
198 list_for_each_entry_safe(ha, tmp, &list->list, list) { 190 list_for_each_entry_safe(ha, tmp, &list->list, list) {
199 list_del_rcu(&ha->list); 191 list_del_rcu(&ha->list);
200 call_rcu(&ha->rcu_head, ha_rcu_free); 192 kfree_rcu(ha, rcu_head);
201 } 193 }
202 list->count = 0; 194 list->count = 0;
203} 195}
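kfree_rcu() replaces call_rcu() callbacks whose only job was container_of() plus kfree(); the member name is passed so the offset of the rcu_head can be folded in at compile time, and the head must be embedded directly in the structure being freed. Before and after, schematically:

struct item {
        struct list_head list;
        struct rcu_head rcu;    /* must be a direct member */
};

/* before: a one-off callback */
static void item_free_rcu(struct rcu_head *head)
{
        kfree(container_of(head, struct item, rcu));
}
        ...
        list_del_rcu(&it->list);
        call_rcu(&it->rcu, item_free_rcu);

/* after: */
        list_del_rcu(&it->list);
        kfree_rcu(it, rcu);     /* member name, not a pointer */

drop_monitor.c below gets the identical treatment.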
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 706502ff64aa..7f36b38e060f 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -207,14 +207,6 @@ static void trace_napi_poll_hit(void *ignore, struct napi_struct *napi)
207 rcu_read_unlock(); 207 rcu_read_unlock();
208} 208}
209 209
210
211static void free_dm_hw_stat(struct rcu_head *head)
212{
213 struct dm_hw_stat_delta *n;
214 n = container_of(head, struct dm_hw_stat_delta, rcu);
215 kfree(n);
216}
217
218static int set_all_monitor_traces(int state) 210static int set_all_monitor_traces(int state)
219{ 211{
220 int rc = 0; 212 int rc = 0;
@@ -245,7 +237,7 @@ static int set_all_monitor_traces(int state)
245 list_for_each_entry_safe(new_stat, temp, &hw_stats_list, list) { 237 list_for_each_entry_safe(new_stat, temp, &hw_stats_list, list) {
246 if (new_stat->dev == NULL) { 238 if (new_stat->dev == NULL) {
247 list_del_rcu(&new_stat->list); 239 list_del_rcu(&new_stat->list);
248 call_rcu(&new_stat->rcu, free_dm_hw_stat); 240 kfree_rcu(new_stat, rcu);
249 } 241 }
250 } 242 }
251 break; 243 break;
@@ -314,7 +306,7 @@ static int dropmon_net_event(struct notifier_block *ev_block,
314 new_stat->dev = NULL; 306 new_stat->dev = NULL;
315 if (trace_state == TRACE_OFF) { 307 if (trace_state == TRACE_OFF) {
316 list_del_rcu(&new_stat->list); 308 list_del_rcu(&new_stat->list);
317 call_rcu(&new_stat->rcu, free_dm_hw_stat); 309 kfree_rcu(new_stat, rcu);
318 break; 310 break;
319 } 311 }
320 } 312 }
diff --git a/net/core/dst.c b/net/core/dst.c
index 91104d35de7d..6135f3671692 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -19,6 +19,7 @@
19#include <linux/types.h> 19#include <linux/types.h>
20#include <net/net_namespace.h> 20#include <net/net_namespace.h>
21#include <linux/sched.h> 21#include <linux/sched.h>
22#include <linux/prefetch.h>
22 23
23#include <net/dst.h> 24#include <net/dst.h>
24 25
@@ -33,9 +34,6 @@
33 * 3) This list is guarded by a mutex, 34 * 3) This list is guarded by a mutex,
34 * so that the gc_task and dst_dev_event() can be synchronized. 35 * so that the gc_task and dst_dev_event() can be synchronized.
35 */ 36 */
36#if RT_CACHE_DEBUG >= 2
37static atomic_t dst_total = ATOMIC_INIT(0);
38#endif
39 37
40/* 38/*
41 * We want to keep lock & list close together 39 * We want to keep lock & list close together
@@ -69,10 +67,6 @@ static void dst_gc_task(struct work_struct *work)
69 unsigned long expires = ~0L; 67 unsigned long expires = ~0L;
70 struct dst_entry *dst, *next, head; 68 struct dst_entry *dst, *next, head;
71 struct dst_entry *last = &head; 69 struct dst_entry *last = &head;
72#if RT_CACHE_DEBUG >= 2
73 ktime_t time_start = ktime_get();
74 struct timespec elapsed;
75#endif
76 70
77 mutex_lock(&dst_gc_mutex); 71 mutex_lock(&dst_gc_mutex);
78 next = dst_busy_list; 72 next = dst_busy_list;
@@ -146,15 +140,6 @@ loop:
146 140
147 spin_unlock_bh(&dst_garbage.lock); 141 spin_unlock_bh(&dst_garbage.lock);
148 mutex_unlock(&dst_gc_mutex); 142 mutex_unlock(&dst_gc_mutex);
149#if RT_CACHE_DEBUG >= 2
150 elapsed = ktime_to_timespec(ktime_sub(ktime_get(), time_start));
151 printk(KERN_DEBUG "dst_total: %d delayed: %d work_perf: %d"
152 " expires: %lu elapsed: %lu us\n",
153 atomic_read(&dst_total), delayed, work_performed,
154 expires,
155 elapsed.tv_sec * USEC_PER_SEC +
156 elapsed.tv_nsec / NSEC_PER_USEC);
157#endif
158} 143}
159 144
160int dst_discard(struct sk_buff *skb) 145int dst_discard(struct sk_buff *skb)
@@ -166,7 +151,8 @@ EXPORT_SYMBOL(dst_discard);
166 151
167const u32 dst_default_metrics[RTAX_MAX]; 152const u32 dst_default_metrics[RTAX_MAX];
168 153
169void *dst_alloc(struct dst_ops *ops, int initial_ref) 154void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
155 int initial_ref, int initial_obsolete, int flags)
170{ 156{
171 struct dst_entry *dst; 157 struct dst_entry *dst;
172 158
@@ -174,19 +160,38 @@ void *dst_alloc(struct dst_ops *ops, int initial_ref)
174 if (ops->gc(ops)) 160 if (ops->gc(ops))
175 return NULL; 161 return NULL;
176 } 162 }
177 dst = kmem_cache_zalloc(ops->kmem_cachep, GFP_ATOMIC); 163 dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
178 if (!dst) 164 if (!dst)
179 return NULL; 165 return NULL;
180 atomic_set(&dst->__refcnt, initial_ref); 166 dst->child = NULL;
167 dst->dev = dev;
168 if (dev)
169 dev_hold(dev);
181 dst->ops = ops; 170 dst->ops = ops;
182 dst->lastuse = jiffies;
183 dst->path = dst;
184 dst->input = dst->output = dst_discard;
185 dst_init_metrics(dst, dst_default_metrics, true); 171 dst_init_metrics(dst, dst_default_metrics, true);
186#if RT_CACHE_DEBUG >= 2 172 dst->expires = 0UL;
187 atomic_inc(&dst_total); 173 dst->path = dst;
174 dst->neighbour = NULL;
175 dst->hh = NULL;
176#ifdef CONFIG_XFRM
177 dst->xfrm = NULL;
178#endif
179 dst->input = dst_discard;
180 dst->output = dst_discard;
181 dst->error = 0;
182 dst->obsolete = initial_obsolete;
183 dst->header_len = 0;
184 dst->trailer_len = 0;
185#ifdef CONFIG_IP_ROUTE_CLASSID
186 dst->tclassid = 0;
188#endif 187#endif
189 dst_entries_add(ops, 1); 188 atomic_set(&dst->__refcnt, initial_ref);
189 dst->__use = 0;
190 dst->lastuse = jiffies;
191 dst->flags = flags;
192 dst->next = NULL;
193 if (!(flags & DST_NOCOUNT))
194 dst_entries_add(ops, 1);
190 return dst; 195 return dst;
191} 196}
192EXPORT_SYMBOL(dst_alloc); 197EXPORT_SYMBOL(dst_alloc);
@@ -239,15 +244,13 @@ again:
239 neigh_release(neigh); 244 neigh_release(neigh);
240 } 245 }
241 246
242 dst_entries_add(dst->ops, -1); 247 if (!(dst->flags & DST_NOCOUNT))
248 dst_entries_add(dst->ops, -1);
243 249
244 if (dst->ops->destroy) 250 if (dst->ops->destroy)
245 dst->ops->destroy(dst); 251 dst->ops->destroy(dst);
246 if (dst->dev) 252 if (dst->dev)
247 dev_put(dst->dev); 253 dev_put(dst->dev);
248#if RT_CACHE_DEBUG >= 2
249 atomic_dec(&dst_total);
250#endif
251 kmem_cache_free(dst->ops->kmem_cachep, dst); 254 kmem_cache_free(dst->ops->kmem_cachep, dst);
252 255
253 dst = child; 256 dst = child;
@@ -314,7 +317,7 @@ void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
314{ 317{
315 unsigned long prev, new; 318 unsigned long prev, new;
316 319
317 new = (unsigned long) dst_default_metrics; 320 new = ((unsigned long) dst_default_metrics) | DST_METRICS_READ_ONLY;
318 prev = cmpxchg(&dst->_metrics, old, new); 321 prev = cmpxchg(&dst->_metrics, old, new);
319 if (prev == old) 322 if (prev == old)
320 kfree(__DST_METRICS_PTR(old)); 323 kfree(__DST_METRICS_PTR(old));
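
Note: dst_alloc() grows from (ops, initial_ref) to (ops, dev, initial_ref,
initial_obsolete, flags). It now takes the device reference itself, trades
kmem_cache_zalloc() for kmem_cache_alloc() plus explicit per-field
initialization, and the new DST_NOCOUNT flag skips the dst_entries_add()
accounting on both the alloc and destroy paths. Caller migration looks
roughly like this (my_dst_ops and the surrounding code are illustrative):

    struct dst_entry *d;

    /* old: d = dst_alloc(&my_dst_ops, 1); then fix up d->dev by hand */
    d = dst_alloc(&my_dst_ops,  /* per-protocol dst_ops */
                  dev,          /* dev_hold() now happens inside */
                  1,            /* initial __refcnt */
                  0,            /* initial ->obsolete */
                  0);           /* or DST_NOCOUNT to stay off the books */
    if (!d)
            return NULL;
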
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 74ead9eca126..fd14116ad7f0 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -21,6 +21,8 @@
21#include <linux/uaccess.h> 21#include <linux/uaccess.h>
22#include <linux/vmalloc.h> 22#include <linux/vmalloc.h>
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/rtnetlink.h>
25#include <linux/sched.h>
24 26
25/* 27/*
26 * Some useful ethtool_ops methods that're device independent. 28 * Some useful ethtool_ops methods that're device independent.
@@ -231,6 +233,29 @@ static int ethtool_set_feature_compat(struct net_device *dev,
231 return 1; 233 return 1;
232} 234}
233 235
236static int ethtool_set_flags_compat(struct net_device *dev,
237 int (*legacy_set)(struct net_device *, u32),
238 struct ethtool_set_features_block *features, u32 mask)
239{
240 u32 value;
241
242 if (!legacy_set)
243 return 0;
244
245 if (!(features[0].valid & mask))
246 return 0;
247
248 value = dev->features & ~features[0].valid;
249 value |= features[0].requested;
250
251 features[0].valid &= ~mask;
252
253 if (legacy_set(dev, value & mask) < 0)
254 netdev_info(dev, "Legacy flags change failed\n");
255
256 return 1;
257}
258
234static int ethtool_set_features_compat(struct net_device *dev, 259static int ethtool_set_features_compat(struct net_device *dev,
235 struct ethtool_set_features_block *features) 260 struct ethtool_set_features_block *features)
236{ 261{
@@ -247,7 +272,7 @@ static int ethtool_set_features_compat(struct net_device *dev,
247 features, NETIF_F_ALL_TSO); 272 features, NETIF_F_ALL_TSO);
248 compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_rx_csum, 273 compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_rx_csum,
249 features, NETIF_F_RXCSUM); 274 features, NETIF_F_RXCSUM);
250 compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_flags, 275 compat |= ethtool_set_flags_compat(dev, dev->ethtool_ops->set_flags,
251 features, flags_dup_features); 276 features, flags_dup_features);
252 277
253 return compat; 278 return compat;
@@ -317,7 +342,7 @@ static int ethtool_set_features(struct net_device *dev, void __user *useraddr)
317 342
318 dev->wanted_features &= ~features[0].valid; 343 dev->wanted_features &= ~features[0].valid;
319 dev->wanted_features |= features[0].valid & features[0].requested; 344 dev->wanted_features |= features[0].valid & features[0].requested;
320 netdev_update_features(dev); 345 __netdev_update_features(dev);
321 346
322 if ((dev->wanted_features ^ dev->features) & features[0].valid) 347 if ((dev->wanted_features ^ dev->features) & features[0].valid)
323 ret |= ETHTOOL_F_WISH; 348 ret |= ETHTOOL_F_WISH;
@@ -330,7 +355,7 @@ static const char netdev_features_strings[ETHTOOL_DEV_FEATURE_WORDS * 32][ETH_GS
330 /* NETIF_F_IP_CSUM */ "tx-checksum-ipv4", 355 /* NETIF_F_IP_CSUM */ "tx-checksum-ipv4",
331 /* NETIF_F_NO_CSUM */ "tx-checksum-unneeded", 356 /* NETIF_F_NO_CSUM */ "tx-checksum-unneeded",
332 /* NETIF_F_HW_CSUM */ "tx-checksum-ip-generic", 357 /* NETIF_F_HW_CSUM */ "tx-checksum-ip-generic",
333 /* NETIF_F_IPV6_CSUM */ "tx_checksum-ipv6", 358 /* NETIF_F_IPV6_CSUM */ "tx-checksum-ipv6",
334 /* NETIF_F_HIGHDMA */ "highdma", 359 /* NETIF_F_HIGHDMA */ "highdma",
335 /* NETIF_F_FRAGLIST */ "tx-scatter-gather-fraglist", 360 /* NETIF_F_FRAGLIST */ "tx-scatter-gather-fraglist",
336 /* NETIF_F_HW_VLAN_TX */ "tx-vlan-hw-insert", 361 /* NETIF_F_HW_VLAN_TX */ "tx-vlan-hw-insert",
@@ -359,8 +384,8 @@ static const char netdev_features_strings[ETHTOOL_DEV_FEATURE_WORDS * 32][ETH_GS
359 /* NETIF_F_NTUPLE */ "rx-ntuple-filter", 384 /* NETIF_F_NTUPLE */ "rx-ntuple-filter",
360 /* NETIF_F_RXHASH */ "rx-hashing", 385 /* NETIF_F_RXHASH */ "rx-hashing",
361 /* NETIF_F_RXCSUM */ "rx-checksum", 386 /* NETIF_F_RXCSUM */ "rx-checksum",
362 "", 387 /* NETIF_F_NOCACHE_COPY */ "tx-nocache-copy",
363 "", 388 /* NETIF_F_LOOPBACK */ "loopback",
364}; 389};
365 390
366static int __ethtool_get_sset_count(struct net_device *dev, int sset) 391static int __ethtool_get_sset_count(struct net_device *dev, int sset)
@@ -499,7 +524,7 @@ static int ethtool_set_one_feature(struct net_device *dev,
499 else 524 else
500 dev->wanted_features &= ~mask; 525 dev->wanted_features &= ~mask;
501 526
502 netdev_update_features(dev); 527 __netdev_update_features(dev);
503 return 0; 528 return 0;
504 } 529 }
505 530
@@ -544,14 +569,14 @@ int __ethtool_set_flags(struct net_device *dev, u32 data)
544 } 569 }
545 570
546 /* allow changing only bits set in hw_features */ 571 /* allow changing only bits set in hw_features */
547 changed = (data ^ dev->wanted_features) & flags_dup_features; 572 changed = (data ^ dev->features) & flags_dup_features;
548 if (changed & ~dev->hw_features) 573 if (changed & ~dev->hw_features)
549 return (changed & dev->hw_features) ? -EINVAL : -EOPNOTSUPP; 574 return (changed & dev->hw_features) ? -EINVAL : -EOPNOTSUPP;
550 575
551 dev->wanted_features = 576 dev->wanted_features =
552 (dev->wanted_features & ~changed) | data; 577 (dev->wanted_features & ~changed) | (data & dev->hw_features);
553 578
554 netdev_update_features(dev); 579 __netdev_update_features(dev);
555 580
556 return 0; 581 return 0;
557} 582}
@@ -908,6 +933,9 @@ static noinline_for_stack int ethtool_set_rx_ntuple(struct net_device *dev,
908 struct ethtool_rx_ntuple_flow_spec_container *fsc = NULL; 933 struct ethtool_rx_ntuple_flow_spec_container *fsc = NULL;
909 int ret; 934 int ret;
910 935
936 if (!ops->set_rx_ntuple)
937 return -EOPNOTSUPP;
938
911 if (!(dev->features & NETIF_F_NTUPLE)) 939 if (!(dev->features & NETIF_F_NTUPLE))
912 return -EINVAL; 940 return -EINVAL;
913 941
@@ -1441,6 +1469,35 @@ static int ethtool_set_ringparam(struct net_device *dev, void __user *useraddr)
1441 return dev->ethtool_ops->set_ringparam(dev, &ringparam); 1469 return dev->ethtool_ops->set_ringparam(dev, &ringparam);
1442} 1470}
1443 1471
1472static noinline_for_stack int ethtool_get_channels(struct net_device *dev,
1473 void __user *useraddr)
1474{
1475 struct ethtool_channels channels = { .cmd = ETHTOOL_GCHANNELS };
1476
1477 if (!dev->ethtool_ops->get_channels)
1478 return -EOPNOTSUPP;
1479
1480 dev->ethtool_ops->get_channels(dev, &channels);
1481
1482 if (copy_to_user(useraddr, &channels, sizeof(channels)))
1483 return -EFAULT;
1484 return 0;
1485}
1486
1487static noinline_for_stack int ethtool_set_channels(struct net_device *dev,
1488 void __user *useraddr)
1489{
1490 struct ethtool_channels channels;
1491
1492 if (!dev->ethtool_ops->set_channels)
1493 return -EOPNOTSUPP;
1494
1495 if (copy_from_user(&channels, useraddr, sizeof(channels)))
1496 return -EFAULT;
1497
1498 return dev->ethtool_ops->set_channels(dev, &channels);
1499}
1500
1444static int ethtool_get_pauseparam(struct net_device *dev, void __user *useraddr) 1501static int ethtool_get_pauseparam(struct net_device *dev, void __user *useraddr)
1445{ 1502{
1446 struct ethtool_pauseparam pauseparam = { ETHTOOL_GPAUSEPARAM }; 1503 struct ethtool_pauseparam pauseparam = { ETHTOOL_GPAUSEPARAM };
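
Note: ETHTOOL_GCHANNELS/ETHTOOL_SCHANNELS expose per-type queue ("channel")
counts through the usual SIOCETHTOOL ioctl. A minimal userspace probe of
the new interface; the device name "eth0" and the assumption that the
driver implements get_channels are mine:

    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    int main(void)
    {
            struct ethtool_channels ch = { .cmd = ETHTOOL_GCHANNELS };
            struct ifreq ifr;
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            if (fd < 0)
                    return 1;
            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
            ifr.ifr_data = (void *)&ch;

            if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
                    printf("rx %u/%u tx %u/%u combined %u/%u\n",
                           ch.rx_count, ch.max_rx, ch.tx_count, ch.max_tx,
                           ch.combined_count, ch.max_combined);
            else
                    perror("ETHTOOL_GCHANNELS"); /* EOPNOTSUPP without the hook */
            return 0;
    }
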
@@ -1618,14 +1675,60 @@ out:
1618static int ethtool_phys_id(struct net_device *dev, void __user *useraddr) 1675static int ethtool_phys_id(struct net_device *dev, void __user *useraddr)
1619{ 1676{
1620 struct ethtool_value id; 1677 struct ethtool_value id;
1678 static bool busy;
1679 int rc;
1621 1680
1622 if (!dev->ethtool_ops->phys_id) 1681 if (!dev->ethtool_ops->set_phys_id)
1623 return -EOPNOTSUPP; 1682 return -EOPNOTSUPP;
1624 1683
1684 if (busy)
1685 return -EBUSY;
1686
1625 if (copy_from_user(&id, useraddr, sizeof(id))) 1687 if (copy_from_user(&id, useraddr, sizeof(id)))
1626 return -EFAULT; 1688 return -EFAULT;
1627 1689
1628 return dev->ethtool_ops->phys_id(dev, id.data); 1690 rc = dev->ethtool_ops->set_phys_id(dev, ETHTOOL_ID_ACTIVE);
1691 if (rc < 0)
1692 return rc;
1693
1694 /* Drop the RTNL lock while waiting, but prevent reentry or
1695 * removal of the device.
1696 */
1697 busy = true;
1698 dev_hold(dev);
1699 rtnl_unlock();
1700
1701 if (rc == 0) {
1702 /* Driver will handle this itself */
1703 schedule_timeout_interruptible(
1704 id.data ? (id.data * HZ) : MAX_SCHEDULE_TIMEOUT);
1705 } else {
1706 /* Driver expects to be called at twice the frequency in rc */
1707 int n = rc * 2, i, interval = HZ / n;
1708
1709 /* Count down seconds */
1710 do {
1711 /* Count down iterations per second */
1712 i = n;
1713 do {
1714 rtnl_lock();
1715 rc = dev->ethtool_ops->set_phys_id(dev,
1716 (i & 1) ? ETHTOOL_ID_OFF : ETHTOOL_ID_ON);
1717 rtnl_unlock();
1718 if (rc)
1719 break;
1720 schedule_timeout_interruptible(interval);
1721 } while (!signal_pending(current) && --i != 0);
1722 } while (!signal_pending(current) &&
1723 (id.data == 0 || --id.data != 0));
1724 }
1725
1726 rtnl_lock();
1727 dev_put(dev);
1728 busy = false;
1729
1730 (void)dev->ethtool_ops->set_phys_id(dev, ETHTOOL_ID_INACTIVE);
1731 return rc;
1629} 1732}
1630 1733
1631static int ethtool_get_stats(struct net_device *dev, void __user *useraddr) 1734static int ethtool_get_stats(struct net_device *dev, void __user *useraddr)
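
Note: the old phys_id hook blinked the LED itself and blocked with RTNL
held; the rewrite above moves the timing loop into the core and drops RTNL
while waiting, using a static busy flag plus dev_hold() to prevent reentry
and device removal. The driver contract: set_phys_id(ETHTOOL_ID_ACTIVE)
returns a negative errno to refuse, 0 to handle blinking itself, or a
frequency n in Hz meaning "toggle me with ID_ON/ID_OFF 2n times a second";
ETHTOOL_ID_INACTIVE restores normal LED operation. A hedged driver-side
sketch (the mydrv_* names and LED register values are invented):

    static int mydrv_set_phys_id(struct net_device *dev,
                                 enum ethtool_phys_id_state state)
    {
            struct mydrv_priv *priv = netdev_priv(dev);

            switch (state) {
            case ETHTOOL_ID_ACTIVE:
                    return 2;       /* blink at 2 Hz: 4 calls per second */
            case ETHTOOL_ID_ON:
                    mydrv_led_write(priv, MYDRV_LED_ON);
                    break;
            case ETHTOOL_ID_OFF:
                    mydrv_led_write(priv, MYDRV_LED_OFF);
                    break;
            case ETHTOOL_ID_INACTIVE:
                    mydrv_led_write(priv, MYDRV_LED_HW); /* back to hw control */
                    break;
            }
            return 0;
    }
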
@@ -1743,6 +1846,87 @@ static noinline_for_stack int ethtool_flash_device(struct net_device *dev,
1743 return dev->ethtool_ops->flash_device(dev, &efl); 1846 return dev->ethtool_ops->flash_device(dev, &efl);
1744} 1847}
1745 1848
1849static int ethtool_set_dump(struct net_device *dev,
1850 void __user *useraddr)
1851{
1852 struct ethtool_dump dump;
1853
1854 if (!dev->ethtool_ops->set_dump)
1855 return -EOPNOTSUPP;
1856
1857 if (copy_from_user(&dump, useraddr, sizeof(dump)))
1858 return -EFAULT;
1859
1860 return dev->ethtool_ops->set_dump(dev, &dump);
1861}
1862
1863static int ethtool_get_dump_flag(struct net_device *dev,
1864 void __user *useraddr)
1865{
1866 int ret;
1867 struct ethtool_dump dump;
1868 const struct ethtool_ops *ops = dev->ethtool_ops;
1869
1870 if (!dev->ethtool_ops->get_dump_flag)
1871 return -EOPNOTSUPP;
1872
1873 if (copy_from_user(&dump, useraddr, sizeof(dump)))
1874 return -EFAULT;
1875
1876 ret = ops->get_dump_flag(dev, &dump);
1877 if (ret)
1878 return ret;
1879
1880 if (copy_to_user(useraddr, &dump, sizeof(dump)))
1881 return -EFAULT;
1882 return 0;
1883}
1884
1885static int ethtool_get_dump_data(struct net_device *dev,
1886 void __user *useraddr)
1887{
1888 int ret;
1889 __u32 len;
1890 struct ethtool_dump dump, tmp;
1891 const struct ethtool_ops *ops = dev->ethtool_ops;
1892 void *data = NULL;
1893
1894 if (!dev->ethtool_ops->get_dump_data ||
1895 !dev->ethtool_ops->get_dump_flag)
1896 return -EOPNOTSUPP;
1897
1898 if (copy_from_user(&dump, useraddr, sizeof(dump)))
1899 return -EFAULT;
1900
1901 memset(&tmp, 0, sizeof(tmp));
1902 tmp.cmd = ETHTOOL_GET_DUMP_FLAG;
1903 ret = ops->get_dump_flag(dev, &tmp);
1904 if (ret)
1905 return ret;
1906
1907 len = (tmp.len > dump.len) ? dump.len : tmp.len;
1908 if (!len)
1909 return -EFAULT;
1910
1911 data = vzalloc(tmp.len);
1912 if (!data)
1913 return -ENOMEM;
1914 ret = ops->get_dump_data(dev, &dump, data);
1915 if (ret)
1916 goto out;
1917
1918 if (copy_to_user(useraddr, &dump, sizeof(dump))) {
1919 ret = -EFAULT;
1920 goto out;
1921 }
1922 useraddr += offsetof(struct ethtool_dump, data);
1923 if (copy_to_user(useraddr, data, len))
1924 ret = -EFAULT;
1925out:
1926 vfree(data);
1927 return ret;
1928}
1929
1746/* The main entry point in this file. Called from net/core/dev.c */ 1930/* The main entry point in this file. Called from net/core/dev.c */
1747 1931
1748int dev_ethtool(struct net *net, struct ifreq *ifr) 1932int dev_ethtool(struct net *net, struct ifreq *ifr)
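
Note: the firmware-dump ioctls form a two-step protocol:
ETHTOOL_GET_DUMP_FLAG reports the active flag and the length the driver
wants, then ETHTOOL_GET_DUMP_DATA fills a caller-sized buffer (the kernel
copies min(driver length, dump.len), as ethtool_get_dump_data() above
shows). Continuing the socket/ifreq setup from the channels example, a
rough userspace fragment (error handling abbreviated):

    struct ethtool_dump flag = { .cmd = ETHTOOL_GET_DUMP_FLAG };
    struct ethtool_dump *dump;

    ifr.ifr_data = (void *)&flag;
    if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
            return 1;                       /* driver lacks get_dump_flag */

    dump = calloc(1, sizeof(*dump) + flag.len);
    dump->cmd = ETHTOOL_GET_DUMP_DATA;
    dump->len = flag.len;
    ifr.ifr_data = (void *)dump;
    if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
            fwrite(dump->data, 1, dump->len, stdout);
    free(dump);
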
@@ -1953,6 +2137,21 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
1953 case ETHTOOL_SGRO: 2137 case ETHTOOL_SGRO:
1954 rc = ethtool_set_one_feature(dev, useraddr, ethcmd); 2138 rc = ethtool_set_one_feature(dev, useraddr, ethcmd);
1955 break; 2139 break;
2140 case ETHTOOL_GCHANNELS:
2141 rc = ethtool_get_channels(dev, useraddr);
2142 break;
2143 case ETHTOOL_SCHANNELS:
2144 rc = ethtool_set_channels(dev, useraddr);
2145 break;
2146 case ETHTOOL_SET_DUMP:
2147 rc = ethtool_set_dump(dev, useraddr);
2148 break;
2149 case ETHTOOL_GET_DUMP_FLAG:
2150 rc = ethtool_get_dump_flag(dev, useraddr);
2151 break;
2152 case ETHTOOL_GET_DUMP_DATA:
2153 rc = ethtool_get_dump_data(dev, useraddr);
2154 break;
1956 default: 2155 default:
1957 rc = -EOPNOTSUPP; 2156 rc = -EOPNOTSUPP;
1958 } 2157 }
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 8248ebb5891d..008dc70b064b 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -590,7 +590,8 @@ static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
590 int idx = 0; 590 int idx = 0;
591 struct fib_rule *rule; 591 struct fib_rule *rule;
592 592
593 list_for_each_entry(rule, &ops->rules_list, list) { 593 rcu_read_lock();
594 list_for_each_entry_rcu(rule, &ops->rules_list, list) {
594 if (idx < cb->args[1]) 595 if (idx < cb->args[1])
595 goto skip; 596 goto skip;
596 597
@@ -601,6 +602,7 @@ static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
601skip: 602skip:
602 idx++; 603 idx++;
603 } 604 }
605 rcu_read_unlock();
604 cb->args[1] = idx; 606 cb->args[1] = idx;
605 rules_ops_put(ops); 607 rules_ops_put(ops);
606 608
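
Note: dump_rules() previously walked ops->rules_list with no protection of
its own; taking rcu_read_lock() and switching to list_for_each_entry_rcu()
pairs this reader with the list_del_rcu()/kfree_rcu() writers used
elsewhere in the series. The shape of the pattern (process() stands in for
the dump callback):

    rcu_read_lock();
    list_for_each_entry_rcu(rule, &ops->rules_list, list) {
            /* A rule may be unlinked concurrently via list_del_rcu(),
             * but its memory stays valid until the grace period ends,
             * so reading its fields here needs no write-side lock. */
            process(rule);
    }
    rcu_read_unlock();
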
diff --git a/net/core/filter.c b/net/core/filter.c
index afb8afb066bb..36f975fa87cb 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -38,65 +38,7 @@
38#include <asm/unaligned.h> 38#include <asm/unaligned.h>
39#include <linux/filter.h> 39#include <linux/filter.h>
40#include <linux/reciprocal_div.h> 40#include <linux/reciprocal_div.h>
41 41#include <linux/ratelimit.h>
42enum {
43 BPF_S_RET_K = 1,
44 BPF_S_RET_A,
45 BPF_S_ALU_ADD_K,
46 BPF_S_ALU_ADD_X,
47 BPF_S_ALU_SUB_K,
48 BPF_S_ALU_SUB_X,
49 BPF_S_ALU_MUL_K,
50 BPF_S_ALU_MUL_X,
51 BPF_S_ALU_DIV_X,
52 BPF_S_ALU_AND_K,
53 BPF_S_ALU_AND_X,
54 BPF_S_ALU_OR_K,
55 BPF_S_ALU_OR_X,
56 BPF_S_ALU_LSH_K,
57 BPF_S_ALU_LSH_X,
58 BPF_S_ALU_RSH_K,
59 BPF_S_ALU_RSH_X,
60 BPF_S_ALU_NEG,
61 BPF_S_LD_W_ABS,
62 BPF_S_LD_H_ABS,
63 BPF_S_LD_B_ABS,
64 BPF_S_LD_W_LEN,
65 BPF_S_LD_W_IND,
66 BPF_S_LD_H_IND,
67 BPF_S_LD_B_IND,
68 BPF_S_LD_IMM,
69 BPF_S_LDX_W_LEN,
70 BPF_S_LDX_B_MSH,
71 BPF_S_LDX_IMM,
72 BPF_S_MISC_TAX,
73 BPF_S_MISC_TXA,
74 BPF_S_ALU_DIV_K,
75 BPF_S_LD_MEM,
76 BPF_S_LDX_MEM,
77 BPF_S_ST,
78 BPF_S_STX,
79 BPF_S_JMP_JA,
80 BPF_S_JMP_JEQ_K,
81 BPF_S_JMP_JEQ_X,
82 BPF_S_JMP_JGE_K,
83 BPF_S_JMP_JGE_X,
84 BPF_S_JMP_JGT_K,
85 BPF_S_JMP_JGT_X,
86 BPF_S_JMP_JSET_K,
87 BPF_S_JMP_JSET_X,
88 /* Ancillary data */
89 BPF_S_ANC_PROTOCOL,
90 BPF_S_ANC_PKTTYPE,
91 BPF_S_ANC_IFINDEX,
92 BPF_S_ANC_NLATTR,
93 BPF_S_ANC_NLATTR_NEST,
94 BPF_S_ANC_MARK,
95 BPF_S_ANC_QUEUE,
96 BPF_S_ANC_HATYPE,
97 BPF_S_ANC_RXHASH,
98 BPF_S_ANC_CPU,
99};
100 42
101/* No hurry in this branch */ 43/* No hurry in this branch */
102static void *__load_pointer(const struct sk_buff *skb, int k, unsigned int size) 44static void *__load_pointer(const struct sk_buff *skb, int k, unsigned int size)
@@ -145,7 +87,7 @@ int sk_filter(struct sock *sk, struct sk_buff *skb)
145 rcu_read_lock(); 87 rcu_read_lock();
146 filter = rcu_dereference(sk->sk_filter); 88 filter = rcu_dereference(sk->sk_filter);
147 if (filter) { 89 if (filter) {
148 unsigned int pkt_len = sk_run_filter(skb, filter->insns); 90 unsigned int pkt_len = SK_RUN_FILTER(filter, skb);
149 91
150 err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM; 92 err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
151 } 93 }
@@ -409,7 +351,9 @@ load_b:
409 continue; 351 continue;
410 } 352 }
411 default: 353 default:
412 WARN_ON(1); 354 WARN_RATELIMIT(1, "Unknown code:%u jt:%u tf:%u k:%u\n",
355 fentry->code, fentry->jt,
356 fentry->jf, fentry->k);
413 return 0; 357 return 0;
414 } 358 }
415 } 359 }
@@ -638,6 +582,7 @@ void sk_filter_release_rcu(struct rcu_head *rcu)
638{ 582{
639 struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu); 583 struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
640 584
585 bpf_jit_free(fp);
641 kfree(fp); 586 kfree(fp);
642} 587}
643EXPORT_SYMBOL(sk_filter_release_rcu); 588EXPORT_SYMBOL(sk_filter_release_rcu);
@@ -672,6 +617,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
672 617
673 atomic_set(&fp->refcnt, 1); 618 atomic_set(&fp->refcnt, 1);
674 fp->len = fprog->len; 619 fp->len = fprog->len;
620 fp->bpf_func = sk_run_filter;
675 621
676 err = sk_chk_filter(fp->insns, fp->len); 622 err = sk_chk_filter(fp->insns, fp->len);
677 if (err) { 623 if (err) {
@@ -679,6 +625,8 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
679 return err; 625 return err;
680 } 626 }
681 627
628 bpf_jit_compile(fp);
629
682 old_fp = rcu_dereference_protected(sk->sk_filter, 630 old_fp = rcu_dereference_protected(sk->sk_filter,
683 sock_owned_by_user(sk)); 631 sock_owned_by_user(sk));
684 rcu_assign_pointer(sk->sk_filter, fp); 632 rcu_assign_pointer(sk->sk_filter, fp);
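
Note: this is the socket-filter side of the BPF JIT. The BPF_S_* opcode
enum moves out of filter.c into <linux/filter.h> so per-arch JITs can
decode the same instructions; sk_attach_filter() seeds fp->bpf_func with
the interpreter and lets bpf_jit_compile() overwrite it with native code,
SK_RUN_FILTER() dispatches through the pointer, and bpf_jit_free() in the
RCU release path tears the image down. Reconstructed from memory of the
3.0-era header, so treat the exact spelling as approximate:

    #ifdef CONFIG_BPF_JIT
    #define SK_RUN_FILTER(FILTER, SKB) (*FILTER->bpf_func)(SKB, FILTER->insns)
    #else
    #define SK_RUN_FILTER(FILTER, SKB) sk_run_filter(SKB, FILTER->insns)
    static inline void bpf_jit_compile(struct sk_filter *fp) { }
    static inline void bpf_jit_free(struct sk_filter *fp) { }
    #endif
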
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 7c2373321b74..43b03dd71e85 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -249,13 +249,6 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
249} 249}
250EXPORT_SYMBOL(gen_new_estimator); 250EXPORT_SYMBOL(gen_new_estimator);
251 251
252static void __gen_kill_estimator(struct rcu_head *head)
253{
254 struct gen_estimator *e = container_of(head,
255 struct gen_estimator, e_rcu);
256 kfree(e);
257}
258
259/** 252/**
260 * gen_kill_estimator - remove a rate estimator 253 * gen_kill_estimator - remove a rate estimator
261 * @bstats: basic statistics 254 * @bstats: basic statistics
@@ -279,7 +272,7 @@ void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
279 write_unlock(&est_lock); 272 write_unlock(&est_lock);
280 273
281 list_del_rcu(&e->list); 274 list_del_rcu(&e->list);
282 call_rcu(&e->e_rcu, __gen_kill_estimator); 275 kfree_rcu(e, e_rcu);
283 } 276 }
284 spin_unlock_bh(&est_tree_lock); 277 spin_unlock_bh(&est_tree_lock);
285} 278}
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 5ceb257e860c..33d2a1fba131 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -28,6 +28,7 @@
28static const char fmt_hex[] = "%#x\n"; 28static const char fmt_hex[] = "%#x\n";
29static const char fmt_long_hex[] = "%#lx\n"; 29static const char fmt_long_hex[] = "%#lx\n";
30static const char fmt_dec[] = "%d\n"; 30static const char fmt_dec[] = "%d\n";
31static const char fmt_udec[] = "%u\n";
31static const char fmt_ulong[] = "%lu\n"; 32static const char fmt_ulong[] = "%lu\n";
32static const char fmt_u64[] = "%llu\n"; 33static const char fmt_u64[] = "%llu\n";
33 34
@@ -145,13 +146,10 @@ static ssize_t show_speed(struct device *dev,
145 if (!rtnl_trylock()) 146 if (!rtnl_trylock())
146 return restart_syscall(); 147 return restart_syscall();
147 148
148 if (netif_running(netdev) && 149 if (netif_running(netdev)) {
149 netdev->ethtool_ops && 150 struct ethtool_cmd cmd;
150 netdev->ethtool_ops->get_settings) { 151 if (!dev_ethtool_get_settings(netdev, &cmd))
151 struct ethtool_cmd cmd = { ETHTOOL_GSET }; 152 ret = sprintf(buf, fmt_udec, ethtool_cmd_speed(&cmd));
152
153 if (!netdev->ethtool_ops->get_settings(netdev, &cmd))
154 ret = sprintf(buf, fmt_dec, ethtool_cmd_speed(&cmd));
155 } 153 }
156 rtnl_unlock(); 154 rtnl_unlock();
157 return ret; 155 return ret;
@@ -166,13 +164,11 @@ static ssize_t show_duplex(struct device *dev,
166 if (!rtnl_trylock()) 164 if (!rtnl_trylock())
167 return restart_syscall(); 165 return restart_syscall();
168 166
169 if (netif_running(netdev) && 167 if (netif_running(netdev)) {
170 netdev->ethtool_ops && 168 struct ethtool_cmd cmd;
171 netdev->ethtool_ops->get_settings) { 169 if (!dev_ethtool_get_settings(netdev, &cmd))
172 struct ethtool_cmd cmd = { ETHTOOL_GSET }; 170 ret = sprintf(buf, "%s\n",
173 171 cmd.duplex ? "full" : "half");
174 if (!netdev->ethtool_ops->get_settings(netdev, &cmd))
175 ret = sprintf(buf, "%s\n", cmd.duplex ? "full" : "half");
176 } 172 }
177 rtnl_unlock(); 173 rtnl_unlock();
178 return ret; 174 return ret;
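
Note: two changes meet in these show handlers: the open-coded ethtool_ops
NULL checks collapse into the new dev_ethtool_get_settings() helper, and
speed is printed with fmt_udec because ethtool_cmd_speed() returns a u32.
The surrounding rtnl_trylock()/restart_syscall() idiom is the standard way
for sysfs readers to avoid sleeping on RTNL; condensed (show_something
stands in for show_speed/show_duplex):

    static ssize_t show_something(struct device *dev,
                                  struct device_attribute *attr, char *buf)
    {
            struct net_device *netdev = to_net_dev(dev);
            int ret = -EINVAL;

            /* If RTNL is contended, bounce the syscall back to
             * userspace to be retried rather than blocking here. */
            if (!rtnl_trylock())
                    return restart_syscall();

            if (netif_running(netdev)) {
                    struct ethtool_cmd cmd;

                    if (!dev_ethtool_get_settings(netdev, &cmd))
                            ret = sprintf(buf, "%u\n", ethtool_cmd_speed(&cmd));
            }
            rtnl_unlock();
            return ret;
    }
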
@@ -565,13 +561,6 @@ static ssize_t show_rps_map(struct netdev_rx_queue *queue,
565 return len; 561 return len;
566} 562}
567 563
568static void rps_map_release(struct rcu_head *rcu)
569{
570 struct rps_map *map = container_of(rcu, struct rps_map, rcu);
571
572 kfree(map);
573}
574
575static ssize_t store_rps_map(struct netdev_rx_queue *queue, 564static ssize_t store_rps_map(struct netdev_rx_queue *queue,
576 struct rx_queue_attribute *attribute, 565 struct rx_queue_attribute *attribute,
577 const char *buf, size_t len) 566 const char *buf, size_t len)
@@ -619,7 +608,7 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
619 spin_unlock(&rps_map_lock); 608 spin_unlock(&rps_map_lock);
620 609
621 if (old_map) 610 if (old_map)
622 call_rcu(&old_map->rcu, rps_map_release); 611 kfree_rcu(old_map, rcu);
623 612
624 free_cpumask_var(mask); 613 free_cpumask_var(mask);
625 return len; 614 return len;
@@ -728,7 +717,7 @@ static void rx_queue_release(struct kobject *kobj)
728 map = rcu_dereference_raw(queue->rps_map); 717 map = rcu_dereference_raw(queue->rps_map);
729 if (map) { 718 if (map) {
730 RCU_INIT_POINTER(queue->rps_map, NULL); 719 RCU_INIT_POINTER(queue->rps_map, NULL);
731 call_rcu(&map->rcu, rps_map_release); 720 kfree_rcu(map, rcu);
732 } 721 }
733 722
734 flow_table = rcu_dereference_raw(queue->rps_flow_table); 723 flow_table = rcu_dereference_raw(queue->rps_flow_table);
@@ -898,21 +887,6 @@ static ssize_t show_xps_map(struct netdev_queue *queue,
898 return len; 887 return len;
899} 888}
900 889
901static void xps_map_release(struct rcu_head *rcu)
902{
903 struct xps_map *map = container_of(rcu, struct xps_map, rcu);
904
905 kfree(map);
906}
907
908static void xps_dev_maps_release(struct rcu_head *rcu)
909{
910 struct xps_dev_maps *dev_maps =
911 container_of(rcu, struct xps_dev_maps, rcu);
912
913 kfree(dev_maps);
914}
915
916static DEFINE_MUTEX(xps_map_mutex); 890static DEFINE_MUTEX(xps_map_mutex);
917#define xmap_dereference(P) \ 891#define xmap_dereference(P) \
918 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex)) 892 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
@@ -968,7 +942,7 @@ static ssize_t store_xps_map(struct netdev_queue *queue,
968 } else 942 } else
969 pos = map_len = alloc_len = 0; 943 pos = map_len = alloc_len = 0;
970 944
971 need_set = cpu_isset(cpu, *mask) && cpu_online(cpu); 945 need_set = cpumask_test_cpu(cpu, mask) && cpu_online(cpu);
972#ifdef CONFIG_NUMA 946#ifdef CONFIG_NUMA
973 if (need_set) { 947 if (need_set) {
974 if (numa_node == -2) 948 if (numa_node == -2)
@@ -1009,7 +983,7 @@ static ssize_t store_xps_map(struct netdev_queue *queue,
1009 map = dev_maps ? 983 map = dev_maps ?
1010 xmap_dereference(dev_maps->cpu_map[cpu]) : NULL; 984 xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
1011 if (map && xmap_dereference(new_dev_maps->cpu_map[cpu]) != map) 985 if (map && xmap_dereference(new_dev_maps->cpu_map[cpu]) != map)
1012 call_rcu(&map->rcu, xps_map_release); 986 kfree_rcu(map, rcu);
1013 if (new_dev_maps->cpu_map[cpu]) 987 if (new_dev_maps->cpu_map[cpu])
1014 nonempty = 1; 988 nonempty = 1;
1015 } 989 }
@@ -1022,7 +996,7 @@ static ssize_t store_xps_map(struct netdev_queue *queue,
1022 } 996 }
1023 997
1024 if (dev_maps) 998 if (dev_maps)
1025 call_rcu(&dev_maps->rcu, xps_dev_maps_release); 999 kfree_rcu(dev_maps, rcu);
1026 1000
1027 netdev_queue_numa_node_write(queue, (numa_node >= 0) ? numa_node : 1001 netdev_queue_numa_node_write(queue, (numa_node >= 0) ? numa_node :
1028 NUMA_NO_NODE); 1002 NUMA_NO_NODE);
@@ -1084,7 +1058,7 @@ static void netdev_queue_release(struct kobject *kobj)
1084 else { 1058 else {
1085 RCU_INIT_POINTER(dev_maps->cpu_map[i], 1059 RCU_INIT_POINTER(dev_maps->cpu_map[i],
1086 NULL); 1060 NULL);
1087 call_rcu(&map->rcu, xps_map_release); 1061 kfree_rcu(map, rcu);
1088 map = NULL; 1062 map = NULL;
1089 } 1063 }
1090 } 1064 }
@@ -1094,7 +1068,7 @@ static void netdev_queue_release(struct kobject *kobj)
1094 1068
1095 if (!nonempty) { 1069 if (!nonempty) {
1096 RCU_INIT_POINTER(dev->xps_maps, NULL); 1070 RCU_INIT_POINTER(dev->xps_maps, NULL);
1097 call_rcu(&dev_maps->rcu, xps_dev_maps_release); 1071 kfree_rcu(dev_maps, rcu);
1098 } 1072 }
1099 } 1073 }
1100 1074
@@ -1205,9 +1179,14 @@ static void remove_queue_kobjects(struct net_device *net)
1205#endif 1179#endif
1206} 1180}
1207 1181
1208static const void *net_current_ns(void) 1182static void *net_grab_current_ns(void)
1209{ 1183{
1210 return current->nsproxy->net_ns; 1184 struct net *ns = current->nsproxy->net_ns;
1185#ifdef CONFIG_NET_NS
1186 if (ns)
1187 atomic_inc(&ns->passive);
1188#endif
1189 return ns;
1211} 1190}
1212 1191
1213static const void *net_initial_ns(void) 1192static const void *net_initial_ns(void)
@@ -1222,22 +1201,13 @@ static const void *net_netlink_ns(struct sock *sk)
1222 1201
1223struct kobj_ns_type_operations net_ns_type_operations = { 1202struct kobj_ns_type_operations net_ns_type_operations = {
1224 .type = KOBJ_NS_TYPE_NET, 1203 .type = KOBJ_NS_TYPE_NET,
1225 .current_ns = net_current_ns, 1204 .grab_current_ns = net_grab_current_ns,
1226 .netlink_ns = net_netlink_ns, 1205 .netlink_ns = net_netlink_ns,
1227 .initial_ns = net_initial_ns, 1206 .initial_ns = net_initial_ns,
1207 .drop_ns = net_drop_ns,
1228}; 1208};
1229EXPORT_SYMBOL_GPL(net_ns_type_operations); 1209EXPORT_SYMBOL_GPL(net_ns_type_operations);
1230 1210
1231static void net_kobj_ns_exit(struct net *net)
1232{
1233 kobj_ns_exit(KOBJ_NS_TYPE_NET, net);
1234}
1235
1236static struct pernet_operations kobj_net_ops = {
1237 .exit = net_kobj_ns_exit,
1238};
1239
1240
1241#ifdef CONFIG_HOTPLUG 1211#ifdef CONFIG_HOTPLUG
1242static int netdev_uevent(struct device *d, struct kobj_uevent_env *env) 1212static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
1243{ 1213{
@@ -1365,6 +1335,5 @@ EXPORT_SYMBOL(netdev_class_remove_file);
1365int netdev_kobject_init(void) 1335int netdev_kobject_init(void)
1366{ 1336{
1367 kobj_ns_type_register(&net_ns_type_operations); 1337 kobj_ns_type_register(&net_ns_type_operations);
1368 register_pernet_subsys(&kobj_net_ops);
1369 return class_register(&net_class); 1338 return class_register(&net_class);
1370} 1339}
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 3f860261c5ee..ea489db1bc23 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -8,6 +8,8 @@
8#include <linux/idr.h> 8#include <linux/idr.h>
9#include <linux/rculist.h> 9#include <linux/rculist.h>
10#include <linux/nsproxy.h> 10#include <linux/nsproxy.h>
11#include <linux/proc_fs.h>
12#include <linux/file.h>
11#include <net/net_namespace.h> 13#include <net/net_namespace.h>
12#include <net/netns/generic.h> 14#include <net/netns/generic.h>
13 15
@@ -27,14 +29,6 @@ EXPORT_SYMBOL(init_net);
27 29
28#define INITIAL_NET_GEN_PTRS 13 /* +1 for len +2 for rcu_head */ 30#define INITIAL_NET_GEN_PTRS 13 /* +1 for len +2 for rcu_head */
29 31
30static void net_generic_release(struct rcu_head *rcu)
31{
32 struct net_generic *ng;
33
34 ng = container_of(rcu, struct net_generic, rcu);
35 kfree(ng);
36}
37
38static int net_assign_generic(struct net *net, int id, void *data) 32static int net_assign_generic(struct net *net, int id, void *data)
39{ 33{
40 struct net_generic *ng, *old_ng; 34 struct net_generic *ng, *old_ng;
@@ -68,7 +62,7 @@ static int net_assign_generic(struct net *net, int id, void *data)
68 memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void*)); 62 memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void*));
69 63
70 rcu_assign_pointer(net->gen, ng); 64 rcu_assign_pointer(net->gen, ng);
71 call_rcu(&old_ng->rcu, net_generic_release); 65 kfree_rcu(old_ng, rcu);
72assign: 66assign:
73 ng->ptr[id - 1] = data; 67 ng->ptr[id - 1] = data;
74 return 0; 68 return 0;
@@ -134,6 +128,7 @@ static __net_init int setup_net(struct net *net)
134 LIST_HEAD(net_exit_list); 128 LIST_HEAD(net_exit_list);
135 129
136 atomic_set(&net->count, 1); 130 atomic_set(&net->count, 1);
131 atomic_set(&net->passive, 1);
137 132
138#ifdef NETNS_REFCNT_DEBUG 133#ifdef NETNS_REFCNT_DEBUG
139 atomic_set(&net->use_count, 0); 134 atomic_set(&net->use_count, 0);
@@ -216,11 +211,21 @@ static void net_free(struct net *net)
216 kmem_cache_free(net_cachep, net); 211 kmem_cache_free(net_cachep, net);
217} 212}
218 213
219static struct net *net_create(void) 214void net_drop_ns(void *p)
215{
216 struct net *ns = p;
217 if (ns && atomic_dec_and_test(&ns->passive))
218 net_free(ns);
219}
220
221struct net *copy_net_ns(unsigned long flags, struct net *old_net)
220{ 222{
221 struct net *net; 223 struct net *net;
222 int rv; 224 int rv;
223 225
226 if (!(flags & CLONE_NEWNET))
227 return get_net(old_net);
228
224 net = net_alloc(); 229 net = net_alloc();
225 if (!net) 230 if (!net)
226 return ERR_PTR(-ENOMEM); 231 return ERR_PTR(-ENOMEM);
@@ -233,19 +238,12 @@ static struct net *net_create(void)
233 } 238 }
234 mutex_unlock(&net_mutex); 239 mutex_unlock(&net_mutex);
235 if (rv < 0) { 240 if (rv < 0) {
236 net_free(net); 241 net_drop_ns(net);
237 return ERR_PTR(rv); 242 return ERR_PTR(rv);
238 } 243 }
239 return net; 244 return net;
240} 245}
241 246
242struct net *copy_net_ns(unsigned long flags, struct net *old_net)
243{
244 if (!(flags & CLONE_NEWNET))
245 return get_net(old_net);
246 return net_create();
247}
248
249static DEFINE_SPINLOCK(cleanup_list_lock); 247static DEFINE_SPINLOCK(cleanup_list_lock);
250static LIST_HEAD(cleanup_list); /* Must hold cleanup_list_lock to touch */ 248static LIST_HEAD(cleanup_list); /* Must hold cleanup_list_lock to touch */
251 249
@@ -296,7 +294,7 @@ static void cleanup_net(struct work_struct *work)
296 /* Finally it is safe to free my network namespace structure */ 294 /* Finally it is safe to free my network namespace structure */
297 list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) { 295 list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
298 list_del_init(&net->exit_list); 296 list_del_init(&net->exit_list);
299 net_free(net); 297 net_drop_ns(net);
300 } 298 }
301} 299}
302static DECLARE_WORK(net_cleanup_work, cleanup_net); 300static DECLARE_WORK(net_cleanup_work, cleanup_net);
@@ -314,6 +312,26 @@ void __put_net(struct net *net)
314} 312}
315EXPORT_SYMBOL_GPL(__put_net); 313EXPORT_SYMBOL_GPL(__put_net);
316 314
315struct net *get_net_ns_by_fd(int fd)
316{
317 struct proc_inode *ei;
318 struct file *file;
319 struct net *net;
320
321 file = proc_ns_fget(fd);
322 if (IS_ERR(file))
323 return ERR_CAST(file);
324
325 ei = PROC_I(file->f_dentry->d_inode);
326 if (ei->ns_ops == &netns_operations)
327 net = get_net(ei->ns);
328 else
329 net = ERR_PTR(-EINVAL);
330
331 fput(file);
332 return net;
333}
334
317#else 335#else
318struct net *copy_net_ns(unsigned long flags, struct net *old_net) 336struct net *copy_net_ns(unsigned long flags, struct net *old_net)
319{ 337{
@@ -321,6 +339,11 @@ struct net *copy_net_ns(unsigned long flags, struct net *old_net)
321 return ERR_PTR(-EINVAL); 339 return ERR_PTR(-EINVAL);
322 return old_net; 340 return old_net;
323} 341}
342
343struct net *get_net_ns_by_fd(int fd)
344{
345 return ERR_PTR(-EINVAL);
346}
324#endif 347#endif
325 348
326struct net *get_net_ns_by_pid(pid_t pid) 349struct net *get_net_ns_by_pid(pid_t pid)
@@ -573,3 +596,39 @@ void unregister_pernet_device(struct pernet_operations *ops)
573 mutex_unlock(&net_mutex); 596 mutex_unlock(&net_mutex);
574} 597}
575EXPORT_SYMBOL_GPL(unregister_pernet_device); 598EXPORT_SYMBOL_GPL(unregister_pernet_device);
599
600#ifdef CONFIG_NET_NS
601static void *netns_get(struct task_struct *task)
602{
603 struct net *net = NULL;
604 struct nsproxy *nsproxy;
605
606 rcu_read_lock();
607 nsproxy = task_nsproxy(task);
608 if (nsproxy)
609 net = get_net(nsproxy->net_ns);
610 rcu_read_unlock();
611
612 return net;
613}
614
615static void netns_put(void *ns)
616{
617 put_net(ns);
618}
619
620static int netns_install(struct nsproxy *nsproxy, void *ns)
621{
622 put_net(nsproxy->net_ns);
623 nsproxy->net_ns = get_net(ns);
624 return 0;
625}
626
627const struct proc_ns_operations netns_operations = {
628 .name = "net",
629 .type = CLONE_NEWNET,
630 .get = netns_get,
631 .put = netns_put,
632 .install = netns_install,
633};
634#endif
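
Note: get_net_ns_by_fd() and the netns proc_ns_operations are the pieces
that let setns(2) move a task into another network namespace via an fd on
/proc/<pid>/ns/net (netns_install() above is where the syscall lands),
while the new ->passive count keeps the struct net allocation alive for
sysfs/kobject users after the last real reference drops. A userspace
sketch; setns() needs a recent glibc (or a raw syscall) and CAP_SYS_ADMIN:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <sched.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
            int fd;

            if (argc < 2)
                    return 1;
            fd = open(argv[1], O_RDONLY);   /* e.g. /proc/1/ns/net */
            if (fd < 0 || setns(fd, CLONE_NEWNET) < 0) {
                    perror("setns");
                    return 1;
            }
            close(fd);
            /* Sockets created from here on live in the target netns. */
            execlp("ip", "ip", "link", "show", (char *)NULL);
            perror("execlp");
            return 1;
    }
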
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 06be2431753e..18d9cbda3a39 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -539,7 +539,7 @@ int __netpoll_rx(struct sk_buff *skb)
539{ 539{
540 int proto, len, ulen; 540 int proto, len, ulen;
541 int hits = 0; 541 int hits = 0;
542 struct iphdr *iph; 542 const struct iphdr *iph;
543 struct udphdr *uh; 543 struct udphdr *uh;
544 struct netpoll_info *npinfo = skb->dev->npinfo; 544 struct netpoll_info *npinfo = skb->dev->npinfo;
545 struct netpoll *np, *tmp; 545 struct netpoll *np, *tmp;
@@ -698,32 +698,8 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
698 698
699 if (*cur != 0) { 699 if (*cur != 0) {
700 /* MAC address */ 700 /* MAC address */
701 if ((delim = strchr(cur, ':')) == NULL) 701 if (!mac_pton(cur, np->remote_mac))
702 goto parse_failed;
703 *delim = 0;
704 np->remote_mac[0] = simple_strtol(cur, NULL, 16);
705 cur = delim + 1;
706 if ((delim = strchr(cur, ':')) == NULL)
707 goto parse_failed;
708 *delim = 0;
709 np->remote_mac[1] = simple_strtol(cur, NULL, 16);
710 cur = delim + 1;
711 if ((delim = strchr(cur, ':')) == NULL)
712 goto parse_failed;
713 *delim = 0;
714 np->remote_mac[2] = simple_strtol(cur, NULL, 16);
715 cur = delim + 1;
716 if ((delim = strchr(cur, ':')) == NULL)
717 goto parse_failed; 702 goto parse_failed;
718 *delim = 0;
719 np->remote_mac[3] = simple_strtol(cur, NULL, 16);
720 cur = delim + 1;
721 if ((delim = strchr(cur, ':')) == NULL)
722 goto parse_failed;
723 *delim = 0;
724 np->remote_mac[4] = simple_strtol(cur, NULL, 16);
725 cur = delim + 1;
726 np->remote_mac[5] = simple_strtol(cur, NULL, 16);
727 } 703 }
728 704
729 netpoll_print_options(np); 705 netpoll_print_options(np);
@@ -816,6 +792,13 @@ int netpoll_setup(struct netpoll *np)
816 return -ENODEV; 792 return -ENODEV;
817 } 793 }
818 794
795 if (ndev->master) {
796 printk(KERN_ERR "%s: %s is a slave device, aborting.\n",
797 np->name, np->dev_name);
798 err = -EBUSY;
799 goto put;
800 }
801
819 if (!netif_running(ndev)) { 802 if (!netif_running(ndev)) {
820 unsigned long atmost, atleast; 803 unsigned long atmost, atleast;
821 804
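
Note: the hand-rolled strchr()/simple_strtol() loop gives way to the new
lib helper mac_pton(), which parses the canonical xx:xx:xx:xx:xx:xx form
into a 6-byte array and returns nonzero only if the whole string matched
(pktgen below converts to it too). Usage is a one-liner:

    u8 mac[ETH_ALEN];

    /* On failure the buffer contents are unspecified, so always check. */
    if (!mac_pton(str, mac))
            return -EINVAL;         /* or goto parse_failed, as above */
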
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index aeeece72b72f..f76079cd750c 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -156,6 +156,7 @@
156#include <linux/wait.h> 156#include <linux/wait.h>
157#include <linux/etherdevice.h> 157#include <linux/etherdevice.h>
158#include <linux/kthread.h> 158#include <linux/kthread.h>
159#include <linux/prefetch.h>
159#include <net/net_namespace.h> 160#include <net/net_namespace.h>
160#include <net/checksum.h> 161#include <net/checksum.h>
161#include <net/ipv6.h> 162#include <net/ipv6.h>
@@ -449,7 +450,6 @@ static void pktgen_stop(struct pktgen_thread *t);
449static void pktgen_clear_counters(struct pktgen_dev *pkt_dev); 450static void pktgen_clear_counters(struct pktgen_dev *pkt_dev);
450 451
451static unsigned int scan_ip6(const char *s, char ip[16]); 452static unsigned int scan_ip6(const char *s, char ip[16]);
452static unsigned int fmt_ip6(char *s, const char ip[16]);
453 453
454/* Module parameters, defaults. */ 454/* Module parameters, defaults. */
455static int pg_count_d __read_mostly = 1000; 455static int pg_count_d __read_mostly = 1000;
@@ -556,21 +556,13 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
556 pkt_dev->skb_priority); 556 pkt_dev->skb_priority);
557 557
558 if (pkt_dev->flags & F_IPV6) { 558 if (pkt_dev->flags & F_IPV6) {
559 char b1[128], b2[128], b3[128];
560 fmt_ip6(b1, pkt_dev->in6_saddr.s6_addr);
561 fmt_ip6(b2, pkt_dev->min_in6_saddr.s6_addr);
562 fmt_ip6(b3, pkt_dev->max_in6_saddr.s6_addr);
563 seq_printf(seq, 559 seq_printf(seq,
564 " saddr: %s min_saddr: %s max_saddr: %s\n", b1, 560 " saddr: %pI6c min_saddr: %pI6c max_saddr: %pI6c\n"
565 b2, b3); 561 " daddr: %pI6c min_daddr: %pI6c max_daddr: %pI6c\n",
566 562 &pkt_dev->in6_saddr,
567 fmt_ip6(b1, pkt_dev->in6_daddr.s6_addr); 563 &pkt_dev->min_in6_saddr, &pkt_dev->max_in6_saddr,
568 fmt_ip6(b2, pkt_dev->min_in6_daddr.s6_addr); 564 &pkt_dev->in6_daddr,
569 fmt_ip6(b3, pkt_dev->max_in6_daddr.s6_addr); 565 &pkt_dev->min_in6_daddr, &pkt_dev->max_in6_daddr);
570 seq_printf(seq,
571 " daddr: %s min_daddr: %s max_daddr: %s\n", b1,
572 b2, b3);
573
574 } else { 566 } else {
575 seq_printf(seq, 567 seq_printf(seq,
576 " dst_min: %s dst_max: %s\n", 568 " dst_min: %s dst_max: %s\n",
@@ -706,10 +698,9 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
706 pkt_dev->cur_src_mac_offset); 698 pkt_dev->cur_src_mac_offset);
707 699
708 if (pkt_dev->flags & F_IPV6) { 700 if (pkt_dev->flags & F_IPV6) {
709 char b1[128], b2[128]; 701 seq_printf(seq, " cur_saddr: %pI6c cur_daddr: %pI6c\n",
710 fmt_ip6(b1, pkt_dev->cur_in6_daddr.s6_addr); 702 &pkt_dev->cur_in6_saddr,
711 fmt_ip6(b2, pkt_dev->cur_in6_saddr.s6_addr); 703 &pkt_dev->cur_in6_daddr);
712 seq_printf(seq, " cur_saddr: %s cur_daddr: %s\n", b2, b1);
713 } else 704 } else
714 seq_printf(seq, " cur_saddr: 0x%x cur_daddr: 0x%x\n", 705 seq_printf(seq, " cur_saddr: 0x%x cur_daddr: 0x%x\n",
715 pkt_dev->cur_saddr, pkt_dev->cur_daddr); 706 pkt_dev->cur_saddr, pkt_dev->cur_daddr);
@@ -1309,7 +1300,7 @@ static ssize_t pktgen_if_write(struct file *file,
1309 buf[len] = 0; 1300 buf[len] = 0;
1310 1301
1311 scan_ip6(buf, pkt_dev->in6_daddr.s6_addr); 1302 scan_ip6(buf, pkt_dev->in6_daddr.s6_addr);
1312 fmt_ip6(buf, pkt_dev->in6_daddr.s6_addr); 1303 snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->in6_daddr);
1313 1304
1314 ipv6_addr_copy(&pkt_dev->cur_in6_daddr, &pkt_dev->in6_daddr); 1305 ipv6_addr_copy(&pkt_dev->cur_in6_daddr, &pkt_dev->in6_daddr);
1315 1306
@@ -1332,7 +1323,7 @@ static ssize_t pktgen_if_write(struct file *file,
1332 buf[len] = 0; 1323 buf[len] = 0;
1333 1324
1334 scan_ip6(buf, pkt_dev->min_in6_daddr.s6_addr); 1325 scan_ip6(buf, pkt_dev->min_in6_daddr.s6_addr);
1335 fmt_ip6(buf, pkt_dev->min_in6_daddr.s6_addr); 1326 snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->min_in6_daddr);
1336 1327
1337 ipv6_addr_copy(&pkt_dev->cur_in6_daddr, 1328 ipv6_addr_copy(&pkt_dev->cur_in6_daddr,
1338 &pkt_dev->min_in6_daddr); 1329 &pkt_dev->min_in6_daddr);
@@ -1355,7 +1346,7 @@ static ssize_t pktgen_if_write(struct file *file,
1355 buf[len] = 0; 1346 buf[len] = 0;
1356 1347
1357 scan_ip6(buf, pkt_dev->max_in6_daddr.s6_addr); 1348 scan_ip6(buf, pkt_dev->max_in6_daddr.s6_addr);
1358 fmt_ip6(buf, pkt_dev->max_in6_daddr.s6_addr); 1349 snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->max_in6_daddr);
1359 1350
1360 if (debug) 1351 if (debug)
1361 printk(KERN_DEBUG "pktgen: dst6_max set to: %s\n", buf); 1352 printk(KERN_DEBUG "pktgen: dst6_max set to: %s\n", buf);
@@ -1376,7 +1367,7 @@ static ssize_t pktgen_if_write(struct file *file,
1376 buf[len] = 0; 1367 buf[len] = 0;
1377 1368
1378 scan_ip6(buf, pkt_dev->in6_saddr.s6_addr); 1369 scan_ip6(buf, pkt_dev->in6_saddr.s6_addr);
1379 fmt_ip6(buf, pkt_dev->in6_saddr.s6_addr); 1370 snprintf(buf, sizeof(buf), "%pI6c", &pkt_dev->in6_saddr);
1380 1371
1381 ipv6_addr_copy(&pkt_dev->cur_in6_saddr, &pkt_dev->in6_saddr); 1372 ipv6_addr_copy(&pkt_dev->cur_in6_saddr, &pkt_dev->in6_saddr);
1382 1373
@@ -1430,11 +1421,6 @@ static ssize_t pktgen_if_write(struct file *file,
1430 return count; 1421 return count;
1431 } 1422 }
1432 if (!strcmp(name, "dst_mac")) { 1423 if (!strcmp(name, "dst_mac")) {
1433 char *v = valstr;
1434 unsigned char old_dmac[ETH_ALEN];
1435 unsigned char *m = pkt_dev->dst_mac;
1436 memcpy(old_dmac, pkt_dev->dst_mac, ETH_ALEN);
1437
1438 len = strn_len(&user_buffer[i], sizeof(valstr) - 1); 1424 len = strn_len(&user_buffer[i], sizeof(valstr) - 1);
1439 if (len < 0) 1425 if (len < 0)
1440 return len; 1426 return len;
@@ -1442,35 +1428,16 @@ static ssize_t pktgen_if_write(struct file *file,
1442 memset(valstr, 0, sizeof(valstr)); 1428 memset(valstr, 0, sizeof(valstr));
1443 if (copy_from_user(valstr, &user_buffer[i], len)) 1429 if (copy_from_user(valstr, &user_buffer[i], len))
1444 return -EFAULT; 1430 return -EFAULT;
1445 i += len;
1446
1447 for (*m = 0; *v && m < pkt_dev->dst_mac + 6; v++) {
1448 int value;
1449
1450 value = hex_to_bin(*v);
1451 if (value >= 0)
1452 *m = *m * 16 + value;
1453
1454 if (*v == ':') {
1455 m++;
1456 *m = 0;
1457 }
1458 }
1459 1431
1432 if (!mac_pton(valstr, pkt_dev->dst_mac))
1433 return -EINVAL;
1460 /* Set up Dest MAC */ 1434 /* Set up Dest MAC */
1461 if (compare_ether_addr(old_dmac, pkt_dev->dst_mac)) 1435 memcpy(&pkt_dev->hh[0], pkt_dev->dst_mac, ETH_ALEN);
1462 memcpy(&(pkt_dev->hh[0]), pkt_dev->dst_mac, ETH_ALEN);
1463 1436
1464 sprintf(pg_result, "OK: dstmac"); 1437 sprintf(pg_result, "OK: dstmac %pM", pkt_dev->dst_mac);
1465 return count; 1438 return count;
1466 } 1439 }
1467 if (!strcmp(name, "src_mac")) { 1440 if (!strcmp(name, "src_mac")) {
1468 char *v = valstr;
1469 unsigned char old_smac[ETH_ALEN];
1470 unsigned char *m = pkt_dev->src_mac;
1471
1472 memcpy(old_smac, pkt_dev->src_mac, ETH_ALEN);
1473
1474 len = strn_len(&user_buffer[i], sizeof(valstr) - 1); 1441 len = strn_len(&user_buffer[i], sizeof(valstr) - 1);
1475 if (len < 0) 1442 if (len < 0)
1476 return len; 1443 return len;
@@ -1478,26 +1445,13 @@ static ssize_t pktgen_if_write(struct file *file,
1478 memset(valstr, 0, sizeof(valstr)); 1445 memset(valstr, 0, sizeof(valstr));
1479 if (copy_from_user(valstr, &user_buffer[i], len)) 1446 if (copy_from_user(valstr, &user_buffer[i], len))
1480 return -EFAULT; 1447 return -EFAULT;
1481 i += len;
1482
1483 for (*m = 0; *v && m < pkt_dev->src_mac + 6; v++) {
1484 int value;
1485
1486 value = hex_to_bin(*v);
1487 if (value >= 0)
1488 *m = *m * 16 + value;
1489
1490 if (*v == ':') {
1491 m++;
1492 *m = 0;
1493 }
1494 }
1495 1448
1449 if (!mac_pton(valstr, pkt_dev->src_mac))
1450 return -EINVAL;
1496 /* Set up Src MAC */ 1451 /* Set up Src MAC */
1497 if (compare_ether_addr(old_smac, pkt_dev->src_mac)) 1452 memcpy(&pkt_dev->hh[6], pkt_dev->src_mac, ETH_ALEN);
1498 memcpy(&(pkt_dev->hh[6]), pkt_dev->src_mac, ETH_ALEN);
1499 1453
1500 sprintf(pg_result, "OK: srcmac"); 1454 sprintf(pg_result, "OK: srcmac %pM", pkt_dev->src_mac);
1501 return count; 1455 return count;
1502 } 1456 }
1503 1457
@@ -2514,7 +2468,6 @@ static int pktgen_output_ipsec(struct sk_buff *skb, struct pktgen_dev *pkt_dev)
2514{ 2468{
2515 struct xfrm_state *x = pkt_dev->flows[pkt_dev->curfl].x; 2469 struct xfrm_state *x = pkt_dev->flows[pkt_dev->curfl].x;
2516 int err = 0; 2470 int err = 0;
2517 struct iphdr *iph;
2518 2471
2519 if (!x) 2472 if (!x)
2520 return 0; 2473 return 0;
@@ -2524,7 +2477,6 @@ static int pktgen_output_ipsec(struct sk_buff *skb, struct pktgen_dev *pkt_dev)
2524 return 0; 2477 return 0;
2525 2478
2526 spin_lock(&x->lock); 2479 spin_lock(&x->lock);
2527 iph = ip_hdr(skb);
2528 2480
2529 err = x->outer_mode->output(x, skb); 2481 err = x->outer_mode->output(x, skb);
2530 if (err) 2482 if (err)
@@ -2624,6 +2576,7 @@ static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb,
2624 } else { 2576 } else {
2625 int frags = pkt_dev->nfrags; 2577 int frags = pkt_dev->nfrags;
2626 int i, len; 2578 int i, len;
2579 int frag_len;
2627 2580
2628 2581
2629 if (frags > MAX_SKB_FRAGS) 2582 if (frags > MAX_SKB_FRAGS)
@@ -2635,6 +2588,8 @@ static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb,
2635 } 2588 }
2636 2589
2637 i = 0; 2590 i = 0;
2591 frag_len = (datalen/frags) < PAGE_SIZE ?
2592 (datalen/frags) : PAGE_SIZE;
2638 while (datalen > 0) { 2593 while (datalen > 0) {
2639 if (unlikely(!pkt_dev->page)) { 2594 if (unlikely(!pkt_dev->page)) {
2640 int node = numa_node_id(); 2595 int node = numa_node_id();
@@ -2648,38 +2603,18 @@ static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb,
2648 skb_shinfo(skb)->frags[i].page = pkt_dev->page; 2603 skb_shinfo(skb)->frags[i].page = pkt_dev->page;
2649 get_page(pkt_dev->page); 2604 get_page(pkt_dev->page);
2650 skb_shinfo(skb)->frags[i].page_offset = 0; 2605 skb_shinfo(skb)->frags[i].page_offset = 0;
2651 skb_shinfo(skb)->frags[i].size = 2606 /*last fragment, fill rest of data*/
2652 (datalen < PAGE_SIZE ? datalen : PAGE_SIZE); 2607 if (i == (frags - 1))
2608 skb_shinfo(skb)->frags[i].size =
2609 (datalen < PAGE_SIZE ? datalen : PAGE_SIZE);
2610 else
2611 skb_shinfo(skb)->frags[i].size = frag_len;
2653 datalen -= skb_shinfo(skb)->frags[i].size; 2612 datalen -= skb_shinfo(skb)->frags[i].size;
2654 skb->len += skb_shinfo(skb)->frags[i].size; 2613 skb->len += skb_shinfo(skb)->frags[i].size;
2655 skb->data_len += skb_shinfo(skb)->frags[i].size; 2614 skb->data_len += skb_shinfo(skb)->frags[i].size;
2656 i++; 2615 i++;
2657 skb_shinfo(skb)->nr_frags = i; 2616 skb_shinfo(skb)->nr_frags = i;
2658 } 2617 }
2659
2660 while (i < frags) {
2661 int rem;
2662
2663 if (i == 0)
2664 break;
2665
2666 rem = skb_shinfo(skb)->frags[i - 1].size / 2;
2667 if (rem == 0)
2668 break;
2669
2670 skb_shinfo(skb)->frags[i - 1].size -= rem;
2671
2672 skb_shinfo(skb)->frags[i] =
2673 skb_shinfo(skb)->frags[i - 1];
2674 get_page(skb_shinfo(skb)->frags[i].page);
2675 skb_shinfo(skb)->frags[i].page =
2676 skb_shinfo(skb)->frags[i - 1].page;
2677 skb_shinfo(skb)->frags[i].page_offset +=
2678 skb_shinfo(skb)->frags[i - 1].size;
2679 skb_shinfo(skb)->frags[i].size = rem;
2680 i++;
2681 skb_shinfo(skb)->nr_frags = i;
2682 }
2683 } 2618 }
2684 2619
2685 /* Stamp the time, and sequence number, 2620 /* Stamp the time, and sequence number,
@@ -2917,79 +2852,6 @@ static unsigned int scan_ip6(const char *s, char ip[16])
2917 return len; 2852 return len;
2918} 2853}
2919 2854
2920static char tohex(char hexdigit)
2921{
2922 return hexdigit > 9 ? hexdigit + 'a' - 10 : hexdigit + '0';
2923}
2924
2925static int fmt_xlong(char *s, unsigned int i)
2926{
2927 char *bak = s;
2928 *s = tohex((i >> 12) & 0xf);
2929 if (s != bak || *s != '0')
2930 ++s;
2931 *s = tohex((i >> 8) & 0xf);
2932 if (s != bak || *s != '0')
2933 ++s;
2934 *s = tohex((i >> 4) & 0xf);
2935 if (s != bak || *s != '0')
2936 ++s;
2937 *s = tohex(i & 0xf);
2938 return s - bak + 1;
2939}
2940
2941static unsigned int fmt_ip6(char *s, const char ip[16])
2942{
2943 unsigned int len;
2944 unsigned int i;
2945 unsigned int temp;
2946 unsigned int compressing;
2947 int j;
2948
2949 len = 0;
2950 compressing = 0;
2951 for (j = 0; j < 16; j += 2) {
2952
2953#ifdef V4MAPPEDPREFIX
2954 if (j == 12 && !memcmp(ip, V4mappedprefix, 12)) {
2955 inet_ntoa_r(*(struct in_addr *)(ip + 12), s);
2956 temp = strlen(s);
2957 return len + temp;
2958 }
2959#endif
2960 temp = ((unsigned long)(unsigned char)ip[j] << 8) +
2961 (unsigned long)(unsigned char)ip[j + 1];
2962 if (temp == 0) {
2963 if (!compressing) {
2964 compressing = 1;
2965 if (j == 0) {
2966 *s++ = ':';
2967 ++len;
2968 }
2969 }
2970 } else {
2971 if (compressing) {
2972 compressing = 0;
2973 *s++ = ':';
2974 ++len;
2975 }
2976 i = fmt_xlong(s, temp);
2977 len += i;
2978 s += i;
2979 if (j < 14) {
2980 *s++ = ':';
2981 ++len;
2982 }
2983 }
2984 }
2985 if (compressing) {
2986 *s++ = ':';
2987 ++len;
2988 }
2989 *s = 0;
2990 return len;
2991}
2992
2993static struct sk_buff *fill_packet_ipv6(struct net_device *odev, 2855static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
2994 struct pktgen_dev *pkt_dev) 2856 struct pktgen_dev *pkt_dev)
2995{ 2857{
@@ -3682,13 +3544,12 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
3682 return -ENOMEM; 3544 return -ENOMEM;
3683 3545
3684 strcpy(pkt_dev->odevname, ifname); 3546 strcpy(pkt_dev->odevname, ifname);
3685 pkt_dev->flows = vmalloc_node(MAX_CFLOWS * sizeof(struct flow_state), 3547 pkt_dev->flows = vzalloc_node(MAX_CFLOWS * sizeof(struct flow_state),
3686 node); 3548 node);
3687 if (pkt_dev->flows == NULL) { 3549 if (pkt_dev->flows == NULL) {
3688 kfree(pkt_dev); 3550 kfree(pkt_dev);
3689 return -ENOMEM; 3551 return -ENOMEM;
3690 } 3552 }
3691 memset(pkt_dev->flows, 0, MAX_CFLOWS * sizeof(struct flow_state));
3692 3553
3693 pkt_dev->removal_mark = 0; 3554 pkt_dev->removal_mark = 0;
3694 pkt_dev->min_pkt_size = ETH_ZLEN; 3555 pkt_dev->min_pkt_size = ETH_ZLEN;
@@ -3846,6 +3707,7 @@ static int __init pg_init(void)
3846{ 3707{
3847 int cpu; 3708 int cpu;
3848 struct proc_dir_entry *pe; 3709 struct proc_dir_entry *pe;
3710 int ret = 0;
3849 3711
3850 pr_info("%s", version); 3712 pr_info("%s", version);
3851 3713
@@ -3856,11 +3718,10 @@ static int __init pg_init(void)
3856 pe = proc_create(PGCTRL, 0600, pg_proc_dir, &pktgen_fops); 3718 pe = proc_create(PGCTRL, 0600, pg_proc_dir, &pktgen_fops);
3857 if (pe == NULL) { 3719 if (pe == NULL) {
3858 pr_err("ERROR: cannot create %s procfs entry\n", PGCTRL); 3720 pr_err("ERROR: cannot create %s procfs entry\n", PGCTRL);
3859 proc_net_remove(&init_net, PG_PROC_DIR); 3721 ret = -EINVAL;
3860 return -EINVAL; 3722 goto remove_dir;
3861 } 3723 }
3862 3724
3863 /* Register us to receive netdevice events */
3864 register_netdevice_notifier(&pktgen_notifier_block); 3725 register_netdevice_notifier(&pktgen_notifier_block);
3865 3726
3866 for_each_online_cpu(cpu) { 3727 for_each_online_cpu(cpu) {
@@ -3874,13 +3735,18 @@ static int __init pg_init(void)
3874 3735
3875 if (list_empty(&pktgen_threads)) { 3736 if (list_empty(&pktgen_threads)) {
3876 pr_err("ERROR: Initialization failed for all threads\n"); 3737 pr_err("ERROR: Initialization failed for all threads\n");
3877 unregister_netdevice_notifier(&pktgen_notifier_block); 3738 ret = -ENODEV;
3878 remove_proc_entry(PGCTRL, pg_proc_dir); 3739 goto unregister;
3879 proc_net_remove(&init_net, PG_PROC_DIR);
3880 return -ENODEV;
3881 } 3740 }
3882 3741
3883 return 0; 3742 return 0;
3743
3744 unregister:
3745 unregister_netdevice_notifier(&pktgen_notifier_block);
3746 remove_proc_entry(PGCTRL, pg_proc_dir);
3747 remove_dir:
3748 proc_net_remove(&init_net, PG_PROC_DIR);
3749 return ret;
3884} 3750}
3885 3751
3886static void __exit pg_cleanup(void) 3752static void __exit pg_cleanup(void)
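
Note: beyond the mac_pton() and vzalloc_node() conversions, pktgen drops
its private fmt_ip6()/fmt_xlong() pair in favour of the kernel's %pI6c
vsnprintf extension: %pI6 prints all eight 16-bit groups of an in6_addr,
while the trailing 'c' selects the compressed :: form. It takes a pointer
to the address:

    struct in6_addr a = IN6ADDR_LOOPBACK_INIT;

    /* %pI6  -> 0000:0000:0000:0000:0000:0000:0000:0001
     * %pI6c -> ::1  (what pktgen_if_show() now emits)   */
    pr_debug("saddr: %pI6c\n", &a);

The pg_init() rework at the end is ordinary error unwinding: one exit
ladder (the unregister/remove_dir labels) instead of duplicating the
teardown calls in every failure branch.
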
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index d7c4bb4b1820..abd936d8a716 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -850,6 +850,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
850 struct nlattr *attr, *af_spec; 850 struct nlattr *attr, *af_spec;
851 struct rtnl_af_ops *af_ops; 851 struct rtnl_af_ops *af_ops;
852 852
853 ASSERT_RTNL();
853 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags); 854 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
854 if (nlh == NULL) 855 if (nlh == NULL)
855 return -EMSGSIZE; 856 return -EMSGSIZE;
@@ -1007,10 +1008,11 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
1007 s_h = cb->args[0]; 1008 s_h = cb->args[0];
1008 s_idx = cb->args[1]; 1009 s_idx = cb->args[1];
1009 1010
1011 rcu_read_lock();
1010 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { 1012 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
1011 idx = 0; 1013 idx = 0;
1012 head = &net->dev_index_head[h]; 1014 head = &net->dev_index_head[h];
1013 hlist_for_each_entry(dev, node, head, index_hlist) { 1015 hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
1014 if (idx < s_idx) 1016 if (idx < s_idx)
1015 goto cont; 1017 goto cont;
1016 if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK, 1018 if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
@@ -1023,6 +1025,7 @@ cont:
1023 } 1025 }
1024 } 1026 }
1025out: 1027out:
1028 rcu_read_unlock();
1026 cb->args[1] = idx; 1029 cb->args[1] = idx;
1027 cb->args[0] = h; 1030 cb->args[0] = h;
1028 1031
@@ -1043,6 +1046,7 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
1043 [IFLA_LINKMODE] = { .type = NLA_U8 }, 1046 [IFLA_LINKMODE] = { .type = NLA_U8 },
1044 [IFLA_LINKINFO] = { .type = NLA_NESTED }, 1047 [IFLA_LINKINFO] = { .type = NLA_NESTED },
1045 [IFLA_NET_NS_PID] = { .type = NLA_U32 }, 1048 [IFLA_NET_NS_PID] = { .type = NLA_U32 },
1049 [IFLA_NET_NS_FD] = { .type = NLA_U32 },
1046 [IFLA_IFALIAS] = { .type = NLA_STRING, .len = IFALIASZ-1 }, 1050 [IFLA_IFALIAS] = { .type = NLA_STRING, .len = IFALIASZ-1 },
1047 [IFLA_VFINFO_LIST] = {. type = NLA_NESTED }, 1051 [IFLA_VFINFO_LIST] = {. type = NLA_NESTED },
1048 [IFLA_VF_PORTS] = { .type = NLA_NESTED }, 1052 [IFLA_VF_PORTS] = { .type = NLA_NESTED },
@@ -1091,6 +1095,8 @@ struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
1091 */ 1095 */
1092 if (tb[IFLA_NET_NS_PID]) 1096 if (tb[IFLA_NET_NS_PID])
1093 net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID])); 1097 net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID]));
1098 else if (tb[IFLA_NET_NS_FD])
1099 net = get_net_ns_by_fd(nla_get_u32(tb[IFLA_NET_NS_FD]));
1094 else 1100 else
1095 net = get_net(src_net); 1101 net = get_net(src_net);
1096 return net; 1102 return net;
@@ -1221,7 +1227,7 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
1221 int send_addr_notify = 0; 1227 int send_addr_notify = 0;
1222 int err; 1228 int err;
1223 1229
1224 if (tb[IFLA_NET_NS_PID]) { 1230 if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]) {
1225 struct net *net = rtnl_link_get_net(dev_net(dev), tb); 1231 struct net *net = rtnl_link_get_net(dev_net(dev), tb);
1226 if (IS_ERR(net)) { 1232 if (IS_ERR(net)) {
1227 err = PTR_ERR(net); 1233 err = PTR_ERR(net);
@@ -1499,6 +1505,7 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1499 char ifname[IFNAMSIZ]; 1505 char ifname[IFNAMSIZ];
1500 struct nlattr *tb[IFLA_MAX+1]; 1506 struct nlattr *tb[IFLA_MAX+1];
1501 int err; 1507 int err;
1508 LIST_HEAD(list_kill);
1502 1509
1503 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy); 1510 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
1504 if (err < 0) 1511 if (err < 0)
@@ -1522,7 +1529,9 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1522 if (!ops) 1529 if (!ops)
1523 return -EOPNOTSUPP; 1530 return -EOPNOTSUPP;
1524 1531
1525 ops->dellink(dev, NULL); 1532 ops->dellink(dev, &list_kill);
1533 unregister_netdevice_many(&list_kill);
1534 list_del(&list_kill);
1526 return 0; 1535 return 0;
1527} 1536}
1528 1537
@@ -1570,12 +1579,6 @@ struct net_device *rtnl_create_link(struct net *src_net, struct net *net,
1570 dev->rtnl_link_state = RTNL_LINK_INITIALIZING; 1579 dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
1571 dev->real_num_tx_queues = real_num_queues; 1580 dev->real_num_tx_queues = real_num_queues;
1572 1581
1573 if (strchr(dev->name, '%')) {
1574 err = dev_alloc_name(dev, dev->name);
1575 if (err < 0)
1576 goto err_free;
1577 }
1578
1579 if (tb[IFLA_MTU]) 1582 if (tb[IFLA_MTU])
1580 dev->mtu = nla_get_u32(tb[IFLA_MTU]); 1583 dev->mtu = nla_get_u32(tb[IFLA_MTU]);
1581 if (tb[IFLA_ADDRESS]) 1584 if (tb[IFLA_ADDRESS])
@@ -1595,8 +1598,6 @@ struct net_device *rtnl_create_link(struct net *src_net, struct net *net,
1595 1598
1596 return dev; 1599 return dev;
1597 1600
1598err_free:
1599 free_netdev(dev);
1600err: 1601err:
1601 return ERR_PTR(err); 1602 return ERR_PTR(err);
1602} 1603}
@@ -1963,6 +1964,8 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi
1963 case NETDEV_GOING_DOWN: 1964 case NETDEV_GOING_DOWN:
1964 case NETDEV_UNREGISTER: 1965 case NETDEV_UNREGISTER:
1965 case NETDEV_UNREGISTER_BATCH: 1966 case NETDEV_UNREGISTER_BATCH:
1967 case NETDEV_RELEASE:
1968 case NETDEV_JOIN:
1966 break; 1969 break;
1967 default: 1970 default:
1968 rtmsg_ifinfo(RTM_NEWLINK, dev, 0); 1971 rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
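Several independent changes ride in the rtnetlink.c diff: rtnl_fill_ifinfo() now asserts it runs under RTNL, the ifinfo dump walks dev_index_head under rcu_read_lock() with hlist_for_each_entry_rcu(), links can be targeted at a namespace by file descriptor (IFLA_NET_NS_FD) as well as by pid, and rtnl_dellink() batches teardown through unregister_netdevice_many(). The RCU dump-walk pattern, as a minimal hedged sketch (hnode and use_dev() are illustrative):

	struct net_device *dev;
	struct hlist_node *hnode;
	struct hlist_head *head = &net->dev_index_head[h];

	rcu_read_lock();
	hlist_for_each_entry_rcu(dev, hnode, head, index_hlist) {
		/* dev cannot be freed while the read-side section
		 * is open, even if it is concurrently unregistered */
		use_dev(dev);		/* illustrative */
	}
	rcu_read_unlock();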
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 7ebeed0a877c..46cbd28f40f9 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -57,6 +57,7 @@
57#include <linux/init.h> 57#include <linux/init.h>
58#include <linux/scatterlist.h> 58#include <linux/scatterlist.h>
59#include <linux/errqueue.h> 59#include <linux/errqueue.h>
60#include <linux/prefetch.h>
60 61
61#include <net/protocol.h> 62#include <net/protocol.h>
62#include <net/dst.h> 63#include <net/dst.h>
@@ -2993,6 +2994,9 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
2993 skb->destructor = sock_rmem_free; 2994 skb->destructor = sock_rmem_free;
2994 atomic_add(skb->truesize, &sk->sk_rmem_alloc); 2995 atomic_add(skb->truesize, &sk->sk_rmem_alloc);
2995 2996
2997 /* before exiting rcu section, make sure dst is refcounted */
2998 skb_dst_force(skb);
2999
2996 skb_queue_tail(&sk->sk_error_queue, skb); 3000 skb_queue_tail(&sk->sk_error_queue, skb);
2997 if (!sock_flag(sk, SOCK_DEAD)) 3001 if (!sock_flag(sk, SOCK_DEAD))
2998 sk->sk_data_ready(sk, skb->len); 3002 sk->sk_data_ready(sk, skb->len);
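The sock_queue_err_skb() change pins the packet's route before the skb escapes the caller's context: once on sk_error_queue the skb can outlive the RCU read-side section it was received under, so its dst must carry a real reference. The pattern as the hunk applies it:

	/* convert skb->dst from an RCU-borrowed pointer into a
	 * refcounted one before the skb is queued past this section */
	skb_dst_force(skb);
	skb_queue_tail(&sk->sk_error_queue, skb);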
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 385b6095fdc4..77a65f031488 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -17,6 +17,7 @@
17 17
18#include <net/ip.h> 18#include <net/ip.h>
19#include <net/sock.h> 19#include <net/sock.h>
20#include <net/net_ratelimit.h>
20 21
21#ifdef CONFIG_RPS 22#ifdef CONFIG_RPS
22static int rps_sock_flow_sysctl(ctl_table *table, int write, 23static int rps_sock_flow_sysctl(ctl_table *table, int write,
@@ -122,6 +123,15 @@ static struct ctl_table net_core_table[] = {
122 .mode = 0644, 123 .mode = 0644,
123 .proc_handler = proc_dointvec 124 .proc_handler = proc_dointvec
124 }, 125 },
126#ifdef CONFIG_BPF_JIT
127 {
128 .procname = "bpf_jit_enable",
129 .data = &bpf_jit_enable,
130 .maxlen = sizeof(int),
131 .mode = 0644,
132 .proc_handler = proc_dointvec
133 },
134#endif
125 { 135 {
126 .procname = "netdev_tstamp_prequeue", 136 .procname = "netdev_tstamp_prequeue",
127 .data = &netdev_tstamp_prequeue, 137 .data = &netdev_tstamp_prequeue,
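The new net_core_table entry makes the BPF JIT a runtime switch: on kernels built with CONFIG_BPF_JIT, writing to /proc/sys/net/core/bpf_jit_enable flips bpf_jit_enable without a reboot. The same ctl_table idiom for an arbitrary int knob, as a hedged sketch (the names are illustrative):

	static int example_knob;

	static struct ctl_table example_table[] = {
		{
			.procname	= "example_knob",
			.data		= &example_knob,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec
		},
		{ }	/* sentinel */
	};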
diff --git a/net/core/utils.c b/net/core/utils.c
index 5fea0ab21902..386e263f6066 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -27,6 +27,7 @@
27#include <linux/ratelimit.h> 27#include <linux/ratelimit.h>
28 28
29#include <net/sock.h> 29#include <net/sock.h>
30#include <net/net_ratelimit.h>
30 31
31#include <asm/byteorder.h> 32#include <asm/byteorder.h>
32#include <asm/system.h> 33#include <asm/system.h>
@@ -296,3 +297,27 @@ void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
296 csum_unfold(*sum))); 297 csum_unfold(*sum)));
297} 298}
298EXPORT_SYMBOL(inet_proto_csum_replace4); 299EXPORT_SYMBOL(inet_proto_csum_replace4);
300
301int mac_pton(const char *s, u8 *mac)
302{
303 int i;
304
305 /* XX:XX:XX:XX:XX:XX */
306 if (strlen(s) < 3 * ETH_ALEN - 1)
307 return 0;
308
309 /* Don't dirty result unless string is valid MAC. */
310 for (i = 0; i < ETH_ALEN; i++) {
311 if (!strchr("0123456789abcdefABCDEF", s[i * 3]))
312 return 0;
313 if (!strchr("0123456789abcdefABCDEF", s[i * 3 + 1]))
314 return 0;
315 if (i != ETH_ALEN - 1 && s[i * 3 + 2] != ':')
316 return 0;
317 }
318 for (i = 0; i < ETH_ALEN; i++) {
319 mac[i] = (hex_to_bin(s[i * 3]) << 4) | hex_to_bin(s[i * 3 + 1]);
320 }
321 return 1;
322}
323EXPORT_SYMBOL(mac_pton);
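Note the conventions of the new mac_pton(): it returns 1 on success and 0 on failure (not the usual 0/-errno), it writes the output buffer only after the whole XX:XX:XX:XX:XX:XX prefix has validated, and because the length check is only a lower bound it tolerates trailing characters after the 17-byte prefix. A hedged usage sketch:

	u8 addr[ETH_ALEN];

	if (!mac_pton("00:11:22:33:44:55", addr))
		return -EINVAL;
	/* addr[0..5] now holds the parsed bytes; on the failure
	 * path addr[] would have been left untouched */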
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index ae451c6d83ba..8c36adfd1919 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -40,13 +40,15 @@
40 40
41int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) 41int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
42{ 42{
43 const struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
43 struct inet_sock *inet = inet_sk(sk); 44 struct inet_sock *inet = inet_sk(sk);
44 struct dccp_sock *dp = dccp_sk(sk); 45 struct dccp_sock *dp = dccp_sk(sk);
45 const struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
46 __be16 orig_sport, orig_dport; 46 __be16 orig_sport, orig_dport;
47 struct rtable *rt;
48 __be32 daddr, nexthop; 47 __be32 daddr, nexthop;
48 struct flowi4 *fl4;
49 struct rtable *rt;
49 int err; 50 int err;
51 struct ip_options_rcu *inet_opt;
50 52
51 dp->dccps_role = DCCP_ROLE_CLIENT; 53 dp->dccps_role = DCCP_ROLE_CLIENT;
52 54
@@ -57,15 +59,19 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
57 return -EAFNOSUPPORT; 59 return -EAFNOSUPPORT;
58 60
59 nexthop = daddr = usin->sin_addr.s_addr; 61 nexthop = daddr = usin->sin_addr.s_addr;
60 if (inet->opt != NULL && inet->opt->srr) { 62
63 inet_opt = rcu_dereference_protected(inet->inet_opt,
64 sock_owned_by_user(sk));
65 if (inet_opt != NULL && inet_opt->opt.srr) {
61 if (daddr == 0) 66 if (daddr == 0)
62 return -EINVAL; 67 return -EINVAL;
63 nexthop = inet->opt->faddr; 68 nexthop = inet_opt->opt.faddr;
64 } 69 }
65 70
66 orig_sport = inet->inet_sport; 71 orig_sport = inet->inet_sport;
67 orig_dport = usin->sin_port; 72 orig_dport = usin->sin_port;
68 rt = ip_route_connect(nexthop, inet->inet_saddr, 73 fl4 = &inet->cork.fl.u.ip4;
74 rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
69 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if, 75 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
70 IPPROTO_DCCP, 76 IPPROTO_DCCP,
71 orig_sport, orig_dport, sk, true); 77 orig_sport, orig_dport, sk, true);
@@ -77,19 +83,19 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
77 return -ENETUNREACH; 83 return -ENETUNREACH;
78 } 84 }
79 85
80 if (inet->opt == NULL || !inet->opt->srr) 86 if (inet_opt == NULL || !inet_opt->opt.srr)
81 daddr = rt->rt_dst; 87 daddr = fl4->daddr;
82 88
83 if (inet->inet_saddr == 0) 89 if (inet->inet_saddr == 0)
84 inet->inet_saddr = rt->rt_src; 90 inet->inet_saddr = fl4->saddr;
85 inet->inet_rcv_saddr = inet->inet_saddr; 91 inet->inet_rcv_saddr = inet->inet_saddr;
86 92
87 inet->inet_dport = usin->sin_port; 93 inet->inet_dport = usin->sin_port;
88 inet->inet_daddr = daddr; 94 inet->inet_daddr = daddr;
89 95
90 inet_csk(sk)->icsk_ext_hdr_len = 0; 96 inet_csk(sk)->icsk_ext_hdr_len = 0;
91 if (inet->opt != NULL) 97 if (inet_opt)
92 inet_csk(sk)->icsk_ext_hdr_len = inet->opt->optlen; 98 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
93 /* 99 /*
94 * Socket identity is still unknown (sport may be zero). 100 * Socket identity is still unknown (sport may be zero).
95 * However we set state to DCCP_REQUESTING and not releasing socket 101 * However we set state to DCCP_REQUESTING and not releasing socket
@@ -101,8 +107,7 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
101 if (err != 0) 107 if (err != 0)
102 goto failure; 108 goto failure;
103 109
104 rt = ip_route_newports(rt, IPPROTO_DCCP, 110 rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
105 orig_sport, orig_dport,
106 inet->inet_sport, inet->inet_dport, sk); 111 inet->inet_sport, inet->inet_dport, sk);
107 if (IS_ERR(rt)) { 112 if (IS_ERR(rt)) {
108 rt = NULL; 113 rt = NULL;
@@ -391,32 +396,30 @@ struct sock *dccp_v4_request_recv_sock(struct sock *sk, struct sk_buff *skb,
391 if (sk_acceptq_is_full(sk)) 396 if (sk_acceptq_is_full(sk))
392 goto exit_overflow; 397 goto exit_overflow;
393 398
394 if (dst == NULL && (dst = inet_csk_route_req(sk, req)) == NULL)
395 goto exit;
396
397 newsk = dccp_create_openreq_child(sk, req, skb); 399 newsk = dccp_create_openreq_child(sk, req, skb);
398 if (newsk == NULL) 400 if (newsk == NULL)
399 goto exit_nonewsk; 401 goto exit_nonewsk;
400 402
401 sk_setup_caps(newsk, dst);
402
403 newinet = inet_sk(newsk); 403 newinet = inet_sk(newsk);
404 ireq = inet_rsk(req); 404 ireq = inet_rsk(req);
405 newinet->inet_daddr = ireq->rmt_addr; 405 newinet->inet_daddr = ireq->rmt_addr;
406 newinet->inet_rcv_saddr = ireq->loc_addr; 406 newinet->inet_rcv_saddr = ireq->loc_addr;
407 newinet->inet_saddr = ireq->loc_addr; 407 newinet->inet_saddr = ireq->loc_addr;
408 newinet->opt = ireq->opt; 408 newinet->inet_opt = ireq->opt;
409 ireq->opt = NULL; 409 ireq->opt = NULL;
410 newinet->mc_index = inet_iif(skb); 410 newinet->mc_index = inet_iif(skb);
411 newinet->mc_ttl = ip_hdr(skb)->ttl; 411 newinet->mc_ttl = ip_hdr(skb)->ttl;
412 newinet->inet_id = jiffies; 412 newinet->inet_id = jiffies;
413 413
414 if (dst == NULL && (dst = inet_csk_route_child_sock(sk, newsk, req)) == NULL)
415 goto put_and_exit;
416
417 sk_setup_caps(newsk, dst);
418
414 dccp_sync_mss(newsk, dst_mtu(dst)); 419 dccp_sync_mss(newsk, dst_mtu(dst));
415 420
416 if (__inet_inherit_port(sk, newsk) < 0) { 421 if (__inet_inherit_port(sk, newsk) < 0)
417 sock_put(newsk); 422 goto put_and_exit;
418 goto exit;
419 }
420 __inet_hash_nolisten(newsk, NULL); 423 __inet_hash_nolisten(newsk, NULL);
421 424
422 return newsk; 425 return newsk;
@@ -428,6 +431,9 @@ exit_nonewsk:
428exit: 431exit:
429 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); 432 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
430 return NULL; 433 return NULL;
434put_and_exit:
435 sock_put(newsk);
436 goto exit;
431} 437}
432 438
433EXPORT_SYMBOL_GPL(dccp_v4_request_recv_sock); 439EXPORT_SYMBOL_GPL(dccp_v4_request_recv_sock);
@@ -491,8 +497,9 @@ static int dccp_v4_send_response(struct sock *sk, struct request_sock *req,
491 int err = -1; 497 int err = -1;
492 struct sk_buff *skb; 498 struct sk_buff *skb;
493 struct dst_entry *dst; 499 struct dst_entry *dst;
500 struct flowi4 fl4;
494 501
495 dst = inet_csk_route_req(sk, req); 502 dst = inet_csk_route_req(sk, &fl4, req);
496 if (dst == NULL) 503 if (dst == NULL)
497 goto out; 504 goto out;
498 505
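The dccp_v4_connect() rework is one instance of the tree-wide flowi4 conversion: the flow key lives in inet->cork.fl.u.ip4, ip_route_connect() fills it during the lookup, and the chosen addresses are read back from the flow (fl4->saddr, fl4->daddr) instead of the retired rt->rt_src/rt_dst fields. Condensed from the hunks above, with error handling elided:

	struct flowi4 *fl4 = &inet->cork.fl.u.ip4;
	struct rtable *rt;

	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_DCCP, orig_sport, orig_dport, sk, true);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	/* the routing decision comes back in the flow, not the rtable */
	if (inet_opt == NULL || !inet_opt->opt.srr)
		daddr = fl4->daddr;
	if (inet->inet_saddr == 0)
		inet->inet_saddr = fl4->saddr;

The dccp_v4_request_recv_sock() hunk likewise defers the route lookup until the child socket exists, so inet_csk_route_child_sock() can key the lookup off the child rather than the listener.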
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index de1b7e37ad5b..8dc4348774a5 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -54,8 +54,8 @@ static void dccp_v6_hash(struct sock *sk)
54 54
55/* add pseudo-header to DCCP checksum stored in skb->csum */ 55/* add pseudo-header to DCCP checksum stored in skb->csum */
56static inline __sum16 dccp_v6_csum_finish(struct sk_buff *skb, 56static inline __sum16 dccp_v6_csum_finish(struct sk_buff *skb,
57 struct in6_addr *saddr, 57 const struct in6_addr *saddr,
58 struct in6_addr *daddr) 58 const struct in6_addr *daddr)
59{ 59{
60 return csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_DCCP, skb->csum); 60 return csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_DCCP, skb->csum);
61} 61}
@@ -87,7 +87,7 @@ static inline __u32 dccp_v6_init_sequence(struct sk_buff *skb)
87static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, 87static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
88 u8 type, u8 code, int offset, __be32 info) 88 u8 type, u8 code, int offset, __be32 info)
89{ 89{
90 struct ipv6hdr *hdr = (struct ipv6hdr *)skb->data; 90 const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
91 const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset); 91 const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset);
92 struct dccp_sock *dp; 92 struct dccp_sock *dp;
93 struct ipv6_pinfo *np; 93 struct ipv6_pinfo *np;
@@ -296,7 +296,7 @@ static void dccp_v6_reqsk_destructor(struct request_sock *req)
296 296
297static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb) 297static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
298{ 298{
299 struct ipv6hdr *rxip6h; 299 const struct ipv6hdr *rxip6h;
300 struct sk_buff *skb; 300 struct sk_buff *skb;
301 struct flowi6 fl6; 301 struct flowi6 fl6;
302 struct net *net = dev_net(skb_dst(rxskb)->dev); 302 struct net *net = dev_net(skb_dst(rxskb)->dev);
@@ -573,7 +573,7 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
573 573
574 First: no IPv4 options. 574 First: no IPv4 options.
575 */ 575 */
576 newinet->opt = NULL; 576 newinet->inet_opt = NULL;
577 577
578 /* Clone RX bits */ 578 /* Clone RX bits */
579 newnp->rxopt.all = np->rxopt.all; 579 newnp->rxopt.all = np->rxopt.all;
diff --git a/net/dccp/output.c b/net/dccp/output.c
index 136d41cbcd02..fab108e51e5a 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -43,7 +43,7 @@ static void dccp_skb_entail(struct sock *sk, struct sk_buff *skb)
43static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb) 43static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
44{ 44{
45 if (likely(skb != NULL)) { 45 if (likely(skb != NULL)) {
46 const struct inet_sock *inet = inet_sk(sk); 46 struct inet_sock *inet = inet_sk(sk);
47 const struct inet_connection_sock *icsk = inet_csk(sk); 47 const struct inet_connection_sock *icsk = inet_csk(sk);
48 struct dccp_sock *dp = dccp_sk(sk); 48 struct dccp_sock *dp = dccp_sk(sk);
49 struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb); 49 struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
@@ -136,7 +136,7 @@ static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
136 136
137 DCCP_INC_STATS(DCCP_MIB_OUTSEGS); 137 DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
138 138
139 err = icsk->icsk_af_ops->queue_xmit(skb); 139 err = icsk->icsk_af_ops->queue_xmit(skb, &inet->cork.fl);
140 return net_xmit_eval(err); 140 return net_xmit_eval(err);
141 } 141 }
142 return -ENOBUFS; 142 return -ENOBUFS;
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index 0dcaa903e00e..cf26ac74a188 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -332,14 +332,9 @@ static struct dn_ifaddr *dn_dev_alloc_ifa(void)
332 return ifa; 332 return ifa;
333} 333}
334 334
335static void dn_dev_free_ifa_rcu(struct rcu_head *head)
336{
337 kfree(container_of(head, struct dn_ifaddr, rcu));
338}
339
340static void dn_dev_free_ifa(struct dn_ifaddr *ifa) 335static void dn_dev_free_ifa(struct dn_ifaddr *ifa)
341{ 336{
342 call_rcu(&ifa->rcu, dn_dev_free_ifa_rcu); 337 kfree_rcu(ifa, rcu);
343} 338}
344 339
345static void dn_dev_del_ifa(struct dn_dev *dn_db, struct dn_ifaddr __rcu **ifap, int destroy) 340static void dn_dev_del_ifa(struct dn_dev *dn_db, struct dn_ifaddr __rcu **ifap, int destroy)
@@ -752,7 +747,8 @@ static int dn_nl_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
752 skip_naddr = cb->args[1]; 747 skip_naddr = cb->args[1];
753 748
754 idx = 0; 749 idx = 0;
755 for_each_netdev(&init_net, dev) { 750 rcu_read_lock();
751 for_each_netdev_rcu(&init_net, dev) {
756 if (idx < skip_ndevs) 752 if (idx < skip_ndevs)
757 goto cont; 753 goto cont;
758 else if (idx > skip_ndevs) { 754 else if (idx > skip_ndevs) {
@@ -761,11 +757,11 @@ static int dn_nl_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
761 skip_naddr = 0; 757 skip_naddr = 0;
762 } 758 }
763 759
764 if ((dn_db = rtnl_dereference(dev->dn_ptr)) == NULL) 760 if ((dn_db = rcu_dereference(dev->dn_ptr)) == NULL)
765 goto cont; 761 goto cont;
766 762
767 for (ifa = rtnl_dereference(dn_db->ifa_list), dn_idx = 0; ifa; 763 for (ifa = rcu_dereference(dn_db->ifa_list), dn_idx = 0; ifa;
768 ifa = rtnl_dereference(ifa->ifa_next), dn_idx++) { 764 ifa = rcu_dereference(ifa->ifa_next), dn_idx++) {
769 if (dn_idx < skip_naddr) 765 if (dn_idx < skip_naddr)
770 continue; 766 continue;
771 767
@@ -778,6 +774,7 @@ cont:
778 idx++; 774 idx++;
779 } 775 }
780done: 776done:
777 rcu_read_unlock();
781 cb->args[0] = idx; 778 cb->args[0] = idx;
782 cb->args[1] = dn_idx; 779 cb->args[1] = dn_idx;
783 780
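dn_dev.c picks up the then-new kfree_rcu(ptr, field) helper, which replaces a call_rcu() whose callback does nothing but kfree() the enclosing object; the second argument names the struct rcu_head embedded in that object. The conversion is mechanical:

	/* before: a dedicated callback existed just to free the container */
	static void dn_dev_free_ifa_rcu(struct rcu_head *head)
	{
		kfree(container_of(head, struct dn_ifaddr, rcu));
	}

	call_rcu(&ifa->rcu, dn_dev_free_ifa_rcu);

	/* after: the same deferred free, no callback needed */
	kfree_rcu(ifa, rcu);

The same helper is applied below in fib_semantics.c (free_fib_info) and fib_trie.c (free_leaf_info).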
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 9f09d4fc2880..74544bc6fdec 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -1125,13 +1125,11 @@ make_route:
1125 if (dev_out->flags & IFF_LOOPBACK) 1125 if (dev_out->flags & IFF_LOOPBACK)
1126 flags |= RTCF_LOCAL; 1126 flags |= RTCF_LOCAL;
1127 1127
1128 rt = dst_alloc(&dn_dst_ops, 0); 1128 rt = dst_alloc(&dn_dst_ops, dev_out, 1, 0, DST_HOST);
1129 if (rt == NULL) 1129 if (rt == NULL)
1130 goto e_nobufs; 1130 goto e_nobufs;
1131 1131
1132 atomic_set(&rt->dst.__refcnt, 1); 1132 memset(&rt->fld, 0, sizeof(rt->fld));
1133 rt->dst.flags = DST_HOST;
1134
1135 rt->fld.saddr = oldflp->saddr; 1133 rt->fld.saddr = oldflp->saddr;
1136 rt->fld.daddr = oldflp->daddr; 1134 rt->fld.daddr = oldflp->daddr;
1137 rt->fld.flowidn_oif = oldflp->flowidn_oif; 1135 rt->fld.flowidn_oif = oldflp->flowidn_oif;
@@ -1146,8 +1144,6 @@ make_route:
1146 rt->rt_dst_map = fld.daddr; 1144 rt->rt_dst_map = fld.daddr;
1147 rt->rt_src_map = fld.saddr; 1145 rt->rt_src_map = fld.saddr;
1148 1146
1149 rt->dst.dev = dev_out;
1150 dev_hold(dev_out);
1151 rt->dst.neighbour = neigh; 1147 rt->dst.neighbour = neigh;
1152 neigh = NULL; 1148 neigh = NULL;
1153 1149
@@ -1399,10 +1395,11 @@ static int dn_route_input_slow(struct sk_buff *skb)
1399 } 1395 }
1400 1396
1401make_route: 1397make_route:
1402 rt = dst_alloc(&dn_dst_ops, 0); 1398 rt = dst_alloc(&dn_dst_ops, out_dev, 0, 0, DST_HOST);
1403 if (rt == NULL) 1399 if (rt == NULL)
1404 goto e_nobufs; 1400 goto e_nobufs;
1405 1401
1402 memset(&rt->fld, 0, sizeof(rt->fld));
1406 rt->rt_saddr = fld.saddr; 1403 rt->rt_saddr = fld.saddr;
1407 rt->rt_daddr = fld.daddr; 1404 rt->rt_daddr = fld.daddr;
1408 rt->rt_gateway = fld.daddr; 1405 rt->rt_gateway = fld.daddr;
@@ -1419,9 +1416,7 @@ make_route:
1419 rt->fld.flowidn_iif = in_dev->ifindex; 1416 rt->fld.flowidn_iif = in_dev->ifindex;
1420 rt->fld.flowidn_mark = fld.flowidn_mark; 1417 rt->fld.flowidn_mark = fld.flowidn_mark;
1421 1418
1422 rt->dst.flags = DST_HOST;
1423 rt->dst.neighbour = neigh; 1419 rt->dst.neighbour = neigh;
1424 rt->dst.dev = out_dev;
1425 rt->dst.lastuse = jiffies; 1420 rt->dst.lastuse = jiffies;
1426 rt->dst.output = dn_rt_bug; 1421 rt->dst.output = dn_rt_bug;
1427 switch(res.type) { 1422 switch(res.type) {
@@ -1440,8 +1435,6 @@ make_route:
1440 rt->dst.input = dst_discard; 1435 rt->dst.input = dst_discard;
1441 } 1436 }
1442 rt->rt_flags = flags; 1437 rt->rt_flags = flags;
1443 if (rt->dst.dev)
1444 dev_hold(rt->dst.dev);
1445 1438
1446 err = dn_rt_set_next_hop(rt, &res); 1439 err = dn_rt_set_next_hop(rt, &res);
1447 if (err) 1440 if (err)
diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c
index 99d8d3a40998..bd0a52dd1d40 100644
--- a/net/decnet/dn_table.c
+++ b/net/decnet/dn_table.c
@@ -123,11 +123,11 @@ static inline void dn_rebuild_zone(struct dn_zone *dz,
123 struct dn_fib_node **old_ht, 123 struct dn_fib_node **old_ht,
124 int old_divisor) 124 int old_divisor)
125{ 125{
126 int i;
127 struct dn_fib_node *f, **fp, *next; 126 struct dn_fib_node *f, **fp, *next;
127 int i;
128 128
129 for(i = 0; i < old_divisor; i++) { 129 for(i = 0; i < old_divisor; i++) {
130 for(f = old_ht[i]; f; f = f->fn_next) { 130 for(f = old_ht[i]; f; f = next) {
131 next = f->fn_next; 131 next = f->fn_next;
132 for(fp = dn_chain_p(f->fn_key, dz); 132 for(fp = dn_chain_p(f->fn_key, dz);
133 *fp && dn_key_leq((*fp)->fn_key, f->fn_key); 133 *fp && dn_key_leq((*fp)->fn_key, f->fn_key);
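The dn_rebuild_zone() change is a real bug fix, not a cleanup: the old loop advanced with f = f->fn_next after f had already been spliced into the new hash chain, by which point fn_next may have been rewritten. Caching the successor before touching the node is the standard safe-iteration pattern; a minimal hedged sketch, where relink() stands for the chain-splicing in the real code:

	struct dn_fib_node *f, *next;

	for (f = old_ht[i]; f; f = next) {
		next = f->fn_next;	/* grab the successor first */
		relink(f);		/* may rewrite f->fn_next */
	}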
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index cfa7a5e1c5c9..fa000d26dc60 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -212,10 +212,12 @@ static void dns_resolver_describe(const struct key *key, struct seq_file *m)
212 int err = key->type_data.x[0]; 212 int err = key->type_data.x[0];
213 213
214 seq_puts(m, key->description); 214 seq_puts(m, key->description);
215 if (err) 215 if (key_is_instantiated(key)) {
216 seq_printf(m, ": %d", err); 216 if (err)
217 else 217 seq_printf(m, ": %d", err);
218 seq_printf(m, ": %u", key->datalen); 218 else
219 seq_printf(m, ": %u", key->datalen);
220 }
219} 221}
220 222
221/* 223/*
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 64ca2a6fa0d4..0a47b6c37038 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -288,7 +288,6 @@ static const struct ethtool_ops dsa_slave_ethtool_ops = {
288 .get_drvinfo = dsa_slave_get_drvinfo, 288 .get_drvinfo = dsa_slave_get_drvinfo,
289 .nway_reset = dsa_slave_nway_reset, 289 .nway_reset = dsa_slave_nway_reset,
290 .get_link = dsa_slave_get_link, 290 .get_link = dsa_slave_get_link,
291 .set_sg = ethtool_op_set_sg,
292 .get_strings = dsa_slave_get_strings, 291 .get_strings = dsa_slave_get_strings,
293 .get_ethtool_stats = dsa_slave_get_ethtool_stats, 292 .get_ethtool_stats = dsa_slave_get_ethtool_stats,
294 .get_sset_count = dsa_slave_get_sset_count, 293 .get_sset_count = dsa_slave_get_sset_count,
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index 116d3fd3d669..a1d9f3787dd5 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -935,7 +935,6 @@ static void aun_data_available(struct sock *sk, int slen)
935 struct sk_buff *skb; 935 struct sk_buff *skb;
936 unsigned char *data; 936 unsigned char *data;
937 struct aunhdr *ah; 937 struct aunhdr *ah;
938 struct iphdr *ip;
939 size_t len; 938 size_t len;
940 939
941 while ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL) { 940 while ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL) {
@@ -949,7 +948,6 @@ static void aun_data_available(struct sock *sk, int slen)
949 data = skb_transport_header(skb) + sizeof(struct udphdr); 948 data = skb_transport_header(skb) + sizeof(struct udphdr);
950 ah = (struct aunhdr *)data; 949 ah = (struct aunhdr *)data;
951 len = skb->len - sizeof(struct udphdr); 950 len = skb->len - sizeof(struct udphdr);
952 ip = ip_hdr(skb);
953 951
954 switch (ah->code) 952 switch (ah->code)
955 { 953 {
@@ -962,12 +960,6 @@ static void aun_data_available(struct sock *sk, int slen)
962 case 4: 960 case 4:
963 aun_tx_ack(ah->handle, ECTYPE_TRANSMIT_NOT_LISTENING); 961 aun_tx_ack(ah->handle, ECTYPE_TRANSMIT_NOT_LISTENING);
964 break; 962 break;
965#if 0
966 /* This isn't quite right yet. */
967 case 5:
968 aun_send_response(ip->saddr, ah->handle, 6, ah->cb);
969 break;
970#endif
971 default: 963 default:
972 printk(KERN_DEBUG "unknown AUN packet (type %d)\n", data[0]); 964 printk(KERN_DEBUG "unknown AUN packet (type %d)\n", data[0]);
973 } 965 }
diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c
index ed0eab39f531..02548b292b53 100644
--- a/net/ieee802154/nl-phy.c
+++ b/net/ieee802154/nl-phy.c
@@ -44,7 +44,7 @@ static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 pid,
44 pr_debug("%s\n", __func__); 44 pr_debug("%s\n", __func__);
45 45
46 if (!buf) 46 if (!buf)
47 goto out; 47 return -EMSGSIZE;
48 48
49 hdr = genlmsg_put(msg, 0, seq, &nl802154_family, flags, 49 hdr = genlmsg_put(msg, 0, seq, &nl802154_family, flags,
50 IEEE802154_LIST_PHY); 50 IEEE802154_LIST_PHY);
@@ -65,6 +65,7 @@ static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 pid,
65 pages * sizeof(uint32_t), buf); 65 pages * sizeof(uint32_t), buf);
66 66
67 mutex_unlock(&phy->pib_lock); 67 mutex_unlock(&phy->pib_lock);
68 kfree(buf);
68 return genlmsg_end(msg, hdr); 69 return genlmsg_end(msg, hdr);
69 70
70nla_put_failure: 71nla_put_failure:
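ieee802154_nl_fill_phy() gets two related fixes: the early bail-out on a failed allocation now returns -EMSGSIZE directly instead of jumping to a label that assumed the netlink message had been started, and buf (holding the channel-page dump) is now freed on the success path too, where it previously leaked. Schematically, with the allocation call assumed rather than quoted:

	buf = kzalloc(pages * sizeof(uint32_t), GFP_KERNEL);
	if (!buf)
		return -EMSGSIZE;
	/* ... genlmsg_put(), attribute puts ... */
	mutex_unlock(&phy->pib_lock);
	kfree(buf);		/* previously only the error path
				 * released the buffer */
	return genlmsg_end(msg, hdr);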
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index 0dc772d0d125..f2dc69cffb57 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -11,7 +11,7 @@ obj-y := route.o inetpeer.o protocol.o \
11 datagram.o raw.o udp.o udplite.o \ 11 datagram.o raw.o udp.o udplite.o \
12 arp.o icmp.o devinet.o af_inet.o igmp.o \ 12 arp.o icmp.o devinet.o af_inet.o igmp.o \
13 fib_frontend.o fib_semantics.o fib_trie.o \ 13 fib_frontend.o fib_semantics.o fib_trie.o \
14 inet_fragment.o 14 inet_fragment.o ping.o
15 15
16obj-$(CONFIG_SYSCTL) += sysctl_net_ipv4.o 16obj-$(CONFIG_SYSCTL) += sysctl_net_ipv4.o
17obj-$(CONFIG_PROC_FS) += proc.o 17obj-$(CONFIG_PROC_FS) += proc.o
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 807d83c02ef6..ef1528af7abf 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -105,6 +105,7 @@
105#include <net/tcp.h> 105#include <net/tcp.h>
106#include <net/udp.h> 106#include <net/udp.h>
107#include <net/udplite.h> 107#include <net/udplite.h>
108#include <net/ping.h>
108#include <linux/skbuff.h> 109#include <linux/skbuff.h>
109#include <net/sock.h> 110#include <net/sock.h>
110#include <net/raw.h> 111#include <net/raw.h>
@@ -153,7 +154,7 @@ void inet_sock_destruct(struct sock *sk)
153 WARN_ON(sk->sk_wmem_queued); 154 WARN_ON(sk->sk_wmem_queued);
154 WARN_ON(sk->sk_forward_alloc); 155 WARN_ON(sk->sk_forward_alloc);
155 156
156 kfree(inet->opt); 157 kfree(rcu_dereference_protected(inet->inet_opt, 1));
157 dst_release(rcu_dereference_check(sk->sk_dst_cache, 1)); 158 dst_release(rcu_dereference_check(sk->sk_dst_cache, 1));
158 sk_refcnt_debug_dec(sk); 159 sk_refcnt_debug_dec(sk);
159} 160}
@@ -464,6 +465,11 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
464 if (addr_len < sizeof(struct sockaddr_in)) 465 if (addr_len < sizeof(struct sockaddr_in))
465 goto out; 466 goto out;
466 467
468 if (addr->sin_family != AF_INET) {
469 err = -EAFNOSUPPORT;
470 goto out;
471 }
472
467 chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr); 473 chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
468 474
469 /* Not specified by any standard per-se, however it breaks too 475 /* Not specified by any standard per-se, however it breaks too
@@ -672,6 +678,7 @@ int inet_accept(struct socket *sock, struct socket *newsock, int flags)
672 678
673 lock_sock(sk2); 679 lock_sock(sk2);
674 680
681 sock_rps_record_flow(sk2);
675 WARN_ON(!((1 << sk2->sk_state) & 682 WARN_ON(!((1 << sk2->sk_state) &
676 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_CLOSE))); 683 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_CLOSE)));
677 684
@@ -1008,6 +1015,14 @@ static struct inet_protosw inetsw_array[] =
1008 .flags = INET_PROTOSW_PERMANENT, 1015 .flags = INET_PROTOSW_PERMANENT,
1009 }, 1016 },
1010 1017
1018 {
1019 .type = SOCK_DGRAM,
1020 .protocol = IPPROTO_ICMP,
1021 .prot = &ping_prot,
1022 .ops = &inet_dgram_ops,
1023 .no_check = UDP_CSUM_DEFAULT,
1024 .flags = INET_PROTOSW_REUSE,
1025 },
1011 1026
1012 { 1027 {
1013 .type = SOCK_RAW, 1028 .type = SOCK_RAW,
@@ -1103,14 +1118,19 @@ static int inet_sk_reselect_saddr(struct sock *sk)
1103 struct inet_sock *inet = inet_sk(sk); 1118 struct inet_sock *inet = inet_sk(sk);
1104 __be32 old_saddr = inet->inet_saddr; 1119 __be32 old_saddr = inet->inet_saddr;
1105 __be32 daddr = inet->inet_daddr; 1120 __be32 daddr = inet->inet_daddr;
1121 struct flowi4 *fl4;
1106 struct rtable *rt; 1122 struct rtable *rt;
1107 __be32 new_saddr; 1123 __be32 new_saddr;
1124 struct ip_options_rcu *inet_opt;
1108 1125
1109 if (inet->opt && inet->opt->srr) 1126 inet_opt = rcu_dereference_protected(inet->inet_opt,
1110 daddr = inet->opt->faddr; 1127 sock_owned_by_user(sk));
1128 if (inet_opt && inet_opt->opt.srr)
1129 daddr = inet_opt->opt.faddr;
1111 1130
1112 /* Query new route. */ 1131 /* Query new route. */
1113 rt = ip_route_connect(daddr, 0, RT_CONN_FLAGS(sk), 1132 fl4 = &inet->cork.fl.u.ip4;
1133 rt = ip_route_connect(fl4, daddr, 0, RT_CONN_FLAGS(sk),
1114 sk->sk_bound_dev_if, sk->sk_protocol, 1134 sk->sk_bound_dev_if, sk->sk_protocol,
1115 inet->inet_sport, inet->inet_dport, sk, false); 1135 inet->inet_sport, inet->inet_dport, sk, false);
1116 if (IS_ERR(rt)) 1136 if (IS_ERR(rt))
@@ -1118,7 +1138,7 @@ static int inet_sk_reselect_saddr(struct sock *sk)
1118 1138
1119 sk_setup_caps(sk, &rt->dst); 1139 sk_setup_caps(sk, &rt->dst);
1120 1140
1121 new_saddr = rt->rt_src; 1141 new_saddr = fl4->saddr;
1122 1142
1123 if (new_saddr == old_saddr) 1143 if (new_saddr == old_saddr)
1124 return 0; 1144 return 0;
@@ -1147,6 +1167,8 @@ int inet_sk_rebuild_header(struct sock *sk)
1147 struct inet_sock *inet = inet_sk(sk); 1167 struct inet_sock *inet = inet_sk(sk);
1148 struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0); 1168 struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0);
1149 __be32 daddr; 1169 __be32 daddr;
1170 struct ip_options_rcu *inet_opt;
1171 struct flowi4 *fl4;
1150 int err; 1172 int err;
1151 1173
1152 /* Route is OK, nothing to do. */ 1174 /* Route is OK, nothing to do. */
@@ -1154,10 +1176,14 @@ int inet_sk_rebuild_header(struct sock *sk)
1154 return 0; 1176 return 0;
1155 1177
1156 /* Reroute. */ 1178 /* Reroute. */
1179 rcu_read_lock();
1180 inet_opt = rcu_dereference(inet->inet_opt);
1157 daddr = inet->inet_daddr; 1181 daddr = inet->inet_daddr;
1158 if (inet->opt && inet->opt->srr) 1182 if (inet_opt && inet_opt->opt.srr)
1159 daddr = inet->opt->faddr; 1183 daddr = inet_opt->opt.faddr;
1160 rt = ip_route_output_ports(sock_net(sk), sk, daddr, inet->inet_saddr, 1184 rcu_read_unlock();
1185 fl4 = &inet->cork.fl.u.ip4;
1186 rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr, inet->inet_saddr,
1161 inet->inet_dport, inet->inet_sport, 1187 inet->inet_dport, inet->inet_sport,
1162 sk->sk_protocol, RT_CONN_FLAGS(sk), 1188 sk->sk_protocol, RT_CONN_FLAGS(sk),
1163 sk->sk_bound_dev_if); 1189 sk->sk_bound_dev_if);
@@ -1186,7 +1212,7 @@ EXPORT_SYMBOL(inet_sk_rebuild_header);
1186 1212
1187static int inet_gso_send_check(struct sk_buff *skb) 1213static int inet_gso_send_check(struct sk_buff *skb)
1188{ 1214{
1189 struct iphdr *iph; 1215 const struct iphdr *iph;
1190 const struct net_protocol *ops; 1216 const struct net_protocol *ops;
1191 int proto; 1217 int proto;
1192 int ihl; 1218 int ihl;
@@ -1293,7 +1319,7 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
1293 const struct net_protocol *ops; 1319 const struct net_protocol *ops;
1294 struct sk_buff **pp = NULL; 1320 struct sk_buff **pp = NULL;
1295 struct sk_buff *p; 1321 struct sk_buff *p;
1296 struct iphdr *iph; 1322 const struct iphdr *iph;
1297 unsigned int hlen; 1323 unsigned int hlen;
1298 unsigned int off; 1324 unsigned int off;
1299 unsigned int id; 1325 unsigned int id;
@@ -1516,6 +1542,7 @@ static const struct net_protocol udp_protocol = {
1516 1542
1517static const struct net_protocol icmp_protocol = { 1543static const struct net_protocol icmp_protocol = {
1518 .handler = icmp_rcv, 1544 .handler = icmp_rcv,
1545 .err_handler = ping_err,
1519 .no_policy = 1, 1546 .no_policy = 1,
1520 .netns_ok = 1, 1547 .netns_ok = 1,
1521}; 1548};
@@ -1631,6 +1658,10 @@ static int __init inet_init(void)
1631 if (rc) 1658 if (rc)
1632 goto out_unregister_udp_proto; 1659 goto out_unregister_udp_proto;
1633 1660
1661 rc = proto_register(&ping_prot, 1);
1662 if (rc)
1663 goto out_unregister_raw_proto;
1664
1634 /* 1665 /*
1635 * Tell SOCKET that we are alive... 1666 * Tell SOCKET that we are alive...
1636 */ 1667 */
@@ -1686,6 +1717,8 @@ static int __init inet_init(void)
1686 /* Add UDP-Lite (RFC 3828) */ 1717 /* Add UDP-Lite (RFC 3828) */
1687 udplite4_register(); 1718 udplite4_register();
1688 1719
1720 ping_init();
1721
1689 /* 1722 /*
1690 * Set the ICMP layer up 1723 * Set the ICMP layer up
1691 */ 1724 */
@@ -1716,6 +1749,8 @@ static int __init inet_init(void)
1716 rc = 0; 1749 rc = 0;
1717out: 1750out:
1718 return rc; 1751 return rc;
1752out_unregister_raw_proto:
1753 proto_unregister(&raw_prot);
1719out_unregister_udp_proto: 1754out_unregister_udp_proto:
1720 proto_unregister(&udp_prot); 1755 proto_unregister(&udp_prot);
1721out_unregister_tcp_proto: 1756out_unregister_tcp_proto:
@@ -1740,11 +1775,15 @@ static int __init ipv4_proc_init(void)
1740 goto out_tcp; 1775 goto out_tcp;
1741 if (udp4_proc_init()) 1776 if (udp4_proc_init())
1742 goto out_udp; 1777 goto out_udp;
1778 if (ping_proc_init())
1779 goto out_ping;
1743 if (ip_misc_proc_init()) 1780 if (ip_misc_proc_init())
1744 goto out_misc; 1781 goto out_misc;
1745out: 1782out:
1746 return rc; 1783 return rc;
1747out_misc: 1784out_misc:
1785 ping_proc_exit();
1786out_ping:
1748 udp4_proc_exit(); 1787 udp4_proc_exit();
1749out_udp: 1788out_udp:
1750 tcp4_proc_exit(); 1789 tcp4_proc_exit();
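The af_inet.c changes wire up the new unprivileged ping sockets: ping_prot is registered (with a matching unwind label in inet_init()'s failure path), an inetsw entry exposes it as SOCK_DGRAM over IPPROTO_ICMP, icmp_protocol gains ping_err as its error handler, and the proc setup gains ping entries. Independently, inet_bind() now rejects non-AF_INET families with -EAFNOSUPPORT instead of silently accepting them. From userspace the visible effect is the following, as a hedged sketch (access is gated by the ping code's ping_group_range sysctl, assumed here):

	#include <stdio.h>
	#include <sys/socket.h>
	#include <netinet/in.h>

	int main(void)
	{
		/* no CAP_NET_RAW needed, unlike a SOCK_RAW ICMP socket */
		int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_ICMP);

		if (fd < 0)
			perror("socket");	/* gid outside ping_group_range,
						 * or kernel predates this */
		return fd < 0;
	}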
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 4286fd3cc0e2..c1f4154552fc 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -73,7 +73,7 @@ static inline struct scatterlist *ah_req_sg(struct crypto_ahash *ahash,
73 * into IP header for icv calculation. Options are already checked 73 * into IP header for icv calculation. Options are already checked
74 * for validity, so paranoia is not required. */ 74 * for validity, so paranoia is not required. */
75 75
76static int ip_clear_mutable_options(struct iphdr *iph, __be32 *daddr) 76static int ip_clear_mutable_options(const struct iphdr *iph, __be32 *daddr)
77{ 77{
78 unsigned char * optptr = (unsigned char*)(iph+1); 78 unsigned char * optptr = (unsigned char*)(iph+1);
79 int l = iph->ihl*4 - sizeof(struct iphdr); 79 int l = iph->ihl*4 - sizeof(struct iphdr);
@@ -396,7 +396,7 @@ out:
396static void ah4_err(struct sk_buff *skb, u32 info) 396static void ah4_err(struct sk_buff *skb, u32 info)
397{ 397{
398 struct net *net = dev_net(skb->dev); 398 struct net *net = dev_net(skb->dev);
399 struct iphdr *iph = (struct iphdr *)skb->data; 399 const struct iphdr *iph = (const struct iphdr *)skb->data;
400 struct ip_auth_hdr *ah = (struct ip_auth_hdr *)(skb->data+(iph->ihl<<2)); 400 struct ip_auth_hdr *ah = (struct ip_auth_hdr *)(skb->data+(iph->ihl<<2));
401 struct xfrm_state *x; 401 struct xfrm_state *x;
402 402
@@ -404,7 +404,8 @@ static void ah4_err(struct sk_buff *skb, u32 info)
404 icmp_hdr(skb)->code != ICMP_FRAG_NEEDED) 404 icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
405 return; 405 return;
406 406
407 x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, ah->spi, IPPROTO_AH, AF_INET); 407 x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
408 ah->spi, IPPROTO_AH, AF_INET);
408 if (!x) 409 if (!x)
409 return; 410 return;
410 printk(KERN_DEBUG "pmtu discovery on SA AH/%08x/%08x\n", 411 printk(KERN_DEBUG "pmtu discovery on SA AH/%08x/%08x\n",
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index a0af7ea87870..2b3c23c287cd 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -1857,6 +1857,11 @@ static int cipso_v4_genopt(unsigned char *buf, u32 buf_len,
1857 return CIPSO_V4_HDR_LEN + ret_val; 1857 return CIPSO_V4_HDR_LEN + ret_val;
1858} 1858}
1859 1859
1860static void opt_kfree_rcu(struct rcu_head *head)
1861{
1862 kfree(container_of(head, struct ip_options_rcu, rcu));
1863}
1864
1860/** 1865/**
1861 * cipso_v4_sock_setattr - Add a CIPSO option to a socket 1866 * cipso_v4_sock_setattr - Add a CIPSO option to a socket
1862 * @sk: the socket 1867 * @sk: the socket
@@ -1879,7 +1884,7 @@ int cipso_v4_sock_setattr(struct sock *sk,
1879 unsigned char *buf = NULL; 1884 unsigned char *buf = NULL;
1880 u32 buf_len; 1885 u32 buf_len;
1881 u32 opt_len; 1886 u32 opt_len;
1882 struct ip_options *opt = NULL; 1887 struct ip_options_rcu *old, *opt = NULL;
1883 struct inet_sock *sk_inet; 1888 struct inet_sock *sk_inet;
1884 struct inet_connection_sock *sk_conn; 1889 struct inet_connection_sock *sk_conn;
1885 1890
@@ -1915,22 +1920,25 @@ int cipso_v4_sock_setattr(struct sock *sk,
1915 ret_val = -ENOMEM; 1920 ret_val = -ENOMEM;
1916 goto socket_setattr_failure; 1921 goto socket_setattr_failure;
1917 } 1922 }
1918 memcpy(opt->__data, buf, buf_len); 1923 memcpy(opt->opt.__data, buf, buf_len);
1919 opt->optlen = opt_len; 1924 opt->opt.optlen = opt_len;
1920 opt->cipso = sizeof(struct iphdr); 1925 opt->opt.cipso = sizeof(struct iphdr);
1921 kfree(buf); 1926 kfree(buf);
1922 buf = NULL; 1927 buf = NULL;
1923 1928
1924 sk_inet = inet_sk(sk); 1929 sk_inet = inet_sk(sk);
1930
1931 old = rcu_dereference_protected(sk_inet->inet_opt, sock_owned_by_user(sk));
1925 if (sk_inet->is_icsk) { 1932 if (sk_inet->is_icsk) {
1926 sk_conn = inet_csk(sk); 1933 sk_conn = inet_csk(sk);
1927 if (sk_inet->opt) 1934 if (old)
1928 sk_conn->icsk_ext_hdr_len -= sk_inet->opt->optlen; 1935 sk_conn->icsk_ext_hdr_len -= old->opt.optlen;
1929 sk_conn->icsk_ext_hdr_len += opt->optlen; 1936 sk_conn->icsk_ext_hdr_len += opt->opt.optlen;
1930 sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie); 1937 sk_conn->icsk_sync_mss(sk, sk_conn->icsk_pmtu_cookie);
1931 } 1938 }
1932 opt = xchg(&sk_inet->opt, opt); 1939 rcu_assign_pointer(sk_inet->inet_opt, opt);
1933 kfree(opt); 1940 if (old)
1941 call_rcu(&old->rcu, opt_kfree_rcu);
1934 1942
1935 return 0; 1943 return 0;
1936 1944
@@ -1960,7 +1968,7 @@ int cipso_v4_req_setattr(struct request_sock *req,
1960 unsigned char *buf = NULL; 1968 unsigned char *buf = NULL;
1961 u32 buf_len; 1969 u32 buf_len;
1962 u32 opt_len; 1970 u32 opt_len;
1963 struct ip_options *opt = NULL; 1971 struct ip_options_rcu *opt = NULL;
1964 struct inet_request_sock *req_inet; 1972 struct inet_request_sock *req_inet;
1965 1973
1966 /* We allocate the maximum CIPSO option size here so we are probably 1974 /* We allocate the maximum CIPSO option size here so we are probably
@@ -1988,15 +1996,16 @@ int cipso_v4_req_setattr(struct request_sock *req,
1988 ret_val = -ENOMEM; 1996 ret_val = -ENOMEM;
1989 goto req_setattr_failure; 1997 goto req_setattr_failure;
1990 } 1998 }
1991 memcpy(opt->__data, buf, buf_len); 1999 memcpy(opt->opt.__data, buf, buf_len);
1992 opt->optlen = opt_len; 2000 opt->opt.optlen = opt_len;
1993 opt->cipso = sizeof(struct iphdr); 2001 opt->opt.cipso = sizeof(struct iphdr);
1994 kfree(buf); 2002 kfree(buf);
1995 buf = NULL; 2003 buf = NULL;
1996 2004
1997 req_inet = inet_rsk(req); 2005 req_inet = inet_rsk(req);
1998 opt = xchg(&req_inet->opt, opt); 2006 opt = xchg(&req_inet->opt, opt);
1999 kfree(opt); 2007 if (opt)
2008 call_rcu(&opt->rcu, opt_kfree_rcu);
2000 2009
2001 return 0; 2010 return 0;
2002 2011
@@ -2016,34 +2025,34 @@ req_setattr_failure:
2016 * values on failure. 2025 * values on failure.
2017 * 2026 *
2018 */ 2027 */
2019static int cipso_v4_delopt(struct ip_options **opt_ptr) 2028static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr)
2020{ 2029{
2021 int hdr_delta = 0; 2030 int hdr_delta = 0;
2022 struct ip_options *opt = *opt_ptr; 2031 struct ip_options_rcu *opt = *opt_ptr;
2023 2032
2024 if (opt->srr || opt->rr || opt->ts || opt->router_alert) { 2033 if (opt->opt.srr || opt->opt.rr || opt->opt.ts || opt->opt.router_alert) {
2025 u8 cipso_len; 2034 u8 cipso_len;
2026 u8 cipso_off; 2035 u8 cipso_off;
2027 unsigned char *cipso_ptr; 2036 unsigned char *cipso_ptr;
2028 int iter; 2037 int iter;
2029 int optlen_new; 2038 int optlen_new;
2030 2039
2031 cipso_off = opt->cipso - sizeof(struct iphdr); 2040 cipso_off = opt->opt.cipso - sizeof(struct iphdr);
2032 cipso_ptr = &opt->__data[cipso_off]; 2041 cipso_ptr = &opt->opt.__data[cipso_off];
2033 cipso_len = cipso_ptr[1]; 2042 cipso_len = cipso_ptr[1];
2034 2043
2035 if (opt->srr > opt->cipso) 2044 if (opt->opt.srr > opt->opt.cipso)
2036 opt->srr -= cipso_len; 2045 opt->opt.srr -= cipso_len;
2037 if (opt->rr > opt->cipso) 2046 if (opt->opt.rr > opt->opt.cipso)
2038 opt->rr -= cipso_len; 2047 opt->opt.rr -= cipso_len;
2039 if (opt->ts > opt->cipso) 2048 if (opt->opt.ts > opt->opt.cipso)
2040 opt->ts -= cipso_len; 2049 opt->opt.ts -= cipso_len;
2041 if (opt->router_alert > opt->cipso) 2050 if (opt->opt.router_alert > opt->opt.cipso)
2042 opt->router_alert -= cipso_len; 2051 opt->opt.router_alert -= cipso_len;
2043 opt->cipso = 0; 2052 opt->opt.cipso = 0;
2044 2053
2045 memmove(cipso_ptr, cipso_ptr + cipso_len, 2054 memmove(cipso_ptr, cipso_ptr + cipso_len,
2046 opt->optlen - cipso_off - cipso_len); 2055 opt->opt.optlen - cipso_off - cipso_len);
2047 2056
2048 /* determining the new total option length is tricky because of 2057 /* determining the new total option length is tricky because of
2049 * the padding necessary, the only thing i can think to do at 2058 * the padding necessary, the only thing i can think to do at
@@ -2052,21 +2061,21 @@ static int cipso_v4_delopt(struct ip_options **opt_ptr)
2052 * from there we can determine the new total option length */ 2061 * from there we can determine the new total option length */
2053 iter = 0; 2062 iter = 0;
2054 optlen_new = 0; 2063 optlen_new = 0;
2055 while (iter < opt->optlen) 2064 while (iter < opt->opt.optlen)
2056 if (opt->__data[iter] != IPOPT_NOP) { 2065 if (opt->opt.__data[iter] != IPOPT_NOP) {
2057 iter += opt->__data[iter + 1]; 2066 iter += opt->opt.__data[iter + 1];
2058 optlen_new = iter; 2067 optlen_new = iter;
2059 } else 2068 } else
2060 iter++; 2069 iter++;
2061 hdr_delta = opt->optlen; 2070 hdr_delta = opt->opt.optlen;
2062 opt->optlen = (optlen_new + 3) & ~3; 2071 opt->opt.optlen = (optlen_new + 3) & ~3;
2063 hdr_delta -= opt->optlen; 2072 hdr_delta -= opt->opt.optlen;
2064 } else { 2073 } else {
2065 /* only the cipso option was present on the socket so we can 2074 /* only the cipso option was present on the socket so we can
2066 * remove the entire option struct */ 2075 * remove the entire option struct */
2067 *opt_ptr = NULL; 2076 *opt_ptr = NULL;
2068 hdr_delta = opt->optlen; 2077 hdr_delta = opt->opt.optlen;
2069 kfree(opt); 2078 call_rcu(&opt->rcu, opt_kfree_rcu);
2070 } 2079 }
2071 2080
2072 return hdr_delta; 2081 return hdr_delta;
@@ -2083,15 +2092,15 @@ static int cipso_v4_delopt(struct ip_options **opt_ptr)
2083void cipso_v4_sock_delattr(struct sock *sk) 2092void cipso_v4_sock_delattr(struct sock *sk)
2084{ 2093{
2085 int hdr_delta; 2094 int hdr_delta;
2086 struct ip_options *opt; 2095 struct ip_options_rcu *opt;
2087 struct inet_sock *sk_inet; 2096 struct inet_sock *sk_inet;
2088 2097
2089 sk_inet = inet_sk(sk); 2098 sk_inet = inet_sk(sk);
2090 opt = sk_inet->opt; 2099 opt = rcu_dereference_protected(sk_inet->inet_opt, 1);
2091 if (opt == NULL || opt->cipso == 0) 2100 if (opt == NULL || opt->opt.cipso == 0)
2092 return; 2101 return;
2093 2102
2094 hdr_delta = cipso_v4_delopt(&sk_inet->opt); 2103 hdr_delta = cipso_v4_delopt(&sk_inet->inet_opt);
2095 if (sk_inet->is_icsk && hdr_delta > 0) { 2104 if (sk_inet->is_icsk && hdr_delta > 0) {
2096 struct inet_connection_sock *sk_conn = inet_csk(sk); 2105 struct inet_connection_sock *sk_conn = inet_csk(sk);
2097 sk_conn->icsk_ext_hdr_len -= hdr_delta; 2106 sk_conn->icsk_ext_hdr_len -= hdr_delta;
@@ -2109,12 +2118,12 @@ void cipso_v4_sock_delattr(struct sock *sk)
2109 */ 2118 */
2110void cipso_v4_req_delattr(struct request_sock *req) 2119void cipso_v4_req_delattr(struct request_sock *req)
2111{ 2120{
2112 struct ip_options *opt; 2121 struct ip_options_rcu *opt;
2113 struct inet_request_sock *req_inet; 2122 struct inet_request_sock *req_inet;
2114 2123
2115 req_inet = inet_rsk(req); 2124 req_inet = inet_rsk(req);
2116 opt = req_inet->opt; 2125 opt = req_inet->opt;
2117 if (opt == NULL || opt->cipso == 0) 2126 if (opt == NULL || opt->opt.cipso == 0)
2118 return; 2127 return;
2119 2128
2120 cipso_v4_delopt(&req_inet->opt); 2129 cipso_v4_delopt(&req_inet->opt);
@@ -2184,14 +2193,18 @@ getattr_return:
2184 */ 2193 */
2185int cipso_v4_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr) 2194int cipso_v4_sock_getattr(struct sock *sk, struct netlbl_lsm_secattr *secattr)
2186{ 2195{
2187 struct ip_options *opt; 2196 struct ip_options_rcu *opt;
2197 int res = -ENOMSG;
2188 2198
2189 opt = inet_sk(sk)->opt; 2199 rcu_read_lock();
2190 if (opt == NULL || opt->cipso == 0) 2200 opt = rcu_dereference(inet_sk(sk)->inet_opt);
2191 return -ENOMSG; 2201 if (opt && opt->opt.cipso)
2192 2202 res = cipso_v4_getattr(opt->opt.__data +
2193 return cipso_v4_getattr(opt->__data + opt->cipso - sizeof(struct iphdr), 2203 opt->opt.cipso -
2194 secattr); 2204 sizeof(struct iphdr),
2205 secattr);
2206 rcu_read_unlock();
2207 return res;
2195} 2208}
2196 2209
2197/** 2210/**
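cipso_ipv4.c follows the inet->opt to inet->inet_opt conversion: option blocks become RCU-managed ip_options_rcu objects, published with rcu_assign_pointer() and retired through call_rcu() with opt_kfree_rcu(), while writers justify their access with rcu_dereference_protected() against the socket lock. The writer side, condensed from cipso_v4_sock_setattr():

	old = rcu_dereference_protected(sk_inet->inet_opt,
					sock_owned_by_user(sk));
	/* ... adjust icsk_ext_hdr_len from the old/new optlen ... */
	rcu_assign_pointer(sk_inet->inet_opt, opt);
	if (old)
		call_rcu(&old->rcu, opt_kfree_rcu);

Readers, as in cipso_v4_sock_getattr(), instead bracket a plain rcu_dereference() with rcu_read_lock()/rcu_read_unlock().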
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index 85bd24ca4f6d..424fafbc8cb0 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -24,6 +24,7 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
24{ 24{
25 struct inet_sock *inet = inet_sk(sk); 25 struct inet_sock *inet = inet_sk(sk);
26 struct sockaddr_in *usin = (struct sockaddr_in *) uaddr; 26 struct sockaddr_in *usin = (struct sockaddr_in *) uaddr;
27 struct flowi4 *fl4;
27 struct rtable *rt; 28 struct rtable *rt;
28 __be32 saddr; 29 __be32 saddr;
29 int oif; 30 int oif;
@@ -38,6 +39,8 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
38 39
39 sk_dst_reset(sk); 40 sk_dst_reset(sk);
40 41
42 lock_sock(sk);
43
41 oif = sk->sk_bound_dev_if; 44 oif = sk->sk_bound_dev_if;
42 saddr = inet->inet_saddr; 45 saddr = inet->inet_saddr;
43 if (ipv4_is_multicast(usin->sin_addr.s_addr)) { 46 if (ipv4_is_multicast(usin->sin_addr.s_addr)) {
@@ -46,7 +49,8 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
46 if (!saddr) 49 if (!saddr)
47 saddr = inet->mc_addr; 50 saddr = inet->mc_addr;
48 } 51 }
49 rt = ip_route_connect(usin->sin_addr.s_addr, saddr, 52 fl4 = &inet->cork.fl.u.ip4;
53 rt = ip_route_connect(fl4, usin->sin_addr.s_addr, saddr,
50 RT_CONN_FLAGS(sk), oif, 54 RT_CONN_FLAGS(sk), oif,
51 sk->sk_protocol, 55 sk->sk_protocol,
52 inet->inet_sport, usin->sin_port, sk, true); 56 inet->inet_sport, usin->sin_port, sk, true);
@@ -54,26 +58,30 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
54 err = PTR_ERR(rt); 58 err = PTR_ERR(rt);
55 if (err == -ENETUNREACH) 59 if (err == -ENETUNREACH)
56 IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); 60 IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
57 return err; 61 goto out;
58 } 62 }
59 63
60 if ((rt->rt_flags & RTCF_BROADCAST) && !sock_flag(sk, SOCK_BROADCAST)) { 64 if ((rt->rt_flags & RTCF_BROADCAST) && !sock_flag(sk, SOCK_BROADCAST)) {
61 ip_rt_put(rt); 65 ip_rt_put(rt);
62 return -EACCES; 66 err = -EACCES;
67 goto out;
63 } 68 }
64 if (!inet->inet_saddr) 69 if (!inet->inet_saddr)
65 inet->inet_saddr = rt->rt_src; /* Update source address */ 70 inet->inet_saddr = fl4->saddr; /* Update source address */
66 if (!inet->inet_rcv_saddr) { 71 if (!inet->inet_rcv_saddr) {
67 inet->inet_rcv_saddr = rt->rt_src; 72 inet->inet_rcv_saddr = fl4->saddr;
68 if (sk->sk_prot->rehash) 73 if (sk->sk_prot->rehash)
69 sk->sk_prot->rehash(sk); 74 sk->sk_prot->rehash(sk);
70 } 75 }
71 inet->inet_daddr = rt->rt_dst; 76 inet->inet_daddr = fl4->daddr;
72 inet->inet_dport = usin->sin_port; 77 inet->inet_dport = usin->sin_port;
73 sk->sk_state = TCP_ESTABLISHED; 78 sk->sk_state = TCP_ESTABLISHED;
74 inet->inet_id = jiffies; 79 inet->inet_id = jiffies;
75 80
76 sk_dst_set(sk, &rt->dst); 81 sk_dst_set(sk, &rt->dst);
77 return 0; 82 err = 0;
83out:
84 release_sock(sk);
85 return err;
78} 86}
79EXPORT_SYMBOL(ip4_datagram_connect); 87EXPORT_SYMBOL(ip4_datagram_connect);
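ip4_datagram_connect() now runs under lock_sock(), plausibly because the flow key it writes moved into inet->cork.fl.u.ip4, which is shared with the sendmsg path and should not be rewritten mid-send; the early returns are folded into a single release point so the lock cannot be leaked. The resulting shape:

	lock_sock(sk);
	/* ... ip_route_connect() writes inet->cork.fl.u.ip4 ... */
	err = 0;
	out:
		release_sock(sk);
		return err;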
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index cd9ca0811cfa..0d4a184af16f 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1369,7 +1369,7 @@ errout:
1369 1369
1370static size_t inet_get_link_af_size(const struct net_device *dev) 1370static size_t inet_get_link_af_size(const struct net_device *dev)
1371{ 1371{
1372 struct in_device *in_dev = __in_dev_get_rtnl(dev); 1372 struct in_device *in_dev = rcu_dereference_rtnl(dev->ip_ptr);
1373 1373
1374 if (!in_dev) 1374 if (!in_dev)
1375 return 0; 1375 return 0;
@@ -1379,7 +1379,7 @@ static size_t inet_get_link_af_size(const struct net_device *dev)
1379 1379
1380static int inet_fill_link_af(struct sk_buff *skb, const struct net_device *dev) 1380static int inet_fill_link_af(struct sk_buff *skb, const struct net_device *dev)
1381{ 1381{
1382 struct in_device *in_dev = __in_dev_get_rtnl(dev); 1382 struct in_device *in_dev = rcu_dereference_rtnl(dev->ip_ptr);
1383 struct nlattr *nla; 1383 struct nlattr *nla;
1384 int i; 1384 int i;
1385 1385
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 03f994bcf7de..a5b413416da3 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -276,7 +276,7 @@ error:
276 276
277static int esp_input_done2(struct sk_buff *skb, int err) 277static int esp_input_done2(struct sk_buff *skb, int err)
278{ 278{
279 struct iphdr *iph; 279 const struct iphdr *iph;
280 struct xfrm_state *x = xfrm_input_state(skb); 280 struct xfrm_state *x = xfrm_input_state(skb);
281 struct esp_data *esp = x->data; 281 struct esp_data *esp = x->data;
282 struct crypto_aead *aead = esp->aead; 282 struct crypto_aead *aead = esp->aead;
@@ -484,7 +484,7 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
484static void esp4_err(struct sk_buff *skb, u32 info) 484static void esp4_err(struct sk_buff *skb, u32 info)
485{ 485{
486 struct net *net = dev_net(skb->dev); 486 struct net *net = dev_net(skb->dev);
487 struct iphdr *iph = (struct iphdr *)skb->data; 487 const struct iphdr *iph = (const struct iphdr *)skb->data;
488 struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2)); 488 struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2));
489 struct xfrm_state *x; 489 struct xfrm_state *x;
490 490
@@ -492,7 +492,8 @@ static void esp4_err(struct sk_buff *skb, u32 info)
492 icmp_hdr(skb)->code != ICMP_FRAG_NEEDED) 492 icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
493 return; 493 return;
494 494
495 x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, esph->spi, IPPROTO_ESP, AF_INET); 495 x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
496 esph->spi, IPPROTO_ESP, AF_INET);
496 if (!x) 497 if (!x)
497 return; 498 return;
498 NETDEBUG(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%08x\n", 499 NETDEBUG(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%08x\n",
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 451088330bbb..22524716fe70 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -44,6 +44,7 @@
44#include <net/arp.h> 44#include <net/arp.h>
45#include <net/ip_fib.h> 45#include <net/ip_fib.h>
46#include <net/rtnetlink.h> 46#include <net/rtnetlink.h>
47#include <net/xfrm.h>
47 48
48#ifndef CONFIG_IP_MULTIPLE_TABLES 49#ifndef CONFIG_IP_MULTIPLE_TABLES
49 50
@@ -188,9 +189,9 @@ EXPORT_SYMBOL(inet_dev_addr_type);
188 * - check, that packet arrived from expected physical interface. 189 * - check, that packet arrived from expected physical interface.
189 * called with rcu_read_lock() 190 * called with rcu_read_lock()
190 */ 191 */
191int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif, 192int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, u8 tos,
192 struct net_device *dev, __be32 *spec_dst, 193 int oif, struct net_device *dev, __be32 *spec_dst,
193 u32 *itag, u32 mark) 194 u32 *itag)
194{ 195{
195 struct in_device *in_dev; 196 struct in_device *in_dev;
196 struct flowi4 fl4; 197 struct flowi4 fl4;
@@ -202,7 +203,6 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
202 203
203 fl4.flowi4_oif = 0; 204 fl4.flowi4_oif = 0;
204 fl4.flowi4_iif = oif; 205 fl4.flowi4_iif = oif;
205 fl4.flowi4_mark = mark;
206 fl4.daddr = src; 206 fl4.daddr = src;
207 fl4.saddr = dst; 207 fl4.saddr = dst;
208 fl4.flowi4_tos = tos; 208 fl4.flowi4_tos = tos;
@@ -212,10 +212,12 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
212 in_dev = __in_dev_get_rcu(dev); 212 in_dev = __in_dev_get_rcu(dev);
213 if (in_dev) { 213 if (in_dev) {
214 no_addr = in_dev->ifa_list == NULL; 214 no_addr = in_dev->ifa_list == NULL;
215 rpf = IN_DEV_RPFILTER(in_dev); 215
216 /* Ignore rp_filter for packets protected by IPsec. */
217 rpf = secpath_exists(skb) ? 0 : IN_DEV_RPFILTER(in_dev);
218
216 accept_local = IN_DEV_ACCEPT_LOCAL(in_dev); 219 accept_local = IN_DEV_ACCEPT_LOCAL(in_dev);
217 if (mark && !IN_DEV_SRC_VMARK(in_dev)) 220 fl4.flowi4_mark = IN_DEV_SRC_VMARK(in_dev) ? skb->mark : 0;
218 fl4.flowi4_mark = 0;
219 } 221 }
220 222
221 if (in_dev == NULL) 223 if (in_dev == NULL)
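fib_validate_source() now takes the skb itself and derives per-packet state internally: the fwmark is honoured only when the device enables src_vmark, and, new here, packets carrying an IPsec security path skip the reverse-path filter entirely; presumably a just-decapsulated packet can legitimately arrive on an interface the plaintext route would never use. The two derived values, condensed from the hunk:

	/* rp_filter yields to IPsec state; src_vmark gates the mark */
	rpf = secpath_exists(skb) ? 0 : IN_DEV_RPFILTER(in_dev);
	fl4.flowi4_mark = IN_DEV_SRC_VMARK(in_dev) ? skb->mark : 0;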
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 641a5a2a9f9c..33e2c35b74b7 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -141,18 +141,8 @@ const struct fib_prop fib_props[RTN_MAX + 1] = {
141 }, 141 },
142}; 142};
143 143
144
145/* Release a nexthop info record */ 144/* Release a nexthop info record */
146 145
147static void free_fib_info_rcu(struct rcu_head *head)
148{
149 struct fib_info *fi = container_of(head, struct fib_info, rcu);
150
151 if (fi->fib_metrics != (u32 *) dst_default_metrics)
152 kfree(fi->fib_metrics);
153 kfree(fi);
154}
155
156void free_fib_info(struct fib_info *fi) 146void free_fib_info(struct fib_info *fi)
157{ 147{
158 if (fi->fib_dead == 0) { 148 if (fi->fib_dead == 0) {
@@ -166,7 +156,7 @@ void free_fib_info(struct fib_info *fi)
166 } endfor_nexthops(fi); 156 } endfor_nexthops(fi);
167 fib_info_cnt--; 157 fib_info_cnt--;
168 release_net(fi->fib_net); 158 release_net(fi->fib_net);
169 call_rcu(&fi->rcu, free_fib_info_rcu); 159 kfree_rcu(fi, rcu);
170} 160}
171 161
172void fib_release_info(struct fib_info *fi) 162void fib_release_info(struct fib_info *fi)
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 5fe9b8b41df3..58c25ea5a5c1 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -72,6 +72,7 @@
 #include <linux/init.h>
 #include <linux/list.h>
 #include <linux/slab.h>
+#include <linux/prefetch.h>
 #include <net/net_namespace.h>
 #include <net/ip.h>
 #include <net/protocol.h>
@@ -126,7 +127,7 @@ struct tnode {
 		struct work_struct work;
 		struct tnode *tnode_free;
 	};
-	struct rt_trie_node *child[0];
+	struct rt_trie_node __rcu *child[0];
 };
 
 #ifdef CONFIG_IP_FIB_TRIE_STATS
@@ -151,7 +152,7 @@ struct trie_stat {
 };
 
 struct trie {
-	struct rt_trie_node *trie;
+	struct rt_trie_node __rcu *trie;
 #ifdef CONFIG_IP_FIB_TRIE_STATS
 	struct trie_use_stats stats;
 #endif
@@ -177,16 +178,29 @@ static const int sync_pages = 128;
 static struct kmem_cache *fn_alias_kmem __read_mostly;
 static struct kmem_cache *trie_leaf_kmem __read_mostly;
 
-static inline struct tnode *node_parent(struct rt_trie_node *node)
+/*
+ * caller must hold RTNL
+ */
+static inline struct tnode *node_parent(const struct rt_trie_node *node)
 {
-	return (struct tnode *)(node->parent & ~NODE_TYPE_MASK);
+	unsigned long parent;
+
+	parent = rcu_dereference_index_check(node->parent, lockdep_rtnl_is_held());
+
+	return (struct tnode *)(parent & ~NODE_TYPE_MASK);
 }
 
-static inline struct tnode *node_parent_rcu(struct rt_trie_node *node)
+/*
+ * caller must hold RCU read lock or RTNL
+ */
+static inline struct tnode *node_parent_rcu(const struct rt_trie_node *node)
 {
-	struct tnode *ret = node_parent(node);
+	unsigned long parent;
 
-	return rcu_dereference_rtnl(ret);
+	parent = rcu_dereference_index_check(node->parent, rcu_read_lock_held() ||
+							   lockdep_rtnl_is_held());
+
+	return (struct tnode *)(parent & ~NODE_TYPE_MASK);
 }
 
 /* Same as rcu_assign_pointer
@@ -198,18 +212,24 @@ static inline void node_set_parent(struct rt_trie_node *node, struct tnode *ptr)
 	node->parent = (unsigned long)ptr | NODE_TYPE(node);
 }
 
-static inline struct rt_trie_node *tnode_get_child(struct tnode *tn, unsigned int i)
+/*
+ * caller must hold RTNL
+ */
+static inline struct rt_trie_node *tnode_get_child(const struct tnode *tn, unsigned int i)
 {
 	BUG_ON(i >= 1U << tn->bits);
 
-	return tn->child[i];
+	return rtnl_dereference(tn->child[i]);
 }
 
-static inline struct rt_trie_node *tnode_get_child_rcu(struct tnode *tn, unsigned int i)
+/*
+ * caller must hold RCU read lock or RTNL
+ */
+static inline struct rt_trie_node *tnode_get_child_rcu(const struct tnode *tn, unsigned int i)
 {
-	struct rt_trie_node *ret = tnode_get_child(tn, i);
+	BUG_ON(i >= 1U << tn->bits);
 
-	return rcu_dereference_rtnl(ret);
+	return rcu_dereference_rtnl(tn->child[i]);
 }
 
 static inline int tnode_child_length(const struct tnode *tn)
@@ -350,14 +370,9 @@ static inline void free_leaf(struct leaf *l)
 	call_rcu_bh(&l->rcu, __leaf_free_rcu);
 }
 
-static void __leaf_info_free_rcu(struct rcu_head *head)
-{
-	kfree(container_of(head, struct leaf_info, rcu));
-}
-
 static inline void free_leaf_info(struct leaf_info *leaf)
 {
-	call_rcu(&leaf->rcu, __leaf_info_free_rcu);
+	kfree_rcu(leaf, rcu);
 }
 
 static struct tnode *tnode_alloc(size_t size)
@@ -487,7 +502,7 @@ static inline void put_child(struct trie *t, struct tnode *tn, int i,
 static void tnode_put_child_reorg(struct tnode *tn, int i, struct rt_trie_node *n,
 				  int wasfull)
 {
-	struct rt_trie_node *chi = tn->child[i];
+	struct rt_trie_node *chi = rtnl_dereference(tn->child[i]);
 	int isfull;
 
 	BUG_ON(i >= 1<<tn->bits);
@@ -665,7 +680,7 @@ one_child:
 	for (i = 0; i < tnode_child_length(tn); i++) {
 		struct rt_trie_node *n;
 
-		n = tn->child[i];
+		n = rtnl_dereference(tn->child[i]);
 		if (!n)
 			continue;
 
@@ -679,6 +694,20 @@ one_child:
 	return (struct rt_trie_node *) tn;
 }
 
+
+static void tnode_clean_free(struct tnode *tn)
+{
+	int i;
+	struct tnode *tofree;
+
+	for (i = 0; i < tnode_child_length(tn); i++) {
+		tofree = (struct tnode *)rtnl_dereference(tn->child[i]);
+		if (tofree)
+			tnode_free(tofree);
+	}
+	tnode_free(tn);
+}
+
 static struct tnode *inflate(struct trie *t, struct tnode *tn)
 {
 	struct tnode *oldtnode = tn;
@@ -755,8 +784,8 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn)
 		inode = (struct tnode *) node;
 
 		if (inode->bits == 1) {
-			put_child(t, tn, 2*i, inode->child[0]);
-			put_child(t, tn, 2*i+1, inode->child[1]);
+			put_child(t, tn, 2*i, rtnl_dereference(inode->child[0]));
+			put_child(t, tn, 2*i+1, rtnl_dereference(inode->child[1]));
 
 			tnode_free_safe(inode);
 			continue;
@@ -797,8 +826,8 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn)
 
 		size = tnode_child_length(left);
 		for (j = 0; j < size; j++) {
-			put_child(t, left, j, inode->child[j]);
-			put_child(t, right, j, inode->child[j + size]);
+			put_child(t, left, j, rtnl_dereference(inode->child[j]));
+			put_child(t, right, j, rtnl_dereference(inode->child[j + size]));
 		}
 		put_child(t, tn, 2*i, resize(t, left));
 		put_child(t, tn, 2*i+1, resize(t, right));
@@ -808,18 +837,8 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn)
 	tnode_free_safe(oldtnode);
 	return tn;
 nomem:
-	{
-		int size = tnode_child_length(tn);
-		int j;
-
-		for (j = 0; j < size; j++)
-			if (tn->child[j])
-				tnode_free((struct tnode *)tn->child[j]);
-
-		tnode_free(tn);
-
-		return ERR_PTR(-ENOMEM);
-	}
+	tnode_clean_free(tn);
+	return ERR_PTR(-ENOMEM);
 }
 
 static struct tnode *halve(struct trie *t, struct tnode *tn)
@@ -890,18 +909,8 @@ static struct tnode *halve(struct trie *t, struct tnode *tn)
 	tnode_free_safe(oldtnode);
 	return tn;
 nomem:
-	{
-		int size = tnode_child_length(tn);
-		int j;
-
-		for (j = 0; j < size; j++)
-			if (tn->child[j])
-				tnode_free((struct tnode *)tn->child[j]);
-
-		tnode_free(tn);
-
-		return ERR_PTR(-ENOMEM);
-	}
+	tnode_clean_free(tn);
+	return ERR_PTR(-ENOMEM);
 }
 
 /* readside must use rcu_read_lock currently dump routines
@@ -1033,7 +1042,7 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
 	t_key cindex;
 
 	pos = 0;
-	n = t->trie;
+	n = rtnl_dereference(t->trie);
 
 	/* If we point to NULL, stop. Either the tree is empty and we should
 	 * just put a new leaf in if, or we have reached an empty child slot,
@@ -1319,6 +1328,9 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
 		}
 	}
 
+	if (!plen)
+		tb->tb_num_default++;
+
 	list_add_tail_rcu(&new_fa->fa_list,
 			  (fa ? &fa->fa_list : fa_head));
 
@@ -1684,6 +1696,9 @@ int fib_table_delete(struct fib_table *tb, struct fib_config *cfg)
 
 	list_del_rcu(&fa->fa_list);
 
+	if (!plen)
+		tb->tb_num_default--;
+
 	if (list_empty(fa_head)) {
 		hlist_del_rcu(&li->hlist);
 		free_leaf_info(li);
@@ -1756,7 +1771,7 @@ static struct leaf *leaf_walk_rcu(struct tnode *p, struct rt_trie_node *c)
 			continue;
 
 		if (IS_LEAF(c)) {
-			prefetch(p->child[idx]);
+			prefetch(rcu_dereference_rtnl(p->child[idx]));
 			return (struct leaf *) c;
 		}
 
@@ -1974,6 +1989,7 @@ struct fib_table *fib_trie_table(u32 id)
 
 	tb->tb_id = id;
 	tb->tb_default = -1;
+	tb->tb_num_default = 0;
 
 	t = (struct trie *) tb->tb_data;
 	memset(t, 0, sizeof(*t));
@@ -2269,7 +2285,7 @@ static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 
 	/* walk rest of this hash chain */
 	h = tb->tb_id & (FIB_TABLE_HASHSZ - 1);
-	while ( (tb_node = rcu_dereference(tb->tb_hlist.next)) ) {
+	while ((tb_node = rcu_dereference(hlist_next_rcu(&tb->tb_hlist)))) {
 		tb = hlist_entry(tb_node, struct fib_table, tb_hlist);
 		n = fib_trie_get_first(iter, (struct trie *) tb->tb_data);
 		if (n)
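The fib_trie.c changes are largely mechanical: trie and child pointers gain the __rcu annotation, and each raw access becomes the dereference helper that matches the caller's locking contract (rtnl_dereference() on the RTNL-held update side, rcu_dereference_rtnl() where either RCU or RTNL suffices; node->parent, being an unsigned long rather than a pointer, needs rcu_dereference_index_check()). A hedged sketch of the pattern with an illustrative struct, not taken from the patch:

	struct node {
		struct node __rcu *next;	/* sparse flags any unannotated access */
	};

	/* Update side: caller must hold RTNL. */
	static struct node *node_next(struct node *n)
	{
		return rtnl_dereference(n->next);
	}

	/* Read side: RCU read lock or RTNL is enough. */
	static struct node *node_next_rcu(struct node *n)
	{
		return rcu_dereference_rtnl(n->next);
	}

The payoff is that lockdep can now verify, per call site, that the documented "caller must hold ..." comments are actually true.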
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index e5f8a71d3a2a..5395e45dcce6 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -83,6 +83,7 @@
 #include <net/tcp.h>
 #include <net/udp.h>
 #include <net/raw.h>
+#include <net/ping.h>
 #include <linux/skbuff.h>
 #include <net/sock.h>
 #include <linux/errno.h>
@@ -108,8 +109,7 @@ struct icmp_bxm {
 		__be32	times[3];
 	} data;
 	int head_len;
-	struct ip_options replyopts;
-	unsigned char  optbuf[40];
+	struct ip_options_data replyopts;
 };
 
 /* An array of errno for error messages from dest unreach. */
@@ -234,7 +234,7 @@ static inline void icmp_xmit_unlock(struct sock *sk)
  */
 
 static inline bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
-				      int type, int code)
+				      struct flowi4 *fl4, int type, int code)
 {
 	struct dst_entry *dst = &rt->dst;
 	bool rc = true;
@@ -253,7 +253,7 @@ static inline bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
 	/* Limit if icmp type is enabled in ratemask. */
 	if ((1 << type) & net->ipv4.sysctl_icmp_ratemask) {
 		if (!rt->peer)
-			rt_bind_peer(rt, 1);
+			rt_bind_peer(rt, fl4->daddr, 1);
 		rc = inet_peer_xrlim_allow(rt->peer,
 					   net->ipv4.sysctl_icmp_ratelimit);
 	}
@@ -291,13 +291,14 @@ static int icmp_glue_bits(void *from, char *to, int offset, int len, int odd,
 }
 
 static void icmp_push_reply(struct icmp_bxm *icmp_param,
+			    struct flowi4 *fl4,
 			    struct ipcm_cookie *ipc, struct rtable **rt)
 {
 	struct sock *sk;
 	struct sk_buff *skb;
 
 	sk = icmp_sk(dev_net((*rt)->dst.dev));
-	if (ip_append_data(sk, icmp_glue_bits, icmp_param,
+	if (ip_append_data(sk, fl4, icmp_glue_bits, icmp_param,
 			   icmp_param->data_len+icmp_param->head_len,
 			   icmp_param->head_len,
 			   ipc, rt, MSG_DONTWAIT) < 0) {
@@ -316,7 +317,7 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param,
 						 icmp_param->head_len, csum);
 		icmph->checksum = csum_fold(csum);
 		skb->ip_summed = CHECKSUM_NONE;
-		ip_push_pending_frames(sk);
+		ip_push_pending_frames(sk, fl4);
 	}
 }
 
@@ -329,11 +330,12 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
 	struct ipcm_cookie ipc;
 	struct rtable *rt = skb_rtable(skb);
 	struct net *net = dev_net(rt->dst.dev);
+	struct flowi4 fl4;
 	struct sock *sk;
 	struct inet_sock *inet;
 	__be32 daddr;
 
-	if (ip_options_echo(&icmp_param->replyopts, skb))
+	if (ip_options_echo(&icmp_param->replyopts.opt.opt, skb))
 		return;
 
 	sk = icmp_xmit_lock(net);
@@ -344,65 +346,60 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
 	icmp_param->data.icmph.checksum = 0;
 
 	inet->tos = ip_hdr(skb)->tos;
-	daddr = ipc.addr = rt->rt_src;
+	daddr = ipc.addr = ip_hdr(skb)->saddr;
 	ipc.opt = NULL;
 	ipc.tx_flags = 0;
-	if (icmp_param->replyopts.optlen) {
-		ipc.opt = &icmp_param->replyopts;
-		if (ipc.opt->srr)
-			daddr = icmp_param->replyopts.faddr;
+	if (icmp_param->replyopts.opt.opt.optlen) {
+		ipc.opt = &icmp_param->replyopts.opt;
+		if (ipc.opt->opt.srr)
+			daddr = icmp_param->replyopts.opt.opt.faddr;
 	}
-	{
-		struct flowi4 fl4 = {
-			.daddr = daddr,
-			.saddr = rt->rt_spec_dst,
-			.flowi4_tos = RT_TOS(ip_hdr(skb)->tos),
-			.flowi4_proto = IPPROTO_ICMP,
-		};
-		security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
-		rt = ip_route_output_key(net, &fl4);
-		if (IS_ERR(rt))
-			goto out_unlock;
-	}
-	if (icmpv4_xrlim_allow(net, rt, icmp_param->data.icmph.type,
+	memset(&fl4, 0, sizeof(fl4));
+	fl4.daddr = daddr;
+	fl4.saddr = rt->rt_spec_dst;
+	fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
+	fl4.flowi4_proto = IPPROTO_ICMP;
+	security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
+	rt = ip_route_output_key(net, &fl4);
+	if (IS_ERR(rt))
+		goto out_unlock;
+	if (icmpv4_xrlim_allow(net, rt, &fl4, icmp_param->data.icmph.type,
 			       icmp_param->data.icmph.code))
-		icmp_push_reply(icmp_param, &ipc, &rt);
+		icmp_push_reply(icmp_param, &fl4, &ipc, &rt);
 	ip_rt_put(rt);
 out_unlock:
 	icmp_xmit_unlock(sk);
 }
 
-static struct rtable *icmp_route_lookup(struct net *net, struct sk_buff *skb_in,
-					struct iphdr *iph,
+static struct rtable *icmp_route_lookup(struct net *net,
+					struct flowi4 *fl4,
+					struct sk_buff *skb_in,
+					const struct iphdr *iph,
 					__be32 saddr, u8 tos,
 					int type, int code,
 					struct icmp_bxm *param)
 {
-	struct flowi4 fl4 = {
-		.daddr = (param->replyopts.srr ?
-			  param->replyopts.faddr : iph->saddr),
-		.saddr = saddr,
-		.flowi4_tos = RT_TOS(tos),
-		.flowi4_proto = IPPROTO_ICMP,
-		.fl4_icmp_type = type,
-		.fl4_icmp_code = code,
-	};
 	struct rtable *rt, *rt2;
 	int err;
 
-	security_skb_classify_flow(skb_in, flowi4_to_flowi(&fl4));
-	rt = __ip_route_output_key(net, &fl4);
+	memset(fl4, 0, sizeof(*fl4));
+	fl4->daddr = (param->replyopts.opt.opt.srr ?
+		      param->replyopts.opt.opt.faddr : iph->saddr);
+	fl4->saddr = saddr;
+	fl4->flowi4_tos = RT_TOS(tos);
+	fl4->flowi4_proto = IPPROTO_ICMP;
+	fl4->fl4_icmp_type = type;
+	fl4->fl4_icmp_code = code;
+	security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4));
+	rt = __ip_route_output_key(net, fl4);
 	if (IS_ERR(rt))
 		return rt;
 
 	/* No need to clone since we're just using its address. */
 	rt2 = rt;
 
-	if (!fl4.saddr)
-		fl4.saddr = rt->rt_src;
-
 	rt = (struct rtable *) xfrm_lookup(net, &rt->dst,
-					   flowi4_to_flowi(&fl4), NULL, 0);
+					   flowi4_to_flowi(fl4), NULL, 0);
 	if (!IS_ERR(rt)) {
 		if (rt != rt2)
 			return rt;
@@ -411,19 +408,19 @@ static struct rtable *icmp_route_lookup(struct net *net, struct sk_buff *skb_in,
 	} else
 		return rt;
 
-	err = xfrm_decode_session_reverse(skb_in, flowi4_to_flowi(&fl4), AF_INET);
+	err = xfrm_decode_session_reverse(skb_in, flowi4_to_flowi(fl4), AF_INET);
 	if (err)
 		goto relookup_failed;
 
-	if (inet_addr_type(net, fl4.saddr) == RTN_LOCAL) {
-		rt2 = __ip_route_output_key(net, &fl4);
+	if (inet_addr_type(net, fl4->saddr) == RTN_LOCAL) {
+		rt2 = __ip_route_output_key(net, fl4);
 		if (IS_ERR(rt2))
 			err = PTR_ERR(rt2);
 	} else {
 		struct flowi4 fl4_2 = {};
 		unsigned long orefdst;
 
-		fl4_2.daddr = fl4.saddr;
+		fl4_2.daddr = fl4->saddr;
 		rt2 = ip_route_output_key(net, &fl4_2);
 		if (IS_ERR(rt2)) {
 			err = PTR_ERR(rt2);
@@ -431,7 +428,7 @@ static struct rtable *icmp_route_lookup(struct net *net, struct sk_buff *skb_in,
 		}
 		/* Ugh! */
 		orefdst = skb_in->_skb_refdst; /* save old refdst */
-		err = ip_route_input(skb_in, fl4.daddr, fl4.saddr,
+		err = ip_route_input(skb_in, fl4->daddr, fl4->saddr,
 				     RT_TOS(tos), rt2->dst.dev);
 
 		dst_release(&rt2->dst);
@@ -443,7 +440,7 @@ static struct rtable *icmp_route_lookup(struct net *net, struct sk_buff *skb_in,
 		goto relookup_failed;
 
 	rt2 = (struct rtable *) xfrm_lookup(net, &rt2->dst,
-					    flowi4_to_flowi(&fl4), NULL,
+					    flowi4_to_flowi(fl4), NULL,
 					    XFRM_LOOKUP_ICMP);
 	if (!IS_ERR(rt2)) {
 		dst_release(&rt->dst);
@@ -482,6 +479,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
 	struct icmp_bxm icmp_param;
 	struct rtable *rt = skb_rtable(skb_in);
 	struct ipcm_cookie ipc;
+	struct flowi4 fl4;
 	__be32 saddr;
 	u8  tos;
 	struct net *net;
@@ -581,7 +579,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
 					IPTOS_PREC_INTERNETCONTROL) :
 			iph->tos;
 
-	if (ip_options_echo(&icmp_param.replyopts, skb_in))
+	if (ip_options_echo(&icmp_param.replyopts.opt.opt, skb_in))
 		goto out_unlock;
 
 
@@ -597,15 +595,15 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
 	icmp_param.offset = skb_network_offset(skb_in);
 	inet_sk(sk)->tos = tos;
 	ipc.addr = iph->saddr;
-	ipc.opt = &icmp_param.replyopts;
+	ipc.opt = &icmp_param.replyopts.opt;
 	ipc.tx_flags = 0;
 
-	rt = icmp_route_lookup(net, skb_in, iph, saddr, tos,
+	rt = icmp_route_lookup(net, &fl4, skb_in, iph, saddr, tos,
 			       type, code, &icmp_param);
 	if (IS_ERR(rt))
 		goto out_unlock;
 
-	if (!icmpv4_xrlim_allow(net, rt, type, code))
+	if (!icmpv4_xrlim_allow(net, rt, &fl4, type, code))
 		goto ende;
 
 	/* RFC says return as much as we can without exceeding 576 bytes. */
@@ -613,7 +611,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
 	room = dst_mtu(&rt->dst);
 	if (room > 576)
 		room = 576;
-	room -= sizeof(struct iphdr) + icmp_param.replyopts.optlen;
+	room -= sizeof(struct iphdr) + icmp_param.replyopts.opt.opt.optlen;
 	room -= sizeof(struct icmphdr);
 
 	icmp_param.data_len = skb_in->len - icmp_param.offset;
@@ -621,7 +619,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
 		icmp_param.data_len = room;
 	icmp_param.head_len = sizeof(struct icmphdr);
 
-	icmp_push_reply(&icmp_param, &ipc, &rt);
+	icmp_push_reply(&icmp_param, &fl4, &ipc, &rt);
 ende:
 	ip_rt_put(rt);
 out_unlock:
@@ -637,7 +635,7 @@ EXPORT_SYMBOL(icmp_send);
 
 static void icmp_unreach(struct sk_buff *skb)
 {
-	struct iphdr *iph;
+	const struct iphdr *iph;
 	struct icmphdr *icmph;
 	int hash, protocol;
 	const struct net_protocol *ipprot;
@@ -656,7 +654,7 @@ static void icmp_unreach(struct sk_buff *skb)
 		goto out_err;
 
 	icmph = icmp_hdr(skb);
-	iph   = (struct iphdr *)skb->data;
+	iph   = (const struct iphdr *)skb->data;
 
 	if (iph->ihl < 5) /* Mangled header, drop. */
 		goto out_err;
@@ -729,7 +727,7 @@ static void icmp_unreach(struct sk_buff *skb)
 	if (!pskb_may_pull(skb, iph->ihl * 4 + 8))
 		goto out;
 
-	iph = (struct iphdr *)skb->data;
+	iph = (const struct iphdr *)skb->data;
 	protocol = iph->protocol;
 
 	/*
@@ -758,7 +756,7 @@ out_err:
 
 static void icmp_redirect(struct sk_buff *skb)
 {
-	struct iphdr *iph;
+	const struct iphdr *iph;
 
 	if (skb->len < sizeof(struct iphdr))
 		goto out_err;
@@ -769,7 +767,7 @@ static void icmp_redirect(struct sk_buff *skb)
 	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 		goto out;
 
-	iph = (struct iphdr *)skb->data;
+	iph = (const struct iphdr *)skb->data;
 
 	switch (icmp_hdr(skb)->code & 7) {
 	case ICMP_REDIR_NET:
@@ -784,6 +782,15 @@ static void icmp_redirect(struct sk_buff *skb)
 			       iph->saddr, skb->dev);
 		break;
 	}
+
+	/* Ping wants to see redirects.
+	 * Let's pretend they are errors of sorts... */
+	if (iph->protocol == IPPROTO_ICMP &&
+	    iph->ihl >= 5 &&
+	    pskb_may_pull(skb, (iph->ihl<<2)+8)) {
+		ping_err(skb, icmp_hdr(skb)->un.gateway);
+	}
+
 out:
 	return;
 out_err:
@@ -933,12 +940,12 @@ static void icmp_address_reply(struct sk_buff *skb)
 		BUG_ON(mp == NULL);
 		for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
 			if (*mp == ifa->ifa_mask &&
-			    inet_ifa_match(rt->rt_src, ifa))
+			    inet_ifa_match(ip_hdr(skb)->saddr, ifa))
 				break;
 		}
 		if (!ifa && net_ratelimit()) {
 			printk(KERN_INFO "Wrong address mask %pI4 from %s/%pI4\n",
-			       mp, dev->name, &rt->rt_src);
+			       mp, dev->name, &ip_hdr(skb)->saddr);
 		}
 	}
 }
@@ -1044,7 +1051,7 @@ error:
  */
 static const struct icmp_control icmp_pointers[NR_ICMP_TYPES + 1] = {
 	[ICMP_ECHOREPLY] = {
-		.handler = icmp_discard,
+		.handler = ping_rcv,
 	},
 	[1] = {
 		.handler = icmp_discard,
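A theme running through the icmp.c hunks (and the igmp.c and ip_gre.c ones below): the struct flowi4 moves out of inner scopes into the caller, the route lookup fills it in, and the addresses routing actually chose are read back from fl4 rather than from the rt_src/rt_dst fields of the rtable. A condensed sketch of the calling convention, assuming (as these patches rely on) that the output-route lookup fills in an unset source address; error handling trimmed:

	static int reply_route_sketch(struct net *net, __be32 daddr, u8 tos)
	{
		struct flowi4 fl4;
		struct rtable *rt;

		memset(&fl4, 0, sizeof(fl4));
		fl4.daddr = daddr;
		fl4.flowi4_tos = RT_TOS(tos);
		fl4.flowi4_proto = IPPROTO_ICMP;

		rt = ip_route_output_key(net, &fl4);
		if (IS_ERR(rt))
			return PTR_ERR(rt);

		/* fl4.saddr now holds the source address routing selected;
		 * header construction uses fl4, not rt->rt_src/rt->rt_dst. */
		ip_rt_put(rt);
		return 0;
	}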
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 1fd3d9ce8398..f1d27f6c9351 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -149,17 +149,11 @@ static void ip_mc_clear_src(struct ip_mc_list *pmc);
 static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
 			 int sfcount, __be32 *psfsrc, int delta);
 
-
-static void ip_mc_list_reclaim(struct rcu_head *head)
-{
-	kfree(container_of(head, struct ip_mc_list, rcu));
-}
-
 static void ip_ma_put(struct ip_mc_list *im)
 {
 	if (atomic_dec_and_test(&im->refcnt)) {
 		in_dev_put(im->interface);
-		call_rcu(&im->rcu, ip_mc_list_reclaim);
+		kfree_rcu(im, rcu);
 	}
 }
 
@@ -309,6 +303,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
 	struct iphdr *pip;
 	struct igmpv3_report *pig;
 	struct net *net = dev_net(dev);
+	struct flowi4 fl4;
 
 	while (1) {
 		skb = alloc_skb(size + LL_ALLOCATED_SPACE(dev),
@@ -321,18 +316,13 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
 	}
 	igmp_skb_size(skb) = size;
 
-	rt = ip_route_output_ports(net, NULL, IGMPV3_ALL_MCR, 0,
+	rt = ip_route_output_ports(net, &fl4, NULL, IGMPV3_ALL_MCR, 0,
 				   0, 0,
 				   IPPROTO_IGMP, 0, dev->ifindex);
 	if (IS_ERR(rt)) {
 		kfree_skb(skb);
 		return NULL;
 	}
-	if (rt->rt_src == 0) {
-		kfree_skb(skb);
-		ip_rt_put(rt);
-		return NULL;
-	}
 
 	skb_dst_set(skb, &rt->dst);
 	skb->dev = dev;
@@ -348,8 +338,8 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
 	pip->tos      = 0xc0;
 	pip->frag_off = htons(IP_DF);
 	pip->ttl      = 1;
-	pip->daddr    = rt->rt_dst;
-	pip->saddr    = rt->rt_src;
+	pip->daddr    = fl4.daddr;
+	pip->saddr    = fl4.saddr;
 	pip->protocol = IPPROTO_IGMP;
 	pip->tot_len  = 0;	/* filled in later */
 	ip_select_ident(pip, &rt->dst, NULL);
@@ -655,6 +645,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
 	struct net_device *dev = in_dev->dev;
 	struct net *net = dev_net(dev);
 	__be32	group = pmc ? pmc->multiaddr : 0;
+	struct flowi4 fl4;
 	__be32	dst;
 
 	if (type == IGMPV3_HOST_MEMBERSHIP_REPORT)
@@ -664,17 +655,12 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
 	else
 		dst = group;
 
-	rt = ip_route_output_ports(net, NULL, dst, 0,
+	rt = ip_route_output_ports(net, &fl4, NULL, dst, 0,
 				   0, 0,
 				   IPPROTO_IGMP, 0, dev->ifindex);
 	if (IS_ERR(rt))
 		return -1;
 
-	if (rt->rt_src == 0) {
-		ip_rt_put(rt);
-		return -1;
-	}
-
 	skb = alloc_skb(IGMP_SIZE+LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
 	if (skb == NULL) {
 		ip_rt_put(rt);
@@ -695,7 +681,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
 	iph->frag_off = htons(IP_DF);
 	iph->ttl      = 1;
 	iph->daddr    = dst;
-	iph->saddr    = rt->rt_src;
+	iph->saddr    = fl4.saddr;
 	iph->protocol = IPPROTO_IGMP;
 	ip_select_ident(iph, &rt->dst, NULL);
 	((u8*)&iph[1])[0] = IPOPT_RA;
@@ -1169,20 +1155,18 @@ static void igmp_group_dropped(struct ip_mc_list *im)
 
 	if (!in_dev->dead) {
 		if (IGMP_V1_SEEN(in_dev))
-			goto done;
+			return;
 		if (IGMP_V2_SEEN(in_dev)) {
 			if (reporter)
 				igmp_send_report(in_dev, im, IGMP_HOST_LEAVE_MESSAGE);
-			goto done;
+			return;
 		}
 		/* IGMPv3 */
 		igmpv3_add_delrec(in_dev, im);
 
 		igmp_ifc_event(in_dev);
 	}
-done:
 #endif
-	ip_mc_clear_src(im);
 }
 
 static void igmp_group_added(struct ip_mc_list *im)
@@ -1319,6 +1303,7 @@ void ip_mc_dec_group(struct in_device *in_dev, __be32 addr)
 			*ip = i->next_rcu;
 			in_dev->mc_count--;
 			igmp_group_dropped(i);
+			ip_mc_clear_src(i);
 
 			if (!in_dev->dead)
 				ip_rt_multicast_event(in_dev);
@@ -1428,7 +1413,8 @@ void ip_mc_destroy_dev(struct in_device *in_dev)
 		in_dev->mc_list = i->next_rcu;
 		in_dev->mc_count--;
 
-		igmp_group_dropped(i);
+		/* We've dropped the groups in ip_mc_down already */
+		ip_mc_clear_src(i);
 		ip_ma_put(i);
 	}
 }
@@ -1836,12 +1822,6 @@ done:
 }
 EXPORT_SYMBOL(ip_mc_join_group);
 
-static void ip_sf_socklist_reclaim(struct rcu_head *rp)
-{
-	kfree(container_of(rp, struct ip_sf_socklist, rcu));
-	/* sk_omem_alloc should have been decreased by the caller*/
-}
-
 static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
 			   struct in_device *in_dev)
 {
@@ -1858,18 +1838,10 @@ static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
 	rcu_assign_pointer(iml->sflist, NULL);
 	/* decrease mem now to avoid the memleak warning */
 	atomic_sub(IP_SFLSIZE(psf->sl_max), &sk->sk_omem_alloc);
-	call_rcu(&psf->rcu, ip_sf_socklist_reclaim);
+	kfree_rcu(psf, rcu);
 	return err;
 }
 
-
-static void ip_mc_socklist_reclaim(struct rcu_head *rp)
-{
-	kfree(container_of(rp, struct ip_mc_socklist, rcu));
-	/* sk_omem_alloc should have been decreased by the caller*/
-}
-
-
 /*
  *	Ask a socket to leave a group.
  */
@@ -1909,7 +1881,7 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
 			rtnl_unlock();
 			/* decrease mem now to avoid the memleak warning */
 			atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
-			call_rcu(&iml->rcu, ip_mc_socklist_reclaim);
+			kfree_rcu(iml, rcu);
 			return 0;
 		}
 		if (!in_dev)
@@ -2026,7 +1998,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
 			newpsl->sl_addr[i] = psl->sl_addr[i];
 		/* decrease mem now to avoid the memleak warning */
 		atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc);
-		call_rcu(&psl->rcu, ip_sf_socklist_reclaim);
+		kfree_rcu(psl, rcu);
 	}
 	rcu_assign_pointer(pmc->sflist, newpsl);
 	psl = newpsl;
@@ -2127,7 +2099,7 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
 					psl->sl_count, psl->sl_addr, 0);
 		/* decrease mem now to avoid the memleak warning */
 		atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc);
-		call_rcu(&psl->rcu, ip_sf_socklist_reclaim);
+		kfree_rcu(psl, rcu);
 	} else
 		(void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
 				     0, NULL, 0);
@@ -2324,7 +2296,7 @@ void ip_mc_drop_socket(struct sock *sk)
 			ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr);
 		/* decrease mem now to avoid the memleak warning */
 		atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
-		call_rcu(&iml->rcu, ip_mc_socklist_reclaim);
+		kfree_rcu(iml, rcu);
 	}
 	rtnl_unlock();
 }
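One detail worth noting in the igmp.c conversions: the removed callbacks carried the comment "sk_omem_alloc should have been decreased by the caller", and that ordering survives the switch to kfree_rcu() — the accounting stays synchronous, only the kfree() itself is deferred. A small illustrative helper (hypothetical, not in the patch) capturing the shape:

	static void sf_socklist_release(struct sock *sk, struct ip_sf_socklist *psl)
	{
		/* account now, while the caller still owns the context... */
		atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc);
		/* ...and defer only the free until after a grace period */
		kfree_rcu(psl, rcu);
	}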
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 38f23e721b80..c14d88ad348d 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -33,7 +33,7 @@ EXPORT_SYMBOL(inet_csk_timer_bug_msg);
  * This struct holds the first and last local port number.
  */
 struct local_ports sysctl_local_ports __read_mostly = {
-	.lock = SEQLOCK_UNLOCKED,
+	.lock = __SEQLOCK_UNLOCKED(sysctl_local_ports.lock),
 	.range = { 32768, 61000 },
 };
 
@@ -350,30 +350,24 @@ void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
 EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);
 
 struct dst_entry *inet_csk_route_req(struct sock *sk,
+				     struct flowi4 *fl4,
 				     const struct request_sock *req)
 {
 	struct rtable *rt;
 	const struct inet_request_sock *ireq = inet_rsk(req);
-	struct ip_options *opt = inet_rsk(req)->opt;
-	struct flowi4 fl4 = {
-		.flowi4_oif = sk->sk_bound_dev_if,
-		.flowi4_mark = sk->sk_mark,
-		.daddr = ((opt && opt->srr) ?
-			  opt->faddr : ireq->rmt_addr),
-		.saddr = ireq->loc_addr,
-		.flowi4_tos = RT_CONN_FLAGS(sk),
-		.flowi4_proto = sk->sk_protocol,
-		.flowi4_flags = inet_sk_flowi_flags(sk),
-		.fl4_sport = inet_sk(sk)->inet_sport,
-		.fl4_dport = ireq->rmt_port,
-	};
+	struct ip_options_rcu *opt = inet_rsk(req)->opt;
 	struct net *net = sock_net(sk);
 
-	security_req_classify_flow(req, flowi4_to_flowi(&fl4));
-	rt = ip_route_output_flow(net, &fl4, sk);
+	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
+			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
+			   sk->sk_protocol, inet_sk_flowi_flags(sk),
+			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
+			   ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
+	security_req_classify_flow(req, flowi4_to_flowi(fl4));
+	rt = ip_route_output_flow(net, fl4, sk);
 	if (IS_ERR(rt))
 		goto no_route;
-	if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
+	if (opt && opt->opt.is_strictroute && fl4->daddr != rt->rt_gateway)
 		goto route_err;
 	return &rt->dst;
 
@@ -385,6 +379,39 @@ no_route:
 }
 EXPORT_SYMBOL_GPL(inet_csk_route_req);
 
+struct dst_entry *inet_csk_route_child_sock(struct sock *sk,
+					    struct sock *newsk,
+					    const struct request_sock *req)
+{
+	const struct inet_request_sock *ireq = inet_rsk(req);
+	struct inet_sock *newinet = inet_sk(newsk);
+	struct ip_options_rcu *opt = ireq->opt;
+	struct net *net = sock_net(sk);
+	struct flowi4 *fl4;
+	struct rtable *rt;
+
+	fl4 = &newinet->cork.fl.u.ip4;
+	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
+			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
+			   sk->sk_protocol, inet_sk_flowi_flags(sk),
+			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
+			   ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
+	security_req_classify_flow(req, flowi4_to_flowi(fl4));
+	rt = ip_route_output_flow(net, fl4, sk);
+	if (IS_ERR(rt))
+		goto no_route;
+	if (opt && opt->opt.is_strictroute && fl4->daddr != rt->rt_gateway)
+		goto route_err;
+	return &rt->dst;
+
+route_err:
+	ip_rt_put(rt);
+no_route:
+	IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(inet_csk_route_child_sock);
+
 static inline u32 inet_synq_hash(const __be32 raddr, const __be16 rport,
 				 const u32 rnd, const u32 synq_hsize)
 {
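inet_csk_route_req() and the new inet_csk_route_child_sock() replace designated-initializer flowi4 setup with flowi4_init_output(), which packs oif, mark, TOS, scope, protocol, flags, both addresses and both ports into one call. A hedged usage fragment mirroring the hunk above (ireq fields as in the patch; not a standalone build):

	struct flowi4 fl4;

	flowi4_init_output(&fl4, sk->sk_bound_dev_if, sk->sk_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
			   sk->sk_protocol, inet_sk_flowi_flags(sk),
			   /* daddr */ ireq->rmt_addr,
			   /* saddr */ ireq->loc_addr,
			   /* dport */ ireq->rmt_port,
			   /* sport */ inet_sk(sk)->inet_sport);
	rt = ip_route_output_flow(sock_net(sk), &fl4, sk);

For the child-socket variant, the flowi4 lives in the new socket's inet cork (newinet->cork.fl.u.ip4), so the chosen flow survives into later transmit paths.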
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 2ada17129fce..3267d3898437 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -124,7 +124,7 @@ static int inet_csk_diag_fill(struct sock *sk,
 
 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
 	if (r->idiag_family == AF_INET6) {
-		struct ipv6_pinfo *np = inet6_sk(sk);
+		const struct ipv6_pinfo *np = inet6_sk(sk);
 
 		ipv6_addr_copy((struct in6_addr *)r->id.idiag_src,
 			       &np->rcv_saddr);
@@ -437,7 +437,7 @@ static int valid_cc(const void *bc, int len, int cc)
 			return 0;
 		if (cc == len)
 			return 1;
-		if (op->yes < 4)
+		if (op->yes < 4 || op->yes & 3)
 			return 0;
 		len -= op->yes;
 		bc  += op->yes;
@@ -447,11 +447,11 @@ static int valid_cc(const void *bc, int len, int cc)
 
 static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
 {
-	const unsigned char *bc = bytecode;
+	const void *bc = bytecode;
 	int  len = bytecode_len;
 
 	while (len > 0) {
-		struct inet_diag_bc_op *op = (struct inet_diag_bc_op *)bc;
+		const struct inet_diag_bc_op *op = bc;
 
 //printk("BC: %d %d %d {%d} / %d\n", op->code, op->yes, op->no, op[1].no, len);
 		switch (op->code) {
@@ -462,22 +462,20 @@ static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
 		case INET_DIAG_BC_S_LE:
 		case INET_DIAG_BC_D_GE:
 		case INET_DIAG_BC_D_LE:
-			if (op->yes < 4 || op->yes > len + 4)
-				return -EINVAL;
 		case INET_DIAG_BC_JMP:
-			if (op->no < 4 || op->no > len + 4)
+			if (op->no < 4 || op->no > len + 4 || op->no & 3)
 				return -EINVAL;
 			if (op->no < len &&
 			    !valid_cc(bytecode, bytecode_len, len - op->no))
 				return -EINVAL;
 			break;
 		case INET_DIAG_BC_NOP:
-			if (op->yes < 4 || op->yes > len + 4)
-				return -EINVAL;
 			break;
 		default:
 			return -EINVAL;
 		}
+		if (op->yes < 4 || op->yes > len + 4 || op->yes & 3)
+			return -EINVAL;
 		bc  += op->yes;
 		len -= op->yes;
 	}
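The inet_diag changes tighten the bytecode audit in two ways: every jump offset must now also be a multiple of 4, so op pointers always land on inet_diag_bc_op boundaries, and the op->yes check is hoisted out of the switch so it applies uniformly to every opcode. A self-contained sketch of the offset rule (demo struct; the real one lives in include/linux/inet_diag.h):

	#include <stdbool.h>

	/* mirrors the 4-byte inet_diag_bc_op layout */
	struct bc_op { unsigned char code, yes; unsigned short no; };

	static bool jump_ok(unsigned int off, int remaining)
	{
		/* at least one op, not past end-of-program, 4-byte aligned */
		return off >= sizeof(struct bc_op) &&
		       off <= (unsigned int)remaining + 4 &&
		       (off & 3) == 0;
	}

Without the alignment test, a crafted program could make the interpreter read an op straddling two real ops; with it, the audit proves every reachable offset is well-formed up front.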
diff --git a/net/ipv4/inet_lro.c b/net/ipv4/inet_lro.c
index 47038cb6c138..85a0f75dae64 100644
--- a/net/ipv4/inet_lro.c
+++ b/net/ipv4/inet_lro.c
@@ -51,8 +51,8 @@ MODULE_DESCRIPTION("Large Receive Offload (ipv4 / tcp)");
  * Basic tcp checks whether packet is suitable for LRO
  */
 
-static int lro_tcp_ip_check(struct iphdr *iph, struct tcphdr *tcph,
-			    int len, struct net_lro_desc *lro_desc)
+static int lro_tcp_ip_check(const struct iphdr *iph, const struct tcphdr *tcph,
+			    int len, const struct net_lro_desc *lro_desc)
 {
 	/* check ip header: don't aggregate padded frames */
 	if (ntohs(iph->tot_len) != len)
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index 9df4e635fb5f..ce616d92cc54 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -154,11 +154,9 @@ void __init inet_initpeers(void)
 /* Called with or without local BH being disabled. */
 static void unlink_from_unused(struct inet_peer *p)
 {
-	if (!list_empty(&p->unused)) {
-		spin_lock_bh(&unused_peers.lock);
-		list_del_init(&p->unused);
-		spin_unlock_bh(&unused_peers.lock);
-	}
+	spin_lock_bh(&unused_peers.lock);
+	list_del_init(&p->unused);
+	spin_unlock_bh(&unused_peers.lock);
 }
 
 static int addr_compare(const struct inetpeer_addr *a,
@@ -205,6 +203,20 @@ static int addr_compare(const struct inetpeer_addr *a,
 	u;							\
 })
 
+static bool atomic_add_unless_return(atomic_t *ptr, int a, int u, int *newv)
+{
+	int cur, old = atomic_read(ptr);
+
+	while (old != u) {
+		*newv = old + a;
+		cur = atomic_cmpxchg(ptr, old, *newv);
+		if (cur == old)
+			return true;
+		old = cur;
+	}
+	return false;
+}
+
 /*
  * Called with rcu_read_lock()
  * Because we hold no lock against a writer, its quite possible we fall
@@ -213,7 +225,8 @@ static int addr_compare(const struct inetpeer_addr *a,
  * We exit from this function if number of links exceeds PEER_MAXDEPTH
  */
 static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr,
-				    struct inet_peer_base *base)
+				    struct inet_peer_base *base,
+				    int *newrefcnt)
 {
 	struct inet_peer *u = rcu_dereference(base->root);
 	int count = 0;
@@ -226,7 +239,7 @@ static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr,
 			 * distinction between an unused entry (refcnt=0) and
 			 * a freed one.
 			 */
-			if (unlikely(!atomic_add_unless(&u->refcnt, 1, -1)))
+			if (!atomic_add_unless_return(&u->refcnt, 1, -1, newrefcnt))
 				u = NULL;
 			return u;
 		}
@@ -465,22 +478,23 @@ struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
 	struct inet_peer_base *base = family_to_base(daddr->family);
 	struct inet_peer *p;
 	unsigned int sequence;
-	int invalidated;
+	int invalidated, newrefcnt = 0;
 
 	/* Look up for the address quickly, lockless.
 	 * Because of a concurrent writer, we might not find an existing entry.
 	 */
 	rcu_read_lock();
 	sequence = read_seqbegin(&base->lock);
-	p = lookup_rcu(daddr, base);
+	p = lookup_rcu(daddr, base, &newrefcnt);
 	invalidated = read_seqretry(&base->lock, sequence);
 	rcu_read_unlock();
 
 	if (p) {
-		/* The existing node has been found.
+found:		/* The existing node has been found.
 		 * Remove the entry from unused list if it was there.
 		 */
-		unlink_from_unused(p);
+		if (newrefcnt == 1)
+			unlink_from_unused(p);
 		return p;
 	}
 
@@ -494,11 +508,9 @@ struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
 	write_seqlock_bh(&base->lock);
 	p = lookup(daddr, stack, base);
 	if (p != peer_avl_empty) {
-		atomic_inc(&p->refcnt);
+		newrefcnt = atomic_inc_return(&p->refcnt);
 		write_sequnlock_bh(&base->lock);
-		/* Remove the entry from unused list if it was there. */
-		unlink_from_unused(p);
-		return p;
+		goto found;
 	}
 	p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL;
 	if (p) {
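atomic_add_unless_return() above is atomic_add_unless() with one addition: it reports the post-add value through *newv, which lets inet_getpeer() take the unused-list lock only when it just acquired the very first reference (newrefcnt == 1). A runnable userspace rendition of the same cmpxchg loop in C11 atomics:

	#include <stdatomic.h>
	#include <stdbool.h>

	static bool add_unless_return(atomic_int *ptr, int a, int u, int *newv)
	{
		int old = atomic_load(ptr);

		while (old != u) {
			*newv = old + a;
			/* on failure, compare_exchange reloads old for the retry */
			if (atomic_compare_exchange_weak(ptr, &old, *newv))
				return true;
		}
		return false;
	}

The loop terminates either by winning the compare-and-swap (add performed, true returned) or by observing the forbidden value u (nothing changed, false returned).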
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 99461f09320f..3b34d1c86270 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -84,7 +84,7 @@ int ip_forward(struct sk_buff *skb)
 
 	rt = skb_rtable(skb);
 
-	if (opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
+	if (opt->is_strictroute && ip_hdr(skb)->daddr != rt->rt_gateway)
 		goto sr_failed;
 
 	if (unlikely(skb->len > dst_mtu(&rt->dst) && !skb_is_gso(skb) &&
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index b1d282f11be7..0ad6035f6366 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -77,22 +77,40 @@ struct ipq {
 	struct inet_peer *peer;
 };
 
-#define IPFRAG_ECN_CLEAR  0x01 /* one frag had INET_ECN_NOT_ECT */
-#define IPFRAG_ECN_SET_CE 0x04 /* one frag had INET_ECN_CE */
+/* RFC 3168 support :
+ * We want to check ECN values of all fragments, do detect invalid combinations.
+ * In ipq->ecn, we store the OR value of each ip4_frag_ecn() fragment value.
+ */
+#define	IPFRAG_ECN_NOT_ECT	0x01 /* one frag had ECN_NOT_ECT */
+#define	IPFRAG_ECN_ECT_1	0x02 /* one frag had ECN_ECT_1 */
+#define	IPFRAG_ECN_ECT_0	0x04 /* one frag had ECN_ECT_0 */
+#define	IPFRAG_ECN_CE		0x08 /* one frag had ECN_CE */
 
 static inline u8 ip4_frag_ecn(u8 tos)
 {
-	tos = (tos & INET_ECN_MASK) + 1;
-	/*
-	 * After the last operation we have (in binary):
-	 * INET_ECN_NOT_ECT => 001
-	 * INET_ECN_ECT_1   => 010
-	 * INET_ECN_ECT_0   => 011
-	 * INET_ECN_CE      => 100
-	 */
-	return (tos & 2) ? 0 : tos;
+	return 1 << (tos & INET_ECN_MASK);
 }
 
+/* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
+ * Value : 0xff if frame should be dropped.
+ *         0 or INET_ECN_CE value, to be ORed in to final iph->tos field
+ */
+static const u8 ip4_frag_ecn_table[16] = {
+	/* at least one fragment had CE, and others ECT_0 or ECT_1 */
+	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0]			= INET_ECN_CE,
+	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1]			= INET_ECN_CE,
+	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1]	= INET_ECN_CE,
+
+	/* invalid combinations : drop frame */
+	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
+	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
+	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
+	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
+	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
+	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
+	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
+};
+
 static struct inet_frags ip4_frags;
 
 int ip_frag_nqueues(struct net *net)
@@ -524,9 +542,15 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
 	int len;
 	int ihlen;
 	int err;
+	u8 ecn;
 
 	ipq_kill(qp);
 
+	ecn = ip4_frag_ecn_table[qp->ecn];
+	if (unlikely(ecn == 0xff)) {
+		err = -EINVAL;
+		goto out_fail;
+	}
 	/* Make the one we just received the head. */
 	if (prev) {
 		head = prev->next;
@@ -605,17 +629,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
 	iph = ip_hdr(head);
 	iph->frag_off = 0;
 	iph->tot_len = htons(len);
-	/* RFC3168 5.3 Fragmentation support
-	 * If one fragment had INET_ECN_NOT_ECT,
-	 *	reassembled frame also has INET_ECN_NOT_ECT
-	 * Elif one fragment had INET_ECN_CE
-	 *	reassembled frame also has INET_ECN_CE
-	 */
-	if (qp->ecn & IPFRAG_ECN_CLEAR)
-		iph->tos &= ~INET_ECN_MASK;
-	else if (qp->ecn & IPFRAG_ECN_SET_CE)
-		iph->tos |= INET_ECN_CE;
-
+	iph->tos |= ecn;
 	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
 	qp->q.fragments = NULL;
 	qp->q.fragments_tail = NULL;
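The reworked fragment handling gives each ECN codepoint its own bit, ORs the bits together as fragments arrive, and resolves the result through a 16-entry table at reassembly time: 0xff marks an RFC 3168-invalid mix (Not-ECT fragments combined with ECT/CE ones) and the datagram is dropped. A runnable userspace check of two interesting rows, with the constants copied from the hunk above:

	#include <assert.h>

	#define INET_ECN_MASK		3
	#define IPFRAG_ECN_NOT_ECT	0x01
	#define IPFRAG_ECN_ECT_1	0x02
	#define IPFRAG_ECN_ECT_0	0x04
	#define IPFRAG_ECN_CE		0x08

	static unsigned char frag_ecn(unsigned char tos)
	{
		return 1 << (tos & INET_ECN_MASK);
	}

	int main(void)
	{
		/* ECT(0) fragment (tos ..10) plus CE fragment (tos ..11):
		 * valid mix, reassembled header must carry CE */
		assert((frag_ecn(0x02) | frag_ecn(0x03)) ==
		       (IPFRAG_ECN_ECT_0 | IPFRAG_ECN_CE));

		/* Not-ECT (tos ..00) mixed with CE: invalid, must drop */
		assert((frag_ecn(0x00) | frag_ecn(0x03)) ==
		       (IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE));
		return 0;
	}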
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index da5941f18c3c..8871067560db 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -413,11 +413,6 @@ static struct ip_tunnel *ipgre_tunnel_locate(struct net *net,
413 413
414 dev_net_set(dev, net); 414 dev_net_set(dev, net);
415 415
416 if (strchr(name, '%')) {
417 if (dev_alloc_name(dev, name) < 0)
418 goto failed_free;
419 }
420
421 nt = netdev_priv(dev); 416 nt = netdev_priv(dev);
422 nt->parms = *parms; 417 nt->parms = *parms;
423 dev->rtnl_link_ops = &ipgre_link_ops; 418 dev->rtnl_link_ops = &ipgre_link_ops;
@@ -462,7 +457,7 @@ static void ipgre_err(struct sk_buff *skb, u32 info)
462 by themself??? 457 by themself???
463 */ 458 */
464 459
465 struct iphdr *iph = (struct iphdr *)skb->data; 460 const struct iphdr *iph = (const struct iphdr *)skb->data;
466 __be16 *p = (__be16*)(skb->data+(iph->ihl<<2)); 461 __be16 *p = (__be16*)(skb->data+(iph->ihl<<2));
467 int grehlen = (iph->ihl<<2) + 4; 462 int grehlen = (iph->ihl<<2) + 4;
468 const int type = icmp_hdr(skb)->type; 463 const int type = icmp_hdr(skb)->type;
@@ -534,7 +529,7 @@ out:
534 rcu_read_unlock(); 529 rcu_read_unlock();
535} 530}
536 531
537static inline void ipgre_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb) 532static inline void ipgre_ecn_decapsulate(const struct iphdr *iph, struct sk_buff *skb)
538{ 533{
539 if (INET_ECN_is_ce(iph->tos)) { 534 if (INET_ECN_is_ce(iph->tos)) {
540 if (skb->protocol == htons(ETH_P_IP)) { 535 if (skb->protocol == htons(ETH_P_IP)) {
@@ -546,19 +541,19 @@ static inline void ipgre_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb)
546} 541}
547 542
548static inline u8 543static inline u8
549ipgre_ecn_encapsulate(u8 tos, struct iphdr *old_iph, struct sk_buff *skb) 544ipgre_ecn_encapsulate(u8 tos, const struct iphdr *old_iph, struct sk_buff *skb)
550{ 545{
551 u8 inner = 0; 546 u8 inner = 0;
552 if (skb->protocol == htons(ETH_P_IP)) 547 if (skb->protocol == htons(ETH_P_IP))
553 inner = old_iph->tos; 548 inner = old_iph->tos;
554 else if (skb->protocol == htons(ETH_P_IPV6)) 549 else if (skb->protocol == htons(ETH_P_IPV6))
-	inner = ipv6_get_dsfield((struct ipv6hdr *)old_iph);
+	inner = ipv6_get_dsfield((const struct ipv6hdr *)old_iph);
 	return INET_ECN_encapsulate(tos, inner);
 }
 
 static int ipgre_rcv(struct sk_buff *skb)
 {
-	struct iphdr *iph;
+	const struct iphdr *iph;
 	u8 *h;
 	__be16 flags;
 	__sum16 csum = 0;
@@ -697,8 +692,9 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 {
 	struct ip_tunnel *tunnel = netdev_priv(dev);
 	struct pcpu_tstats *tstats;
-	struct iphdr *old_iph = ip_hdr(skb);
-	struct iphdr *tiph;
+	const struct iphdr *old_iph = ip_hdr(skb);
+	const struct iphdr *tiph;
+	struct flowi4 fl4;
 	u8 tos;
 	__be16 df;
 	struct rtable *rt;			/* Route to the other host */
@@ -714,7 +710,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 
 	if (dev->header_ops && dev->type == ARPHRD_IPGRE) {
 		gre_hlen = 0;
-		tiph = (struct iphdr *)skb->data;
+		tiph = (const struct iphdr *)skb->data;
 	} else {
 		gre_hlen = tunnel->hlen;
 		tiph = &tunnel->parms.iph;
@@ -735,14 +731,14 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 	}
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 	else if (skb->protocol == htons(ETH_P_IPV6)) {
-		struct in6_addr *addr6;
+		const struct in6_addr *addr6;
 		int addr_type;
 		struct neighbour *neigh = skb_dst(skb)->neighbour;
 
 		if (neigh == NULL)
 			goto tx_error;
 
-		addr6 = (struct in6_addr *)&neigh->primary_key;
+		addr6 = (const struct in6_addr *)&neigh->primary_key;
 		addr_type = ipv6_addr_type(addr6);
 
 		if (addr_type == IPV6_ADDR_ANY) {
@@ -766,10 +762,10 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 		if (skb->protocol == htons(ETH_P_IP))
 			tos = old_iph->tos;
 		else if (skb->protocol == htons(ETH_P_IPV6))
-			tos = ipv6_get_dsfield((struct ipv6hdr *)old_iph);
+			tos = ipv6_get_dsfield((const struct ipv6hdr *)old_iph);
 	}
 
-	rt = ip_route_output_gre(dev_net(dev), dst, tiph->saddr,
+	rt = ip_route_output_gre(dev_net(dev), &fl4, dst, tiph->saddr,
 				 tunnel->parms.o_key, RT_TOS(tos),
 				 tunnel->parms.link);
 	if (IS_ERR(rt)) {
@@ -873,15 +869,15 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
 	iph->frag_off = df;
 	iph->protocol = IPPROTO_GRE;
 	iph->tos = ipgre_ecn_encapsulate(tos, old_iph, skb);
-	iph->daddr = rt->rt_dst;
-	iph->saddr = rt->rt_src;
+	iph->daddr = fl4.daddr;
+	iph->saddr = fl4.saddr;
 
 	if ((iph->ttl = tiph->ttl) == 0) {
 		if (skb->protocol == htons(ETH_P_IP))
 			iph->ttl = old_iph->ttl;
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 		else if (skb->protocol == htons(ETH_P_IPV6))
-			iph->ttl = ((struct ipv6hdr *)old_iph)->hop_limit;
+			iph->ttl = ((const struct ipv6hdr *)old_iph)->hop_limit;
 #endif
 		else
 			iph->ttl = ip4_dst_hoplimit(&rt->dst);
@@ -927,7 +923,7 @@ static int ipgre_tunnel_bind_dev(struct net_device *dev)
 {
 	struct net_device *tdev = NULL;
 	struct ip_tunnel *tunnel;
-	struct iphdr *iph;
+	const struct iphdr *iph;
 	int hlen = LL_MAX_HEADER;
 	int mtu = ETH_DATA_LEN;
 	int addend = sizeof(struct iphdr) + 4;
@@ -938,12 +934,14 @@ static int ipgre_tunnel_bind_dev(struct net_device *dev)
 	/* Guess output device to choose reasonable mtu and needed_headroom */
 
 	if (iph->daddr) {
-		struct rtable *rt = ip_route_output_gre(dev_net(dev),
-						iph->daddr, iph->saddr,
-						tunnel->parms.o_key,
-						RT_TOS(iph->tos),
-						tunnel->parms.link);
-
+		struct flowi4 fl4;
+		struct rtable *rt;
+
+		rt = ip_route_output_gre(dev_net(dev), &fl4,
+					 iph->daddr, iph->saddr,
+					 tunnel->parms.o_key,
+					 RT_TOS(iph->tos),
+					 tunnel->parms.link);
 		if (!IS_ERR(rt)) {
 			tdev = rt->dst.dev;
 			ip_rt_put(rt);
@@ -1180,7 +1178,7 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
 
 static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
 {
-	struct iphdr *iph = (struct iphdr *) skb_mac_header(skb);
+	const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb);
 	memcpy(haddr, &iph->saddr, 4);
 	return 4;
 }
@@ -1196,13 +1194,15 @@ static int ipgre_open(struct net_device *dev)
 	struct ip_tunnel *t = netdev_priv(dev);
 
 	if (ipv4_is_multicast(t->parms.iph.daddr)) {
-		struct rtable *rt = ip_route_output_gre(dev_net(dev),
-					t->parms.iph.daddr,
-					t->parms.iph.saddr,
-					t->parms.o_key,
-					RT_TOS(t->parms.iph.tos),
-					t->parms.link);
-
+		struct flowi4 fl4;
+		struct rtable *rt;
+
+		rt = ip_route_output_gre(dev_net(dev), &fl4,
+					 t->parms.iph.daddr,
+					 t->parms.iph.saddr,
+					 t->parms.o_key,
+					 RT_TOS(t->parms.iph.tos),
+					 t->parms.link);
 		if (IS_ERR(rt))
			return -EADDRNOTAVAIL;
 		dev = rt->dst.dev;
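
The ip_gre.c hunks above share one pattern: the route lookup now fills a caller-supplied struct flowi4, and the encapsulating header is built from fl4.daddr/fl4.saddr instead of the route-cache fields rt->rt_dst/rt->rt_src that this series stops relying on. A minimal userspace sketch of that out-parameter idiom, with the struct and lookup function as illustrative stand-ins rather than the kernel API:

#include <stdio.h>

/* Illustrative stand-ins for struct flowi4 and a route lookup. */
struct flow_key {
	unsigned int daddr;	/* chosen destination, filled by the lookup */
	unsigned int saddr;	/* chosen source, filled by the lookup */
};

struct route {
	int ifindex;		/* whatever the lookup resolves to */
};

/* The lookup both selects a route and reports, via *fl, the exact
 * addresses it selected; the caller no longer digs them out of the
 * returned route object. */
static struct route *route_output(struct flow_key *fl,
				  unsigned int daddr, unsigned int saddr)
{
	static struct route r = { .ifindex = 2 };

	fl->daddr = daddr;
	fl->saddr = saddr ? saddr : 0x0100007f;	/* pick a source if unset */
	return &r;
}

int main(void)
{
	struct flow_key fl;
	struct route *rt = route_output(&fl, 0x08080808, 0);

	/* Headers are built from the flow key, not from rt. */
	printf("if=%d daddr=%08x saddr=%08x\n", rt->ifindex, fl.daddr, fl.saddr);
	return 0;
}

The payoff of the design is that the flow key is a stable record of the routing decision, so the route structure itself can drop per-packet address fields.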
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index d7b2b0987a3b..c8f48efc5fd3 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -268,7 +268,7 @@ int ip_local_deliver(struct sk_buff *skb)
 static inline int ip_rcv_options(struct sk_buff *skb)
 {
 	struct ip_options *opt;
-	struct iphdr *iph;
+	const struct iphdr *iph;
 	struct net_device *dev = skb->dev;
 
 	/* It looks as overkill, because not all
@@ -374,7 +374,7 @@ drop:
  */
 int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
 {
-	struct iphdr *iph;
+	const struct iphdr *iph;
 	u32 len;
 
 	/* When the interface is in promisc. mode, drop all the crap
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 2391b24e8251..ec93335901dd 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -14,6 +14,7 @@
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <asm/uaccess.h>
+#include <asm/unaligned.h>
 #include <linux/skbuff.h>
 #include <linux/ip.h>
 #include <linux/icmp.h>
@@ -36,8 +37,8 @@
  *       saddr is address of outgoing interface.
  */
 
-void ip_options_build(struct sk_buff * skb, struct ip_options * opt,
+void ip_options_build(struct sk_buff *skb, struct ip_options *opt,
 		      __be32 daddr, struct rtable *rt, int is_frag)
 {
 	unsigned char *iph = skb_network_header(skb);
 
@@ -50,9 +51,9 @@ void ip_options_build(struct sk_buff * skb, struct ip_options * opt,
 
 	if (!is_frag) {
 		if (opt->rr_needaddr)
-			ip_rt_get_source(iph+opt->rr+iph[opt->rr+2]-5, rt);
+			ip_rt_get_source(iph+opt->rr+iph[opt->rr+2]-5, skb, rt);
 		if (opt->ts_needaddr)
-			ip_rt_get_source(iph+opt->ts+iph[opt->ts+2]-9, rt);
+			ip_rt_get_source(iph+opt->ts+iph[opt->ts+2]-9, skb, rt);
 		if (opt->ts_needtime) {
 			struct timespec tv;
 			__be32 midtime;
@@ -83,9 +84,9 @@ void ip_options_build(struct sk_buff * skb, struct ip_options * opt,
  *       NOTE: dopt cannot point to skb.
  */
 
-int ip_options_echo(struct ip_options * dopt, struct sk_buff * skb)
+int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb)
 {
-	struct ip_options *sopt;
+	const struct ip_options *sopt;
 	unsigned char *sptr, *dptr;
 	int soffset, doffset;
 	int optlen;
@@ -95,10 +96,8 @@ int ip_options_echo(struct ip_options * dopt, struct sk_buff * skb)
 
 	sopt = &(IPCB(skb)->opt);
 
-	if (sopt->optlen == 0) {
-		dopt->optlen = 0;
+	if (sopt->optlen == 0)
 		return 0;
-	}
 
 	sptr = skb_network_header(skb);
 	dptr = dopt->__data;
@@ -157,7 +156,7 @@ int ip_options_echo(struct ip_options * dopt, struct sk_buff * skb)
 		dopt->optlen += optlen;
 	}
 	if (sopt->srr) {
-		unsigned char * start = sptr+sopt->srr;
+		unsigned char *start = sptr+sopt->srr;
 		__be32 faddr;
 
 		optlen = start[1];
@@ -352,7 +351,7 @@ int ip_options_compile(struct net *net,
 				goto error;
 			}
 			if (optptr[2] <= optlen) {
-				__be32 *timeptr = NULL;
+				unsigned char *timeptr = NULL;
 				if (optptr[2]+3 > optptr[1]) {
 					pp_ptr = optptr + 2;
 					goto error;
@@ -361,7 +360,7 @@ int ip_options_compile(struct net *net,
 				case IPOPT_TS_TSONLY:
 					opt->ts = optptr - iph;
 					if (skb)
-						timeptr = (__be32*)&optptr[optptr[2]-1];
+						timeptr = &optptr[optptr[2]-1];
 					opt->ts_needtime = 1;
 					optptr[2] += 4;
 					break;
@@ -373,7 +372,7 @@ int ip_options_compile(struct net *net,
 					opt->ts = optptr - iph;
 					if (rt) {
 						memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4);
-						timeptr = (__be32*)&optptr[optptr[2]+3];
+						timeptr = &optptr[optptr[2]+3];
 					}
 					opt->ts_needaddr = 1;
 					opt->ts_needtime = 1;
@@ -391,7 +390,7 @@ int ip_options_compile(struct net *net,
 						if (inet_addr_type(net, addr) == RTN_UNICAST)
 							break;
 						if (skb)
-							timeptr = (__be32*)&optptr[optptr[2]+3];
+							timeptr = &optptr[optptr[2]+3];
 					}
 					opt->ts_needtime = 1;
 					optptr[2] += 8;
@@ -405,10 +404,10 @@ int ip_options_compile(struct net *net,
 				}
 				if (timeptr) {
 					struct timespec tv;
-					__be32 midtime;
+					u32 midtime;
 					getnstimeofday(&tv);
-					midtime = htonl((tv.tv_sec % 86400) * MSEC_PER_SEC + tv.tv_nsec / NSEC_PER_MSEC);
-					memcpy(timeptr, &midtime, sizeof(__be32));
+					midtime = (tv.tv_sec % 86400) * MSEC_PER_SEC + tv.tv_nsec / NSEC_PER_MSEC;
+					put_unaligned_be32(midtime, timeptr);
 					opt->is_changed = 1;
 				}
 			} else {
@@ -499,19 +498,19 @@ void ip_options_undo(struct ip_options * opt)
 	}
 }
 
-static struct ip_options *ip_options_get_alloc(const int optlen)
+static struct ip_options_rcu *ip_options_get_alloc(const int optlen)
 {
-	return kzalloc(sizeof(struct ip_options) + ((optlen + 3) & ~3),
+	return kzalloc(sizeof(struct ip_options_rcu) + ((optlen + 3) & ~3),
 		       GFP_KERNEL);
 }
 
-static int ip_options_get_finish(struct net *net, struct ip_options **optp,
-				 struct ip_options *opt, int optlen)
+static int ip_options_get_finish(struct net *net, struct ip_options_rcu **optp,
				 struct ip_options_rcu *opt, int optlen)
 {
 	while (optlen & 3)
-		opt->__data[optlen++] = IPOPT_END;
-	opt->optlen = optlen;
-	if (optlen && ip_options_compile(net, opt, NULL)) {
+		opt->opt.__data[optlen++] = IPOPT_END;
+	opt->opt.optlen = optlen;
+	if (optlen && ip_options_compile(net, &opt->opt, NULL)) {
 		kfree(opt);
 		return -EINVAL;
 	}
@@ -520,29 +519,29 @@ static int ip_options_get_finish(struct net *net, struct ip_options **optp,
 	return 0;
 }
 
-int ip_options_get_from_user(struct net *net, struct ip_options **optp,
+int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp,
 			     unsigned char __user *data, int optlen)
 {
-	struct ip_options *opt = ip_options_get_alloc(optlen);
+	struct ip_options_rcu *opt = ip_options_get_alloc(optlen);
 
 	if (!opt)
 		return -ENOMEM;
-	if (optlen && copy_from_user(opt->__data, data, optlen)) {
+	if (optlen && copy_from_user(opt->opt.__data, data, optlen)) {
 		kfree(opt);
 		return -EFAULT;
 	}
 	return ip_options_get_finish(net, optp, opt, optlen);
 }
 
-int ip_options_get(struct net *net, struct ip_options **optp,
+int ip_options_get(struct net *net, struct ip_options_rcu **optp,
 		   unsigned char *data, int optlen)
 {
-	struct ip_options *opt = ip_options_get_alloc(optlen);
+	struct ip_options_rcu *opt = ip_options_get_alloc(optlen);
 
 	if (!opt)
 		return -ENOMEM;
 	if (optlen)
-		memcpy(opt->__data, data, optlen);
+		memcpy(opt->opt.__data, data, optlen);
 	return ip_options_get_finish(net, optp, opt, optlen);
 }
 
@@ -555,7 +554,7 @@ void ip_forward_options(struct sk_buff *skb)
 
 	if (opt->rr_needaddr) {
 		optptr = (unsigned char *)raw + opt->rr;
-		ip_rt_get_source(&optptr[optptr[2]-5], rt);
+		ip_rt_get_source(&optptr[optptr[2]-5], skb, rt);
 		opt->is_changed = 1;
 	}
 	if (opt->srr_is_hit) {
@@ -569,19 +568,18 @@ void ip_forward_options(struct sk_buff *skb)
 		     ) {
 			if (srrptr + 3 > srrspace)
 				break;
-			if (memcmp(&rt->rt_dst, &optptr[srrptr-1], 4) == 0)
+			if (memcmp(&ip_hdr(skb)->daddr, &optptr[srrptr-1], 4) == 0)
 				break;
 		}
 		if (srrptr + 3 <= srrspace) {
 			opt->is_changed = 1;
-			ip_rt_get_source(&optptr[srrptr-1], rt);
-			ip_hdr(skb)->daddr = rt->rt_dst;
+			ip_rt_get_source(&optptr[srrptr-1], skb, rt);
 			optptr[2] = srrptr+4;
 		} else if (net_ratelimit())
 			printk(KERN_CRIT "ip_forward(): Argh! Destination lost!\n");
 		if (opt->ts_needaddr) {
 			optptr = raw + opt->ts;
-			ip_rt_get_source(&optptr[optptr[2]-9], rt);
+			ip_rt_get_source(&optptr[optptr[2]-9], skb, rt);
 			opt->is_changed = 1;
 		}
 	}
@@ -603,7 +601,7 @@ int ip_options_rcv_srr(struct sk_buff *skb)
 	unsigned long orefdst;
 	int err;
 
-	if (!opt->srr || !rt)
+	if (!rt)
 		return 0;
 
 	if (skb->pkt_type != PACKET_HOST)
@@ -637,7 +635,7 @@ int ip_options_rcv_srr(struct sk_buff *skb)
 		if (rt2->rt_type != RTN_LOCAL)
 			break;
 		/* Superfast 8) loopback forward */
-		memcpy(&iph->daddr, &optptr[srrptr-1], 4);
+		iph->daddr = nexthop;
 		opt->is_changed = 1;
 	}
 	if (srrptr <= srrspace) {
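
The timestamp hunks above stop casting an odd offset inside the options area to __be32 * and instead store through put_unaligned_be32(), because the timestamp slot in an IP options block carries no alignment guarantee and a direct 32-bit store can fault on strict-alignment architectures. A small userspace sketch of what such a byte-wise big-endian store does; the helper below is my own stand-in, not the kernel's asm/unaligned.h implementation:

#include <stdint.h>
#include <stdio.h>

/* Sketch: store a 32-bit value big-endian at an arbitrarily aligned
 * address, one byte at a time, so no aligned-access trap can occur. */
static void put_unaligned_be32_sketch(uint32_t val, unsigned char *p)
{
	p[0] = val >> 24;
	p[1] = val >> 16;
	p[2] = val >> 8;
	p[3] = val;
}

int main(void)
{
	unsigned char opts[8] = {0};

	/* &opts[1] is deliberately misaligned for a uint32_t. */
	put_unaligned_be32_sketch(0x12345678, &opts[1]);
	for (int i = 0; i < 8; i++)
		printf("%02x ", opts[i]);
	printf("\n");	/* prints: 00 12 34 56 78 00 00 00 */
	return 0;
}

Note that the diff also drops the htonl() conversion: put_unaligned_be32() takes the host-order value and performs the byte ordering itself.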
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 459c011b1d4a..84f26e8e6c60 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -140,14 +140,14 @@ static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
  *
  */
 int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
-			  __be32 saddr, __be32 daddr, struct ip_options *opt)
+			  __be32 saddr, __be32 daddr, struct ip_options_rcu *opt)
 {
 	struct inet_sock *inet = inet_sk(sk);
 	struct rtable *rt = skb_rtable(skb);
 	struct iphdr *iph;
 
 	/* Build the IP header. */
-	skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
+	skb_push(skb, sizeof(struct iphdr) + (opt ? opt->opt.optlen : 0));
 	skb_reset_network_header(skb);
 	iph = ip_hdr(skb);
 	iph->version = 4;
@@ -158,14 +158,14 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
 	else
 		iph->frag_off = 0;
 	iph->ttl = ip_select_ttl(inet, &rt->dst);
-	iph->daddr = rt->rt_dst;
-	iph->saddr = rt->rt_src;
+	iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
+	iph->saddr = saddr;
 	iph->protocol = sk->sk_protocol;
 	ip_select_ident(iph, &rt->dst, sk);
 
-	if (opt && opt->optlen) {
-		iph->ihl += opt->optlen>>2;
-		ip_options_build(skb, opt, daddr, rt, 0);
+	if (opt && opt->opt.optlen) {
+		iph->ihl += opt->opt.optlen>>2;
+		ip_options_build(skb, &opt->opt, daddr, rt, 0);
 	}
 
 	skb->priority = sk->sk_priority;
@@ -312,11 +312,12 @@ int ip_output(struct sk_buff *skb)
 			    !(IPCB(skb)->flags & IPSKB_REROUTED));
 }
 
-int ip_queue_xmit(struct sk_buff *skb)
+int ip_queue_xmit(struct sk_buff *skb, struct flowi *fl)
 {
 	struct sock *sk = skb->sk;
 	struct inet_sock *inet = inet_sk(sk);
-	struct ip_options *opt = inet->opt;
+	struct ip_options_rcu *inet_opt;
+	struct flowi4 *fl4;
 	struct rtable *rt;
 	struct iphdr *iph;
 	int res;
@@ -325,6 +326,8 @@ int ip_queue_xmit(struct sk_buff *skb)
 	 * f.e. by something like SCTP.
 	 */
 	rcu_read_lock();
+	inet_opt = rcu_dereference(inet->inet_opt);
+	fl4 = &fl->u.ip4;
 	rt = skb_rtable(skb);
 	if (rt != NULL)
 		goto packet_routed;
@@ -336,14 +339,14 @@ int ip_queue_xmit(struct sk_buff *skb)
 
 		/* Use correct destination address if we have options. */
 		daddr = inet->inet_daddr;
-		if(opt && opt->srr)
-			daddr = opt->faddr;
+		if (inet_opt && inet_opt->opt.srr)
+			daddr = inet_opt->opt.faddr;
 
 		/* If this fails, retransmit mechanism of transport layer will
 		 * keep trying until route appears or the connection times
 		 * itself out.
 		 */
-		rt = ip_route_output_ports(sock_net(sk), sk,
+		rt = ip_route_output_ports(sock_net(sk), fl4, sk,
 					   daddr, inet->inet_saddr,
 					   inet->inet_dport,
 					   inet->inet_sport,
@@ -357,11 +360,11 @@ int ip_queue_xmit(struct sk_buff *skb)
 	skb_dst_set_noref(skb, &rt->dst);
 
 packet_routed:
-	if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
+	if (inet_opt && inet_opt->opt.is_strictroute && fl4->daddr != rt->rt_gateway)
 		goto no_route;
 
 	/* OK, we know where to send it, allocate and build IP header. */
-	skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
+	skb_push(skb, sizeof(struct iphdr) + (inet_opt ? inet_opt->opt.optlen : 0));
 	skb_reset_network_header(skb);
 	iph = ip_hdr(skb);
 	*((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
@@ -371,13 +374,13 @@ packet_routed:
 	iph->frag_off = 0;
 	iph->ttl = ip_select_ttl(inet, &rt->dst);
 	iph->protocol = sk->sk_protocol;
-	iph->saddr = rt->rt_src;
-	iph->daddr = rt->rt_dst;
+	iph->saddr = fl4->saddr;
+	iph->daddr = fl4->daddr;
 	/* Transport layer set skb->h.foo itself. */
 
-	if (opt && opt->optlen) {
-		iph->ihl += opt->optlen >> 2;
-		ip_options_build(skb, opt, inet->inet_daddr, rt, 0);
+	if (inet_opt && inet_opt->opt.optlen) {
+		iph->ihl += inet_opt->opt.optlen >> 2;
+		ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0);
 	}
 
 	ip_select_ident_more(iph, &rt->dst, sk,
@@ -773,7 +776,9 @@ static inline int ip_ufo_append_data(struct sock *sk,
 			       (length - transhdrlen));
 }
 
-static int __ip_append_data(struct sock *sk, struct sk_buff_head *queue,
+static int __ip_append_data(struct sock *sk,
+			    struct flowi4 *fl4,
+			    struct sk_buff_head *queue,
 			    struct inet_cork *cork,
 			    int getfrag(void *from, char *to, int offset,
 					int len, int odd, struct sk_buff *skb),
@@ -794,9 +799,9 @@ static int __ip_append_data(struct sock *sk, struct sk_buff_head *queue,
 	int csummode = CHECKSUM_NONE;
 	struct rtable *rt = (struct rtable *)cork->dst;
 
-	exthdrlen = transhdrlen ? rt->dst.header_len : 0;
-	length += exthdrlen;
-	transhdrlen += exthdrlen;
+	skb = skb_peek_tail(queue);
+
+	exthdrlen = !skb ? rt->dst.header_len : 0;
 	mtu = cork->fragsize;
 
 	hh_len = LL_RESERVED_SPACE(rt->dst.dev);
@@ -805,7 +810,7 @@ static int __ip_append_data(struct sock *sk, struct sk_buff_head *queue,
 	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
 
 	if (cork->length + length > 0xFFFF - fragheaderlen) {
-		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport,
+		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
 			       mtu-exthdrlen);
 		return -EMSGSIZE;
 	}
@@ -820,12 +825,10 @@ static int __ip_append_data(struct sock *sk, struct sk_buff_head *queue,
 	    !exthdrlen)
 		csummode = CHECKSUM_PARTIAL;
 
-	skb = skb_peek_tail(queue);
-
 	cork->length += length;
 	if (((length > mtu) || (skb && skb_is_gso(skb))) &&
 	    (sk->sk_protocol == IPPROTO_UDP) &&
-	    (rt->dst.dev->features & NETIF_F_UFO)) {
+	    (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len) {
 		err = ip_ufo_append_data(sk, queue, getfrag, from, length,
 					 hh_len, fragheaderlen, transhdrlen,
 					 mtu, flags);
@@ -878,17 +881,16 @@ alloc_new_skb:
 			else
 				alloclen = fraglen;
 
+			alloclen += exthdrlen;
+
 			/* The last fragment gets additional space at tail.
 			 * Note, with MSG_MORE we overallocate on fragments,
 			 * because we have no idea what fragment will be
 			 * the last.
 			 */
-			if (datalen == length + fraggap) {
+			if (datalen == length + fraggap)
 				alloclen += rt->dst.trailer_len;
-				/* make sure mtu is not reached */
-				if (datalen > mtu - fragheaderlen - rt->dst.trailer_len)
-					datalen -= ALIGN(rt->dst.trailer_len, 8);
-			}
+
 			if (transhdrlen) {
 				skb = sock_alloc_send_skb(sk,
 						alloclen + hh_len + 15,
@@ -921,11 +923,11 @@ alloc_new_skb:
 			/*
 			 *	Find where to start putting bytes.
 			 */
-			data = skb_put(skb, fraglen);
+			data = skb_put(skb, fraglen + exthdrlen);
 			skb_set_network_header(skb, exthdrlen);
 			skb->transport_header = (skb->network_header +
 						 fragheaderlen);
-			data += fragheaderlen;
+			data += fragheaderlen + exthdrlen;
 
 			if (fraggap) {
 				skb->csum = skb_copy_and_csum_bits(
@@ -1033,7 +1035,7 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
 			 struct ipcm_cookie *ipc, struct rtable **rtp)
 {
 	struct inet_sock *inet = inet_sk(sk);
-	struct ip_options *opt;
+	struct ip_options_rcu *opt;
 	struct rtable *rt;
 
 	/*
@@ -1047,7 +1049,7 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
 			if (unlikely(cork->opt == NULL))
 				return -ENOBUFS;
 		}
-		memcpy(cork->opt, opt, sizeof(struct ip_options) + opt->optlen);
+		memcpy(cork->opt, &opt->opt, sizeof(struct ip_options) + opt->opt.optlen);
 		cork->flags |= IPCORK_OPT;
 		cork->addr = ipc->addr;
 	}
@@ -1059,7 +1061,7 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
 	 */
 	*rtp = NULL;
 	cork->fragsize = inet->pmtudisc == IP_PMTUDISC_PROBE ?
-			 rt->dst.dev->mtu : dst_mtu(rt->dst.path);
+			 rt->dst.dev->mtu : dst_mtu(&rt->dst);
 	cork->dst = &rt->dst;
 	cork->length = 0;
 	cork->tx_flags = ipc->tx_flags;
@@ -1080,7 +1082,7 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
 *
 *	LATER: length must be adjusted by pad at tail, when it is required.
 */
-int ip_append_data(struct sock *sk,
+int ip_append_data(struct sock *sk, struct flowi4 *fl4,
 		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
 		   void *from, int length, int transhdrlen,
@@ -1094,24 +1096,25 @@ int ip_append_data(struct sock *sk,
 		return 0;
 
 	if (skb_queue_empty(&sk->sk_write_queue)) {
-		err = ip_setup_cork(sk, &inet->cork, ipc, rtp);
+		err = ip_setup_cork(sk, &inet->cork.base, ipc, rtp);
 		if (err)
 			return err;
 	} else {
 		transhdrlen = 0;
 	}
 
-	return __ip_append_data(sk, &sk->sk_write_queue, &inet->cork, getfrag,
+	return __ip_append_data(sk, fl4, &sk->sk_write_queue, &inet->cork.base, getfrag,
 				from, length, transhdrlen, flags);
 }
 
-ssize_t ip_append_page(struct sock *sk, struct page *page,
+ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
 		       int offset, size_t size, int flags)
 {
 	struct inet_sock *inet = inet_sk(sk);
 	struct sk_buff *skb;
 	struct rtable *rt;
 	struct ip_options *opt = NULL;
+	struct inet_cork *cork;
 	int hh_len;
 	int mtu;
 	int len;
@@ -1127,28 +1130,29 @@ ssize_t ip_append_page(struct sock *sk, struct page *page,
 	if (skb_queue_empty(&sk->sk_write_queue))
 		return -EINVAL;
 
-	rt = (struct rtable *)inet->cork.dst;
-	if (inet->cork.flags & IPCORK_OPT)
-		opt = inet->cork.opt;
+	cork = &inet->cork.base;
+	rt = (struct rtable *)cork->dst;
+	if (cork->flags & IPCORK_OPT)
+		opt = cork->opt;
 
 	if (!(rt->dst.dev->features&NETIF_F_SG))
 		return -EOPNOTSUPP;
 
 	hh_len = LL_RESERVED_SPACE(rt->dst.dev);
-	mtu = inet->cork.fragsize;
+	mtu = cork->fragsize;
 
 	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
 	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
 
-	if (inet->cork.length + size > 0xFFFF - fragheaderlen) {
-		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport, mtu);
+	if (cork->length + size > 0xFFFF - fragheaderlen) {
+		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport, mtu);
 		return -EMSGSIZE;
 	}
 
 	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
 		return -EINVAL;
 
-	inet->cork.length += size;
+	cork->length += size;
 	if ((size + skb->len > mtu) &&
 	    (sk->sk_protocol == IPPROTO_UDP) &&
 	    (rt->dst.dev->features & NETIF_F_UFO)) {
@@ -1243,7 +1247,7 @@ ssize_t ip_append_page(struct sock *sk, struct page *page,
 	return 0;
 
 error:
-	inet->cork.length -= size;
+	cork->length -= size;
 	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
 	return err;
 }
@@ -1262,6 +1266,7 @@ static void ip_cork_release(struct inet_cork *cork)
 *	and push them out.
 */
 struct sk_buff *__ip_make_skb(struct sock *sk,
+			      struct flowi4 *fl4,
 			      struct sk_buff_head *queue,
 			      struct inet_cork *cork)
 {
@@ -1319,17 +1324,18 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
 	iph = (struct iphdr *)skb->data;
 	iph->version = 4;
 	iph->ihl = 5;
-	if (opt) {
-		iph->ihl += opt->optlen>>2;
-		ip_options_build(skb, opt, cork->addr, rt, 0);
-	}
 	iph->tos = inet->tos;
 	iph->frag_off = df;
 	ip_select_ident(iph, &rt->dst, sk);
 	iph->ttl = ttl;
 	iph->protocol = sk->sk_protocol;
-	iph->saddr = rt->rt_src;
-	iph->daddr = rt->rt_dst;
+	iph->saddr = fl4->saddr;
+	iph->daddr = fl4->daddr;
+
+	if (opt) {
+		iph->ihl += opt->optlen>>2;
+		ip_options_build(skb, opt, cork->addr, rt, 0);
+	}
 
 	skb->priority = sk->sk_priority;
 	skb->mark = sk->sk_mark;
@@ -1365,11 +1371,11 @@ int ip_send_skb(struct sk_buff *skb)
 	return err;
 }
 
-int ip_push_pending_frames(struct sock *sk)
+int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4)
 {
 	struct sk_buff *skb;
 
-	skb = ip_finish_skb(sk);
+	skb = ip_finish_skb(sk, fl4);
 	if (!skb)
 		return 0;
 
@@ -1394,17 +1400,18 @@ static void __ip_flush_pending_frames(struct sock *sk,
 
 void ip_flush_pending_frames(struct sock *sk)
 {
-	__ip_flush_pending_frames(sk, &sk->sk_write_queue, &inet_sk(sk)->cork);
+	__ip_flush_pending_frames(sk, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
 }
 
 struct sk_buff *ip_make_skb(struct sock *sk,
+			    struct flowi4 *fl4,
 			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
 			    void *from, int length, int transhdrlen,
 			    struct ipcm_cookie *ipc, struct rtable **rtp,
 			    unsigned int flags)
 {
-	struct inet_cork cork = {};
+	struct inet_cork cork;
 	struct sk_buff_head queue;
 	int err;
 
@@ -1413,18 +1420,21 @@ struct sk_buff *ip_make_skb(struct sock *sk,
 
 	__skb_queue_head_init(&queue);
 
+	cork.flags = 0;
+	cork.addr = 0;
+	cork.opt = NULL;
 	err = ip_setup_cork(sk, &cork, ipc, rtp);
 	if (err)
 		return ERR_PTR(err);
 
-	err = __ip_append_data(sk, &queue, &cork, getfrag,
+	err = __ip_append_data(sk, fl4, &queue, &cork, getfrag,
 			       from, length, transhdrlen, flags);
 	if (err) {
 		__ip_flush_pending_frames(sk, &queue, &cork);
 		return ERR_PTR(err);
 	}
 
-	return __ip_make_skb(sk, &queue, &cork);
+	return __ip_make_skb(sk, fl4, &queue, &cork);
 }
 
 /*
@@ -1447,48 +1457,39 @@ static int ip_reply_glue_bits(void *dptr, char *to, int offset,
 *	Should run single threaded per socket because it uses the sock
 *	structure to pass arguments.
 */
-void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
-		   unsigned int len)
+void ip_send_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr,
+		   struct ip_reply_arg *arg, unsigned int len)
 {
 	struct inet_sock *inet = inet_sk(sk);
-	struct {
-		struct ip_options	opt;
-		char			data[40];
-	} replyopts;
+	struct ip_options_data replyopts;
 	struct ipcm_cookie ipc;
-	__be32 daddr;
+	struct flowi4 fl4;
 	struct rtable *rt = skb_rtable(skb);
 
-	if (ip_options_echo(&replyopts.opt, skb))
+	if (ip_options_echo(&replyopts.opt.opt, skb))
 		return;
 
-	daddr = ipc.addr = rt->rt_src;
+	ipc.addr = daddr;
 	ipc.opt = NULL;
 	ipc.tx_flags = 0;
 
-	if (replyopts.opt.optlen) {
+	if (replyopts.opt.opt.optlen) {
 		ipc.opt = &replyopts.opt;
 
-		if (ipc.opt->srr)
-			daddr = replyopts.opt.faddr;
+		if (replyopts.opt.opt.srr)
+			daddr = replyopts.opt.opt.faddr;
 	}
 
-	{
-		struct flowi4 fl4 = {
-			.flowi4_oif = arg->bound_dev_if,
-			.daddr = daddr,
-			.saddr = rt->rt_spec_dst,
-			.flowi4_tos = RT_TOS(ip_hdr(skb)->tos),
-			.fl4_sport = tcp_hdr(skb)->dest,
-			.fl4_dport = tcp_hdr(skb)->source,
-			.flowi4_proto = sk->sk_protocol,
-			.flowi4_flags = ip_reply_arg_flowi_flags(arg),
-		};
-		security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
-		rt = ip_route_output_key(sock_net(sk), &fl4);
-		if (IS_ERR(rt))
-			return;
-	}
+	flowi4_init_output(&fl4, arg->bound_dev_if, 0,
+			   RT_TOS(ip_hdr(skb)->tos),
+			   RT_SCOPE_UNIVERSE, sk->sk_protocol,
+			   ip_reply_arg_flowi_flags(arg),
+			   daddr, rt->rt_spec_dst,
+			   tcp_hdr(skb)->source, tcp_hdr(skb)->dest);
+	security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
+	rt = ip_route_output_key(sock_net(sk), &fl4);
+	if (IS_ERR(rt))
+		return;
 
 	/* And let IP do all the hard work.
 
@@ -1501,7 +1502,7 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *ar
 	sk->sk_priority = skb->priority;
 	sk->sk_protocol = ip_hdr(skb)->protocol;
 	sk->sk_bound_dev_if = arg->bound_dev_if;
-	ip_append_data(sk, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
+	ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
 		       &ipc, &rt, MSG_DONTWAIT);
 	if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
 		if (arg->csumoffset >= 0)
@@ -1509,7 +1510,7 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *ar
 			  arg->csumoffset) = csum_fold(csum_add(skb->csum,
								arg->csum));
 		skb->ip_summed = CHECKSUM_NONE;
-		ip_push_pending_frames(sk);
+		ip_push_pending_frames(sk, &fl4);
 	}
 
 	bh_unlock_sock(sk);
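
Across ip_output.c, the same struct flowi4 is now threaded from the route lookup through ip_append_data() down to __ip_make_skb(), because with rt->rt_src/rt_dst going away the addresses chosen at route time must survive until the corked header is finally built. A toy userspace model of that corking shape, with the flow key standing in for struct flowi4 and no claim to match the kernel structures:

#include <stdio.h>
#include <string.h>

/* Toy model of the corking pattern: append payload now, build the
 * single header only when the pending data is pushed.  The flow key
 * captured at route time supplies the header addresses. */
struct flow_key { unsigned int saddr, daddr; };

static char pending[256];
static size_t pending_len;

static void append_data(const char *buf, size_t len)
{
	memcpy(pending + pending_len, buf, len);	/* no header yet */
	pending_len += len;
}

static void push_pending(const struct flow_key *fl)
{
	printf("hdr: src=%08x dst=%08x len=%zu\n",
	       fl->saddr, fl->daddr, pending_len);
	printf("payload: %.*s\n", (int)pending_len, pending);
	pending_len = 0;
}

int main(void)
{
	struct flow_key fl = { 0x0a000001, 0x0a000002 };	/* from lookup */

	append_data("hello ", 6);
	append_data("world", 5);
	push_pending(&fl);	/* one header for all appended data */
	return 0;
}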
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 3948c86e59ca..ab0c9efd1efa 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -131,7 +131,7 @@ static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
 static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
 {
 	struct sockaddr_in sin;
-	struct iphdr *iph = ip_hdr(skb);
+	const struct iphdr *iph = ip_hdr(skb);
 	__be16 *ports = (__be16 *)skb_transport_header(skb);
 
 	if (skb_transport_offset(skb) + 4 > skb->len)
@@ -451,6 +451,11 @@ out:
 }
 
 
+static void opt_kfree_rcu(struct rcu_head *head)
+{
+	kfree(container_of(head, struct ip_options_rcu, rcu));
+}
+
 /*
 *	Socket option code for IP. This is the end of the line after any
 *	TCP,UDP etc options on an IP socket.
@@ -497,13 +502,16 @@ static int do_ip_setsockopt(struct sock *sk, int level,
 	switch (optname) {
 	case IP_OPTIONS:
 	{
-		struct ip_options *opt = NULL;
+		struct ip_options_rcu *old, *opt = NULL;
+
 		if (optlen > 40)
 			goto e_inval;
 		err = ip_options_get_from_user(sock_net(sk), &opt,
 					       optval, optlen);
 		if (err)
 			break;
+		old = rcu_dereference_protected(inet->inet_opt,
+						sock_owned_by_user(sk));
 		if (inet->is_icsk) {
 			struct inet_connection_sock *icsk = inet_csk(sk);
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
@@ -512,17 +520,18 @@ static int do_ip_setsockopt(struct sock *sk, int level,
 				     (TCPF_LISTEN | TCPF_CLOSE)) &&
			     inet->inet_daddr != LOOPBACK4_IPV6)) {
 #endif
-				if (inet->opt)
-					icsk->icsk_ext_hdr_len -= inet->opt->optlen;
+				if (old)
+					icsk->icsk_ext_hdr_len -= old->opt.optlen;
 				if (opt)
-					icsk->icsk_ext_hdr_len += opt->optlen;
+					icsk->icsk_ext_hdr_len += opt->opt.optlen;
 				icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 			}
 #endif
 		}
-		opt = xchg(&inet->opt, opt);
-		kfree(opt);
+		rcu_assign_pointer(inet->inet_opt, opt);
+		if (old)
+			call_rcu(&old->rcu, opt_kfree_rcu);
 		break;
 	}
 	case IP_PKTINFO:
@@ -1081,12 +1090,16 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
 	case IP_OPTIONS:
 	{
 		unsigned char optbuf[sizeof(struct ip_options)+40];
-		struct ip_options * opt = (struct ip_options *)optbuf;
+		struct ip_options *opt = (struct ip_options *)optbuf;
+		struct ip_options_rcu *inet_opt;
+
+		inet_opt = rcu_dereference_protected(inet->inet_opt,
						     sock_owned_by_user(sk));
 		opt->optlen = 0;
-		if (inet->opt)
-			memcpy(optbuf, inet->opt,
-			       sizeof(struct ip_options)+
-			       inet->opt->optlen);
+		if (inet_opt)
+			memcpy(optbuf, &inet_opt->opt,
+			       sizeof(struct ip_options) +
+			       inet_opt->opt.optlen);
 		release_sock(sk);
 
 		if (opt->optlen == 0)
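
The IP_OPTIONS hunks convert inet->opt from a plain pointer swapped with xchg()+kfree() into an RCU-managed inet->inet_opt: the writer publishes the new block with rcu_assign_pointer() and retires the old one with call_rcu(), so lockless readers on the transmit path can never dereference a freed block. A compressed single-threaded model of the publish/retire ordering, using a C11 release store as a stand-in for rcu_assign_pointer() and an immediate free() as a stand-in for the deferred call_rcu():

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct opts { int optlen; };

/* Readers load this without a lock. */
static _Atomic(struct opts *) cur_opts;

static void set_opts(struct opts *newp)
{
	struct opts *old = atomic_load_explicit(&cur_opts, memory_order_relaxed);

	/* Release store: the new block is fully initialized before it
	 * becomes visible, like rcu_assign_pointer(). */
	atomic_store_explicit(&cur_opts, newp, memory_order_release);

	/* The real kernel code defers this with call_rcu() until every
	 * reader that might still hold 'old' has finished. */
	free(old);
}

int main(void)
{
	struct opts *o = malloc(sizeof(*o));

	o->optlen = 4;
	set_opts(o);

	struct opts *r = atomic_load_explicit(&cur_opts, memory_order_acquire);
	printf("optlen=%d\n", r->optlen);
	set_opts(NULL);
	return 0;
}

Wrapping struct ip_options in struct ip_options_rcu (which adds the rcu_head) is what makes the call_rcu() callback above, opt_kfree_rcu(), possible.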
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index 629067571f02..c857f6f49b03 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -27,7 +27,7 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
 {
 	struct net *net = dev_net(skb->dev);
 	__be32 spi;
-	struct iphdr *iph = (struct iphdr *)skb->data;
+	const struct iphdr *iph = (const struct iphdr *)skb->data;
 	struct ip_comp_hdr *ipch = (struct ip_comp_hdr *)(skb->data+(iph->ihl<<2));
 	struct xfrm_state *x;
 
@@ -36,7 +36,7 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
 		return;
 
 	spi = htonl(ntohs(ipch->cpi));
-	x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr,
+	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
 			      spi, IPPROTO_COMP, AF_INET);
 	if (!x)
 		return;
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index cbff2ecccf3d..ab7e5542c1cf 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -87,8 +87,8 @@
 #endif
 
 /* Define the friendly delay before and after opening net devices */
-#define CONF_PRE_OPEN		500	/* Before opening: 1/2 second */
-#define CONF_POST_OPEN		1	/* After opening: 1 second */
+#define CONF_POST_OPEN		10	/* After opening: 10 msecs */
+#define CONF_CARRIER_TIMEOUT	120000	/* Wait for carrier timeout */
 
 /* Define the timeout for waiting for a DHCP/BOOTP/RARP reply */
 #define CONF_OPEN_RETRIES 	2	/* (Re)open devices twice */
@@ -188,14 +188,14 @@ struct ic_device {
 static struct ic_device *ic_first_dev __initdata = NULL;/* List of open device */
 static struct net_device *ic_dev __initdata = NULL;	/* Selected device */
 
-static bool __init ic_device_match(struct net_device *dev)
+static bool __init ic_is_init_dev(struct net_device *dev)
 {
-	if (user_dev_name[0] ? !strcmp(dev->name, user_dev_name) :
+	if (dev->flags & IFF_LOOPBACK)
+		return false;
+	return user_dev_name[0] ? !strcmp(dev->name, user_dev_name) :
 	    (!(dev->flags & IFF_LOOPBACK) &&
 	     (dev->flags & (IFF_POINTOPOINT|IFF_BROADCAST)) &&
-	     strncmp(dev->name, "dummy", 5)))
-		return true;
-	return false;
+	     strncmp(dev->name, "dummy", 5));
 }
 
 static int __init ic_open_devs(void)
@@ -203,6 +203,7 @@ static int __init ic_open_devs(void)
 	struct ic_device *d, **last;
 	struct net_device *dev;
 	unsigned short oflags;
+	unsigned long start;
 
 	last = &ic_first_dev;
 	rtnl_lock();
@@ -216,9 +217,7 @@ static int __init ic_open_devs(void)
 	}
 
 	for_each_netdev(&init_net, dev) {
-		if (dev->flags & IFF_LOOPBACK)
-			continue;
-		if (ic_device_match(dev)) {
+		if (ic_is_init_dev(dev)) {
 			int able = 0;
 			if (dev->mtu >= 364)
 				able |= IC_BOOTP;
@@ -252,6 +251,17 @@ static int __init ic_open_devs(void)
 			dev->name, able, d->xid));
 		}
 	}
+
+	/* wait for a carrier on at least one device */
+	start = jiffies;
+	while (jiffies - start < msecs_to_jiffies(CONF_CARRIER_TIMEOUT)) {
+		for_each_netdev(&init_net, dev)
+			if (ic_is_init_dev(dev) && netif_carrier_ok(dev))
+				goto have_carrier;
+
+		msleep(1);
+	}
+have_carrier:
 	rtnl_unlock();
 
 	*last = NULL;
@@ -1324,14 +1334,13 @@ static int __init wait_for_devices(void)
 {
 	int i;
 
-	msleep(CONF_PRE_OPEN);
 	for (i = 0; i < DEVICE_WAIT_MAX; i++) {
 		struct net_device *dev;
 		int found = 0;
 
 		rtnl_lock();
 		for_each_netdev(&init_net, dev) {
-			if (ic_device_match(dev)) {
+			if (ic_is_init_dev(dev)) {
 				found = 1;
 				break;
 			}
@@ -1378,7 +1387,7 @@ static int __init ip_auto_config(void)
 		return err;
 
 	/* Give drivers a chance to settle */
-	ssleep(CONF_POST_OPEN);
+	msleep(CONF_POST_OPEN);
 
 	/*
 	 * If the config information is insufficient (e.g., our IP address or
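
The ipconfig.c hunks replace fixed pre/post sleeps with a bounded poll: keep checking until some candidate device reports carrier, or give up after the CONF_CARRIER_TIMEOUT budget. The same deadline-poll shape in portable userspace C; check_ready() below is a placeholder for the per-device netif_carrier_ok() test, not anything the kernel provides:

#define _POSIX_C_SOURCE 199309L
#include <stdio.h>
#include <time.h>

/* Placeholder predicate; the kernel loop instead asks
 * netif_carrier_ok() for each candidate device. */
static int check_ready(void)
{
	static int calls;
	return ++calls > 3;	/* "carrier" appears on the 4th poll */
}

static int wait_for_ready(long timeout_ms)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if (check_ready())
			return 0;
		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) * 1000 +
		    (now.tv_nsec - start.tv_nsec) / 1000000 >= timeout_ms)
			return -1;	/* deadline reached, give up */
		nanosleep(&(struct timespec){ .tv_nsec = 1000000 }, NULL);
	}
}

int main(void)
{
	printf("ready: %s\n", wait_for_ready(120000) == 0 ? "yes" : "timed out");
	return 0;
}

Polling with a deadline keeps boot fast on links that come up quickly while still tolerating slow switch negotiation, which a fixed sleep cannot do.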
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index bfc17c5914e7..378b20b7ca6e 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -276,11 +276,6 @@ static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
 
 	dev_net_set(dev, net);
 
-	if (strchr(name, '%')) {
-		if (dev_alloc_name(dev, name) < 0)
-			goto failed_free;
-	}
-
 	nt = netdev_priv(dev);
 	nt->parms = *parms;
 
@@ -319,7 +314,7 @@ static int ipip_err(struct sk_buff *skb, u32 info)
    8 bytes of packet payload. It means, that precise relaying of
    ICMP in the real Internet is absolutely infeasible.
  */
-	struct iphdr *iph = (struct iphdr *)skb->data;
+	const struct iphdr *iph = (const struct iphdr *)skb->data;
 	const int type = icmp_hdr(skb)->type;
 	const int code = icmp_hdr(skb)->code;
 	struct ip_tunnel *t;
@@ -433,15 +428,16 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ip_tunnel *tunnel = netdev_priv(dev);
 	struct pcpu_tstats *tstats;
-	struct iphdr *tiph = &tunnel->parms.iph;
+	const struct iphdr *tiph = &tunnel->parms.iph;
 	u8 tos = tunnel->parms.iph.tos;
 	__be16 df = tiph->frag_off;
 	struct rtable *rt;			/* Route to the other host */
 	struct net_device *tdev;		/* Device to other host */
-	struct iphdr *old_iph = ip_hdr(skb);
+	const struct iphdr *old_iph = ip_hdr(skb);
 	struct iphdr *iph;			/* Our new IP header */
 	unsigned int max_headroom;		/* The extra header space needed */
 	__be32 dst = tiph->daddr;
+	struct flowi4 fl4;
 	int mtu;
 
 	if (skb->protocol != htons(ETH_P_IP))
@@ -460,7 +456,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 			goto tx_error_icmp;
 	}
 
-	rt = ip_route_output_ports(dev_net(dev), NULL,
+	rt = ip_route_output_ports(dev_net(dev), &fl4, NULL,
 				   dst, tiph->saddr,
 				   0, 0,
 				   IPPROTO_IPIP, RT_TOS(tos),
@@ -549,8 +545,8 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 	iph->frag_off = df;
 	iph->protocol = IPPROTO_IPIP;
 	iph->tos = INET_ECN_encapsulate(tos, old_iph->tos);
-	iph->daddr = rt->rt_dst;
-	iph->saddr = rt->rt_src;
+	iph->daddr = fl4.daddr;
+	iph->saddr = fl4.saddr;
 
 	if ((iph->ttl = tiph->ttl) == 0)
 		iph->ttl = old_iph->ttl;
@@ -572,19 +568,21 @@ static void ipip_tunnel_bind_dev(struct net_device *dev)
 {
 	struct net_device *tdev = NULL;
 	struct ip_tunnel *tunnel;
-	struct iphdr *iph;
+	const struct iphdr *iph;
 
 	tunnel = netdev_priv(dev);
 	iph = &tunnel->parms.iph;
 
 	if (iph->daddr) {
-		struct rtable *rt = ip_route_output_ports(dev_net(dev), NULL,
-						iph->daddr, iph->saddr,
-						0, 0,
-						IPPROTO_IPIP,
-						RT_TOS(iph->tos),
-						tunnel->parms.link);
-
+		struct rtable *rt;
+		struct flowi4 fl4;
+
+		rt = ip_route_output_ports(dev_net(dev), &fl4, NULL,
+					   iph->daddr, iph->saddr,
+					   0, 0,
+					   IPPROTO_IPIP,
+					   RT_TOS(iph->tos),
+					   tunnel->parms.link);
 		if (!IS_ERR(rt)) {
 			tdev = rt->dst.dev;
 			ip_rt_put(rt);
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 1f62eaeb6de4..30a7763c400e 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1549,7 +1549,7 @@ static struct notifier_block ip_mr_notifier = {
1549static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr) 1549static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
1550{ 1550{
1551 struct iphdr *iph; 1551 struct iphdr *iph;
1552 struct iphdr *old_iph = ip_hdr(skb); 1552 const struct iphdr *old_iph = ip_hdr(skb);
1553 1553
1554 skb_push(skb, sizeof(struct iphdr)); 1554 skb_push(skb, sizeof(struct iphdr));
1555 skb->transport_header = skb->network_header; 1555 skb->transport_header = skb->network_header;
@@ -1595,6 +1595,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
1595 struct vif_device *vif = &mrt->vif_table[vifi]; 1595 struct vif_device *vif = &mrt->vif_table[vifi];
1596 struct net_device *dev; 1596 struct net_device *dev;
1597 struct rtable *rt; 1597 struct rtable *rt;
1598 struct flowi4 fl4;
1598 int encap = 0; 1599 int encap = 0;
1599 1600
1600 if (vif->dev == NULL) 1601 if (vif->dev == NULL)
@@ -1612,7 +1613,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
1612#endif 1613#endif
1613 1614
1614 if (vif->flags & VIFF_TUNNEL) { 1615 if (vif->flags & VIFF_TUNNEL) {
1615 rt = ip_route_output_ports(net, NULL, 1616 rt = ip_route_output_ports(net, &fl4, NULL,
1616 vif->remote, vif->local, 1617 vif->remote, vif->local,
1617 0, 0, 1618 0, 0,
1618 IPPROTO_IPIP, 1619 IPPROTO_IPIP,
@@ -1621,7 +1622,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
1621 goto out_free; 1622 goto out_free;
1622 encap = sizeof(struct iphdr); 1623 encap = sizeof(struct iphdr);
1623 } else { 1624 } else {
-		rt = ip_route_output_ports(net, NULL, iph->daddr, 0,
+		rt = ip_route_output_ports(net, &fl4, NULL, iph->daddr, 0,
 					   0, 0,
 					   IPPROTO_IPIP,
 					   RT_TOS(iph->tos), vif->link);
@@ -1788,12 +1789,14 @@ dont_forward:
 	return 0;
 }
 
-static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct rtable *rt)
+static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
 {
+	struct rtable *rt = skb_rtable(skb);
+	struct iphdr *iph = ip_hdr(skb);
 	struct flowi4 fl4 = {
-		.daddr = rt->rt_key_dst,
-		.saddr = rt->rt_key_src,
-		.flowi4_tos = rt->rt_tos,
+		.daddr = iph->daddr,
+		.saddr = iph->saddr,
+		.flowi4_tos = iph->tos,
 		.flowi4_oif = rt->rt_oif,
 		.flowi4_iif = rt->rt_iif,
 		.flowi4_mark = rt->rt_mark,
@@ -1825,7 +1828,7 @@ int ip_mr_input(struct sk_buff *skb)
 	if (IPCB(skb)->flags & IPSKB_FORWARDED)
 		goto dont_forward;
 
-	mrt = ipmr_rt_fib_lookup(net, skb_rtable(skb));
+	mrt = ipmr_rt_fib_lookup(net, skb);
 	if (IS_ERR(mrt)) {
 		kfree_skb(skb);
 		return PTR_ERR(mrt);
@@ -1957,7 +1960,7 @@ int pim_rcv_v1(struct sk_buff *skb)
 
 	pim = igmp_hdr(skb);
 
-	mrt = ipmr_rt_fib_lookup(net, skb_rtable(skb));
+	mrt = ipmr_rt_fib_lookup(net, skb);
 	if (IS_ERR(mrt))
 		goto drop;
 	if (!mrt->mroute_do_pim ||
@@ -1989,7 +1992,7 @@ static int pim_rcv(struct sk_buff *skb)
 		     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
 		goto drop;
 
-	mrt = ipmr_rt_fib_lookup(net, skb_rtable(skb));
+	mrt = ipmr_rt_fib_lookup(net, skb);
 	if (IS_ERR(mrt))
 		goto drop;
 	if (__pim_rcv(mrt, skb, sizeof(*pim))) {
@@ -2038,20 +2041,20 @@ rtattr_failure:
 	return -EMSGSIZE;
 }
 
-int ipmr_get_route(struct net *net,
-		   struct sk_buff *skb, struct rtmsg *rtm, int nowait)
+int ipmr_get_route(struct net *net, struct sk_buff *skb,
+		   __be32 saddr, __be32 daddr,
+		   struct rtmsg *rtm, int nowait)
 {
-	int err;
-	struct mr_table *mrt;
 	struct mfc_cache *cache;
-	struct rtable *rt = skb_rtable(skb);
+	struct mr_table *mrt;
+	int err;
 
 	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
 	if (mrt == NULL)
 		return -ENOENT;
 
 	rcu_read_lock();
-	cache = ipmr_cache_find(mrt, rt->rt_src, rt->rt_dst);
+	cache = ipmr_cache_find(mrt, saddr, daddr);
 
 	if (cache == NULL) {
 		struct sk_buff *skb2;
@@ -2084,8 +2087,8 @@ int ipmr_get_route(struct net *net,
 		skb_reset_network_header(skb2);
 		iph = ip_hdr(skb2);
 		iph->ihl = sizeof(struct iphdr) >> 2;
-		iph->saddr = rt->rt_src;
-		iph->daddr = rt->rt_dst;
+		iph->saddr = saddr;
+		iph->daddr = daddr;
 		iph->version = 0;
 		err = ipmr_cache_unresolved(mrt, vif, skb2);
 		read_unlock(&mrt_lock);
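The net effect of the ipmr changes above: the multicast-routing table lookup is now keyed off the packet's own IP header instead of the routing-cache key fields (rt_key_dst and friends), which is why all three callers can simply hand over the skb. Consolidated, the lookup helper now reads roughly as below; the tail is outside the hunks shown, so the ipmr_fib_lookup() call is reproduced from surrounding context and should be treated as a sketch, not the verbatim patch.

static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct iphdr *iph = ip_hdr(skb);
	/* flow key built from the header the packet actually carries */
	struct flowi4 fl4 = {
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowi4_tos = iph->tos,
		.flowi4_oif = rt->rt_oif,
		.flowi4_iif = rt->rt_iif,
		.flowi4_mark = rt->rt_mark,
	};
	struct mr_table *mrt;
	int err;

	err = ipmr_fib_lookup(net, &fl4, &mrt);
	if (err)
		return ERR_PTR(err);
	return mrt;
}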
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index 4614babdc45f..2e97e3ec1eb7 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -17,51 +17,35 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
 	const struct iphdr *iph = ip_hdr(skb);
 	struct rtable *rt;
 	struct flowi4 fl4 = {};
-	unsigned long orefdst;
+	__be32 saddr = iph->saddr;
+	__u8 flags = 0;
 	unsigned int hh_len;
-	unsigned int type;
 
-	type = inet_addr_type(net, iph->saddr);
-	if (skb->sk && inet_sk(skb->sk)->transparent)
-		type = RTN_LOCAL;
-	if (addr_type == RTN_UNSPEC)
-		addr_type = type;
+	if (!skb->sk && addr_type != RTN_LOCAL) {
+		if (addr_type == RTN_UNSPEC)
+			addr_type = inet_addr_type(net, saddr);
+		if (addr_type == RTN_LOCAL || addr_type == RTN_UNICAST)
+			flags |= FLOWI_FLAG_ANYSRC;
+		else
+			saddr = 0;
+	}
 
 	/* some non-standard hacks like ipt_REJECT.c:send_reset() can cause
 	 * packets with foreign saddr to appear on the NF_INET_LOCAL_OUT hook.
 	 */
-	if (addr_type == RTN_LOCAL) {
-		fl4.daddr = iph->daddr;
-		if (type == RTN_LOCAL)
-			fl4.saddr = iph->saddr;
-		fl4.flowi4_tos = RT_TOS(iph->tos);
-		fl4.flowi4_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0;
-		fl4.flowi4_mark = skb->mark;
-		fl4.flowi4_flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : 0;
-		rt = ip_route_output_key(net, &fl4);
-		if (IS_ERR(rt))
-			return -1;
-
-		/* Drop old route. */
-		skb_dst_drop(skb);
-		skb_dst_set(skb, &rt->dst);
-	} else {
-		/* non-local src, find valid iif to satisfy
-		 * rp-filter when calling ip_route_input. */
-		fl4.daddr = iph->saddr;
-		rt = ip_route_output_key(net, &fl4);
-		if (IS_ERR(rt))
-			return -1;
+	fl4.daddr = iph->daddr;
+	fl4.saddr = saddr;
+	fl4.flowi4_tos = RT_TOS(iph->tos);
+	fl4.flowi4_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0;
+	fl4.flowi4_mark = skb->mark;
+	fl4.flowi4_flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : flags;
+	rt = ip_route_output_key(net, &fl4);
+	if (IS_ERR(rt))
+		return -1;
 
-		orefdst = skb->_skb_refdst;
-		if (ip_route_input(skb, iph->daddr, iph->saddr,
-				   RT_TOS(iph->tos), rt->dst.dev) != 0) {
-			dst_release(&rt->dst);
-			return -1;
-		}
-		dst_release(&rt->dst);
-		refdst_drop(orefdst);
-	}
+	/* Drop old route. */
+	skb_dst_drop(skb);
+	skb_dst_set(skb, &rt->dst);
 
 	if (skb_dst(skb)->error)
 		return -1;
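The rewrite above collapses the old two-branch logic into a single output-route lookup: the non-local-source case no longer needs the ip_route_input()/orefdst dance because FLOWI_FLAG_ANYSRC lets the lookup proceed with a foreign source address. The pattern in isolation (a minimal sketch with an illustrative function name, not the in-tree helper, which derives net internally):

/* Sketch: re-route an skb after its header was mangled. Build a flow key
 * from the current header, do one output lookup, swap in the new dst. */
static int reroute_after_mangle(struct net *net, struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct flowi4 fl4 = {
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowi4_tos = RT_TOS(iph->tos),
		.flowi4_mark = skb->mark,
		.flowi4_flags = FLOWI_FLAG_ANYSRC, /* keep a foreign saddr */
	};
	struct rtable *rt = ip_route_output_key(net, &fl4);

	if (IS_ERR(rt))
		return -1;
	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);
	return 0;
}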
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 89bc7e66d598..fd7a3f68917f 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -260,6 +260,7 @@ unsigned int arpt_do_table(struct sk_buff *skb,
 	void *table_base;
 	const struct xt_table_info *private;
 	struct xt_action_param acpar;
+	unsigned int addend;
 
 	if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
 		return NF_DROP;
@@ -267,7 +268,8 @@ unsigned int arpt_do_table(struct sk_buff *skb,
 	indev = in ? in->name : nulldevname;
 	outdev = out ? out->name : nulldevname;
 
-	xt_info_rdlock_bh();
+	local_bh_disable();
+	addend = xt_write_recseq_begin();
 	private = table->private;
 	table_base = private->entries[smp_processor_id()];
 
@@ -338,7 +340,8 @@ unsigned int arpt_do_table(struct sk_buff *skb,
 		/* Verdict */
 		break;
 	} while (!acpar.hotdrop);
-	xt_info_rdunlock_bh();
+	xt_write_recseq_end(addend);
+	local_bh_enable();
 
 	if (acpar.hotdrop)
 		return NF_DROP;
@@ -712,7 +715,7 @@ static void get_counters(const struct xt_table_info *t,
 	unsigned int i;
 
 	for_each_possible_cpu(cpu) {
-		seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+		seqcount_t *s = &per_cpu(xt_recseq, cpu);
 
 		i = 0;
 		xt_entry_foreach(iter, t->entries[cpu], t->size) {
@@ -720,10 +723,10 @@ static void get_counters(const struct xt_table_info *t,
 			unsigned int start;
 
 			do {
-				start = read_seqbegin(lock);
+				start = read_seqcount_begin(s);
 				bcnt = iter->counters.bcnt;
 				pcnt = iter->counters.pcnt;
-			} while (read_seqretry(lock, start));
+			} while (read_seqcount_retry(s, start));
 
 			ADD_COUNTER(counters[i], bcnt, pcnt);
 			++i;
@@ -1115,6 +1118,7 @@ static int do_add_counters(struct net *net, const void __user *user,
 	int ret = 0;
 	void *loc_cpu_entry;
 	struct arpt_entry *iter;
+	unsigned int addend;
 #ifdef CONFIG_COMPAT
 	struct compat_xt_counters_info compat_tmp;
 
@@ -1171,12 +1175,12 @@ static int do_add_counters(struct net *net, const void __user *user,
 	/* Choose the copy that is on our node */
 	curcpu = smp_processor_id();
 	loc_cpu_entry = private->entries[curcpu];
-	xt_info_wrlock(curcpu);
+	addend = xt_write_recseq_begin();
 	xt_entry_foreach(iter, loc_cpu_entry, private->size) {
 		ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
 		++i;
 	}
-	xt_info_wrunlock(curcpu);
+	xt_write_recseq_end(addend);
  unlock_up_free:
 	local_bh_enable();
 	xt_table_unlock(t);
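For readers not following the x_tables rework: the per-cpu xt_info_locks seqlocks are replaced by a plain per-cpu seqcount, so the packet fast path pays one increment instead of a lock/unlock pair, and nested (recursive) invocations are handled by only bumping the sequence at the outermost level. The helpers this series relies on look roughly like the sketch below; it is reproduced from memory of the accompanying header change, so treat the exact bodies as indicative rather than authoritative.

DECLARE_PER_CPU(seqcount_t, xt_recseq);

/* Begin a recursion-safe write section: bump the per-cpu sequence only
 * if we are not already inside one (low bit clear), and return by how
 * much we bumped so the matching end can undo it. */
static inline unsigned int xt_write_recseq_begin(void)
{
	unsigned int addend;

	/* low order bit of sequence is set if we already hold a section */
	addend = (__this_cpu_read(xt_recseq.sequence) + 1) & 1;
	__this_cpu_add(xt_recseq.sequence, addend);
	smp_wmb();

	return addend;
}

static inline void xt_write_recseq_end(unsigned int addend)
{
	smp_wmb();
	__this_cpu_add(xt_recseq.sequence, addend);
}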
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index d2c1311cb28d..5c9b9d963918 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -203,7 +203,8 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
 	else
 		pmsg->outdev_name[0] = '\0';
 
-	if (entry->indev && entry->skb->dev) {
+	if (entry->indev && entry->skb->dev &&
+	    entry->skb->mac_header != entry->skb->network_header) {
 		pmsg->hw_type = entry->skb->dev->type;
 		pmsg->hw_addrlen = dev_parse_header(entry->skb,
 						    pmsg->hw_addr);
@@ -402,7 +403,8 @@ ipq_dev_drop(int ifindex)
 static inline void
 __ipq_rcv_skb(struct sk_buff *skb)
 {
-	int status, type, pid, flags, nlmsglen, skblen;
+	int status, type, pid, flags;
+	unsigned int nlmsglen, skblen;
 	struct nlmsghdr *nlh;
 
 	skblen = skb->len;
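The nlmsglen/skblen signedness change is classic bounds-check hardening: nlmsg_len comes from userspace, and a signed length can pass a less-than comparison that an unsigned one correctly rejects. A standalone userspace illustration of the failure mode (not kernel code):

#include <stdio.h>

int main(void)
{
	int snlmsglen = -8;			/* signed, as in the old code */
	unsigned int unlmsglen = (unsigned int)snlmsglen;
	unsigned int skblen = 100;

	/* signed compare: -8 < 100 is true, a bogus length slips through
	 * and later size arithmetic wraps around */
	printf("signed check passes: %d\n", snlmsglen < (int)skblen);

	/* unsigned compare: 0xfffffff8 < 100 is false, bogus length rejected */
	printf("unsigned check passes: %d\n", unlmsglen < skblen);
	return 0;
}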
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 704915028009..24e556e83a3b 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -68,15 +68,6 @@ void *ipt_alloc_initial_table(const struct xt_table *info)
 }
 EXPORT_SYMBOL_GPL(ipt_alloc_initial_table);
 
-/*
-   We keep a set of rules for each CPU, so we can avoid write-locking
-   them in the softirq when updating the counters and therefore
-   only need to read-lock in the softirq; doing a write_lock_bh() in user
-   context stops packets coming through and allows user context to read
-   the counters or update the rules.
-
-   Hence the start of any table is given by get_table() below.  */
-
 /* Returns whether matches rule or not. */
 /* Performance critical - called for every packet */
 static inline bool
@@ -311,6 +302,7 @@ ipt_do_table(struct sk_buff *skb,
 	unsigned int *stackptr, origptr, cpu;
 	const struct xt_table_info *private;
 	struct xt_action_param acpar;
+	unsigned int addend;
 
 	/* Initialization */
 	ip = ip_hdr(skb);
@@ -331,7 +323,8 @@ ipt_do_table(struct sk_buff *skb,
 	acpar.hooknum = hook;
 
 	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
-	xt_info_rdlock_bh();
+	local_bh_disable();
+	addend = xt_write_recseq_begin();
 	private = table->private;
 	cpu        = smp_processor_id();
 	table_base = private->entries[cpu];
@@ -430,7 +423,9 @@ ipt_do_table(struct sk_buff *skb,
 	pr_debug("Exiting %s; resetting sp from %u to %u\n",
 		 __func__, *stackptr, origptr);
 	*stackptr = origptr;
-	xt_info_rdunlock_bh();
+	xt_write_recseq_end(addend);
+	local_bh_enable();
+
 #ifdef DEBUG_ALLOW_ALL
 	return NF_ACCEPT;
 #else
@@ -571,7 +566,7 @@ check_entry(const struct ipt_entry *e, const char *name)
 	const struct xt_entry_target *t;
 
 	if (!ip_checkentry(&e->ip)) {
-		duprintf("ip check failed %p %s.\n", e, par->match->name);
+		duprintf("ip check failed %p %s.\n", e, name);
 		return -EINVAL;
 	}
 
@@ -886,7 +881,7 @@ get_counters(const struct xt_table_info *t,
 	unsigned int i;
 
 	for_each_possible_cpu(cpu) {
-		seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+		seqcount_t *s = &per_cpu(xt_recseq, cpu);
 
 		i = 0;
 		xt_entry_foreach(iter, t->entries[cpu], t->size) {
@@ -894,10 +889,10 @@ get_counters(const struct xt_table_info *t,
 			unsigned int start;
 
 			do {
-				start = read_seqbegin(lock);
+				start = read_seqcount_begin(s);
 				bcnt = iter->counters.bcnt;
 				pcnt = iter->counters.pcnt;
-			} while (read_seqretry(lock, start));
+			} while (read_seqcount_retry(s, start));
 
 			ADD_COUNTER(counters[i], bcnt, pcnt);
 			++i; /* macro does multi eval of i */
@@ -1312,6 +1307,7 @@ do_add_counters(struct net *net, const void __user *user,
 	int ret = 0;
 	void *loc_cpu_entry;
 	struct ipt_entry *iter;
+	unsigned int addend;
 #ifdef CONFIG_COMPAT
 	struct compat_xt_counters_info compat_tmp;
 
@@ -1368,12 +1364,12 @@ do_add_counters(struct net *net, const void __user *user,
 	/* Choose the copy that is on our node */
 	curcpu = smp_processor_id();
 	loc_cpu_entry = private->entries[curcpu];
-	xt_info_wrlock(curcpu);
+	addend = xt_write_recseq_begin();
 	xt_entry_foreach(iter, loc_cpu_entry, private->size) {
 		ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
 		++i;
 	}
-	xt_info_wrunlock(curcpu);
+	xt_write_recseq_end(addend);
  unlock_up_free:
 	local_bh_enable();
 	xt_table_unlock(t);
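On the read side, get_counters() keeps the same retry-loop shape it had with seqlocks, only now against the shared per-cpu seqcount: retry the two-word snapshot until no writer on that cpu touched the sequence in between. The pattern in isolation, as a minimal sketch (xt_counters field names as in x_tables.h):

#include <linux/seqlock.h>
#include <linux/netfilter/x_tables.h>

/* Snapshot one rule's byte/packet counters consistently: loop while a
 * writer bumped this cpu's sequence between our begin and retry reads. */
static void snapshot_counters(const seqcount_t *s,
			      const struct xt_counters *src,
			      u64 *bytes, u64 *packets)
{
	unsigned int start;

	do {
		start = read_seqcount_begin(s);
		*bytes   = src->bcnt;
		*packets = src->pcnt;
	} while (read_seqcount_retry(s, start));
}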
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index d609ac3cb9a4..5c9e97c79017 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -307,7 +307,7 @@ clusterip_tg(struct sk_buff *skb, const struct xt_action_param *par)
 	 * error messages (RELATED) and information requests (see below) */
 	if (ip_hdr(skb)->protocol == IPPROTO_ICMP &&
 	    (ctinfo == IP_CT_RELATED ||
-	     ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY))
+	     ctinfo == IP_CT_RELATED_REPLY))
 		return XT_CONTINUE;
 
 	/* ip_conntrack_icmp guarantees us that we only have ICMP_ECHO,
@@ -321,12 +321,12 @@ clusterip_tg(struct sk_buff *skb, const struct xt_action_param *par)
 		ct->mark = hash;
 		break;
 	case IP_CT_RELATED:
-	case IP_CT_RELATED+IP_CT_IS_REPLY:
+	case IP_CT_RELATED_REPLY:
 		/* FIXME: we don't handle expectations at the
 		 * moment.  they can arrive on a different node than
 		 * the master connection (e.g. FTP passive mode) */
 	case IP_CT_ESTABLISHED:
-	case IP_CT_ESTABLISHED+IP_CT_IS_REPLY:
+	case IP_CT_ESTABLISHED_REPLY:
 		break;
 	default:
 		break;
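The IP_CT_*_REPLY spellings used from here on replace the open-coded `IP_CT_* + IP_CT_IS_REPLY` sums scattered through the conntrack consumers. The accompanying header change adds named aliases to the enum, roughly as below (a sketch; the comments are mine, and the header retains additional members not shown):

enum ip_conntrack_info {
	IP_CT_ESTABLISHED,	/* part of an established connection */
	IP_CT_RELATED,		/* related to an existing connection */
	IP_CT_NEW,		/* new connection, original direction only */
	IP_CT_IS_REPLY,		/* offset: >= this means reply direction */

	/* named aliases for the previously open-coded sums */
	IP_CT_ESTABLISHED_REPLY = IP_CT_ESTABLISHED + IP_CT_IS_REPLY,
	IP_CT_RELATED_REPLY     = IP_CT_RELATED + IP_CT_IS_REPLY,
	IP_CT_NEW_REPLY         = IP_CT_NEW + IP_CT_IS_REPLY,
};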
diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c
index d2ed9dc74ebc..9931152a78b5 100644
--- a/net/ipv4/netfilter/ipt_MASQUERADE.c
+++ b/net/ipv4/netfilter/ipt_MASQUERADE.c
@@ -60,7 +60,7 @@ masquerade_tg(struct sk_buff *skb, const struct xt_action_param *par)
 	nat = nfct_nat(ct);
 
 	NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
-			    ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY));
+			    ctinfo == IP_CT_RELATED_REPLY));
 
 	/* Source address is 0.0.0.0 - locally generated packet that is
 	 * probably not supposed to be masqueraded.
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c
index 1ff79e557f96..51f13f8ec724 100644
--- a/net/ipv4/netfilter/ipt_REJECT.c
+++ b/net/ipv4/netfilter/ipt_REJECT.c
@@ -40,7 +40,6 @@ static void send_reset(struct sk_buff *oldskb, int hook)
 	struct iphdr *niph;
 	const struct tcphdr *oth;
 	struct tcphdr _otcph, *tcph;
-	unsigned int addr_type;
 
 	/* IP header checks: fragment. */
 	if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
@@ -55,6 +54,9 @@ static void send_reset(struct sk_buff *oldskb, int hook)
 	if (oth->rst)
 		return;
 
+	if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
+		return;
+
 	/* Check checksum */
 	if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), IPPROTO_TCP))
 		return;
@@ -101,19 +103,11 @@ static void send_reset(struct sk_buff *oldskb, int hook)
 	nskb->csum_start = (unsigned char *)tcph - nskb->head;
 	nskb->csum_offset = offsetof(struct tcphdr, check);
 
-	addr_type = RTN_UNSPEC;
-	if (hook != NF_INET_FORWARD
-#ifdef CONFIG_BRIDGE_NETFILTER
-	    || (nskb->nf_bridge && nskb->nf_bridge->mask & BRNF_BRIDGED)
-#endif
-	   )
-		addr_type = RTN_LOCAL;
-
 	/* ip_route_me_harder expects skb->dst to be set */
 	skb_dst_set_noref(nskb, skb_dst(oldskb));
 
 	nskb->protocol = htons(ETH_P_IP);
-	if (ip_route_me_harder(nskb, addr_type))
+	if (ip_route_me_harder(nskb, RTN_UNSPEC))
 		goto free_nskb;
 
 	niph->ttl = ip4_dst_hoplimit(skb_dst(nskb));
diff --git a/net/ipv4/netfilter/ipt_ecn.c b/net/ipv4/netfilter/ipt_ecn.c
index af6e9c778345..2b57e52c746c 100644
--- a/net/ipv4/netfilter/ipt_ecn.c
+++ b/net/ipv4/netfilter/ipt_ecn.c
@@ -25,7 +25,8 @@ MODULE_LICENSE("GPL");
 static inline bool match_ip(const struct sk_buff *skb,
 			    const struct ipt_ecn_info *einfo)
 {
-	return (ip_hdr(skb)->tos & IPT_ECN_IP_MASK) == einfo->ip_ect;
+	return ((ip_hdr(skb)->tos & IPT_ECN_IP_MASK) == einfo->ip_ect) ^
+	       !!(einfo->invert & IPT_ECN_OP_MATCH_IP);
 }
 
 static inline bool match_tcp(const struct sk_buff *skb,
@@ -76,8 +77,6 @@ static bool ecn_mt(const struct sk_buff *skb, struct xt_action_param *par)
 		return false;
 
 	if (info->operation & (IPT_ECN_OP_MATCH_ECE|IPT_ECN_OP_MATCH_CWR)) {
-		if (ip_hdr(skb)->protocol != IPPROTO_TCP)
-			return false;
 		if (!match_tcp(skb, info, &par->hotdrop))
 			return false;
 	}
@@ -97,7 +96,7 @@ static int ecn_mt_check(const struct xt_mtchk_param *par)
 		return -EINVAL;
 
 	if (info->operation & (IPT_ECN_OP_MATCH_ECE|IPT_ECN_OP_MATCH_CWR) &&
-	    (ip->proto != IPPROTO_TCP || ip->invflags & IPT_INV_PROTO)) {
+	    (ip->proto != IPPROTO_TCP || ip->invflags & IPT_INV_PROTO)) {
 		pr_info("cannot match TCP bits in rule for non-tcp packets\n");
 		return -EINVAL;
 	}
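The match_ip() rewrite folds the rule's invert bit into the result with XOR; the `!!` is what makes that safe, normalising a multi-bit mask test to 0 or 1 before XORing it with a boolean. The idiom in isolation (a sketch; 0x03 stands in for IPT_ECN_IP_MASK, the two ECN bits of the TOS byte):

/* raw match result XOR normalised invert bit */
static bool ecn_ip_matches(unsigned char tos, unsigned char want_ect,
			   unsigned char invert_mask, unsigned char bit)
{
	/* without !!, (invert_mask & bit) could be e.g. 0x04 and the XOR
	 * would corrupt the boolean result instead of flipping it */
	return ((tos & 0x03) == want_ect) ^ !!(invert_mask & bit);
}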
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index 5a03c02af999..de9da21113a1 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -101,7 +101,7 @@ static unsigned int ipv4_confirm(unsigned int hooknum,
 
 	/* This is where we call the helper: as the packet goes out. */
 	ct = nf_ct_get(skb, &ctinfo);
-	if (!ct || ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY)
+	if (!ct || ctinfo == IP_CT_RELATED_REPLY)
 		goto out;
 
 	help = nfct_help(ct);
@@ -121,7 +121,9 @@ static unsigned int ipv4_confirm(unsigned int hooknum,
 		return ret;
 	}
 
-	if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status)) {
+	/* adjust seqs for loopback traffic only in outgoing direction */
+	if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) &&
+	    !nf_is_loopback_packet(skb)) {
 		typeof(nf_nat_seq_adjust_hook) seq_adjust;
 
 		seq_adjust = rcu_dereference(nf_nat_seq_adjust_hook);
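nf_is_loopback_packet() is introduced elsewhere in this series; its definition is not part of the hunk above. Something along these lines would be consistent with its use here, i.e. skipping the second sequence adjustment for traffic that looped back through lo (an assumption, not the verbatim helper):

/* assumed shape: true when the skb travelled over the loopback device */
static inline bool nf_is_loopback_packet(const struct sk_buff *skb)
{
	return skb->dev && skb->skb_iif && skb->dev->flags & IFF_LOOPBACK;
}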
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
index 7404bde95994..ab5b27a2916f 100644
--- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
@@ -160,7 +160,7 @@ icmp_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
 	/* Update skb to refer to this connection */
 	skb->nfct = &nf_ct_tuplehash_to_ctrack(h)->ct_general;
 	skb->nfctinfo = *ctinfo;
-	return -NF_ACCEPT;
+	return NF_ACCEPT;
 }
 
 /* Small and modified version of icmp_rcv */
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index 9c71b2755ce3..3346de5d94d0 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -433,7 +433,7 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct,
 
 	/* Must be RELATED */
 	NF_CT_ASSERT(skb->nfctinfo == IP_CT_RELATED ||
-		     skb->nfctinfo == IP_CT_RELATED+IP_CT_IS_REPLY);
+		     skb->nfctinfo == IP_CT_RELATED_REPLY);
 
 	/* Redirects on non-null nats must be dropped, else they'll
 	   start talking to each other without our translation, and be
diff --git a/net/ipv4/netfilter/nf_nat_helper.c b/net/ipv4/netfilter/nf_nat_helper.c
index 31427fb57aa8..ebc5f8894f99 100644
--- a/net/ipv4/netfilter/nf_nat_helper.c
+++ b/net/ipv4/netfilter/nf_nat_helper.c
@@ -153,14 +153,14 @@ void nf_nat_set_seq_adjust(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
 }
 EXPORT_SYMBOL_GPL(nf_nat_set_seq_adjust);
 
-static void nf_nat_csum(struct sk_buff *skb, struct iphdr *iph, void *data,
+static void nf_nat_csum(struct sk_buff *skb, const struct iphdr *iph, void *data,
 			int datalen, __sum16 *check, int oldlen)
 {
 	struct rtable *rt = skb_rtable(skb);
 
 	if (skb->ip_summed != CHECKSUM_PARTIAL) {
 		if (!(rt->rt_flags & RTCF_LOCAL) &&
-		    skb->dev->features & NETIF_F_V4_CSUM) {
+		    (!skb->dev || skb->dev->features & NETIF_F_V4_CSUM)) {
 			skb->ip_summed = CHECKSUM_PARTIAL;
 			skb->csum_start = skb_headroom(skb) +
 					  skb_network_offset(skb) +
diff --git a/net/ipv4/netfilter/nf_nat_rule.c b/net/ipv4/netfilter/nf_nat_rule.c
index 21c30426480b..733c9abc1cbd 100644
--- a/net/ipv4/netfilter/nf_nat_rule.c
+++ b/net/ipv4/netfilter/nf_nat_rule.c
@@ -53,7 +53,7 @@ ipt_snat_target(struct sk_buff *skb, const struct xt_action_param *par)
 
 	/* Connection must be valid and new. */
 	NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
-			    ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY));
+			    ctinfo == IP_CT_RELATED_REPLY));
 	NF_CT_ASSERT(par->out != NULL);
 
 	return nf_nat_setup_info(ct, &mr->range[0], IP_NAT_MANIP_SRC);
diff --git a/net/ipv4/netfilter/nf_nat_standalone.c b/net/ipv4/netfilter/nf_nat_standalone.c
index 7317bdf1d457..483b76d042da 100644
--- a/net/ipv4/netfilter/nf_nat_standalone.c
+++ b/net/ipv4/netfilter/nf_nat_standalone.c
@@ -116,7 +116,7 @@ nf_nat_fn(unsigned int hooknum,
 
 	switch (ctinfo) {
 	case IP_CT_RELATED:
-	case IP_CT_RELATED+IP_CT_IS_REPLY:
+	case IP_CT_RELATED_REPLY:
 		if (ip_hdr(skb)->protocol == IPPROTO_ICMP) {
 			if (!nf_nat_icmp_reply_translation(ct, ctinfo,
 							   hooknum, skb))
@@ -144,7 +144,7 @@ nf_nat_fn(unsigned int hooknum,
 	default:
 		/* ESTABLISHED */
 		NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED ||
-			     ctinfo == (IP_CT_ESTABLISHED+IP_CT_IS_REPLY));
+			     ctinfo == IP_CT_ESTABLISHED_REPLY);
 	}
 
 	return nf_nat_packet(ct, ctinfo, hooknum, skb);
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
new file mode 100644
index 000000000000..39b403f854c6
--- /dev/null
+++ b/net/ipv4/ping.c
@@ -0,0 +1,931 @@
1/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * "Ping" sockets
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 *
13 * Based on ipv4/udp.c code.
14 *
15 * Authors: Vasiliy Kulikov / Openwall (for Linux 2.6),
16 * Pavel Kankovsky (for Linux 2.4.32)
17 *
18 * Pavel gave all rights to bugs to Vasiliy,
19 * none of the bugs are Pavel's now.
20 *
21 */
22
23#include <asm/system.h>
24#include <linux/uaccess.h>
25#include <linux/types.h>
26#include <linux/fcntl.h>
27#include <linux/socket.h>
28#include <linux/sockios.h>
29#include <linux/in.h>
30#include <linux/errno.h>
31#include <linux/timer.h>
32#include <linux/mm.h>
33#include <linux/inet.h>
34#include <linux/netdevice.h>
35#include <net/snmp.h>
36#include <net/ip.h>
37#include <net/ipv6.h>
38#include <net/icmp.h>
39#include <net/protocol.h>
40#include <linux/skbuff.h>
41#include <linux/proc_fs.h>
42#include <net/sock.h>
43#include <net/ping.h>
44#include <net/udp.h>
45#include <net/route.h>
46#include <net/inet_common.h>
47#include <net/checksum.h>
48
49
50static struct ping_table ping_table;
51
52static u16 ping_port_rover;
53
54static inline int ping_hashfn(struct net *net, unsigned num, unsigned mask)
55{
56 int res = (num + net_hash_mix(net)) & mask;
57 pr_debug("hash(%d) = %d\n", num, res);
58 return res;
59}
60
61static inline struct hlist_nulls_head *ping_hashslot(struct ping_table *table,
62 struct net *net, unsigned num)
63{
64 return &table->hash[ping_hashfn(net, num, PING_HTABLE_MASK)];
65}
66
67static int ping_v4_get_port(struct sock *sk, unsigned short ident)
68{
69 struct hlist_nulls_node *node;
70 struct hlist_nulls_head *hlist;
71 struct inet_sock *isk, *isk2;
72 struct sock *sk2 = NULL;
73
74 isk = inet_sk(sk);
75 write_lock_bh(&ping_table.lock);
76 if (ident == 0) {
77 u32 i;
78 u16 result = ping_port_rover + 1;
79
80 for (i = 0; i < (1L << 16); i++, result++) {
81 if (!result)
82 result++; /* avoid zero */
83 hlist = ping_hashslot(&ping_table, sock_net(sk),
84 result);
85 ping_portaddr_for_each_entry(sk2, node, hlist) {
86 isk2 = inet_sk(sk2);
87
88 if (isk2->inet_num == result)
89 goto next_port;
90 }
91
92 /* found */
93 ping_port_rover = ident = result;
94 break;
95next_port:
96 ;
97 }
98 if (i >= (1L << 16))
99 goto fail;
100 } else {
101 hlist = ping_hashslot(&ping_table, sock_net(sk), ident);
102 ping_portaddr_for_each_entry(sk2, node, hlist) {
103 isk2 = inet_sk(sk2);
104
105 if ((isk2->inet_num == ident) &&
106 (sk2 != sk) &&
107 (!sk2->sk_reuse || !sk->sk_reuse))
108 goto fail;
109 }
110 }
111
112 pr_debug("found port/ident = %d\n", ident);
113 isk->inet_num = ident;
114 if (sk_unhashed(sk)) {
115 pr_debug("was not hashed\n");
116 sock_hold(sk);
117 hlist_nulls_add_head(&sk->sk_nulls_node, hlist);
118 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
119 }
120 write_unlock_bh(&ping_table.lock);
121 return 0;
122
123fail:
124 write_unlock_bh(&ping_table.lock);
125 return 1;
126}
127
128static void ping_v4_hash(struct sock *sk)
129{
130 pr_debug("ping_v4_hash(sk->port=%u)\n", inet_sk(sk)->inet_num);
131 BUG(); /* "Please do not press this button again." */
132}
133
134static void ping_v4_unhash(struct sock *sk)
135{
136 struct inet_sock *isk = inet_sk(sk);
137 pr_debug("ping_v4_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num);
138 if (sk_hashed(sk)) {
139 write_lock_bh(&ping_table.lock);
140 hlist_nulls_del(&sk->sk_nulls_node);
141 sock_put(sk);
142 isk->inet_num = isk->inet_sport = 0;
143 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
144 write_unlock_bh(&ping_table.lock);
145 }
146}
147
148static struct sock *ping_v4_lookup(struct net *net, u32 saddr, u32 daddr,
149 u16 ident, int dif)
150{
151 struct hlist_nulls_head *hslot = ping_hashslot(&ping_table, net, ident);
152 struct sock *sk = NULL;
153 struct inet_sock *isk;
154 struct hlist_nulls_node *hnode;
155
156 pr_debug("try to find: num = %d, daddr = %ld, dif = %d\n",
157 (int)ident, (unsigned long)daddr, dif);
158 read_lock_bh(&ping_table.lock);
159
160 ping_portaddr_for_each_entry(sk, hnode, hslot) {
161 isk = inet_sk(sk);
162
163 pr_debug("found: %p: num = %d, daddr = %ld, dif = %d\n", sk,
164 (int)isk->inet_num, (unsigned long)isk->inet_rcv_saddr,
165 sk->sk_bound_dev_if);
166
167 pr_debug("iterate\n");
168 if (isk->inet_num != ident)
169 continue;
170 if (isk->inet_rcv_saddr && isk->inet_rcv_saddr != daddr)
171 continue;
172 if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)
173 continue;
174
175 sock_hold(sk);
176 goto exit;
177 }
178
179 sk = NULL;
180exit:
181 read_unlock_bh(&ping_table.lock);
182
183 return sk;
184}
185
186static void inet_get_ping_group_range_net(struct net *net, gid_t *low,
187 gid_t *high)
188{
189 gid_t *data = net->ipv4.sysctl_ping_group_range;
190 unsigned seq;
191 do {
192 seq = read_seqbegin(&sysctl_local_ports.lock);
193
194 *low = data[0];
195 *high = data[1];
196 } while (read_seqretry(&sysctl_local_ports.lock, seq));
197}
198
199
200static int ping_init_sock(struct sock *sk)
201{
202 struct net *net = sock_net(sk);
203 gid_t group = current_egid();
204 gid_t range[2];
205 struct group_info *group_info = get_current_groups();
206 int i, j, count = group_info->ngroups;
207
208 inet_get_ping_group_range_net(net, range, range+1);
209 if (range[0] <= group && group <= range[1])
210 return 0;
211
212 for (i = 0; i < group_info->nblocks; i++) {
213 int cp_count = min_t(int, NGROUPS_PER_BLOCK, count);
214
215 for (j = 0; j < cp_count; j++) {
216 group = group_info->blocks[i][j];
217 if (range[0] <= group && group <= range[1])
218 return 0;
219 }
220
221 count -= cp_count;
222 }
223
224 return -EACCES;
225}
226
227static void ping_close(struct sock *sk, long timeout)
228{
229 pr_debug("ping_close(sk=%p,sk->num=%u)\n",
230 inet_sk(sk), inet_sk(sk)->inet_num);
231 pr_debug("isk->refcnt = %d\n", sk->sk_refcnt.counter);
232
233 sk_common_release(sk);
234}
235
236/*
237 * We need our own bind because there are no privileged id's == local ports.
238 * Moreover, we don't allow binding to multi- and broadcast addresses.
239 */
240
241static int ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
242{
243 struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
244 struct inet_sock *isk = inet_sk(sk);
245 unsigned short snum;
246 int chk_addr_ret;
247 int err;
248
249 if (addr_len < sizeof(struct sockaddr_in))
250 return -EINVAL;
251
252 pr_debug("ping_v4_bind(sk=%p,sa_addr=%08x,sa_port=%d)\n",
253 sk, addr->sin_addr.s_addr, ntohs(addr->sin_port));
254
255 chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
256 if (addr->sin_addr.s_addr == INADDR_ANY)
257 chk_addr_ret = RTN_LOCAL;
258
259 if ((sysctl_ip_nonlocal_bind == 0 &&
260 isk->freebind == 0 && isk->transparent == 0 &&
261 chk_addr_ret != RTN_LOCAL) ||
262 chk_addr_ret == RTN_MULTICAST ||
263 chk_addr_ret == RTN_BROADCAST)
264 return -EADDRNOTAVAIL;
265
266 lock_sock(sk);
267
268 err = -EINVAL;
269 if (isk->inet_num != 0)
270 goto out;
271
272 err = -EADDRINUSE;
273 isk->inet_rcv_saddr = isk->inet_saddr = addr->sin_addr.s_addr;
274 snum = ntohs(addr->sin_port);
275 if (ping_v4_get_port(sk, snum) != 0) {
276 isk->inet_saddr = isk->inet_rcv_saddr = 0;
277 goto out;
278 }
279
280 pr_debug("after bind(): num = %d, daddr = %ld, dif = %d\n",
281 (int)isk->inet_num,
282 (unsigned long) isk->inet_rcv_saddr,
283 (int)sk->sk_bound_dev_if);
284
285 err = 0;
286 if (isk->inet_rcv_saddr)
287 sk->sk_userlocks |= SOCK_BINDADDR_LOCK;
288 if (snum)
289 sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
290 isk->inet_sport = htons(isk->inet_num);
291 isk->inet_daddr = 0;
292 isk->inet_dport = 0;
293 sk_dst_reset(sk);
294out:
295 release_sock(sk);
296 pr_debug("ping_v4_bind -> %d\n", err);
297 return err;
298}
299
300/*
301 * Is this a supported type of ICMP message?
302 */
303
304static inline int ping_supported(int type, int code)
305{
306 if (type == ICMP_ECHO && code == 0)
307 return 1;
308 return 0;
309}
310
311/*
312 * This routine is called by the ICMP module when it gets some
313 * sort of error condition.
314 */
315
316static int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
317
318void ping_err(struct sk_buff *skb, u32 info)
319{
320 struct iphdr *iph = (struct iphdr *)skb->data;
321 struct icmphdr *icmph = (struct icmphdr *)(skb->data+(iph->ihl<<2));
322 struct inet_sock *inet_sock;
323 int type = icmph->type;
324 int code = icmph->code;
325 struct net *net = dev_net(skb->dev);
326 struct sock *sk;
327 int harderr;
328 int err;
329
330 /* We assume the packet has already been checked by icmp_unreach */
331
332 if (!ping_supported(icmph->type, icmph->code))
333 return;
334
335 pr_debug("ping_err(type=%04x,code=%04x,id=%04x,seq=%04x)\n", type,
336 code, ntohs(icmph->un.echo.id), ntohs(icmph->un.echo.sequence));
337
338 sk = ping_v4_lookup(net, iph->daddr, iph->saddr,
339 ntohs(icmph->un.echo.id), skb->dev->ifindex);
340 if (sk == NULL) {
341 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
342 pr_debug("no socket, dropping\n");
343 return; /* No socket for error */
344 }
345 pr_debug("err on socket %p\n", sk);
346
347 err = 0;
348 harderr = 0;
349 inet_sock = inet_sk(sk);
350
351 switch (type) {
352 default:
353 case ICMP_TIME_EXCEEDED:
354 err = EHOSTUNREACH;
355 break;
356 case ICMP_SOURCE_QUENCH:
357 /* This is not a real error but ping wants to see it.
358 * Report it with some fake errno. */
359 err = EREMOTEIO;
360 break;
361 case ICMP_PARAMETERPROB:
362 err = EPROTO;
363 harderr = 1;
364 break;
365 case ICMP_DEST_UNREACH:
366 if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */
367 if (inet_sock->pmtudisc != IP_PMTUDISC_DONT) {
368 err = EMSGSIZE;
369 harderr = 1;
370 break;
371 }
372 goto out;
373 }
374 err = EHOSTUNREACH;
375 if (code <= NR_ICMP_UNREACH) {
376 harderr = icmp_err_convert[code].fatal;
377 err = icmp_err_convert[code].errno;
378 }
379 break;
380 case ICMP_REDIRECT:
381 /* See ICMP_SOURCE_QUENCH */
382 err = EREMOTEIO;
383 break;
384 }
385
386 /*
387 * RFC1122: OK. Passes ICMP errors back to application, as per
388 * 4.1.3.3.
389 */
390 if (!inet_sock->recverr) {
391 if (!harderr || sk->sk_state != TCP_ESTABLISHED)
392 goto out;
393 } else {
394 ip_icmp_error(sk, skb, err, 0 /* no remote port */,
395 info, (u8 *)icmph);
396 }
397 sk->sk_err = err;
398 sk->sk_error_report(sk);
399out:
400 sock_put(sk);
401}
402
403/*
404 * Copy and checksum an ICMP Echo packet from user space into a buffer.
405 */
406
407struct pingfakehdr {
408 struct icmphdr icmph;
409 struct iovec *iov;
410 u32 wcheck;
411};
412
413static int ping_getfrag(void *from, char * to,
414 int offset, int fraglen, int odd, struct sk_buff *skb)
415{
416 struct pingfakehdr *pfh = (struct pingfakehdr *)from;
417
418 if (offset == 0) {
419 if (fraglen < sizeof(struct icmphdr))
420 BUG();
421 if (csum_partial_copy_fromiovecend(to + sizeof(struct icmphdr),
422 pfh->iov, 0, fraglen - sizeof(struct icmphdr),
423 &pfh->wcheck))
424 return -EFAULT;
425
426 return 0;
427 }
428 if (offset < sizeof(struct icmphdr))
429 BUG();
430 if (csum_partial_copy_fromiovecend
431 (to, pfh->iov, offset - sizeof(struct icmphdr),
432 fraglen, &pfh->wcheck))
433 return -EFAULT;
434 return 0;
435}
436
437static int ping_push_pending_frames(struct sock *sk, struct pingfakehdr *pfh,
438 struct flowi4 *fl4)
439{
440 struct sk_buff *skb = skb_peek(&sk->sk_write_queue);
441
442 pfh->wcheck = csum_partial((char *)&pfh->icmph,
443 sizeof(struct icmphdr), pfh->wcheck);
444 pfh->icmph.checksum = csum_fold(pfh->wcheck);
445 memcpy(icmp_hdr(skb), &pfh->icmph, sizeof(struct icmphdr));
446 skb->ip_summed = CHECKSUM_NONE;
447 return ip_push_pending_frames(sk, fl4);
448}
449
450static int ping_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
451 size_t len)
452{
453 struct net *net = sock_net(sk);
454 struct flowi4 fl4;
455 struct inet_sock *inet = inet_sk(sk);
456 struct ipcm_cookie ipc;
457 struct icmphdr user_icmph;
458 struct pingfakehdr pfh;
459 struct rtable *rt = NULL;
460 struct ip_options_data opt_copy;
461 int free = 0;
462 u32 saddr, daddr, faddr;
463 u8 tos;
464 int err;
465
466 pr_debug("ping_sendmsg(sk=%p,sk->num=%u)\n", inet, inet->inet_num);
467
468
469 if (len > 0xFFFF)
470 return -EMSGSIZE;
471
472 /*
473 * Check the flags.
474 */
475
476 /* Mirror BSD error message compatibility */
477 if (msg->msg_flags & MSG_OOB)
478 return -EOPNOTSUPP;
479
480 /*
481 * Fetch the ICMP header provided by the userland.
482 * iovec is modified!
483 */
484
485 if (memcpy_fromiovec((u8 *)&user_icmph, msg->msg_iov,
486 sizeof(struct icmphdr)))
487 return -EFAULT;
488 if (!ping_supported(user_icmph.type, user_icmph.code))
489 return -EINVAL;
490
491 /*
492 * Get and verify the address.
493 */
494
495 if (msg->msg_name) {
496 struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name;
497 if (msg->msg_namelen < sizeof(*usin))
498 return -EINVAL;
499 if (usin->sin_family != AF_INET)
500 return -EINVAL;
501 daddr = usin->sin_addr.s_addr;
502 /* no remote port */
503 } else {
504 if (sk->sk_state != TCP_ESTABLISHED)
505 return -EDESTADDRREQ;
506 daddr = inet->inet_daddr;
507 /* no remote port */
508 }
509
510 ipc.addr = inet->inet_saddr;
511 ipc.opt = NULL;
512 ipc.oif = sk->sk_bound_dev_if;
513 ipc.tx_flags = 0;
514 err = sock_tx_timestamp(sk, &ipc.tx_flags);
515 if (err)
516 return err;
517
518 if (msg->msg_controllen) {
519 err = ip_cmsg_send(sock_net(sk), msg, &ipc);
520 if (err)
521 return err;
522 if (ipc.opt)
523 free = 1;
524 }
525 if (!ipc.opt) {
526 struct ip_options_rcu *inet_opt;
527
528 rcu_read_lock();
529 inet_opt = rcu_dereference(inet->inet_opt);
530 if (inet_opt) {
531 memcpy(&opt_copy, inet_opt,
532 sizeof(*inet_opt) + inet_opt->opt.optlen);
533 ipc.opt = &opt_copy.opt;
534 }
535 rcu_read_unlock();
536 }
537
538 saddr = ipc.addr;
539 ipc.addr = faddr = daddr;
540
541 if (ipc.opt && ipc.opt->opt.srr) {
542 if (!daddr)
543 return -EINVAL;
544 faddr = ipc.opt->opt.faddr;
545 }
546 tos = RT_TOS(inet->tos);
547 if (sock_flag(sk, SOCK_LOCALROUTE) ||
548 (msg->msg_flags & MSG_DONTROUTE) ||
549 (ipc.opt && ipc.opt->opt.is_strictroute)) {
550 tos |= RTO_ONLINK;
551 }
552
553 if (ipv4_is_multicast(daddr)) {
554 if (!ipc.oif)
555 ipc.oif = inet->mc_index;
556 if (!saddr)
557 saddr = inet->mc_addr;
558 }
559
560 flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
561 RT_SCOPE_UNIVERSE, sk->sk_protocol,
562 inet_sk_flowi_flags(sk), faddr, saddr, 0, 0);
563
564 security_sk_classify_flow(sk, flowi4_to_flowi(&fl4));
565 rt = ip_route_output_flow(net, &fl4, sk);
566 if (IS_ERR(rt)) {
567 err = PTR_ERR(rt);
568 rt = NULL;
569 if (err == -ENETUNREACH)
570 IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
571 goto out;
572 }
573
574 err = -EACCES;
575 if ((rt->rt_flags & RTCF_BROADCAST) &&
576 !sock_flag(sk, SOCK_BROADCAST))
577 goto out;
578
579 if (msg->msg_flags & MSG_CONFIRM)
580 goto do_confirm;
581back_from_confirm:
582
583 if (!ipc.addr)
584 ipc.addr = fl4.daddr;
585
586 lock_sock(sk);
587
588 pfh.icmph.type = user_icmph.type; /* already checked */
589 pfh.icmph.code = user_icmph.code; /* ditto */
590 pfh.icmph.checksum = 0;
591 pfh.icmph.un.echo.id = inet->inet_sport;
592 pfh.icmph.un.echo.sequence = user_icmph.un.echo.sequence;
593 pfh.iov = msg->msg_iov;
594 pfh.wcheck = 0;
595
596 err = ip_append_data(sk, &fl4, ping_getfrag, &pfh, len,
597 0, &ipc, &rt, msg->msg_flags);
598 if (err)
599 ip_flush_pending_frames(sk);
600 else
601 err = ping_push_pending_frames(sk, &pfh, &fl4);
602 release_sock(sk);
603
604out:
605 ip_rt_put(rt);
606 if (free)
607 kfree(ipc.opt);
608 if (!err) {
609 icmp_out_count(sock_net(sk), user_icmph.type);
610 return len;
611 }
612 return err;
613
614do_confirm:
615 dst_confirm(&rt->dst);
616 if (!(msg->msg_flags & MSG_PROBE) || len)
617 goto back_from_confirm;
618 err = 0;
619 goto out;
620}
621
622static int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
623 size_t len, int noblock, int flags, int *addr_len)
624{
625 struct inet_sock *isk = inet_sk(sk);
626 struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
627 struct sk_buff *skb;
628 int copied, err;
629
630 pr_debug("ping_recvmsg(sk=%p,sk->num=%u)\n", isk, isk->inet_num);
631
632 if (flags & MSG_OOB)
633 goto out;
634
635 if (addr_len)
636 *addr_len = sizeof(*sin);
637
638 if (flags & MSG_ERRQUEUE)
639 return ip_recv_error(sk, msg, len);
640
641 skb = skb_recv_datagram(sk, flags, noblock, &err);
642 if (!skb)
643 goto out;
644
645 copied = skb->len;
646 if (copied > len) {
647 msg->msg_flags |= MSG_TRUNC;
648 copied = len;
649 }
650
651 /* Don't bother checking the checksum */
652 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
653 if (err)
654 goto done;
655
656 sock_recv_timestamp(msg, sk, skb);
657
658 /* Copy the address. */
659 if (sin) {
660 sin->sin_family = AF_INET;
661 sin->sin_port = 0 /* skb->h.uh->source */;
662 sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
663 memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
664 }
665 if (isk->cmsg_flags)
666 ip_cmsg_recv(msg, skb);
667 err = copied;
668
669done:
670 skb_free_datagram(sk, skb);
671out:
672 pr_debug("ping_recvmsg -> %d\n", err);
673 return err;
674}
675
676static int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
677{
678 pr_debug("ping_queue_rcv_skb(sk=%p,sk->num=%d,skb=%p)\n",
679 inet_sk(sk), inet_sk(sk)->inet_num, skb);
680 if (sock_queue_rcv_skb(sk, skb) < 0) {
681 ICMP_INC_STATS_BH(sock_net(sk), ICMP_MIB_INERRORS);
682 kfree_skb(skb);
683 pr_debug("ping_queue_rcv_skb -> failed\n");
684 return -1;
685 }
686 return 0;
687}
688
689
690/*
691 * All we need to do is get the socket.
692 */
693
694void ping_rcv(struct sk_buff *skb)
695{
696 struct sock *sk;
697 struct net *net = dev_net(skb->dev);
698 struct iphdr *iph = ip_hdr(skb);
699 struct icmphdr *icmph = icmp_hdr(skb);
700 u32 saddr = iph->saddr;
701 u32 daddr = iph->daddr;
702
703 /* We assume the packet has already been checked by icmp_rcv */
704
705 pr_debug("ping_rcv(skb=%p,id=%04x,seq=%04x)\n",
706 skb, ntohs(icmph->un.echo.id), ntohs(icmph->un.echo.sequence));
707
708 /* Push ICMP header back */
709 skb_push(skb, skb->data - (u8 *)icmph);
710
711 sk = ping_v4_lookup(net, saddr, daddr, ntohs(icmph->un.echo.id),
712 skb->dev->ifindex);
713 if (sk != NULL) {
714 pr_debug("rcv on socket %p\n", sk);
715 ping_queue_rcv_skb(sk, skb_get(skb));
716 sock_put(sk);
717 return;
718 }
719 pr_debug("no socket, dropping\n");
720
721 /* We're called from icmp_rcv(). kfree_skb() is done there. */
722}
723
724struct proto ping_prot = {
725 .name = "PING",
726 .owner = THIS_MODULE,
727 .init = ping_init_sock,
728 .close = ping_close,
729 .connect = ip4_datagram_connect,
730 .disconnect = udp_disconnect,
731 .setsockopt = ip_setsockopt,
732 .getsockopt = ip_getsockopt,
733 .sendmsg = ping_sendmsg,
734 .recvmsg = ping_recvmsg,
735 .bind = ping_bind,
736 .backlog_rcv = ping_queue_rcv_skb,
737 .hash = ping_v4_hash,
738 .unhash = ping_v4_unhash,
739 .get_port = ping_v4_get_port,
740 .obj_size = sizeof(struct inet_sock),
741};
742EXPORT_SYMBOL(ping_prot);
743
744#ifdef CONFIG_PROC_FS
745
746static struct sock *ping_get_first(struct seq_file *seq, int start)
747{
748 struct sock *sk;
749 struct ping_iter_state *state = seq->private;
750 struct net *net = seq_file_net(seq);
751
752 for (state->bucket = start; state->bucket < PING_HTABLE_SIZE;
753 ++state->bucket) {
754 struct hlist_nulls_node *node;
755 struct hlist_nulls_head *hslot;
756
757 hslot = &ping_table.hash[state->bucket];
758
759 if (hlist_nulls_empty(hslot))
760 continue;
761
762 sk_nulls_for_each(sk, node, hslot) {
763 if (net_eq(sock_net(sk), net))
764 goto found;
765 }
766 }
767 sk = NULL;
768found:
769 return sk;
770}
771
772static struct sock *ping_get_next(struct seq_file *seq, struct sock *sk)
773{
774 struct ping_iter_state *state = seq->private;
775 struct net *net = seq_file_net(seq);
776
777 do {
778 sk = sk_nulls_next(sk);
779 } while (sk && (!net_eq(sock_net(sk), net)));
780
781 if (!sk)
782 return ping_get_first(seq, state->bucket + 1);
783 return sk;
784}
785
786static struct sock *ping_get_idx(struct seq_file *seq, loff_t pos)
787{
788 struct sock *sk = ping_get_first(seq, 0);
789
790 if (sk)
791 while (pos && (sk = ping_get_next(seq, sk)) != NULL)
792 --pos;
793 return pos ? NULL : sk;
794}
795
796static void *ping_seq_start(struct seq_file *seq, loff_t *pos)
797{
798 struct ping_iter_state *state = seq->private;
799 state->bucket = 0;
800
801 read_lock_bh(&ping_table.lock);
802
803 return *pos ? ping_get_idx(seq, *pos-1) : SEQ_START_TOKEN;
804}
805
806static void *ping_seq_next(struct seq_file *seq, void *v, loff_t *pos)
807{
808 struct sock *sk;
809
810 if (v == SEQ_START_TOKEN)
811 sk = ping_get_idx(seq, 0);
812 else
813 sk = ping_get_next(seq, v);
814
815 ++*pos;
816 return sk;
817}
818
819static void ping_seq_stop(struct seq_file *seq, void *v)
820{
821 read_unlock_bh(&ping_table.lock);
822}
823
824static void ping_format_sock(struct sock *sp, struct seq_file *f,
825 int bucket, int *len)
826{
827 struct inet_sock *inet = inet_sk(sp);
828 __be32 dest = inet->inet_daddr;
829 __be32 src = inet->inet_rcv_saddr;
830 __u16 destp = ntohs(inet->inet_dport);
831 __u16 srcp = ntohs(inet->inet_sport);
832
833 seq_printf(f, "%5d: %08X:%04X %08X:%04X"
834 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d%n",
835 bucket, src, srcp, dest, destp, sp->sk_state,
836 sk_wmem_alloc_get(sp),
837 sk_rmem_alloc_get(sp),
838 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
839 atomic_read(&sp->sk_refcnt), sp,
840 atomic_read(&sp->sk_drops), len);
841}
842
843static int ping_seq_show(struct seq_file *seq, void *v)
844{
845 if (v == SEQ_START_TOKEN)
846 seq_printf(seq, "%-127s\n",
847 " sl local_address rem_address st tx_queue "
848 "rx_queue tr tm->when retrnsmt uid timeout "
849 "inode ref pointer drops");
850 else {
851 struct ping_iter_state *state = seq->private;
852 int len;
853
854 ping_format_sock(v, seq, state->bucket, &len);
855 seq_printf(seq, "%*s\n", 127 - len, "");
856 }
857 return 0;
858}
859
860static const struct seq_operations ping_seq_ops = {
861 .show = ping_seq_show,
862 .start = ping_seq_start,
863 .next = ping_seq_next,
864 .stop = ping_seq_stop,
865};
866
867static int ping_seq_open(struct inode *inode, struct file *file)
868{
869 return seq_open_net(inode, file, &ping_seq_ops,
870 sizeof(struct ping_iter_state));
871}
872
873static const struct file_operations ping_seq_fops = {
874 .open = ping_seq_open,
875 .read = seq_read,
876 .llseek = seq_lseek,
877 .release = seq_release_net,
878};
879
880static int ping_proc_register(struct net *net)
881{
882 struct proc_dir_entry *p;
883 int rc = 0;
884
885 p = proc_net_fops_create(net, "icmp", S_IRUGO, &ping_seq_fops);
886 if (!p)
887 rc = -ENOMEM;
888 return rc;
889}
890
891static void ping_proc_unregister(struct net *net)
892{
893 proc_net_remove(net, "icmp");
894}
895
896
897static int __net_init ping_proc_init_net(struct net *net)
898{
899 return ping_proc_register(net);
900}
901
902static void __net_exit ping_proc_exit_net(struct net *net)
903{
904 ping_proc_unregister(net);
905}
906
907static struct pernet_operations ping_net_ops = {
908 .init = ping_proc_init_net,
909 .exit = ping_proc_exit_net,
910};
911
912int __init ping_proc_init(void)
913{
914 return register_pernet_subsys(&ping_net_ops);
915}
916
917void ping_proc_exit(void)
918{
919 unregister_pernet_subsys(&ping_net_ops);
920}
921
922#endif
923
924void __init ping_init(void)
925{
926 int i;
927
928 for (i = 0; i < PING_HTABLE_SIZE; i++)
929 INIT_HLIST_NULLS_HEAD(&ping_table.hash[i], i);
930 rwlock_init(&ping_table.lock);
931}
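Taken together, ping.c gives unprivileged processes ICMP echo through a new SOCK_DGRAM/IPPROTO_ICMP socket type: the "port" bound by ping_v4_get_port() becomes the echo identifier, and the kernel fills it into outgoing requests. A minimal userspace sketch of the resulting API; it only succeeds when the caller's gid falls inside the net.ipv4.ping_group_range sysctl checked by ping_init_sock():

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/ip_icmp.h>
#include <arpa/inet.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_ICMP);
	struct sockaddr_in dst = { .sin_family = AF_INET };
	struct icmphdr icmp = { .type = ICMP_ECHO };	/* code 0 */
	char reply[192];

	if (fd < 0) {
		perror("socket");	/* EACCES if gid not in ping_group_range */
		return 1;
	}
	dst.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	icmp.un.echo.sequence = htons(1);
	/* un.echo.id is overwritten by the kernel with the socket's ident */
	if (sendto(fd, &icmp, sizeof(icmp), 0,
		   (struct sockaddr *)&dst, sizeof(dst)) < 0) {
		perror("sendto");
		return 1;
	}
	if (recv(fd, reply, sizeof(reply), 0) < 0)
		perror("recv");
	else
		printf("got echo reply\n");
	close(fd);
	return 0;
}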
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index bceaec42c37d..c9893d43242e 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -154,7 +154,7 @@ static __inline__ int icmp_filter(struct sock *sk, struct sk_buff *skb)
  *	RFC 1122: SHOULD pass TOS value up to the transport layer.
  *	-> It does. And not only TOS, but all IP header.
  */
-static int raw_v4_input(struct sk_buff *skb, struct iphdr *iph, int hash)
+static int raw_v4_input(struct sk_buff *skb, const struct iphdr *iph, int hash)
 {
 	struct sock *sk;
 	struct hlist_head *head;
@@ -247,7 +247,7 @@ static void raw_err(struct sock *sk, struct sk_buff *skb, u32 info)
 	}
 
 	if (inet->recverr) {
-		struct iphdr *iph = (struct iphdr *)skb->data;
+		const struct iphdr *iph = (const struct iphdr *)skb->data;
 		u8 *payload = skb->data + (iph->ihl << 2);
 
 		if (inet->hdrincl)
@@ -265,7 +265,7 @@ void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info)
 {
 	int hash;
 	struct sock *raw_sk;
-	struct iphdr *iph;
+	const struct iphdr *iph;
 	struct net *net;
 
 	hash = protocol & (RAW_HTABLE_SIZE - 1);
@@ -273,7 +273,7 @@ void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info)
 	read_lock(&raw_v4_hashinfo.lock);
 	raw_sk = sk_head(&raw_v4_hashinfo.ht[hash]);
 	if (raw_sk != NULL) {
-		iph = (struct iphdr *)skb->data;
+		iph = (const struct iphdr *)skb->data;
 		net = dev_net(skb->dev);
 
 		while ((raw_sk = __raw_v4_lookup(net, raw_sk, protocol,
@@ -281,7 +281,7 @@ void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info)
 				skb->dev->ifindex)) != NULL) {
 			raw_err(raw_sk, skb, info);
 			raw_sk = sk_next(raw_sk);
-			iph = (struct iphdr *)skb->data;
+			iph = (const struct iphdr *)skb->data;
 		}
 	}
 	read_unlock(&raw_v4_hashinfo.lock);
@@ -314,9 +314,10 @@ int raw_rcv(struct sock *sk, struct sk_buff *skb)
 	return 0;
 }
 
-static int raw_send_hdrinc(struct sock *sk, void *from, size_t length,
-			   struct rtable **rtp,
-			   unsigned int flags)
+static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
+			   void *from, size_t length,
+			   struct rtable **rtp,
+			   unsigned int flags)
 {
 	struct inet_sock *inet = inet_sk(sk);
 	struct net *net = sock_net(sk);
@@ -327,7 +328,7 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length,
 	struct rtable *rt = *rtp;
 
 	if (length > rt->dst.dev->mtu) {
-		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport,
+		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
 			       rt->dst.dev->mtu);
 		return -EMSGSIZE;
 	}
@@ -372,7 +373,7 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length,
 
 	if (iphlen >= sizeof(*iph)) {
 		if (!iph->saddr)
-			iph->saddr = rt->rt_src;
+			iph->saddr = fl4->saddr;
 		iph->check   = 0;
 		iph->tot_len = htons(length);
 		if (!iph->id)
@@ -455,11 +456,13 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 	struct inet_sock *inet = inet_sk(sk);
 	struct ipcm_cookie ipc;
 	struct rtable *rt = NULL;
+	struct flowi4 fl4;
 	int free = 0;
 	__be32 daddr;
 	__be32 saddr;
 	u8  tos;
 	int err;
+	struct ip_options_data opt_copy;
 
 	err = -EMSGSIZE;
 	if (len > 0xFFFF)
@@ -520,8 +523,18 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 	saddr = ipc.addr;
 	ipc.addr = daddr;
 
-	if (!ipc.opt)
-		ipc.opt = inet->opt;
+	if (!ipc.opt) {
+		struct ip_options_rcu *inet_opt;
+
+		rcu_read_lock();
+		inet_opt = rcu_dereference(inet->inet_opt);
+		if (inet_opt) {
+			memcpy(&opt_copy, inet_opt,
+			       sizeof(*inet_opt) + inet_opt->opt.optlen);
+			ipc.opt = &opt_copy.opt;
+		}
+		rcu_read_unlock();
+	}
 
 	if (ipc.opt) {
 		err = -EINVAL;
@@ -530,10 +543,10 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 		 */
 		if (inet->hdrincl)
 			goto done;
-		if (ipc.opt->srr) {
+		if (ipc.opt->opt.srr) {
 			if (!daddr)
 				goto done;
-			daddr = ipc.opt->faddr;
+			daddr = ipc.opt->opt.faddr;
 		}
 	}
 	tos = RT_CONN_FLAGS(sk);
@@ -547,31 +560,23 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 			saddr = inet->mc_addr;
 	}
 
-	{
-		struct flowi4 fl4 = {
-			.flowi4_oif = ipc.oif,
-			.flowi4_mark = sk->sk_mark,
-			.daddr = daddr,
-			.saddr = saddr,
-			.flowi4_tos = tos,
-			.flowi4_proto = (inet->hdrincl ?
-					 IPPROTO_RAW :
-					 sk->sk_protocol),
-			.flowi4_flags = FLOWI_FLAG_CAN_SLEEP,
-		};
-		if (!inet->hdrincl) {
-			err = raw_probe_proto_opt(&fl4, msg);
-			if (err)
-				goto done;
-		}
+	flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
+			   RT_SCOPE_UNIVERSE,
+			   inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
+			   FLOWI_FLAG_CAN_SLEEP, daddr, saddr, 0, 0);
 
-		security_sk_classify_flow(sk, flowi4_to_flowi(&fl4));
-		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
-		if (IS_ERR(rt)) {
-			err = PTR_ERR(rt);
-			rt = NULL;
+	if (!inet->hdrincl) {
+		err = raw_probe_proto_opt(&fl4, msg);
+		if (err)
 			goto done;
-		}
+	}
+
+	security_sk_classify_flow(sk, flowi4_to_flowi(&fl4));
+	rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
+	if (IS_ERR(rt)) {
+		err = PTR_ERR(rt);
+		rt = NULL;
+		goto done;
 	}
 
 	err = -EACCES;
@@ -583,19 +588,20 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 back_from_confirm:
 
 	if (inet->hdrincl)
-		err = raw_send_hdrinc(sk, msg->msg_iov, len,
-				      &rt, msg->msg_flags);
+		err = raw_send_hdrinc(sk, &fl4, msg->msg_iov, len,
+				      &rt, msg->msg_flags);
 
 	else {
 		if (!ipc.addr)
-			ipc.addr = rt->rt_dst;
+			ipc.addr = fl4.daddr;
 		lock_sock(sk);
-		err = ip_append_data(sk, ip_generic_getfrag, msg->msg_iov, len, 0,
-				     &ipc, &rt, msg->msg_flags);
+		err = ip_append_data(sk, &fl4, ip_generic_getfrag,
+				     msg->msg_iov, len, 0,
+				     &ipc, &rt, msg->msg_flags);
 		if (err)
 			ip_flush_pending_frames(sk);
 		else if (!(msg->msg_flags & MSG_MORE)) {
-			err = ip_push_pending_frames(sk);
+			err = ip_push_pending_frames(sk, &fl4);
 			if (err == -ENOBUFS && !inet->recverr)
 				err = 0;
 		}
@@ -973,7 +979,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
 	srcp  = inet->inet_num;
 
 	seq_printf(seq, "%4d: %08X:%04X %08X:%04X"
-		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d\n",
+		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d\n",
		i, src, srcp, dest, destp, sp->sk_state,
 		sk_wmem_alloc_get(sp),
 		sk_rmem_alloc_get(sp),
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 99e6e4bb1c72..aa13ef105110 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -156,7 +156,7 @@ static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
156 u32 *p = NULL; 156 u32 *p = NULL;
157 157
158 if (!rt->peer) 158 if (!rt->peer)
159 rt_bind_peer(rt, 1); 159 rt_bind_peer(rt, rt->rt_dst, 1);
160 160
161 peer = rt->peer; 161 peer = rt->peer;
162 if (peer) { 162 if (peer) {
@@ -424,7 +424,7 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
424 dst_metric(&r->dst, RTAX_WINDOW), 424 dst_metric(&r->dst, RTAX_WINDOW),
425 (int)((dst_metric(&r->dst, RTAX_RTT) >> 3) + 425 (int)((dst_metric(&r->dst, RTAX_RTT) >> 3) +
426 dst_metric(&r->dst, RTAX_RTTVAR)), 426 dst_metric(&r->dst, RTAX_RTTVAR)),
427 r->rt_tos, 427 r->rt_key_tos,
428 r->dst.hh ? atomic_read(&r->dst.hh->hh_refcnt) : -1, 428 r->dst.hh ? atomic_read(&r->dst.hh->hh_refcnt) : -1,
429 r->dst.hh ? (r->dst.hh->hh_output == 429 r->dst.hh ? (r->dst.hh->hh_output ==
430 dev_queue_xmit) : 0, 430 dev_queue_xmit) : 0,
@@ -724,7 +724,7 @@ static inline int compare_keys(struct rtable *rt1, struct rtable *rt2)
724 return (((__force u32)rt1->rt_key_dst ^ (__force u32)rt2->rt_key_dst) | 724 return (((__force u32)rt1->rt_key_dst ^ (__force u32)rt2->rt_key_dst) |
725 ((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) | 725 ((__force u32)rt1->rt_key_src ^ (__force u32)rt2->rt_key_src) |
726 (rt1->rt_mark ^ rt2->rt_mark) | 726 (rt1->rt_mark ^ rt2->rt_mark) |
727 (rt1->rt_tos ^ rt2->rt_tos) | 727 (rt1->rt_key_tos ^ rt2->rt_key_tos) |
728 (rt1->rt_oif ^ rt2->rt_oif) | 728 (rt1->rt_oif ^ rt2->rt_oif) |
729 (rt1->rt_iif ^ rt2->rt_iif)) == 0; 729 (rt1->rt_iif ^ rt2->rt_iif)) == 0;
730} 730}
@@ -968,10 +968,6 @@ static int rt_garbage_collect(struct dst_ops *ops)
968 break; 968 break;
969 969
970 expire >>= 1; 970 expire >>= 1;
971#if RT_CACHE_DEBUG >= 2
972 printk(KERN_DEBUG "expire>> %u %d %d %d\n", expire,
973 dst_entries_get_fast(&ipv4_dst_ops), goal, i);
974#endif
975 971
976 if (dst_entries_get_fast(&ipv4_dst_ops) < ip_rt_max_size) 972 if (dst_entries_get_fast(&ipv4_dst_ops) < ip_rt_max_size)
977 goto out; 973 goto out;
@@ -992,10 +988,6 @@ work_done:
992 dst_entries_get_fast(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh || 988 dst_entries_get_fast(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh ||
993 dst_entries_get_slow(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh) 989 dst_entries_get_slow(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh)
994 expire = ip_rt_gc_timeout; 990 expire = ip_rt_gc_timeout;
995#if RT_CACHE_DEBUG >= 2
996 printk(KERN_DEBUG "expire++ %u %d %d %d\n", expire,
997 dst_entries_get_fast(&ipv4_dst_ops), goal, rover);
998#endif
999out: return 0; 991out: return 0;
1000} 992}
1001 993
@@ -1179,16 +1171,6 @@ restart:
1179 1171
1180 rt->dst.rt_next = rt_hash_table[hash].chain; 1172 rt->dst.rt_next = rt_hash_table[hash].chain;
1181 1173
1182#if RT_CACHE_DEBUG >= 2
1183 if (rt->dst.rt_next) {
1184 struct rtable *trt;
1185 printk(KERN_DEBUG "rt_cache @%02x: %pI4",
1186 hash, &rt->rt_dst);
1187 for (trt = rt->dst.rt_next; trt; trt = trt->dst.rt_next)
1188 printk(" . %pI4", &trt->rt_dst);
1189 printk("\n");
1190 }
1191#endif
1192 /* 1174 /*
1193 * Since lookup is lockfree, we must make sure 1175 * Since lookup is lockfree, we must make sure
1194 * previous writes to rt are committed to memory 1176 * previous writes to rt are committed to memory
@@ -1211,11 +1193,11 @@ static u32 rt_peer_genid(void)
1211 return atomic_read(&__rt_peer_genid); 1193 return atomic_read(&__rt_peer_genid);
1212} 1194}
1213 1195
1214void rt_bind_peer(struct rtable *rt, int create) 1196void rt_bind_peer(struct rtable *rt, __be32 daddr, int create)
1215{ 1197{
1216 struct inet_peer *peer; 1198 struct inet_peer *peer;
1217 1199
1218 peer = inet_getpeer_v4(rt->rt_dst, create); 1200 peer = inet_getpeer_v4(daddr, create);
1219 1201
1220 if (peer && cmpxchg(&rt->peer, NULL, peer) != NULL) 1202 if (peer && cmpxchg(&rt->peer, NULL, peer) != NULL)
1221 inet_putpeer(peer); 1203 inet_putpeer(peer);
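rt_bind_peer() now takes the destination address as an argument instead of reading rt->rt_dst, so a caller can bind a peer for an address other than the cached one. The cmpxchg() keeps the binding race-free: when two CPUs look up a peer concurrently, the loser drops its reference. A userspace model of this install-once idiom (struct peer and bind_peer are illustrative):

#include <stdatomic.h>
#include <stdio.h>

struct peer { int refs; };

static _Atomic(struct peer *) bound;

static void put_peer(struct peer *p) { p->refs--; }

static void bind_peer(struct peer *candidate)
{
	struct peer *expected = NULL;

	candidate->refs++;	/* reference taken by the lookup */
	if (!atomic_compare_exchange_strong(&bound, &expected, candidate))
		put_peer(candidate);	/* lost the race; return the ref */
}

int main(void)
{
	struct peer a = { 0 }, b = { 0 };

	bind_peer(&a);
	bind_peer(&b);
	printf("a.refs=%d b.refs=%d bound=%s\n", a.refs, b.refs,
	       atomic_load(&bound) == &a ? "a" : "b");
	return 0;
}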
@@ -1249,7 +1231,7 @@ void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
1249 1231
1250 if (rt) { 1232 if (rt) {
1251 if (rt->peer == NULL) 1233 if (rt->peer == NULL)
1252 rt_bind_peer(rt, 1); 1234 rt_bind_peer(rt, rt->rt_dst, 1);
1253 1235
1254 /* If peer is attached to destination, it is never detached, 1236 /* If peer is attached to destination, it is never detached,
1255 so that we need not to grab a lock to dereference it. 1237 so that we need not to grab a lock to dereference it.
@@ -1334,6 +1316,23 @@ reject_redirect:
1334 ; 1316 ;
1335} 1317}
1336 1318
1319static bool peer_pmtu_expired(struct inet_peer *peer)
1320{
1321 unsigned long orig = ACCESS_ONCE(peer->pmtu_expires);
1322
1323 return orig &&
1324 time_after_eq(jiffies, orig) &&
1325 cmpxchg(&peer->pmtu_expires, orig, 0) == orig;
1326}
1327
1328static bool peer_pmtu_cleaned(struct inet_peer *peer)
1329{
1330 unsigned long orig = ACCESS_ONCE(peer->pmtu_expires);
1331
1332 return orig &&
1333 cmpxchg(&peer->pmtu_expires, orig, 0) == orig;
1334}
1335
1337static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst) 1336static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
1338{ 1337{
1339 struct rtable *rt = (struct rtable *)dst; 1338 struct rtable *rt = (struct rtable *)dst;
@@ -1347,20 +1346,10 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
1347 unsigned hash = rt_hash(rt->rt_key_dst, rt->rt_key_src, 1346 unsigned hash = rt_hash(rt->rt_key_dst, rt->rt_key_src,
1348 rt->rt_oif, 1347 rt->rt_oif,
1349 rt_genid(dev_net(dst->dev))); 1348 rt_genid(dev_net(dst->dev)));
1350#if RT_CACHE_DEBUG >= 1
1351 printk(KERN_DEBUG "ipv4_negative_advice: redirect to %pI4/%02x dropped\n",
1352 &rt->rt_dst, rt->rt_tos);
1353#endif
1354 rt_del(hash, rt); 1349 rt_del(hash, rt);
1355 ret = NULL; 1350 ret = NULL;
1356 } else if (rt->peer && 1351 } else if (rt->peer && peer_pmtu_expired(rt->peer)) {
1357 rt->peer->pmtu_expires && 1352 dst_metric_set(dst, RTAX_MTU, rt->peer->pmtu_orig);
1358 time_after_eq(jiffies, rt->peer->pmtu_expires)) {
1359 unsigned long orig = rt->peer->pmtu_expires;
1360
1361 if (cmpxchg(&rt->peer->pmtu_expires, orig, 0) == orig)
1362 dst_metric_set(dst, RTAX_MTU,
1363 rt->peer->pmtu_orig);
1364 } 1353 }
1365 } 1354 }
1366 return ret; 1355 return ret;
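peer_pmtu_expired() and peer_pmtu_cleaned() factor out a lockless claim on peer->pmtu_expires: read the field once, test it, then cmpxchg() it to zero, so exactly one of any number of racing callers performs the MTU rollback. A userspace model of the claim (plain integer time here; the kernel compares with time_after_eq() to stay correct across jiffies wraparound):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic unsigned long pmtu_expires;

static bool claim_expiry(unsigned long now)
{
	unsigned long orig = atomic_load(&pmtu_expires); /* ~ ACCESS_ONCE */

	return orig && now >= orig &&
	       atomic_compare_exchange_strong(&pmtu_expires, &orig, 0);
}

int main(void)
{
	atomic_store(&pmtu_expires, 100);
	printf("first claim: %d\n", claim_expiry(150));	 /* 1: we won */
	printf("second claim: %d\n", claim_expiry(150)); /* 0: already 0 */
	return 0;
}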
@@ -1399,7 +1388,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
1399 rcu_read_unlock(); 1388 rcu_read_unlock();
1400 1389
1401 if (!rt->peer) 1390 if (!rt->peer)
1402 rt_bind_peer(rt, 1); 1391 rt_bind_peer(rt, rt->rt_dst, 1);
1403 peer = rt->peer; 1392 peer = rt->peer;
1404 if (!peer) { 1393 if (!peer) {
1405 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway); 1394 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
@@ -1435,7 +1424,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
1435 peer->rate_tokens == ip_rt_redirect_number && 1424 peer->rate_tokens == ip_rt_redirect_number &&
1436 net_ratelimit()) 1425 net_ratelimit())
1437 printk(KERN_WARNING "host %pI4/if%d ignores redirects for %pI4 to %pI4.\n", 1426 printk(KERN_WARNING "host %pI4/if%d ignores redirects for %pI4 to %pI4.\n",
1438 &rt->rt_src, rt->rt_iif, 1427 &ip_hdr(skb)->saddr, rt->rt_iif,
1439 &rt->rt_dst, &rt->rt_gateway); 1428 &rt->rt_dst, &rt->rt_gateway);
1440#endif 1429#endif
1441 } 1430 }
@@ -1467,7 +1456,7 @@ static int ip_error(struct sk_buff *skb)
1467 } 1456 }
1468 1457
1469 if (!rt->peer) 1458 if (!rt->peer)
1470 rt_bind_peer(rt, 1); 1459 rt_bind_peer(rt, rt->rt_dst, 1);
1471 peer = rt->peer; 1460 peer = rt->peer;
1472 1461
1473 send = true; 1462 send = true;
@@ -1507,7 +1496,7 @@ static inline unsigned short guess_mtu(unsigned short old_mtu)
1507 return 68; 1496 return 68;
1508} 1497}
1509 1498
1510unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph, 1499unsigned short ip_rt_frag_needed(struct net *net, const struct iphdr *iph,
1511 unsigned short new_mtu, 1500 unsigned short new_mtu,
1512 struct net_device *dev) 1501 struct net_device *dev)
1513{ 1502{
@@ -1553,8 +1542,10 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
1553 1542
1554static void check_peer_pmtu(struct dst_entry *dst, struct inet_peer *peer) 1543static void check_peer_pmtu(struct dst_entry *dst, struct inet_peer *peer)
1555{ 1544{
1556 unsigned long expires = peer->pmtu_expires; 1545 unsigned long expires = ACCESS_ONCE(peer->pmtu_expires);
1557 1546
1547 if (!expires)
1548 return;
1558 if (time_before(jiffies, expires)) { 1549 if (time_before(jiffies, expires)) {
1559 u32 orig_dst_mtu = dst_mtu(dst); 1550 u32 orig_dst_mtu = dst_mtu(dst);
1560 if (peer->pmtu_learned < orig_dst_mtu) { 1551 if (peer->pmtu_learned < orig_dst_mtu) {
@@ -1574,13 +1565,14 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
1574 dst_confirm(dst); 1565 dst_confirm(dst);
1575 1566
1576 if (!rt->peer) 1567 if (!rt->peer)
1577 rt_bind_peer(rt, 1); 1568 rt_bind_peer(rt, rt->rt_dst, 1);
1578 peer = rt->peer; 1569 peer = rt->peer;
1579 if (peer) { 1570 if (peer) {
1571 unsigned long pmtu_expires = ACCESS_ONCE(peer->pmtu_expires);
1572
1580 if (mtu < ip_rt_min_pmtu) 1573 if (mtu < ip_rt_min_pmtu)
1581 mtu = ip_rt_min_pmtu; 1574 mtu = ip_rt_min_pmtu;
1582 if (!peer->pmtu_expires || mtu < peer->pmtu_learned) { 1575 if (!pmtu_expires || mtu < peer->pmtu_learned) {
1583 unsigned long pmtu_expires;
1584 1576
1585 pmtu_expires = jiffies + ip_rt_mtu_expires; 1577 pmtu_expires = jiffies + ip_rt_mtu_expires;
1586 if (!pmtu_expires) 1578 if (!pmtu_expires)
@@ -1631,16 +1623,17 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
1631 struct inet_peer *peer; 1623 struct inet_peer *peer;
1632 1624
1633 if (!rt->peer) 1625 if (!rt->peer)
1634 rt_bind_peer(rt, 0); 1626 rt_bind_peer(rt, rt->rt_dst, 0);
1635 1627
1636 peer = rt->peer; 1628 peer = rt->peer;
1637 if (peer && peer->pmtu_expires) 1629 if (peer) {
1638 check_peer_pmtu(dst, peer); 1630 check_peer_pmtu(dst, peer);
1639 1631
1640 if (peer && peer->redirect_learned.a4 && 1632 if (peer->redirect_learned.a4 &&
1641 peer->redirect_learned.a4 != rt->rt_gateway) { 1633 peer->redirect_learned.a4 != rt->rt_gateway) {
1642 if (check_peer_redir(dst, peer)) 1634 if (check_peer_redir(dst, peer))
1643 return NULL; 1635 return NULL;
1636 }
1644 } 1637 }
1645 1638
1646 rt->rt_peer_genid = rt_peer_genid(); 1639 rt->rt_peer_genid = rt_peer_genid();
@@ -1671,14 +1664,8 @@ static void ipv4_link_failure(struct sk_buff *skb)
1671 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0); 1664 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
1672 1665
1673 rt = skb_rtable(skb); 1666 rt = skb_rtable(skb);
1674 if (rt && 1667 if (rt && rt->peer && peer_pmtu_cleaned(rt->peer))
1675 rt->peer && 1668 dst_metric_set(&rt->dst, RTAX_MTU, rt->peer->pmtu_orig);
1676 rt->peer->pmtu_expires) {
1677 unsigned long orig = rt->peer->pmtu_expires;
1678
1679 if (cmpxchg(&rt->peer->pmtu_expires, orig, 0) == orig)
1680 dst_metric_set(&rt->dst, RTAX_MTU, rt->peer->pmtu_orig);
1681 }
1682} 1669}
1683 1670
1684static int ip_rt_bug(struct sk_buff *skb) 1671static int ip_rt_bug(struct sk_buff *skb)
@@ -1687,6 +1674,7 @@ static int ip_rt_bug(struct sk_buff *skb)
1687 &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, 1674 &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
1688 skb->dev ? skb->dev->name : "?"); 1675 skb->dev ? skb->dev->name : "?");
1689 kfree_skb(skb); 1676 kfree_skb(skb);
1677 WARN_ON(1);
1690 return 0; 1678 return 0;
1691} 1679}
1692 1680
@@ -1699,22 +1687,26 @@ static int ip_rt_bug(struct sk_buff *skb)
1699 in IP options! 1687 in IP options!
1700 */ 1688 */
1701 1689
1702void ip_rt_get_source(u8 *addr, struct rtable *rt) 1690void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
1703{ 1691{
1704 __be32 src; 1692 __be32 src;
1705 struct fib_result res;
1706 1693
1707 if (rt_is_output_route(rt)) 1694 if (rt_is_output_route(rt))
1708 src = rt->rt_src; 1695 src = ip_hdr(skb)->saddr;
1709 else { 1696 else {
1710 struct flowi4 fl4 = { 1697 struct fib_result res;
1711 .daddr = rt->rt_key_dst, 1698 struct flowi4 fl4;
1712 .saddr = rt->rt_key_src, 1699 struct iphdr *iph;
1713 .flowi4_tos = rt->rt_tos, 1700
1714 .flowi4_oif = rt->rt_oif, 1701 iph = ip_hdr(skb);
1715 .flowi4_iif = rt->rt_iif, 1702
1716 .flowi4_mark = rt->rt_mark, 1703 memset(&fl4, 0, sizeof(fl4));
1717 }; 1704 fl4.daddr = iph->daddr;
1705 fl4.saddr = iph->saddr;
1706 fl4.flowi4_tos = iph->tos;
1707 fl4.flowi4_oif = rt->dst.dev->ifindex;
1708 fl4.flowi4_iif = skb->dev->ifindex;
1709 fl4.flowi4_mark = skb->mark;
1718 1710
1719 rcu_read_lock(); 1711 rcu_read_lock();
1720 if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res) == 0) 1712 if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res) == 0)
@@ -1767,7 +1759,7 @@ static unsigned int ipv4_default_mtu(const struct dst_entry *dst)
1767 return mtu; 1759 return mtu;
1768} 1760}
1769 1761
1770static void rt_init_metrics(struct rtable *rt, const struct flowi4 *oldflp4, 1762static void rt_init_metrics(struct rtable *rt, const struct flowi4 *fl4,
1771 struct fib_info *fi) 1763 struct fib_info *fi)
1772{ 1764{
1773 struct inet_peer *peer; 1765 struct inet_peer *peer;
@@ -1776,7 +1768,7 @@ static void rt_init_metrics(struct rtable *rt, const struct flowi4 *oldflp4,
1776 /* If a peer entry exists for this destination, we must hook 1768 /* If a peer entry exists for this destination, we must hook
1777 * it up in order to get at cached metrics. 1769 * it up in order to get at cached metrics.
1778 */ 1770 */
1779 if (oldflp4 && (oldflp4->flowi4_flags & FLOWI_FLAG_PRECOW_METRICS)) 1771 if (fl4 && (fl4->flowi4_flags & FLOWI_FLAG_PRECOW_METRICS))
1780 create = 1; 1772 create = 1;
1781 1773
1782 rt->peer = peer = inet_getpeer_v4(rt->rt_dst, create); 1774 rt->peer = peer = inet_getpeer_v4(rt->rt_dst, create);
@@ -1787,8 +1779,7 @@ static void rt_init_metrics(struct rtable *rt, const struct flowi4 *oldflp4,
1787 sizeof(u32) * RTAX_MAX); 1779 sizeof(u32) * RTAX_MAX);
1788 dst_init_metrics(&rt->dst, peer->metrics, false); 1780 dst_init_metrics(&rt->dst, peer->metrics, false);
1789 1781
1790 if (peer->pmtu_expires) 1782 check_peer_pmtu(&rt->dst, peer);
1791 check_peer_pmtu(&rt->dst, peer);
1792 if (peer->redirect_learned.a4 && 1783 if (peer->redirect_learned.a4 &&
1793 peer->redirect_learned.a4 != rt->rt_gateway) { 1784 peer->redirect_learned.a4 != rt->rt_gateway) {
1794 rt->rt_gateway = peer->redirect_learned.a4; 1785 rt->rt_gateway = peer->redirect_learned.a4;
@@ -1803,7 +1794,7 @@ static void rt_init_metrics(struct rtable *rt, const struct flowi4 *oldflp4,
1803 } 1794 }
1804} 1795}
1805 1796
1806static void rt_set_nexthop(struct rtable *rt, const struct flowi4 *oldflp4, 1797static void rt_set_nexthop(struct rtable *rt, const struct flowi4 *fl4,
1807 const struct fib_result *res, 1798 const struct fib_result *res,
1808 struct fib_info *fi, u16 type, u32 itag) 1799 struct fib_info *fi, u16 type, u32 itag)
1809{ 1800{
@@ -1813,7 +1804,7 @@ static void rt_set_nexthop(struct rtable *rt, const struct flowi4 *oldflp4,
1813 if (FIB_RES_GW(*res) && 1804 if (FIB_RES_GW(*res) &&
1814 FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK) 1805 FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
1815 rt->rt_gateway = FIB_RES_GW(*res); 1806 rt->rt_gateway = FIB_RES_GW(*res);
1816 rt_init_metrics(rt, oldflp4, fi); 1807 rt_init_metrics(rt, fl4, fi);
1817#ifdef CONFIG_IP_ROUTE_CLASSID 1808#ifdef CONFIG_IP_ROUTE_CLASSID
1818 dst->tclassid = FIB_RES_NH(*res).nh_tclassid; 1809 dst->tclassid = FIB_RES_NH(*res).nh_tclassid;
1819#endif 1810#endif
@@ -1830,20 +1821,15 @@ static void rt_set_nexthop(struct rtable *rt, const struct flowi4 *oldflp4,
1830#endif 1821#endif
1831 set_class_tag(rt, itag); 1822 set_class_tag(rt, itag);
1832#endif 1823#endif
1833 rt->rt_type = type;
1834} 1824}
1835 1825
1836static struct rtable *rt_dst_alloc(bool nopolicy, bool noxfrm) 1826static struct rtable *rt_dst_alloc(struct net_device *dev,
1827 bool nopolicy, bool noxfrm)
1837{ 1828{
1838 struct rtable *rt = dst_alloc(&ipv4_dst_ops, 1); 1829 return dst_alloc(&ipv4_dst_ops, dev, 1, -1,
1839 if (rt) { 1830 DST_HOST |
1840 rt->dst.obsolete = -1; 1831 (nopolicy ? DST_NOPOLICY : 0) |
1841 1832 (noxfrm ? DST_NOXFRM : 0));
1842 rt->dst.flags = DST_HOST |
1843 (nopolicy ? DST_NOPOLICY : 0) |
1844 (noxfrm ? DST_NOXFRM : 0);
1845 }
1846 return rt;
1847} 1833}
1848 1834
1849/* called in rcu_read_lock() section */ 1835/* called in rcu_read_lock() section */
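With dst_alloc() now taking the device, the initial refcount, the initial obsolete value (-1) and the DST flags, rt_dst_alloc() collapses to a single call and no site can forget the obsolete = -1 assignment the old open-coded version carried. The only logic left is folding two policy booleans into one flags word, sketched below (the DST_* values are illustrative, not the kernel's):

#include <stdio.h>

#define DST_HOST	0x01
#define DST_NOPOLICY	0x02
#define DST_NOXFRM	0x04

static unsigned int rt_alloc_flags(int nopolicy, int noxfrm)
{
	return DST_HOST |
	       (nopolicy ? DST_NOPOLICY : 0) |
	       (noxfrm ? DST_NOXFRM : 0);
}

int main(void)
{
	printf("flags=%#x\n", rt_alloc_flags(1, 0)); /* HOST|NOPOLICY */
	return 0;
}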
@@ -1871,36 +1857,38 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1871 goto e_inval; 1857 goto e_inval;
1872 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK); 1858 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
1873 } else { 1859 } else {
1874 err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst, 1860 err = fib_validate_source(skb, saddr, 0, tos, 0, dev, &spec_dst,
1875 &itag, 0); 1861 &itag);
1876 if (err < 0) 1862 if (err < 0)
1877 goto e_err; 1863 goto e_err;
1878 } 1864 }
1879 rth = rt_dst_alloc(IN_DEV_CONF_GET(in_dev, NOPOLICY), false); 1865 rth = rt_dst_alloc(init_net.loopback_dev,
1866 IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
1880 if (!rth) 1867 if (!rth)
1881 goto e_nobufs; 1868 goto e_nobufs;
1882 1869
1870#ifdef CONFIG_IP_ROUTE_CLASSID
1871 rth->dst.tclassid = itag;
1872#endif
1883 rth->dst.output = ip_rt_bug; 1873 rth->dst.output = ip_rt_bug;
1884 1874
1885 rth->rt_key_dst = daddr; 1875 rth->rt_key_dst = daddr;
1886 rth->rt_dst = daddr;
1887 rth->rt_tos = tos;
1888 rth->rt_mark = skb->mark;
1889 rth->rt_key_src = saddr; 1876 rth->rt_key_src = saddr;
1877 rth->rt_genid = rt_genid(dev_net(dev));
1878 rth->rt_flags = RTCF_MULTICAST;
1879 rth->rt_type = RTN_MULTICAST;
1880 rth->rt_key_tos = tos;
1881 rth->rt_dst = daddr;
1890 rth->rt_src = saddr; 1882 rth->rt_src = saddr;
1891#ifdef CONFIG_IP_ROUTE_CLASSID
1892 rth->dst.tclassid = itag;
1893#endif
1894 rth->rt_route_iif = dev->ifindex; 1883 rth->rt_route_iif = dev->ifindex;
1895 rth->rt_iif = dev->ifindex; 1884 rth->rt_iif = dev->ifindex;
1896 rth->dst.dev = init_net.loopback_dev;
1897 dev_hold(rth->dst.dev);
1898 rth->rt_oif = 0; 1885 rth->rt_oif = 0;
1886 rth->rt_mark = skb->mark;
1899 rth->rt_gateway = daddr; 1887 rth->rt_gateway = daddr;
1900 rth->rt_spec_dst= spec_dst; 1888 rth->rt_spec_dst= spec_dst;
1901 rth->rt_genid = rt_genid(dev_net(dev)); 1889 rth->rt_peer_genid = 0;
1902 rth->rt_flags = RTCF_MULTICAST; 1890 rth->peer = NULL;
1903 rth->rt_type = RTN_MULTICAST; 1891 rth->fi = NULL;
1904 if (our) { 1892 if (our) {
1905 rth->dst.input= ip_local_deliver; 1893 rth->dst.input= ip_local_deliver;
1906 rth->rt_flags |= RTCF_LOCAL; 1894 rth->rt_flags |= RTCF_LOCAL;
@@ -1914,9 +1902,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1914 1902
1915 hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev))); 1903 hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev)));
1916 rth = rt_intern_hash(hash, rth, skb, dev->ifindex); 1904 rth = rt_intern_hash(hash, rth, skb, dev->ifindex);
1917 err = 0; 1905 return IS_ERR(rth) ? PTR_ERR(rth) : 0;
1918 if (IS_ERR(rth))
1919 err = PTR_ERR(rth);
1920 1906
1921e_nobufs: 1907e_nobufs:
1922 return -ENOBUFS; 1908 return -ENOBUFS;
@@ -1981,8 +1967,8 @@ static int __mkroute_input(struct sk_buff *skb,
1981 } 1967 }
1982 1968
1983 1969
1984 err = fib_validate_source(saddr, daddr, tos, FIB_RES_OIF(*res), 1970 err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
1985 in_dev->dev, &spec_dst, &itag, skb->mark); 1971 in_dev->dev, &spec_dst, &itag);
1986 if (err < 0) { 1972 if (err < 0) {
1987 ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr, 1973 ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
1988 saddr); 1974 saddr);
@@ -2013,7 +1999,8 @@ static int __mkroute_input(struct sk_buff *skb,
2013 } 1999 }
2014 } 2000 }
2015 2001
2016 rth = rt_dst_alloc(IN_DEV_CONF_GET(in_dev, NOPOLICY), 2002 rth = rt_dst_alloc(out_dev->dev,
2003 IN_DEV_CONF_GET(in_dev, NOPOLICY),
2017 IN_DEV_CONF_GET(out_dev, NOXFRM)); 2004 IN_DEV_CONF_GET(out_dev, NOXFRM));
2018 if (!rth) { 2005 if (!rth) {
2019 err = -ENOBUFS; 2006 err = -ENOBUFS;
@@ -2021,27 +2008,28 @@ static int __mkroute_input(struct sk_buff *skb,
2021 } 2008 }
2022 2009
2023 rth->rt_key_dst = daddr; 2010 rth->rt_key_dst = daddr;
2024 rth->rt_dst = daddr;
2025 rth->rt_tos = tos;
2026 rth->rt_mark = skb->mark;
2027 rth->rt_key_src = saddr; 2011 rth->rt_key_src = saddr;
2012 rth->rt_genid = rt_genid(dev_net(rth->dst.dev));
2013 rth->rt_flags = flags;
2014 rth->rt_type = res->type;
2015 rth->rt_key_tos = tos;
2016 rth->rt_dst = daddr;
2028 rth->rt_src = saddr; 2017 rth->rt_src = saddr;
2029 rth->rt_gateway = daddr;
2030 rth->rt_route_iif = in_dev->dev->ifindex; 2018 rth->rt_route_iif = in_dev->dev->ifindex;
2031 rth->rt_iif = in_dev->dev->ifindex; 2019 rth->rt_iif = in_dev->dev->ifindex;
2032 rth->dst.dev = (out_dev)->dev;
2033 dev_hold(rth->dst.dev);
2034 rth->rt_oif = 0; 2020 rth->rt_oif = 0;
2021 rth->rt_mark = skb->mark;
2022 rth->rt_gateway = daddr;
2035 rth->rt_spec_dst= spec_dst; 2023 rth->rt_spec_dst= spec_dst;
2024 rth->rt_peer_genid = 0;
2025 rth->peer = NULL;
2026 rth->fi = NULL;
2036 2027
2037 rth->dst.input = ip_forward; 2028 rth->dst.input = ip_forward;
2038 rth->dst.output = ip_output; 2029 rth->dst.output = ip_output;
2039 rth->rt_genid = rt_genid(dev_net(rth->dst.dev));
2040 2030
2041 rt_set_nexthop(rth, NULL, res, res->fi, res->type, itag); 2031 rt_set_nexthop(rth, NULL, res, res->fi, res->type, itag);
2042 2032
2043 rth->rt_flags = flags;
2044
2045 *result = rth; 2033 *result = rth;
2046 err = 0; 2034 err = 0;
2047 cleanup: 2035 cleanup:
@@ -2150,9 +2138,9 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2150 goto brd_input; 2138 goto brd_input;
2151 2139
2152 if (res.type == RTN_LOCAL) { 2140 if (res.type == RTN_LOCAL) {
2153 err = fib_validate_source(saddr, daddr, tos, 2141 err = fib_validate_source(skb, saddr, daddr, tos,
2154 net->loopback_dev->ifindex, 2142 net->loopback_dev->ifindex,
2155 dev, &spec_dst, &itag, skb->mark); 2143 dev, &spec_dst, &itag);
2156 if (err < 0) 2144 if (err < 0)
2157 goto martian_source_keep_err; 2145 goto martian_source_keep_err;
2158 if (err) 2146 if (err)
@@ -2176,8 +2164,8 @@ brd_input:
2176 if (ipv4_is_zeronet(saddr)) 2164 if (ipv4_is_zeronet(saddr))
2177 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK); 2165 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
2178 else { 2166 else {
2179 err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst, 2167 err = fib_validate_source(skb, saddr, 0, tos, 0, dev, &spec_dst,
2180 &itag, skb->mark); 2168 &itag);
2181 if (err < 0) 2169 if (err < 0)
2182 goto martian_source_keep_err; 2170 goto martian_source_keep_err;
2183 if (err) 2171 if (err)
@@ -2188,36 +2176,42 @@ brd_input:
2188 RT_CACHE_STAT_INC(in_brd); 2176 RT_CACHE_STAT_INC(in_brd);
2189 2177
2190local_input: 2178local_input:
2191 rth = rt_dst_alloc(IN_DEV_CONF_GET(in_dev, NOPOLICY), false); 2179 rth = rt_dst_alloc(net->loopback_dev,
2180 IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
2192 if (!rth) 2181 if (!rth)
2193 goto e_nobufs; 2182 goto e_nobufs;
2194 2183
2184 rth->dst.input= ip_local_deliver;
2195 rth->dst.output= ip_rt_bug; 2185 rth->dst.output= ip_rt_bug;
2196 rth->rt_genid = rt_genid(net); 2186#ifdef CONFIG_IP_ROUTE_CLASSID
2187 rth->dst.tclassid = itag;
2188#endif
2197 2189
2198 rth->rt_key_dst = daddr; 2190 rth->rt_key_dst = daddr;
2199 rth->rt_dst = daddr;
2200 rth->rt_tos = tos;
2201 rth->rt_mark = skb->mark;
2202 rth->rt_key_src = saddr; 2191 rth->rt_key_src = saddr;
2192 rth->rt_genid = rt_genid(net);
2193 rth->rt_flags = flags|RTCF_LOCAL;
2194 rth->rt_type = res.type;
2195 rth->rt_key_tos = tos;
2196 rth->rt_dst = daddr;
2203 rth->rt_src = saddr; 2197 rth->rt_src = saddr;
2204#ifdef CONFIG_IP_ROUTE_CLASSID 2198#ifdef CONFIG_IP_ROUTE_CLASSID
2205 rth->dst.tclassid = itag; 2199 rth->dst.tclassid = itag;
2206#endif 2200#endif
2207 rth->rt_route_iif = dev->ifindex; 2201 rth->rt_route_iif = dev->ifindex;
2208 rth->rt_iif = dev->ifindex; 2202 rth->rt_iif = dev->ifindex;
2209 rth->dst.dev = net->loopback_dev; 2203 rth->rt_oif = 0;
2210 dev_hold(rth->dst.dev); 2204 rth->rt_mark = skb->mark;
2211 rth->rt_gateway = daddr; 2205 rth->rt_gateway = daddr;
2212 rth->rt_spec_dst= spec_dst; 2206 rth->rt_spec_dst= spec_dst;
2213 rth->dst.input= ip_local_deliver; 2207 rth->rt_peer_genid = 0;
2214 rth->rt_flags = flags|RTCF_LOCAL; 2208 rth->peer = NULL;
2209 rth->fi = NULL;
2215 if (res.type == RTN_UNREACHABLE) { 2210 if (res.type == RTN_UNREACHABLE) {
2216 rth->dst.input= ip_error; 2211 rth->dst.input= ip_error;
2217 rth->dst.error= -err; 2212 rth->dst.error= -err;
2218 rth->rt_flags &= ~RTCF_LOCAL; 2213 rth->rt_flags &= ~RTCF_LOCAL;
2219 } 2214 }
2220 rth->rt_type = res.type;
2221 hash = rt_hash(daddr, saddr, fl4.flowi4_iif, rt_genid(net)); 2215 hash = rt_hash(daddr, saddr, fl4.flowi4_iif, rt_genid(net));
2222 rth = rt_intern_hash(hash, rth, skb, fl4.flowi4_iif); 2216 rth = rt_intern_hash(hash, rth, skb, fl4.flowi4_iif);
2223 err = 0; 2217 err = 0;
@@ -2288,7 +2282,7 @@ int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2288 ((__force u32)rth->rt_key_src ^ (__force u32)saddr) | 2282 ((__force u32)rth->rt_key_src ^ (__force u32)saddr) |
2289 (rth->rt_iif ^ iif) | 2283 (rth->rt_iif ^ iif) |
2290 rth->rt_oif | 2284 rth->rt_oif |
2291 (rth->rt_tos ^ tos)) == 0 && 2285 (rth->rt_key_tos ^ tos)) == 0 &&
2292 rth->rt_mark == skb->mark && 2286 rth->rt_mark == skb->mark &&
2293 net_eq(dev_net(rth->dst.dev), net) && 2287 net_eq(dev_net(rth->dst.dev), net) &&
2294 !rt_is_expired(rth)) { 2288 !rt_is_expired(rth)) {
@@ -2349,12 +2343,12 @@ EXPORT_SYMBOL(ip_route_input_common);
2349/* called with rcu_read_lock() */ 2343/* called with rcu_read_lock() */
2350static struct rtable *__mkroute_output(const struct fib_result *res, 2344static struct rtable *__mkroute_output(const struct fib_result *res,
2351 const struct flowi4 *fl4, 2345 const struct flowi4 *fl4,
2352 const struct flowi4 *oldflp4, 2346 __be32 orig_daddr, __be32 orig_saddr,
2353 struct net_device *dev_out, 2347 int orig_oif, struct net_device *dev_out,
2354 unsigned int flags) 2348 unsigned int flags)
2355{ 2349{
2356 struct fib_info *fi = res->fi; 2350 struct fib_info *fi = res->fi;
2357 u32 tos = RT_FL_TOS(oldflp4); 2351 u32 tos = RT_FL_TOS(fl4);
2358 struct in_device *in_dev; 2352 struct in_device *in_dev;
2359 u16 type = res->type; 2353 u16 type = res->type;
2360 struct rtable *rth; 2354 struct rtable *rth;
@@ -2381,8 +2375,8 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
2381 fi = NULL; 2375 fi = NULL;
2382 } else if (type == RTN_MULTICAST) { 2376 } else if (type == RTN_MULTICAST) {
2383 flags |= RTCF_MULTICAST | RTCF_LOCAL; 2377 flags |= RTCF_MULTICAST | RTCF_LOCAL;
2384 if (!ip_check_mc_rcu(in_dev, oldflp4->daddr, oldflp4->saddr, 2378 if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
2385 oldflp4->flowi4_proto)) 2379 fl4->flowi4_proto))
2386 flags &= ~RTCF_LOCAL; 2380 flags &= ~RTCF_LOCAL;
2387 /* If multicast route do not exist use 2381 /* If multicast route do not exist use
2388 * default one, but do not gateway in this case. 2382 * default one, but do not gateway in this case.
@@ -2392,29 +2386,31 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
2392 fi = NULL; 2386 fi = NULL;
2393 } 2387 }
2394 2388
2395 rth = rt_dst_alloc(IN_DEV_CONF_GET(in_dev, NOPOLICY), 2389 rth = rt_dst_alloc(dev_out,
2390 IN_DEV_CONF_GET(in_dev, NOPOLICY),
2396 IN_DEV_CONF_GET(in_dev, NOXFRM)); 2391 IN_DEV_CONF_GET(in_dev, NOXFRM));
2397 if (!rth) 2392 if (!rth)
2398 return ERR_PTR(-ENOBUFS); 2393 return ERR_PTR(-ENOBUFS);
2399 2394
2400 rth->rt_key_dst = oldflp4->daddr; 2395 rth->dst.output = ip_output;
2401 rth->rt_tos = tos; 2396
2402 rth->rt_key_src = oldflp4->saddr; 2397 rth->rt_key_dst = orig_daddr;
2403 rth->rt_oif = oldflp4->flowi4_oif; 2398 rth->rt_key_src = orig_saddr;
2404 rth->rt_mark = oldflp4->flowi4_mark; 2399 rth->rt_genid = rt_genid(dev_net(dev_out));
2400 rth->rt_flags = flags;
2401 rth->rt_type = type;
2402 rth->rt_key_tos = tos;
2405 rth->rt_dst = fl4->daddr; 2403 rth->rt_dst = fl4->daddr;
2406 rth->rt_src = fl4->saddr; 2404 rth->rt_src = fl4->saddr;
2407 rth->rt_route_iif = 0; 2405 rth->rt_route_iif = 0;
2408 rth->rt_iif = oldflp4->flowi4_oif ? : dev_out->ifindex; 2406 rth->rt_iif = orig_oif ? : dev_out->ifindex;
2409 /* get references to the devices that are to be hold by the routing 2407 rth->rt_oif = orig_oif;
2410 cache entry */ 2408 rth->rt_mark = fl4->flowi4_mark;
2411 rth->dst.dev = dev_out;
2412 dev_hold(dev_out);
2413 rth->rt_gateway = fl4->daddr; 2409 rth->rt_gateway = fl4->daddr;
2414 rth->rt_spec_dst= fl4->saddr; 2410 rth->rt_spec_dst= fl4->saddr;
2415 2411 rth->rt_peer_genid = 0;
2416 rth->dst.output=ip_output; 2412 rth->peer = NULL;
2417 rth->rt_genid = rt_genid(dev_net(dev_out)); 2413 rth->fi = NULL;
2418 2414
2419 RT_CACHE_STAT_INC(out_slow_tot); 2415 RT_CACHE_STAT_INC(out_slow_tot);
2420 2416
@@ -2432,7 +2428,7 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
2432#ifdef CONFIG_IP_MROUTE 2428#ifdef CONFIG_IP_MROUTE
2433 if (type == RTN_MULTICAST) { 2429 if (type == RTN_MULTICAST) {
2434 if (IN_DEV_MFORWARD(in_dev) && 2430 if (IN_DEV_MFORWARD(in_dev) &&
2435 !ipv4_is_local_multicast(oldflp4->daddr)) { 2431 !ipv4_is_local_multicast(fl4->daddr)) {
2436 rth->dst.input = ip_mr_input; 2432 rth->dst.input = ip_mr_input;
2437 rth->dst.output = ip_mc_output; 2433 rth->dst.output = ip_mc_output;
2438 } 2434 }
@@ -2440,9 +2436,8 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
2440#endif 2436#endif
2441 } 2437 }
2442 2438
2443 rt_set_nexthop(rth, oldflp4, res, fi, type, 0); 2439 rt_set_nexthop(rth, fl4, res, fi, type, 0);
2444 2440
2445 rth->rt_flags = flags;
2446 return rth; 2441 return rth;
2447} 2442}
2448 2443
@@ -2451,36 +2446,37 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
2451 * called with rcu_read_lock(); 2446 * called with rcu_read_lock();
2452 */ 2447 */
2453 2448
2454static struct rtable *ip_route_output_slow(struct net *net, 2449static struct rtable *ip_route_output_slow(struct net *net, struct flowi4 *fl4)
2455 const struct flowi4 *oldflp4)
2456{ 2450{
2457 u32 tos = RT_FL_TOS(oldflp4);
2458 struct flowi4 fl4;
2459 struct fib_result res;
2460 unsigned int flags = 0;
2461 struct net_device *dev_out = NULL; 2451 struct net_device *dev_out = NULL;
2452 u32 tos = RT_FL_TOS(fl4);
2453 unsigned int flags = 0;
2454 struct fib_result res;
2462 struct rtable *rth; 2455 struct rtable *rth;
2456 __be32 orig_daddr;
2457 __be32 orig_saddr;
2458 int orig_oif;
2463 2459
2464 res.fi = NULL; 2460 res.fi = NULL;
2465#ifdef CONFIG_IP_MULTIPLE_TABLES 2461#ifdef CONFIG_IP_MULTIPLE_TABLES
2466 res.r = NULL; 2462 res.r = NULL;
2467#endif 2463#endif
2468 2464
2469 fl4.flowi4_oif = oldflp4->flowi4_oif; 2465 orig_daddr = fl4->daddr;
2470 fl4.flowi4_iif = net->loopback_dev->ifindex; 2466 orig_saddr = fl4->saddr;
2471 fl4.flowi4_mark = oldflp4->flowi4_mark; 2467 orig_oif = fl4->flowi4_oif;
2472 fl4.daddr = oldflp4->daddr; 2468
2473 fl4.saddr = oldflp4->saddr; 2469 fl4->flowi4_iif = net->loopback_dev->ifindex;
2474 fl4.flowi4_tos = tos & IPTOS_RT_MASK; 2470 fl4->flowi4_tos = tos & IPTOS_RT_MASK;
2475 fl4.flowi4_scope = ((tos & RTO_ONLINK) ? 2471 fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
2476 RT_SCOPE_LINK : RT_SCOPE_UNIVERSE); 2472 RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);
2477 2473
2478 rcu_read_lock(); 2474 rcu_read_lock();
2479 if (oldflp4->saddr) { 2475 if (fl4->saddr) {
2480 rth = ERR_PTR(-EINVAL); 2476 rth = ERR_PTR(-EINVAL);
2481 if (ipv4_is_multicast(oldflp4->saddr) || 2477 if (ipv4_is_multicast(fl4->saddr) ||
2482 ipv4_is_lbcast(oldflp4->saddr) || 2478 ipv4_is_lbcast(fl4->saddr) ||
2483 ipv4_is_zeronet(oldflp4->saddr)) 2479 ipv4_is_zeronet(fl4->saddr))
2484 goto out; 2480 goto out;
2485 2481
2486 /* I removed check for oif == dev_out->oif here. 2482 /* I removed check for oif == dev_out->oif here.
@@ -2491,11 +2487,11 @@ static struct rtable *ip_route_output_slow(struct net *net,
2491 of another iface. --ANK 2487 of another iface. --ANK
2492 */ 2488 */
2493 2489
2494 if (oldflp4->flowi4_oif == 0 && 2490 if (fl4->flowi4_oif == 0 &&
2495 (ipv4_is_multicast(oldflp4->daddr) || 2491 (ipv4_is_multicast(fl4->daddr) ||
2496 ipv4_is_lbcast(oldflp4->daddr))) { 2492 ipv4_is_lbcast(fl4->daddr))) {
2497 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */ 2493 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2498 dev_out = __ip_dev_find(net, oldflp4->saddr, false); 2494 dev_out = __ip_dev_find(net, fl4->saddr, false);
2499 if (dev_out == NULL) 2495 if (dev_out == NULL)
2500 goto out; 2496 goto out;
2501 2497
@@ -2514,20 +2510,20 @@ static struct rtable *ip_route_output_slow(struct net *net,
2514 Luckily, this hack is good workaround. 2510 Luckily, this hack is good workaround.
2515 */ 2511 */
2516 2512
2517 fl4.flowi4_oif = dev_out->ifindex; 2513 fl4->flowi4_oif = dev_out->ifindex;
2518 goto make_route; 2514 goto make_route;
2519 } 2515 }
2520 2516
2521 if (!(oldflp4->flowi4_flags & FLOWI_FLAG_ANYSRC)) { 2517 if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
2522 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */ 2518 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2523 if (!__ip_dev_find(net, oldflp4->saddr, false)) 2519 if (!__ip_dev_find(net, fl4->saddr, false))
2524 goto out; 2520 goto out;
2525 } 2521 }
2526 } 2522 }
2527 2523
2528 2524
2529 if (oldflp4->flowi4_oif) { 2525 if (fl4->flowi4_oif) {
2530 dev_out = dev_get_by_index_rcu(net, oldflp4->flowi4_oif); 2526 dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
2531 rth = ERR_PTR(-ENODEV); 2527 rth = ERR_PTR(-ENODEV);
2532 if (dev_out == NULL) 2528 if (dev_out == NULL)
2533 goto out; 2529 goto out;
@@ -2537,37 +2533,37 @@ static struct rtable *ip_route_output_slow(struct net *net,
2537 rth = ERR_PTR(-ENETUNREACH); 2533 rth = ERR_PTR(-ENETUNREACH);
2538 goto out; 2534 goto out;
2539 } 2535 }
2540 if (ipv4_is_local_multicast(oldflp4->daddr) || 2536 if (ipv4_is_local_multicast(fl4->daddr) ||
2541 ipv4_is_lbcast(oldflp4->daddr)) { 2537 ipv4_is_lbcast(fl4->daddr)) {
2542 if (!fl4.saddr) 2538 if (!fl4->saddr)
2543 fl4.saddr = inet_select_addr(dev_out, 0, 2539 fl4->saddr = inet_select_addr(dev_out, 0,
2544 RT_SCOPE_LINK); 2540 RT_SCOPE_LINK);
2545 goto make_route; 2541 goto make_route;
2546 } 2542 }
2547 if (!fl4.saddr) { 2543 if (!fl4->saddr) {
2548 if (ipv4_is_multicast(oldflp4->daddr)) 2544 if (ipv4_is_multicast(fl4->daddr))
2549 fl4.saddr = inet_select_addr(dev_out, 0, 2545 fl4->saddr = inet_select_addr(dev_out, 0,
2550 fl4.flowi4_scope); 2546 fl4->flowi4_scope);
2551 else if (!oldflp4->daddr) 2547 else if (!fl4->daddr)
2552 fl4.saddr = inet_select_addr(dev_out, 0, 2548 fl4->saddr = inet_select_addr(dev_out, 0,
2553 RT_SCOPE_HOST); 2549 RT_SCOPE_HOST);
2554 } 2550 }
2555 } 2551 }
2556 2552
2557 if (!fl4.daddr) { 2553 if (!fl4->daddr) {
2558 fl4.daddr = fl4.saddr; 2554 fl4->daddr = fl4->saddr;
2559 if (!fl4.daddr) 2555 if (!fl4->daddr)
2560 fl4.daddr = fl4.saddr = htonl(INADDR_LOOPBACK); 2556 fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
2561 dev_out = net->loopback_dev; 2557 dev_out = net->loopback_dev;
2562 fl4.flowi4_oif = net->loopback_dev->ifindex; 2558 fl4->flowi4_oif = net->loopback_dev->ifindex;
2563 res.type = RTN_LOCAL; 2559 res.type = RTN_LOCAL;
2564 flags |= RTCF_LOCAL; 2560 flags |= RTCF_LOCAL;
2565 goto make_route; 2561 goto make_route;
2566 } 2562 }
2567 2563
2568 if (fib_lookup(net, &fl4, &res)) { 2564 if (fib_lookup(net, fl4, &res)) {
2569 res.fi = NULL; 2565 res.fi = NULL;
2570 if (oldflp4->flowi4_oif) { 2566 if (fl4->flowi4_oif) {
2571 /* Apparently, routing tables are wrong. Assume, 2567 /* Apparently, routing tables are wrong. Assume,
2572 that the destination is on link. 2568 that the destination is on link.
2573 2569
@@ -2586,9 +2582,9 @@ static struct rtable *ip_route_output_slow(struct net *net,
2586 likely IPv6, but we do not. 2582 likely IPv6, but we do not.
2587 */ 2583 */
2588 2584
2589 if (fl4.saddr == 0) 2585 if (fl4->saddr == 0)
2590 fl4.saddr = inet_select_addr(dev_out, 0, 2586 fl4->saddr = inet_select_addr(dev_out, 0,
2591 RT_SCOPE_LINK); 2587 RT_SCOPE_LINK);
2592 res.type = RTN_UNICAST; 2588 res.type = RTN_UNICAST;
2593 goto make_route; 2589 goto make_route;
2594 } 2590 }
@@ -2597,42 +2593,45 @@ static struct rtable *ip_route_output_slow(struct net *net,
2597 } 2593 }
2598 2594
2599 if (res.type == RTN_LOCAL) { 2595 if (res.type == RTN_LOCAL) {
2600 if (!fl4.saddr) { 2596 if (!fl4->saddr) {
2601 if (res.fi->fib_prefsrc) 2597 if (res.fi->fib_prefsrc)
2602 fl4.saddr = res.fi->fib_prefsrc; 2598 fl4->saddr = res.fi->fib_prefsrc;
2603 else 2599 else
2604 fl4.saddr = fl4.daddr; 2600 fl4->saddr = fl4->daddr;
2605 } 2601 }
2606 dev_out = net->loopback_dev; 2602 dev_out = net->loopback_dev;
2607 fl4.flowi4_oif = dev_out->ifindex; 2603 fl4->flowi4_oif = dev_out->ifindex;
2608 res.fi = NULL; 2604 res.fi = NULL;
2609 flags |= RTCF_LOCAL; 2605 flags |= RTCF_LOCAL;
2610 goto make_route; 2606 goto make_route;
2611 } 2607 }
2612 2608
2613#ifdef CONFIG_IP_ROUTE_MULTIPATH 2609#ifdef CONFIG_IP_ROUTE_MULTIPATH
2614 if (res.fi->fib_nhs > 1 && fl4.flowi4_oif == 0) 2610 if (res.fi->fib_nhs > 1 && fl4->flowi4_oif == 0)
2615 fib_select_multipath(&res); 2611 fib_select_multipath(&res);
2616 else 2612 else
2617#endif 2613#endif
2618 if (!res.prefixlen && res.type == RTN_UNICAST && !fl4.flowi4_oif) 2614 if (!res.prefixlen &&
2615 res.table->tb_num_default > 1 &&
2616 res.type == RTN_UNICAST && !fl4->flowi4_oif)
2619 fib_select_default(&res); 2617 fib_select_default(&res);
2620 2618
2621 if (!fl4.saddr) 2619 if (!fl4->saddr)
2622 fl4.saddr = FIB_RES_PREFSRC(net, res); 2620 fl4->saddr = FIB_RES_PREFSRC(net, res);
2623 2621
2624 dev_out = FIB_RES_DEV(res); 2622 dev_out = FIB_RES_DEV(res);
2625 fl4.flowi4_oif = dev_out->ifindex; 2623 fl4->flowi4_oif = dev_out->ifindex;
2626 2624
2627 2625
2628make_route: 2626make_route:
2629 rth = __mkroute_output(&res, &fl4, oldflp4, dev_out, flags); 2627 rth = __mkroute_output(&res, fl4, orig_daddr, orig_saddr, orig_oif,
2628 dev_out, flags);
2630 if (!IS_ERR(rth)) { 2629 if (!IS_ERR(rth)) {
2631 unsigned int hash; 2630 unsigned int hash;
2632 2631
2633 hash = rt_hash(oldflp4->daddr, oldflp4->saddr, oldflp4->flowi4_oif, 2632 hash = rt_hash(orig_daddr, orig_saddr, orig_oif,
2634 rt_genid(dev_net(dev_out))); 2633 rt_genid(dev_net(dev_out)));
2635 rth = rt_intern_hash(hash, rth, NULL, oldflp4->flowi4_oif); 2634 rth = rt_intern_hash(hash, rth, NULL, orig_oif);
2636 } 2635 }
2637 2636
2638out: 2637out:
@@ -2640,7 +2639,7 @@ out:
2640 return rth; 2639 return rth;
2641} 2640}
2642 2641
2643struct rtable *__ip_route_output_key(struct net *net, const struct flowi4 *flp4) 2642struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *flp4)
2644{ 2643{
2645 struct rtable *rth; 2644 struct rtable *rth;
2646 unsigned int hash; 2645 unsigned int hash;
@@ -2658,13 +2657,17 @@ struct rtable *__ip_route_output_key(struct net *net, const struct flowi4 *flp4)
2658 rt_is_output_route(rth) && 2657 rt_is_output_route(rth) &&
2659 rth->rt_oif == flp4->flowi4_oif && 2658 rth->rt_oif == flp4->flowi4_oif &&
2660 rth->rt_mark == flp4->flowi4_mark && 2659 rth->rt_mark == flp4->flowi4_mark &&
2661 !((rth->rt_tos ^ flp4->flowi4_tos) & 2660 !((rth->rt_key_tos ^ flp4->flowi4_tos) &
2662 (IPTOS_RT_MASK | RTO_ONLINK)) && 2661 (IPTOS_RT_MASK | RTO_ONLINK)) &&
2663 net_eq(dev_net(rth->dst.dev), net) && 2662 net_eq(dev_net(rth->dst.dev), net) &&
2664 !rt_is_expired(rth)) { 2663 !rt_is_expired(rth)) {
2665 dst_use(&rth->dst, jiffies); 2664 dst_use(&rth->dst, jiffies);
2666 RT_CACHE_STAT_INC(out_hit); 2665 RT_CACHE_STAT_INC(out_hit);
2667 rcu_read_unlock_bh(); 2666 rcu_read_unlock_bh();
2667 if (!flp4->saddr)
2668 flp4->saddr = rth->rt_src;
2669 if (!flp4->daddr)
2670 flp4->daddr = rth->rt_dst;
2668 return rth; 2671 return rth;
2669 } 2672 }
2670 RT_CACHE_STAT_INC(out_hlist_search); 2673 RT_CACHE_STAT_INC(out_hlist_search);
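The hunks above change the calling convention for output routes: ip_route_output_slow() edits the caller's flowi4 in place (filling in the source address it selects), while orig_daddr/orig_saddr/orig_oif preserve the caller's values for the cache hash and the cached key fields, and the cache-hit path writes the chosen addresses back into the key the same way. That writeback is what lets ip_route_output_flow() drop its own saddr/daddr fixups a few hunks below. A userspace sketch of the save-originals-then-mutate contract (hash3 and resolve are illustrative):

#include <stdint.h>
#include <stdio.h>

struct key { uint32_t daddr, saddr; int oif; };

static unsigned hash3(uint32_t a, uint32_t b, int c)
{
	return (a ^ b ^ (unsigned)c) * 2654435761u; /* stand-in for rt_hash */
}

static unsigned resolve(struct key *k)
{
	uint32_t orig_daddr = k->daddr, orig_saddr = k->saddr;
	int orig_oif = k->oif;

	if (!k->saddr)
		k->saddr = 0x0a000001;	/* source picked by the lookup */

	return hash3(orig_daddr, orig_saddr, orig_oif);	/* not k->saddr! */
}

int main(void)
{
	struct key k = { .daddr = 0x08080808, .saddr = 0, .oif = 0 };
	unsigned h = resolve(&k);

	printf("hash=%08x chosen saddr=%08x\n", h, (unsigned)k.saddr);
	return 0;
}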
@@ -2709,7 +2712,7 @@ static struct dst_ops ipv4_dst_blackhole_ops = {
2709 2712
2710struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig) 2713struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
2711{ 2714{
2712 struct rtable *rt = dst_alloc(&ipv4_dst_blackhole_ops, 1); 2715 struct rtable *rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, 0, 0);
2713 struct rtable *ort = (struct rtable *) dst_orig; 2716 struct rtable *ort = (struct rtable *) dst_orig;
2714 2717
2715 if (rt) { 2718 if (rt) {
@@ -2726,7 +2729,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
2726 2729
2727 rt->rt_key_dst = ort->rt_key_dst; 2730 rt->rt_key_dst = ort->rt_key_dst;
2728 rt->rt_key_src = ort->rt_key_src; 2731 rt->rt_key_src = ort->rt_key_src;
2729 rt->rt_tos = ort->rt_tos; 2732 rt->rt_key_tos = ort->rt_key_tos;
2730 rt->rt_route_iif = ort->rt_route_iif; 2733 rt->rt_route_iif = ort->rt_route_iif;
2731 rt->rt_iif = ort->rt_iif; 2734 rt->rt_iif = ort->rt_iif;
2732 rt->rt_oif = ort->rt_oif; 2735 rt->rt_oif = ort->rt_oif;
@@ -2762,15 +2765,10 @@ struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
2762 if (IS_ERR(rt)) 2765 if (IS_ERR(rt))
2763 return rt; 2766 return rt;
2764 2767
2765 if (flp4->flowi4_proto) { 2768 if (flp4->flowi4_proto)
2766 if (!flp4->saddr)
2767 flp4->saddr = rt->rt_src;
2768 if (!flp4->daddr)
2769 flp4->daddr = rt->rt_dst;
2770 rt = (struct rtable *) xfrm_lookup(net, &rt->dst, 2769 rt = (struct rtable *) xfrm_lookup(net, &rt->dst,
2771 flowi4_to_flowi(flp4), 2770 flowi4_to_flowi(flp4),
2772 sk, 0); 2771 sk, 0);
2773 }
2774 2772
2775 return rt; 2773 return rt;
2776} 2774}
@@ -2783,7 +2781,8 @@ static int rt_fill_info(struct net *net,
2783 struct rtable *rt = skb_rtable(skb); 2781 struct rtable *rt = skb_rtable(skb);
2784 struct rtmsg *r; 2782 struct rtmsg *r;
2785 struct nlmsghdr *nlh; 2783 struct nlmsghdr *nlh;
2786 long expires; 2784 long expires = 0;
2785 const struct inet_peer *peer = rt->peer;
2787 u32 id = 0, ts = 0, tsage = 0, error; 2786 u32 id = 0, ts = 0, tsage = 0, error;
2788 2787
2789 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags); 2788 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
@@ -2794,7 +2793,7 @@ static int rt_fill_info(struct net *net,
2794 r->rtm_family = AF_INET; 2793 r->rtm_family = AF_INET;
2795 r->rtm_dst_len = 32; 2794 r->rtm_dst_len = 32;
2796 r->rtm_src_len = 0; 2795 r->rtm_src_len = 0;
2797 r->rtm_tos = rt->rt_tos; 2796 r->rtm_tos = rt->rt_key_tos;
2798 r->rtm_table = RT_TABLE_MAIN; 2797 r->rtm_table = RT_TABLE_MAIN;
2799 NLA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN); 2798 NLA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN);
2800 r->rtm_type = rt->rt_type; 2799 r->rtm_type = rt->rt_type;
@@ -2831,15 +2830,16 @@ static int rt_fill_info(struct net *net,
2831 NLA_PUT_BE32(skb, RTA_MARK, rt->rt_mark); 2830 NLA_PUT_BE32(skb, RTA_MARK, rt->rt_mark);
2832 2831
2833 error = rt->dst.error; 2832 error = rt->dst.error;
2834 expires = (rt->peer && rt->peer->pmtu_expires) ? 2833 if (peer) {
2835 rt->peer->pmtu_expires - jiffies : 0;
2836 if (rt->peer) {
2837 inet_peer_refcheck(rt->peer); 2834 inet_peer_refcheck(rt->peer);
2838 id = atomic_read(&rt->peer->ip_id_count) & 0xffff; 2835 id = atomic_read(&peer->ip_id_count) & 0xffff;
2839 if (rt->peer->tcp_ts_stamp) { 2836 if (peer->tcp_ts_stamp) {
2840 ts = rt->peer->tcp_ts; 2837 ts = peer->tcp_ts;
2841 tsage = get_seconds() - rt->peer->tcp_ts_stamp; 2838 tsage = get_seconds() - peer->tcp_ts_stamp;
2842 } 2839 }
2840 expires = ACCESS_ONCE(peer->pmtu_expires);
2841 if (expires)
2842 expires -= jiffies;
2843 } 2843 }
2844 2844
2845 if (rt_is_input_route(rt)) { 2845 if (rt_is_input_route(rt)) {
@@ -2848,7 +2848,9 @@ static int rt_fill_info(struct net *net,
2848 2848
2849 if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) && 2849 if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
2850 IPV4_DEVCONF_ALL(net, MC_FORWARDING)) { 2850 IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
2851 int err = ipmr_get_route(net, skb, r, nowait); 2851 int err = ipmr_get_route(net, skb,
2852 rt->rt_src, rt->rt_dst,
2853 r, nowait);
2852 if (err <= 0) { 2854 if (err <= 0) {
2853 if (!nowait) { 2855 if (!nowait) {
2854 if (err == 0) 2856 if (err == 0)
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 8b44c6d2a79b..26461492a847 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -321,10 +321,10 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
321 * the ACK carries the same options again (see RFC1122 4.2.3.8) 321 * the ACK carries the same options again (see RFC1122 4.2.3.8)
322 */ 322 */
323 if (opt && opt->optlen) { 323 if (opt && opt->optlen) {
324 int opt_size = sizeof(struct ip_options) + opt->optlen; 324 int opt_size = sizeof(struct ip_options_rcu) + opt->optlen;
325 325
326 ireq->opt = kmalloc(opt_size, GFP_ATOMIC); 326 ireq->opt = kmalloc(opt_size, GFP_ATOMIC);
327 if (ireq->opt != NULL && ip_options_echo(ireq->opt, skb)) { 327 if (ireq->opt != NULL && ip_options_echo(&ireq->opt->opt, skb)) {
328 kfree(ireq->opt); 328 kfree(ireq->opt);
329 ireq->opt = NULL; 329 ireq->opt = NULL;
330 } 330 }
@@ -345,17 +345,13 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
345 * no easy way to do this. 345 * no easy way to do this.
346 */ 346 */
347 { 347 {
348 struct flowi4 fl4 = { 348 struct flowi4 fl4;
349 .flowi4_mark = sk->sk_mark, 349
350 .daddr = ((opt && opt->srr) ? 350 flowi4_init_output(&fl4, 0, sk->sk_mark, RT_CONN_FLAGS(sk),
351 opt->faddr : ireq->rmt_addr), 351 RT_SCOPE_UNIVERSE, IPPROTO_TCP,
352 .saddr = ireq->loc_addr, 352 inet_sk_flowi_flags(sk),
353 .flowi4_tos = RT_CONN_FLAGS(sk), 353 (opt && opt->srr) ? opt->faddr : ireq->rmt_addr,
354 .flowi4_proto = IPPROTO_TCP, 354 ireq->loc_addr, th->source, th->dest);
355 .flowi4_flags = inet_sk_flowi_flags(sk),
356 .fl4_sport = th->dest,
357 .fl4_dport = th->source,
358 };
359 security_req_classify_flow(req, flowi4_to_flowi(&fl4)); 355 security_req_classify_flow(req, flowi4_to_flowi(&fl4));
360 rt = ip_route_output_key(sock_net(sk), &fl4); 356 rt = ip_route_output_key(sock_net(sk), &fl4);
361 if (IS_ERR(rt)) { 357 if (IS_ERR(rt)) {
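One detail worth flagging in the rewrite above: flowi4_init_output() takes daddr/saddr before dport/sport, and because this builds the route for the *reply* to a SYN, the destination port is th->source and the source port is th->dest, exactly matching the removed .fl4_sport/.fl4_dport initializers. A tiny sketch of the swap (reply_ports and the th_* names are illustrative):

#include <stdint.h>
#include <stdio.h>

struct reply_key { uint16_t sport, dport; };

static struct reply_key reply_ports(uint16_t th_source, uint16_t th_dest)
{
	/* matches flowi4_init_output(..., dport = th->source,
	 *                                 sport = th->dest) above */
	return (struct reply_key){ .sport = th_dest, .dport = th_source };
}

int main(void)
{
	struct reply_key k = reply_ports(51515, 80); /* SYN from :51515 */

	printf("reply sport=%u dport=%u\n", (unsigned)k.sport,
	       (unsigned)k.dport);
	return 0;
}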
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 321e6e84dbcc..57d0752e239a 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -13,6 +13,7 @@
13#include <linux/seqlock.h> 13#include <linux/seqlock.h>
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/nsproxy.h>
16#include <net/snmp.h> 17#include <net/snmp.h>
17#include <net/icmp.h> 18#include <net/icmp.h>
18#include <net/ip.h> 19#include <net/ip.h>
@@ -21,6 +22,7 @@
21#include <net/udp.h> 22#include <net/udp.h>
22#include <net/cipso_ipv4.h> 23#include <net/cipso_ipv4.h>
23#include <net/inet_frag.h> 24#include <net/inet_frag.h>
25#include <net/ping.h>
24 26
25static int zero; 27static int zero;
26static int tcp_retr1_max = 255; 28static int tcp_retr1_max = 255;
@@ -30,6 +32,8 @@ static int tcp_adv_win_scale_min = -31;
30static int tcp_adv_win_scale_max = 31; 32static int tcp_adv_win_scale_max = 31;
31static int ip_ttl_min = 1; 33static int ip_ttl_min = 1;
32static int ip_ttl_max = 255; 34static int ip_ttl_max = 255;
35static int ip_ping_group_range_min[] = { 0, 0 };
36static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
33 37
34/* Update system visible IP port range */ 38/* Update system visible IP port range */
35static void set_local_port_range(int range[2]) 39static void set_local_port_range(int range[2])
@@ -68,6 +72,53 @@ static int ipv4_local_port_range(ctl_table *table, int write,
68 return ret; 72 return ret;
69} 73}
70 74
75
76void inet_get_ping_group_range_table(struct ctl_table *table, gid_t *low, gid_t *high)
77{
78 gid_t *data = table->data;
79 unsigned seq;
80 do {
81 seq = read_seqbegin(&sysctl_local_ports.lock);
82
83 *low = data[0];
84 *high = data[1];
85 } while (read_seqretry(&sysctl_local_ports.lock, seq));
86}
87
88/* Update system visible IP port range */
89static void set_ping_group_range(struct ctl_table *table, int range[2])
90{
91 gid_t *data = table->data;
92 write_seqlock(&sysctl_local_ports.lock);
93 data[0] = range[0];
94 data[1] = range[1];
95 write_sequnlock(&sysctl_local_ports.lock);
96}
97
98/* Validate changes from /proc interface. */
99static int ipv4_ping_group_range(ctl_table *table, int write,
100 void __user *buffer,
101 size_t *lenp, loff_t *ppos)
102{
103 int ret;
104 gid_t range[2];
105 ctl_table tmp = {
106 .data = &range,
107 .maxlen = sizeof(range),
108 .mode = table->mode,
109 .extra1 = &ip_ping_group_range_min,
110 .extra2 = &ip_ping_group_range_max,
111 };
112
113 inet_get_ping_group_range_table(table, range, range + 1);
114 ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
115
116 if (write && ret == 0)
117 set_ping_group_range(table, range);
118
119 return ret;
120}
121
71static int proc_tcp_congestion_control(ctl_table *ctl, int write, 122static int proc_tcp_congestion_control(ctl_table *ctl, int write,
72 void __user *buffer, size_t *lenp, loff_t *ppos) 123 void __user *buffer, size_t *lenp, loff_t *ppos)
73{ 124{
@@ -677,6 +728,13 @@ static struct ctl_table ipv4_net_table[] = {
677 .mode = 0644, 728 .mode = 0644,
678 .proc_handler = proc_dointvec 729 .proc_handler = proc_dointvec
679 }, 730 },
731 {
732 .procname = "ping_group_range",
733 .data = &init_net.ipv4.sysctl_ping_group_range,
734 .maxlen = sizeof(init_net.ipv4.sysctl_ping_group_range),
735 .mode = 0644,
736 .proc_handler = ipv4_ping_group_range,
737 },
680 { } 738 { }
681}; 739};
682 740
@@ -711,8 +769,18 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
711 &net->ipv4.sysctl_icmp_ratemask; 769 &net->ipv4.sysctl_icmp_ratemask;
712 table[6].data = 770 table[6].data =
713 &net->ipv4.sysctl_rt_cache_rebuild_count; 771 &net->ipv4.sysctl_rt_cache_rebuild_count;
772 table[7].data =
773 &net->ipv4.sysctl_ping_group_range;
774
714 } 775 }
715 776
777 /*
778 * Sane defaults - nobody may create ping sockets.
779 * Boot scripts should set this to distro-specific group.
780 */
781 net->ipv4.sysctl_ping_group_range[0] = 1;
782 net->ipv4.sysctl_ping_group_range[1] = 0;
783
716 net->ipv4.sysctl_rt_cache_rebuild_count = 4; 784 net->ipv4.sysctl_rt_cache_rebuild_count = 4;
717 785
718 net->ipv4.ipv4_hdr = register_net_sysctl_table(net, 786 net->ipv4.ipv4_hdr = register_net_sysctl_table(net,
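The new ping_group_range sysctl reuses sysctl_local_ports.lock so readers see a consistent (low, high) pair, validates writes through proc_dointvec_minmax() against [0, GID_T_MAX], and defaults to (1, 0): an inverted, therefore empty, range, so no group may create unprivileged ping sockets until an administrator widens it (for example via sysctl net.ipv4.ping_group_range). A userspace model of why the inverted default denies everyone:

#include <stdbool.h>
#include <stdio.h>

static bool gid_in_ping_range(unsigned gid, unsigned lo, unsigned hi)
{
	return lo <= gid && gid <= hi;	/* empty whenever lo > hi */
}

int main(void)
{
	printf("%d\n", gid_in_ping_range(0, 1, 0));	  /* 0: default */
	printf("%d\n", gid_in_ping_range(100, 100, 100)); /* 1: after write */
	return 0;
}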
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index b22d45010545..46febcacb729 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -999,7 +999,8 @@ new_segment:
999 /* We have some space in skb head. Superb! */ 999 /* We have some space in skb head. Superb! */
1000 if (copy > skb_tailroom(skb)) 1000 if (copy > skb_tailroom(skb))
1001 copy = skb_tailroom(skb); 1001 copy = skb_tailroom(skb);
1002 if ((err = skb_add_data(skb, from, copy)) != 0) 1002 err = skb_add_data_nocache(sk, skb, from, copy);
1003 if (err)
1003 goto do_fault; 1004 goto do_fault;
1004 } else { 1005 } else {
1005 int merge = 0; 1006 int merge = 0;
@@ -1042,8 +1043,8 @@ new_segment:
1042 1043
1043 /* Time to copy data. We are close to 1044 /* Time to copy data. We are close to
1044 * the end! */ 1045 * the end! */
1045 err = skb_copy_to_page(sk, from, skb, page, 1046 err = skb_copy_to_page_nocache(sk, from, skb,
1046 off, copy); 1047 page, off, copy);
1047 if (err) { 1048 if (err) {
1048 /* If this page was new, give it to the 1049 /* If this page was new, give it to the
1049 * socket so it does not get leaked. 1050 * socket so it does not get leaked.
@@ -3219,7 +3220,7 @@ __setup("thash_entries=", set_thash_entries);
3219void __init tcp_init(void) 3220void __init tcp_init(void)
3220{ 3221{
3221 struct sk_buff *skb = NULL; 3222 struct sk_buff *skb = NULL;
3222 unsigned long nr_pages, limit; 3223 unsigned long limit;
3223 int i, max_share, cnt; 3224 int i, max_share, cnt;
3224 unsigned long jiffy = jiffies; 3225 unsigned long jiffy = jiffies;
3225 3226
@@ -3276,13 +3277,7 @@ void __init tcp_init(void)
3276 sysctl_tcp_max_orphans = cnt / 2; 3277 sysctl_tcp_max_orphans = cnt / 2;
3277 sysctl_max_syn_backlog = max(128, cnt / 256); 3278 sysctl_max_syn_backlog = max(128, cnt / 256);
3278 3279
3279 /* Set the pressure threshold to be a fraction of global memory that 3280 limit = nr_free_buffer_pages() / 8;
3280 * is up to 1/2 at 256 MB, decreasing toward zero with the amount of
3281 * memory, with a floor of 128 pages.
3282 */
3283 nr_pages = totalram_pages - totalhigh_pages;
3284 limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
3285 limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
3286 limit = max(limit, 128UL); 3281 limit = max(limit, 128UL);
3287 sysctl_tcp_mem[0] = limit / 4 * 3; 3282 sysctl_tcp_mem[0] = limit / 4 * 3;
3288 sysctl_tcp_mem[1] = limit; 3283 sysctl_tcp_mem[1] = limit;
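The removed block scaled the TCP memory pressure threshold by a quadratic function of low memory; the replacement simply takes nr_free_buffer_pages() / 8 with the same 128-page floor. A worked instance under an assumed input of 1,000,000 free 4 KiB pages (about 3.8 GiB of low memory):

#include <stdio.h>

int main(void)
{
	unsigned long free_pages = 1000000UL;	/* assumed input */
	unsigned long limit = free_pages / 8;	/* 125000 pages */

	if (limit < 128UL)
		limit = 128UL;			/* same floor as before */
	printf("tcp_mem[0]=%lu pages (~%lu MiB)\n",
	       limit / 4 * 3, limit / 4 * 3 * 4 / 1024);
	printf("tcp_mem[1]=%lu pages (~%lu MiB)\n",
	       limit, limit * 4 / 1024);
	return 0;
}

With those numbers the pressure points land at 93750 and 125000 pages, roughly 366 MiB and 488 MiB.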
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index f7e6c2c2d2bb..708dc203b034 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -146,13 +146,15 @@ EXPORT_SYMBOL_GPL(tcp_twsk_unique);
146/* This will initiate an outgoing connection. */ 146/* This will initiate an outgoing connection. */
147int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) 147int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
148{ 148{
149 struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
149 struct inet_sock *inet = inet_sk(sk); 150 struct inet_sock *inet = inet_sk(sk);
150 struct tcp_sock *tp = tcp_sk(sk); 151 struct tcp_sock *tp = tcp_sk(sk);
151 struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
152 __be16 orig_sport, orig_dport; 152 __be16 orig_sport, orig_dport;
153 struct rtable *rt;
154 __be32 daddr, nexthop; 153 __be32 daddr, nexthop;
154 struct flowi4 *fl4;
155 struct rtable *rt;
155 int err; 156 int err;
157 struct ip_options_rcu *inet_opt;
156 158
157 if (addr_len < sizeof(struct sockaddr_in)) 159 if (addr_len < sizeof(struct sockaddr_in))
158 return -EINVAL; 160 return -EINVAL;
@@ -161,15 +163,18 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
161 return -EAFNOSUPPORT; 163 return -EAFNOSUPPORT;
162 164
163 nexthop = daddr = usin->sin_addr.s_addr; 165 nexthop = daddr = usin->sin_addr.s_addr;
164 if (inet->opt && inet->opt->srr) { 166 inet_opt = rcu_dereference_protected(inet->inet_opt,
167 sock_owned_by_user(sk));
168 if (inet_opt && inet_opt->opt.srr) {
165 if (!daddr) 169 if (!daddr)
166 return -EINVAL; 170 return -EINVAL;
167 nexthop = inet->opt->faddr; 171 nexthop = inet_opt->opt.faddr;
168 } 172 }
169 173
170 orig_sport = inet->inet_sport; 174 orig_sport = inet->inet_sport;
171 orig_dport = usin->sin_port; 175 orig_dport = usin->sin_port;
172 rt = ip_route_connect(nexthop, inet->inet_saddr, 176 fl4 = &inet->cork.fl.u.ip4;
177 rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
173 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if, 178 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
174 IPPROTO_TCP, 179 IPPROTO_TCP,
175 orig_sport, orig_dport, sk, true); 180 orig_sport, orig_dport, sk, true);
@@ -185,11 +190,11 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
185 return -ENETUNREACH; 190 return -ENETUNREACH;
186 } 191 }
187 192
188 if (!inet->opt || !inet->opt->srr) 193 if (!inet_opt || !inet_opt->opt.srr)
189 daddr = rt->rt_dst; 194 daddr = fl4->daddr;
190 195
191 if (!inet->inet_saddr) 196 if (!inet->inet_saddr)
192 inet->inet_saddr = rt->rt_src; 197 inet->inet_saddr = fl4->saddr;
193 inet->inet_rcv_saddr = inet->inet_saddr; 198 inet->inet_rcv_saddr = inet->inet_saddr;
194 199
195 if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) { 200 if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
@@ -200,8 +205,8 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
200 } 205 }
201 206
202 if (tcp_death_row.sysctl_tw_recycle && 207 if (tcp_death_row.sysctl_tw_recycle &&
203 !tp->rx_opt.ts_recent_stamp && rt->rt_dst == daddr) { 208 !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr) {
204 struct inet_peer *peer = rt_get_peer(rt); 209 struct inet_peer *peer = rt_get_peer(rt, fl4->daddr);
205 /* 210 /*
206 * VJ's idea. We save last timestamp seen from 211 * VJ's idea. We save last timestamp seen from
207 * the destination in peer table, when entering state 212 * the destination in peer table, when entering state
@@ -221,8 +226,8 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
221 inet->inet_daddr = daddr; 226 inet->inet_daddr = daddr;
222 227
223 inet_csk(sk)->icsk_ext_hdr_len = 0; 228 inet_csk(sk)->icsk_ext_hdr_len = 0;
224 if (inet->opt) 229 if (inet_opt)
225 inet_csk(sk)->icsk_ext_hdr_len = inet->opt->optlen; 230 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
226 231
227 tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT; 232 tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
228 233
@@ -236,8 +241,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
236 if (err) 241 if (err)
237 goto failure; 242 goto failure;
238 243
239 rt = ip_route_newports(rt, IPPROTO_TCP, 244 rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
240 orig_sport, orig_dport,
241 inet->inet_sport, inet->inet_dport, sk); 245 inet->inet_sport, inet->inet_dport, sk);
242 if (IS_ERR(rt)) { 246 if (IS_ERR(rt)) {
243 err = PTR_ERR(rt); 247 err = PTR_ERR(rt);
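
The pattern in this hunk recurs throughout the series: the flow key moves into the socket's cork area, the lookup fills it in, and later code reads route-derived addresses back out of the flowi4 instead of out of rtable fields such as rt->rt_dst and rt->rt_src, which this series removes. A minimal sketch of the connect-side usage, with names taken from the hunk above:

	struct flowi4 *fl4 = &inet->cork.fl.u.ip4;
	struct rtable *rt;

	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP, orig_sport, orig_dport, sk, true);
	if (IS_ERR(rt))
		return PTR_ERR(rt);
	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;	/* source chosen by routing */
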
@@ -279,7 +283,7 @@ EXPORT_SYMBOL(tcp_v4_connect);
279/* 283/*
280 * This routine does path mtu discovery as defined in RFC1191. 284 * This routine does path mtu discovery as defined in RFC1191.
281 */ 285 */
282static void do_pmtu_discovery(struct sock *sk, struct iphdr *iph, u32 mtu) 286static void do_pmtu_discovery(struct sock *sk, const struct iphdr *iph, u32 mtu)
283{ 287{
284 struct dst_entry *dst; 288 struct dst_entry *dst;
285 struct inet_sock *inet = inet_sk(sk); 289 struct inet_sock *inet = inet_sk(sk);
@@ -341,7 +345,7 @@ static void do_pmtu_discovery(struct sock *sk, struct iphdr *iph, u32 mtu)
341 345
342void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) 346void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
343{ 347{
344 struct iphdr *iph = (struct iphdr *)icmp_skb->data; 348 const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
345 struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2)); 349 struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
346 struct inet_connection_sock *icsk; 350 struct inet_connection_sock *icsk;
347 struct tcp_sock *tp; 351 struct tcp_sock *tp;
@@ -647,7 +651,7 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
647 arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0; 651 arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
648 652
649 net = dev_net(skb_dst(skb)->dev); 653 net = dev_net(skb_dst(skb)->dev);
650 ip_send_reply(net->ipv4.tcp_sock, skb, 654 ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
651 &arg, arg.iov[0].iov_len); 655 &arg, arg.iov[0].iov_len);
652 656
653 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS); 657 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
@@ -722,7 +726,7 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
722 if (oif) 726 if (oif)
723 arg.bound_dev_if = oif; 727 arg.bound_dev_if = oif;
724 728
725 ip_send_reply(net->ipv4.tcp_sock, skb, 729 ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
726 &arg, arg.iov[0].iov_len); 730 &arg, arg.iov[0].iov_len);
727 731
728 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS); 732 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
@@ -765,11 +769,12 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
765 struct request_values *rvp) 769 struct request_values *rvp)
766{ 770{
767 const struct inet_request_sock *ireq = inet_rsk(req); 771 const struct inet_request_sock *ireq = inet_rsk(req);
772 struct flowi4 fl4;
768 int err = -1; 773 int err = -1;
769 struct sk_buff * skb; 774 struct sk_buff * skb;
770 775
771 /* First, grab a route. */ 776 /* First, grab a route. */
772 if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL) 777 if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
773 return -1; 778 return -1;
774 779
775 skb = tcp_make_synack(sk, dst, req, rvp); 780 skb = tcp_make_synack(sk, dst, req, rvp);
@@ -820,17 +825,18 @@ static void syn_flood_warning(const struct sk_buff *skb)
820/* 825/*
821 * Save and compile IPv4 options into the request_sock if needed. 826 * Save and compile IPv4 options into the request_sock if needed.
822 */ 827 */
823static struct ip_options *tcp_v4_save_options(struct sock *sk, 828static struct ip_options_rcu *tcp_v4_save_options(struct sock *sk,
824 struct sk_buff *skb) 829 struct sk_buff *skb)
825{ 830{
826 struct ip_options *opt = &(IPCB(skb)->opt); 831 const struct ip_options *opt = &(IPCB(skb)->opt);
827 struct ip_options *dopt = NULL; 832 struct ip_options_rcu *dopt = NULL;
828 833
829 if (opt && opt->optlen) { 834 if (opt && opt->optlen) {
830 int opt_size = optlength(opt); 835 int opt_size = sizeof(*dopt) + opt->optlen;
836
831 dopt = kmalloc(opt_size, GFP_ATOMIC); 837 dopt = kmalloc(opt_size, GFP_ATOMIC);
832 if (dopt) { 838 if (dopt) {
833 if (ip_options_echo(dopt, skb)) { 839 if (ip_options_echo(&dopt->opt, skb)) {
834 kfree(dopt); 840 kfree(dopt);
835 dopt = NULL; 841 dopt = NULL;
836 } 842 }
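
The sizeof(*dopt) + opt->optlen allocation follows from the shape of the new wrapper this series adds: the legacy ip_options sits behind an rcu_head, and the raw option bytes still trail the struct as before.

	struct ip_options_rcu {
		struct rcu_head	rcu;
		struct ip_options opt;	/* opt.__data holds up to 40 option bytes */
	};
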
@@ -1333,6 +1339,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1333 req->cookie_ts = tmp_opt.tstamp_ok; 1339 req->cookie_ts = tmp_opt.tstamp_ok;
1334 } else if (!isn) { 1340 } else if (!isn) {
1335 struct inet_peer *peer = NULL; 1341 struct inet_peer *peer = NULL;
1342 struct flowi4 fl4;
1336 1343
1337 /* VJ's idea. We save last timestamp seen 1344 /* VJ's idea. We save last timestamp seen
1338 * from the destination in peer table, when entering 1345 * from the destination in peer table, when entering
@@ -1345,9 +1352,9 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1345 */ 1352 */
1346 if (tmp_opt.saw_tstamp && 1353 if (tmp_opt.saw_tstamp &&
1347 tcp_death_row.sysctl_tw_recycle && 1354 tcp_death_row.sysctl_tw_recycle &&
1348 (dst = inet_csk_route_req(sk, req)) != NULL && 1355 (dst = inet_csk_route_req(sk, &fl4, req)) != NULL &&
1349 (peer = rt_get_peer((struct rtable *)dst)) != NULL && 1356 fl4.daddr == saddr &&
1350 peer->daddr.addr.a4 == saddr) { 1357 (peer = rt_get_peer((struct rtable *)dst, fl4.daddr)) != NULL) {
1351 inet_peer_refcheck(peer); 1358 inet_peer_refcheck(peer);
1352 if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL && 1359 if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
1353 (s32)(peer->tcp_ts - req->ts_recent) > 1360 (s32)(peer->tcp_ts - req->ts_recent) >
@@ -1411,19 +1418,16 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1411#ifdef CONFIG_TCP_MD5SIG 1418#ifdef CONFIG_TCP_MD5SIG
1412 struct tcp_md5sig_key *key; 1419 struct tcp_md5sig_key *key;
1413#endif 1420#endif
1421 struct ip_options_rcu *inet_opt;
1414 1422
1415 if (sk_acceptq_is_full(sk)) 1423 if (sk_acceptq_is_full(sk))
1416 goto exit_overflow; 1424 goto exit_overflow;
1417 1425
1418 if (!dst && (dst = inet_csk_route_req(sk, req)) == NULL)
1419 goto exit;
1420
1421 newsk = tcp_create_openreq_child(sk, req, skb); 1426 newsk = tcp_create_openreq_child(sk, req, skb);
1422 if (!newsk) 1427 if (!newsk)
1423 goto exit_nonewsk; 1428 goto exit_nonewsk;
1424 1429
1425 newsk->sk_gso_type = SKB_GSO_TCPV4; 1430 newsk->sk_gso_type = SKB_GSO_TCPV4;
1426 sk_setup_caps(newsk, dst);
1427 1431
1428 newtp = tcp_sk(newsk); 1432 newtp = tcp_sk(newsk);
1429 newinet = inet_sk(newsk); 1433 newinet = inet_sk(newsk);
@@ -1431,15 +1435,21 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1431 newinet->inet_daddr = ireq->rmt_addr; 1435 newinet->inet_daddr = ireq->rmt_addr;
1432 newinet->inet_rcv_saddr = ireq->loc_addr; 1436 newinet->inet_rcv_saddr = ireq->loc_addr;
1433 newinet->inet_saddr = ireq->loc_addr; 1437 newinet->inet_saddr = ireq->loc_addr;
1434 newinet->opt = ireq->opt; 1438 inet_opt = ireq->opt;
1439 rcu_assign_pointer(newinet->inet_opt, inet_opt);
1435 ireq->opt = NULL; 1440 ireq->opt = NULL;
1436 newinet->mc_index = inet_iif(skb); 1441 newinet->mc_index = inet_iif(skb);
1437 newinet->mc_ttl = ip_hdr(skb)->ttl; 1442 newinet->mc_ttl = ip_hdr(skb)->ttl;
1438 inet_csk(newsk)->icsk_ext_hdr_len = 0; 1443 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1439 if (newinet->opt) 1444 if (inet_opt)
1440 inet_csk(newsk)->icsk_ext_hdr_len = newinet->opt->optlen; 1445 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1441 newinet->inet_id = newtp->write_seq ^ jiffies; 1446 newinet->inet_id = newtp->write_seq ^ jiffies;
1442 1447
1448 if (!dst && (dst = inet_csk_route_child_sock(sk, newsk, req)) == NULL)
1449 goto put_and_exit;
1450
1451 sk_setup_caps(newsk, dst);
1452
1443 tcp_mtup_init(newsk); 1453 tcp_mtup_init(newsk);
1444 tcp_sync_mss(newsk, dst_mtu(dst)); 1454 tcp_sync_mss(newsk, dst_mtu(dst));
1445 newtp->advmss = dst_metric_advmss(dst); 1455 newtp->advmss = dst_metric_advmss(dst);
@@ -1467,10 +1477,8 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1467 } 1477 }
1468#endif 1478#endif
1469 1479
1470 if (__inet_inherit_port(sk, newsk) < 0) { 1480 if (__inet_inherit_port(sk, newsk) < 0)
1471 sock_put(newsk); 1481 goto put_and_exit;
1472 goto exit;
1473 }
1474 __inet_hash_nolisten(newsk, NULL); 1482 __inet_hash_nolisten(newsk, NULL);
1475 1483
1476 return newsk; 1484 return newsk;
@@ -1482,6 +1490,9 @@ exit_nonewsk:
1482exit: 1490exit:
1483 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS); 1491 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1484 return NULL; 1492 return NULL;
1493put_and_exit:
1494 sock_put(newsk);
1495 goto exit;
1485} 1496}
1486EXPORT_SYMBOL(tcp_v4_syn_recv_sock); 1497EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1487 1498
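
The route lookup that used to run before tcp_create_openreq_child() now runs afterwards, through a helper this series introduces, so it can key off the child socket's freshly assigned addresses and options rather than the listener's:

	struct dst_entry *inet_csk_route_child_sock(struct sock *sk,
						    struct sock *newsk,
						    const struct request_sock *req);

Any failure after the child exists funnels through the new put_and_exit label, which drops the extra reference before taking the common exit path.
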
@@ -1578,6 +1589,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1578 goto discard; 1589 goto discard;
1579 1590
1580 if (nsk != sk) { 1591 if (nsk != sk) {
1592 sock_rps_save_rxhash(nsk, skb->rxhash);
1581 if (tcp_child_process(sk, nsk, skb)) { 1593 if (tcp_child_process(sk, nsk, skb)) {
1582 rsk = nsk; 1594 rsk = nsk;
1583 goto reset; 1595 goto reset;
@@ -1764,12 +1776,13 @@ struct inet_peer *tcp_v4_get_peer(struct sock *sk, bool *release_it)
1764 struct inet_sock *inet = inet_sk(sk); 1776 struct inet_sock *inet = inet_sk(sk);
1765 struct inet_peer *peer; 1777 struct inet_peer *peer;
1766 1778
1767 if (!rt || rt->rt_dst != inet->inet_daddr) { 1779 if (!rt ||
1780 inet->cork.fl.u.ip4.daddr != inet->inet_daddr) {
1768 peer = inet_getpeer_v4(inet->inet_daddr, 1); 1781 peer = inet_getpeer_v4(inet->inet_daddr, 1);
1769 *release_it = true; 1782 *release_it = true;
1770 } else { 1783 } else {
1771 if (!rt->peer) 1784 if (!rt->peer)
1772 rt_bind_peer(rt, 1); 1785 rt_bind_peer(rt, inet->inet_daddr, 1);
1773 peer = rt->peer; 1786 peer = rt->peer;
1774 *release_it = false; 1787 *release_it = false;
1775 } 1788 }
@@ -2359,7 +2372,7 @@ static void get_openreq4(struct sock *sk, struct request_sock *req,
2359 int ttd = req->expires - jiffies; 2372 int ttd = req->expires - jiffies;
2360 2373
2361 seq_printf(f, "%4d: %08X:%04X %08X:%04X" 2374 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2362 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p%n", 2375 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n",
2363 i, 2376 i,
2364 ireq->loc_addr, 2377 ireq->loc_addr,
2365 ntohs(inet_sk(sk)->inet_sport), 2378 ntohs(inet_sk(sk)->inet_sport),
@@ -2414,7 +2427,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
2414 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0); 2427 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2415 2428
2416 seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX " 2429 seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2417 "%08X %5d %8d %lu %d %p %lu %lu %u %u %d%n", 2430 "%08X %5d %8d %lu %d %pK %lu %lu %u %u %d%n",
2418 i, src, srcp, dest, destp, sk->sk_state, 2431 i, src, srcp, dest, destp, sk->sk_state,
2419 tp->write_seq - tp->snd_una, 2432 tp->write_seq - tp->snd_una,
2420 rx_queue, 2433 rx_queue,
@@ -2449,7 +2462,7 @@ static void get_timewait4_sock(struct inet_timewait_sock *tw,
2449 srcp = ntohs(tw->tw_sport); 2462 srcp = ntohs(tw->tw_sport);
2450 2463
2451 seq_printf(f, "%4d: %08X:%04X %08X:%04X" 2464 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2452 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n", 2465 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
2453 i, src, srcp, dest, destp, tw->tw_substate, 0, 0, 2466 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2454 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0, 2467 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2455 atomic_read(&tw->tw_refcnt), tw, len); 2468 atomic_read(&tw->tw_refcnt), tw, len);
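
The %p to %pK conversions in these three seq_printf() formats are a hardening measure, not a functional change: %pK consults the kernel.kptr_restrict sysctl and prints an all-zero pointer to readers who lack CAP_SYSLOG, so /proc/net/tcp stops leaking socket addresses to unprivileged users. In sketch form:

	/* with kernel.kptr_restrict = 1:
	 *   privileged reader:   ffff880012345680
	 *   unprivileged reader: 0000000000000000
	 */
	seq_printf(f, "%pK\n", sk);
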
@@ -2527,7 +2540,7 @@ void tcp4_proc_exit(void)
2527 2540
2528struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb) 2541struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2529{ 2542{
2530 struct iphdr *iph = skb_gro_network_header(skb); 2543 const struct iphdr *iph = skb_gro_network_header(skb);
2531 2544
2532 switch (skb->ip_summed) { 2545 switch (skb->ip_summed) {
2533 case CHECKSUM_COMPLETE: 2546 case CHECKSUM_COMPLETE:
@@ -2548,7 +2561,7 @@ struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2548 2561
2549int tcp4_gro_complete(struct sk_buff *skb) 2562int tcp4_gro_complete(struct sk_buff *skb)
2550{ 2563{
2551 struct iphdr *iph = ip_hdr(skb); 2564 const struct iphdr *iph = ip_hdr(skb);
2552 struct tcphdr *th = tcp_hdr(skb); 2565 struct tcphdr *th = tcp_hdr(skb);
2553 2566
2554 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb), 2567 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
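
The const struct iphdr conversions here, like the const in6_addr ones later in this series, are a mechanical but useful cleanup: paths that only read the header take and hold const pointers, so the compiler rejects accidental stores. The usual shape:

	const struct iphdr *iph = ip_hdr(skb);	/* read-only view of the header */

	/* iph->saddr, iph->daddr etc. may be read; iph->ttl-- no longer compiles */
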
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 17388c7f49c4..882e0b0964d0 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -899,7 +899,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
899 TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS, 899 TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
900 tcp_skb_pcount(skb)); 900 tcp_skb_pcount(skb));
901 901
902 err = icsk->icsk_af_ops->queue_xmit(skb); 902 err = icsk->icsk_af_ops->queue_xmit(skb, &inet->cork.fl);
903 if (likely(err <= 0)) 903 if (likely(err <= 0))
904 return err; 904 return err;
905 905
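
With addresses migrating out of struct rtable, the transmit hook needs the flow handed to it explicitly. A sketch of the af-ops signature after this series:

	struct inet_connection_sock_af_ops {
		int	(*queue_xmit)(struct sk_buff *skb, struct flowi *fl);
		/* ... */
	};

TCP passes &inet->cork.fl, which tcp_v4_connect() populated above, so the IPv4 output path can rebuild a route from the stored key whenever the cached one has been invalidated.
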
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index f87a8eb76f3b..198f75b7bdd3 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -578,7 +578,7 @@ found:
578void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable) 578void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
579{ 579{
580 struct inet_sock *inet; 580 struct inet_sock *inet;
581 struct iphdr *iph = (struct iphdr *)skb->data; 581 const struct iphdr *iph = (const struct iphdr *)skb->data;
582 struct udphdr *uh = (struct udphdr *)(skb->data+(iph->ihl<<2)); 582 struct udphdr *uh = (struct udphdr *)(skb->data+(iph->ihl<<2));
583 const int type = icmp_hdr(skb)->type; 583 const int type = icmp_hdr(skb)->type;
584 const int code = icmp_hdr(skb)->code; 584 const int code = icmp_hdr(skb)->code;
@@ -706,12 +706,11 @@ static void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
706 } 706 }
707} 707}
708 708
709static int udp_send_skb(struct sk_buff *skb, __be32 daddr, __be32 dport) 709static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4)
710{ 710{
711 struct sock *sk = skb->sk; 711 struct sock *sk = skb->sk;
712 struct inet_sock *inet = inet_sk(sk); 712 struct inet_sock *inet = inet_sk(sk);
713 struct udphdr *uh; 713 struct udphdr *uh;
714 struct rtable *rt = (struct rtable *)skb_dst(skb);
715 int err = 0; 714 int err = 0;
716 int is_udplite = IS_UDPLITE(sk); 715 int is_udplite = IS_UDPLITE(sk);
717 int offset = skb_transport_offset(skb); 716 int offset = skb_transport_offset(skb);
@@ -723,7 +722,7 @@ static int udp_send_skb(struct sk_buff *skb, __be32 daddr, __be32 dport)
723 */ 722 */
724 uh = udp_hdr(skb); 723 uh = udp_hdr(skb);
725 uh->source = inet->inet_sport; 724 uh->source = inet->inet_sport;
726 uh->dest = dport; 725 uh->dest = fl4->fl4_dport;
727 uh->len = htons(len); 726 uh->len = htons(len);
728 uh->check = 0; 727 uh->check = 0;
729 728
@@ -737,14 +736,14 @@ static int udp_send_skb(struct sk_buff *skb, __be32 daddr, __be32 dport)
737 736
738 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */ 737 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
739 738
740 udp4_hwcsum(skb, rt->rt_src, daddr); 739 udp4_hwcsum(skb, fl4->saddr, fl4->daddr);
741 goto send; 740 goto send;
742 741
743 } else 742 } else
744 csum = udp_csum(skb); 743 csum = udp_csum(skb);
745 744
746 /* add protocol-dependent pseudo-header */ 745 /* add protocol-dependent pseudo-header */
747 uh->check = csum_tcpudp_magic(rt->rt_src, daddr, len, 746 uh->check = csum_tcpudp_magic(fl4->saddr, fl4->daddr, len,
748 sk->sk_protocol, csum); 747 sk->sk_protocol, csum);
749 if (uh->check == 0) 748 if (uh->check == 0)
750 uh->check = CSUM_MANGLED_0; 749 uh->check = CSUM_MANGLED_0;
@@ -774,11 +773,11 @@ static int udp_push_pending_frames(struct sock *sk)
774 struct sk_buff *skb; 773 struct sk_buff *skb;
775 int err = 0; 774 int err = 0;
776 775
777 skb = ip_finish_skb(sk); 776 skb = ip_finish_skb(sk, fl4);
778 if (!skb) 777 if (!skb)
779 goto out; 778 goto out;
780 779
781 err = udp_send_skb(skb, fl4->daddr, fl4->fl4_dport); 780 err = udp_send_skb(skb, fl4);
782 781
783out: 782out:
784 up->len = 0; 783 up->len = 0;
@@ -791,6 +790,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
791{ 790{
792 struct inet_sock *inet = inet_sk(sk); 791 struct inet_sock *inet = inet_sk(sk);
793 struct udp_sock *up = udp_sk(sk); 792 struct udp_sock *up = udp_sk(sk);
793 struct flowi4 fl4_stack;
794 struct flowi4 *fl4; 794 struct flowi4 *fl4;
795 int ulen = len; 795 int ulen = len;
796 struct ipcm_cookie ipc; 796 struct ipcm_cookie ipc;
@@ -804,6 +804,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
804 int corkreq = up->corkflag || msg->msg_flags&MSG_MORE; 804 int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
805 int (*getfrag)(void *, char *, int, int, int, struct sk_buff *); 805 int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
806 struct sk_buff *skb; 806 struct sk_buff *skb;
807 struct ip_options_data opt_copy;
807 808
808 if (len > 0xFFFF) 809 if (len > 0xFFFF)
809 return -EMSGSIZE; 810 return -EMSGSIZE;
@@ -820,6 +821,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
820 821
821 getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag; 822 getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
822 823
824 fl4 = &inet->cork.fl.u.ip4;
823 if (up->pending) { 825 if (up->pending) {
824 /* 826 /*
825 * There are pending frames. 827 * There are pending frames.
@@ -877,22 +879,32 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
877 free = 1; 879 free = 1;
878 connected = 0; 880 connected = 0;
879 } 881 }
880 if (!ipc.opt) 882 if (!ipc.opt) {
881 ipc.opt = inet->opt; 883 struct ip_options_rcu *inet_opt;
884
885 rcu_read_lock();
886 inet_opt = rcu_dereference(inet->inet_opt);
887 if (inet_opt) {
888 memcpy(&opt_copy, inet_opt,
889 sizeof(*inet_opt) + inet_opt->opt.optlen);
890 ipc.opt = &opt_copy.opt;
891 }
892 rcu_read_unlock();
893 }
882 894
883 saddr = ipc.addr; 895 saddr = ipc.addr;
884 ipc.addr = faddr = daddr; 896 ipc.addr = faddr = daddr;
885 897
886 if (ipc.opt && ipc.opt->srr) { 898 if (ipc.opt && ipc.opt->opt.srr) {
887 if (!daddr) 899 if (!daddr)
888 return -EINVAL; 900 return -EINVAL;
889 faddr = ipc.opt->faddr; 901 faddr = ipc.opt->opt.faddr;
890 connected = 0; 902 connected = 0;
891 } 903 }
892 tos = RT_TOS(inet->tos); 904 tos = RT_TOS(inet->tos);
893 if (sock_flag(sk, SOCK_LOCALROUTE) || 905 if (sock_flag(sk, SOCK_LOCALROUTE) ||
894 (msg->msg_flags & MSG_DONTROUTE) || 906 (msg->msg_flags & MSG_DONTROUTE) ||
895 (ipc.opt && ipc.opt->is_strictroute)) { 907 (ipc.opt && ipc.opt->opt.is_strictroute)) {
896 tos |= RTO_ONLINK; 908 tos |= RTO_ONLINK;
897 connected = 0; 909 connected = 0;
898 } 910 }
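
Because inet->inet_opt can now be replaced under RCU while udp_sendmsg() runs, the options are snapshotted onto the stack inside the rcu_read_lock() section above. The stack container this series adds reserves the maximum 40 bytes of IPv4 options behind the wrapper:

	struct ip_options_data {
		struct ip_options_rcu	opt;
		char			data[40];
	};
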
@@ -909,22 +921,16 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
909 rt = (struct rtable *)sk_dst_check(sk, 0); 921 rt = (struct rtable *)sk_dst_check(sk, 0);
910 922
911 if (rt == NULL) { 923 if (rt == NULL) {
912 struct flowi4 fl4 = {
913 .flowi4_oif = ipc.oif,
914 .flowi4_mark = sk->sk_mark,
915 .daddr = faddr,
916 .saddr = saddr,
917 .flowi4_tos = tos,
918 .flowi4_proto = sk->sk_protocol,
919 .flowi4_flags = (inet_sk_flowi_flags(sk) |
920 FLOWI_FLAG_CAN_SLEEP),
921 .fl4_sport = inet->inet_sport,
922 .fl4_dport = dport,
923 };
924 struct net *net = sock_net(sk); 924 struct net *net = sock_net(sk);
925 925
926 security_sk_classify_flow(sk, flowi4_to_flowi(&fl4)); 926 fl4 = &fl4_stack;
927 rt = ip_route_output_flow(net, &fl4, sk); 927 flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos,
928 RT_SCOPE_UNIVERSE, sk->sk_protocol,
929 inet_sk_flowi_flags(sk)|FLOWI_FLAG_CAN_SLEEP,
930 faddr, saddr, dport, inet->inet_sport);
931
932 security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
933 rt = ip_route_output_flow(net, fl4, sk);
928 if (IS_ERR(rt)) { 934 if (IS_ERR(rt)) {
929 err = PTR_ERR(rt); 935 err = PTR_ERR(rt);
930 rt = NULL; 936 rt = NULL;
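
flowi4_init_output() replaces the designated-initializer block with one call. The parameter order is worth spelling out, since destination comes before source for both the addresses and the ports; a sketch of the helper as added in include/net/flow.h:

	void flowi4_init_output(struct flowi4 *fl4, int oif, __u32 mark,
				__u8 tos, __u8 scope, __u8 proto, __u8 flags,
				__be32 daddr, __be32 saddr,
				__be16 dport, __be16 sport);
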
@@ -945,18 +951,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
945 goto do_confirm; 951 goto do_confirm;
946back_from_confirm: 952back_from_confirm:
947 953
948 saddr = rt->rt_src; 954 saddr = fl4->saddr;
949 if (!ipc.addr) 955 if (!ipc.addr)
950 daddr = ipc.addr = rt->rt_dst; 956 daddr = ipc.addr = fl4->daddr;
951 957
952 /* Lockless fast path for the non-corking case. */ 958 /* Lockless fast path for the non-corking case. */
953 if (!corkreq) { 959 if (!corkreq) {
954 skb = ip_make_skb(sk, getfrag, msg->msg_iov, ulen, 960 skb = ip_make_skb(sk, fl4, getfrag, msg->msg_iov, ulen,
955 sizeof(struct udphdr), &ipc, &rt, 961 sizeof(struct udphdr), &ipc, &rt,
956 msg->msg_flags); 962 msg->msg_flags);
957 err = PTR_ERR(skb); 963 err = PTR_ERR(skb);
958 if (skb && !IS_ERR(skb)) 964 if (skb && !IS_ERR(skb))
959 err = udp_send_skb(skb, daddr, dport); 965 err = udp_send_skb(skb, fl4);
960 goto out; 966 goto out;
961 } 967 }
962 968
@@ -982,9 +988,9 @@ back_from_confirm:
982 988
983do_append_data: 989do_append_data:
984 up->len += ulen; 990 up->len += ulen;
985 err = ip_append_data(sk, getfrag, msg->msg_iov, ulen, 991 err = ip_append_data(sk, fl4, getfrag, msg->msg_iov, ulen,
986 sizeof(struct udphdr), &ipc, &rt, 992 sizeof(struct udphdr), &ipc, &rt,
987 corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags); 993 corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags);
988 if (err) 994 if (err)
989 udp_flush_pending_frames(sk); 995 udp_flush_pending_frames(sk);
990 else if (!corkreq) 996 else if (!corkreq)
@@ -1024,6 +1030,7 @@ EXPORT_SYMBOL(udp_sendmsg);
1024int udp_sendpage(struct sock *sk, struct page *page, int offset, 1030int udp_sendpage(struct sock *sk, struct page *page, int offset,
1025 size_t size, int flags) 1031 size_t size, int flags)
1026{ 1032{
1033 struct inet_sock *inet = inet_sk(sk);
1027 struct udp_sock *up = udp_sk(sk); 1034 struct udp_sock *up = udp_sk(sk);
1028 int ret; 1035 int ret;
1029 1036
@@ -1048,7 +1055,8 @@ int udp_sendpage(struct sock *sk, struct page *page, int offset,
1048 return -EINVAL; 1055 return -EINVAL;
1049 } 1056 }
1050 1057
1051 ret = ip_append_page(sk, page, offset, size, flags); 1058 ret = ip_append_page(sk, &inet->cork.fl.u.ip4,
1059 page, offset, size, flags);
1052 if (ret == -EOPNOTSUPP) { 1060 if (ret == -EOPNOTSUPP) {
1053 release_sock(sk); 1061 release_sock(sk);
1054 return sock_no_sendpage(sk->sk_socket, page, offset, 1062 return sock_no_sendpage(sk->sk_socket, page, offset,
@@ -1241,6 +1249,9 @@ csum_copy_err:
1241 1249
1242 if (noblock) 1250 if (noblock)
1243 return -EAGAIN; 1251 return -EAGAIN;
1252
1253 /* starting over for a new packet */
1254 msg->msg_flags &= ~MSG_TRUNC;
1244 goto try_again; 1255 goto try_again;
1245} 1256}
1246 1257
@@ -2082,7 +2093,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
2082 __u16 srcp = ntohs(inet->inet_sport); 2093 __u16 srcp = ntohs(inet->inet_sport);
2083 2094
2084 seq_printf(f, "%5d: %08X:%04X %08X:%04X" 2095 seq_printf(f, "%5d: %08X:%04X %08X:%04X"
2085 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d%n", 2096 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d%n",
2086 bucket, src, srcp, dest, destp, sp->sk_state, 2097 bucket, src, srcp, dest, destp, sp->sk_state,
2087 sk_wmem_alloc_get(sp), 2098 sk_wmem_alloc_get(sp),
2088 sk_rmem_alloc_get(sp), 2099 sk_rmem_alloc_get(sp),
@@ -2198,16 +2209,10 @@ void __init udp_table_init(struct udp_table *table, const char *name)
2198 2209
2199void __init udp_init(void) 2210void __init udp_init(void)
2200{ 2211{
2201 unsigned long nr_pages, limit; 2212 unsigned long limit;
2202 2213
2203 udp_table_init(&udp_table, "UDP"); 2214 udp_table_init(&udp_table, "UDP");
2204 /* Set the pressure threshold up by the same strategy of TCP. It is a 2215 limit = nr_free_buffer_pages() / 8;
2205 * fraction of global memory that is up to 1/2 at 256 MB, decreasing
2206 * toward zero with the amount of memory, with a floor of 128 pages.
2207 */
2208 nr_pages = totalram_pages - totalhigh_pages;
2209 limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
2210 limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
2211 limit = max(limit, 128UL); 2216 limit = max(limit, 128UL);
2212 sysctl_udp_mem[0] = limit / 4 * 3; 2217 sysctl_udp_mem[0] = limit / 4 * 3;
2213 sysctl_udp_mem[1] = limit; 2218 sysctl_udp_mem[1] = limit;
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index 2d51840e53a1..327a617d594c 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -32,7 +32,12 @@ static int xfrm4_tunnel_check_size(struct sk_buff *skb)
32 dst = skb_dst(skb); 32 dst = skb_dst(skb);
33 mtu = dst_mtu(dst); 33 mtu = dst_mtu(dst);
34 if (skb->len > mtu) { 34 if (skb->len > mtu) {
35 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); 35 if (skb->sk)
36 ip_local_error(skb->sk, EMSGSIZE, ip_hdr(skb)->daddr,
37 inet_sk(skb->sk)->inet_dport, mtu);
38 else
39 icmp_send(skb, ICMP_DEST_UNREACH,
40 ICMP_FRAG_NEEDED, htonl(mtu));
36 ret = -EMSGSIZE; 41 ret = -EMSGSIZE;
37 } 42 }
38out: 43out:
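
For locally generated packets there is a socket to blame, so the size error can be delivered directly instead of routing an ICMP_FRAG_NEEDED back at ourselves. A hedged userspace sketch of how the error then surfaces, assuming a connected UDP socket with IP_RECVERR enabled:

	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <linux/errqueue.h>
	#include <stdio.h>

	/* Drain one error-queue entry; prints the path MTU when the
	 * pending error is the EMSGSIZE raised by ip_local_error(). */
	static void drain_errqueue(int fd)
	{
		char ctrl[512];
		struct msghdr msg = {
			.msg_control	= ctrl,
			.msg_controllen	= sizeof(ctrl),
		};
		struct cmsghdr *cm;

		if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
			return;
		for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
			if (cm->cmsg_level == IPPROTO_IP &&
			    cm->cmsg_type == IP_RECVERR) {
				struct sock_extended_err *ee =
					(struct sock_extended_err *)CMSG_DATA(cm);
				if (ee->ee_errno == EMSGSIZE)
					printf("path mtu: %u\n", ee->ee_info);
			}
		}
	}
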
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index d20a05e970d8..981e43eaf704 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -18,38 +18,46 @@
18 18
19static struct xfrm_policy_afinfo xfrm4_policy_afinfo; 19static struct xfrm_policy_afinfo xfrm4_policy_afinfo;
20 20
21static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos, 21static struct dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4,
22 const xfrm_address_t *saddr, 22 int tos,
23 const xfrm_address_t *daddr) 23 const xfrm_address_t *saddr,
24 const xfrm_address_t *daddr)
24{ 25{
25 struct flowi4 fl4 = {
26 .daddr = daddr->a4,
27 .flowi4_tos = tos,
28 };
29 struct rtable *rt; 26 struct rtable *rt;
30 27
28 memset(fl4, 0, sizeof(*fl4));
29 fl4->daddr = daddr->a4;
30 fl4->flowi4_tos = tos;
31 if (saddr) 31 if (saddr)
32 fl4.saddr = saddr->a4; 32 fl4->saddr = saddr->a4;
33 33
34 rt = __ip_route_output_key(net, &fl4); 34 rt = __ip_route_output_key(net, fl4);
35 if (!IS_ERR(rt)) 35 if (!IS_ERR(rt))
36 return &rt->dst; 36 return &rt->dst;
37 37
38 return ERR_CAST(rt); 38 return ERR_CAST(rt);
39} 39}
40 40
41static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos,
42 const xfrm_address_t *saddr,
43 const xfrm_address_t *daddr)
44{
45 struct flowi4 fl4;
46
47 return __xfrm4_dst_lookup(net, &fl4, tos, saddr, daddr);
48}
49
41static int xfrm4_get_saddr(struct net *net, 50static int xfrm4_get_saddr(struct net *net,
42 xfrm_address_t *saddr, xfrm_address_t *daddr) 51 xfrm_address_t *saddr, xfrm_address_t *daddr)
43{ 52{
44 struct dst_entry *dst; 53 struct dst_entry *dst;
45 struct rtable *rt; 54 struct flowi4 fl4;
46 55
47 dst = xfrm4_dst_lookup(net, 0, NULL, daddr); 56 dst = __xfrm4_dst_lookup(net, &fl4, 0, NULL, daddr);
48 if (IS_ERR(dst)) 57 if (IS_ERR(dst))
49 return -EHOSTUNREACH; 58 return -EHOSTUNREACH;
50 59
51 rt = (struct rtable *)dst; 60 saddr->a4 = fl4.saddr;
52 saddr->a4 = rt->rt_src;
53 dst_release(dst); 61 dst_release(dst);
54 return 0; 62 return 0;
55} 63}
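
Same motif as the TCP and UDP changes earlier: the lookup takes the caller's flowi4 as an out parameter, so the source address selected by routing survives in fl4.saddr even though rt->rt_src is on its way out of struct rtable. The caller-side pattern, condensed:

	struct flowi4 fl4;
	struct dst_entry *dst = __xfrm4_dst_lookup(net, &fl4, 0, NULL, daddr);

	if (IS_ERR(dst))
		return -EHOSTUNREACH;
	saddr->a4 = fl4.saddr;	/* chosen by the route lookup */
	dst_release(dst);
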
@@ -73,7 +81,7 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
73 81
74 rt->rt_key_dst = fl4->daddr; 82 rt->rt_key_dst = fl4->daddr;
75 rt->rt_key_src = fl4->saddr; 83 rt->rt_key_src = fl4->saddr;
76 rt->rt_tos = fl4->flowi4_tos; 84 rt->rt_key_tos = fl4->flowi4_tos;
77 rt->rt_route_iif = fl4->flowi4_iif; 85 rt->rt_route_iif = fl4->flowi4_iif;
78 rt->rt_iif = fl4->flowi4_iif; 86 rt->rt_iif = fl4->flowi4_iif;
79 rt->rt_oif = fl4->flowi4_oif; 87 rt->rt_oif = fl4->flowi4_oif;
@@ -102,7 +110,7 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
102static void 110static void
103_decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse) 111_decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
104{ 112{
105 struct iphdr *iph = ip_hdr(skb); 113 const struct iphdr *iph = ip_hdr(skb);
106 u8 *xprth = skb_network_header(skb) + iph->ihl * 4; 114 u8 *xprth = skb_network_header(skb) + iph->ihl * 4;
107 struct flowi4 *fl4 = &fl->u.ip4; 115 struct flowi4 *fl4 = &fl->u.ip4;
108 116
diff --git a/net/ipv4/xfrm4_state.c b/net/ipv4/xfrm4_state.c
index 805d63ef4340..d9ac0a0058b5 100644
--- a/net/ipv4/xfrm4_state.c
+++ b/net/ipv4/xfrm4_state.c
@@ -55,7 +55,7 @@ xfrm4_init_temprop(struct xfrm_state *x, const struct xfrm_tmpl *tmpl,
55 55
56int xfrm4_extract_header(struct sk_buff *skb) 56int xfrm4_extract_header(struct sk_buff *skb)
57{ 57{
58 struct iphdr *iph = ip_hdr(skb); 58 const struct iphdr *iph = ip_hdr(skb);
59 59
60 XFRM_MODE_SKB_CB(skb)->ihl = sizeof(*iph); 60 XFRM_MODE_SKB_CB(skb)->ihl = sizeof(*iph);
61 XFRM_MODE_SKB_CB(skb)->id = iph->id; 61 XFRM_MODE_SKB_CB(skb)->id = iph->id;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index a7bda0757053..498b927f68be 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -289,19 +289,19 @@ static int snmp6_alloc_dev(struct inet6_dev *idev)
289 sizeof(struct ipstats_mib), 289 sizeof(struct ipstats_mib),
290 __alignof__(struct ipstats_mib)) < 0) 290 __alignof__(struct ipstats_mib)) < 0)
291 goto err_ip; 291 goto err_ip;
292 if (snmp_mib_init((void __percpu **)idev->stats.icmpv6, 292 idev->stats.icmpv6dev = kzalloc(sizeof(struct icmpv6_mib_device),
293 sizeof(struct icmpv6_mib), 293 GFP_KERNEL);
294 __alignof__(struct icmpv6_mib)) < 0) 294 if (!idev->stats.icmpv6dev)
295 goto err_icmp; 295 goto err_icmp;
296 if (snmp_mib_init((void __percpu **)idev->stats.icmpv6msg, 296 idev->stats.icmpv6msgdev = kzalloc(sizeof(struct icmpv6msg_mib_device),
297 sizeof(struct icmpv6msg_mib), 297 GFP_KERNEL);
298 __alignof__(struct icmpv6msg_mib)) < 0) 298 if (!idev->stats.icmpv6msgdev)
299 goto err_icmpmsg; 299 goto err_icmpmsg;
300 300
301 return 0; 301 return 0;
302 302
303err_icmpmsg: 303err_icmpmsg:
304 snmp_mib_free((void __percpu **)idev->stats.icmpv6); 304 kfree(idev->stats.icmpv6dev);
305err_icmp: 305err_icmp:
306 snmp_mib_free((void __percpu **)idev->stats.ipv6); 306 snmp_mib_free((void __percpu **)idev->stats.ipv6);
307err_ip: 307err_ip:
@@ -310,19 +310,13 @@ err_ip:
310 310
311static void snmp6_free_dev(struct inet6_dev *idev) 311static void snmp6_free_dev(struct inet6_dev *idev)
312{ 312{
313 snmp_mib_free((void __percpu **)idev->stats.icmpv6msg); 313 kfree(idev->stats.icmpv6msgdev);
314 snmp_mib_free((void __percpu **)idev->stats.icmpv6); 314 kfree(idev->stats.icmpv6dev);
315 snmp_mib_free((void __percpu **)idev->stats.ipv6); 315 snmp_mib_free((void __percpu **)idev->stats.ipv6);
316} 316}
317 317
318/* Nobody refers to this device, we may destroy it. */ 318/* Nobody refers to this device, we may destroy it. */
319 319
320static void in6_dev_finish_destroy_rcu(struct rcu_head *head)
321{
322 struct inet6_dev *idev = container_of(head, struct inet6_dev, rcu);
323 kfree(idev);
324}
325
326void in6_dev_finish_destroy(struct inet6_dev *idev) 320void in6_dev_finish_destroy(struct inet6_dev *idev)
327{ 321{
328 struct net_device *dev = idev->dev; 322 struct net_device *dev = idev->dev;
@@ -339,7 +333,7 @@ void in6_dev_finish_destroy(struct inet6_dev *idev)
339 return; 333 return;
340 } 334 }
341 snmp6_free_dev(idev); 335 snmp6_free_dev(idev);
342 call_rcu(&idev->rcu, in6_dev_finish_destroy_rcu); 336 kfree_rcu(idev, rcu);
343} 337}
344 338
345EXPORT_SYMBOL(in6_dev_finish_destroy); 339EXPORT_SYMBOL(in6_dev_finish_destroy);
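
This is the first of several kfree_rcu() conversions in the file: when an RCU callback does nothing but kfree() the enclosing object, the callback function can be deleted outright and the call site collapsed to one line naming the rcu_head member.

	/* before */
	call_rcu(&idev->rcu, in6_dev_finish_destroy_rcu);
	/* after */
	kfree_rcu(idev, rcu);
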
@@ -535,12 +529,6 @@ static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old)
535} 529}
536#endif 530#endif
537 531
538static void inet6_ifa_finish_destroy_rcu(struct rcu_head *head)
539{
540 struct inet6_ifaddr *ifp = container_of(head, struct inet6_ifaddr, rcu);
541 kfree(ifp);
542}
543
544/* Nobody refers to this ifaddr, destroy it */ 532/* Nobody refers to this ifaddr, destroy it */
545void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp) 533void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp)
546{ 534{
@@ -561,7 +549,7 @@ void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp)
561 } 549 }
562 dst_release(&ifp->rt->dst); 550 dst_release(&ifp->rt->dst);
563 551
564 call_rcu(&ifp->rcu, inet6_ifa_finish_destroy_rcu); 552 kfree_rcu(ifp, rcu);
565} 553}
566 554
567static void 555static void
@@ -825,6 +813,8 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
825 dst_release(&rt->dst); 813 dst_release(&rt->dst);
826 } 814 }
827 815
816 /* clean up prefsrc entries */
817 rt6_remove_prefsrc(ifp);
828out: 818out:
829 in6_ifa_put(ifp); 819 in6_ifa_put(ifp);
830} 820}
@@ -1281,7 +1271,7 @@ static int ipv6_count_addresses(struct inet6_dev *idev)
1281 return cnt; 1271 return cnt;
1282} 1272}
1283 1273
1284int ipv6_chk_addr(struct net *net, struct in6_addr *addr, 1274int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
1285 struct net_device *dev, int strict) 1275 struct net_device *dev, int strict)
1286{ 1276{
1287 struct inet6_ifaddr *ifp; 1277 struct inet6_ifaddr *ifp;
@@ -1324,7 +1314,7 @@ static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
1324 return false; 1314 return false;
1325} 1315}
1326 1316
1327int ipv6_chk_prefix(struct in6_addr *addr, struct net_device *dev) 1317int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev)
1328{ 1318{
1329 struct inet6_dev *idev; 1319 struct inet6_dev *idev;
1330 struct inet6_ifaddr *ifa; 1320 struct inet6_ifaddr *ifa;
@@ -1455,7 +1445,7 @@ void addrconf_dad_failure(struct inet6_ifaddr *ifp)
1455 1445
1456/* Join to solicited addr multicast group. */ 1446/* Join to solicited addr multicast group. */
1457 1447
1458void addrconf_join_solict(struct net_device *dev, struct in6_addr *addr) 1448void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr)
1459{ 1449{
1460 struct in6_addr maddr; 1450 struct in6_addr maddr;
1461 1451
@@ -1466,7 +1456,7 @@ void addrconf_join_solict(struct net_device *dev, struct in6_addr *addr)
1466 ipv6_dev_mc_inc(dev, &maddr); 1456 ipv6_dev_mc_inc(dev, &maddr);
1467} 1457}
1468 1458
1469void addrconf_leave_solict(struct inet6_dev *idev, struct in6_addr *addr) 1459void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
1470{ 1460{
1471 struct in6_addr maddr; 1461 struct in6_addr maddr;
1472 1462
@@ -2111,7 +2101,7 @@ err_exit:
2111/* 2101/*
2112 * Manual configuration of address on an interface 2102 * Manual configuration of address on an interface
2113 */ 2103 */
2114static int inet6_addr_add(struct net *net, int ifindex, struct in6_addr *pfx, 2104static int inet6_addr_add(struct net *net, int ifindex, const struct in6_addr *pfx,
2115 unsigned int plen, __u8 ifa_flags, __u32 prefered_lft, 2105 unsigned int plen, __u8 ifa_flags, __u32 prefered_lft,
2116 __u32 valid_lft) 2106 __u32 valid_lft)
2117{ 2107{
@@ -2185,7 +2175,7 @@ static int inet6_addr_add(struct net *net, int ifindex, struct in6_addr *pfx,
2185 return PTR_ERR(ifp); 2175 return PTR_ERR(ifp);
2186} 2176}
2187 2177
2188static int inet6_addr_del(struct net *net, int ifindex, struct in6_addr *pfx, 2178static int inet6_addr_del(struct net *net, int ifindex, const struct in6_addr *pfx,
2189 unsigned int plen) 2179 unsigned int plen)
2190{ 2180{
2191 struct inet6_ifaddr *ifp; 2181 struct inet6_ifaddr *ifp;
@@ -2348,7 +2338,7 @@ static void init_loopback(struct net_device *dev)
2348 add_addr(idev, &in6addr_loopback, 128, IFA_HOST); 2338 add_addr(idev, &in6addr_loopback, 128, IFA_HOST);
2349} 2339}
2350 2340
2351static void addrconf_add_linklocal(struct inet6_dev *idev, struct in6_addr *addr) 2341static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr *addr)
2352{ 2342{
2353 struct inet6_ifaddr * ifp; 2343 struct inet6_ifaddr * ifp;
2354 u32 addr_flags = IFA_F_PERMANENT; 2344 u32 addr_flags = IFA_F_PERMANENT;
@@ -3119,7 +3109,7 @@ void if6_proc_exit(void)
3119 3109
3120#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) 3110#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
3121/* Check if address is a home address configured on any interface. */ 3111/* Check if address is a home address configured on any interface. */
3122int ipv6_chk_home_addr(struct net *net, struct in6_addr *addr) 3112int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr)
3123{ 3113{
3124 int ret = 0; 3114 int ret = 0;
3125 struct inet6_ifaddr *ifp = NULL; 3115 struct inet6_ifaddr *ifp = NULL;
@@ -3836,7 +3826,7 @@ static inline size_t inet6_if_nlmsg_size(void)
3836 + nla_total_size(inet6_ifla6_size()); /* IFLA_PROTINFO */ 3826 + nla_total_size(inet6_ifla6_size()); /* IFLA_PROTINFO */
3837} 3827}
3838 3828
3839static inline void __snmp6_fill_stats(u64 *stats, void __percpu **mib, 3829static inline void __snmp6_fill_statsdev(u64 *stats, atomic_long_t *mib,
3840 int items, int bytes) 3830 int items, int bytes)
3841{ 3831{
3842 int i; 3832 int i;
@@ -3846,7 +3836,7 @@ static inline void __snmp6_fill_stats(u64 *stats, void __percpu **mib,
3846 /* Use put_unaligned() because stats may not be aligned for u64. */ 3836 /* Use put_unaligned() because stats may not be aligned for u64. */
3847 put_unaligned(items, &stats[0]); 3837 put_unaligned(items, &stats[0]);
3848 for (i = 1; i < items; i++) 3838 for (i = 1; i < items; i++)
3849 put_unaligned(snmp_fold_field(mib, i), &stats[i]); 3839 put_unaligned(atomic_long_read(&mib[i]), &stats[i]);
3850 3840
3851 memset(&stats[items], 0, pad); 3841 memset(&stats[items], 0, pad);
3852} 3842}
@@ -3875,7 +3865,7 @@ static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
3875 IPSTATS_MIB_MAX, bytes, offsetof(struct ipstats_mib, syncp)); 3865 IPSTATS_MIB_MAX, bytes, offsetof(struct ipstats_mib, syncp));
3876 break; 3866 break;
3877 case IFLA_INET6_ICMP6STATS: 3867 case IFLA_INET6_ICMP6STATS:
3878 __snmp6_fill_stats(stats, (void __percpu **)idev->stats.icmpv6, ICMP6_MIB_MAX, bytes); 3868 __snmp6_fill_statsdev(stats, idev->stats.icmpv6dev->mibs, ICMP6_MIB_MAX, bytes);
3879 break; 3869 break;
3880 } 3870 }
3881} 3871}
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index afcc7099f96d..3b5669a2582d 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -272,6 +272,10 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
272 272
273 if (addr_len < SIN6_LEN_RFC2133) 273 if (addr_len < SIN6_LEN_RFC2133)
274 return -EINVAL; 274 return -EINVAL;
275
276 if (addr->sin6_family != AF_INET6)
277 return -EAFNOSUPPORT;
278
275 addr_type = ipv6_addr_type(&addr->sin6_addr); 279 addr_type = ipv6_addr_type(&addr->sin6_addr);
276 if ((addr_type & IPV6_ADDR_MULTICAST) && sock->type == SOCK_STREAM) 280 if ((addr_type & IPV6_ADDR_MULTICAST) && sock->type == SOCK_STREAM)
277 return -EINVAL; 281 return -EINVAL;
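
Previously a sockaddr with the wrong family slipped past inet6_bind() as long as it was large enough; POSIX wants EAFNOSUPPORT here. A small userspace sketch of the visible change:

	struct sockaddr_in6 sin6 = { .sin6_family = AF_INET };	/* wrong family */
	int fd = socket(AF_INET6, SOCK_STREAM, 0);

	if (bind(fd, (struct sockaddr *)&sin6, sizeof(sin6)) < 0)
		perror("bind");	/* now: Address family not supported by protocol */
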
@@ -740,7 +744,7 @@ static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto)
740 744
741static int ipv6_gso_send_check(struct sk_buff *skb) 745static int ipv6_gso_send_check(struct sk_buff *skb)
742{ 746{
743 struct ipv6hdr *ipv6h; 747 const struct ipv6hdr *ipv6h;
744 const struct inet6_protocol *ops; 748 const struct inet6_protocol *ops;
745 int err = -EINVAL; 749 int err = -EINVAL;
746 750
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index 0e5e943446f0..674255f5e6b7 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -44,7 +44,7 @@
44 44
45#include <net/checksum.h> 45#include <net/checksum.h>
46 46
47static int ipv6_dev_ac_dec(struct net_device *dev, struct in6_addr *addr); 47static int ipv6_dev_ac_dec(struct net_device *dev, const struct in6_addr *addr);
48 48
49/* Big ac list lock for all the sockets */ 49/* Big ac list lock for all the sockets */
50static DEFINE_RWLOCK(ipv6_sk_ac_lock); 50static DEFINE_RWLOCK(ipv6_sk_ac_lock);
@@ -54,7 +54,7 @@ static DEFINE_RWLOCK(ipv6_sk_ac_lock);
54 * socket join an anycast group 54 * socket join an anycast group
55 */ 55 */
56 56
57int ipv6_sock_ac_join(struct sock *sk, int ifindex, struct in6_addr *addr) 57int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
58{ 58{
59 struct ipv6_pinfo *np = inet6_sk(sk); 59 struct ipv6_pinfo *np = inet6_sk(sk);
60 struct net_device *dev = NULL; 60 struct net_device *dev = NULL;
@@ -145,7 +145,7 @@ error:
145/* 145/*
146 * socket leave an anycast group 146 * socket leave an anycast group
147 */ 147 */
148int ipv6_sock_ac_drop(struct sock *sk, int ifindex, struct in6_addr *addr) 148int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
149{ 149{
150 struct ipv6_pinfo *np = inet6_sk(sk); 150 struct ipv6_pinfo *np = inet6_sk(sk);
151 struct net_device *dev; 151 struct net_device *dev;
@@ -252,7 +252,7 @@ static void aca_put(struct ifacaddr6 *ac)
252/* 252/*
253 * device anycast group inc (add if not found) 253 * device anycast group inc (add if not found)
254 */ 254 */
255int ipv6_dev_ac_inc(struct net_device *dev, struct in6_addr *addr) 255int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr)
256{ 256{
257 struct ifacaddr6 *aca; 257 struct ifacaddr6 *aca;
258 struct inet6_dev *idev; 258 struct inet6_dev *idev;
@@ -324,7 +324,7 @@ out:
324/* 324/*
325 * device anycast group decrement 325 * device anycast group decrement
326 */ 326 */
327int __ipv6_dev_ac_dec(struct inet6_dev *idev, struct in6_addr *addr) 327int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr)
328{ 328{
329 struct ifacaddr6 *aca, *prev_aca; 329 struct ifacaddr6 *aca, *prev_aca;
330 330
@@ -358,7 +358,7 @@ int __ipv6_dev_ac_dec(struct inet6_dev *idev, struct in6_addr *addr)
358} 358}
359 359
360/* called with rcu_read_lock() */ 360/* called with rcu_read_lock() */
361static int ipv6_dev_ac_dec(struct net_device *dev, struct in6_addr *addr) 361static int ipv6_dev_ac_dec(struct net_device *dev, const struct in6_addr *addr)
362{ 362{
363 struct inet6_dev *idev = __in6_dev_get(dev); 363 struct inet6_dev *idev = __in6_dev_get(dev);
364 364
@@ -371,7 +371,7 @@ static int ipv6_dev_ac_dec(struct net_device *dev, struct in6_addr *addr)
371 * check if the interface has this anycast address 371 * check if the interface has this anycast address
372 * called with rcu_read_lock() 372 * called with rcu_read_lock()
373 */ 373 */
374static int ipv6_chk_acast_dev(struct net_device *dev, struct in6_addr *addr) 374static int ipv6_chk_acast_dev(struct net_device *dev, const struct in6_addr *addr)
375{ 375{
376 struct inet6_dev *idev; 376 struct inet6_dev *idev;
377 struct ifacaddr6 *aca; 377 struct ifacaddr6 *aca;
@@ -392,7 +392,7 @@ static int ipv6_chk_acast_dev(struct net_device *dev, struct in6_addr *addr)
392 * check if given interface (or any, if dev==0) has this anycast address 392 * check if given interface (or any, if dev==0) has this anycast address
393 */ 393 */
394int ipv6_chk_acast_addr(struct net *net, struct net_device *dev, 394int ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
395 struct in6_addr *addr) 395 const struct in6_addr *addr)
396{ 396{
397 int found = 0; 397 int found = 0;
398 398
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 59dccfbb5b11..1ac7938dd9ec 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -430,7 +430,7 @@ static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
430 u8 type, u8 code, int offset, __be32 info) 430 u8 type, u8 code, int offset, __be32 info)
431{ 431{
432 struct net *net = dev_net(skb->dev); 432 struct net *net = dev_net(skb->dev);
433 struct ipv6hdr *iph = (struct ipv6hdr*)skb->data; 433 const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
434 struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset); 434 struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset);
435 struct xfrm_state *x; 435 struct xfrm_state *x;
436 436
@@ -438,7 +438,8 @@ static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
438 type != ICMPV6_PKT_TOOBIG) 438 type != ICMPV6_PKT_TOOBIG)
439 return; 439 return;
440 440
441 x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, esph->spi, IPPROTO_ESP, AF_INET6); 441 x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
442 esph->spi, IPPROTO_ESP, AF_INET6);
442 if (!x) 443 if (!x)
443 return; 444 return;
444 printk(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%pI6\n", 445 printk(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%pI6\n",
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 83cb4f9add81..11900417b1cc 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -372,7 +372,7 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
372 struct ipv6hdr *hdr = ipv6_hdr(skb); 372 struct ipv6hdr *hdr = ipv6_hdr(skb);
373 struct sock *sk; 373 struct sock *sk;
374 struct ipv6_pinfo *np; 374 struct ipv6_pinfo *np;
375 struct in6_addr *saddr = NULL; 375 const struct in6_addr *saddr = NULL;
376 struct dst_entry *dst; 376 struct dst_entry *dst;
377 struct icmp6hdr tmp_hdr; 377 struct icmp6hdr tmp_hdr;
378 struct flowi6 fl6; 378 struct flowi6 fl6;
@@ -521,7 +521,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
521 struct sock *sk; 521 struct sock *sk;
522 struct inet6_dev *idev; 522 struct inet6_dev *idev;
523 struct ipv6_pinfo *np; 523 struct ipv6_pinfo *np;
524 struct in6_addr *saddr = NULL; 524 const struct in6_addr *saddr = NULL;
525 struct icmp6hdr *icmph = icmp6_hdr(skb); 525 struct icmp6hdr *icmph = icmp6_hdr(skb);
526 struct icmp6hdr tmp_hdr; 526 struct icmp6hdr tmp_hdr;
527 struct flowi6 fl6; 527 struct flowi6 fl6;
@@ -645,8 +645,8 @@ static int icmpv6_rcv(struct sk_buff *skb)
645{ 645{
646 struct net_device *dev = skb->dev; 646 struct net_device *dev = skb->dev;
647 struct inet6_dev *idev = __in6_dev_get(dev); 647 struct inet6_dev *idev = __in6_dev_get(dev);
648 struct in6_addr *saddr, *daddr; 648 const struct in6_addr *saddr, *daddr;
649 struct ipv6hdr *orig_hdr; 649 const struct ipv6hdr *orig_hdr;
650 struct icmp6hdr *hdr; 650 struct icmp6hdr *hdr;
651 u8 type; 651 u8 type;
652 652
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index f2c5b0fc0f21..8a58e8cf6646 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -203,7 +203,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
203 return dst; 203 return dst;
204} 204}
205 205
206int inet6_csk_xmit(struct sk_buff *skb) 206int inet6_csk_xmit(struct sk_buff *skb, struct flowi *fl_unused)
207{ 207{
208 struct sock *sk = skb->sk; 208 struct sock *sk = skb->sk;
209 struct inet_sock *inet = inet_sk(sk); 209 struct inet_sock *inet = inet_sk(sk);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 7548905e79e1..4076a0b14b20 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -134,9 +134,9 @@ static __inline__ u32 fib6_new_sernum(void)
134# define BITOP_BE32_SWIZZLE 0 134# define BITOP_BE32_SWIZZLE 0
135#endif 135#endif
136 136
137static __inline__ __be32 addr_bit_set(void *token, int fn_bit) 137static __inline__ __be32 addr_bit_set(const void *token, int fn_bit)
138{ 138{
139 __be32 *addr = token; 139 const __be32 *addr = token;
140 /* 140 /*
141 * Here, 141 * Here,
142 * 1 << ((~fn_bit ^ BITOP_BE32_SWIZZLE) & 0x1f) 142 * 1 << ((~fn_bit ^ BITOP_BE32_SWIZZLE) & 0x1f)
@@ -394,10 +394,11 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
394 arg.net = net; 394 arg.net = net;
395 w->args = &arg; 395 w->args = &arg;
396 396
397 rcu_read_lock();
397 for (h = s_h; h < FIB6_TABLE_HASHSZ; h++, s_e = 0) { 398 for (h = s_h; h < FIB6_TABLE_HASHSZ; h++, s_e = 0) {
398 e = 0; 399 e = 0;
399 head = &net->ipv6.fib_table_hash[h]; 400 head = &net->ipv6.fib_table_hash[h];
400 hlist_for_each_entry(tb, node, head, tb6_hlist) { 401 hlist_for_each_entry_rcu(tb, node, head, tb6_hlist) {
401 if (e < s_e) 402 if (e < s_e)
402 goto next; 403 goto next;
403 res = fib6_dump_table(tb, skb, cb); 404 res = fib6_dump_table(tb, skb, cb);
@@ -408,6 +409,7 @@ next:
408 } 409 }
409 } 410 }
410out: 411out:
412 rcu_read_unlock();
411 cb->args[1] = e; 413 cb->args[1] = e;
412 cb->args[0] = h; 414 cb->args[0] = h;
413 415
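
The dumper now walks the FIB table hash under rcu_read_lock() with the _rcu list primitive, pairing with the RCU-aware insertions elsewhere in fib6 rather than depending on the caller's locking. The core of the pattern:

	rcu_read_lock();
	hlist_for_each_entry_rcu(tb, node, head, tb6_hlist) {
		/* dump one table; tb stays valid inside the read section */
	}
	rcu_read_unlock();
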
@@ -822,7 +824,7 @@ st_failure:
822 824
823struct lookup_args { 825struct lookup_args {
824 int offset; /* key offset on rt6_info */ 826 int offset; /* key offset on rt6_info */
825 struct in6_addr *addr; /* search key */ 827 const struct in6_addr *addr; /* search key */
826}; 828};
827 829
828static struct fib6_node * fib6_lookup_1(struct fib6_node *root, 830static struct fib6_node * fib6_lookup_1(struct fib6_node *root,
@@ -881,8 +883,8 @@ static struct fib6_node * fib6_lookup_1(struct fib6_node *root,
881 return NULL; 883 return NULL;
882} 884}
883 885
884struct fib6_node * fib6_lookup(struct fib6_node *root, struct in6_addr *daddr, 886struct fib6_node * fib6_lookup(struct fib6_node *root, const struct in6_addr *daddr,
885 struct in6_addr *saddr) 887 const struct in6_addr *saddr)
886{ 888{
887 struct fib6_node *fn; 889 struct fib6_node *fn;
888 struct lookup_args args[] = { 890 struct lookup_args args[] = {
@@ -916,7 +918,7 @@ struct fib6_node * fib6_lookup(struct fib6_node *root, struct in6_addr *daddr,
916 918
917 919
918static struct fib6_node * fib6_locate_1(struct fib6_node *root, 920static struct fib6_node * fib6_locate_1(struct fib6_node *root,
919 struct in6_addr *addr, 921 const struct in6_addr *addr,
920 int plen, int offset) 922 int plen, int offset)
921{ 923{
922 struct fib6_node *fn; 924 struct fib6_node *fn;
@@ -946,8 +948,8 @@ static struct fib6_node * fib6_locate_1(struct fib6_node *root,
946} 948}
947 949
948struct fib6_node * fib6_locate(struct fib6_node *root, 950struct fib6_node * fib6_locate(struct fib6_node *root,
949 struct in6_addr *daddr, int dst_len, 951 const struct in6_addr *daddr, int dst_len,
950 struct in6_addr *saddr, int src_len) 952 const struct in6_addr *saddr, int src_len)
951{ 953{
952 struct fib6_node *fn; 954 struct fib6_node *fn;
953 955
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index a83e9209cecc..027c7ff6f1e5 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -57,7 +57,7 @@ inline int ip6_rcv_finish( struct sk_buff *skb)
57 57
58int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) 58int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
59{ 59{
60 struct ipv6hdr *hdr; 60 const struct ipv6hdr *hdr;
61 u32 pkt_len; 61 u32 pkt_len;
62 struct inet6_dev *idev; 62 struct inet6_dev *idev;
63 struct net *net = dev_net(skb->dev); 63 struct net *net = dev_net(skb->dev);
@@ -186,7 +186,7 @@ resubmit:
186 int ret; 186 int ret;
187 187
188 if (ipprot->flags & INET6_PROTO_FINAL) { 188 if (ipprot->flags & INET6_PROTO_FINAL) {
189 struct ipv6hdr *hdr; 189 const struct ipv6hdr *hdr;
190 190
191 /* Free reference early: we don't need it any more, 191 /* Free reference early: we don't need it any more,
192 and it may hold ip_conntrack module loaded 192 and it may hold ip_conntrack module loaded
@@ -242,7 +242,7 @@ int ip6_input(struct sk_buff *skb)
242 242
243int ip6_mc_input(struct sk_buff *skb) 243int ip6_mc_input(struct sk_buff *skb)
244{ 244{
245 struct ipv6hdr *hdr; 245 const struct ipv6hdr *hdr;
246 int deliver; 246 int deliver;
247 247
248 IP6_UPD_PO_STATS_BH(dev_net(skb_dst(skb)->dev), 248 IP6_UPD_PO_STATS_BH(dev_net(skb_dst(skb)->dev),
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 46cf7bea6769..9d4b165837d6 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -869,9 +869,9 @@ fail:
869 return err; 869 return err;
870} 870}
871 871
872static inline int ip6_rt_check(struct rt6key *rt_key, 872static inline int ip6_rt_check(const struct rt6key *rt_key,
873 struct in6_addr *fl_addr, 873 const struct in6_addr *fl_addr,
874 struct in6_addr *addr_cache) 874 const struct in6_addr *addr_cache)
875{ 875{
876 return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) && 876 return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
877 (addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache)); 877 (addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache));
@@ -879,7 +879,7 @@ static inline int ip6_rt_check(struct rt6key *rt_key,
879 879
880static struct dst_entry *ip6_sk_dst_check(struct sock *sk, 880static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
881 struct dst_entry *dst, 881 struct dst_entry *dst,
882 struct flowi6 *fl6) 882 const struct flowi6 *fl6)
883{ 883{
884 struct ipv6_pinfo *np = inet6_sk(sk); 884 struct ipv6_pinfo *np = inet6_sk(sk);
885 struct rt6_info *rt = (struct rt6_info *)dst; 885 struct rt6_info *rt = (struct rt6_info *)dst;
@@ -930,10 +930,10 @@ static int ip6_dst_lookup_tail(struct sock *sk,
930 goto out_err_release; 930 goto out_err_release;
931 931
932 if (ipv6_addr_any(&fl6->saddr)) { 932 if (ipv6_addr_any(&fl6->saddr)) {
933 err = ipv6_dev_get_saddr(net, ip6_dst_idev(*dst)->dev, 933 struct rt6_info *rt = (struct rt6_info *) *dst;
934 &fl6->daddr, 934 err = ip6_route_get_saddr(net, rt, &fl6->daddr,
935 sk ? inet6_sk(sk)->srcprefs : 0, 935 sk ? inet6_sk(sk)->srcprefs : 0,
936 &fl6->saddr); 936 &fl6->saddr);
937 if (err) 937 if (err)
938 goto out_err_release; 938 goto out_err_release;
939 } 939 }
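
ip6_route_get_saddr() is new in this series: it behaves like ipv6_dev_get_saddr() except that a preferred source address configured on the route (ip -6 route add ... src ADDR) takes precedence, and the rt6_remove_prefsrc() call added in addrconf above cleans those entries up when the address goes away. Assumed prototype:

	int ip6_route_get_saddr(struct net *net, struct rt6_info *rt,
				const struct in6_addr *daddr,
				unsigned int prefs, struct in6_addr *saddr);
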
@@ -1150,6 +1150,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1150{ 1150{
1151 struct inet_sock *inet = inet_sk(sk); 1151 struct inet_sock *inet = inet_sk(sk);
1152 struct ipv6_pinfo *np = inet6_sk(sk); 1152 struct ipv6_pinfo *np = inet6_sk(sk);
1153 struct inet_cork *cork;
1153 struct sk_buff *skb; 1154 struct sk_buff *skb;
1154 unsigned int maxfraglen, fragheaderlen; 1155 unsigned int maxfraglen, fragheaderlen;
1155 int exthdrlen; 1156 int exthdrlen;
@@ -1163,6 +1164,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1163 1164
1164 if (flags&MSG_PROBE) 1165 if (flags&MSG_PROBE)
1165 return 0; 1166 return 0;
1167 cork = &inet->cork.base;
1166 if (skb_queue_empty(&sk->sk_write_queue)) { 1168 if (skb_queue_empty(&sk->sk_write_queue)) {
1167 /* 1169 /*
1168 * setup for corking 1170 * setup for corking
@@ -1202,7 +1204,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1202 /* need source address above miyazawa*/ 1204 /* need source address above miyazawa*/
1203 } 1205 }
1204 dst_hold(&rt->dst); 1206 dst_hold(&rt->dst);
1205 inet->cork.dst = &rt->dst; 1207 cork->dst = &rt->dst;
1206 inet->cork.fl.u.ip6 = *fl6; 1208 inet->cork.fl.u.ip6 = *fl6;
1207 np->cork.hop_limit = hlimit; 1209 np->cork.hop_limit = hlimit;
1208 np->cork.tclass = tclass; 1210 np->cork.tclass = tclass;
@@ -1212,10 +1214,10 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1212 if (np->frag_size) 1214 if (np->frag_size)
1213 mtu = np->frag_size; 1215 mtu = np->frag_size;
1214 } 1216 }
1215 inet->cork.fragsize = mtu; 1217 cork->fragsize = mtu;
1216 if (dst_allfrag(rt->dst.path)) 1218 if (dst_allfrag(rt->dst.path))
1217 inet->cork.flags |= IPCORK_ALLFRAG; 1219 cork->flags |= IPCORK_ALLFRAG;
1218 inet->cork.length = 0; 1220 cork->length = 0;
1219 sk->sk_sndmsg_page = NULL; 1221 sk->sk_sndmsg_page = NULL;
1220 sk->sk_sndmsg_off = 0; 1222 sk->sk_sndmsg_off = 0;
1221 exthdrlen = rt->dst.header_len + (opt ? opt->opt_flen : 0) - 1223 exthdrlen = rt->dst.header_len + (opt ? opt->opt_flen : 0) -
@@ -1223,12 +1225,12 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1223 length += exthdrlen; 1225 length += exthdrlen;
1224 transhdrlen += exthdrlen; 1226 transhdrlen += exthdrlen;
1225 } else { 1227 } else {
1226 rt = (struct rt6_info *)inet->cork.dst; 1228 rt = (struct rt6_info *)cork->dst;
1227 fl6 = &inet->cork.fl.u.ip6; 1229 fl6 = &inet->cork.fl.u.ip6;
1228 opt = np->cork.opt; 1230 opt = np->cork.opt;
1229 transhdrlen = 0; 1231 transhdrlen = 0;
1230 exthdrlen = 0; 1232 exthdrlen = 0;
1231 mtu = inet->cork.fragsize; 1233 mtu = cork->fragsize;
1232 } 1234 }
1233 1235
1234 hh_len = LL_RESERVED_SPACE(rt->dst.dev); 1236 hh_len = LL_RESERVED_SPACE(rt->dst.dev);
@@ -1238,7 +1240,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1238 maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr); 1240 maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr);
1239 1241
1240 if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) { 1242 if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
1241 if (inet->cork.length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) { 1243 if (cork->length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) {
1242 ipv6_local_error(sk, EMSGSIZE, fl6, mtu-exthdrlen); 1244 ipv6_local_error(sk, EMSGSIZE, fl6, mtu-exthdrlen);
1243 return -EMSGSIZE; 1245 return -EMSGSIZE;
1244 } 1246 }
@@ -1267,7 +1269,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1267 * --yoshfuji 1269 * --yoshfuji
1268 */ 1270 */
1269 1271
1270 inet->cork.length += length; 1272 cork->length += length;
1271 if (length > mtu) { 1273 if (length > mtu) {
1272 int proto = sk->sk_protocol; 1274 int proto = sk->sk_protocol;
1273 if (dontfrag && (proto == IPPROTO_UDP || proto == IPPROTO_RAW)){ 1275 if (dontfrag && (proto == IPPROTO_UDP || proto == IPPROTO_RAW)){
@@ -1292,7 +1294,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1292 1294
1293 while (length > 0) { 1295 while (length > 0) {
1294 /* Check if the remaining data fits into current packet. */ 1296 /* Check if the remaining data fits into current packet. */
1295 copy = (inet->cork.length <= mtu && !(inet->cork.flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len; 1297 copy = (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
1296 if (copy < length) 1298 if (copy < length)
1297 copy = maxfraglen - skb->len; 1299 copy = maxfraglen - skb->len;
1298 1300
@@ -1317,7 +1319,7 @@ alloc_new_skb:
1317 * we know we need more fragment(s). 1319 * we know we need more fragment(s).
1318 */ 1320 */
1319 datalen = length + fraggap; 1321 datalen = length + fraggap;
1320 if (datalen > (inet->cork.length <= mtu && !(inet->cork.flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen) 1322 if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
1321 datalen = maxfraglen - fragheaderlen; 1323 datalen = maxfraglen - fragheaderlen;
1322 1324
1323 fraglen = datalen + fragheaderlen; 1325 fraglen = datalen + fragheaderlen;
@@ -1481,7 +1483,7 @@ alloc_new_skb:
1481 } 1483 }
1482 return 0; 1484 return 0;
1483error: 1485error:
1484 inet->cork.length -= length; 1486 cork->length -= length;
1485 IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS); 1487 IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
1486 return err; 1488 return err;
1487} 1489}
@@ -1497,10 +1499,10 @@ static void ip6_cork_release(struct inet_sock *inet, struct ipv6_pinfo *np)
1497 np->cork.opt = NULL; 1499 np->cork.opt = NULL;
1498 } 1500 }
1499 1501
1500 if (inet->cork.dst) { 1502 if (inet->cork.base.dst) {
1501 dst_release(inet->cork.dst); 1503 dst_release(inet->cork.base.dst);
1502 inet->cork.dst = NULL; 1504 inet->cork.base.dst = NULL;
1503 inet->cork.flags &= ~IPCORK_ALLFRAG; 1505 inet->cork.base.flags &= ~IPCORK_ALLFRAG;
1504 } 1506 }
1505 memset(&inet->cork.fl, 0, sizeof(inet->cork.fl)); 1507 memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
1506} 1508}
@@ -1515,7 +1517,7 @@ int ip6_push_pending_frames(struct sock *sk)
1515 struct net *net = sock_net(sk); 1517 struct net *net = sock_net(sk);
1516 struct ipv6hdr *hdr; 1518 struct ipv6hdr *hdr;
1517 struct ipv6_txoptions *opt = np->cork.opt; 1519 struct ipv6_txoptions *opt = np->cork.opt;
1518 struct rt6_info *rt = (struct rt6_info *)inet->cork.dst; 1520 struct rt6_info *rt = (struct rt6_info *)inet->cork.base.dst;
1519 struct flowi6 *fl6 = &inet->cork.fl.u.ip6; 1521 struct flowi6 *fl6 = &inet->cork.fl.u.ip6;
1520 unsigned char proto = fl6->flowi6_proto; 1522 unsigned char proto = fl6->flowi6_proto;
1521 int err = 0; 1523 int err = 0;
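Note on the cork changes above: ip6_append_data() now caches a pointer to the protocol-independent part of the cork state, and ip6_cork_release()/ip6_push_pending_frames() address it as inet->cork.base, which implies the cork storage was split into an embedded base plus the flow key kept alongside it. A rough userspace sketch of that layout (struct and field names are illustrative, not the exact kernel definitions):

#include <stdio.h>

struct dst_entry;                       /* opaque here */

struct inet_cork {                      /* protocol-independent cork state */
        unsigned int flags;
        unsigned int fragsize;
        struct dst_entry *dst;
        int length;
};

struct flow_key { unsigned int oif; };  /* stand-in for struct flowi */

struct inet_cork_full {                 /* what inet_sock's cork becomes */
        struct inet_cork base;
        struct flow_key fl;
};

int main(void)
{
        struct inet_cork_full full = { .base.fragsize = 1280 };
        struct inet_cork *cork = &full.base;    /* the local alias in ip6_append_data() */

        cork->length += 100;
        printf("mtu=%u len=%d\n", cork->fragsize, cork->length);
        return 0;
}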
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index c1b1bd312df2..36c2842a86b2 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -162,7 +162,7 @@ static inline void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst)
162 for (t = rcu_dereference(start); t; t = rcu_dereference(t->next)) 162 for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
163 163
164static struct ip6_tnl * 164static struct ip6_tnl *
165ip6_tnl_lookup(struct net *net, struct in6_addr *remote, struct in6_addr *local) 165ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_addr *local)
166{ 166{
167 unsigned int h0 = HASH(remote); 167 unsigned int h0 = HASH(remote);
168 unsigned int h1 = HASH(local); 168 unsigned int h1 = HASH(local);
@@ -194,10 +194,10 @@ ip6_tnl_lookup(struct net *net, struct in6_addr *remote, struct in6_addr *local)
194 **/ 194 **/
195 195
196static struct ip6_tnl __rcu ** 196static struct ip6_tnl __rcu **
197ip6_tnl_bucket(struct ip6_tnl_net *ip6n, struct ip6_tnl_parm *p) 197ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct ip6_tnl_parm *p)
198{ 198{
199 struct in6_addr *remote = &p->raddr; 199 const struct in6_addr *remote = &p->raddr;
200 struct in6_addr *local = &p->laddr; 200 const struct in6_addr *local = &p->laddr;
201 unsigned h = 0; 201 unsigned h = 0;
202 int prio = 0; 202 int prio = 0;
203 203
@@ -280,11 +280,6 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct ip6_tnl_parm *p)
280 280
281 dev_net_set(dev, net); 281 dev_net_set(dev, net);
282 282
283 if (strchr(name, '%')) {
284 if (dev_alloc_name(dev, name) < 0)
285 goto failed_free;
286 }
287
288 t = netdev_priv(dev); 283 t = netdev_priv(dev);
289 t->parms = *p; 284 t->parms = *p;
290 err = ip6_tnl_dev_init(dev); 285 err = ip6_tnl_dev_init(dev);
@@ -321,8 +316,8 @@ failed:
321static struct ip6_tnl *ip6_tnl_locate(struct net *net, 316static struct ip6_tnl *ip6_tnl_locate(struct net *net,
322 struct ip6_tnl_parm *p, int create) 317 struct ip6_tnl_parm *p, int create)
323{ 318{
324 struct in6_addr *remote = &p->raddr; 319 const struct in6_addr *remote = &p->raddr;
325 struct in6_addr *local = &p->laddr; 320 const struct in6_addr *local = &p->laddr;
326 struct ip6_tnl __rcu **tp; 321 struct ip6_tnl __rcu **tp;
327 struct ip6_tnl *t; 322 struct ip6_tnl *t;
328 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); 323 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
@@ -374,7 +369,7 @@ ip6_tnl_dev_uninit(struct net_device *dev)
374static __u16 369static __u16
375parse_tlv_tnl_enc_lim(struct sk_buff *skb, __u8 * raw) 370parse_tlv_tnl_enc_lim(struct sk_buff *skb, __u8 * raw)
376{ 371{
377 struct ipv6hdr *ipv6h = (struct ipv6hdr *) raw; 372 const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) raw;
378 __u8 nexthdr = ipv6h->nexthdr; 373 __u8 nexthdr = ipv6h->nexthdr;
379 __u16 off = sizeof (*ipv6h); 374 __u16 off = sizeof (*ipv6h);
380 375
@@ -435,7 +430,7 @@ static int
435ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt, 430ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
436 u8 *type, u8 *code, int *msg, __u32 *info, int offset) 431 u8 *type, u8 *code, int *msg, __u32 *info, int offset)
437{ 432{
438 struct ipv6hdr *ipv6h = (struct ipv6hdr *) skb->data; 433 const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) skb->data;
439 struct ip6_tnl *t; 434 struct ip6_tnl *t;
440 int rel_msg = 0; 435 int rel_msg = 0;
441 u8 rel_type = ICMPV6_DEST_UNREACH; 436 u8 rel_type = ICMPV6_DEST_UNREACH;
@@ -535,8 +530,9 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
535 __u32 rel_info = ntohl(info); 530 __u32 rel_info = ntohl(info);
536 int err; 531 int err;
537 struct sk_buff *skb2; 532 struct sk_buff *skb2;
538 struct iphdr *eiph; 533 const struct iphdr *eiph;
539 struct rtable *rt; 534 struct rtable *rt;
535 struct flowi4 fl4;
540 536
541 err = ip6_tnl_err(skb, IPPROTO_IPIP, opt, &rel_type, &rel_code, 537 err = ip6_tnl_err(skb, IPPROTO_IPIP, opt, &rel_type, &rel_code,
542 &rel_msg, &rel_info, offset); 538 &rel_msg, &rel_info, offset);
@@ -577,7 +573,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
577 eiph = ip_hdr(skb2); 573 eiph = ip_hdr(skb2);
578 574
579 /* Try to guess incoming interface */ 575 /* Try to guess incoming interface */
580 rt = ip_route_output_ports(dev_net(skb->dev), NULL, 576 rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
581 eiph->saddr, 0, 577 eiph->saddr, 0,
582 0, 0, 578 0, 0,
583 IPPROTO_IPIP, RT_TOS(eiph->tos), 0); 579 IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
@@ -590,7 +586,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
590 if (rt->rt_flags & RTCF_LOCAL) { 586 if (rt->rt_flags & RTCF_LOCAL) {
591 ip_rt_put(rt); 587 ip_rt_put(rt);
592 rt = NULL; 588 rt = NULL;
593 rt = ip_route_output_ports(dev_net(skb->dev), NULL, 589 rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
594 eiph->daddr, eiph->saddr, 590 eiph->daddr, eiph->saddr,
595 0, 0, 591 0, 0,
596 IPPROTO_IPIP, 592 IPPROTO_IPIP,
@@ -669,8 +665,8 @@ ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
669 return 0; 665 return 0;
670} 666}
671 667
672static void ip4ip6_dscp_ecn_decapsulate(struct ip6_tnl *t, 668static void ip4ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
673 struct ipv6hdr *ipv6h, 669 const struct ipv6hdr *ipv6h,
674 struct sk_buff *skb) 670 struct sk_buff *skb)
675{ 671{
676 __u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK; 672 __u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK;
@@ -682,8 +678,8 @@ static void ip4ip6_dscp_ecn_decapsulate(struct ip6_tnl *t,
682 IP_ECN_set_ce(ip_hdr(skb)); 678 IP_ECN_set_ce(ip_hdr(skb));
683} 679}
684 680
685static void ip6ip6_dscp_ecn_decapsulate(struct ip6_tnl *t, 681static void ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
686 struct ipv6hdr *ipv6h, 682 const struct ipv6hdr *ipv6h,
687 struct sk_buff *skb) 683 struct sk_buff *skb)
688{ 684{
689 if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY) 685 if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
@@ -726,12 +722,12 @@ static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t)
726 722
727static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol, 723static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
728 __u8 ipproto, 724 __u8 ipproto,
729 void (*dscp_ecn_decapsulate)(struct ip6_tnl *t, 725 void (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
730 struct ipv6hdr *ipv6h, 726 const struct ipv6hdr *ipv6h,
731 struct sk_buff *skb)) 727 struct sk_buff *skb))
732{ 728{
733 struct ip6_tnl *t; 729 struct ip6_tnl *t;
734 struct ipv6hdr *ipv6h = ipv6_hdr(skb); 730 const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
735 731
736 rcu_read_lock(); 732 rcu_read_lock();
737 733
@@ -828,7 +824,7 @@ static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
828 **/ 824 **/
829 825
830static inline int 826static inline int
831ip6_tnl_addr_conflict(struct ip6_tnl *t, struct ipv6hdr *hdr) 827ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
832{ 828{
833 return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr); 829 return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
834} 830}
@@ -1005,7 +1001,7 @@ static inline int
1005ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) 1001ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1006{ 1002{
1007 struct ip6_tnl *t = netdev_priv(dev); 1003 struct ip6_tnl *t = netdev_priv(dev);
1008 struct iphdr *iph = ip_hdr(skb); 1004 const struct iphdr *iph = ip_hdr(skb);
1009 int encap_limit = -1; 1005 int encap_limit = -1;
1010 struct flowi6 fl6; 1006 struct flowi6 fl6;
1011 __u8 dsfield; 1007 __u8 dsfield;
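Note on the two ip_route_output_ports() hunks: the lookup now takes a caller-owned struct flowi4, presumably so the resolved flow (the source address the lookup selected, and so on) is handed back for reuse instead of being discarded inside the routing code. The general out-parameter shape, as a hedged sketch with stand-in types:

#include <stdio.h>

struct flow4 {                  /* stand-in for struct flowi4 */
        unsigned int saddr;
        unsigned int daddr;
};

/* The lookup fills the caller's flow so later code can reuse the result. */
static int route_output(struct flow4 *fl4, unsigned int daddr)
{
        fl4->daddr = daddr;
        fl4->saddr = 1;         /* pretend source selection picked something */
        return 0;
}

int main(void)
{
        struct flow4 fl4;

        if (route_output(&fl4, 42))
                return 1;
        printf("chosen saddr=%u\n", fl4.saddr);   /* available after the call */
        return 0;
}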
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 29e48593bf22..82a809901f8e 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -989,8 +989,8 @@ static int mif6_add(struct net *net, struct mr6_table *mrt,
989} 989}
990 990
991static struct mfc6_cache *ip6mr_cache_find(struct mr6_table *mrt, 991static struct mfc6_cache *ip6mr_cache_find(struct mr6_table *mrt,
992 struct in6_addr *origin, 992 const struct in6_addr *origin,
993 struct in6_addr *mcastgrp) 993 const struct in6_addr *mcastgrp)
994{ 994{
995 int line = MFC6_HASH(mcastgrp, origin); 995 int line = MFC6_HASH(mcastgrp, origin);
996 struct mfc6_cache *c; 996 struct mfc6_cache *c;
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index 85cccd6ed0b7..bba658d9a03c 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -55,7 +55,7 @@ static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
55{ 55{
56 struct net *net = dev_net(skb->dev); 56 struct net *net = dev_net(skb->dev);
57 __be32 spi; 57 __be32 spi;
58 struct ipv6hdr *iph = (struct ipv6hdr*)skb->data; 58 const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
59 struct ip_comp_hdr *ipcomph = 59 struct ip_comp_hdr *ipcomph =
60 (struct ip_comp_hdr *)(skb->data + offset); 60 (struct ip_comp_hdr *)(skb->data + offset);
61 struct xfrm_state *x; 61 struct xfrm_state *x;
@@ -64,7 +64,8 @@ static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
64 return; 64 return;
65 65
66 spi = htonl(ntohs(ipcomph->cpi)); 66 spi = htonl(ntohs(ipcomph->cpi));
67 x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, spi, IPPROTO_COMP, AF_INET6); 67 x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
68 spi, IPPROTO_COMP, AF_INET6);
68 if (!x) 69 if (!x)
69 return; 70 return;
70 71
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 76b893771e6e..3e6ebcdb4779 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -92,16 +92,16 @@ static void mld_gq_timer_expire(unsigned long data);
92static void mld_ifc_timer_expire(unsigned long data); 92static void mld_ifc_timer_expire(unsigned long data);
93static void mld_ifc_event(struct inet6_dev *idev); 93static void mld_ifc_event(struct inet6_dev *idev);
94static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc); 94static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
95static void mld_del_delrec(struct inet6_dev *idev, struct in6_addr *addr); 95static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *addr);
96static void mld_clear_delrec(struct inet6_dev *idev); 96static void mld_clear_delrec(struct inet6_dev *idev);
97static int sf_setstate(struct ifmcaddr6 *pmc); 97static int sf_setstate(struct ifmcaddr6 *pmc);
98static void sf_markstate(struct ifmcaddr6 *pmc); 98static void sf_markstate(struct ifmcaddr6 *pmc);
99static void ip6_mc_clear_src(struct ifmcaddr6 *pmc); 99static void ip6_mc_clear_src(struct ifmcaddr6 *pmc);
100static int ip6_mc_del_src(struct inet6_dev *idev, struct in6_addr *pmca, 100static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
101 int sfmode, int sfcount, struct in6_addr *psfsrc, 101 int sfmode, int sfcount, const struct in6_addr *psfsrc,
102 int delta); 102 int delta);
103static int ip6_mc_add_src(struct inet6_dev *idev, struct in6_addr *pmca, 103static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
104 int sfmode, int sfcount, struct in6_addr *psfsrc, 104 int sfmode, int sfcount, const struct in6_addr *psfsrc,
105 int delta); 105 int delta);
106static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml, 106static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
107 struct inet6_dev *idev); 107 struct inet6_dev *idev);
@@ -201,10 +201,6 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
201 return 0; 201 return 0;
202} 202}
203 203
204static void ipv6_mc_socklist_reclaim(struct rcu_head *head)
205{
206 kfree(container_of(head, struct ipv6_mc_socklist, rcu));
207}
208/* 204/*
209 * socket leave on multicast group 205 * socket leave on multicast group
210 */ 206 */
@@ -239,7 +235,7 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
239 (void) ip6_mc_leave_src(sk, mc_lst, NULL); 235 (void) ip6_mc_leave_src(sk, mc_lst, NULL);
240 rcu_read_unlock(); 236 rcu_read_unlock();
241 atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc); 237 atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
242 call_rcu(&mc_lst->rcu, ipv6_mc_socklist_reclaim); 238 kfree_rcu(mc_lst, rcu);
243 return 0; 239 return 0;
244 } 240 }
245 } 241 }
@@ -250,7 +246,7 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
250 246
251/* called with rcu_read_lock() */ 247/* called with rcu_read_lock() */
252static struct inet6_dev *ip6_mc_find_dev_rcu(struct net *net, 248static struct inet6_dev *ip6_mc_find_dev_rcu(struct net *net,
253 struct in6_addr *group, 249 const struct in6_addr *group,
254 int ifindex) 250 int ifindex)
255{ 251{
256 struct net_device *dev = NULL; 252 struct net_device *dev = NULL;
@@ -307,7 +303,7 @@ void ipv6_sock_mc_close(struct sock *sk)
307 rcu_read_unlock(); 303 rcu_read_unlock();
308 304
309 atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc); 305 atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
310 call_rcu(&mc_lst->rcu, ipv6_mc_socklist_reclaim); 306 kfree_rcu(mc_lst, rcu);
311 307
312 spin_lock(&ipv6_sk_mc_lock); 308 spin_lock(&ipv6_sk_mc_lock);
313 } 309 }
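The two mcast.c hunks above replace a call_rcu() callback whose only job was kfree(container_of(...)) with kfree_rcu(mc_lst, rcu), which encodes the rcu_head offset directly and lets the dedicated reclaim function go away. A compilable userspace analogue of why the callback was redundant (call_rcu() here is a synchronous stand-in; real RCU defers the call past a grace period):

#include <stdlib.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct rcu_head { void (*func)(struct rcu_head *); };

static void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *))
{
        func(head);     /* stand-in: runs immediately, not after a grace period */
}

struct mc_socklist {
        int ifindex;
        struct rcu_head rcu;
};

/* The boilerplate that kfree_rcu() makes unnecessary: */
static void mc_socklist_reclaim(struct rcu_head *head)
{
        free(container_of(head, struct mc_socklist, rcu));
}

int main(void)
{
        struct mc_socklist *mc = calloc(1, sizeof(*mc));

        if (!mc)
                return 1;
        call_rcu(&mc->rcu, mc_socklist_reclaim);
        return 0;
}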
@@ -451,7 +447,7 @@ done:
451 447
452int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf) 448int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf)
453{ 449{
454 struct in6_addr *group; 450 const struct in6_addr *group;
455 struct ipv6_mc_socklist *pmc; 451 struct ipv6_mc_socklist *pmc;
456 struct inet6_dev *idev; 452 struct inet6_dev *idev;
457 struct ipv6_pinfo *inet6 = inet6_sk(sk); 453 struct ipv6_pinfo *inet6 = inet6_sk(sk);
@@ -542,7 +538,7 @@ int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
542 struct group_filter __user *optval, int __user *optlen) 538 struct group_filter __user *optval, int __user *optlen)
543{ 539{
544 int err, i, count, copycount; 540 int err, i, count, copycount;
545 struct in6_addr *group; 541 const struct in6_addr *group;
546 struct ipv6_mc_socklist *pmc; 542 struct ipv6_mc_socklist *pmc;
547 struct inet6_dev *idev; 543 struct inet6_dev *idev;
548 struct ipv6_pinfo *inet6 = inet6_sk(sk); 544 struct ipv6_pinfo *inet6 = inet6_sk(sk);
@@ -752,7 +748,7 @@ static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
752 spin_unlock_bh(&idev->mc_lock); 748 spin_unlock_bh(&idev->mc_lock);
753} 749}
754 750
755static void mld_del_delrec(struct inet6_dev *idev, struct in6_addr *pmca) 751static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *pmca)
756{ 752{
757 struct ifmcaddr6 *pmc, *pmc_prev; 753 struct ifmcaddr6 *pmc, *pmc_prev;
758 struct ip6_sf_list *psf, *psf_next; 754 struct ip6_sf_list *psf, *psf_next;
@@ -1052,7 +1048,7 @@ static void igmp6_group_queried(struct ifmcaddr6 *ma, unsigned long resptime)
1052 1048
1053/* mark EXCLUDE-mode sources */ 1049/* mark EXCLUDE-mode sources */
1054static int mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs, 1050static int mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs,
1055 struct in6_addr *srcs) 1051 const struct in6_addr *srcs)
1056{ 1052{
1057 struct ip6_sf_list *psf; 1053 struct ip6_sf_list *psf;
1058 int i, scount; 1054 int i, scount;
@@ -1080,7 +1076,7 @@ static int mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs,
1080} 1076}
1081 1077
1082static int mld_marksources(struct ifmcaddr6 *pmc, int nsrcs, 1078static int mld_marksources(struct ifmcaddr6 *pmc, int nsrcs,
1083 struct in6_addr *srcs) 1079 const struct in6_addr *srcs)
1084{ 1080{
1085 struct ip6_sf_list *psf; 1081 struct ip6_sf_list *psf;
1086 int i, scount; 1082 int i, scount;
@@ -1115,7 +1111,7 @@ int igmp6_event_query(struct sk_buff *skb)
1115{ 1111{
1116 struct mld2_query *mlh2 = NULL; 1112 struct mld2_query *mlh2 = NULL;
1117 struct ifmcaddr6 *ma; 1113 struct ifmcaddr6 *ma;
1118 struct in6_addr *group; 1114 const struct in6_addr *group;
1119 unsigned long max_delay; 1115 unsigned long max_delay;
1120 struct inet6_dev *idev; 1116 struct inet6_dev *idev;
1121 struct mld_msg *mld; 1117 struct mld_msg *mld;
@@ -1821,7 +1817,7 @@ err_out:
1821} 1817}
1822 1818
1823static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode, 1819static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode,
1824 struct in6_addr *psfsrc) 1820 const struct in6_addr *psfsrc)
1825{ 1821{
1826 struct ip6_sf_list *psf, *psf_prev; 1822 struct ip6_sf_list *psf, *psf_prev;
1827 int rv = 0; 1823 int rv = 0;
@@ -1857,8 +1853,8 @@ static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode,
1857 return rv; 1853 return rv;
1858} 1854}
1859 1855
1860static int ip6_mc_del_src(struct inet6_dev *idev, struct in6_addr *pmca, 1856static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
1861 int sfmode, int sfcount, struct in6_addr *psfsrc, 1857 int sfmode, int sfcount, const struct in6_addr *psfsrc,
1862 int delta) 1858 int delta)
1863{ 1859{
1864 struct ifmcaddr6 *pmc; 1860 struct ifmcaddr6 *pmc;
@@ -1918,7 +1914,7 @@ static int ip6_mc_del_src(struct inet6_dev *idev, struct in6_addr *pmca,
1918 * Add multicast single-source filter to the interface list 1914 * Add multicast single-source filter to the interface list
1919 */ 1915 */
1920static int ip6_mc_add1_src(struct ifmcaddr6 *pmc, int sfmode, 1916static int ip6_mc_add1_src(struct ifmcaddr6 *pmc, int sfmode,
1921 struct in6_addr *psfsrc, int delta) 1917 const struct in6_addr *psfsrc, int delta)
1922{ 1918{
1923 struct ip6_sf_list *psf, *psf_prev; 1919 struct ip6_sf_list *psf, *psf_prev;
1924 1920
@@ -2021,8 +2017,8 @@ static int sf_setstate(struct ifmcaddr6 *pmc)
2021/* 2017/*
2022 * Add multicast source filter list to the interface list 2018 * Add multicast source filter list to the interface list
2023 */ 2019 */
2024static int ip6_mc_add_src(struct inet6_dev *idev, struct in6_addr *pmca, 2020static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
2025 int sfmode, int sfcount, struct in6_addr *psfsrc, 2021 int sfmode, int sfcount, const struct in6_addr *psfsrc,
2026 int delta) 2022 int delta)
2027{ 2023{
2028 struct ifmcaddr6 *pmc; 2024 struct ifmcaddr6 *pmc;
diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c
index 9b210482fb05..43242e6e6103 100644
--- a/net/ipv6/mip6.c
+++ b/net/ipv6/mip6.c
@@ -126,7 +126,7 @@ static struct mip6_report_rate_limiter mip6_report_rl = {
126 126
127static int mip6_destopt_input(struct xfrm_state *x, struct sk_buff *skb) 127static int mip6_destopt_input(struct xfrm_state *x, struct sk_buff *skb)
128{ 128{
129 struct ipv6hdr *iph = ipv6_hdr(skb); 129 const struct ipv6hdr *iph = ipv6_hdr(skb);
130 struct ipv6_destopt_hdr *destopt = (struct ipv6_destopt_hdr *)skb->data; 130 struct ipv6_destopt_hdr *destopt = (struct ipv6_destopt_hdr *)skb->data;
131 int err = destopt->nexthdr; 131 int err = destopt->nexthdr;
132 132
@@ -181,8 +181,8 @@ static int mip6_destopt_output(struct xfrm_state *x, struct sk_buff *skb)
181} 181}
182 182
183static inline int mip6_report_rl_allow(struct timeval *stamp, 183static inline int mip6_report_rl_allow(struct timeval *stamp,
184 struct in6_addr *dst, 184 const struct in6_addr *dst,
185 struct in6_addr *src, int iif) 185 const struct in6_addr *src, int iif)
186{ 186{
187 int allow = 0; 187 int allow = 0;
188 188
@@ -349,7 +349,7 @@ static const struct xfrm_type mip6_destopt_type =
349 349
350static int mip6_rthdr_input(struct xfrm_state *x, struct sk_buff *skb) 350static int mip6_rthdr_input(struct xfrm_state *x, struct sk_buff *skb)
351{ 351{
352 struct ipv6hdr *iph = ipv6_hdr(skb); 352 const struct ipv6hdr *iph = ipv6_hdr(skb);
353 struct rt2_hdr *rt2 = (struct rt2_hdr *)skb->data; 353 struct rt2_hdr *rt2 = (struct rt2_hdr *)skb->data;
354 int err = rt2->rt_hdr.nexthdr; 354 int err = rt2->rt_hdr.nexthdr;
355 355
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 92f952d093db..7596f071d308 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -324,7 +324,7 @@ static inline u8 *ndisc_opt_addr_data(struct nd_opt_hdr *p,
324 return lladdr + prepad; 324 return lladdr + prepad;
325} 325}
326 326
327int ndisc_mc_map(struct in6_addr *addr, char *buf, struct net_device *dev, int dir) 327int ndisc_mc_map(const struct in6_addr *addr, char *buf, struct net_device *dev, int dir)
328{ 328{
329 switch (dev->type) { 329 switch (dev->type) {
330 case ARPHRD_ETHER: 330 case ARPHRD_ETHER:
@@ -611,6 +611,29 @@ static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
611 inc_opt ? ND_OPT_TARGET_LL_ADDR : 0); 611 inc_opt ? ND_OPT_TARGET_LL_ADDR : 0);
612} 612}
613 613
614static void ndisc_send_unsol_na(struct net_device *dev)
615{
616 struct inet6_dev *idev;
617 struct inet6_ifaddr *ifa;
618 struct in6_addr mcaddr;
619
620 idev = in6_dev_get(dev);
621 if (!idev)
622 return;
623
624 read_lock_bh(&idev->lock);
625 list_for_each_entry(ifa, &idev->addr_list, if_list) {
626 addrconf_addr_solict_mult(&ifa->addr, &mcaddr);
627 ndisc_send_na(dev, NULL, &mcaddr, &ifa->addr,
628 /*router=*/ !!idev->cnf.forwarding,
629 /*solicited=*/ false, /*override=*/ true,
630 /*inc_opt=*/ true);
631 }
632 read_unlock_bh(&idev->lock);
633
634 in6_dev_put(idev);
635}
636
614void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh, 637void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,
615 const struct in6_addr *solicit, 638 const struct in6_addr *solicit,
616 const struct in6_addr *daddr, const struct in6_addr *saddr) 639 const struct in6_addr *daddr, const struct in6_addr *saddr)
@@ -725,8 +748,8 @@ static int pndisc_is_router(const void *pkey,
725static void ndisc_recv_ns(struct sk_buff *skb) 748static void ndisc_recv_ns(struct sk_buff *skb)
726{ 749{
727 struct nd_msg *msg = (struct nd_msg *)skb_transport_header(skb); 750 struct nd_msg *msg = (struct nd_msg *)skb_transport_header(skb);
728 struct in6_addr *saddr = &ipv6_hdr(skb)->saddr; 751 const struct in6_addr *saddr = &ipv6_hdr(skb)->saddr;
729 struct in6_addr *daddr = &ipv6_hdr(skb)->daddr; 752 const struct in6_addr *daddr = &ipv6_hdr(skb)->daddr;
730 u8 *lladdr = NULL; 753 u8 *lladdr = NULL;
731 u32 ndoptlen = skb->tail - (skb->transport_header + 754 u32 ndoptlen = skb->tail - (skb->transport_header +
732 offsetof(struct nd_msg, opt)); 755 offsetof(struct nd_msg, opt));
@@ -901,8 +924,8 @@ out:
901static void ndisc_recv_na(struct sk_buff *skb) 924static void ndisc_recv_na(struct sk_buff *skb)
902{ 925{
903 struct nd_msg *msg = (struct nd_msg *)skb_transport_header(skb); 926 struct nd_msg *msg = (struct nd_msg *)skb_transport_header(skb);
904 struct in6_addr *saddr = &ipv6_hdr(skb)->saddr; 927 const struct in6_addr *saddr = &ipv6_hdr(skb)->saddr;
905 struct in6_addr *daddr = &ipv6_hdr(skb)->daddr; 928 const struct in6_addr *daddr = &ipv6_hdr(skb)->daddr;
906 u8 *lladdr = NULL; 929 u8 *lladdr = NULL;
907 u32 ndoptlen = skb->tail - (skb->transport_header + 930 u32 ndoptlen = skb->tail - (skb->transport_header +
908 offsetof(struct nd_msg, opt)); 931 offsetof(struct nd_msg, opt));
@@ -945,9 +968,10 @@ static void ndisc_recv_na(struct sk_buff *skb)
945 } 968 }
946 ifp = ipv6_get_ifaddr(dev_net(dev), &msg->target, dev, 1); 969 ifp = ipv6_get_ifaddr(dev_net(dev), &msg->target, dev, 1);
947 if (ifp) { 970 if (ifp) {
948 if (ifp->flags & IFA_F_TENTATIVE) { 971 if (skb->pkt_type != PACKET_LOOPBACK
949 addrconf_dad_failure(ifp); 972 && (ifp->flags & IFA_F_TENTATIVE)) {
950 return; 973 addrconf_dad_failure(ifp);
974 return;
951 } 975 }
952 /* What should we make now? The advertisement 976 /* What should we make now? The advertisement
953 is invalid, but ndisc specs say nothing 977 is invalid, but ndisc specs say nothing
@@ -1014,7 +1038,7 @@ static void ndisc_recv_rs(struct sk_buff *skb)
1014 unsigned long ndoptlen = skb->len - sizeof(*rs_msg); 1038 unsigned long ndoptlen = skb->len - sizeof(*rs_msg);
1015 struct neighbour *neigh; 1039 struct neighbour *neigh;
1016 struct inet6_dev *idev; 1040 struct inet6_dev *idev;
1017 struct in6_addr *saddr = &ipv6_hdr(skb)->saddr; 1041 const struct in6_addr *saddr = &ipv6_hdr(skb)->saddr;
1018 struct ndisc_options ndopts; 1042 struct ndisc_options ndopts;
1019 u8 *lladdr = NULL; 1043 u8 *lladdr = NULL;
1020 1044
@@ -1411,8 +1435,8 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
1411{ 1435{
1412 struct inet6_dev *in6_dev; 1436 struct inet6_dev *in6_dev;
1413 struct icmp6hdr *icmph; 1437 struct icmp6hdr *icmph;
1414 struct in6_addr *dest; 1438 const struct in6_addr *dest;
1415 struct in6_addr *target; /* new first hop to destination */ 1439 const struct in6_addr *target; /* new first hop to destination */
1416 struct neighbour *neigh; 1440 struct neighbour *neigh;
1417 int on_link = 0; 1441 int on_link = 0;
1418 struct ndisc_options ndopts; 1442 struct ndisc_options ndopts;
@@ -1445,7 +1469,7 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
1445 } 1469 }
1446 1470
1447 icmph = icmp6_hdr(skb); 1471 icmph = icmp6_hdr(skb);
1448 target = (struct in6_addr *) (icmph + 1); 1472 target = (const struct in6_addr *) (icmph + 1);
1449 dest = target + 1; 1473 dest = target + 1;
1450 1474
1451 if (ipv6_addr_is_multicast(dest)) { 1475 if (ipv6_addr_is_multicast(dest)) {
@@ -1722,6 +1746,9 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event,
1722 neigh_ifdown(&nd_tbl, dev); 1746 neigh_ifdown(&nd_tbl, dev);
1723 fib6_run_gc(~0UL, net); 1747 fib6_run_gc(~0UL, net);
1724 break; 1748 break;
1749 case NETDEV_NOTIFY_PEERS:
1750 ndisc_send_unsol_na(dev);
1751 break;
1725 default: 1752 default:
1726 break; 1753 break;
1727 } 1754 }
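The new ndisc_send_unsol_na() walks the interface's addresses when NETDEV_NOTIFY_PEERS fires (e.g. after a VM migration) and advertises each one to its solicited-node multicast group via addrconf_addr_solict_mult(). That mapping is fixed: ff02::1:ff00:0/104 with the low 24 bits of the unicast address appended. A standalone check of the mapping:

#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <arpa/inet.h>

/* ff02::1:ffXX:XXXX, where XX:XXXX are the low 24 bits of addr */
static void solicited_node_mcast(const struct in6_addr *addr, struct in6_addr *mc)
{
        memset(mc, 0, sizeof(*mc));
        mc->s6_addr[0]  = 0xff;
        mc->s6_addr[1]  = 0x02;
        mc->s6_addr[11] = 0x01;
        mc->s6_addr[12] = 0xff;
        memcpy(&mc->s6_addr[13], &addr->s6_addr[13], 3);
}

int main(void)
{
        struct in6_addr addr, mc;
        char buf[INET6_ADDRSTRLEN];

        inet_pton(AF_INET6, "2001:db8::4567:89ab", &addr);
        solicited_node_mcast(&addr, &mc);
        if (inet_ntop(AF_INET6, &mc, buf, sizeof(buf)))
                puts(buf);      /* prints ff02::1:ff67:89ab */
        return 0;
}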
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index 28bc1f644b7b..30fcee465448 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -13,7 +13,7 @@
13int ip6_route_me_harder(struct sk_buff *skb) 13int ip6_route_me_harder(struct sk_buff *skb)
14{ 14{
15 struct net *net = dev_net(skb_dst(skb)->dev); 15 struct net *net = dev_net(skb_dst(skb)->dev);
16 struct ipv6hdr *iph = ipv6_hdr(skb); 16 const struct ipv6hdr *iph = ipv6_hdr(skb);
17 struct dst_entry *dst; 17 struct dst_entry *dst;
18 struct flowi6 fl6 = { 18 struct flowi6 fl6 = {
19 .flowi6_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0, 19 .flowi6_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0,
@@ -67,7 +67,7 @@ static void nf_ip6_saveroute(const struct sk_buff *skb,
67 struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry); 67 struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);
68 68
69 if (entry->hook == NF_INET_LOCAL_OUT) { 69 if (entry->hook == NF_INET_LOCAL_OUT) {
70 struct ipv6hdr *iph = ipv6_hdr(skb); 70 const struct ipv6hdr *iph = ipv6_hdr(skb);
71 71
72 rt_info->daddr = iph->daddr; 72 rt_info->daddr = iph->daddr;
73 rt_info->saddr = iph->saddr; 73 rt_info->saddr = iph->saddr;
@@ -81,7 +81,7 @@ static int nf_ip6_reroute(struct sk_buff *skb,
81 struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry); 81 struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);
82 82
83 if (entry->hook == NF_INET_LOCAL_OUT) { 83 if (entry->hook == NF_INET_LOCAL_OUT) {
84 struct ipv6hdr *iph = ipv6_hdr(skb); 84 const struct ipv6hdr *iph = ipv6_hdr(skb);
85 if (!ipv6_addr_equal(&iph->daddr, &rt_info->daddr) || 85 if (!ipv6_addr_equal(&iph->daddr, &rt_info->daddr) ||
86 !ipv6_addr_equal(&iph->saddr, &rt_info->saddr) || 86 !ipv6_addr_equal(&iph->saddr, &rt_info->saddr) ||
87 skb->mark != rt_info->mark) 87 skb->mark != rt_info->mark)
@@ -108,7 +108,7 @@ static int nf_ip6_route(struct net *net, struct dst_entry **dst,
108__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook, 108__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
109 unsigned int dataoff, u_int8_t protocol) 109 unsigned int dataoff, u_int8_t protocol)
110{ 110{
111 struct ipv6hdr *ip6h = ipv6_hdr(skb); 111 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
112 __sum16 csum = 0; 112 __sum16 csum = 0;
113 113
114 switch (skb->ip_summed) { 114 switch (skb->ip_summed) {
@@ -142,7 +142,7 @@ static __sum16 nf_ip6_checksum_partial(struct sk_buff *skb, unsigned int hook,
142 unsigned int dataoff, unsigned int len, 142 unsigned int dataoff, unsigned int len,
143 u_int8_t protocol) 143 u_int8_t protocol)
144{ 144{
145 struct ipv6hdr *ip6h = ipv6_hdr(skb); 145 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
146 __wsum hsum; 146 __wsum hsum;
147 __sum16 csum = 0; 147 __sum16 csum = 0;
148 148
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
index 413ab0754e1f..249394863284 100644
--- a/net/ipv6/netfilter/ip6_queue.c
+++ b/net/ipv6/netfilter/ip6_queue.c
@@ -204,7 +204,8 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
204 else 204 else
205 pmsg->outdev_name[0] = '\0'; 205 pmsg->outdev_name[0] = '\0';
206 206
207 if (entry->indev && entry->skb->dev) { 207 if (entry->indev && entry->skb->dev &&
208 entry->skb->mac_header != entry->skb->network_header) {
208 pmsg->hw_type = entry->skb->dev->type; 209 pmsg->hw_type = entry->skb->dev->type;
209 pmsg->hw_addrlen = dev_parse_header(entry->skb, pmsg->hw_addr); 210 pmsg->hw_addrlen = dev_parse_header(entry->skb, pmsg->hw_addr);
210 } 211 }
@@ -403,7 +404,8 @@ ipq_dev_drop(int ifindex)
403static inline void 404static inline void
404__ipq_rcv_skb(struct sk_buff *skb) 405__ipq_rcv_skb(struct sk_buff *skb)
405{ 406{
406 int status, type, pid, flags, nlmsglen, skblen; 407 int status, type, pid, flags;
408 unsigned int nlmsglen, skblen;
407 struct nlmsghdr *nlh; 409 struct nlmsghdr *nlh;
408 410
409 skblen = skb->len; 411 skblen = skb->len;
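The __ipq_rcv_skb() hunk makes nlmsglen and skblen unsigned. With signed ints, a huge attacker-supplied nlmsg_len turns negative and sails past greater-than bounds checks, then explodes when converted to size_t at the copy. A small demonstration of that bug class (variable names mirror the hunk; the conversion to int is implementation-defined but yields -1 on the usual two's-complement targets):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t wire_len = 0xffffffffu;        /* attacker-controlled length */
        int nlmsglen = (int)wire_len;           /* old code: signed local, now -1 */
        int skblen = 128;

        if (!(nlmsglen > skblen))               /* -1 > 128 is false: check passes */
                printf("signed: accepted, a copy would get %zu bytes\n",
                       (size_t)nlmsglen);

        unsigned int unlmsglen = wire_len;      /* new code: unsigned local */
        unsigned int uskblen = 128;

        if (unlmsglen > uskblen)                /* same check now rejects */
                printf("unsigned: rejected\n");
        return 0;
}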
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 5a1c6f27ffaf..94874b0bdcdc 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -340,6 +340,7 @@ ip6t_do_table(struct sk_buff *skb,
340 unsigned int *stackptr, origptr, cpu; 340 unsigned int *stackptr, origptr, cpu;
341 const struct xt_table_info *private; 341 const struct xt_table_info *private;
342 struct xt_action_param acpar; 342 struct xt_action_param acpar;
343 unsigned int addend;
343 344
344 /* Initialization */ 345 /* Initialization */
345 indev = in ? in->name : nulldevname; 346 indev = in ? in->name : nulldevname;
@@ -358,7 +359,8 @@ ip6t_do_table(struct sk_buff *skb,
358 359
359 IP_NF_ASSERT(table->valid_hooks & (1 << hook)); 360 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
360 361
361 xt_info_rdlock_bh(); 362 local_bh_disable();
363 addend = xt_write_recseq_begin();
362 private = table->private; 364 private = table->private;
363 cpu = smp_processor_id(); 365 cpu = smp_processor_id();
364 table_base = private->entries[cpu]; 366 table_base = private->entries[cpu];
@@ -442,7 +444,9 @@ ip6t_do_table(struct sk_buff *skb,
442 } while (!acpar.hotdrop); 444 } while (!acpar.hotdrop);
443 445
444 *stackptr = origptr; 446 *stackptr = origptr;
445 xt_info_rdunlock_bh(); 447
448 xt_write_recseq_end(addend);
449 local_bh_enable();
446 450
447#ifdef DEBUG_ALLOW_ALL 451#ifdef DEBUG_ALLOW_ALL
448 return NF_ACCEPT; 452 return NF_ACCEPT;
@@ -899,7 +903,7 @@ get_counters(const struct xt_table_info *t,
899 unsigned int i; 903 unsigned int i;
900 904
901 for_each_possible_cpu(cpu) { 905 for_each_possible_cpu(cpu) {
902 seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock; 906 seqcount_t *s = &per_cpu(xt_recseq, cpu);
903 907
904 i = 0; 908 i = 0;
905 xt_entry_foreach(iter, t->entries[cpu], t->size) { 909 xt_entry_foreach(iter, t->entries[cpu], t->size) {
@@ -907,10 +911,10 @@ get_counters(const struct xt_table_info *t,
907 unsigned int start; 911 unsigned int start;
908 912
909 do { 913 do {
910 start = read_seqbegin(lock); 914 start = read_seqcount_begin(s);
911 bcnt = iter->counters.bcnt; 915 bcnt = iter->counters.bcnt;
912 pcnt = iter->counters.pcnt; 916 pcnt = iter->counters.pcnt;
913 } while (read_seqretry(lock, start)); 917 } while (read_seqcount_retry(s, start));
914 918
915 ADD_COUNTER(counters[i], bcnt, pcnt); 919 ADD_COUNTER(counters[i], bcnt, pcnt);
916 ++i; 920 ++i;
@@ -1325,6 +1329,7 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
1325 int ret = 0; 1329 int ret = 0;
1326 const void *loc_cpu_entry; 1330 const void *loc_cpu_entry;
1327 struct ip6t_entry *iter; 1331 struct ip6t_entry *iter;
1332 unsigned int addend;
1328#ifdef CONFIG_COMPAT 1333#ifdef CONFIG_COMPAT
1329 struct compat_xt_counters_info compat_tmp; 1334 struct compat_xt_counters_info compat_tmp;
1330 1335
@@ -1381,13 +1386,13 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
1381 i = 0; 1386 i = 0;
1382 /* Choose the copy that is on our node */ 1387 /* Choose the copy that is on our node */
1383 curcpu = smp_processor_id(); 1388 curcpu = smp_processor_id();
1384 xt_info_wrlock(curcpu); 1389 addend = xt_write_recseq_begin();
1385 loc_cpu_entry = private->entries[curcpu]; 1390 loc_cpu_entry = private->entries[curcpu];
1386 xt_entry_foreach(iter, loc_cpu_entry, private->size) { 1391 xt_entry_foreach(iter, loc_cpu_entry, private->size) {
1387 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt); 1392 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
1388 ++i; 1393 ++i;
1389 } 1394 }
1390 xt_info_wrunlock(curcpu); 1395 xt_write_recseq_end(addend);
1391 1396
1392 unlock_up_free: 1397 unlock_up_free:
1393 local_bh_enable(); 1398 local_bh_enable();
@@ -1578,7 +1583,6 @@ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1578 struct xt_table_info *newinfo, unsigned char *base) 1583 struct xt_table_info *newinfo, unsigned char *base)
1579{ 1584{
1580 struct xt_entry_target *t; 1585 struct xt_entry_target *t;
1581 struct xt_target *target;
1582 struct ip6t_entry *de; 1586 struct ip6t_entry *de;
1583 unsigned int origsize; 1587 unsigned int origsize;
1584 int ret, h; 1588 int ret, h;
@@ -1600,7 +1604,6 @@ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1600 } 1604 }
1601 de->target_offset = e->target_offset - (origsize - *size); 1605 de->target_offset = e->target_offset - (origsize - *size);
1602 t = compat_ip6t_get_target(e); 1606 t = compat_ip6t_get_target(e);
1603 target = t->u.kernel.target;
1604 xt_compat_target_from_user(t, dstptr, size); 1607 xt_compat_target_from_user(t, dstptr, size);
1605 1608
1606 de->next_offset = e->next_offset - (origsize - *size); 1609 de->next_offset = e->next_offset - (origsize - *size);
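The ip6_tables.c conversion above drops the per-cpu reader/writer lock in favor of a per-cpu sequence counter: the packet path brackets its counter updates with xt_write_recseq_begin()/end(), and get_counters() retries its snapshot until it observes an even, unchanged sequence. A compilable sketch of the read side in C11 (single-threaded here, and the kernel's memory barriers are elided; a real reader spins against a concurrent writer):

#include <stdatomic.h>
#include <stdio.h>

struct counters { unsigned long bcnt, pcnt; };

static atomic_uint seq;         /* one per cpu in the kernel */
static struct counters cnt;

static unsigned int seqcount_begin(void)
{
        unsigned int s;

        while ((s = atomic_load(&seq)) & 1)
                ;               /* odd sequence: writer in progress */
        return s;
}

static int seqcount_retry(unsigned int start)
{
        return atomic_load(&seq) != start;
}

int main(void)
{
        struct counters snap;
        unsigned int start;

        do {
                start = seqcount_begin();
                snap = cnt;     /* may race with a writer; retry detects it */
        } while (seqcount_retry(start));

        printf("bcnt=%lu pcnt=%lu\n", snap.bcnt, snap.pcnt);
        return 0;
}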
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c
index 679a0a3b7b3c..00d19173db7e 100644
--- a/net/ipv6/netfilter/ip6table_mangle.c
+++ b/net/ipv6/netfilter/ip6table_mangle.c
@@ -64,7 +64,8 @@ ip6t_mangle_out(struct sk_buff *skb, const struct net_device *out)
64 (memcmp(&ipv6_hdr(skb)->saddr, &saddr, sizeof(saddr)) || 64 (memcmp(&ipv6_hdr(skb)->saddr, &saddr, sizeof(saddr)) ||
65 memcmp(&ipv6_hdr(skb)->daddr, &daddr, sizeof(daddr)) || 65 memcmp(&ipv6_hdr(skb)->daddr, &daddr, sizeof(daddr)) ||
66 skb->mark != mark || 66 skb->mark != mark ||
67 ipv6_hdr(skb)->hop_limit != hop_limit)) 67 ipv6_hdr(skb)->hop_limit != hop_limit ||
68 flowlabel != *((u_int32_t *)ipv6_hdr(skb))))
68 return ip6_route_me_harder(skb) == 0 ? ret : NF_DROP; 69 return ip6_route_me_harder(skb) == 0 ? ret : NF_DROP;
69 70
70 return ret; 71 return ret;
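The mangle-table hunk adds the saved first 32 bits of the IPv6 header to the reroute test; that word packs version (4 bits), traffic class (8) and flow label (20), so a mangled flow label now triggers ip6_route_me_harder() just as a rewritten address or mark already did. Unpacking that word, standalone:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
        /* First word of an IPv6 header: version 6, tclass 0x1a, flowlabel 0xbeef5 */
        uint32_t word = htonl((6u << 28) | (0x1au << 20) | 0xbeef5u);
        uint32_t host = ntohl(word);

        printf("version=%u tclass=0x%02x flowlabel=0x%05x\n",
               (unsigned int)(host >> 28),
               (unsigned int)((host >> 20) & 0xff),
               (unsigned int)(host & 0xfffff));
        return 0;
}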
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index c8af58b22562..4111050a9fc5 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -160,7 +160,7 @@ static unsigned int ipv6_confirm(unsigned int hooknum,
160 160
161 /* This is where we call the helper: as the packet goes out. */ 161 /* This is where we call the helper: as the packet goes out. */
162 ct = nf_ct_get(skb, &ctinfo); 162 ct = nf_ct_get(skb, &ctinfo);
163 if (!ct || ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY) 163 if (!ct || ctinfo == IP_CT_RELATED_REPLY)
164 goto out; 164 goto out;
165 165
166 help = nfct_help(ct); 166 help = nfct_help(ct);
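IP_CT_RELATED_REPLY is a named constant for what was previously spelled as enum arithmetic; presumably it is defined next to the other ctinfo states along these lines (a sketch, not the verbatim header):

#include <assert.h>

enum ip_conntrack_info {        /* sketch of the conntrack ctinfo enum */
        IP_CT_ESTABLISHED,
        IP_CT_RELATED,
        IP_CT_NEW,
        IP_CT_IS_REPLY,
        IP_CT_ESTABLISHED_REPLY = IP_CT_ESTABLISHED + IP_CT_IS_REPLY,
        IP_CT_RELATED_REPLY = IP_CT_RELATED + IP_CT_IS_REPLY,
};

int main(void)
{
        assert(IP_CT_RELATED_REPLY == IP_CT_RELATED + IP_CT_IS_REPLY);
        return 0;
}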
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index 1df3c8b6bf47..7c05e7eacbc6 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -177,7 +177,7 @@ icmpv6_error_message(struct net *net, struct nf_conn *tmpl,
177 /* Update skb to refer to this connection */ 177 /* Update skb to refer to this connection */
178 skb->nfct = &nf_ct_tuplehash_to_ctrack(h)->ct_general; 178 skb->nfct = &nf_ct_tuplehash_to_ctrack(h)->ct_general;
179 skb->nfctinfo = *ctinfo; 179 skb->nfctinfo = *ctinfo;
180 return -NF_ACCEPT; 180 return NF_ACCEPT;
181} 181}
182 182
183static int 183static int
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index 24b3558b8e67..18ff5df7ec02 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -141,7 +141,11 @@ static const struct snmp_mib snmp6_udplite6_list[] = {
141 SNMP_MIB_SENTINEL 141 SNMP_MIB_SENTINEL
142}; 142};
143 143
144static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, void __percpu **mib) 144/* can be called either with percpu mib (pcpumib != NULL),
145 * or shared one (smib != NULL)
146 */
147static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, void __percpu **pcpumib,
148 atomic_long_t *smib)
145{ 149{
146 char name[32]; 150 char name[32];
147 int i; 151 int i;
@@ -158,14 +162,14 @@ static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, void __percpu **mib)
158 snprintf(name, sizeof(name), "Icmp6%s%s", 162 snprintf(name, sizeof(name), "Icmp6%s%s",
159 i & 0x100 ? "Out" : "In", p); 163 i & 0x100 ? "Out" : "In", p);
160 seq_printf(seq, "%-32s\t%lu\n", name, 164 seq_printf(seq, "%-32s\t%lu\n", name,
161 snmp_fold_field(mib, i)); 165 pcpumib ? snmp_fold_field(pcpumib, i) : atomic_long_read(smib + i));
162 } 166 }
163 167
164 /* print by number (nonzero only) - ICMPMsgStat format */ 168 /* print by number (nonzero only) - ICMPMsgStat format */
165 for (i = 0; i < ICMP6MSG_MIB_MAX; i++) { 169 for (i = 0; i < ICMP6MSG_MIB_MAX; i++) {
166 unsigned long val; 170 unsigned long val;
167 171
168 val = snmp_fold_field(mib, i); 172 val = pcpumib ? snmp_fold_field(pcpumib, i) : atomic_long_read(smib + i);
169 if (!val) 173 if (!val)
170 continue; 174 continue;
171 snprintf(name, sizeof(name), "Icmp6%sType%u", 175 snprintf(name, sizeof(name), "Icmp6%sType%u",
@@ -174,14 +178,22 @@ static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, void __percpu **mib)
174 } 178 }
175} 179}
176 180
177static void snmp6_seq_show_item(struct seq_file *seq, void __percpu **mib, 181/* can be called either with percpu mib (pcpumib != NULL),
182 * or shared one (smib != NULL)
183 */
184static void snmp6_seq_show_item(struct seq_file *seq, void __percpu **pcpumib,
185 atomic_long_t *smib,
178 const struct snmp_mib *itemlist) 186 const struct snmp_mib *itemlist)
179{ 187{
180 int i; 188 int i;
189 unsigned long val;
181 190
182 for (i = 0; itemlist[i].name; i++) 191 for (i = 0; itemlist[i].name; i++) {
183 seq_printf(seq, "%-32s\t%lu\n", itemlist[i].name, 192 val = pcpumib ?
184 snmp_fold_field(mib, itemlist[i].entry)); 193 snmp_fold_field(pcpumib, itemlist[i].entry) :
194 atomic_long_read(smib + itemlist[i].entry);
195 seq_printf(seq, "%-32s\t%lu\n", itemlist[i].name, val);
196 }
185} 197}
186 198
187static void snmp6_seq_show_item64(struct seq_file *seq, void __percpu **mib, 199static void snmp6_seq_show_item64(struct seq_file *seq, void __percpu **mib,
@@ -201,13 +213,13 @@ static int snmp6_seq_show(struct seq_file *seq, void *v)
201 snmp6_seq_show_item64(seq, (void __percpu **)net->mib.ipv6_statistics, 213 snmp6_seq_show_item64(seq, (void __percpu **)net->mib.ipv6_statistics,
202 snmp6_ipstats_list, offsetof(struct ipstats_mib, syncp)); 214 snmp6_ipstats_list, offsetof(struct ipstats_mib, syncp));
203 snmp6_seq_show_item(seq, (void __percpu **)net->mib.icmpv6_statistics, 215 snmp6_seq_show_item(seq, (void __percpu **)net->mib.icmpv6_statistics,
204 snmp6_icmp6_list); 216 NULL, snmp6_icmp6_list);
205 snmp6_seq_show_icmpv6msg(seq, 217 snmp6_seq_show_icmpv6msg(seq,
206 (void __percpu **)net->mib.icmpv6msg_statistics); 218 (void __percpu **)net->mib.icmpv6msg_statistics, NULL);
207 snmp6_seq_show_item(seq, (void __percpu **)net->mib.udp_stats_in6, 219 snmp6_seq_show_item(seq, (void __percpu **)net->mib.udp_stats_in6,
208 snmp6_udp6_list); 220 NULL, snmp6_udp6_list);
209 snmp6_seq_show_item(seq, (void __percpu **)net->mib.udplite_stats_in6, 221 snmp6_seq_show_item(seq, (void __percpu **)net->mib.udplite_stats_in6,
210 snmp6_udplite6_list); 222 NULL, snmp6_udplite6_list);
211 return 0; 223 return 0;
212} 224}
213 225
@@ -229,11 +241,11 @@ static int snmp6_dev_seq_show(struct seq_file *seq, void *v)
229 struct inet6_dev *idev = (struct inet6_dev *)seq->private; 241 struct inet6_dev *idev = (struct inet6_dev *)seq->private;
230 242
231 seq_printf(seq, "%-32s\t%u\n", "ifIndex", idev->dev->ifindex); 243 seq_printf(seq, "%-32s\t%u\n", "ifIndex", idev->dev->ifindex);
232 snmp6_seq_show_item(seq, (void __percpu **)idev->stats.ipv6, 244 snmp6_seq_show_item(seq, (void __percpu **)idev->stats.ipv6, NULL,
233 snmp6_ipstats_list); 245 snmp6_ipstats_list);
234 snmp6_seq_show_item(seq, (void __percpu **)idev->stats.icmpv6, 246 snmp6_seq_show_item(seq, NULL, idev->stats.icmpv6dev->mibs,
235 snmp6_icmp6_list); 247 snmp6_icmp6_list);
236 snmp6_seq_show_icmpv6msg(seq, (void __percpu **)idev->stats.icmpv6msg); 248 snmp6_seq_show_icmpv6msg(seq, NULL, idev->stats.icmpv6msgdev->mibs);
237 return 0; 249 return 0;
238} 250}
239 251
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 4a1c3b46c56b..cc7313b8f7ea 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -67,8 +67,8 @@ static struct raw_hashinfo raw_v6_hashinfo = {
67}; 67};
68 68
69static struct sock *__raw_v6_lookup(struct net *net, struct sock *sk, 69static struct sock *__raw_v6_lookup(struct net *net, struct sock *sk,
70 unsigned short num, struct in6_addr *loc_addr, 70 unsigned short num, const struct in6_addr *loc_addr,
71 struct in6_addr *rmt_addr, int dif) 71 const struct in6_addr *rmt_addr, int dif)
72{ 72{
73 struct hlist_node *node; 73 struct hlist_node *node;
74 int is_multicast = ipv6_addr_is_multicast(loc_addr); 74 int is_multicast = ipv6_addr_is_multicast(loc_addr);
@@ -154,8 +154,8 @@ EXPORT_SYMBOL(rawv6_mh_filter_unregister);
154 */ 154 */
155static int ipv6_raw_deliver(struct sk_buff *skb, int nexthdr) 155static int ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
156{ 156{
157 struct in6_addr *saddr; 157 const struct in6_addr *saddr;
158 struct in6_addr *daddr; 158 const struct in6_addr *daddr;
159 struct sock *sk; 159 struct sock *sk;
160 int delivered = 0; 160 int delivered = 0;
161 __u8 hash; 161 __u8 hash;
@@ -348,7 +348,7 @@ void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
348{ 348{
349 struct sock *sk; 349 struct sock *sk;
350 int hash; 350 int hash;
351 struct in6_addr *saddr, *daddr; 351 const struct in6_addr *saddr, *daddr;
352 struct net *net; 352 struct net *net;
353 353
354 hash = nexthdr & (RAW_HTABLE_SIZE - 1); 354 hash = nexthdr & (RAW_HTABLE_SIZE - 1);
@@ -357,7 +357,7 @@ void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
357 sk = sk_head(&raw_v6_hashinfo.ht[hash]); 357 sk = sk_head(&raw_v6_hashinfo.ht[hash]);
358 if (sk != NULL) { 358 if (sk != NULL) {
359 /* Note: ipv6_hdr(skb) != skb->data */ 359 /* Note: ipv6_hdr(skb) != skb->data */
360 struct ipv6hdr *ip6h = (struct ipv6hdr *)skb->data; 360 const struct ipv6hdr *ip6h = (const struct ipv6hdr *)skb->data;
361 saddr = &ip6h->saddr; 361 saddr = &ip6h->saddr;
362 daddr = &ip6h->daddr; 362 daddr = &ip6h->daddr;
363 net = dev_net(skb->dev); 363 net = dev_net(skb->dev);
@@ -542,8 +542,8 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
542 goto out; 542 goto out;
543 543
544 offset = rp->offset; 544 offset = rp->offset;
545 total_len = inet_sk(sk)->cork.length - (skb_network_header(skb) - 545 total_len = inet_sk(sk)->cork.base.length - (skb_network_header(skb) -
546 skb->data); 546 skb->data);
547 if (offset >= total_len - 1) { 547 if (offset >= total_len - 1) {
548 err = -EINVAL; 548 err = -EINVAL;
549 ip6_flush_pending_frames(sk); 549 ip6_flush_pending_frames(sk);
@@ -1231,7 +1231,7 @@ struct proto rawv6_prot = {
1231static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i) 1231static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
1232{ 1232{
1233 struct ipv6_pinfo *np = inet6_sk(sp); 1233 struct ipv6_pinfo *np = inet6_sk(sp);
1234 struct in6_addr *dest, *src; 1234 const struct in6_addr *dest, *src;
1235 __u16 destp, srcp; 1235 __u16 destp, srcp;
1236 1236
1237 dest = &np->daddr; 1237 dest = &np->daddr;
@@ -1240,7 +1240,7 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
1240 srcp = inet_sk(sp)->inet_num; 1240 srcp = inet_sk(sp)->inet_num;
1241 seq_printf(seq, 1241 seq_printf(seq,
1242 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " 1242 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1243 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d\n", 1243 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d\n",
1244 i, 1244 i,
1245 src->s6_addr32[0], src->s6_addr32[1], 1245 src->s6_addr32[0], src->s6_addr32[1],
1246 src->s6_addr32[2], src->s6_addr32[3], srcp, 1246 src->s6_addr32[2], src->s6_addr32[3], srcp,
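raw6_sock_seq_show() switches the socket pointer from %p to %pK, so /proc/net/raw6 honors the kptr_restrict sysctl and unprivileged readers see zeros instead of a kernel address. Roughly that behavior, sketched in userspace:

#include <stdio.h>

static int kptr_restrict = 1;   /* stand-in for the sysctl */

static void print_kptr(const void *p, int privileged)
{
        if (kptr_restrict && !privileged)
                printf("%p\n", (void *)0);      /* %pK hides the value */
        else
                printf("%p\n", p);
}

int main(void)
{
        int x;

        print_kptr(&x, 0);      /* zeros for unprivileged readers */
        print_kptr(&x, 1);      /* real address for privileged ones */
        return 0;
}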
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 07beeb06f752..7b954e2539d0 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -224,7 +224,7 @@ out:
224} 224}
225 225
226static __inline__ struct frag_queue * 226static __inline__ struct frag_queue *
227fq_find(struct net *net, __be32 id, struct in6_addr *src, struct in6_addr *dst) 227fq_find(struct net *net, __be32 id, const struct in6_addr *src, const struct in6_addr *dst)
228{ 228{
229 struct inet_frag_queue *q; 229 struct inet_frag_queue *q;
230 struct ip6_create_arg arg; 230 struct ip6_create_arg arg;
@@ -535,7 +535,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
535{ 535{
536 struct frag_hdr *fhdr; 536 struct frag_hdr *fhdr;
537 struct frag_queue *fq; 537 struct frag_queue *fq;
538 struct ipv6hdr *hdr = ipv6_hdr(skb); 538 const struct ipv6hdr *hdr = ipv6_hdr(skb);
539 struct net *net = dev_net(skb_dst(skb)->dev); 539 struct net *net = dev_net(skb_dst(skb)->dev);
540 540
541 IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS); 541 IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index fd0eec6f88c6..0ef1f086feb8 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -89,12 +89,12 @@ static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
89 89
90#ifdef CONFIG_IPV6_ROUTE_INFO 90#ifdef CONFIG_IPV6_ROUTE_INFO
91static struct rt6_info *rt6_add_route_info(struct net *net, 91static struct rt6_info *rt6_add_route_info(struct net *net,
92 struct in6_addr *prefix, int prefixlen, 92 const struct in6_addr *prefix, int prefixlen,
93 struct in6_addr *gwaddr, int ifindex, 93 const struct in6_addr *gwaddr, int ifindex,
94 unsigned pref); 94 unsigned pref);
95static struct rt6_info *rt6_get_route_info(struct net *net, 95static struct rt6_info *rt6_get_route_info(struct net *net,
96 struct in6_addr *prefix, int prefixlen, 96 const struct in6_addr *prefix, int prefixlen,
97 struct in6_addr *gwaddr, int ifindex); 97 const struct in6_addr *gwaddr, int ifindex);
98#endif 98#endif
99 99
100static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old) 100static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
@@ -227,9 +227,15 @@ static struct rt6_info ip6_blk_hole_entry_template = {
227#endif 227#endif
228 228
229/* allocate dst with ip6_dst_ops */ 229/* allocate dst with ip6_dst_ops */
230static inline struct rt6_info *ip6_dst_alloc(struct dst_ops *ops) 230static inline struct rt6_info *ip6_dst_alloc(struct dst_ops *ops,
231 struct net_device *dev,
232 int flags)
231{ 233{
232 return (struct rt6_info *)dst_alloc(ops, 0); 234 struct rt6_info *rt = dst_alloc(ops, dev, 0, 0, flags);
235
236 memset(&rt->rt6i_table, 0, sizeof(*rt) - sizeof(struct dst_entry));
237
238 return rt;
233} 239}
234 240
235static void ip6_dst_destroy(struct dst_entry *dst) 241static void ip6_dst_destroy(struct dst_entry *dst)
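ip6_dst_alloc() now zeroes everything in struct rt6_info past the embedded struct dst_entry using sizeof(*rt) - sizeof(struct dst_entry), which is correct only while the dst is the first member and rt6i_table immediately follows it with no padding in between. A standalone illustration of the invariant with toy structs (offsetof is the defensive spelling):

#include <stdio.h>
#include <string.h>
#include <stddef.h>

struct dst_entry { void *dev; int refcnt; };

struct rt6_info {
        struct dst_entry dst;   /* must stay first for the size arithmetic */
        void *rt6i_table;       /* first private field */
        int rt6i_flags;
};

int main(void)
{
        struct rt6_info rt;

        memset(&rt, 0x55, sizeof(rt));          /* pretend the allocation left junk */
        /* Matches memset(&rt.rt6i_table, 0, sizeof(rt) - sizeof(struct dst_entry))
         * only if offsetof(rt6_info, rt6i_table) == sizeof(struct dst_entry). */
        memset((char *)&rt + offsetof(struct rt6_info, rt6i_table), 0,
               sizeof(rt) - offsetof(struct rt6_info, rt6i_table));

        printf("table=%p flags=%d\n", rt.rt6i_table, rt.rt6i_flags);   /* zeroed */
        return 0;
}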
@@ -290,7 +296,7 @@ static __inline__ int rt6_check_expired(const struct rt6_info *rt)
290 time_after(jiffies, rt->rt6i_expires); 296 time_after(jiffies, rt->rt6i_expires);
291} 297}
292 298
-static inline int rt6_need_strict(struct in6_addr *daddr)
+static inline int rt6_need_strict(const struct in6_addr *daddr)
 {
 	return ipv6_addr_type(daddr) &
 		(IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
@@ -302,7 +308,7 @@ static inline int rt6_need_strict(struct in6_addr *daddr)
 
 static inline struct rt6_info *rt6_device_match(struct net *net,
 						    struct rt6_info *rt,
-						    struct in6_addr *saddr,
+						    const struct in6_addr *saddr,
 						    int oif,
 						    int flags)
 {
@@ -514,7 +520,7 @@ static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
 
 #ifdef CONFIG_IPV6_ROUTE_INFO
 int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
-		  struct in6_addr *gwaddr)
+		  const struct in6_addr *gwaddr)
 {
 	struct net *net = dev_net(dev);
 	struct route_info *rinfo = (struct route_info *) opt;
@@ -677,8 +683,8 @@ int ip6_ins_rt(struct rt6_info *rt)
 	return __ip6_ins_rt(rt, &info);
 }
 
-static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, struct in6_addr *daddr,
-				      struct in6_addr *saddr)
+static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, const struct in6_addr *daddr,
+				      const struct in6_addr *saddr)
 {
 	struct rt6_info *rt;
 
@@ -746,7 +752,7 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, struct in6_addr *dad
 	return rt;
 }
 
-static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort, struct in6_addr *daddr)
+static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort, const struct in6_addr *daddr)
 {
 	struct rt6_info *rt = ip6_rt_copy(ort);
 	if (rt) {
@@ -837,7 +843,7 @@ static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *
 
 void ip6_route_input(struct sk_buff *skb)
 {
-	struct ipv6hdr *iph = ipv6_hdr(skb);
+	const struct ipv6hdr *iph = ipv6_hdr(skb);
 	struct net *net = dev_net(skb->dev);
 	int flags = RT6_LOOKUP_F_HAS_SADDR;
 	struct flowi6 fl6 = {
@@ -881,11 +887,13 @@ EXPORT_SYMBOL(ip6_route_output);
 
 struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
 {
-	struct rt6_info *rt = dst_alloc(&ip6_dst_blackhole_ops, 1);
-	struct rt6_info *ort = (struct rt6_info *) dst_orig;
+	struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
 	struct dst_entry *new = NULL;
 
+	rt = dst_alloc(&ip6_dst_blackhole_ops, ort->dst.dev, 1, 0, 0);
 	if (rt) {
+		memset(&rt->rt6i_table, 0, sizeof(*rt) - sizeof(struct dst_entry));
+
 		new = &rt->dst;
 
 		new->__use = 1;
@@ -893,9 +901,6 @@ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_ori
 		new->output = dst_discard;
 
 		dst_copy_metrics(new, &ort->dst);
-		new->dev = ort->dst.dev;
-		if (new->dev)
-			dev_hold(new->dev);
 		rt->rt6i_idev = ort->rt6i_idev;
 		if (rt->rt6i_idev)
 			in6_dev_hold(rt->rt6i_idev);
@@ -1038,13 +1043,12 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
 	if (unlikely(idev == NULL))
 		return NULL;
 
-	rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops);
+	rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, dev, 0);
 	if (unlikely(rt == NULL)) {
 		in6_dev_put(idev);
 		goto out;
 	}
 
-	dev_hold(dev);
 	if (neigh)
 		neigh_hold(neigh);
 	else {
@@ -1053,21 +1057,12 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
 		neigh = NULL;
 	}
 
-	rt->rt6i_dev = dev;
 	rt->rt6i_idev = idev;
 	rt->rt6i_nexthop = neigh;
 	atomic_set(&rt->dst.__refcnt, 1);
 	dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255);
 	rt->dst.output = ip6_output;
 
-#if 0	/* there's no chance to use these for ndisc */
-	rt->dst.flags = ipv6_addr_type(addr) & IPV6_ADDR_UNICAST
-				? DST_HOST
-				: 0;
-	ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
-	rt->rt6i_dst.plen = 128;
-#endif
-
 	spin_lock_bh(&icmp6_dst_lock);
 	rt->dst.next = icmp6_dst_gc_list;
 	icmp6_dst_gc_list = &rt->dst;
@@ -1212,7 +1207,7 @@ int ip6_route_add(struct fib6_config *cfg)
 		goto out;
 	}
 
-	rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops);
+	rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, NULL, DST_NOCOUNT);
 
 	if (rt == NULL) {
 		err = -ENOMEM;
@@ -1242,7 +1237,7 @@ int ip6_route_add(struct fib6_config *cfg)
 	ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
 	rt->rt6i_dst.plen = cfg->fc_dst_len;
 	if (rt->rt6i_dst.plen == 128)
-		rt->dst.flags = DST_HOST;
+		rt->dst.flags |= DST_HOST;
 
 #ifdef CONFIG_IPV6_SUBTREES
 	ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
@@ -1279,7 +1274,7 @@ int ip6_route_add(struct fib6_config *cfg)
 	}
 
 	if (cfg->fc_flags & RTF_GATEWAY) {
-		struct in6_addr *gw_addr;
+		const struct in6_addr *gw_addr;
 		int gwa_type;
 
 		gw_addr = &cfg->fc_gateway;
@@ -1332,6 +1327,16 @@ int ip6_route_add(struct fib6_config *cfg)
 	if (dev == NULL)
 		goto out;
 
+	if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
+		if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
+			err = -EINVAL;
+			goto out;
+		}
+		ipv6_addr_copy(&rt->rt6i_prefsrc.addr, &cfg->fc_prefsrc);
+		rt->rt6i_prefsrc.plen = 128;
+	} else
+		rt->rt6i_prefsrc.plen = 0;
+
 	if (cfg->fc_flags & (RTF_GATEWAY | RTF_NONEXTHOP)) {
 		rt->rt6i_nexthop = __neigh_lookup_errno(&nd_tbl, &rt->rt6i_gateway, dev);
 		if (IS_ERR(rt->rt6i_nexthop)) {
@@ -1509,9 +1514,9 @@ out:
 	return rt;
 };
 
-static struct rt6_info *ip6_route_redirect(struct in6_addr *dest,
-					   struct in6_addr *src,
-					   struct in6_addr *gateway,
+static struct rt6_info *ip6_route_redirect(const struct in6_addr *dest,
+					   const struct in6_addr *src,
+					   const struct in6_addr *gateway,
 					   struct net_device *dev)
 {
 	int flags = RT6_LOOKUP_F_HAS_SADDR;
@@ -1533,8 +1538,8 @@ static struct rt6_info *ip6_route_redirect(struct in6_addr *dest,
 			   flags, __ip6_route_redirect);
 }
 
-void rt6_redirect(struct in6_addr *dest, struct in6_addr *src,
-		  struct in6_addr *saddr,
+void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src,
+		  const struct in6_addr *saddr,
 		  struct neighbour *neigh, u8 *lladdr, int on_link)
 {
 	struct rt6_info *rt, *nrt = NULL;
@@ -1608,7 +1613,7 @@ out:
  *	i.e. Path MTU discovery
  */
 
-static void rt6_do_pmtu_disc(struct in6_addr *daddr, struct in6_addr *saddr,
+static void rt6_do_pmtu_disc(const struct in6_addr *daddr, const struct in6_addr *saddr,
 			     struct net *net, u32 pmtu, int ifindex)
 {
 	struct rt6_info *rt, *nrt;
@@ -1693,7 +1698,7 @@ out:
 	dst_release(&rt->dst);
 }
 
-void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
+void rt6_pmtu_discovery(const struct in6_addr *daddr, const struct in6_addr *saddr,
 			struct net_device *dev, u32 pmtu)
 {
 	struct net *net = dev_net(dev);
@@ -1721,7 +1726,8 @@ void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
 static struct rt6_info * ip6_rt_copy(struct rt6_info *ort)
 {
 	struct net *net = dev_net(ort->rt6i_dev);
-	struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops);
+	struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops,
+					    ort->dst.dev, 0);
 
 	if (rt) {
 		rt->dst.input = ort->dst.input;
@@ -1729,9 +1735,6 @@ static struct rt6_info * ip6_rt_copy(struct rt6_info *ort)
 
 		dst_copy_metrics(&rt->dst, &ort->dst);
 		rt->dst.error = ort->dst.error;
-		rt->dst.dev = ort->dst.dev;
-		if (rt->dst.dev)
-			dev_hold(rt->dst.dev);
 		rt->rt6i_idev = ort->rt6i_idev;
 		if (rt->rt6i_idev)
 			in6_dev_hold(rt->rt6i_idev);
@@ -1746,6 +1749,7 @@ static struct rt6_info * ip6_rt_copy(struct rt6_info *ort)
 #ifdef CONFIG_IPV6_SUBTREES
 		memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
 #endif
+		memcpy(&rt->rt6i_prefsrc, &ort->rt6i_prefsrc, sizeof(struct rt6key));
 		rt->rt6i_table = ort->rt6i_table;
 	}
 	return rt;
@@ -1753,8 +1757,8 @@ static struct rt6_info * ip6_rt_copy(struct rt6_info *ort)
 
 #ifdef CONFIG_IPV6_ROUTE_INFO
 static struct rt6_info *rt6_get_route_info(struct net *net,
-					   struct in6_addr *prefix, int prefixlen,
-					   struct in6_addr *gwaddr, int ifindex)
+					   const struct in6_addr *prefix, int prefixlen,
+					   const struct in6_addr *gwaddr, int ifindex)
 {
 	struct fib6_node *fn;
 	struct rt6_info *rt = NULL;
@@ -1785,8 +1789,8 @@ out:
 }
 
 static struct rt6_info *rt6_add_route_info(struct net *net,
-					   struct in6_addr *prefix, int prefixlen,
-					   struct in6_addr *gwaddr, int ifindex,
+					   const struct in6_addr *prefix, int prefixlen,
+					   const struct in6_addr *gwaddr, int ifindex,
 					   unsigned pref)
 {
 	struct fib6_config cfg = {
@@ -1814,7 +1818,7 @@ static struct rt6_info *rt6_add_route_info(struct net *net,
 }
 #endif
 
-struct rt6_info *rt6_get_dflt_router(struct in6_addr *addr, struct net_device *dev)
+struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_device *dev)
 {
 	struct rt6_info *rt;
 	struct fib6_table *table;
@@ -1836,7 +1840,7 @@ struct rt6_info *rt6_get_dflt_router(struct in6_addr *addr, struct net_device *d
 	return rt;
 }
 
-struct rt6_info *rt6_add_dflt_router(struct in6_addr *gwaddr,
+struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
 				     struct net_device *dev,
 				     unsigned int pref)
 {
@@ -2001,7 +2005,8 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
 				    int anycast)
 {
 	struct net *net = dev_net(idev->dev);
-	struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops);
+	struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops,
+					    net->loopback_dev, 0);
 	struct neighbour *neigh;
 
 	if (rt == NULL) {
@@ -2011,13 +2016,11 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
 		return ERR_PTR(-ENOMEM);
 	}
 
-	dev_hold(net->loopback_dev);
 	in6_dev_hold(idev);
 
-	rt->dst.flags = DST_HOST;
+	rt->dst.flags |= DST_HOST;
 	rt->dst.input = ip6_input;
 	rt->dst.output = ip6_output;
-	rt->rt6i_dev = net->loopback_dev;
 	rt->rt6i_idev = idev;
 	rt->dst.obsolete = -1;
 
@@ -2043,6 +2046,55 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
 	return rt;
 }
 
+int ip6_route_get_saddr(struct net *net,
+			struct rt6_info *rt,
+			const struct in6_addr *daddr,
+			unsigned int prefs,
+			struct in6_addr *saddr)
+{
+	struct inet6_dev *idev = ip6_dst_idev((struct dst_entry*)rt);
+	int err = 0;
+	if (rt->rt6i_prefsrc.plen)
+		ipv6_addr_copy(saddr, &rt->rt6i_prefsrc.addr);
+	else
+		err = ipv6_dev_get_saddr(net, idev ? idev->dev : NULL,
+					 daddr, prefs, saddr);
+	return err;
+}
+
+/* remove deleted ip from prefsrc entries */
+struct arg_dev_net_ip {
+	struct net_device *dev;
+	struct net *net;
+	struct in6_addr *addr;
+};
+
+static int fib6_remove_prefsrc(struct rt6_info *rt, void *arg)
+{
+	struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev;
+	struct net *net = ((struct arg_dev_net_ip *)arg)->net;
+	struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;
+
+	if (((void *)rt->rt6i_dev == dev || dev == NULL) &&
+	    rt != net->ipv6.ip6_null_entry &&
+	    ipv6_addr_equal(addr, &rt->rt6i_prefsrc.addr)) {
+		/* remove prefsrc entry */
+		rt->rt6i_prefsrc.plen = 0;
+	}
+	return 0;
+}
+
+void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
+{
+	struct net *net = dev_net(ifp->idev->dev);
+	struct arg_dev_net_ip adni = {
+		.dev = ifp->idev->dev,
+		.net = net,
+		.addr = &ifp->addr,
+	};
+	fib6_clean_all(net, fib6_remove_prefsrc, 0, &adni);
+}
+
 struct arg_dev_net {
 	struct net_device *dev;
 	struct net *net;
@@ -2189,6 +2241,9 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
 		nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
 	}
 
+	if (tb[RTA_PREFSRC])
+		nla_memcpy(&cfg->fc_prefsrc, tb[RTA_PREFSRC], 16);
+
 	if (tb[RTA_OIF])
 		cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
 
@@ -2331,13 +2386,17 @@ static int rt6_fill_node(struct net *net,
 #endif
 		NLA_PUT_U32(skb, RTA_IIF, iif);
 	} else if (dst) {
-		struct inet6_dev *idev = ip6_dst_idev(&rt->dst);
 		struct in6_addr saddr_buf;
-		if (ipv6_dev_get_saddr(net, idev ? idev->dev : NULL,
-				       dst, 0, &saddr_buf) == 0)
+		if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0)
			NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
 	}
 
+	if (rt->rt6i_prefsrc.plen) {
+		struct in6_addr saddr_buf;
+		ipv6_addr_copy(&saddr_buf, &rt->rt6i_prefsrc.addr);
+		NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
+	}
+
 	if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
 		goto nla_put_failure;
 
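The rt6i_prefsrc machinery added above has two halves: ip6_route_add() validates RTA_PREFSRC against the egress device and records it with plen == 128, and ip6_route_get_saddr() consults that record before falling back to the usual source-address selection. A minimal userspace sketch of the same decision order (struct route6 and fallback_get_saddr() are hypothetical stand-ins for rt6_info and ipv6_dev_get_saddr(), not kernel API):

#include <stdio.h>
#include <netinet/in.h>
#include <arpa/inet.h>

/* Hypothetical stand-in for the kernel's rt6_info/rt6key pair. */
struct route6 {
	struct in6_addr prefsrc_addr;
	int prefsrc_plen;	/* 128 when a preferred source is set, 0 otherwise */
};

/* Stub for ipv6_dev_get_saddr(): the kernel runs full RFC 3484 selection here. */
static int fallback_get_saddr(struct in6_addr *saddr)
{
	return inet_pton(AF_INET6, "fe80::1", saddr) == 1 ? 0 : -1;
}

/* Mirrors the order in ip6_route_get_saddr(): prefsrc first, then fallback. */
static int route6_get_saddr(const struct route6 *rt, struct in6_addr *saddr)
{
	if (rt->prefsrc_plen) {
		*saddr = rt->prefsrc_addr;
		return 0;
	}
	return fallback_get_saddr(saddr);
}

int main(void)
{
	struct route6 rt = { .prefsrc_plen = 128 };
	struct in6_addr saddr;
	char buf[INET6_ADDRSTRLEN];

	inet_pton(AF_INET6, "2001:db8::1", &rt.prefsrc_addr);
	if (route6_get_saddr(&rt, &saddr) == 0)
		printf("source: %s\n", inet_ntop(AF_INET6, &saddr, buf, sizeof(buf)));
	return 0;
}
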
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 43b33373adb2..1cca5761aea9 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -250,11 +250,6 @@ static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
 
 	dev_net_set(dev, net);
 
-	if (strchr(name, '%')) {
-		if (dev_alloc_name(dev, name) < 0)
-			goto failed_free;
-	}
-
 	nt = netdev_priv(dev);
 
 	nt->parms = *parms;
@@ -401,11 +396,6 @@ out:
 	return err;
 }
 
-static void prl_entry_destroy_rcu(struct rcu_head *head)
-{
-	kfree(container_of(head, struct ip_tunnel_prl_entry, rcu_head));
-}
-
 static void prl_list_destroy_rcu(struct rcu_head *head)
 {
 	struct ip_tunnel_prl_entry *p, *n;
@@ -433,7 +423,7 @@ ipip6_tunnel_del_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a)
 	     p = &x->next) {
 		if (x->addr == a->addr) {
 			*p = x->next;
-			call_rcu(&x->rcu_head, prl_entry_destroy_rcu);
+			kfree_rcu(x, rcu_head);
 			t->prl_count--;
 			goto out;
 		}
@@ -452,7 +442,7 @@ out:
 }
 
 static int
-isatap_chksrc(struct sk_buff *skb, struct iphdr *iph, struct ip_tunnel *t)
+isatap_chksrc(struct sk_buff *skb, const struct iphdr *iph, struct ip_tunnel *t)
 {
 	struct ip_tunnel_prl_entry *p;
 	int ok = 1;
@@ -465,7 +455,8 @@ isatap_chksrc(struct sk_buff *skb, struct iphdr *iph, struct ip_tunnel *t)
 		else
 			skb->ndisc_nodetype = NDISC_NODETYPE_NODEFAULT;
 	} else {
-		struct in6_addr *addr6 = &ipv6_hdr(skb)->saddr;
+		const struct in6_addr *addr6 = &ipv6_hdr(skb)->saddr;
+
 		if (ipv6_addr_is_isatap(addr6) &&
 		    (addr6->s6_addr32[3] == iph->saddr) &&
 		    ipv6_chk_prefix(addr6, t->dev))
@@ -499,7 +490,7 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
    8 bytes of packet payload. It means, that precise relaying of
    ICMP in the real Internet is absolutely infeasible.
  */
-	struct iphdr *iph = (struct iphdr*)skb->data;
+	const struct iphdr *iph = (const struct iphdr *)skb->data;
 	const int type = icmp_hdr(skb)->type;
 	const int code = icmp_hdr(skb)->code;
 	struct ip_tunnel *t;
@@ -557,7 +548,7 @@ out:
 	return err;
 }
 
-static inline void ipip6_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb)
+static inline void ipip6_ecn_decapsulate(const struct iphdr *iph, struct sk_buff *skb)
 {
 	if (INET_ECN_is_ce(iph->tos))
 		IP6_ECN_set_ce(ipv6_hdr(skb));
@@ -565,7 +556,7 @@ static inline void ipip6_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb)
 
 static int ipip6_rcv(struct sk_buff *skb)
 {
-	struct iphdr *iph;
+	const struct iphdr *iph;
 	struct ip_tunnel *tunnel;
 
 	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
@@ -621,7 +612,7 @@ out:
  * comes from 6rd / 6to4 (RFC 3056) addr space.
  */
 static inline
-__be32 try_6rd(struct in6_addr *v6dst, struct ip_tunnel *tunnel)
+__be32 try_6rd(const struct in6_addr *v6dst, struct ip_tunnel *tunnel)
 {
 	__be32 dst = 0;
 
@@ -664,8 +655,8 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
 {
 	struct ip_tunnel *tunnel = netdev_priv(dev);
 	struct pcpu_tstats *tstats;
-	struct iphdr  *tiph = &tunnel->parms.iph;
-	struct ipv6hdr *iph6 = ipv6_hdr(skb);
+	const struct iphdr  *tiph = &tunnel->parms.iph;
+	const struct ipv6hdr *iph6 = ipv6_hdr(skb);
 	u8     tos = tunnel->parms.iph.tos;
 	__be16 df = tiph->frag_off;
 	struct rtable *rt;			/* Route to the other host */
@@ -673,8 +664,9 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
 	struct iphdr  *iph;			/* Our new IP header */
 	unsigned int max_headroom;		/* The extra header space needed */
 	__be32 dst = tiph->daddr;
+	struct flowi4 fl4;
 	int    mtu;
-	struct in6_addr *addr6;
+	const struct in6_addr *addr6;
 	int addr_type;
 
 	if (skb->protocol != htons(ETH_P_IPV6))
@@ -693,7 +685,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
 			goto tx_error;
 		}
 
-		addr6 = (struct in6_addr*)&neigh->primary_key;
+		addr6 = (const struct in6_addr*)&neigh->primary_key;
 		addr_type = ipv6_addr_type(addr6);
 
 		if ((addr_type & IPV6_ADDR_UNICAST) &&
@@ -718,7 +710,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
 			goto tx_error;
 		}
 
-		addr6 = (struct in6_addr*)&neigh->primary_key;
+		addr6 = (const struct in6_addr*)&neigh->primary_key;
 		addr_type = ipv6_addr_type(addr6);
 
 		if (addr_type == IPV6_ADDR_ANY) {
@@ -732,7 +724,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
 		dst = addr6->s6_addr32[3];
 	}
 
-	rt = ip_route_output_ports(dev_net(dev), NULL,
+	rt = ip_route_output_ports(dev_net(dev), &fl4, NULL,
 				   dst, tiph->saddr,
 				   0, 0,
 				   IPPROTO_IPV6, RT_TOS(tos),
@@ -826,8 +818,8 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
 	iph->frag_off		=	df;
 	iph->protocol		=	IPPROTO_IPV6;
 	iph->tos		=	INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6));
-	iph->daddr		=	rt->rt_dst;
-	iph->saddr		=	rt->rt_src;
+	iph->daddr		=	fl4.daddr;
+	iph->saddr		=	fl4.saddr;
 
 	if ((iph->ttl = tiph->ttl) == 0)
 		iph->ttl	=	iph6->hop_limit;
@@ -849,13 +841,14 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev)
 {
 	struct net_device *tdev = NULL;
 	struct ip_tunnel *tunnel;
-	struct iphdr *iph;
+	const struct iphdr *iph;
+	struct flowi4 fl4;
 
 	tunnel = netdev_priv(dev);
 	iph = &tunnel->parms.iph;
 
 	if (iph->daddr) {
-		struct rtable *rt = ip_route_output_ports(dev_net(dev), NULL,
+		struct rtable *rt = ip_route_output_ports(dev_net(dev), &fl4, NULL,
 							  iph->daddr, iph->saddr,
 							  0, 0,
 							  IPPROTO_IPV6,
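The kfree_rcu() conversion above works because the removed callback, prl_entry_destroy_rcu(), did nothing but recover the enclosing ip_tunnel_prl_entry from its embedded rcu_head and kfree() it; kfree_rcu(x, rcu_head) derives the same containing pointer from the member's offset. A self-contained userspace sketch of that container_of() arithmetic (the struct, free(), and direct callback invocation are illustrative stand-ins; the real RCU grace-period wait is elided):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rcu_head { void *next; };	/* stand-in for the kernel type */

struct prl_entry {
	unsigned int addr;
	struct rcu_head rcu_head;	/* embedded, as in ip_tunnel_prl_entry */
};

/* What the removed callback did: recover the outer struct, then free it. */
static void entry_destroy(struct rcu_head *head)
{
	free(container_of(head, struct prl_entry, rcu_head));
}

int main(void)
{
	struct prl_entry *e = malloc(sizeof(*e));

	e->addr = 42;
	printf("addr=%u\n", e->addr);
	/* kfree_rcu(e, rcu_head) generates this offset-based free after a
	 * grace period; here we just invoke the callback directly. */
	entry_destroy(&e->rcu_head);
	return 0;
}
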
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 352c26081f5d..8b9644a8b697 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -66,7 +66,7 @@ static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
 static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS],
 		      ipv6_cookie_scratch);
 
-static u32 cookie_hash(struct in6_addr *saddr, struct in6_addr *daddr,
+static u32 cookie_hash(const struct in6_addr *saddr, const struct in6_addr *daddr,
 		       __be16 sport, __be16 dport, u32 count, int c)
 {
 	__u32 *tmp = __get_cpu_var(ipv6_cookie_scratch);
@@ -86,7 +86,8 @@ static u32 cookie_hash(struct in6_addr *saddr, struct in6_addr *daddr,
 	return tmp[17];
 }
 
-static __u32 secure_tcp_syn_cookie(struct in6_addr *saddr, struct in6_addr *daddr,
+static __u32 secure_tcp_syn_cookie(const struct in6_addr *saddr,
+				   const struct in6_addr *daddr,
 				   __be16 sport, __be16 dport, __u32 sseq,
 				   __u32 count, __u32 data)
 {
@@ -96,8 +97,8 @@ static __u32 secure_tcp_syn_cookie(struct in6_addr *saddr, struct in6_addr *dadd
 		& COOKIEMASK));
 }
 
-static __u32 check_tcp_syn_cookie(__u32 cookie, struct in6_addr *saddr,
-				  struct in6_addr *daddr, __be16 sport,
+static __u32 check_tcp_syn_cookie(__u32 cookie, const struct in6_addr *saddr,
+				  const struct in6_addr *daddr, __be16 sport,
 				  __be16 dport, __u32 sseq, __u32 count,
 				  __u32 maxdiff)
 {
@@ -116,7 +117,7 @@ static __u32 check_tcp_syn_cookie(__u32 cookie, struct in6_addr *saddr,
 
 __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
 {
-	struct ipv6hdr *iph = ipv6_hdr(skb);
+	const struct ipv6hdr *iph = ipv6_hdr(skb);
 	const struct tcphdr *th = tcp_hdr(skb);
 	int mssind;
 	const __u16 mss = *mssp;
@@ -138,7 +139,7 @@ __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
 
 static inline int cookie_check(struct sk_buff *skb, __u32 cookie)
 {
-	struct ipv6hdr *iph = ipv6_hdr(skb);
+	const struct ipv6hdr *iph = ipv6_hdr(skb);
 	const struct tcphdr *th = tcp_hdr(skb);
 	__u32 seq = ntohl(th->seq) - 1;
 	__u32 mssind = check_tcp_syn_cookie(cookie, &iph->saddr, &iph->daddr,
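The const-qualification running through this and the surrounding files is enforced, not decorative: once a helper such as cookie_hash() takes const struct in6_addr *, any accidental write to the packet header through that pointer becomes a compile error. A tiny userspace illustration (hash_addr() is a made-up example, not the kernel's hash):

#include <netinet/in.h>

/* With a const parameter, a stray write no longer compiles. */
static unsigned int hash_addr(const struct in6_addr *a)
{
	/* a->s6_addr[0] = 0;  -- error: assignment of read-only location */
	return a->s6_addr[0] ^ a->s6_addr[15];
}

int main(void)
{
	struct in6_addr a = IN6ADDR_LOOPBACK_INIT;

	return hash_addr(&a) == 1 ? 0 : 1;	/* ::1 hashes to 1 here */
}
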
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 4f49e5dd41bb..87551ca568cd 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -76,8 +76,8 @@ static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
 
 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
 static void __tcp_v6_send_check(struct sk_buff *skb,
-				struct in6_addr *saddr,
-				struct in6_addr *daddr);
+				const struct in6_addr *saddr,
+				const struct in6_addr *daddr);
 
 static const struct inet_connection_sock_af_ops ipv6_mapped;
 static const struct inet_connection_sock_af_ops ipv6_specific;
@@ -86,7 +86,7 @@ static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
 static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
 #else
 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
-						   struct in6_addr *addr)
+						   const struct in6_addr *addr)
 {
 	return NULL;
 }
@@ -106,8 +106,8 @@ static void tcp_v6_hash(struct sock *sk)
 }
 
 static __inline__ __sum16 tcp_v6_check(int len,
-				       struct in6_addr *saddr,
-				       struct in6_addr *daddr,
+				       const struct in6_addr *saddr,
+				       const struct in6_addr *daddr,
 				       __wsum base)
 {
 	return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
@@ -331,7 +331,7 @@ failure:
 static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 		u8 type, u8 code, int offset, __be32 info)
 {
-	struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
+	const struct ipv6hdr *hdr = (const struct ipv6hdr*)skb->data;
 	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
 	struct ipv6_pinfo *np;
 	struct sock *sk;
@@ -551,7 +551,7 @@ static void tcp_v6_reqsk_destructor(struct request_sock *req)
 
 #ifdef CONFIG_TCP_MD5SIG
 static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
-						   struct in6_addr *addr)
+						   const struct in6_addr *addr)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	int i;
@@ -580,7 +580,7 @@ static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
 	return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
 }
 
-static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
+static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer,
 			     char *newkey, u8 newkeylen)
 {
 	/* Add key to the list */
@@ -645,7 +645,7 @@ static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
 				newkey, newkeylen);
 }
 
-static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer)
+static int tcp_v6_md5_do_del(struct sock *sk, const struct in6_addr *peer)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	int i;
@@ -753,8 +753,8 @@ static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
 }
 
 static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
-					struct in6_addr *daddr,
-					struct in6_addr *saddr, int nbytes)
+					const struct in6_addr *daddr,
+					const struct in6_addr *saddr, int nbytes)
 {
 	struct tcp6_pseudohdr *bp;
 	struct scatterlist sg;
@@ -771,7 +771,7 @@ static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
 }
 
 static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
-			       struct in6_addr *daddr, struct in6_addr *saddr,
+			       const struct in6_addr *daddr, struct in6_addr *saddr,
 			       struct tcphdr *th)
 {
 	struct tcp_md5sig_pool *hp;
@@ -807,7 +807,7 @@ static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
 			       struct sock *sk, struct request_sock *req,
 			       struct sk_buff *skb)
 {
-	struct in6_addr *saddr, *daddr;
+	const struct in6_addr *saddr, *daddr;
 	struct tcp_md5sig_pool *hp;
 	struct hash_desc *desc;
 	struct tcphdr *th = tcp_hdr(skb);
@@ -819,7 +819,7 @@ static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
 		saddr = &inet6_rsk(req)->loc_addr;
 		daddr = &inet6_rsk(req)->rmt_addr;
 	} else {
-		struct ipv6hdr *ip6h = ipv6_hdr(skb);
+		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
 		saddr = &ip6h->saddr;
 		daddr = &ip6h->daddr;
 	}
@@ -857,7 +857,7 @@ static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
 {
 	__u8 *hash_location = NULL;
 	struct tcp_md5sig_key *hash_expected;
-	struct ipv6hdr *ip6h = ipv6_hdr(skb);
+	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
 	struct tcphdr *th = tcp_hdr(skb);
 	int genhash;
 	u8 newhash[16];
@@ -915,7 +915,7 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
 #endif
 
 static void __tcp_v6_send_check(struct sk_buff *skb,
-				struct in6_addr *saddr, struct in6_addr *daddr)
+				const struct in6_addr *saddr, const struct in6_addr *daddr)
 {
 	struct tcphdr *th = tcp_hdr(skb);
 
@@ -939,7 +939,7 @@ static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
 
 static int tcp_v6_gso_send_check(struct sk_buff *skb)
 {
-	struct ipv6hdr *ipv6h;
+	const struct ipv6hdr *ipv6h;
 	struct tcphdr *th;
 
 	if (!pskb_may_pull(skb, sizeof(*th)))
@@ -957,7 +957,7 @@ static int tcp_v6_gso_send_check(struct sk_buff *skb)
 static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
 					 struct sk_buff *skb)
 {
-	struct ipv6hdr *iph = skb_gro_network_header(skb);
+	const struct ipv6hdr *iph = skb_gro_network_header(skb);
 
 	switch (skb->ip_summed) {
 	case CHECKSUM_COMPLETE:
@@ -978,7 +978,7 @@ static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
 
 static int tcp6_gro_complete(struct sk_buff *skb)
 {
-	struct ipv6hdr *iph = ipv6_hdr(skb);
+	const struct ipv6hdr *iph = ipv6_hdr(skb);
 	struct tcphdr *th = tcp_hdr(skb);
 
 	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
@@ -1469,7 +1469,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 
 	   First: no IPv4 options.
 	 */
-	newinet->opt = NULL;
+	newinet->inet_opt = NULL;
 	newnp->ipv6_fl_list = NULL;
 
 	/* Clone RX bits */
@@ -1644,6 +1644,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 	 * the new socket..
 	 */
 	if(nsk != sk) {
+		sock_rps_save_rxhash(nsk, skb->rxhash);
 		if (tcp_child_process(sk, nsk, skb))
 			goto reset;
 		if (opt_skb)
@@ -1702,7 +1703,7 @@ ipv6_pktoptions:
 static int tcp_v6_rcv(struct sk_buff *skb)
 {
 	struct tcphdr *th;
-	struct ipv6hdr *hdr;
+	const struct ipv6hdr *hdr;
 	struct sock *sk;
 	int ret;
 	struct net *net = dev_net(skb->dev);
@@ -2028,15 +2029,15 @@ static void get_openreq6(struct seq_file *seq,
 			 struct sock *sk, struct request_sock *req, int i, int uid)
 {
 	int ttd = req->expires - jiffies;
-	struct in6_addr *src = &inet6_rsk(req)->loc_addr;
-	struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;
+	const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
+	const struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;
 
 	if (ttd < 0)
 		ttd = 0;
 
 	seq_printf(seq,
 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
-		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
+		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
 		   i,
 		   src->s6_addr32[0], src->s6_addr32[1],
 		   src->s6_addr32[2], src->s6_addr32[3],
@@ -2057,7 +2058,7 @@ static void get_openreq6(struct seq_file *seq,
 
 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
 {
-	struct in6_addr *dest, *src;
+	const struct in6_addr *dest, *src;
 	__u16 destp, srcp;
 	int timer_active;
 	unsigned long timer_expires;
@@ -2087,7 +2088,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
 
 	seq_printf(seq,
 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
-		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %lu %lu %u %u %d\n",
+		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n",
 		   i,
 		   src->s6_addr32[0], src->s6_addr32[1],
 		   src->s6_addr32[2], src->s6_addr32[3], srcp,
@@ -2114,7 +2115,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
 static void get_timewait6_sock(struct seq_file *seq,
 			       struct inet_timewait_sock *tw, int i)
 {
-	struct in6_addr *dest, *src;
+	const struct in6_addr *dest, *src;
 	__u16 destp, srcp;
 	struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
 	int ttd = tw->tw_ttd - jiffies;
@@ -2129,7 +2130,7 @@ static void get_timewait6_sock(struct seq_file *seq,
 
 	seq_printf(seq,
 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
-		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
+		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
 		   i,
 		   src->s6_addr32[0], src->s6_addr32[1],
 		   src->s6_addr32[2], src->s6_addr32[3], srcp,
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 9e305d74b3d4..328985c40883 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -311,7 +311,7 @@ static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
 				      struct udp_table *udptable)
 {
 	struct sock *sk;
-	struct ipv6hdr *iph = ipv6_hdr(skb);
+	const struct ipv6hdr *iph = ipv6_hdr(skb);
 
 	if (unlikely(sk = skb_steal_sock(skb)))
 		return sk;
@@ -453,8 +453,11 @@ csum_copy_err:
 	}
 	unlock_sock_fast(sk, slow);
 
-	if (flags & MSG_DONTWAIT)
+	if (noblock)
 		return -EAGAIN;
+
+	/* starting over for a new packet */
+	msg->msg_flags &= ~MSG_TRUNC;
 	goto try_again;
 }
 
@@ -463,9 +466,9 @@ void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 		    struct udp_table *udptable)
 {
 	struct ipv6_pinfo *np;
-	struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
-	struct in6_addr *saddr = &hdr->saddr;
-	struct in6_addr *daddr = &hdr->daddr;
+	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
+	const struct in6_addr *saddr = &hdr->saddr;
+	const struct in6_addr *daddr = &hdr->daddr;
 	struct udphdr *uh = (struct udphdr*)(skb->data+offset);
 	struct sock *sk;
 	int err;
@@ -553,8 +556,8 @@ drop_no_sk_drops_inc:
 }
 
 static struct sock *udp_v6_mcast_next(struct net *net, struct sock *sk,
-				      __be16 loc_port, struct in6_addr *loc_addr,
-				      __be16 rmt_port, struct in6_addr *rmt_addr,
+				      __be16 loc_port, const struct in6_addr *loc_addr,
+				      __be16 rmt_port, const struct in6_addr *rmt_addr,
 				      int dif)
 {
 	struct hlist_nulls_node *node;
@@ -633,7 +636,7 @@ drop:
  * so we don't need to lock the hashes.
 */
 static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
-		struct in6_addr *saddr, struct in6_addr *daddr,
+		const struct in6_addr *saddr, const struct in6_addr *daddr,
 		struct udp_table *udptable)
 {
 	struct sock *sk, *stack[256 / sizeof(struct sock *)];
@@ -716,7 +719,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 	struct net *net = dev_net(skb->dev);
 	struct sock *sk;
 	struct udphdr *uh;
-	struct in6_addr *saddr, *daddr;
+	const struct in6_addr *saddr, *daddr;
 	u32 ulen = 0;
 
 	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
@@ -1278,7 +1281,7 @@ int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
 
 static int udp6_ufo_send_check(struct sk_buff *skb)
 {
-	struct ipv6hdr *ipv6h;
+	const struct ipv6hdr *ipv6h;
 	struct udphdr *uh;
 
 	if (!pskb_may_pull(skb, sizeof(*uh)))
@@ -1328,7 +1331,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, u32 features)
 	/* Do software UFO. Complete and fill in the UDP checksum as HW cannot
	 * do checksum of UDP packets sent as multiple IP fragments.
	 */
-	offset = skb->csum_start - skb_headroom(skb);
+	offset = skb_checksum_start_offset(skb);
 	csum = skb_checksum(skb, offset, skb->len- offset, 0);
 	offset += skb->csum_offset;
 	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
@@ -1382,7 +1385,7 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
 {
 	struct inet_sock *inet = inet_sk(sp);
 	struct ipv6_pinfo *np = inet6_sk(sp);
-	struct in6_addr *dest, *src;
+	const struct in6_addr *dest, *src;
 	__u16 destp, srcp;
 
 	dest = &np->daddr;
@@ -1391,7 +1394,7 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
 	srcp = ntohs(inet->inet_sport);
 	seq_printf(seq,
 		   "%5d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
-		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d\n",
+		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d\n",
 		   bucket,
 		   src->s6_addr32[0], src->s6_addr32[1],
 		   src->s6_addr32[2], src->s6_addr32[3], srcp,
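The csum_copy_err change above matters because msg_flags accumulates across the try_again loop: a MSG_TRUNC left over from a datagram that failed its checksum copy would otherwise be reported against the next, intact datagram. The flag's normal userspace-visible meaning, as a small self-contained demo (loopback UDP over IPv6; error handling trimmed for brevity):

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>

int main(void)
{
	int fd = socket(AF_INET6, SOCK_DGRAM, 0);
	struct sockaddr_in6 sa = { .sin6_family = AF_INET6,
				   .sin6_addr = IN6ADDR_LOOPBACK_INIT };
	socklen_t salen = sizeof(sa);
	char small[4];
	struct iovec iov = { .iov_base = small, .iov_len = sizeof(small) };
	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };

	bind(fd, (struct sockaddr *)&sa, sizeof(sa));	/* ephemeral port */
	getsockname(fd, (struct sockaddr *)&sa, &salen);
	sendto(fd, "0123456789", 10, 0, (struct sockaddr *)&sa, sizeof(sa));

	recvmsg(fd, &msg, 0);
	/* The kernel sets MSG_TRUNC: 10 bytes did not fit into 4. */
	printf("truncated: %s\n", (msg.msg_flags & MSG_TRUNC) ? "yes" : "no");
	close(fd);
	return 0;
}
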
diff --git a/net/ipv6/xfrm6_mode_beet.c b/net/ipv6/xfrm6_mode_beet.c
index bbd48b101bae..3437d7d4eed6 100644
--- a/net/ipv6/xfrm6_mode_beet.c
+++ b/net/ipv6/xfrm6_mode_beet.c
@@ -41,10 +41,8 @@ static int xfrm6_beet_output(struct xfrm_state *x, struct sk_buff *skb)
 {
 	struct ipv6hdr *top_iph;
 	struct ip_beet_phdr *ph;
-	struct iphdr *iphv4;
 	int optlen, hdr_len;
 
-	iphv4 = ip_hdr(skb);
 	hdr_len = 0;
 	optlen = XFRM_MODE_SKB_CB(skb)->optlen;
 	if (unlikely(optlen))
diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c
index 645cb968d450..4d6edff0498f 100644
--- a/net/ipv6/xfrm6_mode_tunnel.c
+++ b/net/ipv6/xfrm6_mode_tunnel.c
@@ -20,7 +20,7 @@
 
 static inline void ipip6_ecn_decapsulate(struct sk_buff *skb)
 {
-	struct ipv6hdr *outer_iph = ipv6_hdr(skb);
+	const struct ipv6hdr *outer_iph = ipv6_hdr(skb);
 	struct ipv6hdr *inner_iph = ipipv6_hdr(skb);
 
 	if (INET_ECN_is_ce(ipv6_get_dsfield(outer_iph)))
@@ -55,8 +55,8 @@ static int xfrm6_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
 		dsfield &= ~INET_ECN_MASK;
 	ipv6_change_dsfield(top_iph, 0, dsfield);
 	top_iph->hop_limit = ip6_dst_hoplimit(dst->child);
-	ipv6_addr_copy(&top_iph->saddr, (struct in6_addr *)&x->props.saddr);
-	ipv6_addr_copy(&top_iph->daddr, (struct in6_addr *)&x->id.daddr);
+	ipv6_addr_copy(&top_iph->saddr, (const struct in6_addr *)&x->props.saddr);
+	ipv6_addr_copy(&top_iph->daddr, (const struct in6_addr *)&x->id.daddr);
 	return 0;
 }
 
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 05e34c8ec913..d879f7efbd10 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -124,7 +124,7 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
 	struct flowi6 *fl6 = &fl->u.ip6;
 	int onlyproto = 0;
 	u16 offset = skb_network_header_len(skb);
-	struct ipv6hdr *hdr = ipv6_hdr(skb);
+	const struct ipv6hdr *hdr = ipv6_hdr(skb);
 	struct ipv6_opt_hdr *exthdr;
 	const unsigned char *nh = skb_network_header(skb);
 	u8 nexthdr = nh[IP6CB(skb)->nhoff];
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 2969cad408de..4fe1db12d2a3 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -68,7 +68,7 @@ static DEFINE_SPINLOCK(xfrm6_tunnel_spi_lock);
 
 static struct kmem_cache *xfrm6_tunnel_spi_kmem __read_mostly;
 
-static inline unsigned xfrm6_tunnel_spi_hash_byaddr(xfrm_address_t *addr)
+static inline unsigned xfrm6_tunnel_spi_hash_byaddr(const xfrm_address_t *addr)
 {
 	unsigned h;
 
@@ -85,7 +85,7 @@ static inline unsigned xfrm6_tunnel_spi_hash_byspi(u32 spi)
 	return spi % XFRM6_TUNNEL_SPI_BYSPI_HSIZE;
 }
 
-static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(struct net *net, xfrm_address_t *saddr)
+static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr)
 {
 	struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
 	struct xfrm6_tunnel_spi *x6spi;
@@ -101,7 +101,7 @@ static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(struct net *net, xfrm_
 	return NULL;
 }
 
-__be32 xfrm6_tunnel_spi_lookup(struct net *net, xfrm_address_t *saddr)
+__be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr)
 {
 	struct xfrm6_tunnel_spi *x6spi;
 	u32 spi;
@@ -237,11 +237,11 @@ static int xfrm6_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
 static int xfrm6_tunnel_rcv(struct sk_buff *skb)
 {
 	struct net *net = dev_net(skb->dev);
-	struct ipv6hdr *iph = ipv6_hdr(skb);
+	const struct ipv6hdr *iph = ipv6_hdr(skb);
 	__be32 spi;
 
-	spi = xfrm6_tunnel_spi_lookup(net, (xfrm_address_t *)&iph->saddr);
-	return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi) > 0 ? : 0;
+	spi = xfrm6_tunnel_spi_lookup(net, (const xfrm_address_t *)&iph->saddr);
+	return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi);
 }
 
 static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
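The xfrm6_tunnel_rcv() change drops a `> 0 ? : 0` clamp, so xfrm6_rcv_spi()'s return value now propagates unmodified. The `? :` form is GCC's conditional with an omitted middle operand: `a ? : b` yields a if a is non-zero, else b, evaluating a only once. A minimal demo of what the old clamp did (lookup() is a made-up stand-in; GNU C extension, so build with gcc):

#include <stdio.h>

static int lookup(int key)
{
	return key == 1 ? 7 : -1;	/* pretend: hit returns >0, miss <0 */
}

int main(void)
{
	/* Old style: the comparison's truth value (1 or 0) is returned,
	 * so real error codes and positive results are both lost. */
	int clamped = lookup(2) > 0 ? : 0;
	/* New style: the value propagates as-is, errors included. */
	int raw = lookup(2);

	printf("clamped=%d raw=%d\n", clamped, raw);	/* clamped=0 raw=-1 */
	return 0;
}
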
diff --git a/net/irda/ircomm/ircomm_core.c b/net/irda/ircomm/ircomm_core.c
index e97082017f4f..52079f19bbbe 100644
--- a/net/irda/ircomm/ircomm_core.c
+++ b/net/irda/ircomm/ircomm_core.c
@@ -244,14 +244,8 @@ EXPORT_SYMBOL(ircomm_connect_request);
 void ircomm_connect_indication(struct ircomm_cb *self, struct sk_buff *skb,
 			       struct ircomm_info *info)
 {
-	int clen = 0;
-
 	IRDA_DEBUG(2, "%s()\n", __func__ );
 
-	/* Check if the packet contains data on the control channel */
-	if (skb->len > 0)
-		clen = skb->data[0];
-
 	/*
	 * If there are any data hiding in the control channel, we must
	 * deliver it first. The side effect is that the control channel
diff --git a/net/irda/ircomm/ircomm_lmp.c b/net/irda/ircomm/ircomm_lmp.c
index 08fb54dc8c41..3b8095c771d4 100644
--- a/net/irda/ircomm/ircomm_lmp.c
+++ b/net/irda/ircomm/ircomm_lmp.c
@@ -75,7 +75,6 @@ static int ircomm_lmp_connect_response(struct ircomm_cb *self,
 				       struct sk_buff *userdata)
 {
 	struct sk_buff *tx_skb;
-	int ret;
 
 	IRDA_DEBUG(0, "%s()\n", __func__ );
 
@@ -100,9 +99,7 @@ static int ircomm_lmp_connect_response(struct ircomm_cb *self,
 		tx_skb = userdata;
 	}
 
-	ret = irlmp_connect_response(self->lsap, tx_skb);
-
-	return 0;
+	return irlmp_connect_response(self->lsap, tx_skb);
 }
 
 static int ircomm_lmp_disconnect_request(struct ircomm_cb *self,
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index a39cca8331df..b3cc8b3989a9 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -38,6 +38,7 @@
 #include <linux/seq_file.h>
 #include <linux/termios.h>
 #include <linux/tty.h>
+#include <linux/tty_flip.h>
 #include <linux/interrupt.h>
 #include <linux/device.h>		/* for MODULE_ALIAS_CHARDEV_MAJOR */
 
@@ -1132,7 +1133,6 @@ static int ircomm_tty_data_indication(void *instance, void *sap,
 				      struct sk_buff *skb)
 {
 	struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance;
-	struct tty_ldisc *ld;
 
 	IRDA_DEBUG(2, "%s()\n", __func__ );
 
@@ -1161,15 +1161,11 @@ static int ircomm_tty_data_indication(void *instance, void *sap,
 	}
 
 	/*
-	 * Just give it over to the line discipline. There is no need to
-	 * involve the flip buffers, since we are not running in an interrupt
-	 * handler
+	 * Use flip buffer functions since the code may be called from interrupt
+	 * context
 	 */
-
-	ld = tty_ldisc_ref(self->tty);
-	if (ld)
-		ld->ops->receive_buf(self->tty, skb->data, NULL, skb->len);
-	tty_ldisc_deref(ld);
+	tty_insert_flip_string(self->tty, skb->data, skb->len);
+	tty_flip_buffer_push(self->tty);
 
 	/* No need to kfree_skb - see ircomm_ttp_data_indication() */
 
diff --git a/net/irda/iriap.c b/net/irda/iriap.c
index 36477538cea8..f876eed7d4aa 100644
--- a/net/irda/iriap.c
+++ b/net/irda/iriap.c
@@ -87,6 +87,8 @@ static inline void iriap_start_watchdog_timer(struct iriap_cb *self,
 			    iriap_watchdog_timer_expired);
 }
 
+static struct lock_class_key irias_objects_key;
+
 /*
  * Function iriap_init (void)
  *
@@ -114,6 +116,9 @@ int __init iriap_init(void)
 		return -ENOMEM;
 	}
 
+	lockdep_set_class_and_name(&irias_objects->hb_spinlock, &irias_objects_key,
+				   "irias_objects");
+
 	/*
	 * Register some default services for IrLMP
	 */
diff --git a/net/irda/irlan/irlan_filter.c b/net/irda/irlan/irlan_filter.c
index 9ff7823abec7..7977be7caf0f 100644
--- a/net/irda/irlan/irlan_filter.c
+++ b/net/irda/irlan/irlan_filter.c
@@ -143,12 +143,8 @@ void irlan_filter_request(struct irlan_cb *self, struct sk_buff *skb)
  */
 void irlan_check_command_param(struct irlan_cb *self, char *param, char *value)
 {
-	__u8 *bytes;
-
 	IRDA_DEBUG(4, "%s()\n", __func__ );
 
-	bytes = value;
-
 	IRDA_ASSERT(self != NULL, return;);
 	IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
 
diff --git a/net/irda/irlan/irlan_provider.c b/net/irda/irlan/irlan_provider.c
index 5cf5e6c872bb..b8af74ab8b68 100644
--- a/net/irda/irlan/irlan_provider.c
+++ b/net/irda/irlan/irlan_provider.c
@@ -128,7 +128,6 @@ static void irlan_provider_connect_indication(void *instance, void *sap,
 {
 	struct irlan_cb *self;
 	struct tsap_cb *tsap;
-	__u32 saddr, daddr;
 
 	IRDA_DEBUG(0, "%s()\n", __func__ );
 
@@ -141,8 +140,6 @@ static void irlan_provider_connect_indication(void *instance, void *sap,
 	IRDA_ASSERT(tsap == self->provider.tsap_ctrl,return;);
 	IRDA_ASSERT(self->provider.state == IRLAN_IDLE, return;);
 
-	daddr = irttp_get_daddr(tsap);
-	saddr = irttp_get_saddr(tsap);
 	self->provider.max_sdu_size = max_sdu_size;
 	self->provider.max_header_size = max_header_size;
 
diff --git a/net/irda/irlap_event.c b/net/irda/irlap_event.c
index bb47021c9a55..ccd214f9d196 100644
--- a/net/irda/irlap_event.c
+++ b/net/irda/irlap_event.c
@@ -2227,8 +2227,6 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event,
 static int irlap_state_sclose(struct irlap_cb *self, IRLAP_EVENT event,
 			      struct sk_buff *skb, struct irlap_info *info)
 {
-	int ret = 0;
-
 	IRDA_DEBUG(1, "%s()\n", __func__);
 
 	IRDA_ASSERT(self != NULL, return -ENODEV;);
@@ -2289,7 +2287,6 @@ static int irlap_state_sclose(struct irlap_cb *self, IRLAP_EVENT event,
 		IRDA_DEBUG(1, "%s(), Unknown event %d, (%s)\n", __func__,
 			   event, irlap_event[event]);
 
-		ret = -EINVAL;
 		break;
 	}
 
diff --git a/net/irda/irproc.c b/net/irda/irproc.c
index 318766e5dbdf..b9ac598e2116 100644
--- a/net/irda/irproc.c
+++ b/net/irda/irproc.c
@@ -65,15 +65,14 @@ static const struct irda_entry irda_dirs[] = {
 void __init irda_proc_register(void)
 {
 	int i;
-	struct proc_dir_entry *d;
 
 	proc_irda = proc_mkdir("irda", init_net.proc_net);
 	if (proc_irda == NULL)
 		return;
 
 	for (i = 0; i < ARRAY_SIZE(irda_dirs); i++)
-		d = proc_create(irda_dirs[i].name, 0, proc_irda,
+		(void) proc_create(irda_dirs[i].name, 0, proc_irda,
				irda_dirs[i].fops);
 }
 
 /*
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 986b2a5e8769..e2013e434d03 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -190,7 +190,6 @@ static int afiucv_pm_freeze(struct device *dev)
  */
 static int afiucv_pm_restore_thaw(struct device *dev)
 {
-	struct iucv_sock *iucv;
 	struct sock *sk;
 	struct hlist_node *node;
 
@@ -199,7 +198,6 @@ static int afiucv_pm_restore_thaw(struct device *dev)
 #endif
 	read_lock(&iucv_sk_list.lock);
 	sk_for_each(sk, node, &iucv_sk_list.head) {
-		iucv = iucv_sk(sk);
 		switch (sk->sk_state) {
 		case IUCV_CONNECTED:
 			sk->sk_err = EPIPE;
@@ -381,7 +379,6 @@ static void iucv_sock_close(struct sock *sk)
 {
 	unsigned char user_data[16];
 	struct iucv_sock *iucv = iucv_sk(sk);
-	int err;
 	unsigned long timeo;
 
 	iucv_sock_clear_timer(sk);
@@ -394,8 +391,6 @@ static void iucv_sock_close(struct sock *sk)
 
 	case IUCV_CONNECTED:
 	case IUCV_DISCONN:
-		err = 0;
-
 		sk->sk_state = IUCV_CLOSING;
 		sk->sk_state_change(sk);
 
@@ -404,7 +399,7 @@ static void iucv_sock_close(struct sock *sk)
 			timeo = sk->sk_lingertime;
 		else
 			timeo = IUCV_DISCONN_TIMEOUT;
-		err = iucv_sock_wait(sk,
+		iucv_sock_wait(sk,
 			       iucv_sock_in_state(sk, IUCV_CLOSED, 0),
 			       timeo);
 	}
@@ -417,7 +412,7 @@ static void iucv_sock_close(struct sock *sk)
 		low_nmcpy(user_data, iucv->src_name);
 		high_nmcpy(user_data, iucv->dst_name);
 		ASCEBC(user_data, sizeof(user_data));
-		err = iucv_path_sever(iucv->path, user_data);
+		iucv_path_sever(iucv->path, user_data);
 		iucv_path_free(iucv->path);
 		iucv->path = NULL;
 	}
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 8f156bd86be7..7f9124914b13 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -54,7 +54,7 @@
 #include <asm/atomic.h>
 #include <asm/ebcdic.h>
 #include <asm/io.h>
-#include <asm/s390_ext.h>
+#include <asm/irq.h>
 #include <asm/smp.h>
 
 /*
@@ -128,8 +128,8 @@ struct iucv_irq_list {
 };
 
 static struct iucv_irq_data *iucv_irq_data[NR_CPUS];
-static cpumask_t iucv_buffer_cpumask = CPU_MASK_NONE;
-static cpumask_t iucv_irq_cpumask = CPU_MASK_NONE;
+static cpumask_t iucv_buffer_cpumask = { CPU_BITS_NONE };
+static cpumask_t iucv_irq_cpumask = { CPU_BITS_NONE };
 
 /*
  * Queue of interrupt buffers lock for delivery via the tasklet
@@ -406,7 +406,7 @@ static void iucv_allow_cpu(void *data)
 	parm->set_mask.ipmask = 0xf8;
 	iucv_call_b2f0(IUCV_SETCONTROLMASK, parm);
 	/* Set indication that iucv interrupts are allowed for this cpu. */
-	cpu_set(cpu, iucv_irq_cpumask);
+	cpumask_set_cpu(cpu, &iucv_irq_cpumask);
 }
 
 /**
@@ -426,7 +426,7 @@ static void iucv_block_cpu(void *data)
 	iucv_call_b2f0(IUCV_SETMASK, parm);
 
 	/* Clear indication that iucv interrupts are allowed for this cpu. */
-	cpu_clear(cpu, iucv_irq_cpumask);
+	cpumask_clear_cpu(cpu, &iucv_irq_cpumask);
 }
 
 /**
@@ -451,7 +451,7 @@ static void iucv_block_cpu_almost(void *data)
 	iucv_call_b2f0(IUCV_SETCONTROLMASK, parm);
 
 	/* Clear indication that iucv interrupts are allowed for this cpu. */
-	cpu_clear(cpu, iucv_irq_cpumask);
+	cpumask_clear_cpu(cpu, &iucv_irq_cpumask);
 }
 
 /**
@@ -466,7 +466,7 @@ static void iucv_declare_cpu(void *data)
 	union iucv_param *parm;
 	int rc;
 
-	if (cpu_isset(cpu, iucv_buffer_cpumask))
+	if (cpumask_test_cpu(cpu, &iucv_buffer_cpumask))
 		return;
 
 	/* Declare interrupt buffer. */
@@ -499,9 +499,9 @@ static void iucv_declare_cpu(void *data)
 	}
 
 	/* Set indication that an iucv buffer exists for this cpu. */
-	cpu_set(cpu, iucv_buffer_cpumask);
+	cpumask_set_cpu(cpu, &iucv_buffer_cpumask);
 
-	if (iucv_nonsmp_handler == 0 || cpus_empty(iucv_irq_cpumask))
+	if (iucv_nonsmp_handler == 0 || cpumask_empty(&iucv_irq_cpumask))
 		/* Enable iucv interrupts on this cpu. */
 		iucv_allow_cpu(NULL);
 	else
@@ -520,7 +520,7 @@ static void iucv_retrieve_cpu(void *data)
 	int cpu = smp_processor_id();
 	union iucv_param *parm;
 
-	if (!cpu_isset(cpu, iucv_buffer_cpumask))
+	if (!cpumask_test_cpu(cpu, &iucv_buffer_cpumask))
 		return;
 
 	/* Block iucv interrupts. */
@@ -531,7 +531,7 @@ static void iucv_retrieve_cpu(void *data)
 	iucv_call_b2f0(IUCV_RETRIEVE_BUFFER, parm);
 
 	/* Clear indication that an iucv buffer exists for this cpu. */
-	cpu_clear(cpu, iucv_buffer_cpumask);
+	cpumask_clear_cpu(cpu, &iucv_buffer_cpumask);
 }
 
 /**
@@ -546,8 +546,8 @@ static void iucv_setmask_mp(void)
 	get_online_cpus();
 	for_each_online_cpu(cpu)
 		/* Enable all cpus with a declared buffer. */
-		if (cpu_isset(cpu, iucv_buffer_cpumask) &&
-		    !cpu_isset(cpu, iucv_irq_cpumask))
+		if (cpumask_test_cpu(cpu, &iucv_buffer_cpumask) &&
+		    !cpumask_test_cpu(cpu, &iucv_irq_cpumask))
 			smp_call_function_single(cpu, iucv_allow_cpu,
 						 NULL, 1);
 	put_online_cpus();
@@ -564,9 +564,9 @@ static void iucv_setmask_up(void)
 	int cpu;
 
 	/* Disable all cpu but the first in cpu_irq_cpumask. */
-	cpumask = iucv_irq_cpumask;
-	cpu_clear(first_cpu(iucv_irq_cpumask), cpumask);
-	for_each_cpu_mask_nr(cpu, cpumask)
+	cpumask_copy(&cpumask, &iucv_irq_cpumask);
+	cpumask_clear_cpu(cpumask_first(&iucv_irq_cpumask), &cpumask);
+	for_each_cpu(cpu, &cpumask)
 		smp_call_function_single(cpu, iucv_block_cpu, NULL, 1);
 }
 
@@ -593,7 +593,7 @@ static int iucv_enable(void)
 	rc = -EIO;
 	for_each_online_cpu(cpu)
 		smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1);
-	if (cpus_empty(iucv_buffer_cpumask))
+	if (cpumask_empty(&iucv_buffer_cpumask))
 		/* No cpu could declare an iucv buffer. */
 		goto out;
 	put_online_cpus();
@@ -675,15 +675,16 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
 	case CPU_DOWN_PREPARE_FROZEN:
 		if (!iucv_path_table)
 			break;
-		cpumask = iucv_buffer_cpumask;
-		cpu_clear(cpu, cpumask);
-		if (cpus_empty(cpumask))
+		cpumask_copy(&cpumask, &iucv_buffer_cpumask);
+		cpumask_clear_cpu(cpu, &cpumask);
+		if (cpumask_empty(&cpumask))
 			/* Can't offline last IUCV enabled cpu. */
 			return notifier_from_errno(-EINVAL);
 		smp_call_function_single(cpu, iucv_retrieve_cpu, NULL, 1);
-		if (cpus_empty(iucv_irq_cpumask))
-			smp_call_function_single(first_cpu(iucv_buffer_cpumask),
-						 iucv_allow_cpu, NULL, 1);
+		if (cpumask_empty(&iucv_irq_cpumask))
+			smp_call_function_single(
+				cpumask_first(&iucv_buffer_cpumask),
+				iucv_allow_cpu, NULL, 1);
 		break;
 	}
 	return NOTIFY_OK;
@@ -828,14 +829,14 @@ EXPORT_SYMBOL(iucv_unregister);
 static int iucv_reboot_event(struct notifier_block *this,
 			     unsigned long event, void *ptr)
 {
-	int i, rc;
+	int i;
 
 	get_online_cpus();
 	on_each_cpu(iucv_block_cpu, NULL, 1);
 	preempt_disable();
 	for (i = 0; i < iucv_max_pathid; i++) {
 		if (iucv_path_table[i])
-			rc = iucv_sever_pathid(i, NULL);
+			iucv_sever_pathid(i, NULL);
 	}
 	preempt_enable();
 	put_online_cpus();
@@ -866,7 +867,7 @@ int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler,
 	int rc;
 
 	local_bh_disable();
-	if (cpus_empty(iucv_buffer_cpumask)) {
+	if (cpumask_empty(&iucv_buffer_cpumask)) {
 		rc = -EIO;
 		goto out;
 	}
@@ -915,7 +916,7 @@ int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler,
 
 	spin_lock_bh(&iucv_table_lock);
 	iucv_cleanup_queue();
-	if (cpus_empty(iucv_buffer_cpumask)) {
+	if (cpumask_empty(&iucv_buffer_cpumask)) {
 		rc = -EIO;
 		goto out;
 	}
@@ -975,7 +976,7 @@ int iucv_path_quiesce(struct iucv_path *path, u8 userdata[16])
 	int rc;
 
 	local_bh_disable();
-	if (cpus_empty(iucv_buffer_cpumask)) {
+	if (cpumask_empty(&iucv_buffer_cpumask)) {
 		rc = -EIO;
 		goto out;
 	}
@@ -1007,7 +1008,7 @@ int iucv_path_resume(struct iucv_path *path, u8 userdata[16])
 	int rc;
 
 	local_bh_disable();
-	if (cpus_empty(iucv_buffer_cpumask)) {
+	if (cpumask_empty(&iucv_buffer_cpumask)) {
 		rc = -EIO;
 		goto out;
 	}
@@ -1036,7 +1037,7 @@ int iucv_path_sever(struct iucv_path *path, u8 userdata[16])
 	int rc;
 
 	preempt_disable();
-	if (cpus_empty(iucv_buffer_cpumask)) {
+	if (cpumask_empty(&iucv_buffer_cpumask)) {
 		rc = -EIO;
 		goto out;
 	}
@@ -1070,7 +1071,7 @@ int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg,
 	int rc;
 
 	local_bh_disable();
-	if (cpus_empty(iucv_buffer_cpumask)) {
+	if (cpumask_empty(&iucv_buffer_cpumask)) {
 		rc = -EIO;
 		goto out;
 	}
@@ -1162,7 +1163,7 @@ int __iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
 	if (msg->flags & IUCV_IPRMDATA)
 		return iucv_message_receive_iprmdata(path, msg, flags,
 						     buffer, size, residual);
-	if (cpus_empty(iucv_buffer_cpumask)) {
+	if (cpumask_empty(&iucv_buffer_cpumask)) {
 		rc = -EIO;
 		goto out;
 	}
@@ -1235,7 +1236,7 @@ int iucv_message_reject(struct iucv_path *path, struct iucv_message *msg)
 	int rc;
 
 	local_bh_disable();
-	if (cpus_empty(iucv_buffer_cpumask)) {
+	if (cpumask_empty(&iucv_buffer_cpumask)) {
 		rc = -EIO;
 		goto out;
 	}
@@ -1274,7 +1275,7 @@ int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg,
 	int rc;
 
 	local_bh_disable();
-	if (cpus_empty(iucv_buffer_cpumask)) {
+	if (cpumask_empty(&iucv_buffer_cpumask)) {
 		rc = -EIO;
 		goto out;
 	}
@@ -1324,7 +1325,7 @@ int __iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
 	union iucv_param *parm;
 	int rc;
 
-	if (cpus_empty(iucv_buffer_cpumask)) {
+	if (cpumask_empty(&iucv_buffer_cpumask)) {
 		rc = -EIO;
 		goto out;
 	}
@@ -1411,7 +1412,7 @@ int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg,
 	int rc;
 
 	local_bh_disable();
-	if (cpus_empty(iucv_buffer_cpumask)) {
+	if (cpumask_empty(&iucv_buffer_cpumask)) {
 		rc = -EIO;
 		goto out;
 	}
@@ -1888,7 +1889,7 @@ static int iucv_pm_freeze(struct device *dev)
 	printk(KERN_WARNING "iucv_pm_freeze\n");
 #endif
 	if (iucv_pm_state != IUCV_PM_FREEZING) {
-		for_each_cpu_mask_nr(cpu, iucv_irq_cpumask)
+		for_each_cpu(cpu, &iucv_irq_cpumask)
 			smp_call_function_single(cpu, iucv_block_cpu_almost,
 						 NULL, 1);
 		cancel_work_sync(&iucv_work);
@@ -1928,7 +1929,7 @@ static int iucv_pm_thaw(struct device *dev)
 		if (rc)
 			goto out;
 	}
-	if (cpus_empty(iucv_irq_cpumask)) {
+	if (cpumask_empty(&iucv_irq_cpumask)) {
 		if (iucv_nonsmp_handler)
 			/* enable interrupts on one cpu */
 			iucv_allow_cpu(NULL);
@@ -1961,7 +1962,7 @@ static int iucv_pm_restore(struct device *dev)
 	pr_warning("Suspending Linux did not completely close all IUCV "
 		"connections\n");
 	iucv_pm_state = IUCV_PM_RESTORING;
-	if (cpus_empty(iucv_irq_cpumask)) {
+	if (cpumask_empty(&iucv_irq_cpumask)) {
 		rc = iucv_query_maxconn();
 		rc = iucv_enable();
 		if (rc)
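
The iucv.c hunks above are a mechanical conversion from the old value-based cpumask operations (cpu_set(), cpu_isset(), cpus_empty(), first_cpu(), for_each_cpu_mask_nr()) to the pointer-based API (cpumask_set_cpu(), cpumask_test_cpu(), cpumask_empty(), cpumask_first(), for_each_cpu()), which avoids copying whole masks on large-NR_CPUS builds. A minimal sketch of the new calls; the mask and helper name here are made up for illustration:

#include <linux/cpumask.h>
#include <linux/printk.h>

static cpumask_t example_mask = { CPU_BITS_NONE };

static void example_cpumask_usage(unsigned int cpu)
{
	cpumask_set_cpu(cpu, &example_mask);	/* was: cpu_set(cpu, mask) */

	if (cpumask_test_cpu(cpu, &example_mask))	/* was: cpu_isset() */
		pr_info("cpu %u is set\n", cpu);

	if (!cpumask_empty(&example_mask))	/* was: !cpus_empty() */
		pr_info("first cpu: %u\n", cpumask_first(&example_mask));

	cpumask_clear_cpu(cpu, &example_mask);	/* was: cpu_clear() */
}
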
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 7db86ffcf070..8f92cf8116ea 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -712,7 +712,7 @@ static unsigned int pfkey_sockaddr_fill(const xfrm_address_t *xaddr, __be16 port
 		sin6->sin6_family = AF_INET6;
 		sin6->sin6_port = port;
 		sin6->sin6_flowinfo = 0;
-		ipv6_addr_copy(&sin6->sin6_addr, (struct in6_addr *)xaddr->a6);
+		ipv6_addr_copy(&sin6->sin6_addr, (const struct in6_addr *)xaddr->a6);
 		sin6->sin6_scope_id = 0;
 		return 128;
 	}
@@ -3656,7 +3656,7 @@ static int pfkey_seq_show(struct seq_file *f, void *v)
 	if (v == SEQ_START_TOKEN)
 		seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
 	else
-		seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
+		seq_printf(f, "%pK %-6d %-6u %-6u %-6u %-6lu\n",
 			       s,
 			       atomic_read(&s->sk_refcnt),
 			       sk_rmem_alloc_get(s),
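
The %p to %pK change above hides socket addresses from unprivileged readers of the pfkey proc file; %pK consults the kptr_restrict sysctl and prints zeros when the reader lacks privilege. A minimal sketch of the format specifier, with a hypothetical show helper:

#include <linux/seq_file.h>
#include <net/sock.h>

/* Hypothetical seq_file show helper: %pK hides the pointer value
 * from unprivileged readers according to kptr_restrict. */
static void example_show_sock(struct seq_file *f, struct sock *s)
{
	seq_printf(f, "%pK refcnt=%d\n", s, atomic_read(&s->sk_refcnt));
}
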
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index c64ce0a0bb03..ed8a2335442f 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -954,7 +954,7 @@ static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
 }
 
 static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
-			  size_t data_len)
+			  struct flowi *fl, size_t data_len)
 {
 	struct l2tp_tunnel *tunnel = session->tunnel;
 	unsigned int len = skb->len;
@@ -987,7 +987,7 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
 
 	/* Queue the packet to IP for output */
 	skb->local_df = 1;
-	error = ip_queue_xmit(skb);
+	error = ip_queue_xmit(skb, fl);
 
 	/* Update stats */
 	if (error >= 0) {
@@ -1028,6 +1028,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
 	int data_len = skb->len;
 	struct l2tp_tunnel *tunnel = session->tunnel;
 	struct sock *sk = tunnel->sock;
+	struct flowi *fl;
 	struct udphdr *uh;
 	struct inet_sock *inet;
 	__wsum csum;
@@ -1060,14 +1061,21 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
 			      IPSKB_REROUTED);
 	nf_reset(skb);
 
+	bh_lock_sock(sk);
+	if (sock_owned_by_user(sk)) {
+		dev_kfree_skb(skb);
+		goto out_unlock;
+	}
+
 	/* Get routing info from the tunnel socket */
 	skb_dst_drop(skb);
 	skb_dst_set(skb, dst_clone(__sk_dst_get(sk)));
 
+	inet = inet_sk(sk);
+	fl = &inet->cork.fl;
 	switch (tunnel->encap) {
 	case L2TP_ENCAPTYPE_UDP:
 		/* Setup UDP header */
-		inet = inet_sk(sk);
 		__skb_push(skb, sizeof(*uh));
 		skb_reset_transport_header(skb);
 		uh = udp_hdr(skb);
@@ -1105,7 +1113,9 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
 
 	l2tp_skb_set_owner_w(skb, sk);
 
-	l2tp_xmit_core(session, skb, data_len);
+	l2tp_xmit_core(session, skb, fl, data_len);
+out_unlock:
+	bh_unlock_sock(sk);
 
 abort:
 	return 0;
@@ -1425,16 +1435,15 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
 
 	/* Add tunnel to our list */
 	INIT_LIST_HEAD(&tunnel->list);
-	spin_lock_bh(&pn->l2tp_tunnel_list_lock);
-	list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
-	spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
-	synchronize_rcu();
 	atomic_inc(&l2tp_tunnel_count);
 
 	/* Bump the reference count. The tunnel context is deleted
-	 * only when this drops to zero.
+	 * only when this drops to zero. Must be done before list insertion
 	 */
 	l2tp_tunnel_inc_refcount(tunnel);
+	spin_lock_bh(&pn->l2tp_tunnel_list_lock);
+	list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
+	spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
 
 	err = 0;
 err:
@@ -1626,7 +1635,6 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
 		hlist_add_head_rcu(&session->global_hlist,
 				   l2tp_session_id_hash_2(pn, session_id));
 		spin_unlock_bh(&pn->l2tp_session_hlist_lock);
-		synchronize_rcu();
 	}
 
 	/* Ignore management session in session count value */
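
The l2tp_core.c hunks above reorder tunnel creation so the reference is taken before the tunnel is published with list_add_rcu(), and drop synchronize_rcu() from the insert paths, where it is not needed. A sketch of that publish ordering on a hypothetical object (names and fields are illustrative, not from l2tp):

#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>

struct my_obj {
	struct list_head list;
	atomic_t refcnt;
};

/* Take the reference first, then publish: an RCU reader that finds
 * the object on the list can never observe a zero refcount. */
static void example_publish(struct my_obj *obj, struct list_head *head,
			    spinlock_t *lock)
{
	atomic_set(&obj->refcnt, 1);
	spin_lock_bh(lock);
	list_add_rcu(&obj->list, head);
	spin_unlock_bh(lock);
	/* synchronize_rcu() is only needed before freeing, not here */
}
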
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
index b8dbae82fab8..76130134bfa6 100644
--- a/net/l2tp/l2tp_debugfs.c
+++ b/net/l2tp/l2tp_debugfs.c
@@ -258,7 +258,7 @@ static int l2tp_dfs_seq_open(struct inode *inode, struct file *file)
 	 */
 	pd->net = get_net_ns_by_pid(current->pid);
 	if (IS_ERR(pd->net)) {
-		rc = -PTR_ERR(pd->net);
+		rc = PTR_ERR(pd->net);
 		goto err_free_pd;
 	}
 
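
The one-line l2tp_debugfs.c fix above removes a sign flip: PTR_ERR() already yields a negative errno for a pointer that fails IS_ERR(), so negating it produced a positive value that callers would not treat as an error. A minimal sketch of the idiom, mirroring the call in the hunk:

#include <linux/err.h>
#include <linux/sched.h>
#include <net/net_namespace.h>

static int example_open(void)
{
	struct net *net = get_net_ns_by_pid(current->pid);

	if (IS_ERR(net))
		return PTR_ERR(net);	/* already negative, e.g. -ESRCH */
	/* ... use net, then put_net(net) ... */
	return 0;
}
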
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 5c04f3e42704..b6466e71f5e1 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -296,12 +296,12 @@ out_in_use:
 
 static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 {
-	int rc;
-	struct inet_sock *inet = inet_sk(sk);
 	struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *) uaddr;
+	struct inet_sock *inet = inet_sk(sk);
+	struct flowi4 *fl4;
 	struct rtable *rt;
 	__be32 saddr;
-	int oif;
+	int oif, rc;
 
 	rc = -EINVAL;
 	if (addr_len < sizeof(*lsa))
@@ -311,6 +311,8 @@ static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
 	if (lsa->l2tp_family != AF_INET)
 		goto out;
 
+	lock_sock(sk);
+
 	sk_dst_reset(sk);
 
 	oif = sk->sk_bound_dev_if;
@@ -320,7 +322,8 @@ static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
 	if (ipv4_is_multicast(lsa->l2tp_addr.s_addr))
 		goto out;
 
-	rt = ip_route_connect(lsa->l2tp_addr.s_addr, saddr,
+	fl4 = &inet->cork.fl.u.ip4;
+	rt = ip_route_connect(fl4, lsa->l2tp_addr.s_addr, saddr,
 			      RT_CONN_FLAGS(sk), oif,
 			      IPPROTO_L2TP,
 			      0, 0, sk, true);
@@ -340,10 +343,10 @@ static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
 	l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;
 
 	if (!inet->inet_saddr)
-		inet->inet_saddr = rt->rt_src;
+		inet->inet_saddr = fl4->saddr;
 	if (!inet->inet_rcv_saddr)
-		inet->inet_rcv_saddr = rt->rt_src;
-	inet->inet_daddr = rt->rt_dst;
+		inet->inet_rcv_saddr = fl4->saddr;
+	inet->inet_daddr = fl4->daddr;
 	sk->sk_state = TCP_ESTABLISHED;
 	inet->inet_id = jiffies;
 
@@ -356,6 +359,7 @@ static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
 
 	rc = 0;
 out:
+	release_sock(sk);
 	return rc;
 }
 
@@ -416,23 +420,28 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
 	int rc;
 	struct l2tp_ip_sock *lsa = l2tp_ip_sk(sk);
 	struct inet_sock *inet = inet_sk(sk);
-	struct ip_options *opt = inet->opt;
 	struct rtable *rt = NULL;
+	struct flowi4 *fl4;
 	int connected = 0;
 	__be32 daddr;
 
+	lock_sock(sk);
+
+	rc = -ENOTCONN;
 	if (sock_flag(sk, SOCK_DEAD))
-		return -ENOTCONN;
+		goto out;
 
 	/* Get and verify the address. */
 	if (msg->msg_name) {
 		struct sockaddr_l2tpip *lip = (struct sockaddr_l2tpip *) msg->msg_name;
+		rc = -EINVAL;
 		if (msg->msg_namelen < sizeof(*lip))
-			return -EINVAL;
+			goto out;
 
 		if (lip->l2tp_family != AF_INET) {
+			rc = -EAFNOSUPPORT;
 			if (lip->l2tp_family != AF_UNSPEC)
-				return -EAFNOSUPPORT;
+				goto out;
 		}
 
 		daddr = lip->l2tp_addr.s_addr;
@@ -467,19 +476,27 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
 		goto error;
 	}
 
+	fl4 = &inet->cork.fl.u.ip4;
 	if (connected)
 		rt = (struct rtable *) __sk_dst_check(sk, 0);
 
 	if (rt == NULL) {
+		struct ip_options_rcu *inet_opt;
+
+		rcu_read_lock();
+		inet_opt = rcu_dereference(inet->inet_opt);
+
 		/* Use correct destination address if we have options. */
-		if (opt && opt->srr)
-			daddr = opt->faddr;
+		if (inet_opt && inet_opt->opt.srr)
+			daddr = inet_opt->opt.faddr;
+
+		rcu_read_unlock();
 
 		/* If this fails, retransmit mechanism of transport layer will
 		 * keep trying until route appears or the connection times
 		 * itself out.
 		 */
-		rt = ip_route_output_ports(sock_net(sk), sk,
+		rt = ip_route_output_ports(sock_net(sk), fl4, sk,
 					   daddr, inet->inet_saddr,
 					   inet->inet_dport, inet->inet_sport,
 					   sk->sk_protocol, RT_CONN_FLAGS(sk),
@@ -491,7 +508,7 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
 	skb_dst_set(skb, dst_clone(&rt->dst));
 
 	/* Queue the packet to IP for output */
-	rc = ip_queue_xmit(skb);
+	rc = ip_queue_xmit(skb, &inet->cork.fl);
 
 error:
 	/* Update stats */
@@ -503,12 +520,15 @@ error:
 		lsa->tx_errors++;
 	}
 
+out:
+	release_sock(sk);
 	return rc;
 
 no_route:
 	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
 	kfree_skb(skb);
-	return -EHOSTUNREACH;
+	rc = -EHOSTUNREACH;
+	goto out;
 }
 
 static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
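
The l2tp_ip.c hunks above wrap connect and sendmsg in lock_sock()/release_sock() and convert the early returns into goto out, so no path can leave the socket locked. A sketch of that single-exit pattern; the function and its body are hypothetical:

#include <net/sock.h>

static int example_sendmsg(struct sock *sk)
{
	int rc;

	lock_sock(sk);

	rc = -ENOTCONN;
	if (sock_flag(sk, SOCK_DEAD))
		goto out;

	rc = 0;			/* ... actual transmit work here ... */
out:
	release_sock(sk);	/* every exit path releases the lock */
	return rc;
}
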
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index 4c1e540732d7..93a41a09458b 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -795,11 +795,12 @@ int l2tp_nl_register_ops(enum l2tp_pwtype pw_type, const struct l2tp_nl_cmd_ops
 		goto out;
 
 	l2tp_nl_cmd_ops[pw_type] = ops;
+	ret = 0;
 
 out:
 	genl_unlock();
 err:
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL_GPL(l2tp_nl_register_ops);
 
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index 513f85cc2ae1..f5fdfcbf552a 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -2,7 +2,6 @@ config MAC80211
 	tristate "Generic IEEE 802.11 Networking Stack (mac80211)"
 	depends on CFG80211
 	select CRYPTO
-	select CRYPTO_ECB
 	select CRYPTO_ARC4
 	select CRYPTO_AES
 	select CRC32
diff --git a/net/mac80211/aes_ccm.c b/net/mac80211/aes_ccm.c
index 4bd6ef0be380..b9b595c08112 100644
--- a/net/mac80211/aes_ccm.c
+++ b/net/mac80211/aes_ccm.c
@@ -54,13 +54,12 @@ void ieee80211_aes_ccm_encrypt(struct crypto_cipher *tfm, u8 *scratch,
 			       u8 *cdata, u8 *mic)
 {
 	int i, j, last_len, num_blocks;
-	u8 *pos, *cpos, *b, *s_0, *e, *b_0, *aad;
+	u8 *pos, *cpos, *b, *s_0, *e, *b_0;
 
 	b = scratch;
 	s_0 = scratch + AES_BLOCK_LEN;
 	e = scratch + 2 * AES_BLOCK_LEN;
 	b_0 = scratch + 3 * AES_BLOCK_LEN;
-	aad = scratch + 4 * AES_BLOCK_LEN;
 
 	num_blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN);
 	last_len = data_len % AES_BLOCK_LEN;
@@ -94,13 +93,12 @@ int ieee80211_aes_ccm_decrypt(struct crypto_cipher *tfm, u8 *scratch,
 			      u8 *cdata, size_t data_len, u8 *mic, u8 *data)
 {
 	int i, j, last_len, num_blocks;
-	u8 *pos, *cpos, *b, *s_0, *a, *b_0, *aad;
+	u8 *pos, *cpos, *b, *s_0, *a, *b_0;
 
 	b = scratch;
 	s_0 = scratch + AES_BLOCK_LEN;
 	a = scratch + 2 * AES_BLOCK_LEN;
 	b_0 = scratch + 3 * AES_BLOCK_LEN;
-	aad = scratch + 4 * AES_BLOCK_LEN;
 
 	num_blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN);
 	last_len = data_len % AES_BLOCK_LEN;
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 0c9d0c07eae6..9c0d76cdca92 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -63,7 +63,8 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
 
 	lockdep_assert_held(&sta->ampdu_mlme.mtx);
 
-	tid_rx = sta->ampdu_mlme.tid_rx[tid];
+	tid_rx = rcu_dereference_protected(sta->ampdu_mlme.tid_rx[tid],
+					lockdep_is_held(&sta->ampdu_mlme.mtx));
 
 	if (!tid_rx)
 		return;
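
rcu_dereference_protected(), used above, is the update-side accessor for an RCU pointer: the second argument is a lockdep-checked claim that the caller holds the lock guarding updates, so no rcu_read_lock() is required. A sketch on a hypothetical structure:

#include <linux/rcupdate.h>
#include <linux/mutex.h>

struct foo;

struct demo {
	struct foo __rcu *ptr;
	struct mutex mtx;
};

static void demo_update(struct demo *d, struct foo *newp)
{
	struct foo *old;

	lockdep_assert_held(&d->mtx);
	/* safe without rcu_read_lock(); lockdep verifies the claim */
	old = rcu_dereference_protected(d->ptr, lockdep_is_held(&d->mtx));
	rcu_assign_pointer(d->ptr, newp);
	/* 'old' may be freed after a grace period */
}
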
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 63d852cb4ca2..c8be8eff70da 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -136,12 +136,12 @@ void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u1
 	ieee80211_tx_skb(sdata, skb);
 }
 
-static void kfree_tid_tx(struct rcu_head *rcu_head)
+void ieee80211_assign_tid_tx(struct sta_info *sta, int tid,
+			     struct tid_ampdu_tx *tid_tx)
 {
-	struct tid_ampdu_tx *tid_tx =
-		container_of(rcu_head, struct tid_ampdu_tx, rcu_head);
-
-	kfree(tid_tx);
+	lockdep_assert_held(&sta->ampdu_mlme.mtx);
+	lockdep_assert_held(&sta->lock);
+	rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], tid_tx);
 }
 
 int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
@@ -149,21 +149,24 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
 				    bool tx)
 {
 	struct ieee80211_local *local = sta->local;
-	struct tid_ampdu_tx *tid_tx = sta->ampdu_mlme.tid_tx[tid];
+	struct tid_ampdu_tx *tid_tx;
 	int ret;
 
 	lockdep_assert_held(&sta->ampdu_mlme.mtx);
 
-	if (!tid_tx)
-		return -ENOENT;
-
 	spin_lock_bh(&sta->lock);
 
+	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
+	if (!tid_tx) {
+		spin_unlock_bh(&sta->lock);
+		return -ENOENT;
+	}
+
 	if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
 		/* not even started yet! */
-		rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], NULL);
+		ieee80211_assign_tid_tx(sta, tid, NULL);
 		spin_unlock_bh(&sta->lock);
-		call_rcu(&tid_tx->rcu_head, kfree_tid_tx);
+		kfree_rcu(tid_tx, rcu_head);
 		return 0;
 	}
 
@@ -283,13 +286,13 @@ ieee80211_wake_queue_agg(struct ieee80211_local *local, int tid)
 
 void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
 {
-	struct tid_ampdu_tx *tid_tx = sta->ampdu_mlme.tid_tx[tid];
+	struct tid_ampdu_tx *tid_tx;
 	struct ieee80211_local *local = sta->local;
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
 	u16 start_seq_num;
 	int ret;
 
-	lockdep_assert_held(&sta->ampdu_mlme.mtx);
+	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
 
 	/*
 	 * While we're asking the driver about the aggregation,
@@ -318,11 +321,11 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
318 " tid %d\n", tid); 321 " tid %d\n", tid);
319#endif 322#endif
320 spin_lock_bh(&sta->lock); 323 spin_lock_bh(&sta->lock);
321 rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], NULL); 324 ieee80211_assign_tid_tx(sta, tid, NULL);
322 spin_unlock_bh(&sta->lock); 325 spin_unlock_bh(&sta->lock);
323 326
324 ieee80211_wake_queue_agg(local, tid); 327 ieee80211_wake_queue_agg(local, tid);
325 call_rcu(&tid_tx->rcu_head, kfree_tid_tx); 328 kfree_rcu(tid_tx, rcu_head);
326 return; 329 return;
327 } 330 }
328 331
@@ -396,9 +399,9 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
 		goto err_unlock_sta;
 	}
 
-	tid_tx = sta->ampdu_mlme.tid_tx[tid];
+	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
 	/* check if the TID is not in aggregation flow already */
-	if (tid_tx) {
+	if (tid_tx || sta->ampdu_mlme.tid_start_tx[tid]) {
 #ifdef CONFIG_MAC80211_HT_DEBUG
 		printk(KERN_DEBUG "BA request denied - session is not "
 				  "idle on tid %u\n", tid);
@@ -433,8 +436,11 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
 	sta->ampdu_mlme.dialog_token_allocator++;
 	tid_tx->dialog_token = sta->ampdu_mlme.dialog_token_allocator;
 
-	/* finally, assign it to the array */
-	rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], tid_tx);
+	/*
+	 * Finally, assign it to the start array; the work item will
+	 * collect it and move it to the normal array.
+	 */
+	sta->ampdu_mlme.tid_start_tx[tid] = tid_tx;
 
 	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
 
@@ -480,16 +486,19 @@ ieee80211_agg_splice_finish(struct ieee80211_local *local, u16 tid)
 static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
 					 struct sta_info *sta, u16 tid)
 {
+	struct tid_ampdu_tx *tid_tx;
+
 	lockdep_assert_held(&sta->ampdu_mlme.mtx);
 
+	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
+
 #ifdef CONFIG_MAC80211_HT_DEBUG
 	printk(KERN_DEBUG "Aggregation is on for tid %d\n", tid);
 #endif
 
 	drv_ampdu_action(local, sta->sdata,
 			 IEEE80211_AMPDU_TX_OPERATIONAL,
-			 &sta->sta, tid, NULL,
-			 sta->ampdu_mlme.tid_tx[tid]->buf_size);
+			 &sta->sta, tid, NULL, tid_tx->buf_size);
 
 	/*
 	 * synchronize with TX path, while splicing the TX path
@@ -497,13 +506,13 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
 	 */
 	spin_lock_bh(&sta->lock);
 
-	ieee80211_agg_splice_packets(local, sta->ampdu_mlme.tid_tx[tid], tid);
+	ieee80211_agg_splice_packets(local, tid_tx, tid);
 	/*
 	 * Now mark as operational. This will be visible
 	 * in the TX path, and lets it go lock-free in
 	 * the common case.
 	 */
-	set_bit(HT_AGG_STATE_OPERATIONAL, &sta->ampdu_mlme.tid_tx[tid]->state);
+	set_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);
 	ieee80211_agg_splice_finish(local, tid);
 
 	spin_unlock_bh(&sta->lock);
@@ -537,7 +546,7 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
 	}
 
 	mutex_lock(&sta->ampdu_mlme.mtx);
-	tid_tx = sta->ampdu_mlme.tid_tx[tid];
+	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
 
 	if (WARN_ON(!tid_tx)) {
 #ifdef CONFIG_MAC80211_HT_DEBUG
@@ -615,7 +624,7 @@ int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
 		return -EINVAL;
 
 	spin_lock_bh(&sta->lock);
-	tid_tx = sta->ampdu_mlme.tid_tx[tid];
+	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
 
 	if (!tid_tx) {
 		ret = -ENOENT;
@@ -671,7 +680,7 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
 
 	mutex_lock(&sta->ampdu_mlme.mtx);
 	spin_lock_bh(&sta->lock);
-	tid_tx = sta->ampdu_mlme.tid_tx[tid];
+	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
 
 	if (!tid_tx || !test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
 #ifdef CONFIG_MAC80211_HT_DEBUG
@@ -697,11 +706,11 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
 	ieee80211_agg_splice_packets(local, tid_tx, tid);
 
 	/* future packets must not find the tid_tx struct any more */
-	rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], NULL);
+	ieee80211_assign_tid_tx(sta, tid, NULL);
 
 	ieee80211_agg_splice_finish(local, tid);
 
-	call_rcu(&tid_tx->rcu_head, kfree_tid_tx);
+	kfree_rcu(tid_tx, rcu_head);
 
  unlock_sta:
 	spin_unlock_bh(&sta->lock);
@@ -752,7 +761,7 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
 
 	mutex_lock(&sta->ampdu_mlme.mtx);
 
-	tid_tx = sta->ampdu_mlme.tid_tx[tid];
+	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
 	if (!tid_tx)
 		goto out;
 
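
Several agg-tx.c hunks above replace call_rcu() plus a callback whose only job is kfree() with kfree_rcu(), which takes the object and the name of its embedded rcu_head member. A before/after sketch on a hypothetical structure:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct item {
	int data;
	struct rcu_head rcu_head;
};

/* Before: a callback that only calls kfree() ... */
static void item_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct item, rcu_head));
}

static void item_release_old(struct item *it)
{
	call_rcu(&it->rcu_head, item_free_rcu);
}

/* ... after: kfree_rcu() expresses the same thing directly. */
static void item_release_new(struct item *it)
{
	kfree_rcu(it, rcu_head);
}
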
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 44049733c4ea..be70c70d3f5b 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -136,7 +136,10 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
 	mutex_lock(&sdata->local->sta_mtx);
 
 	if (mac_addr) {
-		sta = sta_info_get_bss(sdata, mac_addr);
+		if (ieee80211_vif_is_mesh(&sdata->vif))
+			sta = sta_info_get(sdata, mac_addr);
+		else
+			sta = sta_info_get_bss(sdata, mac_addr);
 		if (!sta) {
 			ieee80211_key_free(sdata->local, key);
 			err = -ENOENT;
@@ -157,13 +160,14 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
 static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev,
 			     u8 key_idx, bool pairwise, const u8 *mac_addr)
 {
-	struct ieee80211_sub_if_data *sdata;
+	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	struct ieee80211_local *local = sdata->local;
 	struct sta_info *sta;
+	struct ieee80211_key *key = NULL;
 	int ret;
 
-	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-
-	mutex_lock(&sdata->local->sta_mtx);
+	mutex_lock(&local->sta_mtx);
+	mutex_lock(&local->key_mtx);
 
 	if (mac_addr) {
 		ret = -ENOENT;
@@ -172,33 +176,24 @@ static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev,
 		if (!sta)
 			goto out_unlock;
 
-		if (pairwise) {
-			if (sta->ptk) {
-				ieee80211_key_free(sdata->local, sta->ptk);
-				ret = 0;
-			}
-		} else {
-			if (sta->gtk[key_idx]) {
-				ieee80211_key_free(sdata->local,
-						   sta->gtk[key_idx]);
-				ret = 0;
-			}
-		}
-
-		goto out_unlock;
-	}
+		if (pairwise)
+			key = key_mtx_dereference(local, sta->ptk);
+		else
+			key = key_mtx_dereference(local, sta->gtk[key_idx]);
+	} else
+		key = key_mtx_dereference(local, sdata->keys[key_idx]);
 
-	if (!sdata->keys[key_idx]) {
+	if (!key) {
 		ret = -ENOENT;
 		goto out_unlock;
 	}
 
-	ieee80211_key_free(sdata->local, sdata->keys[key_idx]);
-	WARN_ON(sdata->keys[key_idx]);
+	__ieee80211_key_free(key);
 
 	ret = 0;
  out_unlock:
-	mutex_unlock(&sdata->local->sta_mtx);
+	mutex_unlock(&local->key_mtx);
+	mutex_unlock(&local->sta_mtx);
 
 	return ret;
 }
@@ -228,11 +223,11 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
 		goto out;
 
 		if (pairwise)
-			key = sta->ptk;
+			key = rcu_dereference(sta->ptk);
 		else if (key_idx < NUM_DEFAULT_KEYS)
-			key = sta->gtk[key_idx];
+			key = rcu_dereference(sta->gtk[key_idx]);
 	} else
-		key = sdata->keys[key_idx];
+		key = rcu_dereference(sdata->keys[key_idx]);
 
 	if (!key)
 		goto out;
@@ -330,6 +325,7 @@ static void rate_idx_to_bitrate(struct rate_info *rate, struct sta_info *sta, in
 static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
 {
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
+	struct timespec uptime;
 
 	sinfo->generation = sdata->local->sta_generation;
 
@@ -342,7 +338,12 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
 			STATION_INFO_TX_FAILED |
 			STATION_INFO_TX_BITRATE |
 			STATION_INFO_RX_BITRATE |
-			STATION_INFO_RX_DROP_MISC;
+			STATION_INFO_RX_DROP_MISC |
+			STATION_INFO_BSS_PARAM |
+			STATION_INFO_CONNECTED_TIME;
+
+	do_posix_clock_monotonic_gettime(&uptime);
+	sinfo->connected_time = uptime.tv_sec - sta->last_connected;
 
 	sinfo->inactive_time = jiffies_to_msecs(jiffies - sta->last_rx);
 	sinfo->rx_bytes = sta->rx_bytes;
@@ -389,6 +390,16 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
 		sinfo->plink_state = sta->plink_state;
 #endif
 	}
+
+	sinfo->bss_param.flags = 0;
+	if (sdata->vif.bss_conf.use_cts_prot)
+		sinfo->bss_param.flags |= BSS_PARAM_FLAGS_CTS_PROT;
+	if (sdata->vif.bss_conf.use_short_preamble)
+		sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_PREAMBLE;
+	if (sdata->vif.bss_conf.use_short_slot)
+		sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_SLOT_TIME;
+	sinfo->bss_param.dtim_period = sdata->local->hw.conf.ps_dtim_period;
+	sinfo->bss_param.beacon_interval = sdata->vif.bss_conf.beacon_int;
 }
 
 
@@ -452,7 +463,7 @@ static int ieee80211_config_beacon(struct ieee80211_sub_if_data *sdata,
 	int size;
 	int err = -EINVAL;
 
-	old = sdata->u.ap.beacon;
+	old = rtnl_dereference(sdata->u.ap.beacon);
 
 	/* head must not be zero-length */
 	if (params->head && !params->head_len)
@@ -547,8 +558,7 @@ static int ieee80211_add_beacon(struct wiphy *wiphy, struct net_device *dev,
 
 	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 
-	old = sdata->u.ap.beacon;
-
+	old = rtnl_dereference(sdata->u.ap.beacon);
 	if (old)
 		return -EALREADY;
 
@@ -563,8 +573,7 @@ static int ieee80211_set_beacon(struct wiphy *wiphy, struct net_device *dev,
 
 	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 
-	old = sdata->u.ap.beacon;
-
+	old = rtnl_dereference(sdata->u.ap.beacon);
 	if (!old)
 		return -ENOENT;
 
@@ -578,8 +587,7 @@ static int ieee80211_del_beacon(struct wiphy *wiphy, struct net_device *dev)
 
 	sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 
-	old = sdata->u.ap.beacon;
-
+	old = rtnl_dereference(sdata->u.ap.beacon);
 	if (!old)
 		return -ENOENT;
 
@@ -675,6 +683,12 @@ static void sta_apply_parameters(struct ieee80211_local *local,
 		if (set & BIT(NL80211_STA_FLAG_MFP))
 			sta->flags |= WLAN_STA_MFP;
 	}
+
+	if (mask & BIT(NL80211_STA_FLAG_AUTHENTICATED)) {
+		sta->flags &= ~WLAN_STA_AUTH;
+		if (set & BIT(NL80211_STA_FLAG_AUTHENTICATED))
+			sta->flags |= WLAN_STA_AUTH;
+	}
 	spin_unlock_irqrestore(&sta->flaglock, flags);
 
 	/*
@@ -712,15 +726,29 @@ static void sta_apply_parameters(struct ieee80211_local *local,
 					  params->ht_capa,
 					  &sta->sta.ht_cap);
 
-	if (ieee80211_vif_is_mesh(&sdata->vif) && params->plink_action) {
-		switch (params->plink_action) {
-		case PLINK_ACTION_OPEN:
-			mesh_plink_open(sta);
-			break;
-		case PLINK_ACTION_BLOCK:
-			mesh_plink_block(sta);
-			break;
-		}
+	if (ieee80211_vif_is_mesh(&sdata->vif)) {
+#ifdef CONFIG_MAC80211_MESH
+		if (sdata->u.mesh.security & IEEE80211_MESH_SEC_SECURED)
+			switch (params->plink_state) {
+			case NL80211_PLINK_LISTEN:
+			case NL80211_PLINK_ESTAB:
+			case NL80211_PLINK_BLOCKED:
+				sta->plink_state = params->plink_state;
+				break;
+			default:
+				/* nothing */
+				break;
+			}
+		else
+			switch (params->plink_action) {
+			case PLINK_ACTION_OPEN:
+				mesh_plink_open(sta);
+				break;
+			case PLINK_ACTION_BLOCK:
+				mesh_plink_block(sta);
+				break;
+			}
+#endif
 	}
 }
 
@@ -921,8 +949,10 @@ static int ieee80211_change_mpath(struct wiphy *wiphy,
 static void mpath_set_pinfo(struct mesh_path *mpath, u8 *next_hop,
 			    struct mpath_info *pinfo)
 {
-	if (mpath->next_hop)
-		memcpy(next_hop, mpath->next_hop->sta.addr, ETH_ALEN);
+	struct sta_info *next_hop_sta = rcu_dereference(mpath->next_hop);
+
+	if (next_hop_sta)
+		memcpy(next_hop, next_hop_sta->sta.addr, ETH_ALEN);
 	else
 		memset(next_hop, 0, ETH_ALEN);
 
@@ -1023,26 +1053,30 @@ static int copy_mesh_setup(struct ieee80211_if_mesh *ifmsh,
 	u8 *new_ie;
 	const u8 *old_ie;
 
-	/* first allocate the new vendor information element */
+	/* allocate information elements */
 	new_ie = NULL;
-	old_ie = ifmsh->vendor_ie;
+	old_ie = ifmsh->ie;
 
-	ifmsh->vendor_ie_len = setup->vendor_ie_len;
-	if (setup->vendor_ie_len) {
-		new_ie = kmemdup(setup->vendor_ie, setup->vendor_ie_len,
+	if (setup->ie_len) {
+		new_ie = kmemdup(setup->ie, setup->ie_len,
 				GFP_KERNEL);
 		if (!new_ie)
 			return -ENOMEM;
 	}
+	ifmsh->ie_len = setup->ie_len;
+	ifmsh->ie = new_ie;
+	kfree(old_ie);
 
 	/* now copy the rest of the setup parameters */
 	ifmsh->mesh_id_len = setup->mesh_id_len;
 	memcpy(ifmsh->mesh_id, setup->mesh_id, ifmsh->mesh_id_len);
 	ifmsh->mesh_pp_id = setup->path_sel_proto;
 	ifmsh->mesh_pm_id = setup->path_metric;
-	ifmsh->vendor_ie = new_ie;
-
-	kfree(old_ie);
+	ifmsh->security = IEEE80211_MESH_SEC_NONE;
+	if (setup->is_authenticated)
+		ifmsh->security |= IEEE80211_MESH_SEC_AUTHED;
+	if (setup->is_secure)
+		ifmsh->security |= IEEE80211_MESH_SEC_SECURED;
 
 	return 0;
 }
@@ -1275,9 +1309,10 @@ static int ieee80211_set_channel(struct wiphy *wiphy,
 }
 
 #ifdef CONFIG_PM
-static int ieee80211_suspend(struct wiphy *wiphy)
+static int ieee80211_suspend(struct wiphy *wiphy,
+			     struct cfg80211_wowlan *wowlan)
 {
-	return __ieee80211_suspend(wiphy_priv(wiphy));
+	return __ieee80211_suspend(wiphy_priv(wiphy), wowlan);
 }
 
 static int ieee80211_resume(struct wiphy *wiphy)
@@ -1320,6 +1355,30 @@ static int ieee80211_scan(struct wiphy *wiphy,
 	return ieee80211_request_scan(sdata, req);
 }
 
+static int
+ieee80211_sched_scan_start(struct wiphy *wiphy,
+			   struct net_device *dev,
+			   struct cfg80211_sched_scan_request *req)
+{
+	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
+	if (!sdata->local->ops->sched_scan_start)
+		return -EOPNOTSUPP;
+
+	return ieee80211_request_sched_scan_start(sdata, req);
+}
+
+static int
+ieee80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev)
+{
+	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
+	if (!sdata->local->ops->sched_scan_stop)
+		return -EOPNOTSUPP;
+
+	return ieee80211_request_sched_scan_stop(sdata);
+}
+
 static int ieee80211_auth(struct wiphy *wiphy, struct net_device *dev,
 			  struct cfg80211_auth_request *req)
 {
@@ -1611,16 +1670,13 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-	int i;
-
-	/*
-	 * This _could_ be supported by providing a hook for
-	 * drivers for this function, but at this point it
-	 * doesn't seem worth bothering.
-	 */
-	if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)
-		return -EOPNOTSUPP;
+	int i, ret;
 
+	if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
+		ret = drv_set_bitrate_mask(local, sdata, mask);
+		if (ret)
+			return ret;
+	}
 
 	for (i = 0; i < IEEE80211_NUM_BANDS; i++)
 		sdata->rc_rateidx_mask[i] = mask->control[i].legacy;
@@ -2064,6 +2120,8 @@ struct cfg80211_ops mac80211_config_ops = {
 	.suspend = ieee80211_suspend,
 	.resume = ieee80211_resume,
 	.scan = ieee80211_scan,
+	.sched_scan_start = ieee80211_sched_scan_start,
+	.sched_scan_stop = ieee80211_sched_scan_stop,
 	.auth = ieee80211_auth,
 	.assoc = ieee80211_assoc,
 	.deauth = ieee80211_deauth,
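
The rtnl_dereference() conversions in the cfg.c hunks above annotate beacon pointer accesses that are protected by the RTNL rather than by rcu_read_lock(); rtnl_dereference(p) is shorthand for rcu_dereference_protected(p, lockdep_rtnl_is_held()). A sketch on a hypothetical structure:

#include <linux/rtnetlink.h>
#include <linux/rcupdate.h>

struct beacon_data;

struct ap_state {
	struct beacon_data __rcu *beacon;
};

static int ap_has_beacon(struct ap_state *ap)
{
	struct beacon_data *old;

	ASSERT_RTNL();				/* caller holds the RTNL */
	old = rtnl_dereference(ap->beacon);	/* no rcu_read_lock() needed */
	return old != NULL;
}
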
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 51f0d780dafa..186e02f7cc32 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -37,7 +37,7 @@ int mac80211_format_buffer(char __user *userbuf, size_t count,
 	return simple_read_from_buffer(userbuf, count, ppos, buf, res);
 }
 
-#define DEBUGFS_READONLY_FILE(name, fmt, value...)			\
+#define DEBUGFS_READONLY_FILE_FN(name, fmt, value...)			\
 static ssize_t name## _read(struct file *file, char __user *userbuf,	\
 			    size_t count, loff_t *ppos)			\
 {									\
@@ -45,14 +45,19 @@ static ssize_t name## _read(struct file *file, char __user *userbuf, \
 									\
 	return mac80211_format_buffer(userbuf, count, ppos,		\
 				      fmt "\n", ##value);		\
-}									\
-									\
+}
+
+#define DEBUGFS_READONLY_FILE_OPS(name)					\
 static const struct file_operations name## _ops = {			\
 	.read = name## _read,						\
 	.open = mac80211_open_file_generic,				\
 	.llseek = generic_file_llseek,					\
 };
 
+#define DEBUGFS_READONLY_FILE(name, fmt, value...)			\
+	DEBUGFS_READONLY_FILE_FN(name, fmt, value)			\
+	DEBUGFS_READONLY_FILE_OPS(name)
+
 #define DEBUGFS_ADD(name)						\
 	debugfs_create_file(#name, 0400, phyd, local, &name## _ops);
 
58 63
@@ -130,7 +135,7 @@ static ssize_t reset_write(struct file *file, const char __user *user_buf,
130 struct ieee80211_local *local = file->private_data; 135 struct ieee80211_local *local = file->private_data;
131 136
132 rtnl_lock(); 137 rtnl_lock();
133 __ieee80211_suspend(&local->hw); 138 __ieee80211_suspend(&local->hw, NULL);
134 __ieee80211_resume(&local->hw); 139 __ieee80211_resume(&local->hw);
135 rtnl_unlock(); 140 rtnl_unlock();
136 141
@@ -291,11 +296,70 @@ static ssize_t channel_type_read(struct file *file, char __user *user_buf,
291 return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf)); 296 return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
292} 297}
293 298
294static const struct file_operations channel_type_ops = { 299static ssize_t hwflags_read(struct file *file, char __user *user_buf,
295 .read = channel_type_read, 300 size_t count, loff_t *ppos)
296 .open = mac80211_open_file_generic, 301{
297 .llseek = default_llseek, 302 struct ieee80211_local *local = file->private_data;
298}; 303 int mxln = 500;
304 ssize_t rv;
305 char *buf = kzalloc(mxln, GFP_KERNEL);
306 int sf = 0; /* how many written so far */
307
308 sf += snprintf(buf, mxln - sf, "0x%x\n", local->hw.flags);
309 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)
310 sf += snprintf(buf + sf, mxln - sf, "HAS_RATE_CONTROL\n");
311 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
312 sf += snprintf(buf + sf, mxln - sf, "RX_INCLUDES_FCS\n");
313 if (local->hw.flags & IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING)
314 sf += snprintf(buf + sf, mxln - sf,
315 "HOST_BCAST_PS_BUFFERING\n");
316 if (local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE)
317 sf += snprintf(buf + sf, mxln - sf,
318 "2GHZ_SHORT_SLOT_INCAPABLE\n");
319 if (local->hw.flags & IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE)
320 sf += snprintf(buf + sf, mxln - sf,
321 "2GHZ_SHORT_PREAMBLE_INCAPABLE\n");
322 if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)
323 sf += snprintf(buf + sf, mxln - sf, "SIGNAL_UNSPEC\n");
324 if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
325 sf += snprintf(buf + sf, mxln - sf, "SIGNAL_DBM\n");
326 if (local->hw.flags & IEEE80211_HW_NEED_DTIM_PERIOD)
327 sf += snprintf(buf + sf, mxln - sf, "NEED_DTIM_PERIOD\n");
328 if (local->hw.flags & IEEE80211_HW_SPECTRUM_MGMT)
329 sf += snprintf(buf + sf, mxln - sf, "SPECTRUM_MGMT\n");
330 if (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)
331 sf += snprintf(buf + sf, mxln - sf, "AMPDU_AGGREGATION\n");
332 if (local->hw.flags & IEEE80211_HW_SUPPORTS_PS)
333 sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_PS\n");
334 if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
335 sf += snprintf(buf + sf, mxln - sf, "PS_NULLFUNC_STACK\n");
336 if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)
337 sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_DYNAMIC_PS\n");
338 if (local->hw.flags & IEEE80211_HW_MFP_CAPABLE)
339 sf += snprintf(buf + sf, mxln - sf, "MFP_CAPABLE\n");
340 if (local->hw.flags & IEEE80211_HW_BEACON_FILTER)
341 sf += snprintf(buf + sf, mxln - sf, "BEACON_FILTER\n");
342 if (local->hw.flags & IEEE80211_HW_SUPPORTS_STATIC_SMPS)
343 sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_STATIC_SMPS\n");
344 if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS)
345 sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_DYNAMIC_SMPS\n");
346 if (local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)
347 sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_UAPSD\n");
348 if (local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
349 sf += snprintf(buf + sf, mxln - sf, "REPORTS_TX_ACK_STATUS\n");
350 if (local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
351 sf += snprintf(buf + sf, mxln - sf, "CONNECTION_MONITOR\n");
352 if (local->hw.flags & IEEE80211_HW_SUPPORTS_CQM_RSSI)
353 sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_CQM_RSSI\n");
354 if (local->hw.flags & IEEE80211_HW_SUPPORTS_PER_STA_GTK)
355 sf += snprintf(buf + sf, mxln - sf, "SUPPORTS_PER_STA_GTK\n");
356 if (local->hw.flags & IEEE80211_HW_AP_LINK_PS)
357 sf += snprintf(buf + sf, mxln - sf, "AP_LINK_PS\n");
358
359 rv = simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
360 kfree(buf);
361 return rv;
362}
299 363
300static ssize_t queues_read(struct file *file, char __user *user_buf, 364static ssize_t queues_read(struct file *file, char __user *user_buf,
301 size_t count, loff_t *ppos) 365 size_t count, loff_t *ppos)
@@ -315,11 +379,9 @@ static ssize_t queues_read(struct file *file, char __user *user_buf,
315 return simple_read_from_buffer(user_buf, count, ppos, buf, res); 379 return simple_read_from_buffer(user_buf, count, ppos, buf, res);
316} 380}
317 381
318static const struct file_operations queues_ops = { 382DEBUGFS_READONLY_FILE_OPS(hwflags);
319 .read = queues_read, 383DEBUGFS_READONLY_FILE_OPS(channel_type);
320 .open = mac80211_open_file_generic, 384DEBUGFS_READONLY_FILE_OPS(queues);
321 .llseek = default_llseek,
322};
323 385
324/* statistics stuff */ 386/* statistics stuff */
325 387
@@ -395,6 +457,7 @@ void debugfs_hw_add(struct ieee80211_local *local)
395 DEBUGFS_ADD(uapsd_queues); 457 DEBUGFS_ADD(uapsd_queues);
396 DEBUGFS_ADD(uapsd_max_sp_len); 458 DEBUGFS_ADD(uapsd_max_sp_len);
397 DEBUGFS_ADD(channel_type); 459 DEBUGFS_ADD(channel_type);
460 DEBUGFS_ADD(hwflags);
398 DEBUGFS_ADD(user_power); 461 DEBUGFS_ADD(user_power);
399 DEBUGFS_ADD(power); 462 DEBUGFS_ADD(power);
400 463
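[Illustrative sketch, not part of the patch: with DEBUGFS_READONLY_FILE split into _FN and _OPS halves above, a hand-written read handler such as hwflags_read needs only the _OPS half to get its file_operations; the name "example" here is hypothetical.]

static ssize_t example_read(struct file *file, char __user *user_buf,
			    size_t count, loff_t *ppos)
{
	char buf[16];
	int res = scnprintf(buf, sizeof(buf), "%d\n", 42);

	return simple_read_from_buffer(user_buf, count, ppos, buf, res);
}
DEBUGFS_READONLY_FILE_OPS(example);	/* defines example_ops */
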
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index f7ef3477c24a..33c58b85c911 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -241,16 +241,12 @@ void ieee80211_debugfs_key_add(struct ieee80211_key *key)
 	if (!key->debugfs.dir)
 		return;
 
-	rcu_read_lock();
-	sta = rcu_dereference(key->sta);
-	if (sta)
+	sta = key->sta;
+	if (sta) {
 		sprintf(buf, "../../stations/%pM", sta->sta.addr);
-	rcu_read_unlock();
-
-	/* using sta as a boolean is fine outside RCU lock */
-	if (sta)
 		key->debugfs.stalink =
 			debugfs_create_symlink("station", key->debugfs.dir, buf);
+	}
 
 	DEBUGFS_ADD(keylen);
 	DEBUGFS_ADD(flags);
@@ -286,7 +282,8 @@ void ieee80211_debugfs_key_update_default(struct ieee80211_sub_if_data *sdata)
 	lockdep_assert_held(&sdata->local->key_mtx);
 
 	if (sdata->default_unicast_key) {
-		key = sdata->default_unicast_key;
+		key = key_mtx_dereference(sdata->local,
+					  sdata->default_unicast_key);
 		sprintf(buf, "../keys/%d", key->debugfs.cnt);
 		sdata->debugfs.default_unicast_key =
 			debugfs_create_symlink("default_unicast_key",
@@ -297,7 +294,8 @@ void ieee80211_debugfs_key_update_default(struct ieee80211_sub_if_data *sdata)
 	}
 
 	if (sdata->default_multicast_key) {
-		key = sdata->default_multicast_key;
+		key = key_mtx_dereference(sdata->local,
+					  sdata->default_multicast_key);
 		sprintf(buf, "../keys/%d", key->debugfs.cnt);
 		sdata->debugfs.default_multicast_key =
 			debugfs_create_symlink("default_multicast_key",
@@ -316,9 +314,8 @@ void ieee80211_debugfs_key_add_mgmt_default(struct ieee80211_sub_if_data *sdata)
 	if (!sdata->debugfs.dir)
 		return;
 
-	/* this is running under the key lock */
-
-	key = sdata->default_mgmt_key;
+	key = key_mtx_dereference(sdata->local,
+				  sdata->default_mgmt_key);
 	if (key) {
 		sprintf(buf, "../keys/%d", key->debugfs.cnt);
 		sdata->debugfs.default_mgmt_key =
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index c04a1396cf8d..a01d2137fddc 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -92,6 +92,31 @@ static ssize_t sta_inactive_ms_read(struct file *file, char __user *userbuf,
 }
 STA_OPS(inactive_ms);
 
+
+static ssize_t sta_connected_time_read(struct file *file, char __user *userbuf,
+				       size_t count, loff_t *ppos)
+{
+	struct sta_info *sta = file->private_data;
+	struct timespec uptime;
+	struct tm result;
+	long connected_time_secs;
+	char buf[100];
+	int res;
+	do_posix_clock_monotonic_gettime(&uptime);
+	connected_time_secs = uptime.tv_sec - sta->last_connected;
+	time_to_tm(connected_time_secs, 0, &result);
+	result.tm_year -= 70;
+	result.tm_mday -= 1;
+	res = scnprintf(buf, sizeof(buf),
+		"years - %ld\nmonths - %d\ndays - %d\nclock - %d:%d:%d\n\n",
+		result.tm_year, result.tm_mon, result.tm_mday,
+		result.tm_hour, result.tm_min, result.tm_sec);
+	return simple_read_from_buffer(userbuf, count, ppos, buf, res);
+}
+STA_OPS(connected_time);
+
+
+
 static ssize_t sta_last_seq_ctrl_read(struct file *file, char __user *userbuf,
 				      size_t count, loff_t *ppos)
 {
@@ -324,6 +349,7 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta)
 	DEBUGFS_ADD(flags);
 	DEBUGFS_ADD(num_ps_buf_frames);
 	DEBUGFS_ADD(inactive_ms);
+	DEBUGFS_ADD(connected_time);
 	DEBUGFS_ADD(last_seq_ctrl);
 	DEBUGFS_ADD(agg_status);
 	DEBUGFS_ADD(dev);
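[Illustrative sketch, not part of the patch: time_to_tm() treats its argument as seconds since the 1970 epoch, so tm_year comes back counted from 1900 and tm_mday starts at 1. For an elapsed interval the handler above therefore subtracts 70 years and one day to obtain zero-based duration fields, e.g.:]

	struct tm result;

	/* 90061 s = 1 day + 1 h + 1 min + 1 s of connected time */
	time_to_tm(90061, 0, &result);	/* -> Jan 2 1970, 01:01:01 */
	result.tm_year -= 70;		/* years elapsed: 0 */
	result.tm_mday -= 1;		/* whole days: 1 */
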
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 9c0d62bb0ea3..eebf7a67daf7 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -41,6 +41,33 @@ static inline void drv_stop(struct ieee80211_local *local)
 	local->started = false;
 }
 
+#ifdef CONFIG_PM
+static inline int drv_suspend(struct ieee80211_local *local,
+			      struct cfg80211_wowlan *wowlan)
+{
+	int ret;
+
+	might_sleep();
+
+	trace_drv_suspend(local);
+	ret = local->ops->suspend(&local->hw, wowlan);
+	trace_drv_return_int(local, ret);
+	return ret;
+}
+
+static inline int drv_resume(struct ieee80211_local *local)
+{
+	int ret;
+
+	might_sleep();
+
+	trace_drv_resume(local);
+	ret = local->ops->resume(&local->hw);
+	trace_drv_return_int(local, ret);
+	return ret;
+}
+#endif
+
 static inline int drv_add_interface(struct ieee80211_local *local,
 				    struct ieee80211_vif *vif)
 {
@@ -185,12 +212,39 @@ static inline int drv_hw_scan(struct ieee80211_local *local,
 
 	might_sleep();
 
-	trace_drv_hw_scan(local, sdata, req);
+	trace_drv_hw_scan(local, sdata);
 	ret = local->ops->hw_scan(&local->hw, &sdata->vif, req);
 	trace_drv_return_int(local, ret);
 	return ret;
 }
 
+static inline int
+drv_sched_scan_start(struct ieee80211_local *local,
+		     struct ieee80211_sub_if_data *sdata,
+		     struct cfg80211_sched_scan_request *req,
+		     struct ieee80211_sched_scan_ies *ies)
+{
+	int ret;
+
+	might_sleep();
+
+	trace_drv_sched_scan_start(local, sdata);
+	ret = local->ops->sched_scan_start(&local->hw, &sdata->vif,
+					   req, ies);
+	trace_drv_return_int(local, ret);
+	return ret;
+}
+
+static inline void drv_sched_scan_stop(struct ieee80211_local *local,
+				       struct ieee80211_sub_if_data *sdata)
+{
+	might_sleep();
+
+	trace_drv_sched_scan_stop(local, sdata);
+	local->ops->sched_scan_stop(&local->hw, &sdata->vif);
+	trace_drv_return_void(local);
+}
+
 static inline void drv_sw_scan_start(struct ieee80211_local *local)
 {
 	might_sleep();
@@ -552,4 +606,35 @@ static inline void drv_get_ringparam(struct ieee80211_local *local,
 	trace_drv_return_void(local);
 }
 
+static inline bool drv_tx_frames_pending(struct ieee80211_local *local)
+{
+	bool ret = false;
+
+	might_sleep();
+
+	trace_drv_tx_frames_pending(local);
+	if (local->ops->tx_frames_pending)
+		ret = local->ops->tx_frames_pending(&local->hw);
+	trace_drv_return_bool(local, ret);
+
+	return ret;
+}
+
+static inline int drv_set_bitrate_mask(struct ieee80211_local *local,
+				       struct ieee80211_sub_if_data *sdata,
+				       const struct cfg80211_bitrate_mask *mask)
+{
+	int ret = -EOPNOTSUPP;
+
+	might_sleep();
+
+	trace_drv_set_bitrate_mask(local, sdata, mask);
+	if (local->ops->set_bitrate_mask)
+		ret = local->ops->set_bitrate_mask(&local->hw,
+						   &sdata->vif, mask);
+	trace_drv_return_int(local, ret);
+
+	return ret;
+}
+
 #endif /* __MAC80211_DRIVER_OPS */
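[Illustrative sketch, not part of the patch: the wrappers above share one pattern -- might_sleep(), trace the call, fall back to a default when the driver left the op NULL, trace the return. A wrapper for a hypothetical "do_thing" op would follow the same shape:]

static inline int drv_do_thing(struct ieee80211_local *local)
{
	int ret = -EOPNOTSUPP;	/* default when the op is not provided */

	might_sleep();

	if (local->ops->do_thing)
		ret = local->ops->do_thing(&local->hw);
	return ret;
}
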
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h
index 45aab80738e2..ed9edcbd9aa5 100644
--- a/net/mac80211/driver-trace.h
+++ b/net/mac80211/driver-trace.h
@@ -55,6 +55,70 @@ DECLARE_EVENT_CLASS(local_only_evt,
 	TP_printk(LOCAL_PR_FMT, LOCAL_PR_ARG)
 );
 
+DECLARE_EVENT_CLASS(local_sdata_addr_evt,
+	TP_PROTO(struct ieee80211_local *local,
+		 struct ieee80211_sub_if_data *sdata),
+	TP_ARGS(local, sdata),
+
+	TP_STRUCT__entry(
+		LOCAL_ENTRY
+		VIF_ENTRY
+		__array(char, addr, 6)
+	),
+
+	TP_fast_assign(
+		LOCAL_ASSIGN;
+		VIF_ASSIGN;
+		memcpy(__entry->addr, sdata->vif.addr, 6);
+	),
+
+	TP_printk(
+		LOCAL_PR_FMT VIF_PR_FMT " addr:%pM",
+		LOCAL_PR_ARG, VIF_PR_ARG, __entry->addr
+	)
+);
+
+DECLARE_EVENT_CLASS(local_u32_evt,
+	TP_PROTO(struct ieee80211_local *local, u32 value),
+	TP_ARGS(local, value),
+
+	TP_STRUCT__entry(
+		LOCAL_ENTRY
+		__field(u32, value)
+	),
+
+	TP_fast_assign(
+		LOCAL_ASSIGN;
+		__entry->value = value;
+	),
+
+	TP_printk(
+		LOCAL_PR_FMT " value:%d",
+		LOCAL_PR_ARG, __entry->value
+	)
+);
+
+DECLARE_EVENT_CLASS(local_sdata_evt,
+	TP_PROTO(struct ieee80211_local *local,
+		 struct ieee80211_sub_if_data *sdata),
+	TP_ARGS(local, sdata),
+
+	TP_STRUCT__entry(
+		LOCAL_ENTRY
+		VIF_ENTRY
+	),
+
+	TP_fast_assign(
+		LOCAL_ASSIGN;
+		VIF_ASSIGN;
+	),
+
+	TP_printk(
+		LOCAL_PR_FMT VIF_PR_FMT,
+		LOCAL_PR_ARG, VIF_PR_ARG
+	)
+);
+
 DEFINE_EVENT(local_only_evt, drv_return_void,
 	TP_PROTO(struct ieee80211_local *local),
 	TP_ARGS(local)
@@ -74,6 +138,21 @@ TRACE_EVENT(drv_return_int,
 	TP_printk(LOCAL_PR_FMT " - %d", LOCAL_PR_ARG, __entry->ret)
 );
 
+TRACE_EVENT(drv_return_bool,
+	TP_PROTO(struct ieee80211_local *local, bool ret),
+	TP_ARGS(local, ret),
+	TP_STRUCT__entry(
+		LOCAL_ENTRY
+		__field(bool, ret)
+	),
+	TP_fast_assign(
+		LOCAL_ASSIGN;
+		__entry->ret = ret;
+	),
+	TP_printk(LOCAL_PR_FMT " - %s", LOCAL_PR_ARG, (__entry->ret) ?
+		  "true" : "false")
+);
+
 TRACE_EVENT(drv_return_u64,
 	TP_PROTO(struct ieee80211_local *local, u64 ret),
 	TP_ARGS(local, ret),
@@ -93,33 +172,25 @@ DEFINE_EVENT(local_only_evt, drv_start,
 	TP_ARGS(local)
 );
 
+DEFINE_EVENT(local_only_evt, drv_suspend,
+	TP_PROTO(struct ieee80211_local *local),
+	TP_ARGS(local)
+);
+
+DEFINE_EVENT(local_only_evt, drv_resume,
+	TP_PROTO(struct ieee80211_local *local),
+	TP_ARGS(local)
+);
+
 DEFINE_EVENT(local_only_evt, drv_stop,
 	TP_PROTO(struct ieee80211_local *local),
 	TP_ARGS(local)
 );
 
-TRACE_EVENT(drv_add_interface,
+DEFINE_EVENT(local_sdata_addr_evt, drv_add_interface,
 	TP_PROTO(struct ieee80211_local *local,
 		 struct ieee80211_sub_if_data *sdata),
-
-	TP_ARGS(local, sdata),
-
-	TP_STRUCT__entry(
-		LOCAL_ENTRY
-		VIF_ENTRY
-		__array(char, addr, 6)
-	),
-
-	TP_fast_assign(
-		LOCAL_ASSIGN;
-		VIF_ASSIGN;
-		memcpy(__entry->addr, sdata->vif.addr, 6);
-	),
-
-	TP_printk(
-		LOCAL_PR_FMT VIF_PR_FMT " addr:%pM",
-		LOCAL_PR_ARG, VIF_PR_ARG, __entry->addr
-	)
+	TP_ARGS(local, sdata)
 );
 
 TRACE_EVENT(drv_change_interface,
@@ -150,27 +221,10 @@ TRACE_EVENT(drv_change_interface,
 	)
 );
 
-TRACE_EVENT(drv_remove_interface,
-	TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata),
-
-	TP_ARGS(local, sdata),
-
-	TP_STRUCT__entry(
-		LOCAL_ENTRY
-		VIF_ENTRY
-		__array(char, addr, 6)
-	),
-
-	TP_fast_assign(
-		LOCAL_ASSIGN;
-		VIF_ASSIGN;
-		memcpy(__entry->addr, sdata->vif.addr, 6);
-	),
-
-	TP_printk(
-		LOCAL_PR_FMT VIF_PR_FMT " addr:%pM",
-		LOCAL_PR_ARG, VIF_PR_ARG, __entry->addr
-	)
+DEFINE_EVENT(local_sdata_addr_evt, drv_remove_interface,
+	TP_PROTO(struct ieee80211_local *local,
+		 struct ieee80211_sub_if_data *sdata),
+	TP_ARGS(local, sdata)
 );
 
 TRACE_EVENT(drv_config,
@@ -400,27 +454,22 @@ TRACE_EVENT(drv_update_tkip_key,
 	)
 );
 
-TRACE_EVENT(drv_hw_scan,
+DEFINE_EVENT(local_sdata_evt, drv_hw_scan,
 	TP_PROTO(struct ieee80211_local *local,
-		 struct ieee80211_sub_if_data *sdata,
-		 struct cfg80211_scan_request *req),
-
-	TP_ARGS(local, sdata, req),
-
-	TP_STRUCT__entry(
-		LOCAL_ENTRY
-		VIF_ENTRY
-	),
+		 struct ieee80211_sub_if_data *sdata),
+	TP_ARGS(local, sdata)
+);
 
-	TP_fast_assign(
-		LOCAL_ASSIGN;
-		VIF_ASSIGN;
-	),
+DEFINE_EVENT(local_sdata_evt, drv_sched_scan_start,
+	TP_PROTO(struct ieee80211_local *local,
+		 struct ieee80211_sub_if_data *sdata),
+	TP_ARGS(local, sdata)
+);
 
-	TP_printk(
-		LOCAL_PR_FMT VIF_PR_FMT,
-		LOCAL_PR_ARG,VIF_PR_ARG
-	)
+DEFINE_EVENT(local_sdata_evt, drv_sched_scan_stop,
+	TP_PROTO(struct ieee80211_local *local,
+		 struct ieee80211_sub_if_data *sdata),
+	TP_ARGS(local, sdata)
 );
 
 DEFINE_EVENT(local_only_evt, drv_sw_scan_start,
@@ -489,46 +538,14 @@ TRACE_EVENT(drv_get_tkip_seq,
 	)
 );
 
-TRACE_EVENT(drv_set_frag_threshold,
+DEFINE_EVENT(local_u32_evt, drv_set_frag_threshold,
 	TP_PROTO(struct ieee80211_local *local, u32 value),
-
-	TP_ARGS(local, value),
-
-	TP_STRUCT__entry(
-		LOCAL_ENTRY
-		__field(u32, value)
-	),
-
-	TP_fast_assign(
-		LOCAL_ASSIGN;
-		__entry->value = value;
-	),
-
-	TP_printk(
-		LOCAL_PR_FMT " value:%d",
-		LOCAL_PR_ARG, __entry->value
-	)
+	TP_ARGS(local, value)
 );
 
-TRACE_EVENT(drv_set_rts_threshold,
+DEFINE_EVENT(local_u32_evt, drv_set_rts_threshold,
 	TP_PROTO(struct ieee80211_local *local, u32 value),
-
-	TP_ARGS(local, value),
-
-	TP_STRUCT__entry(
-		LOCAL_ENTRY
-		__field(u32, value)
-	),
-
-	TP_fast_assign(
-		LOCAL_ASSIGN;
-		__entry->value = value;
-	),
-
-	TP_printk(
-		LOCAL_PR_FMT " value:%d",
-		LOCAL_PR_ARG, __entry->value
-	)
+	TP_ARGS(local, value)
 );
 
 TRACE_EVENT(drv_set_coverage_class,
@@ -964,11 +981,43 @@ TRACE_EVENT(drv_get_ringparam,
 	)
 );
 
+DEFINE_EVENT(local_only_evt, drv_tx_frames_pending,
+	TP_PROTO(struct ieee80211_local *local),
+	TP_ARGS(local)
+);
+
 DEFINE_EVENT(local_only_evt, drv_offchannel_tx_cancel_wait,
 	TP_PROTO(struct ieee80211_local *local),
 	TP_ARGS(local)
 );
 
+TRACE_EVENT(drv_set_bitrate_mask,
+	TP_PROTO(struct ieee80211_local *local,
+		 struct ieee80211_sub_if_data *sdata,
+		 const struct cfg80211_bitrate_mask *mask),
+
+	TP_ARGS(local, sdata, mask),
+
+	TP_STRUCT__entry(
+		LOCAL_ENTRY
+		VIF_ENTRY
+		__field(u32, legacy_2g)
+		__field(u32, legacy_5g)
+	),
+
+	TP_fast_assign(
+		LOCAL_ASSIGN;
+		VIF_ASSIGN;
+		__entry->legacy_2g = mask->control[IEEE80211_BAND_2GHZ].legacy;
+		__entry->legacy_5g = mask->control[IEEE80211_BAND_5GHZ].legacy;
+	),
+
+	TP_printk(
+		LOCAL_PR_FMT VIF_PR_FMT " 2G Mask:0x%x 5G Mask:0x%x",
+		LOCAL_PR_ARG, VIF_PR_ARG, __entry->legacy_2g, __entry->legacy_5g
+	)
+);
+
 /*
  * Tracing for API calls that drivers call.
  */
@@ -1147,6 +1196,42 @@ TRACE_EVENT(api_scan_completed,
 	)
 );
 
+TRACE_EVENT(api_sched_scan_results,
+	TP_PROTO(struct ieee80211_local *local),
+
+	TP_ARGS(local),
+
+	TP_STRUCT__entry(
+		LOCAL_ENTRY
+	),
+
+	TP_fast_assign(
+		LOCAL_ASSIGN;
+	),
+
+	TP_printk(
+		LOCAL_PR_FMT, LOCAL_PR_ARG
+	)
+);
+
+TRACE_EVENT(api_sched_scan_stopped,
+	TP_PROTO(struct ieee80211_local *local),
+
+	TP_ARGS(local),
+
+	TP_STRUCT__entry(
+		LOCAL_ENTRY
+	),
+
+	TP_fast_assign(
+		LOCAL_ASSIGN;
+	),
+
+	TP_printk(
+		LOCAL_PR_FMT, LOCAL_PR_ARG
+	)
+);
+
 TRACE_EVENT(api_sta_block_awake,
 	TP_PROTO(struct ieee80211_local *local,
 		 struct ieee80211_sta *sta, bool block),
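[Illustrative sketch, not part of the patch: once a class such as local_sdata_evt exists, any further tracepoint with the same prototype costs a three-line DEFINE_EVENT instead of a full TRACE_EVENT; "drv_example" here is a hypothetical event name.]

DEFINE_EVENT(local_sdata_evt, drv_example,
	TP_PROTO(struct ieee80211_local *local,
		 struct ieee80211_sub_if_data *sdata),
	TP_ARGS(local, sdata)
);
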
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index b9e4b9bd2179..591add22bcc0 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -140,14 +140,29 @@ void ieee80211_ba_session_work(struct work_struct *work)
 				sta, tid, WLAN_BACK_RECIPIENT,
 				WLAN_REASON_QSTA_TIMEOUT, true);
 
-		tid_tx = sta->ampdu_mlme.tid_tx[tid];
-		if (!tid_tx)
-			continue;
+		tid_tx = sta->ampdu_mlme.tid_start_tx[tid];
+		if (tid_tx) {
+			/*
+			 * Assign it over to the normal tid_tx array
+			 * where it "goes live".
+			 */
+			spin_lock_bh(&sta->lock);
+
+			sta->ampdu_mlme.tid_start_tx[tid] = NULL;
+			/* could there be a race? */
+			if (sta->ampdu_mlme.tid_tx[tid])
+				kfree(tid_tx);
+			else
+				ieee80211_assign_tid_tx(sta, tid, tid_tx);
+			spin_unlock_bh(&sta->lock);
 
-		if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state))
 			ieee80211_tx_ba_session_handle_start(sta, tid);
-		else if (test_and_clear_bit(HT_AGG_STATE_WANT_STOP,
-					    &tid_tx->state))
+			continue;
+		}
+
+		tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
+		if (tid_tx && test_and_clear_bit(HT_AGG_STATE_WANT_STOP,
+						 &tid_tx->state))
 			___ieee80211_stop_tx_ba_session(sta, tid,
 							WLAN_BACK_INITIATOR,
 							true);
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 3e81af1fce58..56c24cabf26d 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -40,7 +40,7 @@ static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata,
 				    struct ieee80211_mgmt *mgmt,
 				    size_t len)
 {
-	u16 auth_alg, auth_transaction, status_code;
+	u16 auth_alg, auth_transaction;
 
 	lockdep_assert_held(&sdata->u.ibss.mtx);
 
@@ -49,7 +49,6 @@ static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata,
 
 	auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
 	auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
-	status_code = le16_to_cpu(mgmt->u.auth.status_code);
 
 	/*
 	 * IEEE 802.11 standard does not require authentication in IBSS
@@ -527,8 +526,6 @@ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata)
 static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
-	struct ieee80211_local *local = sdata->local;
-	struct ieee80211_supported_band *sband;
 	u8 bssid[ETH_ALEN];
 	u16 capability;
 	int i;
@@ -551,8 +548,6 @@ static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
 	printk(KERN_DEBUG "%s: Creating new IBSS network, BSSID %pM\n",
 	       sdata->name, bssid);
 
-	sband = local->hw.wiphy->bands[ifibss->channel->band];
-
 	capability = WLAN_CAPABILITY_IBSS;
 
 	if (ifibss->privacy)
@@ -661,19 +656,22 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
 static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
 					struct sk_buff *req)
 {
-	struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(req);
 	struct ieee80211_mgmt *mgmt = (void *)req->data;
 	struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
 	struct ieee80211_local *local = sdata->local;
 	int tx_last_beacon, len = req->len;
 	struct sk_buff *skb;
 	struct ieee80211_mgmt *resp;
+	struct sk_buff *presp;
 	u8 *pos, *end;
 
 	lockdep_assert_held(&ifibss->mtx);
 
+	presp = rcu_dereference_protected(ifibss->presp,
+					  lockdep_is_held(&ifibss->mtx));
+
 	if (ifibss->state != IEEE80211_IBSS_MLME_JOINED ||
-	    len < 24 + 2 || !ifibss->presp)
+	    len < 24 + 2 || !presp)
 		return;
 
 	tx_last_beacon = drv_tx_last_beacon(local);
@@ -685,7 +683,7 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
 	       mgmt->bssid, tx_last_beacon);
 #endif /* CONFIG_MAC80211_IBSS_DEBUG */
 
-	if (!tx_last_beacon && !(rx_status->rx_flags & IEEE80211_RX_RA_MATCH))
+	if (!tx_last_beacon && is_multicast_ether_addr(mgmt->da))
 		return;
 
 	if (memcmp(mgmt->bssid, ifibss->bssid, ETH_ALEN) != 0 &&
@@ -711,7 +709,7 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
 	}
 
 	/* Reply with ProbeResp */
-	skb = skb_copy(ifibss->presp, GFP_KERNEL);
+	skb = skb_copy(presp, GFP_KERNEL);
 	if (!skb)
 		return;
 
@@ -967,6 +965,10 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
 
 	mutex_lock(&sdata->u.ibss.mtx);
 
+	sdata->u.ibss.state = IEEE80211_IBSS_MLME_SEARCH;
+	memset(sdata->u.ibss.bssid, 0, ETH_ALEN);
+	sdata->u.ibss.ssid_len = 0;
+
 	active_ibss = ieee80211_sta_active_ibss(sdata);
 
 	if (!active_ibss && !is_zero_ether_addr(ifibss->bssid)) {
@@ -991,7 +993,8 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
 
 	/* remove beacon */
 	kfree(sdata->u.ibss.ie);
-	skb = sdata->u.ibss.presp;
+	skb = rcu_dereference_protected(sdata->u.ibss.presp,
+					lockdep_is_held(&sdata->u.ibss.mtx));
 	rcu_assign_pointer(sdata->u.ibss.presp, NULL);
 	sdata->vif.bss_conf.ibss_joined = false;
 	ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED |
@@ -1000,8 +1003,6 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
 	kfree_skb(skb);
 
 	skb_queue_purge(&sdata->skb_queue);
-	memset(sdata->u.ibss.bssid, 0, ETH_ALEN);
-	sdata->u.ibss.ssid_len = 0;
 
 	del_timer_sync(&sdata->u.ibss.timer);
 
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index c18396c248d7..090b0ec1e056 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -214,7 +214,7 @@ struct beacon_data {
 };
 
 struct ieee80211_if_ap {
-	struct beacon_data *beacon;
+	struct beacon_data __rcu *beacon;
 
 	struct list_head vlans;
 
@@ -237,7 +237,7 @@ struct ieee80211_if_vlan {
 	struct list_head list;
 
 	/* used for all tx if the VLAN is configured to 4-addr mode */
-	struct sta_info *sta;
+	struct sta_info __rcu *sta;
 };
 
 struct mesh_stats {
@@ -442,7 +442,8 @@ struct ieee80211_if_ibss {
 
 	unsigned long ibss_join_req;
 	/* probe response/beacon for IBSS */
-	struct sk_buff *presp, *skb;
+	struct sk_buff __rcu *presp;
+	struct sk_buff *skb;
 
 	enum {
 		IEEE80211_IBSS_MLME_SEARCH,
@@ -488,8 +489,13 @@ struct ieee80211_if_mesh {
 	struct mesh_config mshcfg;
 	u32 mesh_seqnum;
 	bool accepting_plinks;
-	const u8 *vendor_ie;
-	u8 vendor_ie_len;
+	const u8 *ie;
+	u8 ie_len;
+	enum {
+		IEEE80211_MESH_SEC_NONE = 0x0,
+		IEEE80211_MESH_SEC_AUTHED = 0x1,
+		IEEE80211_MESH_SEC_SECURED = 0x2,
+	} security;
 };
 
 #ifdef CONFIG_MAC80211_MESH
@@ -562,9 +568,10 @@ struct ieee80211_sub_if_data {
 	struct ieee80211_fragment_entry	fragments[IEEE80211_FRAGMENT_MAX];
 	unsigned int fragment_next;
 
-	struct ieee80211_key *keys[NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS];
-	struct ieee80211_key *default_unicast_key, *default_multicast_key;
-	struct ieee80211_key *default_mgmt_key;
+	struct ieee80211_key __rcu *keys[NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS];
+	struct ieee80211_key __rcu *default_unicast_key;
+	struct ieee80211_key __rcu *default_multicast_key;
+	struct ieee80211_key __rcu *default_mgmt_key;
 
 	u16 sequence_number;
 	__be16 control_port_protocol;
@@ -763,6 +770,9 @@ struct ieee80211_local {
 	/* device is started */
 	bool started;
 
+	/* wowlan is enabled -- don't reconfig on resume */
+	bool wowlan;
+
 	int tx_headroom; /* required headroom for hardware/radiotap */
 
 	/* Tasklet and skb queue to process calls from IRQ mode. All frames
@@ -794,7 +804,7 @@ struct ieee80211_local {
 	spinlock_t sta_lock;
 	unsigned long num_sta;
 	struct list_head sta_list, sta_pending_list;
-	struct sta_info *sta_hash[STA_HASH_SIZE];
+	struct sta_info __rcu *sta_hash[STA_HASH_SIZE];
 	struct timer_list sta_cleanup;
 	struct work_struct sta_finish_work;
 	int sta_generation;
@@ -809,8 +819,8 @@ struct ieee80211_local {
 
 	struct rate_control_ref *rate_ctrl;
 
-	struct crypto_blkcipher *wep_tx_tfm;
-	struct crypto_blkcipher *wep_rx_tfm;
+	struct crypto_cipher *wep_tx_tfm;
+	struct crypto_cipher *wep_rx_tfm;
 	u32 wep_iv;
 
 	/* see iface.c */
@@ -836,6 +846,10 @@ struct ieee80211_local {
 	int scan_channel_idx;
 	int scan_ies_len;
 
+	bool sched_scanning;
+	struct ieee80211_sched_scan_ies sched_scan_ies;
+	struct work_struct sched_scan_stopped_work;
+
 	unsigned long leave_oper_channel_time;
 	enum mac80211_scan_state next_scan_state;
 	struct delayed_work scan_work;
@@ -1143,6 +1157,12 @@ ieee80211_rx_bss_get(struct ieee80211_local *local, u8 *bssid, int freq,
 void ieee80211_rx_bss_put(struct ieee80211_local *local,
 			  struct ieee80211_bss *bss);
 
+/* scheduled scan handling */
+int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
+				       struct cfg80211_sched_scan_request *req);
+int ieee80211_request_sched_scan_stop(struct ieee80211_sub_if_data *sdata);
+void ieee80211_sched_scan_stopped_work(struct work_struct *work);
+
 /* off-channel helpers */
 bool ieee80211_cfg_on_oper_channel(struct ieee80211_local *local);
 void ieee80211_offchannel_enable_all_ps(struct ieee80211_local *local,
@@ -1246,7 +1266,8 @@ int ieee80211_reconfig(struct ieee80211_local *local);
 void ieee80211_stop_device(struct ieee80211_local *local);
 
 #ifdef CONFIG_PM
-int __ieee80211_suspend(struct ieee80211_hw *hw);
+int __ieee80211_suspend(struct ieee80211_hw *hw,
+			struct cfg80211_wowlan *wowlan);
 
 static inline int __ieee80211_resume(struct ieee80211_hw *hw)
 {
@@ -1259,7 +1280,8 @@ static inline int __ieee80211_resume(struct ieee80211_hw *hw)
 	return ieee80211_reconfig(hw_to_local(hw));
 }
 #else
-static inline int __ieee80211_suspend(struct ieee80211_hw *hw)
+static inline int __ieee80211_suspend(struct ieee80211_hw *hw,
+				      struct cfg80211_wowlan *wowlan)
 {
 	return 0;
 }
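[Illustrative sketch, not part of the patch: the __rcu annotations added above let sparse verify that these pointers only pass through the RCU accessors. A writer updating the IBSS probe response under its mutex would follow this shape; "example_set_presp" is a hypothetical helper.]

static void example_set_presp(struct ieee80211_sub_if_data *sdata,
			      struct sk_buff *new_presp)
{
	struct sk_buff *old;

	/* read side under the lock: no RCU read section needed */
	old = rcu_dereference_protected(sdata->u.ibss.presp,
					lockdep_is_held(&sdata->u.ibss.mtx));
	rcu_assign_pointer(sdata->u.ibss.presp, new_presp);
	if (old) {
		synchronize_rcu();	/* let readers drop the old skb */
		kfree_skb(old);
	}
}
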
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 4054399be907..dee30aea9ab3 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -384,11 +384,11 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
 	int i;
 	enum nl80211_channel_type orig_ct;
 
+	clear_bit(SDATA_STATE_RUNNING, &sdata->state);
+
 	if (local->scan_sdata == sdata)
 		ieee80211_scan_cancel(local);
 
-	clear_bit(SDATA_STATE_RUNNING, &sdata->state);
-
 	/*
 	 * Stop TX on this interface first.
 	 */
@@ -449,7 +449,8 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
 	/* APs need special treatment */
 	if (sdata->vif.type == NL80211_IFTYPE_AP) {
 		struct ieee80211_sub_if_data *vlan, *tmpsdata;
-		struct beacon_data *old_beacon = sdata->u.ap.beacon;
+		struct beacon_data *old_beacon =
+			rtnl_dereference(sdata->u.ap.beacon);
 
 		/* sdata_running will return false, so this will disable */
 		ieee80211_bss_info_change_notify(sdata,
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index af3c56482c80..f825e2f0a57e 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -186,7 +186,7 @@ static void __ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata,
 	assert_key_lock(sdata->local);
 
 	if (idx >= 0 && idx < NUM_DEFAULT_KEYS)
-		key = sdata->keys[idx];
+		key = key_mtx_dereference(sdata->local, sdata->keys[idx]);
 
 	if (uni)
 		rcu_assign_pointer(sdata->default_unicast_key, key);
@@ -213,7 +213,7 @@ __ieee80211_set_default_mgmt_key(struct ieee80211_sub_if_data *sdata, int idx)
 
 	if (idx >= NUM_DEFAULT_KEYS &&
 	    idx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
-		key = sdata->keys[idx];
+		key = key_mtx_dereference(sdata->local, sdata->keys[idx]);
 
 	rcu_assign_pointer(sdata->default_mgmt_key, key);
 
@@ -257,9 +257,15 @@ static void __ieee80211_key_replace(struct ieee80211_sub_if_data *sdata,
 	else
 		idx = new->conf.keyidx;
 
-	defunikey = old && sdata->default_unicast_key == old;
-	defmultikey = old && sdata->default_multicast_key == old;
-	defmgmtkey = old && sdata->default_mgmt_key == old;
+	defunikey = old &&
+		old == key_mtx_dereference(sdata->local,
+					   sdata->default_unicast_key);
+	defmultikey = old &&
+		old == key_mtx_dereference(sdata->local,
+					   sdata->default_multicast_key);
+	defmgmtkey = old &&
+		old == key_mtx_dereference(sdata->local,
+					   sdata->default_mgmt_key);
 
 	if (defunikey && !new)
 		__ieee80211_set_default_key(sdata, -1, true, false);
@@ -440,11 +446,11 @@ int ieee80211_key_link(struct ieee80211_key *key,
 	mutex_lock(&sdata->local->key_mtx);
 
 	if (sta && pairwise)
-		old_key = sta->ptk;
+		old_key = key_mtx_dereference(sdata->local, sta->ptk);
 	else if (sta)
-		old_key = sta->gtk[idx];
+		old_key = key_mtx_dereference(sdata->local, sta->gtk[idx]);
 	else
-		old_key = sdata->keys[idx];
+		old_key = key_mtx_dereference(sdata->local, sdata->keys[idx]);
 
 	__ieee80211_key_replace(sdata, sta, pairwise, old_key, key);
 	__ieee80211_key_destroy(old_key);
@@ -458,8 +464,11 @@ int ieee80211_key_link(struct ieee80211_key *key,
 	return ret;
 }
 
-static void __ieee80211_key_free(struct ieee80211_key *key)
+void __ieee80211_key_free(struct ieee80211_key *key)
 {
+	if (!key)
+		return;
+
 	/*
 	 * Replace key with nothingness if it was ever used.
 	 */
@@ -473,9 +482,6 @@ static void __ieee80211_key_free(struct ieee80211_key *key)
 void ieee80211_key_free(struct ieee80211_local *local,
 			struct ieee80211_key *key)
 {
-	if (!key)
-		return;
-
 	mutex_lock(&local->key_mtx);
 	__ieee80211_key_free(key);
 	mutex_unlock(&local->key_mtx);
diff --git a/net/mac80211/key.h b/net/mac80211/key.h
index 4ddbe27eb570..d801d5351336 100644
--- a/net/mac80211/key.h
+++ b/net/mac80211/key.h
@@ -135,6 +135,7 @@ struct ieee80211_key *ieee80211_key_alloc(u32 cipher, int idx, size_t key_len,
 int __must_check ieee80211_key_link(struct ieee80211_key *key,
 				    struct ieee80211_sub_if_data *sdata,
 				    struct sta_info *sta);
+void __ieee80211_key_free(struct ieee80211_key *key);
 void ieee80211_key_free(struct ieee80211_local *local,
 			struct ieee80211_key *key);
 void ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, int idx,
@@ -145,4 +146,7 @@ void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata);
 void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata);
 void ieee80211_disable_keys(struct ieee80211_sub_if_data *sdata);
 
+#define key_mtx_dereference(local, ref) \
+	rcu_dereference_protected(ref, lockdep_is_held(&((local)->key_mtx)))
+
 #endif /* IEEE80211_KEY_H */
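[Illustrative sketch, not part of the patch: key_mtx_dereference() expresses "this __rcu pointer is read with key_mtx held", keeping both sparse and lockdep quiet without opening a spurious RCU read section; "example_show_default" is a hypothetical helper.]

static void example_show_default(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_key *key;

	mutex_lock(&sdata->local->key_mtx);
	key = key_mtx_dereference(sdata->local, sdata->default_unicast_key);
	if (key)
		printk(KERN_DEBUG "default unicast key idx %d\n",
		       key->conf.keyidx);
	mutex_unlock(&sdata->local->key_mtx);
}
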
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 562d2984c482..866f269183cf 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -33,12 +33,6 @@
 #include "cfg.h"
 #include "debugfs.h"
 
-
-static bool ieee80211_disable_40mhz_24ghz;
-module_param(ieee80211_disable_40mhz_24ghz, bool, 0644);
-MODULE_PARM_DESC(ieee80211_disable_40mhz_24ghz,
-		 "Disable 40MHz support in the 2.4GHz band");
-
 static struct lock_class_key ieee80211_rx_skb_queue_class;
 
 void ieee80211_configure_filter(struct ieee80211_local *local)
@@ -364,7 +358,8 @@ static void ieee80211_restart_work(struct work_struct *work)
 	flush_workqueue(local->workqueue);
 
 	mutex_lock(&local->mtx);
-	WARN(test_bit(SCAN_HW_SCANNING, &local->scanning),
+	WARN(test_bit(SCAN_HW_SCANNING, &local->scanning) ||
+	     local->sched_scanning,
 		"%s called with hardware scan in progress\n", __func__);
 	mutex_unlock(&local->mtx);
 
@@ -545,7 +540,9 @@ ieee80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = {
 	},
 	[NL80211_IFTYPE_MESH_POINT] = {
 		.tx = 0xffff,
-		.rx = BIT(IEEE80211_STYPE_ACTION >> 4),
+		.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+			BIT(IEEE80211_STYPE_AUTH >> 4) |
+			BIT(IEEE80211_STYPE_DEAUTH >> 4),
 	},
 };
 
@@ -584,8 +581,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
 
 	wiphy->flags |= WIPHY_FLAG_NETNS_OK |
 			WIPHY_FLAG_4ADDR_AP |
-			WIPHY_FLAG_4ADDR_STATION |
-			WIPHY_FLAG_SUPPORTS_SEPARATE_DEFAULT_KEYS;
+			WIPHY_FLAG_4ADDR_STATION;
 
 	if (!ops->set_key)
 		wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
@@ -656,6 +652,9 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
 	setup_timer(&local->dynamic_ps_timer,
 		    ieee80211_dynamic_ps_timer, (unsigned long) local);
 
+	INIT_WORK(&local->sched_scan_stopped_work,
+		  ieee80211_sched_scan_stopped_work);
+
 	sta_info_init(local);
 
 	for (i = 0; i < IEEE80211_MAX_QUEUES; i++) {
@@ -686,7 +685,7 @@ EXPORT_SYMBOL(ieee80211_alloc_hw);
 int ieee80211_register_hw(struct ieee80211_hw *hw)
 {
 	struct ieee80211_local *local = hw_to_local(hw);
-	int result;
+	int result, i;
 	enum ieee80211_band band;
 	int channels, max_bitrates;
 	bool supp_ht;
@@ -701,6 +700,13 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 		WLAN_CIPHER_SUITE_AES_CMAC
 	};
 
+	if ((hw->wiphy->wowlan.flags || hw->wiphy->wowlan.n_patterns)
+#ifdef CONFIG_PM
+	    && (!local->ops->suspend || !local->ops->resume)
+#endif
+	    )
+		return -EINVAL;
+
 	if (hw->max_report_rates == 0)
 		hw->max_report_rates = hw->max_rates;
 
@@ -726,18 +732,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 		}
 		channels += sband->n_channels;
 
-		/*
-		 * Since ieee80211_disable_40mhz_24ghz is global, we can
-		 * modify the sband's ht data even if the driver uses a
-		 * global structure for that.
-		 */
-		if (ieee80211_disable_40mhz_24ghz &&
-		    band == IEEE80211_BAND_2GHZ &&
-		    sband->ht_cap.ht_supported) {
-			sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
-			sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SGI_40;
-		}
-
 		if (max_bitrates < sband->n_bitrates)
 			max_bitrates = sband->n_bitrates;
 		supp_ht = supp_ht || sband->ht_cap.ht_supported;
@@ -749,17 +743,44 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 		return -ENOMEM;
 
 	/* if low-level driver supports AP, we also support VLAN */
-	if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_AP))
-		local->hw.wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP_VLAN);
+	if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) {
+		hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP_VLAN);
+		hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_AP_VLAN);
+	}
 
 	/* mac80211 always supports monitor */
-	local->hw.wiphy->interface_modes |= BIT(NL80211_IFTYPE_MONITOR);
+	hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_MONITOR);
+	hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_MONITOR);
+
+	/*
+	 * mac80211 doesn't support more than 1 channel, and also not more
+	 * than one IBSS interface
+	 */
+	for (i = 0; i < hw->wiphy->n_iface_combinations; i++) {
+		const struct ieee80211_iface_combination *c;
+		int j;
+
+		c = &hw->wiphy->iface_combinations[i];
+
+		if (c->num_different_channels > 1)
+			return -EINVAL;
+
+		for (j = 0; j < c->n_limits; j++)
+			if ((c->limits[j].types & BIT(NL80211_IFTYPE_ADHOC)) &&
+			    c->limits[j].max > 1)
+				return -EINVAL;
+	}
 
 #ifndef CONFIG_MAC80211_MESH
 	/* mesh depends on Kconfig, but drivers should set it if they want */
 	local->hw.wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MESH_POINT);
 #endif
 
+	/* if the underlying driver supports mesh, mac80211 will (at least)
+	 * provide routing of mesh authentication frames to userspace */
+	if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_MESH_POINT))
+		local->hw.wiphy->flags |= WIPHY_FLAG_MESH_AUTH;
+
 	/* mac80211 supports control port protocol changing */
 	local->hw.wiphy->flags |= WIPHY_FLAG_CONTROL_PORT_PROTOCOL;
 
@@ -838,6 +859,9 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 	if (!local->ops->remain_on_channel)
 		local->hw.wiphy->max_remain_on_channel_duration = 5000;
 
+	if (local->ops->sched_scan_start)
+		local->hw.wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
+
 	result = wiphy_register(local->hw.wiphy);
 	if (result < 0)
 		goto fail_wiphy_register;
@@ -861,8 +885,10 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 	 * and we need some headroom for passing the frame to monitor
 	 * interfaces, but never both at the same time.
 	 */
+#ifndef __CHECKER__
 	BUILD_BUG_ON(IEEE80211_TX_STATUS_HEADROOM !=
 		     sizeof(struct ieee80211_tx_status_rtap_hdr));
+#endif
 	local->tx_headroom = max_t(unsigned int , local->hw.extra_tx_headroom,
 				   sizeof(struct ieee80211_tx_status_rtap_hdr));
 
@@ -879,10 +905,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 
 	local->dynamic_ps_forced_timeout = -1;
 
-	result = sta_info_start(local);
-	if (result < 0)
-		goto fail_sta_info;
-
 	result = ieee80211_wep_init(local);
 	if (result < 0)
 		wiphy_debug(local->hw.wiphy, "Failed to initialize wep: %d\n",
@@ -945,7 +967,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 	rtnl_unlock();
 	ieee80211_wep_free(local);
 	sta_info_stop(local);
- fail_sta_info:
 	destroy_workqueue(local->workqueue);
 fail_workqueue:
 	wiphy_unregister(local->hw.wiphy);
@@ -1069,6 +1090,8 @@ static void __exit ieee80211_exit(void)
 	ieee80211s_stop();
 
 	ieee80211_iface_exit();
+
+	rcu_barrier();
 }
 
 
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 2a57cc02c618..29e9980c8e60 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -279,57 +279,14 @@ void mesh_mgmt_ies_add(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
 			MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00;
 	*pos++ = 0x00;
 
-	if (sdata->u.mesh.vendor_ie) {
-		int len = sdata->u.mesh.vendor_ie_len;
-		const u8 *data = sdata->u.mesh.vendor_ie;
+	if (sdata->u.mesh.ie) {
+		int len = sdata->u.mesh.ie_len;
+		const u8 *data = sdata->u.mesh.ie;
 		if (skb_tailroom(skb) > len)
 			memcpy(skb_put(skb, len), data, len);
 	}
 }
 
-u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata, struct mesh_table *tbl)
-{
-	/* Use last four bytes of hw addr and interface index as hash index */
-	return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex, tbl->hash_rnd)
-		& tbl->hash_mask;
-}
-
-struct mesh_table *mesh_table_alloc(int size_order)
-{
-	int i;
-	struct mesh_table *newtbl;
-
-	newtbl = kmalloc(sizeof(struct mesh_table), GFP_KERNEL);
-	if (!newtbl)
-		return NULL;
-
-	newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
-			(1 << size_order), GFP_KERNEL);
-
-	if (!newtbl->hash_buckets) {
-		kfree(newtbl);
-		return NULL;
-	}
-
-	newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
-			(1 << size_order), GFP_KERNEL);
-	if (!newtbl->hashwlock) {
-		kfree(newtbl->hash_buckets);
-		kfree(newtbl);
-		return NULL;
-	}
-
-	newtbl->size_order = size_order;
-	newtbl->hash_mask = (1 << size_order) - 1;
-	atomic_set(&newtbl->entries, 0);
-	get_random_bytes(&newtbl->hash_rnd,
-			sizeof(newtbl->hash_rnd));
-	for (i = 0; i <= newtbl->hash_mask; i++)
-		spin_lock_init(&newtbl->hashwlock[i]);
-
-	return newtbl;
-}
-
 
 static void ieee80211_mesh_path_timer(unsigned long data)
 {
@@ -573,6 +530,10 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
 	ieee802_11_parse_elems(mgmt->u.probe_resp.variable, len - baselen,
 			       &elems);
 
+	/* ignore beacons from secure mesh peers if our security is off */
+	if (elems.rsn_len && sdata->u.mesh.security == IEEE80211_MESH_SEC_NONE)
+		return;
+
 	if (elems.ds_params && elems.ds_params_len == 1)
 		freq = ieee80211_channel_to_frequency(elems.ds_params[0], band);
 	else
@@ -586,9 +547,7 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
 	if (elems.mesh_id && elems.mesh_config &&
 	    mesh_matches_local(&elems, sdata)) {
 		supp_rates = ieee80211_sta_get_rates(local, &elems, band);
-
-		mesh_neighbour_update(mgmt->sa, supp_rates, sdata,
-				      mesh_peer_accepts_plinks(&elems));
+		mesh_neighbour_update(mgmt->sa, supp_rates, sdata, &elems);
 	}
 }
 
@@ -598,7 +557,7 @@ static void ieee80211_mesh_rx_mgmt_action(struct ieee80211_sub_if_data *sdata,
 				struct ieee80211_rx_status *rx_status)
 {
 	switch (mgmt->u.action.category) {
-	case WLAN_CATEGORY_MESH_PLINK:
+	case WLAN_CATEGORY_MESH_ACTION:
 		mesh_rx_plink_frame(sdata, mgmt, len, rx_status);
 		break;
 	case WLAN_CATEGORY_MESH_PATH_SEL:
@@ -611,12 +570,9 @@ void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
 				   struct sk_buff *skb)
 {
 	struct ieee80211_rx_status *rx_status;
-	struct ieee80211_if_mesh *ifmsh;
 	struct ieee80211_mgmt *mgmt;
 	u16 stype;
 
-	ifmsh = &sdata->u.mesh;
-
 	rx_status = IEEE80211_SKB_RXCB(skb);
 	mgmt = (struct ieee80211_mgmt *) skb->data;
 	stype = le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_STYPE;
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index b99e230fe31c..249e733362e7 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -92,7 +92,7 @@ struct mesh_path {
92 u8 dst[ETH_ALEN]; 92 u8 dst[ETH_ALEN];
93 u8 mpp[ETH_ALEN]; /* used for MPP or MAP */ 93 u8 mpp[ETH_ALEN]; /* used for MPP or MAP */
94 struct ieee80211_sub_if_data *sdata; 94 struct ieee80211_sub_if_data *sdata;
95 struct sta_info *next_hop; 95 struct sta_info __rcu *next_hop;
96 struct timer_list timer; 96 struct timer_list timer;
97 struct sk_buff_head frame_queue; 97 struct sk_buff_head frame_queue;
98 struct rcu_head rcu; 98 struct rcu_head rcu;
@@ -120,6 +120,7 @@ struct mesh_path {
120 * buckets 120 * buckets
121 * @mean_chain_len: maximum average length for the hash buckets' list, if it is 121 * @mean_chain_len: maximum average length for the hash buckets' list, if it is
122 * reached, the table will grow 122 * reached, the table will grow
 123 * @rcu_head: RCU head to free the table
123 */ 124 */
124struct mesh_table { 125struct mesh_table {
125 /* Number of buckets will be 2^N */ 126 /* Number of buckets will be 2^N */
@@ -132,6 +133,8 @@ struct mesh_table {
132 int (*copy_node) (struct hlist_node *p, struct mesh_table *newtbl); 133 int (*copy_node) (struct hlist_node *p, struct mesh_table *newtbl);
133 int size_order; 134 int size_order;
134 int mean_chain_len; 135 int mean_chain_len;
136
137 struct rcu_head rcu_head;
135}; 138};
136 139
137/* Recent multicast cache */ 140/* Recent multicast cache */
@@ -226,7 +229,8 @@ void mesh_rx_path_sel_frame(struct ieee80211_sub_if_data *sdata,
226int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata); 229int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata);
227/* Mesh plinks */ 230/* Mesh plinks */
228void mesh_neighbour_update(u8 *hw_addr, u32 rates, 231void mesh_neighbour_update(u8 *hw_addr, u32 rates,
229 struct ieee80211_sub_if_data *sdata, bool add); 232 struct ieee80211_sub_if_data *sdata,
233 struct ieee802_11_elems *ie);
230bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie); 234bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie);
231void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata); 235void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata);
232void mesh_plink_broken(struct sta_info *sta); 236void mesh_plink_broken(struct sta_info *sta);
@@ -239,12 +243,8 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata,
239 243
240/* Private interfaces */ 244/* Private interfaces */
241/* Mesh tables */ 245/* Mesh tables */
242struct mesh_table *mesh_table_alloc(int size_order);
243void mesh_table_free(struct mesh_table *tbl, bool free_leafs);
244void mesh_mpath_table_grow(void); 246void mesh_mpath_table_grow(void);
245void mesh_mpp_table_grow(void); 247void mesh_mpp_table_grow(void);
246u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata,
247 struct mesh_table *tbl);
248/* Mesh paths */ 248/* Mesh paths */
249int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn, __le16 target_rcode, 249int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn, __le16 target_rcode,
250 const u8 *ra, struct ieee80211_sub_if_data *sdata); 250 const u8 *ra, struct ieee80211_sub_if_data *sdata);
@@ -289,10 +289,6 @@ static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata)
289 return sdata->u.mesh.mesh_pp_id == IEEE80211_PATH_PROTOCOL_HWMP; 289 return sdata->u.mesh.mesh_pp_id == IEEE80211_PATH_PROTOCOL_HWMP;
290} 290}
291 291
292#define for_each_mesh_entry(x, p, node, i) \
293 for (i = 0; i <= x->hash_mask; i++) \
294 hlist_for_each_entry_rcu(node, p, &x->hash_buckets[i], list)
295
296void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local); 292void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local);
297 293
298void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata); 294void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata);
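
The __rcu annotation on next_hop is a sparse/lockdep aid: direct loads of the field now warn under "make C=1", forcing every access through rcu_dereference() and friends. A hedged sketch of the comparison pattern this enforces; the helper is illustrative and not in this patch, but mirrors what the mesh_plink_broken() hunk below now does:

/* Illustrative helper: with next_hop marked __rcu, a bare
 * "mpath->next_hop == sta" makes sparse warn; the comparison must go
 * through rcu_dereference() inside a read-side critical section. */
static bool next_hop_is(struct mesh_path *mpath, struct sta_info *sta)
{
	bool match;

	rcu_read_lock();
	match = rcu_dereference(mpath->next_hop) == sta;
	rcu_read_unlock();
	return match;
}
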
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 5bf64d7112b3..2b18053070c1 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -391,7 +391,6 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
391 (mpath->flags & MESH_PATH_SN_VALID)) { 391 (mpath->flags & MESH_PATH_SN_VALID)) {
392 if (SN_GT(mpath->sn, orig_sn) || 392 if (SN_GT(mpath->sn, orig_sn) ||
393 (mpath->sn == orig_sn && 393 (mpath->sn == orig_sn &&
394 action == MPATH_PREQ &&
395 new_metric >= mpath->metric)) { 394 new_metric >= mpath->metric)) {
396 process = false; 395 process = false;
397 fresh_info = false; 396 fresh_info = false;
@@ -561,6 +560,14 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
561} 560}
562 561
563 562
563static inline struct sta_info *
564next_hop_deref_protected(struct mesh_path *mpath)
565{
566 return rcu_dereference_protected(mpath->next_hop,
567 lockdep_is_held(&mpath->state_lock));
568}
569
570
564static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata, 571static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
565 struct ieee80211_mgmt *mgmt, 572 struct ieee80211_mgmt *mgmt,
566 u8 *prep_elem, u32 metric) 573 u8 *prep_elem, u32 metric)
@@ -600,7 +607,7 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
600 spin_unlock_bh(&mpath->state_lock); 607 spin_unlock_bh(&mpath->state_lock);
601 goto fail; 608 goto fail;
602 } 609 }
603 memcpy(next_hop, mpath->next_hop->sta.addr, ETH_ALEN); 610 memcpy(next_hop, next_hop_deref_protected(mpath)->sta.addr, ETH_ALEN);
604 spin_unlock_bh(&mpath->state_lock); 611 spin_unlock_bh(&mpath->state_lock);
605 --ttl; 612 --ttl;
606 flags = PREP_IE_FLAGS(prep_elem); 613 flags = PREP_IE_FLAGS(prep_elem);
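
next_hop_deref_protected() is the update-side counterpart of rcu_dereference(): it omits the read-side barrier but, with CONFIG_PROVE_RCU, verifies that state_lock really is held at the call site. The matching publish side would use rcu_assign_pointer() under the same lock; a sketch, with an illustrative helper name that is not part of this patch:

/* Illustrative only: the publish side matching
 * next_hop_deref_protected(). rcu_assign_pointer() supplies the write
 * barrier so readers using rcu_dereference() see an initialized sta. */
static void mesh_path_assign_nexthop_sketch(struct mesh_path *mpath,
					    struct sta_info *sta)
{
	spin_lock_bh(&mpath->state_lock);
	rcu_assign_pointer(mpath->next_hop, sta);
	spin_unlock_bh(&mpath->state_lock);
}
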
@@ -633,7 +640,6 @@ static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata,
633 struct mesh_path *mpath; 640 struct mesh_path *mpath;
634 u8 ttl; 641 u8 ttl;
635 u8 *ta, *target_addr; 642 u8 *ta, *target_addr;
636 u8 target_flags;
637 u32 target_sn; 643 u32 target_sn;
638 u16 target_rcode; 644 u16 target_rcode;
639 645
@@ -644,7 +650,6 @@ static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata,
644 return; 650 return;
645 } 651 }
646 ttl--; 652 ttl--;
647 target_flags = PERR_IE_TARGET_FLAGS(perr_elem);
648 target_addr = PERR_IE_TARGET_ADDR(perr_elem); 653 target_addr = PERR_IE_TARGET_ADDR(perr_elem);
649 target_sn = PERR_IE_TARGET_SN(perr_elem); 654 target_sn = PERR_IE_TARGET_SN(perr_elem);
650 target_rcode = PERR_IE_TARGET_RCODE(perr_elem); 655 target_rcode = PERR_IE_TARGET_RCODE(perr_elem);
@@ -654,7 +659,8 @@ static void hwmp_perr_frame_process(struct ieee80211_sub_if_data *sdata,
654 if (mpath) { 659 if (mpath) {
655 spin_lock_bh(&mpath->state_lock); 660 spin_lock_bh(&mpath->state_lock);
656 if (mpath->flags & MESH_PATH_ACTIVE && 661 if (mpath->flags & MESH_PATH_ACTIVE &&
657 memcmp(ta, mpath->next_hop->sta.addr, ETH_ALEN) == 0 && 662 memcmp(ta, next_hop_deref_protected(mpath)->sta.addr,
663 ETH_ALEN) == 0 &&
658 (!(mpath->flags & MESH_PATH_SN_VALID) || 664 (!(mpath->flags & MESH_PATH_SN_VALID) ||
659 SN_GT(target_sn, mpath->sn))) { 665 SN_GT(target_sn, mpath->sn))) {
660 mpath->flags &= ~MESH_PATH_ACTIVE; 666 mpath->flags &= ~MESH_PATH_ACTIVE;
@@ -675,12 +681,10 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
675{ 681{
676 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 682 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
677 struct mesh_path *mpath; 683 struct mesh_path *mpath;
678 u8 *ta;
679 u8 ttl, flags, hopcount; 684 u8 ttl, flags, hopcount;
680 u8 *orig_addr; 685 u8 *orig_addr;
681 u32 orig_sn, metric; 686 u32 orig_sn, metric;
682 687
683 ta = mgmt->sa;
684 ttl = rann->rann_ttl; 688 ttl = rann->rann_ttl;
685 if (ttl <= 1) { 689 if (ttl <= 1) {
686 ifmsh->mshstats.dropped_frames_ttl++; 690 ifmsh->mshstats.dropped_frames_ttl++;
@@ -918,6 +922,7 @@ int mesh_nexthop_lookup(struct sk_buff *skb,
918{ 922{
919 struct sk_buff *skb_to_free = NULL; 923 struct sk_buff *skb_to_free = NULL;
920 struct mesh_path *mpath; 924 struct mesh_path *mpath;
925 struct sta_info *next_hop;
921 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 926 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
922 u8 *target_addr = hdr->addr3; 927 u8 *target_addr = hdr->addr3;
923 int err = 0; 928 int err = 0;
@@ -945,7 +950,11 @@ int mesh_nexthop_lookup(struct sk_buff *skb,
945 mesh_queue_preq(mpath, 950 mesh_queue_preq(mpath,
946 PREQ_Q_F_START | PREQ_Q_F_REFRESH); 951 PREQ_Q_F_START | PREQ_Q_F_REFRESH);
947 } 952 }
948 memcpy(hdr->addr1, mpath->next_hop->sta.addr, ETH_ALEN); 953 next_hop = rcu_dereference(mpath->next_hop);
954 if (next_hop)
955 memcpy(hdr->addr1, next_hop->sta.addr, ETH_ALEN);
956 else
957 err = -ENOENT;
949 } else { 958 } else {
950 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 959 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
951 if (!(mpath->flags & MESH_PATH_RESOLVING)) { 960 if (!(mpath->flags & MESH_PATH_RESOLVING)) {
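
Note the behavioural change in the hunk above: an ACTIVE path can now legitimately have a NULL next_hop seen by an RCU reader mid-update, so the lookup returns -ENOENT instead of dereferencing blindly. Callers of mesh_nexthop_lookup() already treat a nonzero return as "frame not sent", so no caller changes are needed for this branch.
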
@@ -971,20 +980,11 @@ endlookup:
971 980
972void mesh_path_timer(unsigned long data) 981void mesh_path_timer(unsigned long data)
973{ 982{
974 struct ieee80211_sub_if_data *sdata; 983 struct mesh_path *mpath = (void *) data;
975 struct mesh_path *mpath; 984 struct ieee80211_sub_if_data *sdata = mpath->sdata;
976
977 rcu_read_lock();
978 mpath = (struct mesh_path *) data;
979 mpath = rcu_dereference(mpath);
980 if (!mpath)
981 goto endmpathtimer;
982 sdata = mpath->sdata;
983 985
984 if (sdata->local->quiescing) { 986 if (sdata->local->quiescing)
985 rcu_read_unlock();
986 return; 987 return;
987 }
988 988
989 spin_lock_bh(&mpath->state_lock); 989 spin_lock_bh(&mpath->state_lock);
990 if (mpath->flags & MESH_PATH_RESOLVED || 990 if (mpath->flags & MESH_PATH_RESOLVED ||
@@ -1001,8 +1001,6 @@ void mesh_path_timer(unsigned long data)
1001 } 1001 }
1002 1002
1003 spin_unlock_bh(&mpath->state_lock); 1003 spin_unlock_bh(&mpath->state_lock);
1004endmpathtimer:
1005 rcu_read_unlock();
1006} 1004}
1007 1005
1008void 1006void
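
The timer handler can drop rcu_read_lock() because the mpath behind "data" cannot be freed while the timer is pending: teardown unlinks the node and defers the free to an RCU callback that stops the timer first. A plausible shape of that callback; mesh_path_node_reclaim() itself is not shown in this hunk, and the sync-before-free ordering is an assumption consistent with the del_timer_sync() added to mesh_path_node_free() below:

/* Sketch, assuming the reclaim callback stops the timer before
 * freeing the path: */
static void mesh_path_reclaim_sketch(struct rcu_head *rp)
{
	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);

	del_timer_sync(&node->mpath->timer);	/* handler has finished */
	kfree(node->mpath);
	kfree(node);
}
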
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 336ca9d0c5c4..0d2faacc3e87 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -36,10 +36,77 @@ struct mpath_node {
36 struct mesh_path *mpath; 36 struct mesh_path *mpath;
37}; 37};
38 38
39static struct mesh_table *mesh_paths; 39static struct mesh_table __rcu *mesh_paths;
40static struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */ 40static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */
41 41
42int mesh_paths_generation; 42int mesh_paths_generation;
43
44/* This lock will have the grow table function as writer and add / delete nodes
45 * as readers. When reading the table (i.e. doing lookups) we are well protected
46 * by RCU
47 */
48static DEFINE_RWLOCK(pathtbl_resize_lock);
49
50
51static inline struct mesh_table *resize_dereference_mesh_paths(void)
52{
53 return rcu_dereference_protected(mesh_paths,
54 lockdep_is_held(&pathtbl_resize_lock));
55}
56
57static inline struct mesh_table *resize_dereference_mpp_paths(void)
58{
59 return rcu_dereference_protected(mpp_paths,
60 lockdep_is_held(&pathtbl_resize_lock));
61}
62
63/*
64 * CAREFUL -- "tbl" must not be an expression,
65 * in particular not an rcu_dereference(), since
66 * it's used twice. So it is illegal to do
67 * for_each_mesh_entry(rcu_dereference(...), ...)
68 */
69#define for_each_mesh_entry(tbl, p, node, i) \
70 for (i = 0; i <= tbl->hash_mask; i++) \
71 hlist_for_each_entry_rcu(node, p, &tbl->hash_buckets[i], list)
72
73
74static struct mesh_table *mesh_table_alloc(int size_order)
75{
76 int i;
77 struct mesh_table *newtbl;
78
79 newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
80 if (!newtbl)
81 return NULL;
82
83 newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
84 (1 << size_order), GFP_ATOMIC);
85
86 if (!newtbl->hash_buckets) {
87 kfree(newtbl);
88 return NULL;
89 }
90
91 newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
92 (1 << size_order), GFP_ATOMIC);
93 if (!newtbl->hashwlock) {
94 kfree(newtbl->hash_buckets);
95 kfree(newtbl);
96 return NULL;
97 }
98
99 newtbl->size_order = size_order;
100 newtbl->hash_mask = (1 << size_order) - 1;
101 atomic_set(&newtbl->entries, 0);
102 get_random_bytes(&newtbl->hash_rnd,
103 sizeof(newtbl->hash_rnd));
104 for (i = 0; i <= newtbl->hash_mask; i++)
105 spin_lock_init(&newtbl->hashwlock[i]);
106
107 return newtbl;
108}
109
43static void __mesh_table_free(struct mesh_table *tbl) 110static void __mesh_table_free(struct mesh_table *tbl)
44{ 111{
45 kfree(tbl->hash_buckets); 112 kfree(tbl->hash_buckets);
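
The relocated allocator switches to GFP_ATOMIC because it is now also called with pathtbl_resize_lock write-held and BHs disabled, where sleeping allocations are forbidden. The for_each_mesh_entry() caveat above boils down to evaluating the table pointer exactly once; an illustrative walker following that rule, not part of the patch:

/* Illustrative: hoist the dereference into a local so the macro's
 * double evaluation sees one stable pointer. */
static int count_mesh_paths_sketch(void)
{
	struct mesh_table *tbl;
	struct mpath_node *node;
	struct hlist_node *p;
	int i, n = 0;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);	/* evaluated exactly once */
	for_each_mesh_entry(tbl, p, node, i)
		n++;			/* node->mpath valid under RCU */
	rcu_read_unlock();
	return n;
}
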
@@ -47,7 +114,7 @@ static void __mesh_table_free(struct mesh_table *tbl)
47 kfree(tbl); 114 kfree(tbl);
48} 115}
49 116
50void mesh_table_free(struct mesh_table *tbl, bool free_leafs) 117static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
51{ 118{
52 struct hlist_head *mesh_hash; 119 struct hlist_head *mesh_hash;
53 struct hlist_node *p, *q; 120 struct hlist_node *p, *q;
@@ -55,60 +122,56 @@ void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
55 122
56 mesh_hash = tbl->hash_buckets; 123 mesh_hash = tbl->hash_buckets;
57 for (i = 0; i <= tbl->hash_mask; i++) { 124 for (i = 0; i <= tbl->hash_mask; i++) {
58 spin_lock(&tbl->hashwlock[i]); 125 spin_lock_bh(&tbl->hashwlock[i]);
59 hlist_for_each_safe(p, q, &mesh_hash[i]) { 126 hlist_for_each_safe(p, q, &mesh_hash[i]) {
60 tbl->free_node(p, free_leafs); 127 tbl->free_node(p, free_leafs);
61 atomic_dec(&tbl->entries); 128 atomic_dec(&tbl->entries);
62 } 129 }
63 spin_unlock(&tbl->hashwlock[i]); 130 spin_unlock_bh(&tbl->hashwlock[i]);
64 } 131 }
65 __mesh_table_free(tbl); 132 __mesh_table_free(tbl);
66} 133}
67 134
68static struct mesh_table *mesh_table_grow(struct mesh_table *tbl) 135static int mesh_table_grow(struct mesh_table *oldtbl,
136 struct mesh_table *newtbl)
69{ 137{
70 struct mesh_table *newtbl;
71 struct hlist_head *oldhash; 138 struct hlist_head *oldhash;
72 struct hlist_node *p, *q; 139 struct hlist_node *p, *q;
73 int i; 140 int i;
74 141
75 if (atomic_read(&tbl->entries) 142 if (atomic_read(&oldtbl->entries)
76 < tbl->mean_chain_len * (tbl->hash_mask + 1)) 143 < oldtbl->mean_chain_len * (oldtbl->hash_mask + 1))
77 goto endgrow; 144 return -EAGAIN;
78 145
79 newtbl = mesh_table_alloc(tbl->size_order + 1); 146 newtbl->free_node = oldtbl->free_node;
80 if (!newtbl) 147 newtbl->mean_chain_len = oldtbl->mean_chain_len;
81 goto endgrow; 148 newtbl->copy_node = oldtbl->copy_node;
82 149 atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));
83 newtbl->free_node = tbl->free_node;
84 newtbl->mean_chain_len = tbl->mean_chain_len;
85 newtbl->copy_node = tbl->copy_node;
86 atomic_set(&newtbl->entries, atomic_read(&tbl->entries));
87 150
88 oldhash = tbl->hash_buckets; 151 oldhash = oldtbl->hash_buckets;
89 for (i = 0; i <= tbl->hash_mask; i++) 152 for (i = 0; i <= oldtbl->hash_mask; i++)
90 hlist_for_each(p, &oldhash[i]) 153 hlist_for_each(p, &oldhash[i])
91 if (tbl->copy_node(p, newtbl) < 0) 154 if (oldtbl->copy_node(p, newtbl) < 0)
92 goto errcopy; 155 goto errcopy;
93 156
94 return newtbl; 157 return 0;
95 158
96errcopy: 159errcopy:
97 for (i = 0; i <= newtbl->hash_mask; i++) { 160 for (i = 0; i <= newtbl->hash_mask; i++) {
98 hlist_for_each_safe(p, q, &newtbl->hash_buckets[i]) 161 hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
99 tbl->free_node(p, 0); 162 oldtbl->free_node(p, 0);
100 } 163 }
101 __mesh_table_free(newtbl); 164 return -ENOMEM;
102endgrow:
103 return NULL;
104} 165}
105 166
167static u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata,
168 struct mesh_table *tbl)
169{
170 /* Use last four bytes of hw addr and interface index as hash index */
171 return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex, tbl->hash_rnd)
172 & tbl->hash_mask;
173}
106 174
107/* This lock will have the grow table function as writer and add / delete nodes
108 * as readers. When reading the table (i.e. doing lookups) we are well protected
109 * by RCU
110 */
111static DEFINE_RWLOCK(pathtbl_resize_lock);
112 175
113/** 176/**
114 * 177 *
@@ -218,12 +281,13 @@ struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
218 */ 281 */
219struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata) 282struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata)
220{ 283{
284 struct mesh_table *tbl = rcu_dereference(mesh_paths);
221 struct mpath_node *node; 285 struct mpath_node *node;
222 struct hlist_node *p; 286 struct hlist_node *p;
223 int i; 287 int i;
224 int j = 0; 288 int j = 0;
225 289
226 for_each_mesh_entry(mesh_paths, p, node, i) { 290 for_each_mesh_entry(tbl, p, node, i) {
227 if (sdata && node->mpath->sdata != sdata) 291 if (sdata && node->mpath->sdata != sdata)
228 continue; 292 continue;
229 if (j++ == idx) { 293 if (j++ == idx) {
@@ -253,6 +317,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
253{ 317{
254 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 318 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
255 struct ieee80211_local *local = sdata->local; 319 struct ieee80211_local *local = sdata->local;
320 struct mesh_table *tbl;
256 struct mesh_path *mpath, *new_mpath; 321 struct mesh_path *mpath, *new_mpath;
257 struct mpath_node *node, *new_node; 322 struct mpath_node *node, *new_node;
258 struct hlist_head *bucket; 323 struct hlist_head *bucket;
@@ -280,7 +345,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
280 if (!new_node) 345 if (!new_node)
281 goto err_node_alloc; 346 goto err_node_alloc;
282 347
283 read_lock(&pathtbl_resize_lock); 348 read_lock_bh(&pathtbl_resize_lock);
284 memcpy(new_mpath->dst, dst, ETH_ALEN); 349 memcpy(new_mpath->dst, dst, ETH_ALEN);
285 new_mpath->sdata = sdata; 350 new_mpath->sdata = sdata;
286 new_mpath->flags = 0; 351 new_mpath->flags = 0;
@@ -292,10 +357,12 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
292 spin_lock_init(&new_mpath->state_lock); 357 spin_lock_init(&new_mpath->state_lock);
293 init_timer(&new_mpath->timer); 358 init_timer(&new_mpath->timer);
294 359
295 hash_idx = mesh_table_hash(dst, sdata, mesh_paths); 360 tbl = resize_dereference_mesh_paths();
296 bucket = &mesh_paths->hash_buckets[hash_idx]; 361
362 hash_idx = mesh_table_hash(dst, sdata, tbl);
363 bucket = &tbl->hash_buckets[hash_idx];
297 364
298 spin_lock(&mesh_paths->hashwlock[hash_idx]); 365 spin_lock_bh(&tbl->hashwlock[hash_idx]);
299 366
300 err = -EEXIST; 367 err = -EEXIST;
301 hlist_for_each_entry(node, n, bucket, list) { 368 hlist_for_each_entry(node, n, bucket, list) {
@@ -305,14 +372,14 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
305 } 372 }
306 373
307 hlist_add_head_rcu(&new_node->list, bucket); 374 hlist_add_head_rcu(&new_node->list, bucket);
308 if (atomic_inc_return(&mesh_paths->entries) >= 375 if (atomic_inc_return(&tbl->entries) >=
309 mesh_paths->mean_chain_len * (mesh_paths->hash_mask + 1)) 376 tbl->mean_chain_len * (tbl->hash_mask + 1))
310 grow = 1; 377 grow = 1;
311 378
312 mesh_paths_generation++; 379 mesh_paths_generation++;
313 380
314 spin_unlock(&mesh_paths->hashwlock[hash_idx]); 381 spin_unlock_bh(&tbl->hashwlock[hash_idx]);
315 read_unlock(&pathtbl_resize_lock); 382 read_unlock_bh(&pathtbl_resize_lock);
316 if (grow) { 383 if (grow) {
317 set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags); 384 set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
318 ieee80211_queue_work(&local->hw, &sdata->work); 385 ieee80211_queue_work(&local->hw, &sdata->work);
@@ -320,8 +387,8 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
320 return 0; 387 return 0;
321 388
322err_exists: 389err_exists:
323 spin_unlock(&mesh_paths->hashwlock[hash_idx]); 390 spin_unlock_bh(&tbl->hashwlock[hash_idx]);
324 read_unlock(&pathtbl_resize_lock); 391 read_unlock_bh(&pathtbl_resize_lock);
325 kfree(new_node); 392 kfree(new_node);
326err_node_alloc: 393err_node_alloc:
327 kfree(new_mpath); 394 kfree(new_mpath);
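
After this change every writer follows the same two-level scheme: the resize rwlock taken for reading (so a concurrent grow cannot swap the table away mid-operation), then the per-bucket spinlock, both in _bh form since these paths can also run in softirq context. The nesting, as an illustrative helper that is not part of the patch:

/* Illustrative: the write-side nesting that mesh_path_add(),
 * mpp_path_add() and mesh_path_del() now share. */
static void bucket_write_section_sketch(u8 *dst,
					struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	int hash_idx;

	read_lock_bh(&pathtbl_resize_lock);	 /* excludes table swap */
	tbl = resize_dereference_mesh_paths();	 /* stable in this section */
	hash_idx = mesh_table_hash(dst, sdata, tbl);
	spin_lock_bh(&tbl->hashwlock[hash_idx]); /* serializes this bucket */
	/* ... add or remove an hlist node here ... */
	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
}
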
@@ -330,46 +397,59 @@ err_path_alloc:
330 return err; 397 return err;
331} 398}
332 399
400static void mesh_table_free_rcu(struct rcu_head *rcu)
401{
402 struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head);
403
404 mesh_table_free(tbl, false);
405}
406
333void mesh_mpath_table_grow(void) 407void mesh_mpath_table_grow(void)
334{ 408{
335 struct mesh_table *oldtbl, *newtbl; 409 struct mesh_table *oldtbl, *newtbl;
336 410
337 write_lock(&pathtbl_resize_lock); 411 write_lock_bh(&pathtbl_resize_lock);
338 oldtbl = mesh_paths; 412 oldtbl = resize_dereference_mesh_paths();
339 newtbl = mesh_table_grow(mesh_paths); 413 newtbl = mesh_table_alloc(oldtbl->size_order + 1);
340 if (!newtbl) { 414 if (!newtbl)
341 write_unlock(&pathtbl_resize_lock); 415 goto out;
342 return; 416 if (mesh_table_grow(oldtbl, newtbl) < 0) {
417 __mesh_table_free(newtbl);
418 goto out;
343 } 419 }
344 rcu_assign_pointer(mesh_paths, newtbl); 420 rcu_assign_pointer(mesh_paths, newtbl);
345 write_unlock(&pathtbl_resize_lock);
346 421
347 synchronize_rcu(); 422 call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);
348 mesh_table_free(oldtbl, false); 423
424 out:
425 write_unlock_bh(&pathtbl_resize_lock);
349} 426}
350 427
351void mesh_mpp_table_grow(void) 428void mesh_mpp_table_grow(void)
352{ 429{
353 struct mesh_table *oldtbl, *newtbl; 430 struct mesh_table *oldtbl, *newtbl;
354 431
355 write_lock(&pathtbl_resize_lock); 432 write_lock_bh(&pathtbl_resize_lock);
356 oldtbl = mpp_paths; 433 oldtbl = resize_dereference_mpp_paths();
357 newtbl = mesh_table_grow(mpp_paths); 434 newtbl = mesh_table_alloc(oldtbl->size_order + 1);
358 if (!newtbl) { 435 if (!newtbl)
359 write_unlock(&pathtbl_resize_lock); 436 goto out;
360 return; 437 if (mesh_table_grow(oldtbl, newtbl) < 0) {
438 __mesh_table_free(newtbl);
439 goto out;
361 } 440 }
362 rcu_assign_pointer(mpp_paths, newtbl); 441 rcu_assign_pointer(mpp_paths, newtbl);
363 write_unlock(&pathtbl_resize_lock); 442 call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);
364 443
365 synchronize_rcu(); 444 out:
366 mesh_table_free(oldtbl, false); 445 write_unlock_bh(&pathtbl_resize_lock);
367} 446}
368 447
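
The grow functions trade synchronize_rcu() for call_rcu(): the pointer swap and the scheduling of the old table's free now happen together under the write lock, which is taken with BHs disabled, where sleeping in synchronize_rcu() would be illegal; call_rcu() schedules the free without blocking. The idiom, condensed as a sketch; mesh_table_free_rcu() above is the real callback:

/* Sketch of the deferred-free idiom: embed an rcu_head in the object,
 * publish the replacement, and free the old one only after all
 * pre-existing readers have drained. */
static void table_free_cb_sketch(struct rcu_head *rcu)
{
	struct mesh_table *tbl = container_of(rcu, struct mesh_table,
					      rcu_head);

	mesh_table_free(tbl, false);	/* leaf nodes live on in newtbl */
}
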
369int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata) 448int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
370{ 449{
371 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 450 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
372 struct ieee80211_local *local = sdata->local; 451 struct ieee80211_local *local = sdata->local;
452 struct mesh_table *tbl;
373 struct mesh_path *mpath, *new_mpath; 453 struct mesh_path *mpath, *new_mpath;
374 struct mpath_node *node, *new_node; 454 struct mpath_node *node, *new_node;
375 struct hlist_head *bucket; 455 struct hlist_head *bucket;
@@ -394,7 +474,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
394 if (!new_node) 474 if (!new_node)
395 goto err_node_alloc; 475 goto err_node_alloc;
396 476
397 read_lock(&pathtbl_resize_lock); 477 read_lock_bh(&pathtbl_resize_lock);
398 memcpy(new_mpath->dst, dst, ETH_ALEN); 478 memcpy(new_mpath->dst, dst, ETH_ALEN);
399 memcpy(new_mpath->mpp, mpp, ETH_ALEN); 479 memcpy(new_mpath->mpp, mpp, ETH_ALEN);
400 new_mpath->sdata = sdata; 480 new_mpath->sdata = sdata;
@@ -404,10 +484,12 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
404 new_mpath->exp_time = jiffies; 484 new_mpath->exp_time = jiffies;
405 spin_lock_init(&new_mpath->state_lock); 485 spin_lock_init(&new_mpath->state_lock);
406 486
407 hash_idx = mesh_table_hash(dst, sdata, mpp_paths); 487 tbl = resize_dereference_mpp_paths();
408 bucket = &mpp_paths->hash_buckets[hash_idx]; 488
489 hash_idx = mesh_table_hash(dst, sdata, tbl);
490 bucket = &tbl->hash_buckets[hash_idx];
409 491
410 spin_lock(&mpp_paths->hashwlock[hash_idx]); 492 spin_lock_bh(&tbl->hashwlock[hash_idx]);
411 493
412 err = -EEXIST; 494 err = -EEXIST;
413 hlist_for_each_entry(node, n, bucket, list) { 495 hlist_for_each_entry(node, n, bucket, list) {
@@ -417,12 +499,12 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
417 } 499 }
418 500
419 hlist_add_head_rcu(&new_node->list, bucket); 501 hlist_add_head_rcu(&new_node->list, bucket);
420 if (atomic_inc_return(&mpp_paths->entries) >= 502 if (atomic_inc_return(&tbl->entries) >=
421 mpp_paths->mean_chain_len * (mpp_paths->hash_mask + 1)) 503 tbl->mean_chain_len * (tbl->hash_mask + 1))
422 grow = 1; 504 grow = 1;
423 505
424 spin_unlock(&mpp_paths->hashwlock[hash_idx]); 506 spin_unlock_bh(&tbl->hashwlock[hash_idx]);
425 read_unlock(&pathtbl_resize_lock); 507 read_unlock_bh(&pathtbl_resize_lock);
426 if (grow) { 508 if (grow) {
427 set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags); 509 set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
428 ieee80211_queue_work(&local->hw, &sdata->work); 510 ieee80211_queue_work(&local->hw, &sdata->work);
@@ -430,8 +512,8 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
430 return 0; 512 return 0;
431 513
432err_exists: 514err_exists:
433 spin_unlock(&mpp_paths->hashwlock[hash_idx]); 515 spin_unlock_bh(&tbl->hashwlock[hash_idx]);
434 read_unlock(&pathtbl_resize_lock); 516 read_unlock_bh(&pathtbl_resize_lock);
435 kfree(new_node); 517 kfree(new_node);
436err_node_alloc: 518err_node_alloc:
437 kfree(new_mpath); 519 kfree(new_mpath);
@@ -450,6 +532,7 @@ err_path_alloc:
450 */ 532 */
451void mesh_plink_broken(struct sta_info *sta) 533void mesh_plink_broken(struct sta_info *sta)
452{ 534{
535 struct mesh_table *tbl;
453 static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 536 static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
454 struct mesh_path *mpath; 537 struct mesh_path *mpath;
455 struct mpath_node *node; 538 struct mpath_node *node;
@@ -458,10 +541,11 @@ void mesh_plink_broken(struct sta_info *sta)
458 int i; 541 int i;
459 542
460 rcu_read_lock(); 543 rcu_read_lock();
461 for_each_mesh_entry(mesh_paths, p, node, i) { 544 tbl = rcu_dereference(mesh_paths);
545 for_each_mesh_entry(tbl, p, node, i) {
462 mpath = node->mpath; 546 mpath = node->mpath;
463 spin_lock_bh(&mpath->state_lock); 547 spin_lock_bh(&mpath->state_lock);
464 if (mpath->next_hop == sta && 548 if (rcu_dereference(mpath->next_hop) == sta &&
465 mpath->flags & MESH_PATH_ACTIVE && 549 mpath->flags & MESH_PATH_ACTIVE &&
466 !(mpath->flags & MESH_PATH_FIXED)) { 550 !(mpath->flags & MESH_PATH_FIXED)) {
467 mpath->flags &= ~MESH_PATH_ACTIVE; 551 mpath->flags &= ~MESH_PATH_ACTIVE;
@@ -490,30 +574,38 @@ void mesh_plink_broken(struct sta_info *sta)
490 */ 574 */
491void mesh_path_flush_by_nexthop(struct sta_info *sta) 575void mesh_path_flush_by_nexthop(struct sta_info *sta)
492{ 576{
577 struct mesh_table *tbl;
493 struct mesh_path *mpath; 578 struct mesh_path *mpath;
494 struct mpath_node *node; 579 struct mpath_node *node;
495 struct hlist_node *p; 580 struct hlist_node *p;
496 int i; 581 int i;
497 582
498 for_each_mesh_entry(mesh_paths, p, node, i) { 583 rcu_read_lock();
584 tbl = rcu_dereference(mesh_paths);
585 for_each_mesh_entry(tbl, p, node, i) {
499 mpath = node->mpath; 586 mpath = node->mpath;
500 if (mpath->next_hop == sta) 587 if (rcu_dereference(mpath->next_hop) == sta)
501 mesh_path_del(mpath->dst, mpath->sdata); 588 mesh_path_del(mpath->dst, mpath->sdata);
502 } 589 }
590 rcu_read_unlock();
503} 591}
504 592
505void mesh_path_flush(struct ieee80211_sub_if_data *sdata) 593void mesh_path_flush(struct ieee80211_sub_if_data *sdata)
506{ 594{
595 struct mesh_table *tbl;
507 struct mesh_path *mpath; 596 struct mesh_path *mpath;
508 struct mpath_node *node; 597 struct mpath_node *node;
509 struct hlist_node *p; 598 struct hlist_node *p;
510 int i; 599 int i;
511 600
512 for_each_mesh_entry(mesh_paths, p, node, i) { 601 rcu_read_lock();
602 tbl = rcu_dereference(mesh_paths);
603 for_each_mesh_entry(tbl, p, node, i) {
513 mpath = node->mpath; 604 mpath = node->mpath;
514 if (mpath->sdata == sdata) 605 if (mpath->sdata == sdata)
515 mesh_path_del(mpath->dst, mpath->sdata); 606 mesh_path_del(mpath->dst, mpath->sdata);
516 } 607 }
608 rcu_read_unlock();
517} 609}
518 610
519static void mesh_path_node_reclaim(struct rcu_head *rp) 611static void mesh_path_node_reclaim(struct rcu_head *rp)
@@ -537,6 +629,7 @@ static void mesh_path_node_reclaim(struct rcu_head *rp)
537 */ 629 */
538int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata) 630int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
539{ 631{
632 struct mesh_table *tbl;
540 struct mesh_path *mpath; 633 struct mesh_path *mpath;
541 struct mpath_node *node; 634 struct mpath_node *node;
542 struct hlist_head *bucket; 635 struct hlist_head *bucket;
@@ -544,20 +637,21 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
544 int hash_idx; 637 int hash_idx;
545 int err = 0; 638 int err = 0;
546 639
547 read_lock(&pathtbl_resize_lock); 640 read_lock_bh(&pathtbl_resize_lock);
548 hash_idx = mesh_table_hash(addr, sdata, mesh_paths); 641 tbl = resize_dereference_mesh_paths();
549 bucket = &mesh_paths->hash_buckets[hash_idx]; 642 hash_idx = mesh_table_hash(addr, sdata, tbl);
643 bucket = &tbl->hash_buckets[hash_idx];
550 644
551 spin_lock(&mesh_paths->hashwlock[hash_idx]); 645 spin_lock_bh(&tbl->hashwlock[hash_idx]);
552 hlist_for_each_entry(node, n, bucket, list) { 646 hlist_for_each_entry(node, n, bucket, list) {
553 mpath = node->mpath; 647 mpath = node->mpath;
554 if (mpath->sdata == sdata && 648 if (mpath->sdata == sdata &&
555 memcmp(addr, mpath->dst, ETH_ALEN) == 0) { 649 memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
556 spin_lock_bh(&mpath->state_lock); 650 spin_lock_bh(&mpath->state_lock);
557 mpath->flags |= MESH_PATH_RESOLVING; 651 mpath->flags |= MESH_PATH_RESOLVING;
558 hlist_del_rcu(&node->list); 652 hlist_del_rcu(&node->list);
559 call_rcu(&node->rcu, mesh_path_node_reclaim); 653 call_rcu(&node->rcu, mesh_path_node_reclaim);
560 atomic_dec(&mesh_paths->entries); 654 atomic_dec(&tbl->entries);
561 spin_unlock_bh(&mpath->state_lock); 655 spin_unlock_bh(&mpath->state_lock);
562 goto enddel; 656 goto enddel;
563 } 657 }
@@ -566,8 +660,8 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
566 err = -ENXIO; 660 err = -ENXIO;
567enddel: 661enddel:
568 mesh_paths_generation++; 662 mesh_paths_generation++;
569 spin_unlock(&mesh_paths->hashwlock[hash_idx]); 663 spin_unlock_bh(&tbl->hashwlock[hash_idx]);
570 read_unlock(&pathtbl_resize_lock); 664 read_unlock_bh(&pathtbl_resize_lock);
571 return err; 665 return err;
572} 666}
573 667
@@ -667,8 +761,10 @@ static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
667 struct mpath_node *node = hlist_entry(p, struct mpath_node, list); 761 struct mpath_node *node = hlist_entry(p, struct mpath_node, list);
668 mpath = node->mpath; 762 mpath = node->mpath;
669 hlist_del_rcu(p); 763 hlist_del_rcu(p);
670 if (free_leafs) 764 if (free_leafs) {
765 del_timer_sync(&mpath->timer);
671 kfree(mpath); 766 kfree(mpath);
767 }
672 kfree(node); 768 kfree(node);
673} 769}
674 770
@@ -693,52 +789,60 @@ static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
693 789
694int mesh_pathtbl_init(void) 790int mesh_pathtbl_init(void)
695{ 791{
696 mesh_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER); 792 struct mesh_table *tbl_path, *tbl_mpp;
697 if (!mesh_paths) 793
794 tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
795 if (!tbl_path)
698 return -ENOMEM; 796 return -ENOMEM;
699 mesh_paths->free_node = &mesh_path_node_free; 797 tbl_path->free_node = &mesh_path_node_free;
700 mesh_paths->copy_node = &mesh_path_node_copy; 798 tbl_path->copy_node = &mesh_path_node_copy;
701 mesh_paths->mean_chain_len = MEAN_CHAIN_LEN; 799 tbl_path->mean_chain_len = MEAN_CHAIN_LEN;
702 800
703 mpp_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER); 801 tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
704 if (!mpp_paths) { 802 if (!tbl_mpp) {
705 mesh_table_free(mesh_paths, true); 803 mesh_table_free(tbl_path, true);
706 return -ENOMEM; 804 return -ENOMEM;
707 } 805 }
708 mpp_paths->free_node = &mesh_path_node_free; 806 tbl_mpp->free_node = &mesh_path_node_free;
709 mpp_paths->copy_node = &mesh_path_node_copy; 807 tbl_mpp->copy_node = &mesh_path_node_copy;
710 mpp_paths->mean_chain_len = MEAN_CHAIN_LEN; 808 tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;
809
810 /* Need no locking since this is during init */
811 RCU_INIT_POINTER(mesh_paths, tbl_path);
812 RCU_INIT_POINTER(mpp_paths, tbl_mpp);
711 813
712 return 0; 814 return 0;
713} 815}
714 816
715void mesh_path_expire(struct ieee80211_sub_if_data *sdata) 817void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
716{ 818{
819 struct mesh_table *tbl;
717 struct mesh_path *mpath; 820 struct mesh_path *mpath;
718 struct mpath_node *node; 821 struct mpath_node *node;
719 struct hlist_node *p; 822 struct hlist_node *p;
720 int i; 823 int i;
721 824
722 read_lock(&pathtbl_resize_lock); 825 rcu_read_lock();
723 for_each_mesh_entry(mesh_paths, p, node, i) { 826 tbl = rcu_dereference(mesh_paths);
827 for_each_mesh_entry(tbl, p, node, i) {
724 if (node->mpath->sdata != sdata) 828 if (node->mpath->sdata != sdata)
725 continue; 829 continue;
726 mpath = node->mpath; 830 mpath = node->mpath;
727 spin_lock_bh(&mpath->state_lock); 831 spin_lock_bh(&mpath->state_lock);
728 if ((!(mpath->flags & MESH_PATH_RESOLVING)) && 832 if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
729 (!(mpath->flags & MESH_PATH_FIXED)) && 833 (!(mpath->flags & MESH_PATH_FIXED)) &&
730 time_after(jiffies, 834 time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) {
731 mpath->exp_time + MESH_PATH_EXPIRE)) {
732 spin_unlock_bh(&mpath->state_lock); 835 spin_unlock_bh(&mpath->state_lock);
733 mesh_path_del(mpath->dst, mpath->sdata); 836 mesh_path_del(mpath->dst, mpath->sdata);
734 } else 837 } else
735 spin_unlock_bh(&mpath->state_lock); 838 spin_unlock_bh(&mpath->state_lock);
736 } 839 }
737 read_unlock(&pathtbl_resize_lock); 840 rcu_read_unlock();
738} 841}
739 842
740void mesh_pathtbl_unregister(void) 843void mesh_pathtbl_unregister(void)
741{ 844{
742 mesh_table_free(mesh_paths, true); 845 /* no need for locking during exit path */
743 mesh_table_free(mpp_paths, true); 846 mesh_table_free(rcu_dereference_raw(mesh_paths), true);
847 mesh_table_free(rcu_dereference_raw(mpp_paths), true);
744} 848}
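
Three RCU accessors now appear across this file for the same pointers, each encoding a different concurrency claim. A condensed lifecycle sketch; the function is illustrative and assumes the globals defined above:

static void pathtbl_lifecycle_sketch(struct mesh_table *tbl_path,
				     struct mesh_table *newtbl)
{
	/* init: no readers exist yet, barrier-free publish is safe */
	RCU_INIT_POINTER(mesh_paths, tbl_path);

	/* runtime grow: barrier orders table init vs. publish */
	rcu_assign_pointer(mesh_paths, newtbl);

	/* teardown: no readers remain, bypass lockdep checking */
	mesh_table_free(rcu_dereference_raw(mesh_paths), true);
}
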
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 44b53931ba5e..f4adc0917888 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -43,7 +43,7 @@
43#define dot11MeshMaxPeerLinks(s) (s->u.mesh.mshcfg.dot11MeshMaxPeerLinks) 43#define dot11MeshMaxPeerLinks(s) (s->u.mesh.mshcfg.dot11MeshMaxPeerLinks)
44 44
45enum plink_frame_type { 45enum plink_frame_type {
46 PLINK_OPEN = 0, 46 PLINK_OPEN = 1,
47 PLINK_CONFIRM, 47 PLINK_CONFIRM,
48 PLINK_CLOSE 48 PLINK_CLOSE
49}; 49};
@@ -83,7 +83,7 @@ void mesh_plink_dec_estab_count(struct ieee80211_sub_if_data *sdata)
83 */ 83 */
84static inline void mesh_plink_fsm_restart(struct sta_info *sta) 84static inline void mesh_plink_fsm_restart(struct sta_info *sta)
85{ 85{
86 sta->plink_state = PLINK_LISTEN; 86 sta->plink_state = NL80211_PLINK_LISTEN;
87 sta->llid = sta->plid = sta->reason = 0; 87 sta->llid = sta->plid = sta->reason = 0;
88 sta->plink_retries = 0; 88 sta->plink_retries = 0;
89} 89}
@@ -105,7 +105,7 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
105 if (!sta) 105 if (!sta)
106 return NULL; 106 return NULL;
107 107
108 sta->flags = WLAN_STA_AUTHORIZED; 108 sta->flags = WLAN_STA_AUTHORIZED | WLAN_STA_AUTH;
109 sta->sta.supp_rates[local->hw.conf.channel->band] = rates; 109 sta->sta.supp_rates[local->hw.conf.channel->band] = rates;
110 rate_control_rate_init(sta); 110 rate_control_rate_init(sta);
111 111
@@ -126,11 +126,11 @@ static bool __mesh_plink_deactivate(struct sta_info *sta)
126 struct ieee80211_sub_if_data *sdata = sta->sdata; 126 struct ieee80211_sub_if_data *sdata = sta->sdata;
127 bool deactivated = false; 127 bool deactivated = false;
128 128
129 if (sta->plink_state == PLINK_ESTAB) { 129 if (sta->plink_state == NL80211_PLINK_ESTAB) {
130 mesh_plink_dec_estab_count(sdata); 130 mesh_plink_dec_estab_count(sdata);
131 deactivated = true; 131 deactivated = true;
132 } 132 }
133 sta->plink_state = PLINK_BLOCKED; 133 sta->plink_state = NL80211_PLINK_BLOCKED;
134 mesh_path_flush_by_nexthop(sta); 134 mesh_path_flush_by_nexthop(sta);
135 135
136 return deactivated; 136 return deactivated;
@@ -161,7 +161,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
161 __le16 reason) { 161 __le16 reason) {
162 struct ieee80211_local *local = sdata->local; 162 struct ieee80211_local *local = sdata->local;
163 struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400 + 163 struct sk_buff *skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400 +
164 sdata->u.mesh.vendor_ie_len); 164 sdata->u.mesh.ie_len);
165 struct ieee80211_mgmt *mgmt; 165 struct ieee80211_mgmt *mgmt;
166 bool include_plid = false; 166 bool include_plid = false;
167 static const u8 meshpeeringproto[] = { 0x00, 0x0F, 0xAC, 0x2A }; 167 static const u8 meshpeeringproto[] = { 0x00, 0x0F, 0xAC, 0x2A };
@@ -181,8 +181,8 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
181 IEEE80211_STYPE_ACTION); 181 IEEE80211_STYPE_ACTION);
182 memcpy(mgmt->da, da, ETH_ALEN); 182 memcpy(mgmt->da, da, ETH_ALEN);
183 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN); 183 memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
184 /* BSSID is left zeroed, wildcard value */ 184 memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
185 mgmt->u.action.category = WLAN_CATEGORY_MESH_PLINK; 185 mgmt->u.action.category = WLAN_CATEGORY_MESH_ACTION;
186 mgmt->u.action.u.plink_action.action_code = action; 186 mgmt->u.action.u.plink_action.action_code = action;
187 187
188 if (action == PLINK_CLOSE) 188 if (action == PLINK_CLOSE)
@@ -237,8 +237,9 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
237 return 0; 237 return 0;
238} 238}
239 239
240void mesh_neighbour_update(u8 *hw_addr, u32 rates, struct ieee80211_sub_if_data *sdata, 240void mesh_neighbour_update(u8 *hw_addr, u32 rates,
241 bool peer_accepting_plinks) 241 struct ieee80211_sub_if_data *sdata,
242 struct ieee802_11_elems *elems)
242{ 243{
243 struct ieee80211_local *local = sdata->local; 244 struct ieee80211_local *local = sdata->local;
244 struct sta_info *sta; 245 struct sta_info *sta;
@@ -248,8 +249,14 @@ void mesh_neighbour_update(u8 *hw_addr, u32 rates, struct ieee80211_sub_if_data
248 sta = sta_info_get(sdata, hw_addr); 249 sta = sta_info_get(sdata, hw_addr);
249 if (!sta) { 250 if (!sta) {
250 rcu_read_unlock(); 251 rcu_read_unlock();
251 252 /* Userspace handles peer allocation when security is enabled
 252 sta = mesh_plink_alloc(sdata, hw_addr, rates); 253 */
254 if (sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED)
255 cfg80211_notify_new_peer_candidate(sdata->dev, hw_addr,
256 elems->ie_start, elems->total_len,
257 GFP_KERNEL);
258 else
259 sta = mesh_plink_alloc(sdata, hw_addr, rates);
253 if (!sta) 260 if (!sta)
254 return; 261 return;
255 if (sta_info_insert_rcu(sta)) { 262 if (sta_info_insert_rcu(sta)) {
@@ -260,7 +267,8 @@ void mesh_neighbour_update(u8 *hw_addr, u32 rates, struct ieee80211_sub_if_data
260 267
261 sta->last_rx = jiffies; 268 sta->last_rx = jiffies;
262 sta->sta.supp_rates[local->hw.conf.channel->band] = rates; 269 sta->sta.supp_rates[local->hw.conf.channel->band] = rates;
263 if (peer_accepting_plinks && sta->plink_state == PLINK_LISTEN && 270 if (mesh_peer_accepts_plinks(elems) &&
271 sta->plink_state == NL80211_PLINK_LISTEN &&
264 sdata->u.mesh.accepting_plinks && 272 sdata->u.mesh.accepting_plinks &&
265 sdata->u.mesh.mshcfg.auto_open_plinks) 273 sdata->u.mesh.mshcfg.auto_open_plinks)
266 mesh_plink_open(sta); 274 mesh_plink_open(sta);
@@ -300,8 +308,8 @@ static void mesh_plink_timer(unsigned long data)
300 sdata = sta->sdata; 308 sdata = sta->sdata;
301 309
302 switch (sta->plink_state) { 310 switch (sta->plink_state) {
303 case PLINK_OPN_RCVD: 311 case NL80211_PLINK_OPN_RCVD:
304 case PLINK_OPN_SNT: 312 case NL80211_PLINK_OPN_SNT:
305 /* retry timer */ 313 /* retry timer */
306 if (sta->plink_retries < dot11MeshMaxRetries(sdata)) { 314 if (sta->plink_retries < dot11MeshMaxRetries(sdata)) {
307 u32 rand; 315 u32 rand;
@@ -320,17 +328,17 @@ static void mesh_plink_timer(unsigned long data)
320 } 328 }
321 reason = cpu_to_le16(MESH_MAX_RETRIES); 329 reason = cpu_to_le16(MESH_MAX_RETRIES);
322 /* fall through on else */ 330 /* fall through on else */
323 case PLINK_CNF_RCVD: 331 case NL80211_PLINK_CNF_RCVD:
324 /* confirm timer */ 332 /* confirm timer */
325 if (!reason) 333 if (!reason)
326 reason = cpu_to_le16(MESH_CONFIRM_TIMEOUT); 334 reason = cpu_to_le16(MESH_CONFIRM_TIMEOUT);
327 sta->plink_state = PLINK_HOLDING; 335 sta->plink_state = NL80211_PLINK_HOLDING;
328 mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata)); 336 mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata));
329 spin_unlock_bh(&sta->lock); 337 spin_unlock_bh(&sta->lock);
330 mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid, plid, 338 mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid, plid,
331 reason); 339 reason);
332 break; 340 break;
333 case PLINK_HOLDING: 341 case NL80211_PLINK_HOLDING:
334 /* holding timer */ 342 /* holding timer */
335 del_timer(&sta->plink_timer); 343 del_timer(&sta->plink_timer);
336 mesh_plink_fsm_restart(sta); 344 mesh_plink_fsm_restart(sta);
@@ -372,14 +380,17 @@ int mesh_plink_open(struct sta_info *sta)
372 __le16 llid; 380 __le16 llid;
373 struct ieee80211_sub_if_data *sdata = sta->sdata; 381 struct ieee80211_sub_if_data *sdata = sta->sdata;
374 382
383 if (!test_sta_flags(sta, WLAN_STA_AUTH))
384 return -EPERM;
385
375 spin_lock_bh(&sta->lock); 386 spin_lock_bh(&sta->lock);
376 get_random_bytes(&llid, 2); 387 get_random_bytes(&llid, 2);
377 sta->llid = llid; 388 sta->llid = llid;
378 if (sta->plink_state != PLINK_LISTEN) { 389 if (sta->plink_state != NL80211_PLINK_LISTEN) {
379 spin_unlock_bh(&sta->lock); 390 spin_unlock_bh(&sta->lock);
380 return -EBUSY; 391 return -EBUSY;
381 } 392 }
382 sta->plink_state = PLINK_OPN_SNT; 393 sta->plink_state = NL80211_PLINK_OPN_SNT;
383 mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata)); 394 mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata));
384 spin_unlock_bh(&sta->lock); 395 spin_unlock_bh(&sta->lock);
385 mpl_dbg("Mesh plink: starting establishment with %pM\n", 396 mpl_dbg("Mesh plink: starting establishment with %pM\n",
@@ -396,7 +407,7 @@ void mesh_plink_block(struct sta_info *sta)
396 407
397 spin_lock_bh(&sta->lock); 408 spin_lock_bh(&sta->lock);
398 deactivated = __mesh_plink_deactivate(sta); 409 deactivated = __mesh_plink_deactivate(sta);
399 sta->plink_state = PLINK_BLOCKED; 410 sta->plink_state = NL80211_PLINK_BLOCKED;
400 spin_unlock_bh(&sta->lock); 411 spin_unlock_bh(&sta->lock);
401 412
402 if (deactivated) 413 if (deactivated)
@@ -419,13 +430,13 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
419 __le16 plid, llid, reason; 430 __le16 plid, llid, reason;
420#ifdef CONFIG_MAC80211_VERBOSE_MPL_DEBUG 431#ifdef CONFIG_MAC80211_VERBOSE_MPL_DEBUG
421 static const char *mplstates[] = { 432 static const char *mplstates[] = {
422 [PLINK_LISTEN] = "LISTEN", 433 [NL80211_PLINK_LISTEN] = "LISTEN",
423 [PLINK_OPN_SNT] = "OPN-SNT", 434 [NL80211_PLINK_OPN_SNT] = "OPN-SNT",
424 [PLINK_OPN_RCVD] = "OPN-RCVD", 435 [NL80211_PLINK_OPN_RCVD] = "OPN-RCVD",
425 [PLINK_CNF_RCVD] = "CNF_RCVD", 436 [NL80211_PLINK_CNF_RCVD] = "CNF_RCVD",
426 [PLINK_ESTAB] = "ESTAB", 437 [NL80211_PLINK_ESTAB] = "ESTAB",
427 [PLINK_HOLDING] = "HOLDING", 438 [NL80211_PLINK_HOLDING] = "HOLDING",
428 [PLINK_BLOCKED] = "BLOCKED" 439 [NL80211_PLINK_BLOCKED] = "BLOCKED"
429 }; 440 };
430#endif 441#endif
431 442
@@ -449,6 +460,11 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
449 mpl_dbg("Mesh plink: missing necessary peer link ie\n"); 460 mpl_dbg("Mesh plink: missing necessary peer link ie\n");
450 return; 461 return;
451 } 462 }
463 if (elems.rsn_len &&
464 sdata->u.mesh.security == IEEE80211_MESH_SEC_NONE) {
465 mpl_dbg("Mesh plink: can't establish link with secure peer\n");
466 return;
467 }
452 468
453 ftype = mgmt->u.action.u.plink_action.action_code; 469 ftype = mgmt->u.action.u.plink_action.action_code;
454 ie_len = elems.peer_link_len; 470 ie_len = elems.peer_link_len;
@@ -480,7 +496,13 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
480 return; 496 return;
481 } 497 }
482 498
483 if (sta && sta->plink_state == PLINK_BLOCKED) { 499 if (sta && !test_sta_flags(sta, WLAN_STA_AUTH)) {
500 mpl_dbg("Mesh plink: Action frame from non-authed peer\n");
501 rcu_read_unlock();
502 return;
503 }
504
505 if (sta && sta->plink_state == NL80211_PLINK_BLOCKED) {
484 rcu_read_unlock(); 506 rcu_read_unlock();
485 return; 507 return;
486 } 508 }
@@ -550,7 +572,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
550 event = CNF_ACPT; 572 event = CNF_ACPT;
551 break; 573 break;
552 case PLINK_CLOSE: 574 case PLINK_CLOSE:
553 if (sta->plink_state == PLINK_ESTAB) 575 if (sta->plink_state == NL80211_PLINK_ESTAB)
554 /* Do not check for llid or plid. This does not 576 /* Do not check for llid or plid. This does not
555 * follow the standard but since multiple plinks 577 * follow the standard but since multiple plinks
556 * per sta are not supported, it is necessary in 578 * per sta are not supported, it is necessary in
@@ -585,14 +607,14 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
585 reason = 0; 607 reason = 0;
586 switch (sta->plink_state) { 608 switch (sta->plink_state) {
587 /* spin_unlock as soon as state is updated at each case */ 609 /* spin_unlock as soon as state is updated at each case */
588 case PLINK_LISTEN: 610 case NL80211_PLINK_LISTEN:
589 switch (event) { 611 switch (event) {
590 case CLS_ACPT: 612 case CLS_ACPT:
591 mesh_plink_fsm_restart(sta); 613 mesh_plink_fsm_restart(sta);
592 spin_unlock_bh(&sta->lock); 614 spin_unlock_bh(&sta->lock);
593 break; 615 break;
594 case OPN_ACPT: 616 case OPN_ACPT:
595 sta->plink_state = PLINK_OPN_RCVD; 617 sta->plink_state = NL80211_PLINK_OPN_RCVD;
596 sta->plid = plid; 618 sta->plid = plid;
597 get_random_bytes(&llid, 2); 619 get_random_bytes(&llid, 2);
598 sta->llid = llid; 620 sta->llid = llid;
@@ -609,7 +631,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
609 } 631 }
610 break; 632 break;
611 633
612 case PLINK_OPN_SNT: 634 case NL80211_PLINK_OPN_SNT:
613 switch (event) { 635 switch (event) {
614 case OPN_RJCT: 636 case OPN_RJCT:
615 case CNF_RJCT: 637 case CNF_RJCT:
@@ -618,7 +640,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
618 if (!reason) 640 if (!reason)
619 reason = cpu_to_le16(MESH_CLOSE_RCVD); 641 reason = cpu_to_le16(MESH_CLOSE_RCVD);
620 sta->reason = reason; 642 sta->reason = reason;
621 sta->plink_state = PLINK_HOLDING; 643 sta->plink_state = NL80211_PLINK_HOLDING;
622 if (!mod_plink_timer(sta, 644 if (!mod_plink_timer(sta,
623 dot11MeshHoldingTimeout(sdata))) 645 dot11MeshHoldingTimeout(sdata)))
624 sta->ignore_plink_timer = true; 646 sta->ignore_plink_timer = true;
@@ -630,7 +652,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
630 break; 652 break;
631 case OPN_ACPT: 653 case OPN_ACPT:
632 /* retry timer is left untouched */ 654 /* retry timer is left untouched */
633 sta->plink_state = PLINK_OPN_RCVD; 655 sta->plink_state = NL80211_PLINK_OPN_RCVD;
634 sta->plid = plid; 656 sta->plid = plid;
635 llid = sta->llid; 657 llid = sta->llid;
636 spin_unlock_bh(&sta->lock); 658 spin_unlock_bh(&sta->lock);
@@ -638,7 +660,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
638 plid, 0); 660 plid, 0);
639 break; 661 break;
640 case CNF_ACPT: 662 case CNF_ACPT:
641 sta->plink_state = PLINK_CNF_RCVD; 663 sta->plink_state = NL80211_PLINK_CNF_RCVD;
642 if (!mod_plink_timer(sta, 664 if (!mod_plink_timer(sta,
643 dot11MeshConfirmTimeout(sdata))) 665 dot11MeshConfirmTimeout(sdata)))
644 sta->ignore_plink_timer = true; 666 sta->ignore_plink_timer = true;
@@ -651,7 +673,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
651 } 673 }
652 break; 674 break;
653 675
654 case PLINK_OPN_RCVD: 676 case NL80211_PLINK_OPN_RCVD:
655 switch (event) { 677 switch (event) {
656 case OPN_RJCT: 678 case OPN_RJCT:
657 case CNF_RJCT: 679 case CNF_RJCT:
@@ -660,7 +682,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
660 if (!reason) 682 if (!reason)
661 reason = cpu_to_le16(MESH_CLOSE_RCVD); 683 reason = cpu_to_le16(MESH_CLOSE_RCVD);
662 sta->reason = reason; 684 sta->reason = reason;
663 sta->plink_state = PLINK_HOLDING; 685 sta->plink_state = NL80211_PLINK_HOLDING;
664 if (!mod_plink_timer(sta, 686 if (!mod_plink_timer(sta,
665 dot11MeshHoldingTimeout(sdata))) 687 dot11MeshHoldingTimeout(sdata)))
666 sta->ignore_plink_timer = true; 688 sta->ignore_plink_timer = true;
@@ -678,7 +700,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
678 break; 700 break;
679 case CNF_ACPT: 701 case CNF_ACPT:
680 del_timer(&sta->plink_timer); 702 del_timer(&sta->plink_timer);
681 sta->plink_state = PLINK_ESTAB; 703 sta->plink_state = NL80211_PLINK_ESTAB;
682 spin_unlock_bh(&sta->lock); 704 spin_unlock_bh(&sta->lock);
683 mesh_plink_inc_estab_count(sdata); 705 mesh_plink_inc_estab_count(sdata);
684 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON); 706 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
@@ -691,7 +713,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
691 } 713 }
692 break; 714 break;
693 715
694 case PLINK_CNF_RCVD: 716 case NL80211_PLINK_CNF_RCVD:
695 switch (event) { 717 switch (event) {
696 case OPN_RJCT: 718 case OPN_RJCT:
697 case CNF_RJCT: 719 case CNF_RJCT:
@@ -700,7 +722,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
700 if (!reason) 722 if (!reason)
701 reason = cpu_to_le16(MESH_CLOSE_RCVD); 723 reason = cpu_to_le16(MESH_CLOSE_RCVD);
702 sta->reason = reason; 724 sta->reason = reason;
703 sta->plink_state = PLINK_HOLDING; 725 sta->plink_state = NL80211_PLINK_HOLDING;
704 if (!mod_plink_timer(sta, 726 if (!mod_plink_timer(sta,
705 dot11MeshHoldingTimeout(sdata))) 727 dot11MeshHoldingTimeout(sdata)))
706 sta->ignore_plink_timer = true; 728 sta->ignore_plink_timer = true;
@@ -712,7 +734,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
712 break; 734 break;
713 case OPN_ACPT: 735 case OPN_ACPT:
714 del_timer(&sta->plink_timer); 736 del_timer(&sta->plink_timer);
715 sta->plink_state = PLINK_ESTAB; 737 sta->plink_state = NL80211_PLINK_ESTAB;
716 spin_unlock_bh(&sta->lock); 738 spin_unlock_bh(&sta->lock);
717 mesh_plink_inc_estab_count(sdata); 739 mesh_plink_inc_estab_count(sdata);
718 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON); 740 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
@@ -727,13 +749,13 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
727 } 749 }
728 break; 750 break;
729 751
730 case PLINK_ESTAB: 752 case NL80211_PLINK_ESTAB:
731 switch (event) { 753 switch (event) {
732 case CLS_ACPT: 754 case CLS_ACPT:
733 reason = cpu_to_le16(MESH_CLOSE_RCVD); 755 reason = cpu_to_le16(MESH_CLOSE_RCVD);
734 sta->reason = reason; 756 sta->reason = reason;
735 deactivated = __mesh_plink_deactivate(sta); 757 deactivated = __mesh_plink_deactivate(sta);
736 sta->plink_state = PLINK_HOLDING; 758 sta->plink_state = NL80211_PLINK_HOLDING;
737 llid = sta->llid; 759 llid = sta->llid;
738 mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata)); 760 mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata));
739 spin_unlock_bh(&sta->lock); 761 spin_unlock_bh(&sta->lock);
@@ -753,7 +775,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
753 break; 775 break;
754 } 776 }
755 break; 777 break;
756 case PLINK_HOLDING: 778 case NL80211_PLINK_HOLDING:
757 switch (event) { 779 switch (event) {
758 case CLS_ACPT: 780 case CLS_ACPT:
759 if (del_timer(&sta->plink_timer)) 781 if (del_timer(&sta->plink_timer))
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 64d92d5a7f40..d595265d6c22 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -90,20 +90,11 @@ enum rx_mgmt_action {
90 /* no action required */ 90 /* no action required */
91 RX_MGMT_NONE, 91 RX_MGMT_NONE,
92 92
93 /* caller must call cfg80211_send_rx_auth() */
94 RX_MGMT_CFG80211_AUTH,
95
96 /* caller must call cfg80211_send_rx_assoc() */
97 RX_MGMT_CFG80211_ASSOC,
98
99 /* caller must call cfg80211_send_deauth() */ 93 /* caller must call cfg80211_send_deauth() */
100 RX_MGMT_CFG80211_DEAUTH, 94 RX_MGMT_CFG80211_DEAUTH,
101 95
102 /* caller must call cfg80211_send_disassoc() */ 96 /* caller must call cfg80211_send_disassoc() */
103 RX_MGMT_CFG80211_DISASSOC, 97 RX_MGMT_CFG80211_DISASSOC,
104
105 /* caller must tell cfg80211 about internal error */
106 RX_MGMT_CFG80211_ASSOC_ERROR,
107}; 98};
108 99
109/* utils */ 100/* utils */
@@ -759,6 +750,8 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
759 dynamic_ps_enable_work); 750 dynamic_ps_enable_work);
760 struct ieee80211_sub_if_data *sdata = local->ps_sdata; 751 struct ieee80211_sub_if_data *sdata = local->ps_sdata;
761 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 752 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
753 unsigned long flags;
754 int q;
762 755
763 /* can only happen when PS was just disabled anyway */ 756 /* can only happen when PS was just disabled anyway */
764 if (!sdata) 757 if (!sdata)
@@ -767,18 +760,37 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
767 if (local->hw.conf.flags & IEEE80211_CONF_PS) 760 if (local->hw.conf.flags & IEEE80211_CONF_PS)
768 return; 761 return;
769 762
763 /*
 764 * Transmission can be stopped by others, which leads to
 765 * dynamic_ps_timer expiry. Postpone the PS timer if the
 766 * device is not actually idle.
767 */
768 spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
769 for (q = 0; q < local->hw.queues; q++) {
770 if (local->queue_stop_reasons[q]) {
771 spin_unlock_irqrestore(&local->queue_stop_reason_lock,
772 flags);
773 mod_timer(&local->dynamic_ps_timer, jiffies +
774 msecs_to_jiffies(
775 local->hw.conf.dynamic_ps_timeout));
776 return;
777 }
778 }
779 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
780
770 if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) && 781 if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) &&
771 (!(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED))) { 782 (!(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED))) {
772 netif_tx_stop_all_queues(sdata->dev); 783 netif_tx_stop_all_queues(sdata->dev);
773 /*
774 * Flush all the frames queued in the driver before
775 * going to power save
776 */
777 drv_flush(local, false);
778 ieee80211_send_nullfunc(local, sdata, 1);
779 784
780 /* Flush once again to get the tx status of nullfunc frame */ 785 if (drv_tx_frames_pending(local))
781 drv_flush(local, false); 786 mod_timer(&local->dynamic_ps_timer, jiffies +
787 msecs_to_jiffies(
788 local->hw.conf.dynamic_ps_timeout));
789 else {
790 ieee80211_send_nullfunc(local, sdata, 1);
791 /* Flush to get the tx status of nullfunc frame */
792 drv_flush(local, false);
793 }
782 } 794 }
783 795
784 if (!((local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) && 796 if (!((local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) &&
@@ -789,7 +801,7 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
789 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); 801 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
790 } 802 }
791 803
792 netif_tx_start_all_queues(sdata->dev); 804 netif_tx_wake_all_queues(sdata->dev);
793} 805}
794 806
795void ieee80211_dynamic_ps_timer(unsigned long data) 807void ieee80211_dynamic_ps_timer(unsigned long data)
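
The fix above works by refusing to enter powersave while any queue is stopped, re-arming the timer instead: a queue stopped for reasons other than idleness would otherwise be mistaken for idle and its pending traffic stranded behind the nullfunc/PS transition. The same check, factored as an illustrative helper that is not part of the patch:

/* Returns true when powersave entry was deferred. */
static bool ps_defer_if_queues_stopped(struct ieee80211_local *local)
{
	unsigned long flags;
	int q;

	spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
	for (q = 0; q < local->hw.queues; q++) {
		if (local->queue_stop_reasons[q]) {
			spin_unlock_irqrestore(
				&local->queue_stop_reason_lock, flags);
			/* not idle: try again one timeout from now */
			mod_timer(&local->dynamic_ps_timer, jiffies +
				  msecs_to_jiffies(
					local->hw.conf.dynamic_ps_timeout));
			return true;
		}
	}
	spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
	return false;
}
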
@@ -1077,6 +1089,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1077 local->hw.conf.flags &= ~IEEE80211_CONF_PS; 1089 local->hw.conf.flags &= ~IEEE80211_CONF_PS;
1078 config_changed |= IEEE80211_CONF_CHANGE_PS; 1090 config_changed |= IEEE80211_CONF_CHANGE_PS;
1079 } 1091 }
1092 local->ps_sdata = NULL;
1080 1093
1081 ieee80211_hw_config(local, config_changed); 1094 ieee80211_hw_config(local, config_changed);
1082 1095
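Read as a predicate, the queue check added to ieee80211_dynamic_ps_enable_work() above amounts to the sketch below. The field names are taken from this hunk; the helper itself is hypothetical, not a drop-in replacement:

	/* True if any HW queue is stopped for some reason; in that case the
	 * caller re-arms dynamic_ps_timer instead of entering powersave. */
	static bool ps_should_postpone(struct ieee80211_local *local)
	{
		unsigned long flags;
		bool stopped = false;
		int q;

		spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
		for (q = 0; q < local->hw.queues; q++) {
			if (local->queue_stop_reasons[q]) {
				stopped = true;
				break;
			}
		}
		spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
		return stopped;
	}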
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index e37355193ed1..730778a2c90c 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -6,7 +6,7 @@
6#include "driver-ops.h" 6#include "driver-ops.h"
7#include "led.h" 7#include "led.h"
8 8
9int __ieee80211_suspend(struct ieee80211_hw *hw) 9int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
10{ 10{
11 struct ieee80211_local *local = hw_to_local(hw); 11 struct ieee80211_local *local = hw_to_local(hw);
12 struct ieee80211_sub_if_data *sdata; 12 struct ieee80211_sub_if_data *sdata;
@@ -14,12 +14,23 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
14 14
15 ieee80211_scan_cancel(local); 15 ieee80211_scan_cancel(local);
16 16
17 if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
18 mutex_lock(&local->sta_mtx);
19 list_for_each_entry(sta, &local->sta_list, list) {
20 set_sta_flags(sta, WLAN_STA_BLOCK_BA);
21 ieee80211_sta_tear_down_BA_sessions(sta, true);
22 }
23 mutex_unlock(&local->sta_mtx);
24 }
25
17 ieee80211_stop_queues_by_reason(hw, 26 ieee80211_stop_queues_by_reason(hw,
18 IEEE80211_QUEUE_STOP_REASON_SUSPEND); 27 IEEE80211_QUEUE_STOP_REASON_SUSPEND);
19 28
20 /* flush out all packets */ 29 /* flush out all packets */
21 synchronize_net(); 30 synchronize_net();
22 31
32 drv_flush(local, false);
33
23 local->quiescing = true; 34 local->quiescing = true;
24 /* make quiescing visible to timers everywhere */ 35 /* make quiescing visible to timers everywhere */
25 mb(); 36 mb();
@@ -36,6 +47,16 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
36 cancel_work_sync(&local->dynamic_ps_enable_work); 47 cancel_work_sync(&local->dynamic_ps_enable_work);
37 del_timer_sync(&local->dynamic_ps_timer); 48 del_timer_sync(&local->dynamic_ps_timer);
38 49
50 local->wowlan = wowlan && local->open_count;
51 if (local->wowlan) {
52 int err = drv_suspend(local, wowlan);
53 if (err) {
54 local->quiescing = false;
55 return err;
56 }
57 goto suspend;
58 }
59
39 /* disable keys */ 60 /* disable keys */
40 list_for_each_entry(sdata, &local->interfaces, list) 61 list_for_each_entry(sdata, &local->interfaces, list)
41 ieee80211_disable_keys(sdata); 62 ieee80211_disable_keys(sdata);
@@ -43,11 +64,6 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
43 /* tear down aggregation sessions and remove STAs */ 64 /* tear down aggregation sessions and remove STAs */
44 mutex_lock(&local->sta_mtx); 65 mutex_lock(&local->sta_mtx);
45 list_for_each_entry(sta, &local->sta_list, list) { 66 list_for_each_entry(sta, &local->sta_list, list) {
46 if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
47 set_sta_flags(sta, WLAN_STA_BLOCK_BA);
48 ieee80211_sta_tear_down_BA_sessions(sta, true);
49 }
50
51 if (sta->uploaded) { 67 if (sta->uploaded) {
52 sdata = sta->sdata; 68 sdata = sta->sdata;
53 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 69 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
@@ -98,6 +114,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
98 if (local->open_count) 114 if (local->open_count)
99 ieee80211_stop_device(local); 115 ieee80211_stop_device(local);
100 116
117 suspend:
101 local->suspended = true; 118 local->suspended = true;
102 /* need suspended to be visible before quiescing is false */ 119 /* need suspended to be visible before quiescing is false */
103 barrier(); 120 barrier();
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index 778c604d7939..8adac67395f7 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -417,8 +417,8 @@ minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband,
417 tx_time_single = mr->ack_time + mr->perfect_tx_time; 417 tx_time_single = mr->ack_time + mr->perfect_tx_time;
418 418
419 /* contention window */ 419 /* contention window */
420 tx_time_single += t_slot + min(cw, mp->cw_max); 420 tx_time_single += (t_slot * cw) >> 1;
421 cw = (cw << 1) | 1; 421 cw = min((cw << 1) | 1, mp->cw_max);
422 422
423 tx_time += tx_time_single; 423 tx_time += tx_time_single;
424 tx_time_cts += tx_time_single + mi->sp_ack_dur; 424 tx_time_cts += tx_time_single + mi->sp_ack_dur;
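The corrected contention accounting charges the average backoff for the window actually used (E[backoff] = cw/2 slots, i.e. (t_slot * cw) >> 1) and only then doubles cw, capped at mp->cw_max. A hypothetical helper capturing that rule, with min() as in linux/kernel.h:

	/* Charge mean backoff for the current window, then grow the window. */
	static unsigned int avg_backoff(unsigned int t_slot, unsigned int *cw,
					unsigned int cw_max)
	{
		unsigned int t = (t_slot * *cw) >> 1;	/* cw/2 slots on average */

		*cw = min((*cw << 1) | 1, cw_max);	/* exponential, capped */
		return t;
	}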
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index c06aa3ac6b9d..333b5118be6d 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -464,6 +464,7 @@ minstrel_calc_retransmit(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
464 const struct mcs_group *group; 464 const struct mcs_group *group;
465 unsigned int tx_time, tx_time_rtscts, tx_time_data; 465 unsigned int tx_time, tx_time_rtscts, tx_time_data;
466 unsigned int cw = mp->cw_min; 466 unsigned int cw = mp->cw_min;
467 unsigned int ctime = 0;
467 unsigned int t_slot = 9; /* FIXME */ 468 unsigned int t_slot = 9; /* FIXME */
468 unsigned int ampdu_len = MINSTREL_TRUNC(mi->avg_ampdu_len); 469 unsigned int ampdu_len = MINSTREL_TRUNC(mi->avg_ampdu_len);
469 470
@@ -480,13 +481,27 @@ minstrel_calc_retransmit(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
480 481
481 group = &minstrel_mcs_groups[index / MCS_GROUP_RATES]; 482 group = &minstrel_mcs_groups[index / MCS_GROUP_RATES];
482 tx_time_data = group->duration[index % MCS_GROUP_RATES] * ampdu_len; 483 tx_time_data = group->duration[index % MCS_GROUP_RATES] * ampdu_len;
483 tx_time = 2 * (t_slot + mi->overhead + tx_time_data); 484
484 tx_time_rtscts = 2 * (t_slot + mi->overhead_rtscts + tx_time_data); 485 /* Contention time for first 2 tries */
486 ctime = (t_slot * cw) >> 1;
487 cw = min((cw << 1) | 1, mp->cw_max);
488 ctime += (t_slot * cw) >> 1;
489 cw = min((cw << 1) | 1, mp->cw_max);
490
491 /* Total TX time for data and Contention after first 2 tries */
492 tx_time = ctime + 2 * (mi->overhead + tx_time_data);
493 tx_time_rtscts = ctime + 2 * (mi->overhead_rtscts + tx_time_data);
494
495 /* See how many more tries we can fit inside segment size */
485 do { 496 do {
486 cw = (cw << 1) | 1; 497 /* Contention time for this try */
487 cw = min(cw, mp->cw_max); 498 ctime = (t_slot * cw) >> 1;
488 tx_time += cw + t_slot + mi->overhead; 499 cw = min((cw << 1) | 1, mp->cw_max);
489 tx_time_rtscts += cw + t_slot + mi->overhead_rtscts; 500
501 /* Total TX time after this try */
502 tx_time += ctime + mi->overhead + tx_time_data;
503 tx_time_rtscts += ctime + mi->overhead_rtscts + tx_time_data;
504
490 if (tx_time_rtscts < mp->segment_size) 505 if (tx_time_rtscts < mp->segment_size)
491 mr->retry_count_rtscts++; 506 mr->retry_count_rtscts++;
492 } while ((tx_time < mp->segment_size) && 507 } while ((tx_time < mp->segment_size) &&
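To make the same correction concrete with illustrative values: with t_slot = 9 us and cw_min = 15, the first try is charged (9 * 15) >> 1 = 67 us of average backoff, after which cw becomes min((15 << 1) | 1, cw_max) = 31; the second try adds (9 * 31) >> 1 = 139 us, and so on until cw saturates at cw_max. The old loop instead doubled cw first and then added the raw window value without converting slots to time, overstating contention for every retry.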
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index c5d4530d8284..7fa8c6be7bf0 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -143,7 +143,8 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
143 if (status->flag & RX_FLAG_HT) { 143 if (status->flag & RX_FLAG_HT) {
144 /* 144 /*
145 * MCS information is a separate field in radiotap, 145 * MCS information is a separate field in radiotap,
146 * added below. 146 * added below. The byte here is needed as padding
147 * for the channel though, so initialise it to 0.
147 */ 148 */
148 *pos = 0; 149 *pos = 0;
149 } else { 150 } else {
@@ -403,11 +404,13 @@ ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
403 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 404 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
404 struct sk_buff *skb = rx->skb; 405 struct sk_buff *skb = rx->skb;
405 406
406 if (likely(!(status->rx_flags & IEEE80211_RX_IN_SCAN))) 407 if (likely(!(status->rx_flags & IEEE80211_RX_IN_SCAN) &&
408 !local->sched_scanning))
407 return RX_CONTINUE; 409 return RX_CONTINUE;
408 410
409 if (test_bit(SCAN_HW_SCANNING, &local->scanning) || 411 if (test_bit(SCAN_HW_SCANNING, &local->scanning) ||
410 test_bit(SCAN_SW_SCANNING, &local->scanning)) 412 test_bit(SCAN_SW_SCANNING, &local->scanning) ||
413 local->sched_scanning)
411 return ieee80211_scan_rx(rx->sdata, skb); 414 return ieee80211_scan_rx(rx->sdata, skb);
412 415
413 /* scanning finished during invoking of handlers */ 416 /* scanning finished during invoking of handlers */
@@ -487,22 +490,26 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
487 * establishment frame, beacon or probe, drop the frame. 490
488 */ 491 */
489 492
490 if (!rx->sta || sta_plink_state(rx->sta) != PLINK_ESTAB) { 493 if (!rx->sta || sta_plink_state(rx->sta) != NL80211_PLINK_ESTAB) {
491 struct ieee80211_mgmt *mgmt; 494 struct ieee80211_mgmt *mgmt;
492 495
493 if (!ieee80211_is_mgmt(hdr->frame_control)) 496 if (!ieee80211_is_mgmt(hdr->frame_control))
494 return RX_DROP_MONITOR; 497 return RX_DROP_MONITOR;
495 498
496 if (ieee80211_is_action(hdr->frame_control)) { 499 if (ieee80211_is_action(hdr->frame_control)) {
500 u8 category;
497 mgmt = (struct ieee80211_mgmt *)hdr; 501 mgmt = (struct ieee80211_mgmt *)hdr;
498 if (mgmt->u.action.category != WLAN_CATEGORY_MESH_PLINK) 502 category = mgmt->u.action.category;
503 if (category != WLAN_CATEGORY_MESH_ACTION &&
504 category != WLAN_CATEGORY_SELF_PROTECTED)
499 return RX_DROP_MONITOR; 505 return RX_DROP_MONITOR;
500 return RX_CONTINUE; 506 return RX_CONTINUE;
501 } 507 }
502 508
503 if (ieee80211_is_probe_req(hdr->frame_control) || 509 if (ieee80211_is_probe_req(hdr->frame_control) ||
504 ieee80211_is_probe_resp(hdr->frame_control) || 510 ieee80211_is_probe_resp(hdr->frame_control) ||
505 ieee80211_is_beacon(hdr->frame_control)) 511 ieee80211_is_beacon(hdr->frame_control) ||
512 ieee80211_is_auth(hdr->frame_control))
506 return RX_CONTINUE; 513 return RX_CONTINUE;
507 514
508 return RX_DROP_MONITOR; 515 return RX_DROP_MONITOR;
@@ -650,7 +657,7 @@ static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
650 set_release_timer: 657 set_release_timer:
651 658
652 mod_timer(&tid_agg_rx->reorder_timer, 659 mod_timer(&tid_agg_rx->reorder_timer,
653 tid_agg_rx->reorder_time[j] + 660 tid_agg_rx->reorder_time[j] + 1 +
654 HT_RX_REORDER_BUF_TIMEOUT); 661 HT_RX_REORDER_BUF_TIMEOUT);
655 } else { 662 } else {
656 del_timer(&tid_agg_rx->reorder_timer); 663 del_timer(&tid_agg_rx->reorder_timer);
@@ -707,6 +714,8 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
707 /* 714 /*
708 * If the current MPDU is in the right order and nothing else 715 * If the current MPDU is in the right order and nothing else
709 * is stored we can process it directly, no need to buffer it. 716 * is stored we can process it directly, no need to buffer it.
717 * If it is first but there's something stored, we may be able
718 * to release frames after this one.
710 */ 719 */
711 if (mpdu_seq_num == tid_agg_rx->head_seq_num && 720 if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
712 tid_agg_rx->stored_mpdu_num == 0) { 721 tid_agg_rx->stored_mpdu_num == 0) {
@@ -1583,7 +1592,7 @@ ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
1583} 1592}
1584 1593
1585static int 1594static int
1586__ieee80211_data_to_8023(struct ieee80211_rx_data *rx) 1595__ieee80211_data_to_8023(struct ieee80211_rx_data *rx, bool *port_control)
1587{ 1596{
1588 struct ieee80211_sub_if_data *sdata = rx->sdata; 1597 struct ieee80211_sub_if_data *sdata = rx->sdata;
1589 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 1598 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
@@ -1591,6 +1600,7 @@ __ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
1591 struct ethhdr *ehdr; 1600 struct ethhdr *ehdr;
1592 int ret; 1601 int ret;
1593 1602
1603 *port_control = false;
1594 if (ieee80211_has_a4(hdr->frame_control) && 1604 if (ieee80211_has_a4(hdr->frame_control) &&
1595 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta) 1605 sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta)
1596 return -1; 1606 return -1;
@@ -1609,11 +1619,13 @@ __ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
1609 return -1; 1619 return -1;
1610 1620
1611 ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type); 1621 ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type);
1612 if (ret < 0 || !check_port_control) 1622 if (ret < 0)
1613 return ret; 1623 return ret;
1614 1624
1615 ehdr = (struct ethhdr *) rx->skb->data; 1625 ehdr = (struct ethhdr *) rx->skb->data;
1616 if (ehdr->h_proto != rx->sdata->control_port_protocol) 1626 if (ehdr->h_proto == rx->sdata->control_port_protocol)
1627 *port_control = true;
1628 else if (check_port_control)
1617 return -1; 1629 return -1;
1618 1630
1619 return 0; 1631 return 0;
@@ -1771,7 +1783,7 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
1771 1783
1772 ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr, 1784 ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
1773 rx->sdata->vif.type, 1785 rx->sdata->vif.type,
1774 rx->local->hw.extra_tx_headroom); 1786 rx->local->hw.extra_tx_headroom, true);
1775 1787
1776 while (!skb_queue_empty(&frame_list)) { 1788 while (!skb_queue_empty(&frame_list)) {
1777 rx->skb = __skb_dequeue(&frame_list); 1789 rx->skb = __skb_dequeue(&frame_list);
@@ -1914,6 +1926,7 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
1914 struct net_device *dev = sdata->dev; 1926 struct net_device *dev = sdata->dev;
1915 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 1927 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
1916 __le16 fc = hdr->frame_control; 1928 __le16 fc = hdr->frame_control;
1929 bool port_control;
1917 int err; 1930 int err;
1918 1931
1919 if (unlikely(!ieee80211_is_data(hdr->frame_control))) 1932 if (unlikely(!ieee80211_is_data(hdr->frame_control)))
@@ -1930,13 +1943,21 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
1930 sdata->vif.type == NL80211_IFTYPE_AP) 1943 sdata->vif.type == NL80211_IFTYPE_AP)
1931 return RX_DROP_MONITOR; 1944 return RX_DROP_MONITOR;
1932 1945
1933 err = __ieee80211_data_to_8023(rx); 1946 err = __ieee80211_data_to_8023(rx, &port_control);
1934 if (unlikely(err)) 1947 if (unlikely(err))
1935 return RX_DROP_UNUSABLE; 1948 return RX_DROP_UNUSABLE;
1936 1949
1937 if (!ieee80211_frame_allowed(rx, fc)) 1950 if (!ieee80211_frame_allowed(rx, fc))
1938 return RX_DROP_MONITOR; 1951 return RX_DROP_MONITOR;
1939 1952
1953 if (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1954 unlikely(port_control) && sdata->bss) {
1955 sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
1956 u.ap);
1957 dev = sdata->dev;
1958 rx->sdata = sdata;
1959 }
1960
1940 rx->skb->dev = dev; 1961 rx->skb->dev = dev;
1941 1962
1942 dev->stats.rx_packets++; 1963 dev->stats.rx_packets++;
@@ -2189,7 +2210,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
2189 goto handled; 2210 goto handled;
2190 } 2211 }
2191 break; 2212 break;
2192 case WLAN_CATEGORY_MESH_PLINK: 2213 case WLAN_CATEGORY_MESH_ACTION:
2193 if (!ieee80211_vif_is_mesh(&sdata->vif)) 2214 if (!ieee80211_vif_is_mesh(&sdata->vif))
2194 break; 2215 break;
2195 goto queue; 2216 goto queue;
@@ -2352,47 +2373,6 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
2352 return RX_QUEUED; 2373 return RX_QUEUED;
2353} 2374}
2354 2375
2355static void ieee80211_rx_michael_mic_report(struct ieee80211_hdr *hdr,
2356 struct ieee80211_rx_data *rx)
2357{
2358 int keyidx;
2359 unsigned int hdrlen;
2360
2361 hdrlen = ieee80211_hdrlen(hdr->frame_control);
2362 if (rx->skb->len >= hdrlen + 4)
2363 keyidx = rx->skb->data[hdrlen + 3] >> 6;
2364 else
2365 keyidx = -1;
2366
2367 if (!rx->sta) {
2368 /*
2369 * Some hardware seem to generate incorrect Michael MIC
2370 * reports; ignore them to avoid triggering countermeasures.
2371 */
2372 return;
2373 }
2374
2375 if (!ieee80211_has_protected(hdr->frame_control))
2376 return;
2377
2378 if (rx->sdata->vif.type == NL80211_IFTYPE_AP && keyidx) {
2379 /*
2380 * APs with pairwise keys should never receive Michael MIC
2381 * errors for non-zero keyidx because these are reserved for
2382 * group keys and only the AP is sending real multicast
2383 * frames in the BSS.
2384 */
2385 return;
2386 }
2387
2388 if (!ieee80211_is_data(hdr->frame_control) &&
2389 !ieee80211_is_auth(hdr->frame_control))
2390 return;
2391
2392 mac80211_ev_michael_mic_failure(rx->sdata, keyidx, hdr, NULL,
2393 GFP_ATOMIC);
2394}
2395
2396/* TODO: use IEEE80211_RX_FRAGMENTED */ 2376/* TODO: use IEEE80211_RX_FRAGMENTED */
2397static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx, 2377static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
2398 struct ieee80211_rate *rate) 2378 struct ieee80211_rate *rate)
@@ -2736,12 +2716,6 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
2736 if (!prepares) 2716 if (!prepares)
2737 return false; 2717 return false;
2738 2718
2739 if (status->flag & RX_FLAG_MMIC_ERROR) {
2740 if (status->rx_flags & IEEE80211_RX_RA_MATCH)
2741 ieee80211_rx_michael_mic_report(hdr, rx);
2742 return false;
2743 }
2744
2745 if (!consume) { 2719 if (!consume) {
2746 skb = skb_copy(skb, GFP_ATOMIC); 2720 skb = skb_copy(skb, GFP_ATOMIC);
2747 if (!skb) { 2721 if (!skb) {
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 489b6ad200d4..669d2e32efb6 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -170,7 +170,7 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
170 return RX_CONTINUE; 170 return RX_CONTINUE;
171 171
172 if (skb->len < 24) 172 if (skb->len < 24)
173 return RX_DROP_MONITOR; 173 return RX_CONTINUE;
174 174
175 presp = ieee80211_is_probe_resp(fc); 175 presp = ieee80211_is_probe_resp(fc);
176 if (presp) { 176 if (presp) {
@@ -718,6 +718,11 @@ void ieee80211_scan_work(struct work_struct *work)
718 * without scheduling a new work 718 * without scheduling a new work
719 */ 719 */
720 do { 720 do {
721 if (!ieee80211_sdata_running(sdata)) {
722 aborted = true;
723 goto out_complete;
724 }
725
721 switch (local->next_scan_state) { 726 switch (local->next_scan_state) {
722 case SCAN_DECISION: 727 case SCAN_DECISION:
723 /* if no more bands/channels left, complete scan */ 728 /* if no more bands/channels left, complete scan */
@@ -850,3 +855,123 @@ void ieee80211_scan_cancel(struct ieee80211_local *local)
850 } 855 }
851 mutex_unlock(&local->mtx); 856 mutex_unlock(&local->mtx);
852} 857}
858
859int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
860 struct cfg80211_sched_scan_request *req)
861{
862 struct ieee80211_local *local = sdata->local;
863 int ret, i;
864
865 mutex_lock(&sdata->local->mtx);
866
867 if (local->sched_scanning) {
868 ret = -EBUSY;
869 goto out;
870 }
871
872 if (!local->ops->sched_scan_start) {
873 ret = -ENOTSUPP;
874 goto out;
875 }
876
877 for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
878 local->sched_scan_ies.ie[i] = kzalloc(2 +
879 IEEE80211_MAX_SSID_LEN +
880 local->scan_ies_len +
881 req->ie_len,
882 GFP_KERNEL);
883 if (!local->sched_scan_ies.ie[i]) {
884 ret = -ENOMEM;
885 goto out_free;
886 }
887
888 local->sched_scan_ies.len[i] =
889 ieee80211_build_preq_ies(local,
890 local->sched_scan_ies.ie[i],
891 req->ie, req->ie_len, i,
892 (u32) -1, 0);
893 }
894
895 ret = drv_sched_scan_start(local, sdata, req,
896 &local->sched_scan_ies);
897 if (ret == 0) {
898 local->sched_scanning = true;
899 goto out;
900 }
901
902out_free:
903 while (i > 0)
904 kfree(local->sched_scan_ies.ie[--i]);
905out:
906 mutex_unlock(&sdata->local->mtx);
907 return ret;
908}
909
910int ieee80211_request_sched_scan_stop(struct ieee80211_sub_if_data *sdata)
911{
912 struct ieee80211_local *local = sdata->local;
913 int ret = 0, i;
914
915 mutex_lock(&sdata->local->mtx);
916
917 if (!local->ops->sched_scan_stop) {
918 ret = -ENOTSUPP;
919 goto out;
920 }
921
922 if (local->sched_scanning) {
923 for (i = 0; i < IEEE80211_NUM_BANDS; i++)
924 kfree(local->sched_scan_ies.ie[i]);
925
926 drv_sched_scan_stop(local, sdata);
927 local->sched_scanning = false;
928 }
929out:
930 mutex_unlock(&sdata->local->mtx);
931
932 return ret;
933}
934
935void ieee80211_sched_scan_results(struct ieee80211_hw *hw)
936{
937 struct ieee80211_local *local = hw_to_local(hw);
938
939 trace_api_sched_scan_results(local);
940
941 cfg80211_sched_scan_results(hw->wiphy);
942}
943EXPORT_SYMBOL(ieee80211_sched_scan_results);
944
945void ieee80211_sched_scan_stopped_work(struct work_struct *work)
946{
947 struct ieee80211_local *local =
948 container_of(work, struct ieee80211_local,
949 sched_scan_stopped_work);
950 int i;
951
952 mutex_lock(&local->mtx);
953
954 if (!local->sched_scanning) {
955 mutex_unlock(&local->mtx);
956 return;
957 }
958
959 for (i = 0; i < IEEE80211_NUM_BANDS; i++)
960 kfree(local->sched_scan_ies.ie[i]);
961
962 local->sched_scanning = false;
963
964 mutex_unlock(&local->mtx);
965
966 cfg80211_sched_scan_stopped(local->hw.wiphy);
967}
968
969void ieee80211_sched_scan_stopped(struct ieee80211_hw *hw)
970{
971 struct ieee80211_local *local = hw_to_local(hw);
972
973 trace_api_sched_scan_stopped(local);
974
975 ieee80211_queue_work(&local->hw, &local->sched_scan_stopped_work);
976}
977EXPORT_SYMBOL(ieee80211_sched_scan_stopped);
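On the driver side, the contract behind these exports looks roughly like the sketch below; the driver function is hypothetical, and only the two ieee80211_sched_scan_* calls come from this diff:

	/* Called from a hypothetical driver's scheduled-scan event path. */
	static void mydrv_sched_scan_event(struct ieee80211_hw *hw, bool stopped)
	{
		if (stopped)
			/* device ended the scheduled scan on its own */
			ieee80211_sched_scan_stopped(hw);
		else
			/* results available; forwarded to cfg80211/userspace */
			ieee80211_sched_scan_results(hw);
	}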
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 13e8c30adf01..b83870bf60fa 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -67,7 +67,8 @@ static int sta_info_hash_del(struct ieee80211_local *local,
67{ 67{
68 struct sta_info *s; 68 struct sta_info *s;
69 69
70 s = local->sta_hash[STA_HASH(sta->sta.addr)]; 70 s = rcu_dereference_protected(local->sta_hash[STA_HASH(sta->sta.addr)],
71 lockdep_is_held(&local->sta_lock));
71 if (!s) 72 if (!s)
72 return -ENOENT; 73 return -ENOENT;
73 if (s == sta) { 74 if (s == sta) {
@@ -76,9 +77,11 @@ static int sta_info_hash_del(struct ieee80211_local *local,
76 return 0; 77 return 0;
77 } 78 }
78 79
79 while (s->hnext && s->hnext != sta) 80 while (rcu_access_pointer(s->hnext) &&
80 s = s->hnext; 81 rcu_access_pointer(s->hnext) != sta)
81 if (s->hnext) { 82 s = rcu_dereference_protected(s->hnext,
83 lockdep_is_held(&local->sta_lock));
84 if (rcu_access_pointer(s->hnext)) {
82 rcu_assign_pointer(s->hnext, sta->hnext); 85 rcu_assign_pointer(s->hnext, sta->hnext);
83 return 0; 86 return 0;
84 } 87 }
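In isolation, the annotation pattern introduced in this hunk looks as follows; the struct and lock are stand-ins, not mac80211 types. The walk is protected by the spinlock rather than rcu_read_lock(), so each dereference carries a lockdep expression that documents and checks that fact:

	struct node {
		struct node __rcu *next;
	};

	/* Writer-side traversal step, performed under the list's lock. */
	static struct node *next_node_locked(struct node *n, spinlock_t *lock)
	{
		return rcu_dereference_protected(n->next, lockdep_is_held(lock));
	}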
@@ -228,6 +231,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
228{ 231{
229 struct ieee80211_local *local = sdata->local; 232 struct ieee80211_local *local = sdata->local;
230 struct sta_info *sta; 233 struct sta_info *sta;
234 struct timespec uptime;
231 int i; 235 int i;
232 236
233 sta = kzalloc(sizeof(*sta) + local->hw.sta_data_size, gfp); 237 sta = kzalloc(sizeof(*sta) + local->hw.sta_data_size, gfp);
@@ -245,6 +249,8 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
245 sta->sdata = sdata; 249 sta->sdata = sdata;
246 sta->last_rx = jiffies; 250 sta->last_rx = jiffies;
247 251
252 do_posix_clock_monotonic_gettime(&uptime);
253 sta->last_connected = uptime.tv_sec;
248 ewma_init(&sta->avg_signal, 1024, 8); 254 ewma_init(&sta->avg_signal, 1024, 8);
249 255
250 if (sta_prepare_rate_control(local, sta, gfp)) { 256 if (sta_prepare_rate_control(local, sta, gfp)) {
@@ -271,7 +277,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
271#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ 277#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
272 278
273#ifdef CONFIG_MAC80211_MESH 279#ifdef CONFIG_MAC80211_MESH
274 sta->plink_state = PLINK_LISTEN; 280 sta->plink_state = NL80211_PLINK_LISTEN;
275 init_timer(&sta->plink_timer); 281 init_timer(&sta->plink_timer);
276#endif 282#endif
277 283
@@ -584,7 +590,6 @@ static bool sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
584{ 590{
585 unsigned long flags; 591 unsigned long flags;
586 struct sk_buff *skb; 592 struct sk_buff *skb;
587 struct ieee80211_sub_if_data *sdata;
588 593
589 if (skb_queue_empty(&sta->ps_tx_buf)) 594 if (skb_queue_empty(&sta->ps_tx_buf))
590 return false; 595 return false;
@@ -601,7 +606,6 @@ static bool sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
601 if (!skb) 606 if (!skb)
602 break; 607 break;
603 608
604 sdata = sta->sdata;
605 local->total_ps_buffered--; 609 local->total_ps_buffered--;
606#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG 610#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
607 printk(KERN_DEBUG "Buffered frame expired (STA %pM)\n", 611 printk(KERN_DEBUG "Buffered frame expired (STA %pM)\n",
@@ -609,7 +613,8 @@ static bool sta_info_cleanup_expire_buffered(struct ieee80211_local *local,
609#endif 613#endif
610 dev_kfree_skb(skb); 614 dev_kfree_skb(skb);
611 615
612 if (skb_queue_empty(&sta->ps_tx_buf)) 616 if (skb_queue_empty(&sta->ps_tx_buf) &&
617 !test_sta_flags(sta, WLAN_STA_PS_DRIVER_BUF))
613 sta_info_clear_tim_bit(sta); 618 sta_info_clear_tim_bit(sta);
614 } 619 }
615 620
@@ -650,10 +655,12 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
650 if (ret) 655 if (ret)
651 return ret; 656 return ret;
652 657
658 mutex_lock(&local->key_mtx);
653 for (i = 0; i < NUM_DEFAULT_KEYS; i++) 659 for (i = 0; i < NUM_DEFAULT_KEYS; i++)
654 ieee80211_key_free(local, sta->gtk[i]); 660 __ieee80211_key_free(key_mtx_dereference(local, sta->gtk[i]));
655 if (sta->ptk) 661 if (sta->ptk)
656 ieee80211_key_free(local, sta->ptk); 662 __ieee80211_key_free(key_mtx_dereference(local, sta->ptk));
663 mutex_unlock(&local->key_mtx);
657 664
658 sta->dead = true; 665 sta->dead = true;
659 666
@@ -698,6 +705,8 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
698#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ 705#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
699 cancel_work_sync(&sta->drv_unblock_wk); 706 cancel_work_sync(&sta->drv_unblock_wk);
700 707
708 cfg80211_del_sta(sdata->dev, sta->sta.addr, GFP_KERNEL);
709
701 rate_control_remove_sta_debugfs(sta); 710 rate_control_remove_sta_debugfs(sta);
702 ieee80211_sta_debugfs_remove(sta); 711 ieee80211_sta_debugfs_remove(sta);
703 712
@@ -766,9 +775,8 @@ static void sta_info_cleanup(unsigned long data)
766 if (!timer_needed) 775 if (!timer_needed)
767 return; 776 return;
768 777
769 local->sta_cleanup.expires = 778 mod_timer(&local->sta_cleanup,
770 round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL); 779 round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL));
771 add_timer(&local->sta_cleanup);
772} 780}
773 781
774void sta_info_init(struct ieee80211_local *local) 782void sta_info_init(struct ieee80211_local *local)
@@ -781,14 +789,6 @@ void sta_info_init(struct ieee80211_local *local)
781 789
782 setup_timer(&local->sta_cleanup, sta_info_cleanup, 790 setup_timer(&local->sta_cleanup, sta_info_cleanup,
783 (unsigned long)local); 791 (unsigned long)local);
784 local->sta_cleanup.expires =
785 round_jiffies(jiffies + STA_INFO_CLEANUP_INTERVAL);
786}
787
788int sta_info_start(struct ieee80211_local *local)
789{
790 add_timer(&local->sta_cleanup);
791 return 0;
792} 792}
793 793
794void sta_info_stop(struct ieee80211_local *local) 794void sta_info_stop(struct ieee80211_local *local)
@@ -900,6 +900,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
900 struct ieee80211_local *local = sdata->local; 900 struct ieee80211_local *local = sdata->local;
901 int sent, buffered; 901 int sent, buffered;
902 902
903 clear_sta_flags(sta, WLAN_STA_PS_DRIVER_BUF);
903 if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS)) 904 if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS))
904 drv_sta_notify(local, sdata, STA_NOTIFY_AWAKE, &sta->sta); 905 drv_sta_notify(local, sdata, STA_NOTIFY_AWAKE, &sta->sta);
905 906
@@ -992,3 +993,12 @@ void ieee80211_sta_block_awake(struct ieee80211_hw *hw,
992 ieee80211_queue_work(hw, &sta->drv_unblock_wk); 993 ieee80211_queue_work(hw, &sta->drv_unblock_wk);
993} 994}
994EXPORT_SYMBOL(ieee80211_sta_block_awake); 995EXPORT_SYMBOL(ieee80211_sta_block_awake);
996
997void ieee80211_sta_set_tim(struct ieee80211_sta *pubsta)
998{
999 struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
1000
1001 set_sta_flags(sta, WLAN_STA_PS_DRIVER_BUF);
1002 sta_info_set_tim_bit(sta);
1003}
1004EXPORT_SYMBOL(ieee80211_sta_set_tim);
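A hypothetical driver-side use of the new export: when the device buffers frames for a dozing station in its own queues, it asks mac80211 to set the station's TIM bit, and WLAN_STA_PS_DRIVER_BUF keeps that bit set until the station wakes up (it is cleared in ieee80211_sta_ps_deliver_wakeup() above):

	static void mydrv_buffer_ps_frame(struct ieee80211_sta *pubsta,
					  struct sk_buff *skb,
					  struct sk_buff_head *ps_queue)
	{
		skb_queue_tail(ps_queue, skb);	/* driver-private PS queue */
		ieee80211_sta_set_tim(pubsta);	/* advertise buffered traffic */
	}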
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index b2f95966c7f4..c6ae8718bd57 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -43,6 +43,8 @@
43 * be in the queues 43 * be in the queues
44 * @WLAN_STA_PSPOLL: Station sent PS-poll while driver was keeping 44 * @WLAN_STA_PSPOLL: Station sent PS-poll while driver was keeping
45 * station in power-save mode, reply when the driver unblocks. 45 * station in power-save mode, reply when the driver unblocks.
46 * @WLAN_STA_PS_DRIVER_BUF: Station has frames pending in driver internal
47 * buffers. Automatically cleared on station wake-up.
46 */ 48 */
47enum ieee80211_sta_info_flags { 49enum ieee80211_sta_info_flags {
48 WLAN_STA_AUTH = 1<<0, 50 WLAN_STA_AUTH = 1<<0,
@@ -58,6 +60,7 @@ enum ieee80211_sta_info_flags {
58 WLAN_STA_BLOCK_BA = 1<<11, 60 WLAN_STA_BLOCK_BA = 1<<11,
59 WLAN_STA_PS_DRIVER = 1<<12, 61 WLAN_STA_PS_DRIVER = 1<<12,
60 WLAN_STA_PSPOLL = 1<<13, 62 WLAN_STA_PSPOLL = 1<<13,
63 WLAN_STA_PS_DRIVER_BUF = 1<<14,
61}; 64};
62 65
63#define STA_TID_NUM 16 66#define STA_TID_NUM 16
@@ -149,6 +152,7 @@ struct tid_ampdu_rx {
149 * 152 *
150 * @tid_rx: aggregation info for Rx per TID -- RCU protected 153 * @tid_rx: aggregation info for Rx per TID -- RCU protected
151 * @tid_tx: aggregation info for Tx per TID 154 * @tid_tx: aggregation info for Tx per TID
155 * @tid_start_tx: sessions where start was requested
152 * @addba_req_num: number of times addBA request has been sent. 156 * @addba_req_num: number of times addBA request has been sent.
153 * @dialog_token_allocator: dialog token enumerator for each new session; 157 * @dialog_token_allocator: dialog token enumerator for each new session;
154 * @work: work struct for starting/stopping aggregation 158 * @work: work struct for starting/stopping aggregation
@@ -160,40 +164,18 @@ struct tid_ampdu_rx {
160struct sta_ampdu_mlme { 164struct sta_ampdu_mlme {
161 struct mutex mtx; 165 struct mutex mtx;
162 /* rx */ 166 /* rx */
163 struct tid_ampdu_rx *tid_rx[STA_TID_NUM]; 167 struct tid_ampdu_rx __rcu *tid_rx[STA_TID_NUM];
164 unsigned long tid_rx_timer_expired[BITS_TO_LONGS(STA_TID_NUM)]; 168 unsigned long tid_rx_timer_expired[BITS_TO_LONGS(STA_TID_NUM)];
165 /* tx */ 169 /* tx */
166 struct work_struct work; 170 struct work_struct work;
167 struct tid_ampdu_tx *tid_tx[STA_TID_NUM]; 171 struct tid_ampdu_tx __rcu *tid_tx[STA_TID_NUM];
172 struct tid_ampdu_tx *tid_start_tx[STA_TID_NUM];
168 u8 addba_req_num[STA_TID_NUM]; 173 u8 addba_req_num[STA_TID_NUM];
169 u8 dialog_token_allocator; 174 u8 dialog_token_allocator;
170}; 175};
171 176
172 177
173/** 178/**
174 * enum plink_state - state of a mesh peer link finite state machine
175 *
176 * @PLINK_LISTEN: initial state, considered the implicit state of non existent
177 * mesh peer links
178 * @PLINK_OPN_SNT: mesh plink open frame has been sent to this mesh peer
179 * @PLINK_OPN_RCVD: mesh plink open frame has been received from this mesh peer
180 * @PLINK_CNF_RCVD: mesh plink confirm frame has been received from this mesh
181 * peer
182 * @PLINK_ESTAB: mesh peer link is established
183 * @PLINK_HOLDING: mesh peer link is being closed or cancelled
184 * @PLINK_BLOCKED: all frames transmitted from this mesh plink are discarded
185 */
186enum plink_state {
187 PLINK_LISTEN,
188 PLINK_OPN_SNT,
189 PLINK_OPN_RCVD,
190 PLINK_CNF_RCVD,
191 PLINK_ESTAB,
192 PLINK_HOLDING,
193 PLINK_BLOCKED
194};
195
196/**
197 * struct sta_info - STA information 179 * struct sta_info - STA information
198 * 180 *
199 * This structure collects information about a station that 181 * This structure collects information about a station that
@@ -226,6 +208,7 @@ enum plink_state {
226 * @rx_bytes: Number of bytes received from this STA 208 * @rx_bytes: Number of bytes received from this STA
227 * @wep_weak_iv_count: number of weak WEP IVs received from this station 209 * @wep_weak_iv_count: number of weak WEP IVs received from this station
228 * @last_rx: time (in jiffies) when last frame was received from this STA 210 * @last_rx: time (in jiffies) when last frame was received from this STA
211 * @last_connected: time (in seconds) when a station got connected
229 * @num_duplicates: number of duplicate frames received from this STA 212 * @num_duplicates: number of duplicate frames received from this STA
230 * @rx_fragments: number of received MPDUs 213 * @rx_fragments: number of received MPDUs
231 * @rx_dropped: number of dropped MPDUs from this STA 214 * @rx_dropped: number of dropped MPDUs from this STA
@@ -260,11 +243,11 @@ enum plink_state {
260struct sta_info { 243struct sta_info {
261 /* General information, mostly static */ 244 /* General information, mostly static */
262 struct list_head list; 245 struct list_head list;
263 struct sta_info *hnext; 246 struct sta_info __rcu *hnext;
264 struct ieee80211_local *local; 247 struct ieee80211_local *local;
265 struct ieee80211_sub_if_data *sdata; 248 struct ieee80211_sub_if_data *sdata;
266 struct ieee80211_key *gtk[NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS]; 249 struct ieee80211_key __rcu *gtk[NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS];
267 struct ieee80211_key *ptk; 250 struct ieee80211_key __rcu *ptk;
268 struct rate_control_ref *rate_ctrl; 251 struct rate_control_ref *rate_ctrl;
269 void *rate_ctrl_priv; 252 void *rate_ctrl_priv;
270 spinlock_t lock; 253 spinlock_t lock;
@@ -295,6 +278,7 @@ struct sta_info {
295 unsigned long rx_packets, rx_bytes; 278 unsigned long rx_packets, rx_bytes;
296 unsigned long wep_weak_iv_count; 279 unsigned long wep_weak_iv_count;
297 unsigned long last_rx; 280 unsigned long last_rx;
281 long last_connected;
298 unsigned long num_duplicates; 282 unsigned long num_duplicates;
299 unsigned long rx_fragments; 283 unsigned long rx_fragments;
300 unsigned long rx_dropped; 284 unsigned long rx_dropped;
@@ -334,7 +318,7 @@ struct sta_info {
334 u8 plink_retries; 318 u8 plink_retries;
335 bool ignore_plink_timer; 319 bool ignore_plink_timer;
336 bool plink_timer_was_running; 320 bool plink_timer_was_running;
337 enum plink_state plink_state; 321 enum nl80211_plink_state plink_state;
338 u32 plink_timeout; 322 u32 plink_timeout;
339 struct timer_list plink_timer; 323 struct timer_list plink_timer;
340#endif 324#endif
@@ -352,12 +336,12 @@ struct sta_info {
352 struct ieee80211_sta sta; 336 struct ieee80211_sta sta;
353}; 337};
354 338
355static inline enum plink_state sta_plink_state(struct sta_info *sta) 339static inline enum nl80211_plink_state sta_plink_state(struct sta_info *sta)
356{ 340{
357#ifdef CONFIG_MAC80211_MESH 341#ifdef CONFIG_MAC80211_MESH
358 return sta->plink_state; 342 return sta->plink_state;
359#endif 343#endif
360 return PLINK_LISTEN; 344 return NL80211_PLINK_LISTEN;
361} 345}
362 346
363static inline void set_sta_flags(struct sta_info *sta, const u32 flags) 347static inline void set_sta_flags(struct sta_info *sta, const u32 flags)
@@ -416,7 +400,16 @@ static inline u32 get_sta_flags(struct sta_info *sta)
416 return ret; 400 return ret;
417} 401}
418 402
403void ieee80211_assign_tid_tx(struct sta_info *sta, int tid,
404 struct tid_ampdu_tx *tid_tx);
419 405
406static inline struct tid_ampdu_tx *
407rcu_dereference_protected_tid_tx(struct sta_info *sta, int tid)
408{
409 return rcu_dereference_protected(sta->ampdu_mlme.tid_tx[tid],
410 lockdep_is_held(&sta->lock) ||
411 lockdep_is_held(&sta->ampdu_mlme.mtx));
412}
420 413
421#define STA_HASH_SIZE 256 414#define STA_HASH_SIZE 256
422#define STA_HASH(sta) (sta[5]) 415#define STA_HASH(sta) (sta[5])
@@ -497,7 +490,6 @@ void sta_info_set_tim_bit(struct sta_info *sta);
497void sta_info_clear_tim_bit(struct sta_info *sta); 490void sta_info_clear_tim_bit(struct sta_info *sta);
498 491
499void sta_info_init(struct ieee80211_local *local); 492void sta_info_init(struct ieee80211_local *local);
500int sta_info_start(struct ieee80211_local *local);
501void sta_info_stop(struct ieee80211_local *local); 493void sta_info_stop(struct ieee80211_local *local);
502int sta_info_flush(struct ieee80211_local *local, 494int sta_info_flush(struct ieee80211_local *local,
503 struct ieee80211_sub_if_data *sdata); 495 struct ieee80211_sub_if_data *sdata);
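A minimal usage sketch for the new helper; the caller must hold sta->lock or the aggregation mutex, which is exactly what the lockdep expression above asserts (the function itself is hypothetical):

	static bool tid_tx_active(struct sta_info *sta, int tid)
	{
		lockdep_assert_held(&sta->ampdu_mlme.mtx);
		return rcu_dereference_protected_tid_tx(sta, tid) != NULL;
	}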
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index b936dd29e92b..1658efaa2e8e 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -189,16 +189,19 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
189 bool acked; 189 bool acked;
190 190
191 for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { 191 for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
192 /* the HW cannot have attempted that rate */ 192 if (info->status.rates[i].idx < 0) {
193 if (i >= hw->max_report_rates) { 193 break;
194 } else if (i >= hw->max_report_rates) {
195 /* the HW cannot have attempted that rate */
194 info->status.rates[i].idx = -1; 196 info->status.rates[i].idx = -1;
195 info->status.rates[i].count = 0; 197 info->status.rates[i].count = 0;
196 } else if (info->status.rates[i].idx >= 0) { 198 break;
197 rates_idx = i;
198 } 199 }
199 200
200 retry_count += info->status.rates[i].count; 201 retry_count += info->status.rates[i].count;
201 } 202 }
203 rates_idx = i - 1;
204
202 if (retry_count < 0) 205 if (retry_count < 0)
203 retry_count = 0; 206 retry_count = 0;
204 207
@@ -443,3 +446,11 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
443 dev_kfree_skb(skb); 446 dev_kfree_skb(skb);
444} 447}
445EXPORT_SYMBOL(ieee80211_tx_status); 448EXPORT_SYMBOL(ieee80211_tx_status);
449
450void ieee80211_report_low_ack(struct ieee80211_sta *pubsta, u32 num_packets)
451{
452 struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
453 cfg80211_cqm_pktloss_notify(sta->sdata->dev, sta->sta.addr,
454 num_packets, GFP_ATOMIC);
455}
456EXPORT_SYMBOL(ieee80211_report_low_ack);
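A sketch of a hypothetical caller for the new export: a driver that tracks consecutive un-ACKed frames per station reports the loss so that cfg80211 can raise its CQM packet-loss event (the threshold is illustrative):

	static void mydrv_tx_fail(struct ieee80211_sta *pubsta, u32 failed)
	{
		if (failed > 50)	/* arbitrary driver-chosen threshold */
			ieee80211_report_low_ack(pubsta, failed);
	}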
diff --git a/net/mac80211/tkip.c b/net/mac80211/tkip.c
index e840c9cd46db..757e4eb2baf7 100644
--- a/net/mac80211/tkip.c
+++ b/net/mac80211/tkip.c
@@ -202,7 +202,7 @@ EXPORT_SYMBOL(ieee80211_get_tkip_key);
202 * @payload_len is the length of payload (_not_ including IV/ICV length). 202 * @payload_len is the length of payload (_not_ including IV/ICV length).
203 * @ta is the transmitter addresses. 203 * @ta is the transmitter addresses.
204 */ 204 */
205int ieee80211_tkip_encrypt_data(struct crypto_blkcipher *tfm, 205int ieee80211_tkip_encrypt_data(struct crypto_cipher *tfm,
206 struct ieee80211_key *key, 206 struct ieee80211_key *key,
207 u8 *pos, size_t payload_len, u8 *ta) 207 u8 *pos, size_t payload_len, u8 *ta)
208{ 208{
@@ -223,7 +223,7 @@ int ieee80211_tkip_encrypt_data(struct crypto_blkcipher *tfm,
223 * beginning of the buffer containing IEEE 802.11 header payload, i.e., 223 * beginning of the buffer containing IEEE 802.11 header payload, i.e.,
224 * including IV, Ext. IV, real data, Michael MIC, ICV. @payload_len is the 224 * including IV, Ext. IV, real data, Michael MIC, ICV. @payload_len is the
225 * length of payload, including IV, Ext. IV, MIC, ICV. */ 225 * length of payload, including IV, Ext. IV, MIC, ICV. */
226int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm, 226int ieee80211_tkip_decrypt_data(struct crypto_cipher *tfm,
227 struct ieee80211_key *key, 227 struct ieee80211_key *key,
228 u8 *payload, size_t payload_len, u8 *ta, 228 u8 *payload, size_t payload_len, u8 *ta,
229 u8 *ra, int only_iv, int queue, 229 u8 *ra, int only_iv, int queue,
diff --git a/net/mac80211/tkip.h b/net/mac80211/tkip.h
index 7e83dee976fa..1cab9c86978f 100644
--- a/net/mac80211/tkip.h
+++ b/net/mac80211/tkip.h
@@ -15,7 +15,7 @@
15 15
16u8 *ieee80211_tkip_add_iv(u8 *pos, struct ieee80211_key *key, u16 iv16); 16u8 *ieee80211_tkip_add_iv(u8 *pos, struct ieee80211_key *key, u16 iv16);
17 17
18int ieee80211_tkip_encrypt_data(struct crypto_blkcipher *tfm, 18int ieee80211_tkip_encrypt_data(struct crypto_cipher *tfm,
19 struct ieee80211_key *key, 19 struct ieee80211_key *key,
20 u8 *pos, size_t payload_len, u8 *ta); 20 u8 *pos, size_t payload_len, u8 *ta);
21enum { 21enum {
@@ -24,7 +24,7 @@ enum {
24 TKIP_DECRYPT_INVALID_KEYIDX = -2, 24 TKIP_DECRYPT_INVALID_KEYIDX = -2,
25 TKIP_DECRYPT_REPLAY = -3, 25 TKIP_DECRYPT_REPLAY = -3,
26}; 26};
27int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm, 27int ieee80211_tkip_decrypt_data(struct crypto_cipher *tfm,
28 struct ieee80211_key *key, 28 struct ieee80211_key *key,
29 u8 *payload, size_t payload_len, u8 *ta, 29 u8 *payload, size_t payload_len, u8 *ta,
30 u8 *ra, int only_iv, int queue, 30 u8 *ra, int only_iv, int queue,
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index bd1224fd216a..3104c844b544 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1040,14 +1040,11 @@ static bool __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
1040 struct ieee80211_radiotap_iterator iterator; 1040 struct ieee80211_radiotap_iterator iterator;
1041 struct ieee80211_radiotap_header *rthdr = 1041 struct ieee80211_radiotap_header *rthdr =
1042 (struct ieee80211_radiotap_header *) skb->data; 1042 (struct ieee80211_radiotap_header *) skb->data;
1043 struct ieee80211_supported_band *sband;
1044 bool hw_frag; 1043 bool hw_frag;
1045 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1044 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1046 int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len, 1045 int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len,
1047 NULL); 1046 NULL);
1048 1047
1049 sband = tx->local->hw.wiphy->bands[tx->channel->band];
1050
1051 info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; 1048 info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
1052 tx->flags &= ~IEEE80211_TX_FRAGMENTED; 1049 tx->flags &= ~IEEE80211_TX_FRAGMENTED;
1053 1050
@@ -1154,7 +1151,7 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
1154 * packet pass through because splicing the frames 1151 * packet pass through because splicing the frames
1155 * back is already done. 1152 * back is already done.
1156 */ 1153 */
1157 tid_tx = tx->sta->ampdu_mlme.tid_tx[tid]; 1154 tid_tx = rcu_dereference_protected_tid_tx(tx->sta, tid);
1158 1155
1159 if (!tid_tx) { 1156 if (!tid_tx) {
1160 /* do nothing, let packet pass through */ 1157 /* do nothing, let packet pass through */
@@ -1446,11 +1443,8 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
1446 struct ieee80211_tx_data tx; 1443 struct ieee80211_tx_data tx;
1447 ieee80211_tx_result res_prepare; 1444 ieee80211_tx_result res_prepare;
1448 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1445 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1449 u16 queue;
1450 bool result = true; 1446 bool result = true;
1451 1447
1452 queue = skb_get_queue_mapping(skb);
1453
1454 if (unlikely(skb->len < 10)) { 1448 if (unlikely(skb->len < 10)) {
1455 dev_kfree_skb(skb); 1449 dev_kfree_skb(skb);
1456 return true; 1450 return true;
@@ -1766,6 +1760,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1766 ret = NETDEV_TX_OK; 1760 ret = NETDEV_TX_OK;
1767 goto fail; 1761 goto fail;
1768 } 1762 }
1763 rcu_read_lock();
1769 if (!is_multicast_ether_addr(skb->data)) 1764 if (!is_multicast_ether_addr(skb->data))
1770 mppath = mpp_path_lookup(skb->data, sdata); 1765 mppath = mpp_path_lookup(skb->data, sdata);
1771 1766
@@ -1780,13 +1775,13 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1780 !(mppath && compare_ether_addr(mppath->mpp, skb->data))) { 1775 !(mppath && compare_ether_addr(mppath->mpp, skb->data))) {
1781 hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc, 1776 hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
1782 skb->data, skb->data + ETH_ALEN); 1777 skb->data, skb->data + ETH_ALEN);
1778 rcu_read_unlock();
1783 meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr, 1779 meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr,
1784 sdata, NULL, NULL); 1780 sdata, NULL, NULL);
1785 } else { 1781 } else {
1786 int is_mesh_mcast = 1; 1782 int is_mesh_mcast = 1;
1787 const u8 *mesh_da; 1783 const u8 *mesh_da;
1788 1784
1789 rcu_read_lock();
1790 if (is_multicast_ether_addr(skb->data)) 1785 if (is_multicast_ether_addr(skb->data))
1791 /* DA TA mSA AE:SA */ 1786 /* DA TA mSA AE:SA */
1792 mesh_da = skb->data; 1787 mesh_da = skb->data;
@@ -2266,7 +2261,7 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2266 2261
2267 /* headroom, head length, tail length and maximum TIM length */ 2262 /* headroom, head length, tail length and maximum TIM length */
2268 skb = dev_alloc_skb(local->tx_headroom + 400 + 2263 skb = dev_alloc_skb(local->tx_headroom + 400 +
2269 sdata->u.mesh.vendor_ie_len); 2264 sdata->u.mesh.ie_len);
2270 if (!skb) 2265 if (!skb)
2271 goto out; 2266 goto out;
2272 2267
@@ -2489,7 +2484,6 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
2489{ 2484{
2490 struct ieee80211_local *local = hw_to_local(hw); 2485 struct ieee80211_local *local = hw_to_local(hw);
2491 struct sk_buff *skb = NULL; 2486 struct sk_buff *skb = NULL;
2492 struct sta_info *sta;
2493 struct ieee80211_tx_data tx; 2487 struct ieee80211_tx_data tx;
2494 struct ieee80211_sub_if_data *sdata; 2488 struct ieee80211_sub_if_data *sdata;
2495 struct ieee80211_if_ap *bss = NULL; 2489 struct ieee80211_if_ap *bss = NULL;
@@ -2531,7 +2525,6 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
2531 2525
2532 info = IEEE80211_SKB_CB(skb); 2526 info = IEEE80211_SKB_CB(skb);
2533 2527
2534 sta = tx.sta;
2535 tx.flags |= IEEE80211_TX_PS_BUFFERED; 2528 tx.flags |= IEEE80211_TX_PS_BUFFERED;
2536 tx.channel = local->hw.conf.channel; 2529 tx.channel = local->hw.conf.channel;
2537 info->band = tx.channel->band; 2530 info->band = tx.channel->band;
@@ -2551,8 +2544,9 @@ void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
2551 skb_set_network_header(skb, 0); 2544 skb_set_network_header(skb, 0);
2552 skb_set_transport_header(skb, 0); 2545 skb_set_transport_header(skb, 0);
2553 2546
2554 /* send all internal mgmt frames on VO */ 2547 /* Send all internal mgmt frames on VO. Accordingly set TID to 7. */
2555 skb_set_queue_mapping(skb, 0); 2548 skb_set_queue_mapping(skb, IEEE80211_AC_VO);
2549 skb->priority = 7;
2556 2550
2557 /* 2551 /*
2558 * The other path calling ieee80211_xmit is from the tasklet, 2552 * The other path calling ieee80211_xmit is from the tasklet,
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 556647a910ac..d3fe2d237485 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1125,9 +1125,27 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1125 struct sta_info *sta; 1125 struct sta_info *sta;
1126 int res; 1126 int res;
1127 1127
1128#ifdef CONFIG_PM
1128 if (local->suspended) 1129 if (local->suspended)
1129 local->resuming = true; 1130 local->resuming = true;
1130 1131
1132 if (local->wowlan) {
1133 local->wowlan = false;
1134 res = drv_resume(local);
1135 if (res < 0) {
1136 local->resuming = false;
1137 return res;
1138 }
1139 if (res == 0)
1140 goto wake_up;
1141 WARN_ON(res > 1);
1142 /*
1143 * res is 1, which means the driver requested
1144 * to go through a regular reset on wakeup.
1145 */
1146 }
1147#endif
1148
1131 /* restart hardware */ 1149 /* restart hardware */
1132 if (local->open_count) { 1150 if (local->open_count) {
1133 /* 1151 /*
@@ -1258,6 +1276,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1258 if (ieee80211_sdata_running(sdata)) 1276 if (ieee80211_sdata_running(sdata))
1259 ieee80211_enable_keys(sdata); 1277 ieee80211_enable_keys(sdata);
1260 1278
1279 wake_up:
1261 ieee80211_wake_queues_by_reason(hw, 1280 ieee80211_wake_queues_by_reason(hw,
1262 IEEE80211_QUEUE_STOP_REASON_SUSPEND); 1281 IEEE80211_QUEUE_STOP_REASON_SUSPEND);
1263 1282
@@ -1290,7 +1309,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1290 } 1309 }
1291 } 1310 }
1292 1311
1293 add_timer(&local->sta_cleanup); 1312 mod_timer(&local->sta_cleanup, jiffies + 1);
1294 1313
1295 mutex_lock(&local->sta_mtx); 1314 mutex_lock(&local->sta_mtx);
1296 list_for_each_entry(sta, &local->sta_list, list) 1315 list_for_each_entry(sta, &local->sta_list, list)
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
index 2ff6d1e3ed21..a1c6bfd55f0f 100644
--- a/net/mac80211/wep.c
+++ b/net/mac80211/wep.c
@@ -30,17 +30,15 @@ int ieee80211_wep_init(struct ieee80211_local *local)
30 /* start WEP IV from a random value */ 30 /* start WEP IV from a random value */
31 get_random_bytes(&local->wep_iv, WEP_IV_LEN); 31 get_random_bytes(&local->wep_iv, WEP_IV_LEN);
32 32
33 local->wep_tx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 33 local->wep_tx_tfm = crypto_alloc_cipher("arc4", 0, CRYPTO_ALG_ASYNC);
34 CRYPTO_ALG_ASYNC);
35 if (IS_ERR(local->wep_tx_tfm)) { 34 if (IS_ERR(local->wep_tx_tfm)) {
36 local->wep_rx_tfm = ERR_PTR(-EINVAL); 35 local->wep_rx_tfm = ERR_PTR(-EINVAL);
37 return PTR_ERR(local->wep_tx_tfm); 36 return PTR_ERR(local->wep_tx_tfm);
38 } 37 }
39 38
40 local->wep_rx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 39 local->wep_rx_tfm = crypto_alloc_cipher("arc4", 0, CRYPTO_ALG_ASYNC);
41 CRYPTO_ALG_ASYNC);
42 if (IS_ERR(local->wep_rx_tfm)) { 40 if (IS_ERR(local->wep_rx_tfm)) {
43 crypto_free_blkcipher(local->wep_tx_tfm); 41 crypto_free_cipher(local->wep_tx_tfm);
44 local->wep_tx_tfm = ERR_PTR(-EINVAL); 42 local->wep_tx_tfm = ERR_PTR(-EINVAL);
45 return PTR_ERR(local->wep_rx_tfm); 43 return PTR_ERR(local->wep_rx_tfm);
46 } 44 }
@@ -51,9 +49,9 @@ int ieee80211_wep_init(struct ieee80211_local *local)
51void ieee80211_wep_free(struct ieee80211_local *local) 49void ieee80211_wep_free(struct ieee80211_local *local)
52{ 50{
53 if (!IS_ERR(local->wep_tx_tfm)) 51 if (!IS_ERR(local->wep_tx_tfm))
54 crypto_free_blkcipher(local->wep_tx_tfm); 52 crypto_free_cipher(local->wep_tx_tfm);
55 if (!IS_ERR(local->wep_rx_tfm)) 53 if (!IS_ERR(local->wep_rx_tfm))
56 crypto_free_blkcipher(local->wep_rx_tfm); 54 crypto_free_cipher(local->wep_rx_tfm);
57} 55}
58 56
59static inline bool ieee80211_wep_weak_iv(u32 iv, int keylen) 57static inline bool ieee80211_wep_weak_iv(u32 iv, int keylen)
@@ -127,12 +125,11 @@ static void ieee80211_wep_remove_iv(struct ieee80211_local *local,
127/* Perform WEP encryption using given key. data buffer must have tailroom 125/* Perform WEP encryption using given key. data buffer must have tailroom
128 * for 4-byte ICV. data_len must not include this ICV. Note: this function 126 * for 4-byte ICV. data_len must not include this ICV. Note: this function
129 * does _not_ add IV. data = RC4(data | CRC32(data)) */ 127 * does _not_ add IV. data = RC4(data | CRC32(data)) */
130int ieee80211_wep_encrypt_data(struct crypto_blkcipher *tfm, u8 *rc4key, 128int ieee80211_wep_encrypt_data(struct crypto_cipher *tfm, u8 *rc4key,
131 size_t klen, u8 *data, size_t data_len) 129 size_t klen, u8 *data, size_t data_len)
132{ 130{
133 struct blkcipher_desc desc = { .tfm = tfm };
134 struct scatterlist sg;
135 __le32 icv; 131 __le32 icv;
132 int i;
136 133
137 if (IS_ERR(tfm)) 134 if (IS_ERR(tfm))
138 return -1; 135 return -1;
@@ -140,9 +137,9 @@ int ieee80211_wep_encrypt_data(struct crypto_blkcipher *tfm, u8 *rc4key,
140 icv = cpu_to_le32(~crc32_le(~0, data, data_len)); 137 icv = cpu_to_le32(~crc32_le(~0, data, data_len));
141 put_unaligned(icv, (__le32 *)(data + data_len)); 138 put_unaligned(icv, (__le32 *)(data + data_len));
142 139
143 crypto_blkcipher_setkey(tfm, rc4key, klen); 140 crypto_cipher_setkey(tfm, rc4key, klen);
144 sg_init_one(&sg, data, data_len + WEP_ICV_LEN); 141 for (i = 0; i < data_len + WEP_ICV_LEN; i++)
145 crypto_blkcipher_encrypt(&desc, &sg, &sg, sg.length); 142 crypto_cipher_encrypt_one(tfm, data + i, data + i);
146 143
147 return 0; 144 return 0;
148} 145}
@@ -186,19 +183,18 @@ int ieee80211_wep_encrypt(struct ieee80211_local *local,
186/* Perform WEP decryption using given key. data buffer includes encrypted 183/* Perform WEP decryption using given key. data buffer includes encrypted
187 * payload, including 4-byte ICV, but _not_ IV. data_len must not include ICV. 184 * payload, including 4-byte ICV, but _not_ IV. data_len must not include ICV.
188 * Return 0 on success and -1 on ICV mismatch. */ 185 * Return 0 on success and -1 on ICV mismatch. */
189int ieee80211_wep_decrypt_data(struct crypto_blkcipher *tfm, u8 *rc4key, 186int ieee80211_wep_decrypt_data(struct crypto_cipher *tfm, u8 *rc4key,
190 size_t klen, u8 *data, size_t data_len) 187 size_t klen, u8 *data, size_t data_len)
191{ 188{
192 struct blkcipher_desc desc = { .tfm = tfm };
193 struct scatterlist sg;
194 __le32 crc; 189 __le32 crc;
190 int i;
195 191
196 if (IS_ERR(tfm)) 192 if (IS_ERR(tfm))
197 return -1; 193 return -1;
198 194
199 crypto_blkcipher_setkey(tfm, rc4key, klen); 195 crypto_cipher_setkey(tfm, rc4key, klen);
200 sg_init_one(&sg, data, data_len + WEP_ICV_LEN); 196 for (i = 0; i < data_len + WEP_ICV_LEN; i++)
201 crypto_blkcipher_decrypt(&desc, &sg, &sg, sg.length); 197 crypto_cipher_decrypt_one(tfm, data + i, data + i);
202 198
203 crc = cpu_to_le32(~crc32_le(~0, data, data_len)); 199 crc = cpu_to_le32(~crc32_le(~0, data, data_len));
204 if (memcmp(&crc, data + data_len, WEP_ICV_LEN) != 0) 200 if (memcmp(&crc, data + data_len, WEP_ICV_LEN) != 0)
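The conversion boils down to the pattern below: "arc4" is registered as a blocksize-1 cipher, so the buffer is transformed in place one byte at a time instead of through a scatterlist. A self-contained sketch using only the crypto API calls that appear in this diff:

	static int arc4_xform(const u8 *key, size_t klen, u8 *buf, size_t len)
	{
		struct crypto_cipher *tfm;
		size_t i;

		tfm = crypto_alloc_cipher("arc4", 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		crypto_cipher_setkey(tfm, key, klen);
		for (i = 0; i < len; i++)	/* blocksize 1: byte at a time */
			crypto_cipher_encrypt_one(tfm, buf + i, buf + i);

		crypto_free_cipher(tfm);
		return 0;
	}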
diff --git a/net/mac80211/wep.h b/net/mac80211/wep.h
index 58654ee33518..01e54840a628 100644
--- a/net/mac80211/wep.h
+++ b/net/mac80211/wep.h
@@ -18,12 +18,12 @@
18 18
19int ieee80211_wep_init(struct ieee80211_local *local); 19int ieee80211_wep_init(struct ieee80211_local *local);
20void ieee80211_wep_free(struct ieee80211_local *local); 20void ieee80211_wep_free(struct ieee80211_local *local);
21int ieee80211_wep_encrypt_data(struct crypto_blkcipher *tfm, u8 *rc4key, 21int ieee80211_wep_encrypt_data(struct crypto_cipher *tfm, u8 *rc4key,
22 size_t klen, u8 *data, size_t data_len); 22 size_t klen, u8 *data, size_t data_len);
23int ieee80211_wep_encrypt(struct ieee80211_local *local, 23int ieee80211_wep_encrypt(struct ieee80211_local *local,
24 struct sk_buff *skb, 24 struct sk_buff *skb,
25 const u8 *key, int keylen, int keyidx); 25 const u8 *key, int keylen, int keyidx);
26int ieee80211_wep_decrypt_data(struct crypto_blkcipher *tfm, u8 *rc4key, 26int ieee80211_wep_decrypt_data(struct crypto_cipher *tfm, u8 *rc4key,
27 size_t klen, u8 *data, size_t data_len); 27 size_t klen, u8 *data, size_t data_len);
28bool ieee80211_wep_is_weak_iv(struct sk_buff *skb, struct ieee80211_key *key); 28bool ieee80211_wep_is_weak_iv(struct sk_buff *skb, struct ieee80211_key *key);
29 29
diff --git a/net/mac80211/work.c b/net/mac80211/work.c
index e73c8cae036b..d2e7f0e86677 100644
--- a/net/mac80211/work.c
+++ b/net/mac80211/work.c
@@ -65,17 +65,9 @@ static void run_again(struct ieee80211_local *local,
65 mod_timer(&local->work_timer, timeout); 65 mod_timer(&local->work_timer, timeout);
66} 66}
67 67
68static void work_free_rcu(struct rcu_head *head)
69{
70 struct ieee80211_work *wk =
71 container_of(head, struct ieee80211_work, rcu_head);
72
73 kfree(wk);
74}
75
76void free_work(struct ieee80211_work *wk) 68void free_work(struct ieee80211_work *wk)
77{ 69{
78 call_rcu(&wk->rcu_head, work_free_rcu); 70 kfree_rcu(wk, rcu_head);
79} 71}
80 72
81static int ieee80211_compatible_rates(const u8 *supp_rates, int supp_rates_len, 73static int ieee80211_compatible_rates(const u8 *supp_rates, int supp_rates_len,
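The general form of this kfree_rcu() conversion: any call_rcu() callback that does nothing but kfree() the enclosing object can be dropped, since kfree_rcu() takes the pointer and the rcu_head member name directly. An abridged sketch (the real struct has more fields):

	struct ieee80211_work {
		struct rcu_head rcu_head;
		/* ... */
	};

	static void free_work_sketch(struct ieee80211_work *wk)
	{
		kfree_rcu(wk, rcu_head);	/* freed after a grace period */
	}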
@@ -198,9 +190,8 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
198 struct sk_buff *skb; 190 struct sk_buff *skb;
199 struct ieee80211_mgmt *mgmt; 191 struct ieee80211_mgmt *mgmt;
200 u8 *pos, qos_info; 192 u8 *pos, qos_info;
201 const u8 *ies;
202 size_t offset = 0, noffset; 193 size_t offset = 0, noffset;
203 int i, len, count, rates_len, supp_rates_len; 194 int i, count, rates_len, supp_rates_len;
204 u16 capab; 195 u16 capab;
205 struct ieee80211_supported_band *sband; 196 struct ieee80211_supported_band *sband;
206 u32 rates = 0; 197 u32 rates = 0;
@@ -285,7 +276,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
285 } 276 }
286 277
287 /* SSID */ 278 /* SSID */
288 ies = pos = skb_put(skb, 2 + wk->assoc.ssid_len); 279 pos = skb_put(skb, 2 + wk->assoc.ssid_len);
289 *pos++ = WLAN_EID_SSID; 280 *pos++ = WLAN_EID_SSID;
290 *pos++ = wk->assoc.ssid_len; 281 *pos++ = wk->assoc.ssid_len;
291 memcpy(pos, wk->assoc.ssid, wk->assoc.ssid_len); 282 memcpy(pos, wk->assoc.ssid, wk->assoc.ssid_len);
@@ -295,7 +286,6 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
295 if (supp_rates_len > 8) 286 if (supp_rates_len > 8)
296 supp_rates_len = 8; 287 supp_rates_len = 8;
297 288
298 len = sband->n_bitrates;
299 pos = skb_put(skb, supp_rates_len + 2); 289 pos = skb_put(skb, supp_rates_len + 2);
300 *pos++ = WLAN_EID_SUPP_RATES; 290 *pos++ = WLAN_EID_SUPP_RATES;
301 *pos++ = supp_rates_len; 291 *pos++ = supp_rates_len;
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index f1765de2f4bf..8f6a302d2ac3 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -86,43 +86,88 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
86 struct sk_buff *skb = rx->skb; 86 struct sk_buff *skb = rx->skb;
87 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 87 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
88 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 88 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
89 int queue = rx->queue;
90
91 /* otherwise, TKIP is vulnerable to TID 0 vs. non-QoS replays */
92 if (rx->queue == NUM_RX_DATA_QUEUES - 1)
93 queue = 0;
94
95 /*
96 * it makes no sense to check for MIC errors on anything other
97 * than data frames.
98 */
99 if (!ieee80211_is_data_present(hdr->frame_control))
100 return RX_CONTINUE;
101
102 /*
103 * No way to verify the MIC if the hardware stripped it or
 104 * the IV with the key index. In this case we have to rely
 105 * solely on the driver to set RX_FLAG_MMIC_ERROR in the event
 106 * of a MIC failure report.
107 */
108 if (status->flag & (RX_FLAG_MMIC_STRIPPED | RX_FLAG_IV_STRIPPED)) {
109 if (status->flag & RX_FLAG_MMIC_ERROR)
110 goto mic_fail;
111
112 if (!(status->flag & RX_FLAG_IV_STRIPPED))
113 goto update_iv;
89 114
90 /* No way to verify the MIC if the hardware stripped it */
91 if (status->flag & RX_FLAG_MMIC_STRIPPED)
92 return RX_CONTINUE; 115 return RX_CONTINUE;
116 }
93 117
118 /*
 119 * Some hardware seems to generate Michael MIC failure reports even
 120 * though the frame was not encrypted with TKIP and therefore has no
 121 * MIC. Ignore the flag in that case to avoid triggering countermeasures.
122 */
94 if (!rx->key || rx->key->conf.cipher != WLAN_CIPHER_SUITE_TKIP || 123 if (!rx->key || rx->key->conf.cipher != WLAN_CIPHER_SUITE_TKIP ||
95 !ieee80211_has_protected(hdr->frame_control) || 124 !(status->flag & RX_FLAG_DECRYPTED))
96 !ieee80211_is_data_present(hdr->frame_control))
97 return RX_CONTINUE; 125 return RX_CONTINUE;
98 126
127 if (rx->sdata->vif.type == NL80211_IFTYPE_AP && rx->key->conf.keyidx) {
128 /*
129 * APs with pairwise keys should never receive Michael MIC
130 * errors for non-zero keyidx because these are reserved for
131 * group keys and only the AP is sending real multicast
 132 * frames in the BSS.
133 */
134 return RX_DROP_UNUSABLE;
135 }
136
137 if (status->flag & RX_FLAG_MMIC_ERROR)
138 goto mic_fail;
139
99 hdrlen = ieee80211_hdrlen(hdr->frame_control); 140 hdrlen = ieee80211_hdrlen(hdr->frame_control);
100 if (skb->len < hdrlen + MICHAEL_MIC_LEN) 141 if (skb->len < hdrlen + MICHAEL_MIC_LEN)
101 return RX_DROP_UNUSABLE; 142 return RX_DROP_UNUSABLE;
102 143
103 data = skb->data + hdrlen; 144 data = skb->data + hdrlen;
104 data_len = skb->len - hdrlen - MICHAEL_MIC_LEN; 145 data_len = skb->len - hdrlen - MICHAEL_MIC_LEN;
105
106 key = &rx->key->conf.key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY]; 146 key = &rx->key->conf.key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];
107 michael_mic(key, hdr, data, data_len, mic); 147 michael_mic(key, hdr, data, data_len, mic);
108 if (memcmp(mic, data + data_len, MICHAEL_MIC_LEN) != 0) { 148 if (memcmp(mic, data + data_len, MICHAEL_MIC_LEN) != 0)
109 if (!(status->rx_flags & IEEE80211_RX_RA_MATCH)) 149 goto mic_fail;
110 return RX_DROP_UNUSABLE;
111
112 mac80211_ev_michael_mic_failure(rx->sdata, rx->key->conf.keyidx,
113 (void *) skb->data, NULL,
114 GFP_ATOMIC);
115 return RX_DROP_UNUSABLE;
116 }
117 150
118 /* remove Michael MIC from payload */ 151 /* remove Michael MIC from payload */
119 skb_trim(skb, skb->len - MICHAEL_MIC_LEN); 152 skb_trim(skb, skb->len - MICHAEL_MIC_LEN);
120 153
154update_iv:
121 /* update IV in key information to be able to detect replays */ 155 /* update IV in key information to be able to detect replays */
122 rx->key->u.tkip.rx[rx->queue].iv32 = rx->tkip_iv32; 156 rx->key->u.tkip.rx[queue].iv32 = rx->tkip_iv32;
123 rx->key->u.tkip.rx[rx->queue].iv16 = rx->tkip_iv16; 157 rx->key->u.tkip.rx[queue].iv16 = rx->tkip_iv16;
124 158
125 return RX_CONTINUE; 159 return RX_CONTINUE;
160
161mic_fail:
162 /*
163 * In some cases the key can be unset - e.g. a multicast packet, in
164 * a driver that supports HW encryption. Send up the key idx only if
165 * the key is set.
166 */
167 mac80211_ev_michael_mic_failure(rx->sdata,
168 rx->key ? rx->key->conf.keyidx : -1,
169 (void *) skb->data, NULL, GFP_ATOMIC);
170 return RX_DROP_UNUSABLE;
126} 171}
127 172
128 173
@@ -201,6 +246,11 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
201 struct ieee80211_key *key = rx->key; 246 struct ieee80211_key *key = rx->key;
202 struct sk_buff *skb = rx->skb; 247 struct sk_buff *skb = rx->skb;
203 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 248 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
249 int queue = rx->queue;
250
251 /* otherwise, TKIP is vulnerable to TID 0 vs. non-QoS replays */
252 if (rx->queue == NUM_RX_DATA_QUEUES - 1)
253 queue = 0;
204 254
205 hdrlen = ieee80211_hdrlen(hdr->frame_control); 255 hdrlen = ieee80211_hdrlen(hdr->frame_control);
206 256
@@ -221,7 +271,7 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
221 res = ieee80211_tkip_decrypt_data(rx->local->wep_rx_tfm, 271 res = ieee80211_tkip_decrypt_data(rx->local->wep_rx_tfm,
222 key, skb->data + hdrlen, 272 key, skb->data + hdrlen,
223 skb->len - hdrlen, rx->sta->sta.addr, 273 skb->len - hdrlen, rx->sta->sta.addr,
224 hdr->addr1, hwaccel, rx->queue, 274 hdr->addr1, hwaccel, queue,
225 &rx->tkip_iv32, 275 &rx->tkip_iv32,
226 &rx->tkip_iv16); 276 &rx->tkip_iv16);
227 if (res != TKIP_DECRYPT_OK) 277 if (res != TKIP_DECRYPT_OK)
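
Both wpa.c hunks fold the reserved non-QoS RX queue onto queue 0 before touching the TKIP replay counters; otherwise a frame accepted on the non-QoS queue and the same frame replayed on TID 0 would be checked against independent IV state. A toy userspace model of the shared replay counter (the NUM_RX_DATA_QUEUES value is illustrative, not taken from mac80211 headers):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_RX_DATA_QUEUES 17	/* illustrative: 16 TIDs + 1 non-QoS slot */

static uint64_t last_iv[NUM_RX_DATA_QUEUES];

/* Accept a frame only if its IV strictly increases per replay counter. */
static bool replay_check(int queue, uint64_t iv)
{
	/* share replay state between TID 0 and non-QoS frames */
	if (queue == NUM_RX_DATA_QUEUES - 1)
		queue = 0;
	if (iv <= last_iv[queue])
		return false;		/* replay: reject */
	last_iv[queue] = iv;
	return true;
}

int main(void)
{
	/* a fresh frame on TID 0 is accepted... */
	printf("%d\n", replay_check(0, 5));
	/* ...and replaying it as a non-QoS frame is now caught */
	printf("%d\n", replay_check(NUM_RX_DATA_QUEUES - 1, 5));
	return 0;
}
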
diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c
index a113ff066928..ba2d16607f48 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ip.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ip.c
@@ -293,7 +293,7 @@ bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[],
293 293
294 for (; !before(ip_to, ip); ip += map->hosts) { 294 for (; !before(ip_to, ip); ip += map->hosts) {
295 id = ip_to_id(map, ip); 295 id = ip_to_id(map, ip);
296 ret = adtfn(set, &id, timeout);; 296 ret = adtfn(set, &id, timeout);
297 297
298 if (ret && !ip_set_eexist(ret, flags)) 298 if (ret && !ip_set_eexist(ret, flags))
299 return ret; 299 return ret;
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 72d1ac611fdc..42aa64b6b0b1 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -767,7 +767,7 @@ ip_set_destroy(struct sock *ctnl, struct sk_buff *skb,
767 if (!attr[IPSET_ATTR_SETNAME]) { 767 if (!attr[IPSET_ATTR_SETNAME]) {
768 for (i = 0; i < ip_set_max; i++) { 768 for (i = 0; i < ip_set_max; i++) {
769 if (ip_set_list[i] != NULL && ip_set_list[i]->ref) { 769 if (ip_set_list[i] != NULL && ip_set_list[i]->ref) {
770 ret = IPSET_ERR_BUSY; 770 ret = -IPSET_ERR_BUSY;
771 goto out; 771 goto out;
772 } 772 }
773 } 773 }
@@ -815,7 +815,7 @@ ip_set_flush(struct sock *ctnl, struct sk_buff *skb,
815 ip_set_id_t i; 815 ip_set_id_t i;
816 816
817 if (unlikely(protocol_failed(attr))) 817 if (unlikely(protocol_failed(attr)))
818 return -EPROTO; 818 return -IPSET_ERR_PROTOCOL;
819 819
820 if (!attr[IPSET_ATTR_SETNAME]) { 820 if (!attr[IPSET_ATTR_SETNAME]) {
821 for (i = 0; i < ip_set_max; i++) 821 for (i = 0; i < ip_set_max; i++)
diff --git a/net/netfilter/ipset/ip_set_getport.c b/net/netfilter/ipset/ip_set_getport.c
index 8d5227212686..757143b2240a 100644
--- a/net/netfilter/ipset/ip_set_getport.c
+++ b/net/netfilter/ipset/ip_set_getport.c
@@ -11,6 +11,7 @@
11#include <linux/skbuff.h> 11#include <linux/skbuff.h>
12#include <linux/icmp.h> 12#include <linux/icmp.h>
13#include <linux/icmpv6.h> 13#include <linux/icmpv6.h>
14#include <linux/sctp.h>
14#include <linux/netfilter_ipv6/ip6_tables.h> 15#include <linux/netfilter_ipv6/ip6_tables.h>
15#include <net/ip.h> 16#include <net/ip.h>
16#include <net/ipv6.h> 17#include <net/ipv6.h>
@@ -35,7 +36,20 @@ get_port(const struct sk_buff *skb, int protocol, unsigned int protooff,
35 *port = src ? th->source : th->dest; 36 *port = src ? th->source : th->dest;
36 break; 37 break;
37 } 38 }
38 case IPPROTO_UDP: { 39 case IPPROTO_SCTP: {
40 sctp_sctphdr_t _sh;
41 const sctp_sctphdr_t *sh;
42
43 sh = skb_header_pointer(skb, protooff, sizeof(_sh), &_sh);
44 if (sh == NULL)
45 /* No choice either */
46 return false;
47
48 *port = src ? sh->source : sh->dest;
49 break;
50 }
51 case IPPROTO_UDP:
52 case IPPROTO_UDPLITE: {
39 struct udphdr _udph; 53 struct udphdr _udph;
40 const struct udphdr *uh; 54 const struct udphdr *uh;
41 55
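
The getport hunk can treat SCTP like TCP/UDP because the SCTP common header (RFC 4960) also begins with 16-bit source and destination ports, so skb_header_pointer() copies them out the same way. A standalone sketch of that extraction; the struct here is hypothetical, while the kernel code uses sctp_sctphdr_t from linux/sctp.h:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

/* first fields of the SCTP common header, per RFC 4960 */
struct sctp_common_hdr {
	uint16_t source;
	uint16_t dest;
	uint32_t vtag;
	uint32_t checksum;
};

int main(void)
{
	unsigned char pkt[12] = { 0x1f, 0x90, 0x00, 0x50 };	/* 8080 -> 80 */
	struct sctp_common_hdr h;

	memcpy(&h, pkt, sizeof(h));	/* mimics skb_header_pointer()'s copy-out */
	printf("src %u dst %u\n", ntohs(h.source), ntohs(h.dest));
	return 0;
}
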
diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c
index b9214145d357..14281b6b8074 100644
--- a/net/netfilter/ipset/ip_set_hash_ipport.c
+++ b/net/netfilter/ipset/ip_set_hash_ipport.c
@@ -491,7 +491,7 @@ static struct ip_set_type hash_ipport_type __read_mostly = {
491 .features = IPSET_TYPE_IP | IPSET_TYPE_PORT, 491 .features = IPSET_TYPE_IP | IPSET_TYPE_PORT,
492 .dimension = IPSET_DIM_TWO, 492 .dimension = IPSET_DIM_TWO,
493 .family = AF_UNSPEC, 493 .family = AF_UNSPEC,
494 .revision = 0, 494 .revision = 1,
495 .create = hash_ipport_create, 495 .create = hash_ipport_create,
496 .create_policy = { 496 .create_policy = {
497 [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 }, 497 [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c
index 4642872df6e1..401c8a2531db 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportip.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportip.c
@@ -509,7 +509,7 @@ static struct ip_set_type hash_ipportip_type __read_mostly = {
509 .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2, 509 .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2,
510 .dimension = IPSET_DIM_THREE, 510 .dimension = IPSET_DIM_THREE,
511 .family = AF_UNSPEC, 511 .family = AF_UNSPEC,
512 .revision = 0, 512 .revision = 1,
513 .create = hash_ipportip_create, 513 .create = hash_ipportip_create,
514 .create_policy = { 514 .create_policy = {
515 [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 }, 515 [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c
index 2cb84a54b7ad..565a7c5b8818 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c
@@ -146,8 +146,9 @@ hash_ipportnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
146{ 146{
147 const struct ip_set_hash *h = set->data; 147 const struct ip_set_hash *h = set->data;
148 ipset_adtfn adtfn = set->variant->adt[adt]; 148 ipset_adtfn adtfn = set->variant->adt[adt];
149 struct hash_ipportnet4_elem data = 149 struct hash_ipportnet4_elem data = {
150 { .cidr = h->nets[0].cidr || HOST_MASK }; 150 .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
151 };
151 152
152 if (data.cidr == 0) 153 if (data.cidr == 0)
153 return -EINVAL; 154 return -EINVAL;
@@ -394,8 +395,9 @@ hash_ipportnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
394{ 395{
395 const struct ip_set_hash *h = set->data; 396 const struct ip_set_hash *h = set->data;
396 ipset_adtfn adtfn = set->variant->adt[adt]; 397 ipset_adtfn adtfn = set->variant->adt[adt];
397 struct hash_ipportnet6_elem data = 398 struct hash_ipportnet6_elem data = {
398 { .cidr = h->nets[0].cidr || HOST_MASK }; 399 .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
400 };
399 401
400 if (data.cidr == 0) 402 if (data.cidr == 0)
401 return -EINVAL; 403 return -EINVAL;
@@ -574,7 +576,7 @@ static struct ip_set_type hash_ipportnet_type __read_mostly = {
574 .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2, 576 .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2,
575 .dimension = IPSET_DIM_THREE, 577 .dimension = IPSET_DIM_THREE,
576 .family = AF_UNSPEC, 578 .family = AF_UNSPEC,
577 .revision = 0, 579 .revision = 1,
578 .create = hash_ipportnet_create, 580 .create = hash_ipportnet_create,
579 .create_policy = { 581 .create_policy = {
580 [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 }, 582 [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
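
The .cidr initializers above fix a classic C slip: h->nets[0].cidr || HOST_MASK is a logical OR that evaluates to 0 or 1, never to the intended fallback value, so the element was built with a /1 prefix whenever any cidr was configured. The ternary form keeps the configured prefix length and only falls back to HOST_MASK when none is set; the same fix repeats in hash_net and hash_netport below. A two-line demonstration:

#include <stdio.h>

#define HOST_MASK 32

int main(void)
{
	unsigned char first_cidr = 0;	/* no prefix configured yet */

	/* buggy: '||' yields 0 or 1, never HOST_MASK */
	unsigned char bad = first_cidr || HOST_MASK;
	/* fixed: fall back to HOST_MASK only when unset */
	unsigned char good = first_cidr ? first_cidr : HOST_MASK;

	printf("bad=%u good=%u\n", bad, good);	/* prints: bad=1 good=32 */
	return 0;
}
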
diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c
index c4db202b7da4..2aeeabcd5a21 100644
--- a/net/netfilter/ipset/ip_set_hash_net.c
+++ b/net/netfilter/ipset/ip_set_hash_net.c
@@ -131,7 +131,9 @@ hash_net4_kadt(struct ip_set *set, const struct sk_buff *skb,
131{ 131{
132 const struct ip_set_hash *h = set->data; 132 const struct ip_set_hash *h = set->data;
133 ipset_adtfn adtfn = set->variant->adt[adt]; 133 ipset_adtfn adtfn = set->variant->adt[adt];
134 struct hash_net4_elem data = { .cidr = h->nets[0].cidr || HOST_MASK }; 134 struct hash_net4_elem data = {
135 .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
136 };
135 137
136 if (data.cidr == 0) 138 if (data.cidr == 0)
137 return -EINVAL; 139 return -EINVAL;
@@ -296,7 +298,9 @@ hash_net6_kadt(struct ip_set *set, const struct sk_buff *skb,
296{ 298{
297 const struct ip_set_hash *h = set->data; 299 const struct ip_set_hash *h = set->data;
298 ipset_adtfn adtfn = set->variant->adt[adt]; 300 ipset_adtfn adtfn = set->variant->adt[adt];
299 struct hash_net6_elem data = { .cidr = h->nets[0].cidr || HOST_MASK }; 301 struct hash_net6_elem data = {
302 .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
303 };
300 304
301 if (data.cidr == 0) 305 if (data.cidr == 0)
302 return -EINVAL; 306 return -EINVAL;
diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c
index 8598676f2a05..e50d9bb8820b 100644
--- a/net/netfilter/ipset/ip_set_hash_netport.c
+++ b/net/netfilter/ipset/ip_set_hash_netport.c
@@ -144,7 +144,8 @@ hash_netport4_kadt(struct ip_set *set, const struct sk_buff *skb,
144 const struct ip_set_hash *h = set->data; 144 const struct ip_set_hash *h = set->data;
145 ipset_adtfn adtfn = set->variant->adt[adt]; 145 ipset_adtfn adtfn = set->variant->adt[adt];
146 struct hash_netport4_elem data = { 146 struct hash_netport4_elem data = {
147 .cidr = h->nets[0].cidr || HOST_MASK }; 147 .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
148 };
148 149
149 if (data.cidr == 0) 150 if (data.cidr == 0)
150 return -EINVAL; 151 return -EINVAL;
@@ -357,7 +358,8 @@ hash_netport6_kadt(struct ip_set *set, const struct sk_buff *skb,
357 const struct ip_set_hash *h = set->data; 358 const struct ip_set_hash *h = set->data;
358 ipset_adtfn adtfn = set->variant->adt[adt]; 359 ipset_adtfn adtfn = set->variant->adt[adt];
359 struct hash_netport6_elem data = { 360 struct hash_netport6_elem data = {
360 .cidr = h->nets[0].cidr || HOST_MASK }; 361 .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
362 };
361 363
362 if (data.cidr == 0) 364 if (data.cidr == 0)
363 return -EINVAL; 365 return -EINVAL;
@@ -526,7 +528,7 @@ static struct ip_set_type hash_netport_type __read_mostly = {
526 .features = IPSET_TYPE_IP | IPSET_TYPE_PORT, 528 .features = IPSET_TYPE_IP | IPSET_TYPE_PORT,
527 .dimension = IPSET_DIM_TWO, 529 .dimension = IPSET_DIM_TWO,
528 .family = AF_UNSPEC, 530 .family = AF_UNSPEC,
529 .revision = 0, 531 .revision = 1,
530 .create = hash_netport_create, 532 .create = hash_netport_create,
531 .create_policy = { 533 .create_policy = {
532 [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 }, 534 [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index bf28ac2fc99b..782db275ac53 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -776,8 +776,16 @@ static void ip_vs_conn_expire(unsigned long data)
776 if (cp->control) 776 if (cp->control)
777 ip_vs_control_del(cp); 777 ip_vs_control_del(cp);
778 778
779 if (cp->flags & IP_VS_CONN_F_NFCT) 779 if (cp->flags & IP_VS_CONN_F_NFCT) {
780 ip_vs_conn_drop_conntrack(cp); 780 ip_vs_conn_drop_conntrack(cp);
781 /* Do not access conntracks during subsys cleanup
 782 * because nf_conntrack_find_get cannot be used after
783 * conntrack cleanup for the net.
784 */
785 smp_rmb();
786 if (ipvs->enable)
787 ip_vs_conn_drop_conntrack(cp);
788 }
781 789
782 ip_vs_pe_put(cp->pe); 790 ip_vs_pe_put(cp->pe);
783 kfree(cp->pe_data); 791 kfree(cp->pe_data);
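
The conn-expire hunk pairs with the smp_wmb() added to __ip_vs_dev_cleanup() further down: the cleanup path clears ipvs->enable and issues a write barrier before tearing down conntrack state, while the expire timer issues a read barrier before testing the flag, so a timer that observes enable == 0 will not call into the already-destroyed conntrack tables. The generic shape of the pairing, sketched with a hypothetical teardown helper standing in for the real cleanup:

/* writer side (netns cleanup) */
ipvs->enable = 0;		/* stop new conntrack work */
smp_wmb();			/* publish the flag before teardown */
teardown_conntracks();		/* hypothetical stand-in */

/* reader side (connection expire timer) */
smp_rmb();			/* pairs with the smp_wmb() above */
if (ipvs->enable)
	ip_vs_conn_drop_conntrack(cp);
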
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index a74dae6c5dbc..24c28d238dcb 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1382,15 +1382,7 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
1382 ip_vs_in_stats(cp, skb); 1382 ip_vs_in_stats(cp, skb);
1383 if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol) 1383 if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol)
1384 offset += 2 * sizeof(__u16); 1384 offset += 2 * sizeof(__u16);
1385 verdict = ip_vs_icmp_xmit(skb, cp, pp, offset); 1385 verdict = ip_vs_icmp_xmit(skb, cp, pp, offset, hooknum);
1386 /* LOCALNODE from FORWARD hook is not supported */
1387 if (verdict == NF_ACCEPT && hooknum == NF_INET_FORWARD &&
1388 skb_rtable(skb)->rt_flags & RTCF_LOCAL) {
1389 IP_VS_DBG(1, "%s(): "
1390 "local delivery to %pI4 but in FORWARD\n",
1391 __func__, &skb_rtable(skb)->rt_dst);
1392 verdict = NF_DROP;
1393 }
1394 1386
1395 out: 1387 out:
1396 __ip_vs_conn_put(cp); 1388 __ip_vs_conn_put(cp);
@@ -1412,7 +1404,6 @@ ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum)
1412 struct ip_vs_protocol *pp; 1404 struct ip_vs_protocol *pp;
1413 struct ip_vs_proto_data *pd; 1405 struct ip_vs_proto_data *pd;
1414 unsigned int offset, verdict; 1406 unsigned int offset, verdict;
1415 struct rt6_info *rt;
1416 1407
1417 *related = 1; 1408 *related = 1;
1418 1409
@@ -1474,23 +1465,12 @@ ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum)
1474 if (!cp) 1465 if (!cp)
1475 return NF_ACCEPT; 1466 return NF_ACCEPT;
1476 1467
1477 verdict = NF_DROP;
1478
1479 /* do the statistics and put it back */ 1468 /* do the statistics and put it back */
1480 ip_vs_in_stats(cp, skb); 1469 ip_vs_in_stats(cp, skb);
1481 if (IPPROTO_TCP == cih->nexthdr || IPPROTO_UDP == cih->nexthdr || 1470 if (IPPROTO_TCP == cih->nexthdr || IPPROTO_UDP == cih->nexthdr ||
1482 IPPROTO_SCTP == cih->nexthdr) 1471 IPPROTO_SCTP == cih->nexthdr)
1483 offset += 2 * sizeof(__u16); 1472 offset += 2 * sizeof(__u16);
1484 verdict = ip_vs_icmp_xmit_v6(skb, cp, pp, offset); 1473 verdict = ip_vs_icmp_xmit_v6(skb, cp, pp, offset, hooknum);
1485 /* LOCALNODE from FORWARD hook is not supported */
1486 if (verdict == NF_ACCEPT && hooknum == NF_INET_FORWARD &&
1487 (rt = (struct rt6_info *) skb_dst(skb)) &&
1488 rt->rt6i_dev && rt->rt6i_dev->flags & IFF_LOOPBACK) {
1489 IP_VS_DBG(1, "%s(): "
1490 "local delivery to %pI6 but in FORWARD\n",
1491 __func__, &rt->rt6i_dst);
1492 verdict = NF_DROP;
1493 }
1494 1474
1495 __ip_vs_conn_put(cp); 1475 __ip_vs_conn_put(cp);
1496 1476
@@ -1792,7 +1772,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1792 .owner = THIS_MODULE, 1772 .owner = THIS_MODULE,
1793 .pf = PF_INET, 1773 .pf = PF_INET,
1794 .hooknum = NF_INET_LOCAL_IN, 1774 .hooknum = NF_INET_LOCAL_IN,
1795 .priority = 99, 1775 .priority = NF_IP_PRI_NAT_SRC - 2,
1796 }, 1776 },
1797 /* After packet filtering, forward packet through VS/DR, VS/TUN, 1777 /* After packet filtering, forward packet through VS/DR, VS/TUN,
1798 * or VS/NAT(change destination), so that filtering rules can be 1778 * or VS/NAT(change destination), so that filtering rules can be
@@ -1802,7 +1782,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1802 .owner = THIS_MODULE, 1782 .owner = THIS_MODULE,
1803 .pf = PF_INET, 1783 .pf = PF_INET,
1804 .hooknum = NF_INET_LOCAL_IN, 1784 .hooknum = NF_INET_LOCAL_IN,
1805 .priority = 101, 1785 .priority = NF_IP_PRI_NAT_SRC - 1,
1806 }, 1786 },
1807 /* Before ip_vs_in, change source only for VS/NAT */ 1787 /* Before ip_vs_in, change source only for VS/NAT */
1808 { 1788 {
@@ -1810,7 +1790,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1810 .owner = THIS_MODULE, 1790 .owner = THIS_MODULE,
1811 .pf = PF_INET, 1791 .pf = PF_INET,
1812 .hooknum = NF_INET_LOCAL_OUT, 1792 .hooknum = NF_INET_LOCAL_OUT,
1813 .priority = -99, 1793 .priority = NF_IP_PRI_NAT_DST + 1,
1814 }, 1794 },
1815 /* After mangle, schedule and forward local requests */ 1795 /* After mangle, schedule and forward local requests */
1816 { 1796 {
@@ -1818,7 +1798,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1818 .owner = THIS_MODULE, 1798 .owner = THIS_MODULE,
1819 .pf = PF_INET, 1799 .pf = PF_INET,
1820 .hooknum = NF_INET_LOCAL_OUT, 1800 .hooknum = NF_INET_LOCAL_OUT,
1821 .priority = -98, 1801 .priority = NF_IP_PRI_NAT_DST + 2,
1822 }, 1802 },
1823 /* After packet filtering (but before ip_vs_out_icmp), catch icmp 1803 /* After packet filtering (but before ip_vs_out_icmp), catch icmp
1824 * destined for 0.0.0.0/0, which is for incoming IPVS connections */ 1804 * destined for 0.0.0.0/0, which is for incoming IPVS connections */
@@ -1844,7 +1824,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1844 .owner = THIS_MODULE, 1824 .owner = THIS_MODULE,
1845 .pf = PF_INET6, 1825 .pf = PF_INET6,
1846 .hooknum = NF_INET_LOCAL_IN, 1826 .hooknum = NF_INET_LOCAL_IN,
1847 .priority = 99, 1827 .priority = NF_IP6_PRI_NAT_SRC - 2,
1848 }, 1828 },
1849 /* After packet filtering, forward packet through VS/DR, VS/TUN, 1829 /* After packet filtering, forward packet through VS/DR, VS/TUN,
1850 * or VS/NAT(change destination), so that filtering rules can be 1830 * or VS/NAT(change destination), so that filtering rules can be
@@ -1854,7 +1834,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1854 .owner = THIS_MODULE, 1834 .owner = THIS_MODULE,
1855 .pf = PF_INET6, 1835 .pf = PF_INET6,
1856 .hooknum = NF_INET_LOCAL_IN, 1836 .hooknum = NF_INET_LOCAL_IN,
1857 .priority = 101, 1837 .priority = NF_IP6_PRI_NAT_SRC - 1,
1858 }, 1838 },
1859 /* Before ip_vs_in, change source only for VS/NAT */ 1839 /* Before ip_vs_in, change source only for VS/NAT */
1860 { 1840 {
@@ -1862,7 +1842,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1862 .owner = THIS_MODULE, 1842 .owner = THIS_MODULE,
1863 .pf = PF_INET, 1843 .pf = PF_INET,
1864 .hooknum = NF_INET_LOCAL_OUT, 1844 .hooknum = NF_INET_LOCAL_OUT,
1865 .priority = -99, 1845 .priority = NF_IP6_PRI_NAT_DST + 1,
1866 }, 1846 },
1867 /* After mangle, schedule and forward local requests */ 1847 /* After mangle, schedule and forward local requests */
1868 { 1848 {
@@ -1870,7 +1850,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1870 .owner = THIS_MODULE, 1850 .owner = THIS_MODULE,
1871 .pf = PF_INET6, 1851 .pf = PF_INET6,
1872 .hooknum = NF_INET_LOCAL_OUT, 1852 .hooknum = NF_INET_LOCAL_OUT,
1873 .priority = -98, 1853 .priority = NF_IP6_PRI_NAT_DST + 2,
1874 }, 1854 },
1875 /* After packet filtering (but before ip_vs_out_icmp), catch icmp 1855 /* After packet filtering (but before ip_vs_out_icmp), catch icmp
1876 * destined for 0.0.0.0/0, which is for incoming IPVS connections */ 1856 * destined for 0.0.0.0/0, which is for incoming IPVS connections */
@@ -1965,6 +1945,7 @@ static void __net_exit __ip_vs_dev_cleanup(struct net *net)
1965{ 1945{
1966 EnterFunction(2); 1946 EnterFunction(2);
1967 net_ipvs(net)->enable = 0; /* Disable packet reception */ 1947 net_ipvs(net)->enable = 0; /* Disable packet reception */
1948 smp_wmb();
1968 __ip_vs_sync_cleanup(net); 1949 __ip_vs_sync_cleanup(net);
1969 LeaveFunction(2); 1950 LeaveFunction(2);
1970} 1951}
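
The priority changes replace magic numbers with offsets from the NAT hook priorities, making the intended ordering relative to iptables NAT explicit rather than implied by raw integers. For reference, a trimmed excerpt of the relevant enumerators as defined in this era's include/linux/netfilter_ipv4.h (the IPv6 hooks use the analogous NF_IP6_PRI_* values):

enum nf_ip_hook_priorities {
	NF_IP_PRI_NAT_DST = -100,	/* DNAT */
	NF_IP_PRI_FILTER  = 0,
	NF_IP_PRI_NAT_SRC = 100,	/* SNAT */
};

/* e.g. NF_IP_PRI_NAT_SRC - 2 == 98: after filtering, just before SNAT */
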
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 37890f228b19..699c79a55657 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2069,9 +2069,6 @@ static const struct file_operations ip_vs_info_fops = {
2069 .release = seq_release_net, 2069 .release = seq_release_net,
2070}; 2070};
2071 2071
2072#endif
2073
2074#ifdef CONFIG_PROC_FS
2075static int ip_vs_stats_show(struct seq_file *seq, void *v) 2072static int ip_vs_stats_show(struct seq_file *seq, void *v)
2076{ 2073{
2077 struct net *net = seq_file_single_net(seq); 2074 struct net *net = seq_file_single_net(seq);
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index 6b5dd6ddaae9..af63553fa332 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -411,25 +411,35 @@ static struct ip_vs_app ip_vs_ftp = {
411static int __net_init __ip_vs_ftp_init(struct net *net) 411static int __net_init __ip_vs_ftp_init(struct net *net)
412{ 412{
413 int i, ret; 413 int i, ret;
414 struct ip_vs_app *app = &ip_vs_ftp; 414 struct ip_vs_app *app;
415 struct netns_ipvs *ipvs = net_ipvs(net);
416
417 app = kmemdup(&ip_vs_ftp, sizeof(struct ip_vs_app), GFP_KERNEL);
418 if (!app)
419 return -ENOMEM;
420 INIT_LIST_HEAD(&app->a_list);
421 INIT_LIST_HEAD(&app->incs_list);
422 ipvs->ftp_app = app;
415 423
416 ret = register_ip_vs_app(net, app); 424 ret = register_ip_vs_app(net, app);
417 if (ret) 425 if (ret)
418 return ret; 426 goto err_exit;
419 427
420 for (i=0; i<IP_VS_APP_MAX_PORTS; i++) { 428 for (i=0; i<IP_VS_APP_MAX_PORTS; i++) {
421 if (!ports[i]) 429 if (!ports[i])
422 continue; 430 continue;
423 ret = register_ip_vs_app_inc(net, app, app->protocol, ports[i]); 431 ret = register_ip_vs_app_inc(net, app, app->protocol, ports[i]);
424 if (ret) 432 if (ret)
425 break; 433 goto err_unreg;
426 pr_info("%s: loaded support on port[%d] = %d\n", 434 pr_info("%s: loaded support on port[%d] = %d\n",
427 app->name, i, ports[i]); 435 app->name, i, ports[i]);
428 } 436 }
437 return 0;
429 438
430 if (ret) 439err_unreg:
431 unregister_ip_vs_app(net, app); 440 unregister_ip_vs_app(net, app);
432 441err_exit:
442 kfree(ipvs->ftp_app);
433 return ret; 443 return ret;
434} 444}
435/* 445/*
@@ -437,9 +447,10 @@ static int __net_init __ip_vs_ftp_init(struct net *net)
437 */ 447 */
438static void __ip_vs_ftp_exit(struct net *net) 448static void __ip_vs_ftp_exit(struct net *net)
439{ 449{
440 struct ip_vs_app *app = &ip_vs_ftp; 450 struct netns_ipvs *ipvs = net_ipvs(net);
441 451
442 unregister_ip_vs_app(net, app); 452 unregister_ip_vs_app(net, ipvs->ftp_app);
453 kfree(ipvs->ftp_app);
443} 454}
444 455
445static struct pernet_operations ip_vs_ftp_ops = { 456static struct pernet_operations ip_vs_ftp_ops = {
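
__ip_vs_ftp_init() now kmemdup()s the template instead of registering the static ip_vs_ftp object directly: a static instance embeds a single list_head, so registering it on the per-netns app lists of two namespaces would corrupt both lists. Each namespace therefore gets a private copy, remembered in ipvs->ftp_app so __ip_vs_ftp_exit() can unregister and free it. The core of the pattern, as used in the hunk above:

struct ip_vs_app *app = kmemdup(&ip_vs_ftp, sizeof(*app), GFP_KERNEL);
if (!app)
	return -ENOMEM;
INIT_LIST_HEAD(&app->a_list);	/* fresh list heads for this netns */
INIT_LIST_HEAD(&app->incs_list);
net_ipvs(net)->ftp_app = app;
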
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 6132b213eddc..ee319a4338b0 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -87,7 +87,7 @@ __ip_vs_dst_check(struct ip_vs_dest *dest, u32 rtos)
87/* Get route to destination or remote server */ 87/* Get route to destination or remote server */
88static struct rtable * 88static struct rtable *
89__ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest, 89__ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
90 __be32 daddr, u32 rtos, int rt_mode) 90 __be32 daddr, u32 rtos, int rt_mode, __be32 *ret_saddr)
91{ 91{
92 struct net *net = dev_net(skb_dst(skb)->dev); 92 struct net *net = dev_net(skb_dst(skb)->dev);
93 struct rtable *rt; /* Route to the other host */ 93 struct rtable *rt; /* Route to the other host */
@@ -98,7 +98,12 @@ __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
98 spin_lock(&dest->dst_lock); 98 spin_lock(&dest->dst_lock);
99 if (!(rt = (struct rtable *) 99 if (!(rt = (struct rtable *)
100 __ip_vs_dst_check(dest, rtos))) { 100 __ip_vs_dst_check(dest, rtos))) {
101 rt = ip_route_output(net, dest->addr.ip, 0, rtos, 0); 101 struct flowi4 fl4;
102
103 memset(&fl4, 0, sizeof(fl4));
104 fl4.daddr = dest->addr.ip;
105 fl4.flowi4_tos = rtos;
106 rt = ip_route_output_key(net, &fl4);
102 if (IS_ERR(rt)) { 107 if (IS_ERR(rt)) {
103 spin_unlock(&dest->dst_lock); 108 spin_unlock(&dest->dst_lock);
104 IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n", 109 IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n",
@@ -106,18 +111,30 @@ __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
106 return NULL; 111 return NULL;
107 } 112 }
108 __ip_vs_dst_set(dest, rtos, dst_clone(&rt->dst), 0); 113 __ip_vs_dst_set(dest, rtos, dst_clone(&rt->dst), 0);
109 IP_VS_DBG(10, "new dst %pI4, refcnt=%d, rtos=%X\n", 114 dest->dst_saddr.ip = fl4.saddr;
110 &dest->addr.ip, 115 IP_VS_DBG(10, "new dst %pI4, src %pI4, refcnt=%d, "
116 "rtos=%X\n",
117 &dest->addr.ip, &dest->dst_saddr.ip,
111 atomic_read(&rt->dst.__refcnt), rtos); 118 atomic_read(&rt->dst.__refcnt), rtos);
112 } 119 }
120 daddr = dest->addr.ip;
121 if (ret_saddr)
122 *ret_saddr = dest->dst_saddr.ip;
113 spin_unlock(&dest->dst_lock); 123 spin_unlock(&dest->dst_lock);
114 } else { 124 } else {
115 rt = ip_route_output(net, daddr, 0, rtos, 0); 125 struct flowi4 fl4;
126
127 memset(&fl4, 0, sizeof(fl4));
128 fl4.daddr = daddr;
129 fl4.flowi4_tos = rtos;
130 rt = ip_route_output_key(net, &fl4);
116 if (IS_ERR(rt)) { 131 if (IS_ERR(rt)) {
117 IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n", 132 IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n",
118 &daddr); 133 &daddr);
119 return NULL; 134 return NULL;
120 } 135 }
136 if (ret_saddr)
137 *ret_saddr = fl4.saddr;
121 } 138 }
122 139
123 local = rt->rt_flags & RTCF_LOCAL; 140 local = rt->rt_flags & RTCF_LOCAL;
@@ -125,7 +142,7 @@ __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
125 rt_mode)) { 142 rt_mode)) {
126 IP_VS_DBG_RL("Stopping traffic to %s address, dest: %pI4\n", 143 IP_VS_DBG_RL("Stopping traffic to %s address, dest: %pI4\n",
127 (rt->rt_flags & RTCF_LOCAL) ? 144 (rt->rt_flags & RTCF_LOCAL) ?
128 "local":"non-local", &rt->rt_dst); 145 "local":"non-local", &daddr);
129 ip_rt_put(rt); 146 ip_rt_put(rt);
130 return NULL; 147 return NULL;
131 } 148 }
@@ -133,14 +150,14 @@ __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
133 !((ort = skb_rtable(skb)) && ort->rt_flags & RTCF_LOCAL)) { 150 !((ort = skb_rtable(skb)) && ort->rt_flags & RTCF_LOCAL)) {
134 IP_VS_DBG_RL("Redirect from non-local address %pI4 to local " 151 IP_VS_DBG_RL("Redirect from non-local address %pI4 to local "
135 "requires NAT method, dest: %pI4\n", 152 "requires NAT method, dest: %pI4\n",
136 &ip_hdr(skb)->daddr, &rt->rt_dst); 153 &ip_hdr(skb)->daddr, &daddr);
137 ip_rt_put(rt); 154 ip_rt_put(rt);
138 return NULL; 155 return NULL;
139 } 156 }
140 if (unlikely(!local && ipv4_is_loopback(ip_hdr(skb)->saddr))) { 157 if (unlikely(!local && ipv4_is_loopback(ip_hdr(skb)->saddr))) {
141 IP_VS_DBG_RL("Stopping traffic from loopback address %pI4 " 158 IP_VS_DBG_RL("Stopping traffic from loopback address %pI4 "
142 "to non-local address, dest: %pI4\n", 159 "to non-local address, dest: %pI4\n",
143 &ip_hdr(skb)->saddr, &rt->rt_dst); 160 &ip_hdr(skb)->saddr, &daddr);
144 ip_rt_put(rt); 161 ip_rt_put(rt);
145 return NULL; 162 return NULL;
146 } 163 }
@@ -229,8 +246,6 @@ out_err:
229 246
230/* 247/*
231 * Get route to destination or remote server 248 * Get route to destination or remote server
232 * rt_mode: flags, &1=Allow local dest, &2=Allow non-local dest,
233 * &4=Allow redirect from remote daddr to local
234 */ 249 */
235static struct rt6_info * 250static struct rt6_info *
236__ip_vs_get_out_rt_v6(struct sk_buff *skb, struct ip_vs_dest *dest, 251__ip_vs_get_out_rt_v6(struct sk_buff *skb, struct ip_vs_dest *dest,
@@ -250,7 +265,7 @@ __ip_vs_get_out_rt_v6(struct sk_buff *skb, struct ip_vs_dest *dest,
250 u32 cookie; 265 u32 cookie;
251 266
252 dst = __ip_vs_route_output_v6(net, &dest->addr.in6, 267 dst = __ip_vs_route_output_v6(net, &dest->addr.in6,
253 &dest->dst_saddr, 268 &dest->dst_saddr.in6,
254 do_xfrm); 269 do_xfrm);
255 if (!dst) { 270 if (!dst) {
256 spin_unlock(&dest->dst_lock); 271 spin_unlock(&dest->dst_lock);
@@ -260,11 +275,11 @@ __ip_vs_get_out_rt_v6(struct sk_buff *skb, struct ip_vs_dest *dest,
260 cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0; 275 cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
261 __ip_vs_dst_set(dest, 0, dst_clone(&rt->dst), cookie); 276 __ip_vs_dst_set(dest, 0, dst_clone(&rt->dst), cookie);
262 IP_VS_DBG(10, "new dst %pI6, src %pI6, refcnt=%d\n", 277 IP_VS_DBG(10, "new dst %pI6, src %pI6, refcnt=%d\n",
263 &dest->addr.in6, &dest->dst_saddr, 278 &dest->addr.in6, &dest->dst_saddr.in6,
264 atomic_read(&rt->dst.__refcnt)); 279 atomic_read(&rt->dst.__refcnt));
265 } 280 }
266 if (ret_saddr) 281 if (ret_saddr)
267 ipv6_addr_copy(ret_saddr, &dest->dst_saddr); 282 ipv6_addr_copy(ret_saddr, &dest->dst_saddr.in6);
268 spin_unlock(&dest->dst_lock); 283 spin_unlock(&dest->dst_lock);
269 } else { 284 } else {
270 dst = __ip_vs_route_output_v6(net, daddr, ret_saddr, do_xfrm); 285 dst = __ip_vs_route_output_v6(net, daddr, ret_saddr, do_xfrm);
@@ -274,13 +289,14 @@ __ip_vs_get_out_rt_v6(struct sk_buff *skb, struct ip_vs_dest *dest,
274 } 289 }
275 290
276 local = __ip_vs_is_local_route6(rt); 291 local = __ip_vs_is_local_route6(rt);
277 if (!((local ? 1 : 2) & rt_mode)) { 292 if (!((local ? IP_VS_RT_MODE_LOCAL : IP_VS_RT_MODE_NON_LOCAL) &
293 rt_mode)) {
278 IP_VS_DBG_RL("Stopping traffic to %s address, dest: %pI6\n", 294 IP_VS_DBG_RL("Stopping traffic to %s address, dest: %pI6\n",
279 local ? "local":"non-local", daddr); 295 local ? "local":"non-local", daddr);
280 dst_release(&rt->dst); 296 dst_release(&rt->dst);
281 return NULL; 297 return NULL;
282 } 298 }
283 if (local && !(rt_mode & 4) && 299 if (local && !(rt_mode & IP_VS_RT_MODE_RDR) &&
284 !((ort = (struct rt6_info *) skb_dst(skb)) && 300 !((ort = (struct rt6_info *) skb_dst(skb)) &&
285 __ip_vs_is_local_route6(ort))) { 301 __ip_vs_is_local_route6(ort))) {
286 IP_VS_DBG_RL("Redirect from non-local address %pI6 to local " 302 IP_VS_DBG_RL("Redirect from non-local address %pI6 to local "
@@ -386,7 +402,7 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
386 EnterFunction(10); 402 EnterFunction(10);
387 403
388 if (!(rt = __ip_vs_get_out_rt(skb, NULL, iph->daddr, RT_TOS(iph->tos), 404 if (!(rt = __ip_vs_get_out_rt(skb, NULL, iph->daddr, RT_TOS(iph->tos),
389 IP_VS_RT_MODE_NON_LOCAL))) 405 IP_VS_RT_MODE_NON_LOCAL, NULL)))
390 goto tx_error_icmp; 406 goto tx_error_icmp;
391 407
392 /* MTU checking */ 408 /* MTU checking */
@@ -440,7 +456,8 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
440 456
441 EnterFunction(10); 457 EnterFunction(10);
442 458
443 if (!(rt = __ip_vs_get_out_rt_v6(skb, NULL, &iph->daddr, NULL, 0, 2))) 459 if (!(rt = __ip_vs_get_out_rt_v6(skb, NULL, &iph->daddr, NULL, 0,
460 IP_VS_RT_MODE_NON_LOCAL)))
444 goto tx_error_icmp; 461 goto tx_error_icmp;
445 462
446 /* MTU checking */ 463 /* MTU checking */
@@ -517,7 +534,7 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
517 RT_TOS(iph->tos), 534 RT_TOS(iph->tos),
518 IP_VS_RT_MODE_LOCAL | 535 IP_VS_RT_MODE_LOCAL |
519 IP_VS_RT_MODE_NON_LOCAL | 536 IP_VS_RT_MODE_NON_LOCAL |
520 IP_VS_RT_MODE_RDR))) 537 IP_VS_RT_MODE_RDR, NULL)))
521 goto tx_error_icmp; 538 goto tx_error_icmp;
522 local = rt->rt_flags & RTCF_LOCAL; 539 local = rt->rt_flags & RTCF_LOCAL;
523 /* 540 /*
@@ -539,7 +556,7 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
539#endif 556#endif
540 557
541 /* From world but DNAT to loopback address? */ 558 /* From world but DNAT to loopback address? */
542 if (local && ipv4_is_loopback(rt->rt_dst) && 559 if (local && ipv4_is_loopback(cp->daddr.ip) &&
543 rt_is_input_route(skb_rtable(skb))) { 560 rt_is_input_route(skb_rtable(skb))) {
544 IP_VS_DBG_RL_PKT(1, AF_INET, pp, skb, 0, "ip_vs_nat_xmit(): " 561 IP_VS_DBG_RL_PKT(1, AF_INET, pp, skb, 0, "ip_vs_nat_xmit(): "
545 "stopping DNAT to loopback address"); 562 "stopping DNAT to loopback address");
@@ -632,7 +649,9 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
632 } 649 }
633 650
634 if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL, 651 if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL,
635 0, 1|2|4))) 652 0, (IP_VS_RT_MODE_LOCAL |
653 IP_VS_RT_MODE_NON_LOCAL |
654 IP_VS_RT_MODE_RDR))))
636 goto tx_error_icmp; 655 goto tx_error_icmp;
637 local = __ip_vs_is_local_route6(rt); 656 local = __ip_vs_is_local_route6(rt);
638 /* 657 /*
@@ -748,6 +767,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
748 struct ip_vs_protocol *pp) 767 struct ip_vs_protocol *pp)
749{ 768{
750 struct rtable *rt; /* Route to the other host */ 769 struct rtable *rt; /* Route to the other host */
770 __be32 saddr; /* Source for tunnel */
751 struct net_device *tdev; /* Device to other host */ 771 struct net_device *tdev; /* Device to other host */
752 struct iphdr *old_iph = ip_hdr(skb); 772 struct iphdr *old_iph = ip_hdr(skb);
753 u8 tos = old_iph->tos; 773 u8 tos = old_iph->tos;
@@ -761,7 +781,8 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
761 781
762 if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip, 782 if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
763 RT_TOS(tos), IP_VS_RT_MODE_LOCAL | 783 RT_TOS(tos), IP_VS_RT_MODE_LOCAL |
764 IP_VS_RT_MODE_NON_LOCAL))) 784 IP_VS_RT_MODE_NON_LOCAL,
785 &saddr)))
765 goto tx_error_icmp; 786 goto tx_error_icmp;
766 if (rt->rt_flags & RTCF_LOCAL) { 787 if (rt->rt_flags & RTCF_LOCAL) {
767 ip_rt_put(rt); 788 ip_rt_put(rt);
@@ -829,8 +850,8 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
829 iph->frag_off = df; 850 iph->frag_off = df;
830 iph->protocol = IPPROTO_IPIP; 851 iph->protocol = IPPROTO_IPIP;
831 iph->tos = tos; 852 iph->tos = tos;
832 iph->daddr = rt->rt_dst; 853 iph->daddr = cp->daddr.ip;
833 iph->saddr = rt->rt_src; 854 iph->saddr = saddr;
834 iph->ttl = old_iph->ttl; 855 iph->ttl = old_iph->ttl;
835 ip_select_ident(iph, &rt->dst, NULL); 856 ip_select_ident(iph, &rt->dst, NULL);
836 857
@@ -875,7 +896,8 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
875 EnterFunction(10); 896 EnterFunction(10);
876 897
877 if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, 898 if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6,
878 &saddr, 1, 1|2))) 899 &saddr, 1, (IP_VS_RT_MODE_LOCAL |
900 IP_VS_RT_MODE_NON_LOCAL))))
879 goto tx_error_icmp; 901 goto tx_error_icmp;
880 if (__ip_vs_is_local_route6(rt)) { 902 if (__ip_vs_is_local_route6(rt)) {
881 dst_release(&rt->dst); 903 dst_release(&rt->dst);
@@ -992,7 +1014,7 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
992 if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip, 1014 if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
993 RT_TOS(iph->tos), 1015 RT_TOS(iph->tos),
994 IP_VS_RT_MODE_LOCAL | 1016 IP_VS_RT_MODE_LOCAL |
995 IP_VS_RT_MODE_NON_LOCAL))) 1017 IP_VS_RT_MODE_NON_LOCAL, NULL)))
996 goto tx_error_icmp; 1018 goto tx_error_icmp;
997 if (rt->rt_flags & RTCF_LOCAL) { 1019 if (rt->rt_flags & RTCF_LOCAL) {
998 ip_rt_put(rt); 1020 ip_rt_put(rt);
@@ -1050,7 +1072,8 @@ ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
1050 EnterFunction(10); 1072 EnterFunction(10);
1051 1073
1052 if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL, 1074 if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL,
1053 0, 1|2))) 1075 0, (IP_VS_RT_MODE_LOCAL |
1076 IP_VS_RT_MODE_NON_LOCAL))))
1054 goto tx_error_icmp; 1077 goto tx_error_icmp;
1055 if (__ip_vs_is_local_route6(rt)) { 1078 if (__ip_vs_is_local_route6(rt)) {
1056 dst_release(&rt->dst); 1079 dst_release(&rt->dst);
@@ -1109,12 +1132,13 @@ tx_error:
1109 */ 1132 */
1110int 1133int
1111ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, 1134ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1112 struct ip_vs_protocol *pp, int offset) 1135 struct ip_vs_protocol *pp, int offset, unsigned int hooknum)
1113{ 1136{
1114 struct rtable *rt; /* Route to the other host */ 1137 struct rtable *rt; /* Route to the other host */
1115 int mtu; 1138 int mtu;
1116 int rc; 1139 int rc;
1117 int local; 1140 int local;
1141 int rt_mode;
1118 1142
1119 EnterFunction(10); 1143 EnterFunction(10);
1120 1144
@@ -1135,11 +1159,13 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1135 * mangle and send the packet here (only for VS/NAT) 1159 * mangle and send the packet here (only for VS/NAT)
1136 */ 1160 */
1137 1161
1162 /* LOCALNODE from FORWARD hook is not supported */
1163 rt_mode = (hooknum != NF_INET_FORWARD) ?
1164 IP_VS_RT_MODE_LOCAL | IP_VS_RT_MODE_NON_LOCAL |
1165 IP_VS_RT_MODE_RDR : IP_VS_RT_MODE_NON_LOCAL;
1138 if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip, 1166 if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
1139 RT_TOS(ip_hdr(skb)->tos), 1167 RT_TOS(ip_hdr(skb)->tos),
1140 IP_VS_RT_MODE_LOCAL | 1168 rt_mode, NULL)))
1141 IP_VS_RT_MODE_NON_LOCAL |
1142 IP_VS_RT_MODE_RDR)))
1143 goto tx_error_icmp; 1169 goto tx_error_icmp;
1144 local = rt->rt_flags & RTCF_LOCAL; 1170 local = rt->rt_flags & RTCF_LOCAL;
1145 1171
@@ -1162,7 +1188,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1162#endif 1188#endif
1163 1189
1164 /* From world but DNAT to loopback address? */ 1190 /* From world but DNAT to loopback address? */
1165 if (local && ipv4_is_loopback(rt->rt_dst) && 1191 if (local && ipv4_is_loopback(cp->daddr.ip) &&
1166 rt_is_input_route(skb_rtable(skb))) { 1192 rt_is_input_route(skb_rtable(skb))) {
1167 IP_VS_DBG(1, "%s(): " 1193 IP_VS_DBG(1, "%s(): "
1168 "stopping DNAT to loopback %pI4\n", 1194 "stopping DNAT to loopback %pI4\n",
@@ -1227,12 +1253,13 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1227#ifdef CONFIG_IP_VS_IPV6 1253#ifdef CONFIG_IP_VS_IPV6
1228int 1254int
1229ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, 1255ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
1230 struct ip_vs_protocol *pp, int offset) 1256 struct ip_vs_protocol *pp, int offset, unsigned int hooknum)
1231{ 1257{
1232 struct rt6_info *rt; /* Route to the other host */ 1258 struct rt6_info *rt; /* Route to the other host */
1233 int mtu; 1259 int mtu;
1234 int rc; 1260 int rc;
1235 int local; 1261 int local;
1262 int rt_mode;
1236 1263
1237 EnterFunction(10); 1264 EnterFunction(10);
1238 1265
@@ -1253,8 +1280,12 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
1253 * mangle and send the packet here (only for VS/NAT) 1280 * mangle and send the packet here (only for VS/NAT)
1254 */ 1281 */
1255 1282
1283 /* LOCALNODE from FORWARD hook is not supported */
1284 rt_mode = (hooknum != NF_INET_FORWARD) ?
1285 IP_VS_RT_MODE_LOCAL | IP_VS_RT_MODE_NON_LOCAL |
1286 IP_VS_RT_MODE_RDR : IP_VS_RT_MODE_NON_LOCAL;
1256 if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL, 1287 if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL,
1257 0, 1|2|4))) 1288 0, rt_mode)))
1258 goto tx_error_icmp; 1289 goto tx_error_icmp;
1259 1290
1260 local = __ip_vs_is_local_route6(rt); 1291 local = __ip_vs_is_local_route6(rt);
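
The xmit rework switches from the old four-argument ip_route_output() helper to building a struct flowi4 and calling ip_route_output_key(), which writes the source address selected by the routing code back into fl4.saddr; the patch caches that in dest->dst_saddr.ip and threads it out through the new ret_saddr parameter, so the tunnel encapsulation no longer depends on the removed rt->rt_src and rt->rt_dst fields. A minimal sketch of the lookup, assuming the kernel routing API of this era:

struct flowi4 fl4;
struct rtable *rt;

memset(&fl4, 0, sizeof(fl4));
fl4.daddr = daddr;
fl4.flowi4_tos = rtos;
rt = ip_route_output_key(net, &fl4);
if (IS_ERR(rt))
	return PTR_ERR(rt);
/* fl4.saddr now holds the source address chosen for this route */
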
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 2e1c11f78419..f7af8b866017 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -850,7 +850,7 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
850 850
851 /* It exists; we have (non-exclusive) reference. */ 851 /* It exists; we have (non-exclusive) reference. */
852 if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) { 852 if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
853 *ctinfo = IP_CT_ESTABLISHED + IP_CT_IS_REPLY; 853 *ctinfo = IP_CT_ESTABLISHED_REPLY;
854 /* Please set reply bit if this packet OK */ 854 /* Please set reply bit if this packet OK */
855 *set_reply = 1; 855 *set_reply = 1;
856 } else { 856 } else {
@@ -922,6 +922,9 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
922 ret = -ret; 922 ret = -ret;
923 goto out; 923 goto out;
924 } 924 }
925 /* ICMP[v6] protocol trackers may assign one conntrack. */
926 if (skb->nfct)
927 goto out;
925 } 928 }
926 929
927 ct = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum, 930 ct = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum,
@@ -1143,7 +1146,7 @@ static void nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb)
1143 /* This ICMP is in reverse direction to the packet which caused it */ 1146 /* This ICMP is in reverse direction to the packet which caused it */
1144 ct = nf_ct_get(skb, &ctinfo); 1147 ct = nf_ct_get(skb, &ctinfo);
1145 if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) 1148 if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
1146 ctinfo = IP_CT_RELATED + IP_CT_IS_REPLY; 1149 ctinfo = IP_CT_RELATED_REPLY;
1147 else 1150 else
1148 ctinfo = IP_CT_RELATED; 1151 ctinfo = IP_CT_RELATED;
1149 1152
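
The IP_CT_ESTABLISHED + IP_CT_IS_REPLY arithmetic scattered through the conntrack helpers is replaced by named enumerators; the sums were valid, just unreadable. The combined states are defined in the conntrack common uapi header roughly as follows, so the numeric values are unchanged:

enum ip_conntrack_info {
	IP_CT_ESTABLISHED,		/* 0 */
	IP_CT_RELATED,			/* 1 */
	IP_CT_NEW,			/* 2 */
	IP_CT_IS_REPLY,			/* 3 */
	IP_CT_ESTABLISHED_REPLY	= IP_CT_ESTABLISHED + IP_CT_IS_REPLY,
	IP_CT_RELATED_REPLY	= IP_CT_RELATED + IP_CT_IS_REPLY,
	IP_CT_NEW_REPLY		= IP_CT_NEW + IP_CT_IS_REPLY,
};
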
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
index 80a23ed62bb0..05ecdc281a53 100644
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -68,12 +68,6 @@ nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id, gfp_t gfp)
68 return (void *)(*ext) + off; 68 return (void *)(*ext) + off;
69} 69}
70 70
71static void __nf_ct_ext_free_rcu(struct rcu_head *head)
72{
73 struct nf_ct_ext *ext = container_of(head, struct nf_ct_ext, rcu);
74 kfree(ext);
75}
76
77void *__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp) 71void *__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
78{ 72{
79 struct nf_ct_ext *old, *new; 73 struct nf_ct_ext *old, *new;
@@ -114,7 +108,7 @@ void *__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
114 (void *)old + old->offset[i]); 108 (void *)old + old->offset[i]);
115 rcu_read_unlock(); 109 rcu_read_unlock();
116 } 110 }
117 call_rcu(&old->rcu, __nf_ct_ext_free_rcu); 111 kfree_rcu(old, rcu);
118 ct->ext = new; 112 ct->ext = new;
119 } 113 }
120 114
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
index e17cb7c7dd8f..6f5801eac999 100644
--- a/net/netfilter/nf_conntrack_ftp.c
+++ b/net/netfilter/nf_conntrack_ftp.c
@@ -368,7 +368,7 @@ static int help(struct sk_buff *skb,
368 368
369 /* Until there's been traffic both ways, don't look in packets. */ 369 /* Until there's been traffic both ways, don't look in packets. */
370 if (ctinfo != IP_CT_ESTABLISHED && 370 if (ctinfo != IP_CT_ESTABLISHED &&
371 ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) { 371 ctinfo != IP_CT_ESTABLISHED_REPLY) {
372 pr_debug("ftp: Conntrackinfo = %u\n", ctinfo); 372 pr_debug("ftp: Conntrackinfo = %u\n", ctinfo);
373 return NF_ACCEPT; 373 return NF_ACCEPT;
374 } 374 }
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index 18b2ce5c8ced..f03c2d4539f6 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -571,10 +571,9 @@ static int h245_help(struct sk_buff *skb, unsigned int protoff,
571 int ret; 571 int ret;
572 572
573 /* Until there's been traffic both ways, don't look in packets. */ 573 /* Until there's been traffic both ways, don't look in packets. */
574 if (ctinfo != IP_CT_ESTABLISHED && 574 if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
575 ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) {
576 return NF_ACCEPT; 575 return NF_ACCEPT;
577 } 576
578 pr_debug("nf_ct_h245: skblen = %u\n", skb->len); 577 pr_debug("nf_ct_h245: skblen = %u\n", skb->len);
579 578
580 spin_lock_bh(&nf_h323_lock); 579 spin_lock_bh(&nf_h323_lock);
@@ -1125,10 +1124,9 @@ static int q931_help(struct sk_buff *skb, unsigned int protoff,
1125 int ret; 1124 int ret;
1126 1125
1127 /* Until there's been traffic both ways, don't look in packets. */ 1126 /* Until there's been traffic both ways, don't look in packets. */
1128 if (ctinfo != IP_CT_ESTABLISHED && 1127 if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
1129 ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) {
1130 return NF_ACCEPT; 1128 return NF_ACCEPT;
1131 } 1129
1132 pr_debug("nf_ct_q931: skblen = %u\n", skb->len); 1130 pr_debug("nf_ct_q931: skblen = %u\n", skb->len);
1133 1131
1134 spin_lock_bh(&nf_h323_lock); 1132 spin_lock_bh(&nf_h323_lock);
diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c
index b394aa318776..4f9390b98697 100644
--- a/net/netfilter/nf_conntrack_irc.c
+++ b/net/netfilter/nf_conntrack_irc.c
@@ -125,8 +125,7 @@ static int help(struct sk_buff *skb, unsigned int protoff,
125 return NF_ACCEPT; 125 return NF_ACCEPT;
126 126
127 /* Until there's been traffic both ways, don't look in packets. */ 127 /* Until there's been traffic both ways, don't look in packets. */
128 if (ctinfo != IP_CT_ESTABLISHED && 128 if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
129 ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY)
130 return NF_ACCEPT; 129 return NF_ACCEPT;
131 130
132 /* Not a full tcp header? */ 131 /* Not a full tcp header? */
diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c
index 088944824e13..2fd4565144de 100644
--- a/net/netfilter/nf_conntrack_pptp.c
+++ b/net/netfilter/nf_conntrack_pptp.c
@@ -519,8 +519,7 @@ conntrack_pptp_help(struct sk_buff *skb, unsigned int protoff,
519 u_int16_t msg; 519 u_int16_t msg;
520 520
521 /* don't do any tracking before tcp handshake complete */ 521 /* don't do any tracking before tcp handshake complete */
522 if (ctinfo != IP_CT_ESTABLISHED && 522 if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
523 ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY)
524 return NF_ACCEPT; 523 return NF_ACCEPT;
525 524
526 nexthdr_off = protoff; 525 nexthdr_off = protoff;
diff --git a/net/netfilter/nf_conntrack_sane.c b/net/netfilter/nf_conntrack_sane.c
index d9e27734b2a2..8501823b3f9b 100644
--- a/net/netfilter/nf_conntrack_sane.c
+++ b/net/netfilter/nf_conntrack_sane.c
@@ -78,7 +78,7 @@ static int help(struct sk_buff *skb,
78 ct_sane_info = &nfct_help(ct)->help.ct_sane_info; 78 ct_sane_info = &nfct_help(ct)->help.ct_sane_info;
79 /* Until there's been traffic both ways, don't look in packets. */ 79 /* Until there's been traffic both ways, don't look in packets. */
80 if (ctinfo != IP_CT_ESTABLISHED && 80 if (ctinfo != IP_CT_ESTABLISHED &&
81 ctinfo != IP_CT_ESTABLISHED+IP_CT_IS_REPLY) 81 ctinfo != IP_CT_ESTABLISHED_REPLY)
82 return NF_ACCEPT; 82 return NF_ACCEPT;
83 83
84 /* Not a full tcp header? */ 84 /* Not a full tcp header? */
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index 237cc1981b89..93faf6a3a637 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -1419,10 +1419,11 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
1419 const char *dptr, *end; 1419 const char *dptr, *end;
1420 s16 diff, tdiff = 0; 1420 s16 diff, tdiff = 0;
1421 int ret = NF_ACCEPT; 1421 int ret = NF_ACCEPT;
1422 bool term;
1422 typeof(nf_nat_sip_seq_adjust_hook) nf_nat_sip_seq_adjust; 1423 typeof(nf_nat_sip_seq_adjust_hook) nf_nat_sip_seq_adjust;
1423 1424
1424 if (ctinfo != IP_CT_ESTABLISHED && 1425 if (ctinfo != IP_CT_ESTABLISHED &&
1425 ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) 1426 ctinfo != IP_CT_ESTABLISHED_REPLY)
1426 return NF_ACCEPT; 1427 return NF_ACCEPT;
1427 1428
1428 /* No Data ? */ 1429 /* No Data ? */
@@ -1453,14 +1454,21 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
1453 if (dptr + matchoff == end) 1454 if (dptr + matchoff == end)
1454 break; 1455 break;
1455 1456
1456 if (end + strlen("\r\n\r\n") > dptr + datalen) 1457 term = false;
1457 break; 1458 for (; end + strlen("\r\n\r\n") <= dptr + datalen; end++) {
1458 if (end[0] != '\r' || end[1] != '\n' || 1459 if (end[0] == '\r' && end[1] == '\n' &&
1459 end[2] != '\r' || end[3] != '\n') 1460 end[2] == '\r' && end[3] == '\n') {
1461 term = true;
1462 break;
1463 }
1464 }
1465 if (!term)
1460 break; 1466 break;
1461 end += strlen("\r\n\r\n") + clen; 1467 end += strlen("\r\n\r\n") + clen;
1462 1468
1463 msglen = origlen = end - dptr; 1469 msglen = origlen = end - dptr;
1470 if (msglen > datalen)
1471 return NF_DROP;
1464 1472
1465 ret = process_sip_msg(skb, ct, dataoff, &dptr, &msglen); 1473 ret = process_sip_msg(skb, ct, dataoff, &dptr, &msglen);
1466 if (ret != NF_ACCEPT) 1474 if (ret != NF_ACCEPT)
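
The sip_help_tcp() change loosens the framing check: instead of requiring the "\r\n\r\n" header terminator to sit exactly at the expected offset, it scans forward for it, and the new msglen > datalen check drops a message whose declared length would run past the segment. A standalone model of the scan (names are illustrative, not from the patch):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Find "\r\n\r\n" anywhere in [p, p + len), not only at p. */
static bool find_terminator(const char *p, size_t len, size_t *off)
{
	for (size_t i = 0; i + 4 <= len; i++) {
		if (memcmp(p + i, "\r\n\r\n", 4) == 0) {
			*off = i;
			return true;
		}
	}
	return false;
}

int main(void)
{
	const char msg[] = "SIP/2.0 200 OK\r\nContent-Length: 0\r\n\r\n";
	size_t off;

	if (find_terminator(msg, sizeof(msg) - 1, &off))
		printf("headers end at offset %zu\n", off);
	return 0;
}
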
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 0ae142825881..05e9feb101c3 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -245,7 +245,7 @@ static int ct_seq_show(struct seq_file *s, void *v)
245 ret = 0; 245 ret = 0;
246release: 246release:
247 nf_ct_put(ct); 247 nf_ct_put(ct);
248 return 0; 248 return ret;
249} 249}
250 250
251static const struct seq_operations ct_seq_ops = { 251static const struct seq_operations ct_seq_ops = {
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 985e9b76c916..2e7ccbb43ddb 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -381,7 +381,6 @@ __build_packet_message(struct nfulnl_instance *inst,
381 struct nfulnl_msg_packet_hdr pmsg; 381 struct nfulnl_msg_packet_hdr pmsg;
382 struct nlmsghdr *nlh; 382 struct nlmsghdr *nlh;
383 struct nfgenmsg *nfmsg; 383 struct nfgenmsg *nfmsg;
384 __be32 tmp_uint;
385 sk_buff_data_t old_tail = inst->skb->tail; 384 sk_buff_data_t old_tail = inst->skb->tail;
386 385
387 nlh = NLMSG_PUT(inst->skb, 0, 0, 386 nlh = NLMSG_PUT(inst->skb, 0, 0,
@@ -428,7 +427,6 @@ __build_packet_message(struct nfulnl_instance *inst,
428 } 427 }
429 428
430 if (outdev) { 429 if (outdev) {
431 tmp_uint = htonl(outdev->ifindex);
432#ifndef CONFIG_BRIDGE_NETFILTER 430#ifndef CONFIG_BRIDGE_NETFILTER
433 NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV, 431 NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV,
434 htonl(outdev->ifindex)); 432 htonl(outdev->ifindex));
@@ -458,7 +456,8 @@ __build_packet_message(struct nfulnl_instance *inst,
458 if (skb->mark) 456 if (skb->mark)
459 NLA_PUT_BE32(inst->skb, NFULA_MARK, htonl(skb->mark)); 457 NLA_PUT_BE32(inst->skb, NFULA_MARK, htonl(skb->mark));
460 458
461 if (indev && skb->dev) { 459 if (indev && skb->dev &&
460 skb->mac_header != skb->network_header) {
462 struct nfulnl_msg_packet_hw phw; 461 struct nfulnl_msg_packet_hw phw;
463 int len = dev_parse_header(skb, phw.hw_addr); 462 int len = dev_parse_header(skb, phw.hw_addr);
464 if (len > 0) { 463 if (len > 0) {
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index b83123f12b42..fdd2fafe0a14 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -335,7 +335,8 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
335 if (entskb->mark) 335 if (entskb->mark)
336 NLA_PUT_BE32(skb, NFQA_MARK, htonl(entskb->mark)); 336 NLA_PUT_BE32(skb, NFQA_MARK, htonl(entskb->mark));
337 337
338 if (indev && entskb->dev) { 338 if (indev && entskb->dev &&
339 entskb->mac_header != entskb->network_header) {
339 struct nfqnl_msg_packet_hw phw; 340 struct nfqnl_msg_packet_hw phw;
340 int len = dev_parse_header(entskb, phw.hw_addr); 341 int len = dev_parse_header(entskb, phw.hw_addr);
341 if (len) { 342 if (len) {
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 8a025a585d2f..b0869fe3633b 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -762,8 +762,8 @@ void xt_compat_unlock(u_int8_t af)
762EXPORT_SYMBOL_GPL(xt_compat_unlock); 762EXPORT_SYMBOL_GPL(xt_compat_unlock);
763#endif 763#endif
764 764
765DEFINE_PER_CPU(struct xt_info_lock, xt_info_locks); 765DEFINE_PER_CPU(seqcount_t, xt_recseq);
766EXPORT_PER_CPU_SYMBOL_GPL(xt_info_locks); 766EXPORT_PER_CPU_SYMBOL_GPL(xt_recseq);
767 767
768static int xt_jumpstack_alloc(struct xt_table_info *i) 768static int xt_jumpstack_alloc(struct xt_table_info *i)
769{ 769{
@@ -1362,10 +1362,7 @@ static int __init xt_init(void)
1362 int rv; 1362 int rv;
1363 1363
1364 for_each_possible_cpu(i) { 1364 for_each_possible_cpu(i) {
1365 struct xt_info_lock *lock = &per_cpu(xt_info_locks, i); 1365 seqcount_init(&per_cpu(xt_recseq, i));
1366
1367 seqlock_init(&lock->lock);
1368 lock->readers = 0;
1369 } 1366 }
1370 1367
1371 xt = kmalloc(sizeof(struct xt_af) * NFPROTO_NUMPROTO, GFP_KERNEL); 1368 xt = kmalloc(sizeof(struct xt_af) * NFPROTO_NUMPROTO, GFP_KERNEL);
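
x_tables drops its combined lock/counter structure for a bare per-cpu seqcount_t: table replacement only needs to know when each CPU has finished traversing the old ruleset, which a sequence counter expresses more cheaply than a seqlock. A generic sketch of the seqcount read/write protocol from linux/seqlock.h, independent of the xt_recseq specifics:

#include <linux/seqlock.h>

static seqcount_t seq;		/* seqcount_init(&seq) before first use */
static int shared_data;

/* writer: callers must already be serialized against other writers */
static void publish(int v)
{
	write_seqcount_begin(&seq);
	shared_data = v;
	write_seqcount_end(&seq);
}

/* reader: retries if it raced with a writer */
static int snapshot(void)
{
	unsigned int start;
	int v;

	do {
		start = read_seqcount_begin(&seq);
		v = shared_data;
	} while (read_seqcount_retry(&seq, start));

	return v;
}
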
diff --git a/net/netfilter/xt_osf.c b/net/netfilter/xt_osf.c
index 4327e101c047..846f895cb656 100644
--- a/net/netfilter/xt_osf.c
+++ b/net/netfilter/xt_osf.c
@@ -62,13 +62,6 @@ static const struct nla_policy xt_osf_policy[OSF_ATTR_MAX + 1] = {
62 [OSF_ATTR_FINGER] = { .len = sizeof(struct xt_osf_user_finger) }, 62 [OSF_ATTR_FINGER] = { .len = sizeof(struct xt_osf_user_finger) },
63}; 63};
64 64
65static void xt_osf_finger_free_rcu(struct rcu_head *rcu_head)
66{
67 struct xt_osf_finger *f = container_of(rcu_head, struct xt_osf_finger, rcu_head);
68
69 kfree(f);
70}
71
72static int xt_osf_add_callback(struct sock *ctnl, struct sk_buff *skb, 65static int xt_osf_add_callback(struct sock *ctnl, struct sk_buff *skb,
73 const struct nlmsghdr *nlh, 66 const struct nlmsghdr *nlh,
74 const struct nlattr * const osf_attrs[]) 67 const struct nlattr * const osf_attrs[])
@@ -133,7 +126,7 @@ static int xt_osf_remove_callback(struct sock *ctnl, struct sk_buff *skb,
133 * We are protected by nfnl mutex. 126 * We are protected by nfnl mutex.
134 */ 127 */
135 list_del_rcu(&sf->finger_entry); 128 list_del_rcu(&sf->finger_entry);
136 call_rcu(&sf->rcu_head, xt_osf_finger_free_rcu); 129 kfree_rcu(sf, rcu_head);
137 130
138 err = 0; 131 err = 0;
139 break; 132 break;
@@ -414,7 +407,7 @@ static void __exit xt_osf_fini(void)
414 407
415 list_for_each_entry_rcu(f, &xt_osf_fingers[i], finger_entry) { 408 list_for_each_entry_rcu(f, &xt_osf_fingers[i], finger_entry) {
416 list_del_rcu(&f->finger_entry); 409 list_del_rcu(&f->finger_entry);
417 call_rcu(&f->rcu_head, xt_osf_finger_free_rcu); 410 kfree_rcu(f, rcu_head);
418 } 411 }
419 } 412 }
420 rcu_read_unlock(); 413 rcu_read_unlock();
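This is the first of several identical conversions in this series: when an RCU callback exists only to kfree() its container, kfree_rcu() replaces both the callback and the call_rcu() site. A minimal sketch of the before/after shape, with a hypothetical demo_entry structure that embeds a struct rcu_head:

#include <linux/rculist.h>
#include <linux/slab.h>

struct demo_entry {
	struct list_head list;
	struct rcu_head rcu;
};

/* Before: a callback whose sole job is container_of() + kfree(). */
static void demo_entry_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct demo_entry, rcu));
}

static void demo_entry_del_old(struct demo_entry *e)
{
	list_del_rcu(&e->list);
	call_rcu(&e->rcu, demo_entry_free_rcu);
}

/* After: kfree_rcu() takes the object and the name of its rcu_head
 * member, so the boilerplate callback disappears (and, as the
 * act_police hunk below shows, a module no longer needs an
 * rcu_barrier() at exit just for these frees). */
static void demo_entry_del_new(struct demo_entry *e)
{
	list_del_rcu(&e->list);
	kfree_rcu(e, rcu);
}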
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 9cc46356b577..fe39f7e913df 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -143,9 +143,9 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
143 ct = nf_ct_get(skb, &ctinfo); 143 ct = nf_ct_get(skb, &ctinfo);
144 if (ct && !nf_ct_is_untracked(ct) && 144 if (ct && !nf_ct_is_untracked(ct) &&
145 ((iph->protocol != IPPROTO_ICMP && 145 ((iph->protocol != IPPROTO_ICMP &&
146 ctinfo == IP_CT_IS_REPLY + IP_CT_ESTABLISHED) || 146 ctinfo == IP_CT_ESTABLISHED_REPLY) ||
147 (iph->protocol == IPPROTO_ICMP && 147 (iph->protocol == IPPROTO_ICMP &&
148 ctinfo == IP_CT_IS_REPLY + IP_CT_RELATED)) && 148 ctinfo == IP_CT_RELATED_REPLY)) &&
149 (ct->status & IPS_SRC_NAT_DONE)) { 149 (ct->status & IPS_SRC_NAT_DONE)) {
150 150
151 daddr = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip; 151 daddr = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip;
diff --git a/net/netlabel/netlabel_addrlist.h b/net/netlabel/netlabel_addrlist.h
index 1c1c093cf279..2b9644e19de0 100644
--- a/net/netlabel/netlabel_addrlist.h
+++ b/net/netlabel/netlabel_addrlist.h
@@ -96,12 +96,12 @@ static inline struct netlbl_af4list *__af4list_valid_rcu(struct list_head *s,
96 96
97#define netlbl_af4list_foreach(iter, head) \ 97#define netlbl_af4list_foreach(iter, head) \
98 for (iter = __af4list_valid((head)->next, head); \ 98 for (iter = __af4list_valid((head)->next, head); \
99 prefetch(iter->list.next), &iter->list != (head); \ 99 &iter->list != (head); \
100 iter = __af4list_valid(iter->list.next, head)) 100 iter = __af4list_valid(iter->list.next, head))
101 101
102#define netlbl_af4list_foreach_rcu(iter, head) \ 102#define netlbl_af4list_foreach_rcu(iter, head) \
103 for (iter = __af4list_valid_rcu((head)->next, head); \ 103 for (iter = __af4list_valid_rcu((head)->next, head); \
104 prefetch(iter->list.next), &iter->list != (head); \ 104 &iter->list != (head); \
105 iter = __af4list_valid_rcu(iter->list.next, head)) 105 iter = __af4list_valid_rcu(iter->list.next, head))
106 106
107#define netlbl_af4list_foreach_safe(iter, tmp, head) \ 107#define netlbl_af4list_foreach_safe(iter, tmp, head) \
@@ -163,12 +163,12 @@ static inline struct netlbl_af6list *__af6list_valid_rcu(struct list_head *s,
163 163
164#define netlbl_af6list_foreach(iter, head) \ 164#define netlbl_af6list_foreach(iter, head) \
165 for (iter = __af6list_valid((head)->next, head); \ 165 for (iter = __af6list_valid((head)->next, head); \
166 prefetch(iter->list.next), &iter->list != (head); \ 166 &iter->list != (head); \
167 iter = __af6list_valid(iter->list.next, head)) 167 iter = __af6list_valid(iter->list.next, head))
168 168
169#define netlbl_af6list_foreach_rcu(iter, head) \ 169#define netlbl_af6list_foreach_rcu(iter, head) \
170 for (iter = __af6list_valid_rcu((head)->next, head); \ 170 for (iter = __af6list_valid_rcu((head)->next, head); \
171 prefetch(iter->list.next), &iter->list != (head); \ 171 &iter->list != (head); \
172 iter = __af6list_valid_rcu(iter->list.next, head)) 172 iter = __af6list_valid_rcu(iter->list.next, head))
173 173
174#define netlbl_af6list_foreach_safe(iter, tmp, head) \ 174#define netlbl_af6list_foreach_safe(iter, tmp, head) \
diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c
index 5f14c8462e30..bae5756b1626 100644
--- a/net/netlabel/netlabel_cipso_v4.c
+++ b/net/netlabel/netlabel_cipso_v4.c
@@ -422,7 +422,6 @@ static int netlbl_cipsov4_add(struct sk_buff *skb, struct genl_info *info)
422 422
423{ 423{
424 int ret_val = -EINVAL; 424 int ret_val = -EINVAL;
425 const char *type_str = "(unknown)";
426 struct netlbl_audit audit_info; 425 struct netlbl_audit audit_info;
427 426
428 if (!info->attrs[NLBL_CIPSOV4_A_DOI] || 427 if (!info->attrs[NLBL_CIPSOV4_A_DOI] ||
@@ -432,15 +431,12 @@ static int netlbl_cipsov4_add(struct sk_buff *skb, struct genl_info *info)
432 netlbl_netlink_auditinfo(skb, &audit_info); 431 netlbl_netlink_auditinfo(skb, &audit_info);
433 switch (nla_get_u32(info->attrs[NLBL_CIPSOV4_A_MTYPE])) { 432 switch (nla_get_u32(info->attrs[NLBL_CIPSOV4_A_MTYPE])) {
434 case CIPSO_V4_MAP_TRANS: 433 case CIPSO_V4_MAP_TRANS:
435 type_str = "trans";
436 ret_val = netlbl_cipsov4_add_std(info, &audit_info); 434 ret_val = netlbl_cipsov4_add_std(info, &audit_info);
437 break; 435 break;
438 case CIPSO_V4_MAP_PASS: 436 case CIPSO_V4_MAP_PASS:
439 type_str = "pass";
440 ret_val = netlbl_cipsov4_add_pass(info, &audit_info); 437 ret_val = netlbl_cipsov4_add_pass(info, &audit_info);
441 break; 438 break;
442 case CIPSO_V4_MAP_LOCAL: 439 case CIPSO_V4_MAP_LOCAL:
443 type_str = "local";
444 ret_val = netlbl_cipsov4_add_local(info, &audit_info); 440 ret_val = netlbl_cipsov4_add_local(info, &audit_info);
445 break; 441 break;
446 } 442 }
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index e2b0a680dd56..9c38658fba8b 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -154,44 +154,6 @@ static const struct nla_policy netlbl_unlabel_genl_policy[NLBL_UNLABEL_A_MAX + 1
154 */ 154 */
155 155
156/** 156/**
157 * netlbl_unlhsh_free_addr4 - Frees an IPv4 address entry from the hash table
158 * @entry: the entry's RCU field
159 *
160 * Description:
161 * This function is designed to be used as a callback to the call_rcu()
162 * function so that memory allocated to a hash table address entry can be
163 * released safely.
164 *
165 */
166static void netlbl_unlhsh_free_addr4(struct rcu_head *entry)
167{
168 struct netlbl_unlhsh_addr4 *ptr;
169
170 ptr = container_of(entry, struct netlbl_unlhsh_addr4, rcu);
171 kfree(ptr);
172}
173
174#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
175/**
176 * netlbl_unlhsh_free_addr6 - Frees an IPv6 address entry from the hash table
177 * @entry: the entry's RCU field
178 *
179 * Description:
180 * This function is designed to be used as a callback to the call_rcu()
181 * function so that memory allocated to a hash table address entry can be
182 * released safely.
183 *
184 */
185static void netlbl_unlhsh_free_addr6(struct rcu_head *entry)
186{
187 struct netlbl_unlhsh_addr6 *ptr;
188
189 ptr = container_of(entry, struct netlbl_unlhsh_addr6, rcu);
190 kfree(ptr);
191}
192#endif /* IPv6 */
193
194/**
195 * netlbl_unlhsh_free_iface - Frees an interface entry from the hash table 157 * netlbl_unlhsh_free_iface - Frees an interface entry from the hash table
196 * @entry: the entry's RCU field 158 * @entry: the entry's RCU field
197 * 159 *
@@ -568,7 +530,7 @@ static int netlbl_unlhsh_remove_addr4(struct net *net,
568 if (entry == NULL) 530 if (entry == NULL)
569 return -ENOENT; 531 return -ENOENT;
570 532
571 call_rcu(&entry->rcu, netlbl_unlhsh_free_addr4); 533 kfree_rcu(entry, rcu);
572 return 0; 534 return 0;
573} 535}
574 536
@@ -629,7 +591,7 @@ static int netlbl_unlhsh_remove_addr6(struct net *net,
629 if (entry == NULL) 591 if (entry == NULL)
630 return -ENOENT; 592 return -ENOENT;
631 593
632 call_rcu(&entry->rcu, netlbl_unlhsh_free_addr6); 594 kfree_rcu(entry, rcu);
633 return 0; 595 return 0;
634} 596}
635#endif /* IPv6 */ 597#endif /* IPv6 */
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index c8f35b5d2ee9..6ef64adf7362 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1566,12 +1566,6 @@ netlink_kernel_release(struct sock *sk)
1566} 1566}
1567EXPORT_SYMBOL(netlink_kernel_release); 1567EXPORT_SYMBOL(netlink_kernel_release);
1568 1568
1569
1570static void listeners_free_rcu(struct rcu_head *head)
1571{
1572 kfree(container_of(head, struct listeners, rcu));
1573}
1574
1575int __netlink_change_ngroups(struct sock *sk, unsigned int groups) 1569int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
1576{ 1570{
1577 struct listeners *new, *old; 1571 struct listeners *new, *old;
@@ -1588,7 +1582,7 @@ int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
1588 memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups)); 1582 memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
1589 rcu_assign_pointer(tbl->listeners, new); 1583 rcu_assign_pointer(tbl->listeners, new);
1590 1584
1591 call_rcu(&old->rcu, listeners_free_rcu); 1585 kfree_rcu(old, rcu);
1592 } 1586 }
1593 tbl->groups = groups; 1587 tbl->groups = groups;
1594 1588
@@ -1991,7 +1985,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
1991 struct sock *s = v; 1985 struct sock *s = v;
1992 struct netlink_sock *nlk = nlk_sk(s); 1986 struct netlink_sock *nlk = nlk_sk(s);
1993 1987
1994 seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %-8d %-8d %-8lu\n", 1988 seq_printf(seq, "%pK %-3d %-6d %08x %-8d %-8d %pK %-8d %-8d %-8lu\n",
1995 s, 1989 s,
1996 s->sk_protocol, 1990 s->sk_protocol,
1997 nlk->pid, 1991 nlk->pid,
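The seq_printf format switches from %p to %pK here, and again in af_packet and phonet below. %pK honours the kptr_restrict sysctl and prints zeros to unprivileged readers instead of a raw kernel address. A minimal sketch of a show function using it (demo_* names are hypothetical):

#include <linux/seq_file.h>
#include <net/sock.h>

static int demo_seq_show(struct seq_file *seq, void *v)
{
	struct sock *sk = v;

	/* %pK hides the kernel address from unprivileged readers
	 * (see /proc/sys/kernel/kptr_restrict); %p would leak it. */
	seq_printf(seq, "%pK %-3d\n", sk, sk->sk_protocol);
	return 0;
}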
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 06cb02796a0e..732152f718e0 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -591,7 +591,6 @@ static int nr_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
591 return -EINVAL; 591 return -EINVAL;
592 } 592 }
593 if ((dev = nr_dev_get(&addr->fsa_ax25.sax25_call)) == NULL) { 593 if ((dev = nr_dev_get(&addr->fsa_ax25.sax25_call)) == NULL) {
594 SOCK_DEBUG(sk, "NET/ROM: bind failed: invalid node callsign\n");
595 release_sock(sk); 594 release_sock(sk);
596 return -EADDRNOTAVAIL; 595 return -EADDRNOTAVAIL;
597 } 596 }
@@ -632,7 +631,7 @@ static int nr_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
632 sock_reset_flag(sk, SOCK_ZAPPED); 631 sock_reset_flag(sk, SOCK_ZAPPED);
633 dev_put(dev); 632 dev_put(dev);
634 release_sock(sk); 633 release_sock(sk);
635 SOCK_DEBUG(sk, "NET/ROM: socket is bound\n"); 634
636 return 0; 635 return 0;
637} 636}
638 637
@@ -1082,8 +1081,6 @@ static int nr_sendmsg(struct kiocb *iocb, struct socket *sock,
1082 sax.sax25_call = nr->dest_addr; 1081 sax.sax25_call = nr->dest_addr;
1083 } 1082 }
1084 1083
1085 SOCK_DEBUG(sk, "NET/ROM: sendto: Addresses built.\n");
1086
1087 /* Build a packet - the conventional user limit is 236 bytes. We can 1084 /* Build a packet - the conventional user limit is 236 bytes. We can
1088 do ludicrously large NetROM frames but must not overflow */ 1085 do ludicrously large NetROM frames but must not overflow */
1089 if (len > 65536) { 1086 if (len > 65536) {
@@ -1091,7 +1088,6 @@ static int nr_sendmsg(struct kiocb *iocb, struct socket *sock,
1091 goto out; 1088 goto out;
1092 } 1089 }
1093 1090
1094 SOCK_DEBUG(sk, "NET/ROM: sendto: building packet.\n");
1095 size = len + NR_NETWORK_LEN + NR_TRANSPORT_LEN; 1091 size = len + NR_NETWORK_LEN + NR_TRANSPORT_LEN;
1096 1092
1097 if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL) 1093 if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL)
@@ -1105,7 +1101,6 @@ static int nr_sendmsg(struct kiocb *iocb, struct socket *sock,
1105 */ 1101 */
1106 1102
1107 asmptr = skb_push(skb, NR_TRANSPORT_LEN); 1103 asmptr = skb_push(skb, NR_TRANSPORT_LEN);
1108 SOCK_DEBUG(sk, "Building NET/ROM Header.\n");
1109 1104
1110 /* Build a NET/ROM Transport header */ 1105 /* Build a NET/ROM Transport header */
1111 1106
@@ -1114,15 +1109,12 @@ static int nr_sendmsg(struct kiocb *iocb, struct socket *sock,
1114 *asmptr++ = 0; /* To be filled in later */ 1109 *asmptr++ = 0; /* To be filled in later */
1115 *asmptr++ = 0; /* Ditto */ 1110 *asmptr++ = 0; /* Ditto */
1116 *asmptr++ = NR_INFO; 1111 *asmptr++ = NR_INFO;
1117 SOCK_DEBUG(sk, "Built header.\n");
1118 1112
1119 /* 1113 /*
1120 * Put the data on the end 1114 * Put the data on the end
1121 */ 1115 */
1122 skb_put(skb, len); 1116 skb_put(skb, len);
1123 1117
1124 SOCK_DEBUG(sk, "NET/ROM: Appending user data\n");
1125
1126 /* User data follows immediately after the NET/ROM transport header */ 1118 /* User data follows immediately after the NET/ROM transport header */
1127 if (memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len)) { 1119 if (memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len)) {
1128 kfree_skb(skb); 1120 kfree_skb(skb);
@@ -1130,8 +1122,6 @@ static int nr_sendmsg(struct kiocb *iocb, struct socket *sock,
1130 goto out; 1122 goto out;
1131 } 1123 }
1132 1124
1133 SOCK_DEBUG(sk, "NET/ROM: Transmitting buffer\n");
1134
1135 if (sk->sk_state != TCP_ESTABLISHED) { 1125 if (sk->sk_state != TCP_ESTABLISHED) {
1136 kfree_skb(skb); 1126 kfree_skb(skb);
1137 err = -ENOTCONN; 1127 err = -ENOTCONN;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index b5362e96022b..c0c3cda19712 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -538,7 +538,7 @@ static inline unsigned int run_filter(const struct sk_buff *skb,
538 rcu_read_lock(); 538 rcu_read_lock();
539 filter = rcu_dereference(sk->sk_filter); 539 filter = rcu_dereference(sk->sk_filter);
540 if (filter != NULL) 540 if (filter != NULL)
541 res = sk_run_filter(skb, filter->insns); 541 res = SK_RUN_FILTER(filter, skb);
542 rcu_read_unlock(); 542 rcu_read_unlock();
543 543
544 return res; 544 return res;
@@ -798,7 +798,13 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
798 getnstimeofday(&ts); 798 getnstimeofday(&ts);
799 h.h2->tp_sec = ts.tv_sec; 799 h.h2->tp_sec = ts.tv_sec;
800 h.h2->tp_nsec = ts.tv_nsec; 800 h.h2->tp_nsec = ts.tv_nsec;
801 h.h2->tp_vlan_tci = vlan_tx_tag_get(skb); 801 if (vlan_tx_tag_present(skb)) {
802 h.h2->tp_vlan_tci = vlan_tx_tag_get(skb);
803 status |= TP_STATUS_VLAN_VALID;
804 } else {
805 h.h2->tp_vlan_tci = 0;
806 }
807 h.h2->tp_padding = 0;
802 hdrlen = sizeof(*h.h2); 808 hdrlen = sizeof(*h.h2);
803 break; 809 break;
804 default: 810 default:
@@ -1725,8 +1731,13 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
1725 aux.tp_snaplen = skb->len; 1731 aux.tp_snaplen = skb->len;
1726 aux.tp_mac = 0; 1732 aux.tp_mac = 0;
1727 aux.tp_net = skb_network_offset(skb); 1733 aux.tp_net = skb_network_offset(skb);
1728 aux.tp_vlan_tci = vlan_tx_tag_get(skb); 1734 if (vlan_tx_tag_present(skb)) {
1729 1735 aux.tp_vlan_tci = vlan_tx_tag_get(skb);
1736 aux.tp_status |= TP_STATUS_VLAN_VALID;
1737 } else {
1738 aux.tp_vlan_tci = 0;
1739 }
1740 aux.tp_padding = 0;
1730 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux); 1741 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
1731 } 1742 }
1732 1743
@@ -2706,7 +2717,7 @@ static int packet_seq_show(struct seq_file *seq, void *v)
2706 const struct packet_sock *po = pkt_sk(s); 2717 const struct packet_sock *po = pkt_sk(s);
2707 2718
2708 seq_printf(seq, 2719 seq_printf(seq,
2709 "%p %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n", 2720 "%pK %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
2710 s, 2721 s,
2711 atomic_read(&s->sk_refcnt), 2722 atomic_read(&s->sk_refcnt),
2712 s->sk_type, 2723 s->sk_type,
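The two af_packet hunks above add TP_STATUS_VLAN_VALID because a bare tp_vlan_tci of 0 is ambiguous: it could mean "untagged" or a genuine TCI of 0. A hedged userspace sketch of a consumer, assuming headers that already define the new flag:

#include <linux/if_packet.h>
#include <stdio.h>

/* Sketch only: the validity bit, not the TCI value, tells the
 * consumer whether a VLAN tag was present at all. */
static void demo_handle_frame(const struct tpacket2_hdr *h)
{
	if (h->tp_status & TP_STATUS_VLAN_VALID)
		printf("VLAN TCI %u\n", h->tp_vlan_tci);
	else
		printf("untagged frame\n");
}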
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
index 947038ddd04c..d2df8f33160b 100644
--- a/net/phonet/pn_dev.c
+++ b/net/phonet/pn_dev.c
@@ -162,14 +162,6 @@ int phonet_address_add(struct net_device *dev, u8 addr)
162 return err; 162 return err;
163} 163}
164 164
165static void phonet_device_rcu_free(struct rcu_head *head)
166{
167 struct phonet_device *pnd;
168
169 pnd = container_of(head, struct phonet_device, rcu);
170 kfree(pnd);
171}
172
173int phonet_address_del(struct net_device *dev, u8 addr) 165int phonet_address_del(struct net_device *dev, u8 addr)
174{ 166{
175 struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev)); 167 struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev));
@@ -188,7 +180,7 @@ int phonet_address_del(struct net_device *dev, u8 addr)
188 mutex_unlock(&pndevs->lock); 180 mutex_unlock(&pndevs->lock);
189 181
190 if (pnd) 182 if (pnd)
191 call_rcu(&pnd->rcu, phonet_device_rcu_free); 183 kfree_rcu(pnd, rcu);
192 184
193 return err; 185 return err;
194} 186}
@@ -426,18 +418,14 @@ int phonet_route_del(struct net_device *dev, u8 daddr)
426 return 0; 418 return 0;
427} 419}
428 420
429struct net_device *phonet_route_get(struct net *net, u8 daddr) 421struct net_device *phonet_route_get_rcu(struct net *net, u8 daddr)
430{ 422{
431 struct phonet_net *pnn = phonet_pernet(net); 423 struct phonet_net *pnn = phonet_pernet(net);
432 struct phonet_routes *routes = &pnn->routes; 424 struct phonet_routes *routes = &pnn->routes;
433 struct net_device *dev; 425 struct net_device *dev;
434 426
435 ASSERT_RTNL(); /* no need to hold the device */
436
437 daddr >>= 2; 427 daddr >>= 2;
438 rcu_read_lock();
439 dev = rcu_dereference(routes->table[daddr]); 428 dev = rcu_dereference(routes->table[daddr]);
440 rcu_read_unlock();
441 return dev; 429 return dev;
442} 430}
443 431
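With the rename to phonet_route_get_rcu(), the function no longer takes and drops rcu_read_lock() itself; as the route_dumpit() hunk below shows, the caller now owns the RCU section. A hedged sketch of the calling convention, with a hypothetical demo_route_lookup() wrapper:

#include <linux/netdevice.h>
#include <net/phonet/pn_dev.h>

static struct net_device *demo_route_lookup(struct net *net, u8 daddr)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = phonet_route_get_rcu(net, daddr);
	if (dev)
		dev_hold(dev);	/* reference survives rcu_read_unlock() */
	rcu_read_unlock();
	return dev;	/* caller does dev_put() when done */
}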
diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c
index 58b3b1f991ed..438accb7a5a8 100644
--- a/net/phonet/pn_netlink.c
+++ b/net/phonet/pn_netlink.c
@@ -264,10 +264,11 @@ static int route_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
264 struct net *net = sock_net(skb->sk); 264 struct net *net = sock_net(skb->sk);
265 u8 addr, addr_idx = 0, addr_start_idx = cb->args[0]; 265 u8 addr, addr_idx = 0, addr_start_idx = cb->args[0];
266 266
267 rcu_read_lock();
267 for (addr = 0; addr < 64; addr++) { 268 for (addr = 0; addr < 64; addr++) {
268 struct net_device *dev; 269 struct net_device *dev;
269 270
270 dev = phonet_route_get(net, addr << 2); 271 dev = phonet_route_get_rcu(net, addr << 2);
271 if (!dev) 272 if (!dev)
272 continue; 273 continue;
273 274
@@ -279,6 +280,7 @@ static int route_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
279 } 280 }
280 281
281out: 282out:
283 rcu_read_unlock();
282 cb->args[0] = addr_idx; 284 cb->args[0] = addr_idx;
283 cb->args[1] = 0; 285 cb->args[1] = 0;
284 286
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index b1adafab377c..ab07711cf2f4 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -52,7 +52,7 @@ static int pn_socket_release(struct socket *sock)
52 52
53static struct { 53static struct {
54 struct hlist_head hlist[PN_HASHSIZE]; 54 struct hlist_head hlist[PN_HASHSIZE];
55 spinlock_t lock; 55 struct mutex lock;
56} pnsocks; 56} pnsocks;
57 57
58void __init pn_sock_init(void) 58void __init pn_sock_init(void)
@@ -61,7 +61,7 @@ void __init pn_sock_init(void)
61 61
62 for (i = 0; i < PN_HASHSIZE; i++) 62 for (i = 0; i < PN_HASHSIZE; i++)
63 INIT_HLIST_HEAD(pnsocks.hlist + i); 63 INIT_HLIST_HEAD(pnsocks.hlist + i);
64 spin_lock_init(&pnsocks.lock); 64 mutex_init(&pnsocks.lock);
65} 65}
66 66
67static struct hlist_head *pn_hash_list(u16 obj) 67static struct hlist_head *pn_hash_list(u16 obj)
@@ -82,9 +82,8 @@ struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn)
82 u8 res = spn->spn_resource; 82 u8 res = spn->spn_resource;
83 struct hlist_head *hlist = pn_hash_list(obj); 83 struct hlist_head *hlist = pn_hash_list(obj);
84 84
85 spin_lock_bh(&pnsocks.lock); 85 rcu_read_lock();
86 86 sk_for_each_rcu(sknode, node, hlist) {
87 sk_for_each(sknode, node, hlist) {
88 struct pn_sock *pn = pn_sk(sknode); 87 struct pn_sock *pn = pn_sk(sknode);
89 BUG_ON(!pn->sobject); /* unbound socket */ 88 BUG_ON(!pn->sobject); /* unbound socket */
90 89
@@ -107,8 +106,7 @@ struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn)
107 sock_hold(sknode); 106 sock_hold(sknode);
108 break; 107 break;
109 } 108 }
110 109 rcu_read_unlock();
111 spin_unlock_bh(&pnsocks.lock);
112 110
113 return rval; 111 return rval;
114} 112}
@@ -119,7 +117,7 @@ void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb)
119 struct hlist_head *hlist = pnsocks.hlist; 117 struct hlist_head *hlist = pnsocks.hlist;
120 unsigned h; 118 unsigned h;
121 119
122 spin_lock(&pnsocks.lock); 120 rcu_read_lock();
123 for (h = 0; h < PN_HASHSIZE; h++) { 121 for (h = 0; h < PN_HASHSIZE; h++) {
124 struct hlist_node *node; 122 struct hlist_node *node;
125 struct sock *sknode; 123 struct sock *sknode;
@@ -140,25 +138,26 @@ void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb)
140 } 138 }
141 hlist++; 139 hlist++;
142 } 140 }
143 spin_unlock(&pnsocks.lock); 141 rcu_read_unlock();
144} 142}
145 143
146void pn_sock_hash(struct sock *sk) 144void pn_sock_hash(struct sock *sk)
147{ 145{
148 struct hlist_head *hlist = pn_hash_list(pn_sk(sk)->sobject); 146 struct hlist_head *hlist = pn_hash_list(pn_sk(sk)->sobject);
149 147
150 spin_lock_bh(&pnsocks.lock); 148 mutex_lock(&pnsocks.lock);
151 sk_add_node(sk, hlist); 149 sk_add_node_rcu(sk, hlist);
152 spin_unlock_bh(&pnsocks.lock); 150 mutex_unlock(&pnsocks.lock);
153} 151}
154EXPORT_SYMBOL(pn_sock_hash); 152EXPORT_SYMBOL(pn_sock_hash);
155 153
156void pn_sock_unhash(struct sock *sk) 154void pn_sock_unhash(struct sock *sk)
157{ 155{
158 spin_lock_bh(&pnsocks.lock); 156 mutex_lock(&pnsocks.lock);
159 sk_del_node_init(sk); 157 sk_del_node_init_rcu(sk);
160 spin_unlock_bh(&pnsocks.lock); 158 mutex_unlock(&pnsocks.lock);
161 pn_sock_unbind_all_res(sk); 159 pn_sock_unbind_all_res(sk);
160 synchronize_rcu();
162} 161}
163EXPORT_SYMBOL(pn_sock_unhash); 162EXPORT_SYMBOL(pn_sock_unhash);
164 163
@@ -548,7 +547,7 @@ static struct sock *pn_sock_get_idx(struct seq_file *seq, loff_t pos)
548 unsigned h; 547 unsigned h;
549 548
550 for (h = 0; h < PN_HASHSIZE; h++) { 549 for (h = 0; h < PN_HASHSIZE; h++) {
551 sk_for_each(sknode, node, hlist) { 550 sk_for_each_rcu(sknode, node, hlist) {
552 if (!net_eq(net, sock_net(sknode))) 551 if (!net_eq(net, sock_net(sknode)))
553 continue; 552 continue;
554 if (!pos) 553 if (!pos)
@@ -572,9 +571,9 @@ static struct sock *pn_sock_get_next(struct seq_file *seq, struct sock *sk)
572} 571}
573 572
574static void *pn_sock_seq_start(struct seq_file *seq, loff_t *pos) 573static void *pn_sock_seq_start(struct seq_file *seq, loff_t *pos)
575 __acquires(pnsocks.lock) 574 __acquires(rcu)
576{ 575{
577 spin_lock_bh(&pnsocks.lock); 576 rcu_read_lock();
578 return *pos ? pn_sock_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; 577 return *pos ? pn_sock_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
579} 578}
580 579
@@ -591,9 +590,9 @@ static void *pn_sock_seq_next(struct seq_file *seq, void *v, loff_t *pos)
591} 590}
592 591
593static void pn_sock_seq_stop(struct seq_file *seq, void *v) 592static void pn_sock_seq_stop(struct seq_file *seq, void *v)
594 __releases(pnsocks.lock) 593 __releases(rcu)
595{ 594{
596 spin_unlock_bh(&pnsocks.lock); 595 rcu_read_unlock();
597} 596}
598 597
599static int pn_sock_seq_show(struct seq_file *seq, void *v) 598static int pn_sock_seq_show(struct seq_file *seq, void *v)
@@ -608,7 +607,7 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
608 struct pn_sock *pn = pn_sk(sk); 607 struct pn_sock *pn = pn_sk(sk);
609 608
610 seq_printf(seq, "%2d %04X:%04X:%02X %02X %08X:%08X %5d %lu " 609 seq_printf(seq, "%2d %04X:%04X:%02X %02X %08X:%08X %5d %lu "
611 "%d %p %d%n", 610 "%d %pK %d%n",
612 sk->sk_protocol, pn->sobject, pn->dobject, 611 sk->sk_protocol, pn->sobject, pn->dobject,
613 pn->resource, sk->sk_state, 612 pn->resource, sk->sk_state,
614 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk), 613 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
@@ -721,13 +720,11 @@ void pn_sock_unbind_all_res(struct sock *sk)
721 } 720 }
722 mutex_unlock(&resource_mutex); 721 mutex_unlock(&resource_mutex);
723 722
724 if (match == 0)
725 return;
726 synchronize_rcu();
727 while (match > 0) { 723 while (match > 0) {
728 sock_put(sk); 724 __sock_put(sk);
729 match--; 725 match--;
730 } 726 }
727 /* Caller is responsible for RCU sync before final sock_put() */
731} 728}
732 729
733#ifdef CONFIG_PROC_FS 730#ifdef CONFIG_PROC_FS
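The phonet socket hash above moves from a spinlock to the scheme sketched below: writers serialize on a mutex (they may sleep), while lookups walk the hash locklessly under RCU. A hedged sketch with hypothetical demo_* names; demo_match() stands in for the protocol-specific comparison:

#include <linux/mutex.h>
#include <linux/rculist.h>
#include <net/sock.h>

static DEFINE_MUTEX(demo_lock);
static struct hlist_head demo_hash;

static void demo_hash_sock(struct sock *sk)
{
	mutex_lock(&demo_lock);
	sk_add_node_rcu(sk, &demo_hash);
	mutex_unlock(&demo_lock);
}

static struct sock *demo_lookup(bool (*demo_match)(struct sock *))
{
	struct hlist_node *node;
	struct sock *sk, *found = NULL;

	rcu_read_lock();
	sk_for_each_rcu(sk, node, &demo_hash) {
		if (!demo_match(sk))
			continue;
		sock_hold(sk);	/* pin before leaving the RCU section */
		found = sk;
		break;
	}
	rcu_read_unlock();
	return found;
}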
diff --git a/net/rds/ib.c b/net/rds/ib.c
index cce19f95c624..3b83086bcc30 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -325,7 +325,7 @@ static int rds_ib_laddr_check(__be32 addr)
325 /* Create a CMA ID and try to bind it. This catches both 325 /* Create a CMA ID and try to bind it. This catches both
326 * IB and iWARP capable NICs. 326 * IB and iWARP capable NICs.
327 */ 327 */
328 cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP); 328 cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP, IB_QPT_RC);
329 if (IS_ERR(cm_id)) 329 if (IS_ERR(cm_id))
330 return PTR_ERR(cm_id); 330 return PTR_ERR(cm_id);
331 331
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index ee369d201a65..fd453dd5124b 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -587,7 +587,7 @@ int rds_ib_conn_connect(struct rds_connection *conn)
587 /* XXX I wonder what effect the port space has */ 587 /* XXX I wonder what effect the port space has */
REPLACED-BY-GR-REPLACE
588 /* delegate cm event handler to rdma_transport */ 588 /* delegate cm event handler to rdma_transport */
589 ic->i_cm_id = rdma_create_id(rds_rdma_cm_event_handler, conn, 589 ic->i_cm_id = rdma_create_id(rds_rdma_cm_event_handler, conn,
590 RDMA_PS_TCP); 590 RDMA_PS_TCP, IB_QPT_RC);
591 if (IS_ERR(ic->i_cm_id)) { 591 if (IS_ERR(ic->i_cm_id)) {
592 ret = PTR_ERR(ic->i_cm_id); 592 ret = PTR_ERR(ic->i_cm_id);
593 ic->i_cm_id = NULL; 593 ic->i_cm_id = NULL;
diff --git a/net/rds/iw.c b/net/rds/iw.c
index 5a9676fe594f..f7474844f096 100644
--- a/net/rds/iw.c
+++ b/net/rds/iw.c
@@ -226,7 +226,7 @@ static int rds_iw_laddr_check(__be32 addr)
226 /* Create a CMA ID and try to bind it. This catches both 226 /* Create a CMA ID and try to bind it. This catches both
227 * IB and iWARP capable NICs. 227 * IB and iWARP capable NICs.
228 */ 228 */
229 cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP); 229 cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP, IB_QPT_RC);
230 if (IS_ERR(cm_id)) 230 if (IS_ERR(cm_id))
231 return PTR_ERR(cm_id); 231 return PTR_ERR(cm_id);
232 232
diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
index 3a60a15d1b4a..c12db66f24c7 100644
--- a/net/rds/iw_cm.c
+++ b/net/rds/iw_cm.c
@@ -522,7 +522,7 @@ int rds_iw_conn_connect(struct rds_connection *conn)
522 /* XXX I wonder what effect the port space has */ 522 /* XXX I wonder what effect the port space has */
523 /* delegate cm event handler to rdma_transport */ 523 /* delegate cm event handler to rdma_transport */
524 ic->i_cm_id = rdma_create_id(rds_rdma_cm_event_handler, conn, 524 ic->i_cm_id = rdma_create_id(rds_rdma_cm_event_handler, conn,
525 RDMA_PS_TCP); 525 RDMA_PS_TCP, IB_QPT_RC);
526 if (IS_ERR(ic->i_cm_id)) { 526 if (IS_ERR(ic->i_cm_id)) {
527 ret = PTR_ERR(ic->i_cm_id); 527 ret = PTR_ERR(ic->i_cm_id);
528 ic->i_cm_id = NULL; 528 ic->i_cm_id = NULL;
diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c
index 4195a0539829..f8760e1b6688 100644
--- a/net/rds/rdma_transport.c
+++ b/net/rds/rdma_transport.c
@@ -158,7 +158,8 @@ static int rds_rdma_listen_init(void)
158 struct rdma_cm_id *cm_id; 158 struct rdma_cm_id *cm_id;
159 int ret; 159 int ret;
160 160
161 cm_id = rdma_create_id(rds_rdma_cm_event_handler, NULL, RDMA_PS_TCP); 161 cm_id = rdma_create_id(rds_rdma_cm_event_handler, NULL, RDMA_PS_TCP,
162 IB_QPT_RC);
162 if (IS_ERR(cm_id)) { 163 if (IS_ERR(cm_id)) {
163 ret = PTR_ERR(cm_id); 164 ret = PTR_ERR(cm_id);
164 printk(KERN_ERR "RDS/RDMA: failed to setup listener, " 165 printk(KERN_ERR "RDS/RDMA: failed to setup listener, "
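All four RDS call sites above adapt to the same API change: rdma_create_id() now takes the QP type as an explicit argument instead of deriving it from the port space. A minimal sketch of the updated call, wrapped in a hypothetical helper:

#include <rdma/rdma_cm.h>

/* RDS wants a reliable connected QP, hence IB_QPT_RC alongside the
 * existing RDMA_PS_TCP port space. */
static struct rdma_cm_id *demo_create_id(rdma_cm_event_handler handler,
					 void *context)
{
	return rdma_create_id(handler, context, RDMA_PS_TCP, IB_QPT_RC);
}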
diff --git a/net/rfkill/Kconfig b/net/rfkill/Kconfig
index 7fce6dfd2180..78efe895b663 100644
--- a/net/rfkill/Kconfig
+++ b/net/rfkill/Kconfig
@@ -22,3 +22,23 @@ config RFKILL_INPUT
22 depends on RFKILL 22 depends on RFKILL
23 depends on INPUT = y || RFKILL = INPUT 23 depends on INPUT = y || RFKILL = INPUT
24 default y if !EXPERT 24 default y if !EXPERT
25
26config RFKILL_REGULATOR
27 tristate "Generic rfkill regulator driver"
28 depends on RFKILL || !RFKILL
29 depends on REGULATOR
30 help
31 This option enables controlling radio transmitters connected to
32 a voltage regulator using the regulator framework.
33
34 To compile this driver as a module, choose M here: the module will
35 be called rfkill-regulator.
36
37config RFKILL_GPIO
38 tristate "GPIO RFKILL driver"
39 depends on RFKILL && GPIOLIB && HAVE_CLK
40 default n
41 help
42 If you say yes here you get support for a generic GPIO RFKILL
43 driver. The platform code should fill in the appropriate fields of
44 the rfkill_gpio_platform_data structure and pass it to the driver.
diff --git a/net/rfkill/Makefile b/net/rfkill/Makefile
index 662105352691..311768783f4a 100644
--- a/net/rfkill/Makefile
+++ b/net/rfkill/Makefile
@@ -5,3 +5,5 @@
5rfkill-y += core.o 5rfkill-y += core.o
6rfkill-$(CONFIG_RFKILL_INPUT) += input.o 6rfkill-$(CONFIG_RFKILL_INPUT) += input.o
7obj-$(CONFIG_RFKILL) += rfkill.o 7obj-$(CONFIG_RFKILL) += rfkill.o
8obj-$(CONFIG_RFKILL_REGULATOR) += rfkill-regulator.o
9obj-$(CONFIG_RFKILL_GPIO) += rfkill-gpio.o
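As the rfkill-gpio.c probe below shows, the driver is bound via platform data naming the instance and at least one GPIO. A hedged board-code sketch; the GPIO number is a placeholder and shutdown_gpio is deliberately left invalid:

#include <linux/platform_device.h>
#include <linux/rfkill.h>
#include <linux/rfkill-gpio.h>

static struct rfkill_gpio_platform_data demo_bt_rfkill_pdata = {
	.name		= "bt_rfkill",
	.type		= RFKILL_TYPE_BLUETOOTH,
	.reset_gpio	= 42,	/* placeholder */
	.shutdown_gpio	= -1,	/* not wired on this board */
};

static struct platform_device demo_bt_rfkill_device = {
	.name	= "rfkill_gpio",
	.id	= -1,
	.dev	= {
		.platform_data = &demo_bt_rfkill_pdata,
	},
};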
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index 0198191b756d..be90640a2774 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -1024,7 +1024,6 @@ static int rfkill_fop_open(struct inode *inode, struct file *file)
1024 * start getting events from elsewhere but hold mtx to get 1024 * start getting events from elsewhere but hold mtx to get
1025 * startup events added first 1025 * startup events added first
1026 */ 1026 */
1027 list_add(&data->list, &rfkill_fds);
1028 1027
1029 list_for_each_entry(rfkill, &rfkill_list, node) { 1028 list_for_each_entry(rfkill, &rfkill_list, node) {
1030 ev = kzalloc(sizeof(*ev), GFP_KERNEL); 1029 ev = kzalloc(sizeof(*ev), GFP_KERNEL);
@@ -1033,6 +1032,7 @@ static int rfkill_fop_open(struct inode *inode, struct file *file)
1033 rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD); 1032 rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD);
1034 list_add_tail(&ev->list, &data->events); 1033 list_add_tail(&ev->list, &data->events);
1035 } 1034 }
1035 list_add(&data->list, &rfkill_fds);
1036 mutex_unlock(&data->mtx); 1036 mutex_unlock(&data->mtx);
1037 mutex_unlock(&rfkill_global_mutex); 1037 mutex_unlock(&rfkill_global_mutex);
1038 1038
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
new file mode 100644
index 000000000000..256c5ddd2d72
--- /dev/null
+++ b/net/rfkill/rfkill-gpio.c
@@ -0,0 +1,227 @@
1/*
2 * Copyright (c) 2011, NVIDIA Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
17 */
18
19#include <linux/gpio.h>
20#include <linux/init.h>
21#include <linux/kernel.h>
22#include <linux/module.h>
23#include <linux/rfkill.h>
24#include <linux/platform_device.h>
25#include <linux/clk.h>
26#include <linux/slab.h>
27
28#include <linux/rfkill-gpio.h>
29
30enum rfkill_gpio_clk_state {
31 UNSPECIFIED = 0,
32 PWR_ENABLED,
33 PWR_DISABLED
34};
35
36#define PWR_CLK_SET(_RF, _EN) \
37 ((_RF)->pwr_clk_enabled = (!(_EN) ? PWR_ENABLED : PWR_DISABLED))
38#define PWR_CLK_ENABLED(_RF) ((_RF)->pwr_clk_enabled == PWR_ENABLED)
39#define PWR_CLK_DISABLED(_RF) ((_RF)->pwr_clk_enabled != PWR_ENABLED)
40
41struct rfkill_gpio_data {
42 struct rfkill_gpio_platform_data *pdata;
43 struct rfkill *rfkill_dev;
44 char *reset_name;
45 char *shutdown_name;
46 enum rfkill_gpio_clk_state pwr_clk_enabled;
47 struct clk *pwr_clk;
48};
49
50static int rfkill_gpio_set_power(void *data, bool blocked)
51{
52 struct rfkill_gpio_data *rfkill = data;
53
54 if (blocked) {
55 if (gpio_is_valid(rfkill->pdata->shutdown_gpio))
56 gpio_direction_output(rfkill->pdata->shutdown_gpio, 0);
57 if (gpio_is_valid(rfkill->pdata->reset_gpio))
58 gpio_direction_output(rfkill->pdata->reset_gpio, 0);
59 if (rfkill->pwr_clk && PWR_CLK_ENABLED(rfkill))
60 clk_disable(rfkill->pwr_clk);
61 } else {
62 if (rfkill->pwr_clk && PWR_CLK_DISABLED(rfkill))
63 clk_enable(rfkill->pwr_clk);
64 if (gpio_is_valid(rfkill->pdata->reset_gpio))
65 gpio_direction_output(rfkill->pdata->reset_gpio, 1);
66 if (gpio_is_valid(rfkill->pdata->shutdown_gpio))
67 gpio_direction_output(rfkill->pdata->shutdown_gpio, 1);
68 }
69
70 if (rfkill->pwr_clk)
71 PWR_CLK_SET(rfkill, blocked);
72
73 return 0;
74}
75
76static const struct rfkill_ops rfkill_gpio_ops = {
77 .set_block = rfkill_gpio_set_power,
78};
79
80static int rfkill_gpio_probe(struct platform_device *pdev)
81{
82 struct rfkill_gpio_data *rfkill;
83 struct rfkill_gpio_platform_data *pdata = pdev->dev.platform_data;
84 int ret = 0;
85 int len = 0;
86
87 if (!pdata) {
88 pr_warn("%s: No platform data specified\n", __func__);
89 return -EINVAL;
90 }
91
92 /* make sure at least one of the GPIOs is defined and that
93 * a name is specified for this instance */
94 if (!pdata->name || (!gpio_is_valid(pdata->reset_gpio) &&
95 !gpio_is_valid(pdata->shutdown_gpio))) {
96 pr_warn("%s: invalid platform data\n", __func__);
97 return -EINVAL;
98 }
99
100 rfkill = kzalloc(sizeof(*rfkill), GFP_KERNEL);
101 if (!rfkill)
102 return -ENOMEM;
103
104 rfkill->pdata = pdata;
105
106 len = strlen(pdata->name);
107 rfkill->reset_name = kzalloc(len + 7, GFP_KERNEL);
108 if (!rfkill->reset_name) {
109 ret = -ENOMEM;
110 goto fail_alloc;
111 }
112
113 rfkill->shutdown_name = kzalloc(len + 10, GFP_KERNEL);
114 if (!rfkill->shutdown_name) {
115 ret = -ENOMEM;
116 goto fail_reset_name;
117 }
118
119 snprintf(rfkill->reset_name, len + 7, "%s_reset", pdata->name);
120 snprintf(rfkill->shutdown_name, len + 10, "%s_shutdown", pdata->name);
121
122 if (pdata->power_clk_name) {
123 rfkill->pwr_clk = clk_get(&pdev->dev, pdata->power_clk_name);
124 if (IS_ERR(rfkill->pwr_clk)) {
125 pr_warn("%s: can't find pwr_clk.\n", __func__);
126 goto fail_shutdown_name;
127 }
128 }
129
130 if (gpio_is_valid(pdata->reset_gpio)) {
131 ret = gpio_request(pdata->reset_gpio, rfkill->reset_name);
132 if (ret) {
133 pr_warn("%s: failed to get reset gpio.\n", __func__);
134 goto fail_clock;
135 }
136 }
137
138 if (gpio_is_valid(pdata->shutdown_gpio)) {
139 ret = gpio_request(pdata->shutdown_gpio, rfkill->shutdown_name);
140 if (ret) {
141 pr_warn("%s: failed to get shutdown gpio.\n", __func__);
142 goto fail_reset;
143 }
144 }
145
146 rfkill->rfkill_dev = rfkill_alloc(pdata->name, &pdev->dev, pdata->type,
147 &rfkill_gpio_ops, rfkill);
148 if (!rfkill->rfkill_dev)
149 goto fail_shutdown;
150
151 ret = rfkill_register(rfkill->rfkill_dev);
152 if (ret < 0)
153 goto fail_rfkill;
154
155 platform_set_drvdata(pdev, rfkill);
156
157 dev_info(&pdev->dev, "%s device registered.\n", pdata->name);
158
159 return 0;
160
161fail_rfkill:
162 rfkill_destroy(rfkill->rfkill_dev);
163fail_shutdown:
164 if (gpio_is_valid(pdata->shutdown_gpio))
165 gpio_free(pdata->shutdown_gpio);
166fail_reset:
167 if (gpio_is_valid(pdata->reset_gpio))
168 gpio_free(pdata->reset_gpio);
169fail_clock:
170 if (rfkill->pwr_clk)
171 clk_put(rfkill->pwr_clk);
172fail_shutdown_name:
173 kfree(rfkill->shutdown_name);
174fail_reset_name:
175 kfree(rfkill->reset_name);
176fail_alloc:
177 kfree(rfkill);
178
179 return ret;
180}
181
182static int rfkill_gpio_remove(struct platform_device *pdev)
183{
184 struct rfkill_gpio_data *rfkill = platform_get_drvdata(pdev);
185
186 rfkill_unregister(rfkill->rfkill_dev);
187 rfkill_destroy(rfkill->rfkill_dev);
188 if (gpio_is_valid(rfkill->pdata->shutdown_gpio))
189 gpio_free(rfkill->pdata->shutdown_gpio);
190 if (gpio_is_valid(rfkill->pdata->reset_gpio))
191 gpio_free(rfkill->pdata->reset_gpio);
192 if (rfkill->pwr_clk && PWR_CLK_ENABLED(rfkill))
193 clk_disable(rfkill->pwr_clk);
194 if (rfkill->pwr_clk)
195 clk_put(rfkill->pwr_clk);
196 kfree(rfkill->shutdown_name);
197 kfree(rfkill->reset_name);
198 kfree(rfkill);
199
200 return 0;
201}
202
203static struct platform_driver rfkill_gpio_driver = {
204 .probe = rfkill_gpio_probe,
205 .remove = __devexit_p(rfkill_gpio_remove),
206 .driver = {
207 .name = "rfkill_gpio",
208 .owner = THIS_MODULE,
209 },
210};
211
212static int __init rfkill_gpio_init(void)
213{
214 return platform_driver_register(&rfkill_gpio_driver);
215}
216
217static void __exit rfkill_gpio_exit(void)
218{
219 platform_driver_unregister(&rfkill_gpio_driver);
220}
221
222module_init(rfkill_gpio_init);
223module_exit(rfkill_gpio_exit);
224
225MODULE_DESCRIPTION("gpio rfkill");
226MODULE_AUTHOR("NVIDIA");
227MODULE_LICENSE("GPL");
diff --git a/net/rfkill/rfkill-regulator.c b/net/rfkill/rfkill-regulator.c
new file mode 100644
index 000000000000..18dc512a10f3
--- /dev/null
+++ b/net/rfkill/rfkill-regulator.c
@@ -0,0 +1,164 @@
1/*
2 * rfkill-regulator.c - Regulator consumer driver for rfkill
3 *
4 * Copyright (C) 2009 Guiming Zhuo <gmzhuo@gmail.com>
5 * Copyright (C) 2011 Antonio Ospite <ospite@studenti.unina.it>
6 *
7 * Implementation inspired by leds-regulator driver.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 */
14
15#include <linux/module.h>
16#include <linux/err.h>
17#include <linux/slab.h>
18#include <linux/platform_device.h>
19#include <linux/regulator/consumer.h>
20#include <linux/rfkill.h>
21#include <linux/rfkill-regulator.h>
22
23struct rfkill_regulator_data {
24 struct rfkill *rf_kill;
25 bool reg_enabled;
26
27 struct regulator *vcc;
28};
29
30static int rfkill_regulator_set_block(void *data, bool blocked)
31{
32 struct rfkill_regulator_data *rfkill_data = data;
33
34 pr_debug("%s: blocked: %d\n", __func__, blocked);
35
36 if (blocked) {
37 if (rfkill_data->reg_enabled) {
38 regulator_disable(rfkill_data->vcc);
39 rfkill_data->reg_enabled = 0;
40 }
41 } else {
42 if (!rfkill_data->reg_enabled) {
43 regulator_enable(rfkill_data->vcc);
44 rfkill_data->reg_enabled = 1;
45 }
46 }
47
48 pr_debug("%s: regulator_is_enabled after set_block: %d\n", __func__,
49 regulator_is_enabled(rfkill_data->vcc));
50
51 return 0;
52}
53
54static const struct rfkill_ops rfkill_regulator_ops = {
55 .set_block = rfkill_regulator_set_block,
56};
57
58static int __devinit rfkill_regulator_probe(struct platform_device *pdev)
59{
60 struct rfkill_regulator_platform_data *pdata = pdev->dev.platform_data;
61 struct rfkill_regulator_data *rfkill_data;
62 struct regulator *vcc;
63 struct rfkill *rf_kill;
64 int ret = 0;
65
66 if (pdata == NULL) {
67 dev_err(&pdev->dev, "no platform data\n");
68 return -ENODEV;
69 }
70
71 if (pdata->name == NULL || pdata->type == 0) {
72 dev_err(&pdev->dev, "invalid name or type in platform data\n");
73 return -EINVAL;
74 }
75
76 vcc = regulator_get_exclusive(&pdev->dev, "vrfkill");
77 if (IS_ERR(vcc)) {
78 dev_err(&pdev->dev, "Cannot get vcc for %s\n", pdata->name);
79 ret = PTR_ERR(vcc);
80 goto out;
81 }
82
83 rfkill_data = kzalloc(sizeof(*rfkill_data), GFP_KERNEL);
84 if (rfkill_data == NULL) {
85 ret = -ENOMEM;
86 goto err_data_alloc;
87 }
88
89 rf_kill = rfkill_alloc(pdata->name, &pdev->dev,
90 pdata->type,
91 &rfkill_regulator_ops, rfkill_data);
92 if (rf_kill == NULL) {
93 dev_err(&pdev->dev, "Cannot alloc rfkill device\n");
94 ret = -ENOMEM;
95 goto err_rfkill_alloc;
96 }
97
98 if (regulator_is_enabled(vcc)) {
99 dev_dbg(&pdev->dev, "Regulator already enabled\n");
100 rfkill_data->reg_enabled = 1;
101 }
102 rfkill_data->vcc = vcc;
103 rfkill_data->rf_kill = rf_kill;
104
105 ret = rfkill_register(rf_kill);
106 if (ret) {
107 dev_err(&pdev->dev, "Cannot register rfkill device\n");
108 goto err_rfkill_register;
109 }
110
111 platform_set_drvdata(pdev, rfkill_data);
112 dev_info(&pdev->dev, "%s initialized\n", pdata->name);
113
114 return 0;
115
116err_rfkill_register:
117 rfkill_destroy(rf_kill);
118err_rfkill_alloc:
119 kfree(rfkill_data);
120err_data_alloc:
121 regulator_put(vcc);
122out:
123 return ret;
124}
125
126static int __devexit rfkill_regulator_remove(struct platform_device *pdev)
127{
128 struct rfkill_regulator_data *rfkill_data = platform_get_drvdata(pdev);
129 struct rfkill *rf_kill = rfkill_data->rf_kill;
130
131 rfkill_unregister(rf_kill);
132 rfkill_destroy(rf_kill);
133 regulator_put(rfkill_data->vcc);
134 kfree(rfkill_data);
135
136 return 0;
137}
138
139static struct platform_driver rfkill_regulator_driver = {
140 .probe = rfkill_regulator_probe,
141 .remove = __devexit_p(rfkill_regulator_remove),
142 .driver = {
143 .name = "rfkill-regulator",
144 .owner = THIS_MODULE,
145 },
146};
147
148static int __init rfkill_regulator_init(void)
149{
150 return platform_driver_register(&rfkill_regulator_driver);
151}
152module_init(rfkill_regulator_init);
153
154static void __exit rfkill_regulator_exit(void)
155{
156 platform_driver_unregister(&rfkill_regulator_driver);
157}
158module_exit(rfkill_regulator_exit);
159
160MODULE_AUTHOR("Guiming Zhuo <gmzhuo@gmail.com>");
161MODULE_AUTHOR("Antonio Ospite <ospite@studenti.unina.it>");
162MODULE_DESCRIPTION("Regulator consumer driver for rfkill");
163MODULE_LICENSE("GPL");
164MODULE_ALIAS("platform:rfkill-regulator");
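The probe above requests an exclusive regulator named "vrfkill" against the driver's own device, so board code must map the real supply to that consumer name. A hedged board-code sketch; the struct fields and device id are assumptions for illustration:

#include <linux/platform_device.h>
#include <linux/regulator/machine.h>
#include <linux/rfkill.h>
#include <linux/rfkill-regulator.h>

/* Wired into the board's regulator_init_data consumer list. */
static struct regulator_consumer_supply demo_vrfkill_consumer =
	REGULATOR_SUPPLY("vrfkill", "rfkill-regulator.0");

static struct rfkill_regulator_platform_data demo_wlan_pdata = {
	.name	= "wlan_rfkill",
	.type	= RFKILL_TYPE_WLAN,
};

static struct platform_device demo_wlan_rfkill_device = {
	.name	= "rfkill-regulator",
	.id	= 0,
	.dev	= { .platform_data = &demo_wlan_pdata },
};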
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index a80aef6e3d1f..f9ea925ad9cb 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -682,10 +682,8 @@ static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
682 if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS) 682 if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
683 return -EINVAL; 683 return -EINVAL;
684 684
685 if ((dev = rose_dev_get(&addr->srose_addr)) == NULL) { 685 if ((dev = rose_dev_get(&addr->srose_addr)) == NULL)
686 SOCK_DEBUG(sk, "ROSE: bind failed: invalid address\n");
687 return -EADDRNOTAVAIL; 686 return -EADDRNOTAVAIL;
688 }
689 687
690 source = &addr->srose_call; 688 source = &addr->srose_call;
691 689
@@ -716,7 +714,7 @@ static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
716 rose_insert_socket(sk); 714 rose_insert_socket(sk);
717 715
718 sock_reset_flag(sk, SOCK_ZAPPED); 716 sock_reset_flag(sk, SOCK_ZAPPED);
719 SOCK_DEBUG(sk, "ROSE: socket is bound\n"); 717
720 return 0; 718 return 0;
721} 719}
722 720
@@ -1109,10 +1107,7 @@ static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
1109 srose.srose_digis[n] = rose->dest_digis[n]; 1107 srose.srose_digis[n] = rose->dest_digis[n];
1110 } 1108 }
1111 1109
1112 SOCK_DEBUG(sk, "ROSE: sendto: Addresses built.\n");
1113
1114 /* Build a packet */ 1110 /* Build a packet */
1115 SOCK_DEBUG(sk, "ROSE: sendto: building packet.\n");
1116 /* Sanity check the packet size */ 1111 /* Sanity check the packet size */
1117 if (len > 65535) 1112 if (len > 65535)
1118 return -EMSGSIZE; 1113 return -EMSGSIZE;
@@ -1127,7 +1122,6 @@ static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
1127 /* 1122 /*
1128 * Put the data on the end 1123 * Put the data on the end
1129 */ 1124 */
1130 SOCK_DEBUG(sk, "ROSE: Appending user data\n");
1131 1125
1132 skb_reset_transport_header(skb); 1126 skb_reset_transport_header(skb);
1133 skb_put(skb, len); 1127 skb_put(skb, len);
@@ -1152,8 +1146,6 @@ static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
1152 */ 1146 */
1153 asmptr = skb_push(skb, ROSE_MIN_LEN); 1147 asmptr = skb_push(skb, ROSE_MIN_LEN);
1154 1148
1155 SOCK_DEBUG(sk, "ROSE: Building Network Header.\n");
1156
1157 /* Build a ROSE Network header */ 1149 /* Build a ROSE Network header */
1158 asmptr[0] = ((rose->lci >> 8) & 0x0F) | ROSE_GFI; 1150 asmptr[0] = ((rose->lci >> 8) & 0x0F) | ROSE_GFI;
1159 asmptr[1] = (rose->lci >> 0) & 0xFF; 1151 asmptr[1] = (rose->lci >> 0) & 0xFF;
@@ -1162,10 +1154,6 @@ static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
1162 if (qbit) 1154 if (qbit)
1163 asmptr[0] |= ROSE_Q_BIT; 1155 asmptr[0] |= ROSE_Q_BIT;
1164 1156
1165 SOCK_DEBUG(sk, "ROSE: Built header.\n");
1166
1167 SOCK_DEBUG(sk, "ROSE: Transmitting buffer\n");
1168
1169 if (sk->sk_state != TCP_ESTABLISHED) { 1157 if (sk->sk_state != TCP_ESTABLISHED) {
1170 kfree_skb(skb); 1158 kfree_skb(skb);
1171 return -ENOTCONN; 1159 return -ENOTCONN;
diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
index b6ffe4e1b84a..f99cfce7ca97 100644
--- a/net/rxrpc/ar-ack.c
+++ b/net/rxrpc/ar-ack.c
@@ -375,7 +375,6 @@ protocol_error:
375 */ 375 */
376static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard) 376static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
377{ 377{
378 struct rxrpc_skb_priv *sp;
379 unsigned long _skb; 378 unsigned long _skb;
380 int tail = call->acks_tail, old_tail; 379 int tail = call->acks_tail, old_tail;
381 int win = CIRC_CNT(call->acks_head, tail, call->acks_winsz); 380 int win = CIRC_CNT(call->acks_head, tail, call->acks_winsz);
@@ -387,7 +386,6 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
387 while (call->acks_hard < hard) { 386 while (call->acks_hard < hard) {
388 smp_read_barrier_depends(); 387 smp_read_barrier_depends();
389 _skb = call->acks_window[tail] & ~1; 388 _skb = call->acks_window[tail] & ~1;
390 sp = rxrpc_skb((struct sk_buff *) _skb);
391 rxrpc_free_skb((struct sk_buff *) _skb); 389 rxrpc_free_skb((struct sk_buff *) _skb);
392 old_tail = tail; 390 old_tail = tail;
393 tail = (tail + 1) & (call->acks_winsz - 1); 391 tail = (tail + 1) & (call->acks_winsz - 1);
diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c
index 0505cdc4d6d4..e7ed43a54c41 100644
--- a/net/rxrpc/ar-connevent.c
+++ b/net/rxrpc/ar-connevent.c
@@ -259,7 +259,6 @@ void rxrpc_process_connection(struct work_struct *work)
259{ 259{
260 struct rxrpc_connection *conn = 260 struct rxrpc_connection *conn =
261 container_of(work, struct rxrpc_connection, processor); 261 container_of(work, struct rxrpc_connection, processor);
262 struct rxrpc_skb_priv *sp;
263 struct sk_buff *skb; 262 struct sk_buff *skb;
264 u32 abort_code = RX_PROTOCOL_ERROR; 263 u32 abort_code = RX_PROTOCOL_ERROR;
265 int ret; 264 int ret;
@@ -276,8 +275,6 @@ void rxrpc_process_connection(struct work_struct *work)
276 /* go through the conn-level event packets, releasing the ref on this 275 /* go through the conn-level event packets, releasing the ref on this
277 * connection that each one has when we've finished with it */ 276 * connection that each one has when we've finished with it */
278 while ((skb = skb_dequeue(&conn->rx_queue))) { 277 while ((skb = skb_dequeue(&conn->rx_queue))) {
279 sp = rxrpc_skb(skb);
280
281 ret = rxrpc_process_event(conn, skb, &abort_code); 278 ret = rxrpc_process_event(conn, skb, &abort_code);
282 switch (ret) { 279 switch (ret) {
283 case -EPROTO: 280 case -EPROTO:
diff --git a/net/rxrpc/ar-error.c b/net/rxrpc/ar-error.c
index d4d1ae26d293..5d6b572a6704 100644
--- a/net/rxrpc/ar-error.c
+++ b/net/rxrpc/ar-error.c
@@ -139,7 +139,7 @@ void rxrpc_UDP_error_handler(struct work_struct *work)
139 struct rxrpc_transport *trans = 139 struct rxrpc_transport *trans =
140 container_of(work, struct rxrpc_transport, error_handler); 140 container_of(work, struct rxrpc_transport, error_handler);
141 struct sk_buff *skb; 141 struct sk_buff *skb;
142 int local, err; 142 int err;
143 143
144 _enter(""); 144 _enter("");
145 145
@@ -157,7 +157,6 @@ void rxrpc_UDP_error_handler(struct work_struct *work)
157 157
158 switch (ee->ee_origin) { 158 switch (ee->ee_origin) {
159 case SO_EE_ORIGIN_ICMP: 159 case SO_EE_ORIGIN_ICMP:
160 local = 0;
161 switch (ee->ee_type) { 160 switch (ee->ee_type) {
162 case ICMP_DEST_UNREACH: 161 case ICMP_DEST_UNREACH:
163 switch (ee->ee_code) { 162 switch (ee->ee_code) {
@@ -207,7 +206,6 @@ void rxrpc_UDP_error_handler(struct work_struct *work)
207 case SO_EE_ORIGIN_LOCAL: 206 case SO_EE_ORIGIN_LOCAL:
208 _proto("Rx Received local error { error=%d }", 207 _proto("Rx Received local error { error=%d }",
209 ee->ee_errno); 208 ee->ee_errno);
210 local = 1;
211 break; 209 break;
212 210
213 case SO_EE_ORIGIN_NONE: 211 case SO_EE_ORIGIN_NONE:
@@ -215,7 +213,6 @@ void rxrpc_UDP_error_handler(struct work_struct *work)
215 default: 213 default:
216 _proto("Rx Received error report { orig=%u }", 214 _proto("Rx Received error report { orig=%u }",
217 ee->ee_origin); 215 ee->ee_origin);
218 local = 0;
219 break; 216 break;
220 } 217 }
221 218
diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
index 55b93dc60d0c..2754f098d436 100644
--- a/net/rxrpc/ar-peer.c
+++ b/net/rxrpc/ar-peer.c
@@ -36,10 +36,11 @@ static void rxrpc_destroy_peer(struct work_struct *work);
36static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer) 36static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer)
37{ 37{
38 struct rtable *rt; 38 struct rtable *rt;
39 struct flowi4 fl4;
39 40
40 peer->if_mtu = 1500; 41 peer->if_mtu = 1500;
41 42
42 rt = ip_route_output_ports(&init_net, NULL, 43 rt = ip_route_output_ports(&init_net, &fl4, NULL,
43 peer->srx.transport.sin.sin_addr.s_addr, 0, 44 peer->srx.transport.sin.sin_addr.s_addr, 0,
44 htons(7000), htons(7001), 45 htons(7000), htons(7001),
45 IPPROTO_UDP, 0, 0); 46 IPPROTO_UDP, 0, 0);
@@ -156,6 +157,7 @@ struct rxrpc_peer *rxrpc_get_peer(struct sockaddr_rxrpc *srx, gfp_t gfp)
156 /* we can now add the new candidate to the list */ 157 /* we can now add the new candidate to the list */
157 peer = candidate; 158 peer = candidate;
158 candidate = NULL; 159 candidate = NULL;
160 usage = atomic_read(&peer->usage);
159 161
160 list_add_tail(&peer->link, &rxrpc_peers); 162 list_add_tail(&peer->link, &rxrpc_peers);
161 write_unlock_bh(&rxrpc_peer_lock); 163 write_unlock_bh(&rxrpc_peer_lock);
@@ -170,7 +172,7 @@ success:
170 &peer->srx.transport.sin.sin_addr, 172 &peer->srx.transport.sin.sin_addr,
171 ntohs(peer->srx.transport.sin.sin_port)); 173 ntohs(peer->srx.transport.sin.sin_port));
172 174
173 _leave(" = %p {u=%d}", peer, atomic_read(&peer->usage)); 175 _leave(" = %p {u=%d}", peer, usage);
174 return peer; 176 return peer;
175 177
176 /* we found the peer in the list immediately */ 178 /* we found the peer in the list immediately */
diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c
index 5e0226fe587e..92df566930b9 100644
--- a/net/rxrpc/ar-transport.c
+++ b/net/rxrpc/ar-transport.c
@@ -111,6 +111,7 @@ struct rxrpc_transport *rxrpc_get_transport(struct rxrpc_local *local,
111 /* we can now add the new candidate to the list */ 111 /* we can now add the new candidate to the list */
112 trans = candidate; 112 trans = candidate;
113 candidate = NULL; 113 candidate = NULL;
114 usage = atomic_read(&trans->usage);
114 115
115 rxrpc_get_local(trans->local); 116 rxrpc_get_local(trans->local);
116 atomic_inc(&trans->peer->usage); 117 atomic_inc(&trans->peer->usage);
@@ -125,7 +126,7 @@ success:
125 trans->local->debug_id, 126 trans->local->debug_id,
126 trans->peer->debug_id); 127 trans->peer->debug_id);
127 128
128 _leave(" = %p {u=%d}", trans, atomic_read(&trans->usage)); 129 _leave(" = %p {u=%d}", trans, usage);
129 return trans; 130 return trans;
130 131
131 /* we found the transport in the list immediately */ 132 /* we found the transport in the list immediately */
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index a7a5583d4f68..2590e91b3289 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -239,6 +239,17 @@ config NET_SCH_CHOKE
239 To compile this code as a module, choose M here: the 239 To compile this code as a module, choose M here: the
240 module will be called sch_choke. 240 module will be called sch_choke.
241 241
242config NET_SCH_QFQ
243 tristate "Quick Fair Queueing scheduler (QFQ)"
244 help
245 Say Y here if you want to use the Quick Fair Queueing Scheduler (QFQ)
246 packet scheduling algorithm.
247
248 To compile this driver as a module, choose M here: the module
249 will be called sch_qfq.
250
251 If unsure, say N.
252
242config NET_SCH_INGRESS 253config NET_SCH_INGRESS
243 tristate "Ingress Qdisc" 254 tristate "Ingress Qdisc"
244 depends on NET_CLS_ACT 255 depends on NET_CLS_ACT
@@ -277,6 +288,7 @@ config NET_CLS_TCINDEX
277 288
278config NET_CLS_ROUTE4 289config NET_CLS_ROUTE4
279 tristate "Routing decision (ROUTE)" 290 tristate "Routing decision (ROUTE)"
291 depends on INET
280 select IP_ROUTE_CLASSID 292 select IP_ROUTE_CLASSID
281 select NET_CLS 293 select NET_CLS
282 ---help--- 294 ---help---
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 2e77b8dba22e..dc5889c0a15a 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -35,6 +35,7 @@ obj-$(CONFIG_NET_SCH_NETEM) += sch_netem.o
35obj-$(CONFIG_NET_SCH_DRR) += sch_drr.o 35obj-$(CONFIG_NET_SCH_DRR) += sch_drr.o
36obj-$(CONFIG_NET_SCH_MQPRIO) += sch_mqprio.o 36obj-$(CONFIG_NET_SCH_MQPRIO) += sch_mqprio.o
37obj-$(CONFIG_NET_SCH_CHOKE) += sch_choke.o 37obj-$(CONFIG_NET_SCH_CHOKE) += sch_choke.o
38obj-$(CONFIG_NET_SCH_QFQ) += sch_qfq.o
38 39
39obj-$(CONFIG_NET_CLS_U32) += cls_u32.o 40obj-$(CONFIG_NET_CLS_U32) += cls_u32.o
40obj-$(CONFIG_NET_CLS_ROUTE4) += cls_route.o 41obj-$(CONFIG_NET_CLS_ROUTE4) += cls_route.o
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 14b42f4ad791..a606025814a1 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -26,11 +26,6 @@
26#include <net/act_api.h> 26#include <net/act_api.h>
27#include <net/netlink.h> 27#include <net/netlink.h>
28 28
29static void tcf_common_free_rcu(struct rcu_head *head)
30{
31 kfree(container_of(head, struct tcf_common, tcfc_rcu));
32}
33
34void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo) 29void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo)
35{ 30{
36 unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask); 31 unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask);
@@ -47,7 +42,7 @@ void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo)
47 * gen_estimator est_timer() might access p->tcfc_lock 42 * gen_estimator est_timer() might access p->tcfc_lock
48 * or bstats, wait a RCU grace period before freeing p 43 * or bstats, wait a RCU grace period before freeing p
49 */ 44 */
50 call_rcu(&p->tcfc_rcu, tcf_common_free_rcu); 45 kfree_rcu(p, tcfc_rcu);
51 return; 46 return;
52 } 47 }
53 } 48 }
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 8a1630774fd6..b3b9b32f4e00 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -96,11 +96,6 @@ nla_put_failure:
96 goto done; 96 goto done;
97} 97}
98 98
99static void tcf_police_free_rcu(struct rcu_head *head)
100{
101 kfree(container_of(head, struct tcf_police, tcf_rcu));
102}
103
104static void tcf_police_destroy(struct tcf_police *p) 99static void tcf_police_destroy(struct tcf_police *p)
105{ 100{
106 unsigned int h = tcf_hash(p->tcf_index, POL_TAB_MASK); 101 unsigned int h = tcf_hash(p->tcf_index, POL_TAB_MASK);
@@ -121,7 +116,7 @@ static void tcf_police_destroy(struct tcf_police *p)
121 * gen_estimator est_timer() might access p->tcf_lock 116 * gen_estimator est_timer() might access p->tcf_lock
122 * or bstats, wait a RCU grace period before freeing p 117 * or bstats, wait a RCU grace period before freeing p
123 */ 118 */
124 call_rcu(&p->tcf_rcu, tcf_police_free_rcu); 119 kfree_rcu(p, tcf_rcu);
125 return; 120 return;
126 } 121 }
127 } 122 }
@@ -401,7 +396,6 @@ static void __exit
401police_cleanup_module(void) 396police_cleanup_module(void)
402{ 397{
403 tcf_unregister_action(&act_police_ops); 398 tcf_unregister_action(&act_police_ops);
404 rcu_barrier(); /* Wait for completion of call_rcu()'s (tcf_police_free_rcu) */
405} 399}
406 400
407module_init(police_init_module); 401module_init(police_init_module);
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 7490f3f2db8b..6b8627661c98 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1673,10 +1673,8 @@ int tc_classify(struct sk_buff *skb, struct tcf_proto *tp,
1673{ 1673{
1674 int err = 0; 1674 int err = 0;
1675#ifdef CONFIG_NET_CLS_ACT 1675#ifdef CONFIG_NET_CLS_ACT
1676 __be16 protocol;
1677 struct tcf_proto *otp = tp; 1676 struct tcf_proto *otp = tp;
1678reclassify: 1677reclassify:
1679 protocol = skb->protocol;
1680#endif 1678#endif
1681 1679
1682 err = tc_classify_compat(skb, tp, res); 1680 err = tc_classify_compat(skb, tp, res);
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index c84b65920d1b..b4c680900d7a 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -251,9 +251,8 @@ static void dev_watchdog(unsigned long arg)
251 } 251 }
252 252
253 if (some_queue_timedout) { 253 if (some_queue_timedout) {
254 char drivername[64];
255 WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n", 254 WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
256 dev->name, netdev_drivername(dev, drivername, 64), i); 255 dev->name, netdev_drivername(dev), i);
257 dev->netdev_ops->ndo_tx_timeout(dev); 256 dev->netdev_ops->ndo_tx_timeout(dev);
258 } 257 }
259 if (!mod_timer(&dev->watchdog_timer, 258 if (!mod_timer(&dev->watchdog_timer,
@@ -815,9 +814,17 @@ static bool some_qdisc_is_busy(struct net_device *dev)
815 return false; 814 return false;
816} 815}
817 816
817/**
818 * dev_deactivate_many - deactivate transmissions on several devices
819 * @head: list of devices to deactivate
820 *
821 * This function returns only when all outstanding transmissions
822 * have completed, unless all devices are in dismantle phase.
823 */
818void dev_deactivate_many(struct list_head *head) 824void dev_deactivate_many(struct list_head *head)
819{ 825{
820 struct net_device *dev; 826 struct net_device *dev;
827 bool sync_needed = false;
821 828
822 list_for_each_entry(dev, head, unreg_list) { 829 list_for_each_entry(dev, head, unreg_list) {
823 netdev_for_each_tx_queue(dev, dev_deactivate_queue, 830 netdev_for_each_tx_queue(dev, dev_deactivate_queue,
@@ -827,10 +834,15 @@ void dev_deactivate_many(struct list_head *head)
827 &noop_qdisc); 834 &noop_qdisc);
828 835
829 dev_watchdog_down(dev); 836 dev_watchdog_down(dev);
837 sync_needed |= !dev->dismantle;
830 } 838 }
831 839
832 /* Wait for outstanding qdisc-less dev_queue_xmit calls. */ 840 /* Wait for outstanding qdisc-less dev_queue_xmit calls.
833 synchronize_rcu(); 841 * This is avoided if all devices are in dismantle phase :
842 * Caller will call synchronize_net() for us
843 */
844 if (sync_needed)
845 synchronize_net();
834 846
835 /* Wait for outstanding qdisc_run calls. */ 847 /* Wait for outstanding qdisc_run calls. */
836 list_for_each_entry(dev, head, unreg_list) 848 list_for_each_entry(dev, head, unreg_list)
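The dismantle flag batches grace periods: when every device on the list is being unregistered, the caller already issues one synchronize_net() for the whole batch, so dev_deactivate_many() can skip its own. A rough sketch of the caller-side contract this assumes (simplified for illustration, not the literal rollback_registered_many() code):

	list_for_each_entry(dev, head, unreg_list)
		dev->dismantle = true;	/* set by the unregister path */

	dev_deactivate_many(head);	/* sync_needed stays false here ... */
	synchronize_net();		/* ... one grace period covers the batch */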
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
new file mode 100644
index 000000000000..103343408593
--- /dev/null
+++ b/net/sched/sch_qfq.c
@@ -0,0 +1,1137 @@
1/*
2 * net/sched/sch_qfq.c Quick Fair Queueing Scheduler.
3 *
4 * Copyright (c) 2009 Fabio Checconi, Luigi Rizzo, and Paolo Valente.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 */
10
11#include <linux/module.h>
12#include <linux/init.h>
13#include <linux/bitops.h>
14#include <linux/errno.h>
15#include <linux/netdevice.h>
16#include <linux/pkt_sched.h>
17#include <net/sch_generic.h>
18#include <net/pkt_sched.h>
19#include <net/pkt_cls.h>
20
21
22/* Quick Fair Queueing
23 ===================
24
25 Sources:
26
27 Fabio Checconi, Luigi Rizzo, and Paolo Valente: "QFQ: Efficient
28 Packet Scheduling with Tight Bandwidth Distribution Guarantees."
29
30 See also:
31 http://retis.sssup.it/~fabio/linux/qfq/
32 */
33
34/*
35
36 Virtual time computations.
37
38 S, F and V are all computed in fixed point arithmetic with
39 FRAC_BITS decimal bits.
40
41 QFQ_MAX_INDEX is the maximum index allowed for a group. We need
42 one bit per index.
43 QFQ_MAX_WSHIFT is the maximum power of two supported as a weight.
44
45 The layout of the bits is as below:
46
47 [ MTU_SHIFT ][ FRAC_BITS ]
48 [ MAX_INDEX ][ MIN_SLOT_SHIFT ]
49 ^.__grp->index = 0
50 *.__grp->slot_shift
51
52 where MIN_SLOT_SHIFT is derived by difference from the others.
53
54 The max group index corresponds to Lmax/w_min, where
55 Lmax=1<<MTU_SHIFT, w_min = 1 .
56 From this, and knowing how many groups (MAX_INDEX) we want,
57 we can derive the shift corresponding to each group.
58
59 Because we often need to compute
60 F = S + len/w_i and V = V + len/wsum
61 instead of storing w_i store the value
62 inv_w = (1<<FRAC_BITS)/w_i
63 so we can do F = S + len * inv_w.
64 We use the IWSUM constant in the formulas so we can easily move between
65 static and adaptive weight sum.
66
67 The per-scheduler-instance data contain all the data structures
68 for the scheduler: bitmaps and bucket lists.
69
70 */
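/* Illustrative example (editorial, not part of the original code): with
 * FRAC_BITS = 30, a class of weight w = 4 stores inv_w = ONE_FP/4 = 1 << 28.
 * A len = 1000 byte packet advances its finish time by
 * len * inv_w = 1000 << 28, which is len/w = 250 in ONE_FP units, while the
 * global virtual time advances by len * IWSUM = len * ONE_FP / QFQ_MAX_WSUM.
 */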
71
72/*
73 * Maximum number of consecutive slots occupied by backlogged classes
74 * inside a group.
75 */
76#define QFQ_MAX_SLOTS 32
77
78/*
79 * Shifts used for class<->group mapping. We allow class weights that are
80 * in the range [1, 2^MAX_WSHIFT], and we try to map each class i to the
81 * group with the smallest index that can support the L_i / r_i configured
82 * for the class.
83 *
84 * grp->index is the index of the group; and grp->slot_shift
85 * is the shift for the corresponding (scaled) sigma_i.
86 */
87#define QFQ_MAX_INDEX 19
88#define QFQ_MAX_WSHIFT 16
89
90#define QFQ_MAX_WEIGHT (1<<QFQ_MAX_WSHIFT)
91#define QFQ_MAX_WSUM (2*QFQ_MAX_WEIGHT)
92
93#define FRAC_BITS 30 /* fixed point arithmetic */
94#define ONE_FP (1UL << FRAC_BITS)
95#define IWSUM (ONE_FP/QFQ_MAX_WSUM)
96
97#define QFQ_MTU_SHIFT 11
98#define QFQ_MIN_SLOT_SHIFT (FRAC_BITS + QFQ_MTU_SHIFT - QFQ_MAX_INDEX)
99
100/*
101 * Possible group states. These values are used as indexes for the bitmaps
102 * array of struct qfq_queue.
103 */
104enum qfq_state { ER, IR, EB, IB, QFQ_MAX_STATE };
105
106struct qfq_group;
107
108struct qfq_class {
109 struct Qdisc_class_common common;
110
111 unsigned int refcnt;
112 unsigned int filter_cnt;
113
114 struct gnet_stats_basic_packed bstats;
115 struct gnet_stats_queue qstats;
116 struct gnet_stats_rate_est rate_est;
117 struct Qdisc *qdisc;
118
119 struct hlist_node next; /* Link for the slot list. */
120 u64 S, F; /* flow timestamps (exact) */
121
122 /* group we belong to. In principle we would need the index,
123 * which is log_2(lmax/weight), but we never reference it
124 * directly, only the group.
125 */
126 struct qfq_group *grp;
127
128 /* these are copied from the flowset. */
129 u32 inv_w; /* ONE_FP/weight */
130 u32 lmax; /* Max packet size for this flow. */
131};
132
133struct qfq_group {
134 u64 S, F; /* group timestamps (approx). */
135 unsigned int slot_shift; /* Slot shift. */
136 unsigned int index; /* Group index. */
137 unsigned int front; /* Index of the front slot. */
138 unsigned long full_slots; /* non-empty slots */
139
140 /* Array of RR lists of active classes. */
141 struct hlist_head slots[QFQ_MAX_SLOTS];
142};
143
144struct qfq_sched {
145 struct tcf_proto *filter_list;
146 struct Qdisc_class_hash clhash;
147
148 u64 V; /* Precise virtual time. */
149 u32 wsum; /* weight sum */
150
151 unsigned long bitmaps[QFQ_MAX_STATE]; /* Group bitmaps. */
152 struct qfq_group groups[QFQ_MAX_INDEX + 1]; /* The groups. */
153};
154
155static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
156{
157 struct qfq_sched *q = qdisc_priv(sch);
158 struct Qdisc_class_common *clc;
159
160 clc = qdisc_class_find(&q->clhash, classid);
161 if (clc == NULL)
162 return NULL;
163 return container_of(clc, struct qfq_class, common);
164}
165
166static void qfq_purge_queue(struct qfq_class *cl)
167{
168 unsigned int len = cl->qdisc->q.qlen;
169
170 qdisc_reset(cl->qdisc);
171 qdisc_tree_decrease_qlen(cl->qdisc, len);
172}
173
174static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = {
175 [TCA_QFQ_WEIGHT] = { .type = NLA_U32 },
176 [TCA_QFQ_LMAX] = { .type = NLA_U32 },
177};
178
179/*
180 * Calculate a flow index, given its weight and maximum packet length.
181 * index = log_2(maxlen/weight) but we need to apply the scaling.
182 * This is used only once at flow creation.
183 */
184static int qfq_calc_index(u32 inv_w, unsigned int maxlen)
185{
186 u64 slot_size = (u64)maxlen * inv_w;
187 unsigned long size_map;
188 int index = 0;
189
190 size_map = slot_size >> QFQ_MIN_SLOT_SHIFT;
191 if (!size_map)
192 goto out;
193
194 index = __fls(size_map) + 1; /* basically a log_2 */
195 index -= !(slot_size - (1ULL << (index + QFQ_MIN_SLOT_SHIFT - 1)));
196
197 if (index < 0)
198 index = 0;
199out:
200 pr_debug("qfq calc_index: W = %lu, L = %u, I = %d\n",
201 (unsigned long) ONE_FP/inv_w, maxlen, index);
202
203 return index;
204}
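/* Illustrative example (editorial, not part of the original code): for
 * weight 1 (inv_w = ONE_FP) and maxlen = 2048, slot_size = 2048 << 30 =
 * 1ULL << 41; with QFQ_MIN_SLOT_SHIFT = 30 + 11 - 19 = 22 this gives
 * size_map = 1 << 19, so index starts at 20 and the exact-power-of-two
 * correction subtracts 1, giving index 19 = QFQ_MAX_INDEX, the group
 * with the coarsest slots.
 */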
205
206static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
207 struct nlattr **tca, unsigned long *arg)
208{
209 struct qfq_sched *q = qdisc_priv(sch);
210 struct qfq_class *cl = (struct qfq_class *)*arg;
211 struct nlattr *tb[TCA_QFQ_MAX + 1];
212 u32 weight, lmax, inv_w;
213 int i, err;
214
215 if (tca[TCA_OPTIONS] == NULL) {
216 pr_notice("qfq: no options\n");
217 return -EINVAL;
218 }
219
220 err = nla_parse_nested(tb, TCA_QFQ_MAX, tca[TCA_OPTIONS], qfq_policy);
221 if (err < 0)
222 return err;
223
224 if (tb[TCA_QFQ_WEIGHT]) {
225 weight = nla_get_u32(tb[TCA_QFQ_WEIGHT]);
226 if (!weight || weight > (1UL << QFQ_MAX_WSHIFT)) {
227 pr_notice("qfq: invalid weight %u\n", weight);
228 return -EINVAL;
229 }
230 } else
231 weight = 1;
232
233 inv_w = ONE_FP / weight;
234 weight = ONE_FP / inv_w;
235 if (q->wsum + weight > QFQ_MAX_WSUM) {
236 pr_notice("qfq: total weight out of range (%u + %u)\n",
237 weight, q->wsum);
238 return -EINVAL;
239 }
240
241 if (tb[TCA_QFQ_LMAX]) {
242 lmax = nla_get_u32(tb[TCA_QFQ_LMAX]);
243 if (!lmax || lmax > (1UL << QFQ_MTU_SHIFT)) {
244 pr_notice("qfq: invalid max length %u\n", lmax);
245 return -EINVAL;
246 }
247 } else
248 lmax = 1UL << QFQ_MTU_SHIFT;
249
250 if (cl != NULL) {
251 if (tca[TCA_RATE]) {
252 err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
253 qdisc_root_sleeping_lock(sch),
254 tca[TCA_RATE]);
255 if (err)
256 return err;
257 }
258
259 sch_tree_lock(sch);
260 if (tb[TCA_QFQ_WEIGHT]) {
 261 q->wsum += weight - ONE_FP / cl->inv_w;
262 cl->inv_w = inv_w;
263 }
264 sch_tree_unlock(sch);
265
266 return 0;
267 }
268
269 cl = kzalloc(sizeof(struct qfq_class), GFP_KERNEL);
270 if (cl == NULL)
271 return -ENOBUFS;
272
273 cl->refcnt = 1;
274 cl->common.classid = classid;
275 cl->lmax = lmax;
276 cl->inv_w = inv_w;
277 i = qfq_calc_index(cl->inv_w, cl->lmax);
278
279 cl->grp = &q->groups[i];
280 q->wsum += weight;
281
282 cl->qdisc = qdisc_create_dflt(sch->dev_queue,
283 &pfifo_qdisc_ops, classid);
284 if (cl->qdisc == NULL)
285 cl->qdisc = &noop_qdisc;
286
287 if (tca[TCA_RATE]) {
288 err = gen_new_estimator(&cl->bstats, &cl->rate_est,
289 qdisc_root_sleeping_lock(sch),
290 tca[TCA_RATE]);
291 if (err) {
292 qdisc_destroy(cl->qdisc);
293 kfree(cl);
294 return err;
295 }
296 }
297
298 sch_tree_lock(sch);
299 qdisc_class_hash_insert(&q->clhash, &cl->common);
300 sch_tree_unlock(sch);
301
302 qdisc_class_hash_grow(sch, &q->clhash);
303
304 *arg = (unsigned long)cl;
305 return 0;
306}
307
308static void qfq_destroy_class(struct Qdisc *sch, struct qfq_class *cl)
309{
310 struct qfq_sched *q = qdisc_priv(sch);
311
312 if (cl->inv_w) {
313 q->wsum -= ONE_FP / cl->inv_w;
314 cl->inv_w = 0;
315 }
316
317 gen_kill_estimator(&cl->bstats, &cl->rate_est);
318 qdisc_destroy(cl->qdisc);
319 kfree(cl);
320}
321
322static int qfq_delete_class(struct Qdisc *sch, unsigned long arg)
323{
324 struct qfq_sched *q = qdisc_priv(sch);
325 struct qfq_class *cl = (struct qfq_class *)arg;
326
327 if (cl->filter_cnt > 0)
328 return -EBUSY;
329
330 sch_tree_lock(sch);
331
332 qfq_purge_queue(cl);
333 qdisc_class_hash_remove(&q->clhash, &cl->common);
334
335 BUG_ON(--cl->refcnt == 0);
336 /*
337 * This shouldn't happen: we "hold" one cops->get() when called
338 * from tc_ctl_tclass; the destroy method is done from cops->put().
339 */
340
341 sch_tree_unlock(sch);
342 return 0;
343}
344
345static unsigned long qfq_get_class(struct Qdisc *sch, u32 classid)
346{
347 struct qfq_class *cl = qfq_find_class(sch, classid);
348
349 if (cl != NULL)
350 cl->refcnt++;
351
352 return (unsigned long)cl;
353}
354
355static void qfq_put_class(struct Qdisc *sch, unsigned long arg)
356{
357 struct qfq_class *cl = (struct qfq_class *)arg;
358
359 if (--cl->refcnt == 0)
360 qfq_destroy_class(sch, cl);
361}
362
363static struct tcf_proto **qfq_tcf_chain(struct Qdisc *sch, unsigned long cl)
364{
365 struct qfq_sched *q = qdisc_priv(sch);
366
367 if (cl)
368 return NULL;
369
370 return &q->filter_list;
371}
372
373static unsigned long qfq_bind_tcf(struct Qdisc *sch, unsigned long parent,
374 u32 classid)
375{
376 struct qfq_class *cl = qfq_find_class(sch, classid);
377
378 if (cl != NULL)
379 cl->filter_cnt++;
380
381 return (unsigned long)cl;
382}
383
384static void qfq_unbind_tcf(struct Qdisc *sch, unsigned long arg)
385{
386 struct qfq_class *cl = (struct qfq_class *)arg;
387
388 cl->filter_cnt--;
389}
390
391static int qfq_graft_class(struct Qdisc *sch, unsigned long arg,
392 struct Qdisc *new, struct Qdisc **old)
393{
394 struct qfq_class *cl = (struct qfq_class *)arg;
395
396 if (new == NULL) {
397 new = qdisc_create_dflt(sch->dev_queue,
398 &pfifo_qdisc_ops, cl->common.classid);
399 if (new == NULL)
400 new = &noop_qdisc;
401 }
402
403 sch_tree_lock(sch);
404 qfq_purge_queue(cl);
405 *old = cl->qdisc;
406 cl->qdisc = new;
407 sch_tree_unlock(sch);
408 return 0;
409}
410
411static struct Qdisc *qfq_class_leaf(struct Qdisc *sch, unsigned long arg)
412{
413 struct qfq_class *cl = (struct qfq_class *)arg;
414
415 return cl->qdisc;
416}
417
418static int qfq_dump_class(struct Qdisc *sch, unsigned long arg,
419 struct sk_buff *skb, struct tcmsg *tcm)
420{
421 struct qfq_class *cl = (struct qfq_class *)arg;
422 struct nlattr *nest;
423
424 tcm->tcm_parent = TC_H_ROOT;
425 tcm->tcm_handle = cl->common.classid;
426 tcm->tcm_info = cl->qdisc->handle;
427
428 nest = nla_nest_start(skb, TCA_OPTIONS);
429 if (nest == NULL)
430 goto nla_put_failure;
431 NLA_PUT_U32(skb, TCA_QFQ_WEIGHT, ONE_FP/cl->inv_w);
432 NLA_PUT_U32(skb, TCA_QFQ_LMAX, cl->lmax);
433 return nla_nest_end(skb, nest);
434
435nla_put_failure:
436 nla_nest_cancel(skb, nest);
437 return -EMSGSIZE;
438}
439
440static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
441 struct gnet_dump *d)
442{
443 struct qfq_class *cl = (struct qfq_class *)arg;
444 struct tc_qfq_stats xstats;
445
446 memset(&xstats, 0, sizeof(xstats));
447 cl->qdisc->qstats.qlen = cl->qdisc->q.qlen;
448
449 xstats.weight = ONE_FP/cl->inv_w;
450 xstats.lmax = cl->lmax;
451
452 if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
453 gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
454 gnet_stats_copy_queue(d, &cl->qdisc->qstats) < 0)
455 return -1;
456
457 return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
458}
459
460static void qfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
461{
462 struct qfq_sched *q = qdisc_priv(sch);
463 struct qfq_class *cl;
464 struct hlist_node *n;
465 unsigned int i;
466
467 if (arg->stop)
468 return;
469
470 for (i = 0; i < q->clhash.hashsize; i++) {
471 hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
472 if (arg->count < arg->skip) {
473 arg->count++;
474 continue;
475 }
476 if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
477 arg->stop = 1;
478 return;
479 }
480 arg->count++;
481 }
482 }
483}
484
485static struct qfq_class *qfq_classify(struct sk_buff *skb, struct Qdisc *sch,
486 int *qerr)
487{
488 struct qfq_sched *q = qdisc_priv(sch);
489 struct qfq_class *cl;
490 struct tcf_result res;
491 int result;
492
493 if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) {
494 pr_debug("qfq_classify: found %d\n", skb->priority);
495 cl = qfq_find_class(sch, skb->priority);
496 if (cl != NULL)
497 return cl;
498 }
499
500 *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
501 result = tc_classify(skb, q->filter_list, &res);
502 if (result >= 0) {
503#ifdef CONFIG_NET_CLS_ACT
504 switch (result) {
505 case TC_ACT_QUEUED:
506 case TC_ACT_STOLEN:
507 *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
508 case TC_ACT_SHOT:
509 return NULL;
510 }
511#endif
512 cl = (struct qfq_class *)res.class;
513 if (cl == NULL)
514 cl = qfq_find_class(sch, res.classid);
515 return cl;
516 }
517
518 return NULL;
519}
520
521/* Generic comparison function, handling wraparound. */
522static inline int qfq_gt(u64 a, u64 b)
523{
524 return (s64)(a - b) > 0;
525}
526
527/* Round a precise timestamp to its slotted value. */
528static inline u64 qfq_round_down(u64 ts, unsigned int shift)
529{
530 return ts & ~((1ULL << shift) - 1);
531}
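/* Illustrative examples (editorial, not part of the original code):
 * qfq_gt() orders timestamps modulo 2^64, so qfq_gt(1, ~0ULL) is true
 * because (s64)(1 - ~0ULL) == 2 > 0: a timestamp that has just wrapped
 * still sorts after one about to wrap. qfq_round_down(0x1234, 8) ==
 * 0x1200, since the low 8 bits are simply cleared.
 */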
532
533/* return the pointer to the group with lowest index in the bitmap */
534static inline struct qfq_group *qfq_ffs(struct qfq_sched *q,
535 unsigned long bitmap)
536{
537 int index = __ffs(bitmap);
538 return &q->groups[index];
539}
540/* Calculate a mask to mimic what would be ffs_from(). */
541static inline unsigned long mask_from(unsigned long bitmap, int from)
542{
543 return bitmap & ~((1UL << from) - 1);
544}
545
546/*
547 * The state computation relies on ER=0, IR=1, EB=2, IB=3
548 * First compute eligibility comparing grp->S, q->V,
549 * then check if someone is blocking us and possibly add EB
550 */
551static int qfq_calc_state(struct qfq_sched *q, const struct qfq_group *grp)
552{
553 /* if S > V we are not eligible */
554 unsigned int state = qfq_gt(grp->S, q->V);
555 unsigned long mask = mask_from(q->bitmaps[ER], grp->index);
556 struct qfq_group *next;
557
558 if (mask) {
559 next = qfq_ffs(q, mask);
560 if (qfq_gt(grp->F, next->F))
561 state |= EB;
562 }
563
564 return state;
565}
566
567
568/*
569 * In principle
570 * q->bitmaps[dst] |= q->bitmaps[src] & mask;
571 * q->bitmaps[src] &= ~mask;
572 * but we should make sure that src != dst
573 */
574static inline void qfq_move_groups(struct qfq_sched *q, unsigned long mask,
575 int src, int dst)
576{
577 q->bitmaps[dst] |= q->bitmaps[src] & mask;
578 q->bitmaps[src] &= ~mask;
579}
580
581static void qfq_unblock_groups(struct qfq_sched *q, int index, u64 old_F)
582{
583 unsigned long mask = mask_from(q->bitmaps[ER], index + 1);
584 struct qfq_group *next;
585
586 if (mask) {
587 next = qfq_ffs(q, mask);
588 if (!qfq_gt(next->F, old_F))
589 return;
590 }
591
592 mask = (1UL << index) - 1;
593 qfq_move_groups(q, mask, EB, ER);
594 qfq_move_groups(q, mask, IB, IR);
595}
596
597/*
598 * perhaps
599 *
600 old_V ^= q->V;
601 old_V >>= QFQ_MIN_SLOT_SHIFT;
602 if (old_V) {
603 ...
604 }
605 *
606 */
607static void qfq_make_eligible(struct qfq_sched *q, u64 old_V)
608{
609 unsigned long vslot = q->V >> QFQ_MIN_SLOT_SHIFT;
610 unsigned long old_vslot = old_V >> QFQ_MIN_SLOT_SHIFT;
611
612 if (vslot != old_vslot) {
613 unsigned long mask = (1UL << fls(vslot ^ old_vslot)) - 1;
614 qfq_move_groups(q, mask, IR, ER);
615 qfq_move_groups(q, mask, IB, EB);
616 }
617}
618
619
620/*
621 * XXX we should make sure that slot becomes less than 32.
622 * This is guaranteed by the input values.
623 * roundedS is always cl->S rounded on grp->slot_shift bits.
624 */
625static void qfq_slot_insert(struct qfq_group *grp, struct qfq_class *cl,
626 u64 roundedS)
627{
628 u64 slot = (roundedS - grp->S) >> grp->slot_shift;
629 unsigned int i = (grp->front + slot) % QFQ_MAX_SLOTS;
630
631 hlist_add_head(&cl->next, &grp->slots[i]);
632 __set_bit(slot, &grp->full_slots);
633}
634
635/* Maybe introduce hlist_first_entry?? */
636static struct qfq_class *qfq_slot_head(struct qfq_group *grp)
637{
638 return hlist_entry(grp->slots[grp->front].first,
639 struct qfq_class, next);
640}
641
642/*
643 * remove the entry from the slot
644 */
645static void qfq_front_slot_remove(struct qfq_group *grp)
646{
647 struct qfq_class *cl = qfq_slot_head(grp);
648
649 BUG_ON(!cl);
650 hlist_del(&cl->next);
651 if (hlist_empty(&grp->slots[grp->front]))
652 __clear_bit(0, &grp->full_slots);
653}
654
655/*
656 * Returns the first full queue in a group. As a side effect,
657 * adjust the bucket list so the first non-empty bucket is at
658 * position 0 in full_slots.
659 */
660static struct qfq_class *qfq_slot_scan(struct qfq_group *grp)
661{
662 unsigned int i;
663
664 pr_debug("qfq slot_scan: grp %u full %#lx\n",
665 grp->index, grp->full_slots);
666
667 if (grp->full_slots == 0)
668 return NULL;
669
670 i = __ffs(grp->full_slots); /* zero based */
671 if (i > 0) {
672 grp->front = (grp->front + i) % QFQ_MAX_SLOTS;
673 grp->full_slots >>= i;
674 }
675
676 return qfq_slot_head(grp);
677}
678
679/*
680 * adjust the bucket list. When the start time of a group decreases,
681 * we move the index down (modulo QFQ_MAX_SLOTS) so we don't need to
682 * move the objects. The mask of occupied slots must be shifted
683 * because we use ffs() to find the first non-empty slot.
684 * This covers decreases in the group's start time, but what about
685 * increases of the start time ?
686 * Here too we should make sure that i is less than 32
687 */
688static void qfq_slot_rotate(struct qfq_group *grp, u64 roundedS)
689{
690 unsigned int i = (grp->S - roundedS) >> grp->slot_shift;
691
692 grp->full_slots <<= i;
693 grp->front = (grp->front - i) % QFQ_MAX_SLOTS;
694}
695
696static void qfq_update_eligible(struct qfq_sched *q, u64 old_V)
697{
698 struct qfq_group *grp;
699 unsigned long ineligible;
700
701 ineligible = q->bitmaps[IR] | q->bitmaps[IB];
702 if (ineligible) {
703 if (!q->bitmaps[ER]) {
704 grp = qfq_ffs(q, ineligible);
705 if (qfq_gt(grp->S, q->V))
706 q->V = grp->S;
707 }
708 qfq_make_eligible(q, old_V);
709 }
710}
711
712/* Return the length of the next packet in the queue (0 if the queue is empty) */
713static unsigned int qdisc_peek_len(struct Qdisc *sch)
714{
715 struct sk_buff *skb;
716
717 skb = sch->ops->peek(sch);
718 return skb ? qdisc_pkt_len(skb) : 0;
719}
720
721/*
722 * Updates the class, returns true if also the group needs to be updated.
723 */
724static bool qfq_update_class(struct qfq_group *grp, struct qfq_class *cl)
725{
726 unsigned int len = qdisc_peek_len(cl->qdisc);
727
728 cl->S = cl->F;
729 if (!len)
730 qfq_front_slot_remove(grp); /* queue is empty */
731 else {
732 u64 roundedS;
733
734 cl->F = cl->S + (u64)len * cl->inv_w;
735 roundedS = qfq_round_down(cl->S, grp->slot_shift);
736 if (roundedS == grp->S)
737 return false;
738
739 qfq_front_slot_remove(grp);
740 qfq_slot_insert(grp, cl, roundedS);
741 }
742
743 return true;
744}
745
746static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
747{
748 struct qfq_sched *q = qdisc_priv(sch);
749 struct qfq_group *grp;
750 struct qfq_class *cl;
751 struct sk_buff *skb;
752 unsigned int len;
753 u64 old_V;
754
755 if (!q->bitmaps[ER])
756 return NULL;
757
758 grp = qfq_ffs(q, q->bitmaps[ER]);
759
760 cl = qfq_slot_head(grp);
761 skb = qdisc_dequeue_peeked(cl->qdisc);
762 if (!skb) {
763 WARN_ONCE(1, "qfq_dequeue: non-workconserving leaf\n");
764 return NULL;
765 }
766
767 sch->q.qlen--;
768 qdisc_bstats_update(sch, skb);
769
770 old_V = q->V;
771 len = qdisc_pkt_len(skb);
772 q->V += (u64)len * IWSUM;
773 pr_debug("qfq dequeue: len %u F %lld now %lld\n",
774 len, (unsigned long long) cl->F, (unsigned long long) q->V);
775
776 if (qfq_update_class(grp, cl)) {
777 u64 old_F = grp->F;
778
779 cl = qfq_slot_scan(grp);
780 if (!cl)
781 __clear_bit(grp->index, &q->bitmaps[ER]);
782 else {
783 u64 roundedS = qfq_round_down(cl->S, grp->slot_shift);
784 unsigned int s;
785
786 if (grp->S == roundedS)
787 goto skip_unblock;
788 grp->S = roundedS;
789 grp->F = roundedS + (2ULL << grp->slot_shift);
790 __clear_bit(grp->index, &q->bitmaps[ER]);
791 s = qfq_calc_state(q, grp);
792 __set_bit(grp->index, &q->bitmaps[s]);
793 }
794
795 qfq_unblock_groups(q, grp->index, old_F);
796 }
797
798skip_unblock:
799 qfq_update_eligible(q, old_V);
800
801 return skb;
802}
803
804/*
805 * Assign a reasonable start time for a new flow k in group i.
806 * Admissible values for \hat(F) are multiples of \sigma_i
807 * no greater than V+\sigma_i . Larger values mean that
808 * we had a wraparound so we consider the timestamp to be stale.
809 *
810 * If F is not stale and F >= V then we set S = F.
811 * Otherwise we should assign S = V, but this may violate
812 * the ordering in ER. So, if we have groups in ER, set S to
813 * the F_j of the first group j which would be blocking us.
814 * We are guaranteed not to move S backward because
815 * otherwise our group i would still be blocked.
816 */
817static void qfq_update_start(struct qfq_sched *q, struct qfq_class *cl)
818{
819 unsigned long mask;
820 uint32_t limit, roundedF;
821 int slot_shift = cl->grp->slot_shift;
822
823 roundedF = qfq_round_down(cl->F, slot_shift);
824 limit = qfq_round_down(q->V, slot_shift) + (1UL << slot_shift);
825
826 if (!qfq_gt(cl->F, q->V) || qfq_gt(roundedF, limit)) {
827 /* timestamp was stale */
828 mask = mask_from(q->bitmaps[ER], cl->grp->index);
829 if (mask) {
830 struct qfq_group *next = qfq_ffs(q, mask);
831 if (qfq_gt(roundedF, next->F)) {
832 cl->S = next->F;
833 return;
834 }
835 }
836 cl->S = q->V;
837 } else /* timestamp is not stale */
838 cl->S = cl->F;
839}
840
841static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
842{
843 struct qfq_sched *q = qdisc_priv(sch);
844 struct qfq_group *grp;
845 struct qfq_class *cl;
846 int err;
847 u64 roundedS;
848 int s;
849
850 cl = qfq_classify(skb, sch, &err);
851 if (cl == NULL) {
852 if (err & __NET_XMIT_BYPASS)
853 sch->qstats.drops++;
854 kfree_skb(skb);
855 return err;
856 }
857 pr_debug("qfq_enqueue: cl = %x\n", cl->common.classid);
858
859 err = qdisc_enqueue(skb, cl->qdisc);
860 if (unlikely(err != NET_XMIT_SUCCESS)) {
861 pr_debug("qfq_enqueue: enqueue failed %d\n", err);
862 if (net_xmit_drop_count(err)) {
863 cl->qstats.drops++;
864 sch->qstats.drops++;
865 }
866 return err;
867 }
868
869 bstats_update(&cl->bstats, skb);
870 ++sch->q.qlen;
871
 872 /* If the new skb is not the head of the queue, we are done here. */
873 if (cl->qdisc->q.qlen != 1)
874 return err;
875
 876 /* If we reach this point, queue q was idle */
877 grp = cl->grp;
878 qfq_update_start(q, cl);
879
880 /* compute new finish time and rounded start. */
881 cl->F = cl->S + (u64)qdisc_pkt_len(skb) * cl->inv_w;
882 roundedS = qfq_round_down(cl->S, grp->slot_shift);
883
884 /*
885 * insert cl in the correct bucket.
886 * If cl->S >= grp->S we don't need to adjust the
887 * bucket list and simply go to the insertion phase.
888 * Otherwise grp->S is decreasing, we must make room
889 * in the bucket list, and also recompute the group state.
890 * Finally, if there were no flows in this group and nobody
891 * was in ER make sure to adjust V.
892 */
893 if (grp->full_slots) {
894 if (!qfq_gt(grp->S, cl->S))
895 goto skip_update;
896
897 /* create a slot for this cl->S */
898 qfq_slot_rotate(grp, roundedS);
899 /* group was surely ineligible, remove */
900 __clear_bit(grp->index, &q->bitmaps[IR]);
901 __clear_bit(grp->index, &q->bitmaps[IB]);
902 } else if (!q->bitmaps[ER] && qfq_gt(roundedS, q->V))
903 q->V = roundedS;
904
905 grp->S = roundedS;
906 grp->F = roundedS + (2ULL << grp->slot_shift);
907 s = qfq_calc_state(q, grp);
908 __set_bit(grp->index, &q->bitmaps[s]);
909
910 pr_debug("qfq enqueue: new state %d %#lx S %lld F %lld V %lld\n",
911 s, q->bitmaps[s],
912 (unsigned long long) cl->S,
913 (unsigned long long) cl->F,
914 (unsigned long long) q->V);
915
916skip_update:
917 qfq_slot_insert(grp, cl, roundedS);
918
919 return err;
920}
921
922
923static void qfq_slot_remove(struct qfq_sched *q, struct qfq_group *grp,
924 struct qfq_class *cl)
925{
926 unsigned int i, offset;
927 u64 roundedS;
928
929 roundedS = qfq_round_down(cl->S, grp->slot_shift);
930 offset = (roundedS - grp->S) >> grp->slot_shift;
931 i = (grp->front + offset) % QFQ_MAX_SLOTS;
932
933 hlist_del(&cl->next);
934 if (hlist_empty(&grp->slots[i]))
935 __clear_bit(offset, &grp->full_slots);
936}
937
938/*
939 * called to forcibly destroy a queue.
940 * If the queue is not in the front bucket, or if it has
941 * other queues in the front bucket, we can simply remove
942 * the queue with no other side effects.
943 * Otherwise we must propagate the event up.
944 */
945static void qfq_deactivate_class(struct qfq_sched *q, struct qfq_class *cl)
946{
947 struct qfq_group *grp = cl->grp;
948 unsigned long mask;
949 u64 roundedS;
950 int s;
951
952 cl->F = cl->S;
953 qfq_slot_remove(q, grp, cl);
954
955 if (!grp->full_slots) {
956 __clear_bit(grp->index, &q->bitmaps[IR]);
957 __clear_bit(grp->index, &q->bitmaps[EB]);
958 __clear_bit(grp->index, &q->bitmaps[IB]);
959
960 if (test_bit(grp->index, &q->bitmaps[ER]) &&
961 !(q->bitmaps[ER] & ~((1UL << grp->index) - 1))) {
962 mask = q->bitmaps[ER] & ((1UL << grp->index) - 1);
963 if (mask)
964 mask = ~((1UL << __fls(mask)) - 1);
965 else
966 mask = ~0UL;
967 qfq_move_groups(q, mask, EB, ER);
968 qfq_move_groups(q, mask, IB, IR);
969 }
970 __clear_bit(grp->index, &q->bitmaps[ER]);
971 } else if (hlist_empty(&grp->slots[grp->front])) {
972 cl = qfq_slot_scan(grp);
973 roundedS = qfq_round_down(cl->S, grp->slot_shift);
974 if (grp->S != roundedS) {
975 __clear_bit(grp->index, &q->bitmaps[ER]);
976 __clear_bit(grp->index, &q->bitmaps[IR]);
977 __clear_bit(grp->index, &q->bitmaps[EB]);
978 __clear_bit(grp->index, &q->bitmaps[IB]);
979 grp->S = roundedS;
980 grp->F = roundedS + (2ULL << grp->slot_shift);
981 s = qfq_calc_state(q, grp);
982 __set_bit(grp->index, &q->bitmaps[s]);
983 }
984 }
985
986 qfq_update_eligible(q, q->V);
987}
988
989static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg)
990{
991 struct qfq_sched *q = qdisc_priv(sch);
992 struct qfq_class *cl = (struct qfq_class *)arg;
993
994 if (cl->qdisc->q.qlen == 0)
995 qfq_deactivate_class(q, cl);
996}
997
998static unsigned int qfq_drop(struct Qdisc *sch)
999{
1000 struct qfq_sched *q = qdisc_priv(sch);
1001 struct qfq_group *grp;
1002 unsigned int i, j, len;
1003
1004 for (i = 0; i <= QFQ_MAX_INDEX; i++) {
1005 grp = &q->groups[i];
1006 for (j = 0; j < QFQ_MAX_SLOTS; j++) {
1007 struct qfq_class *cl;
1008 struct hlist_node *n;
1009
1010 hlist_for_each_entry(cl, n, &grp->slots[j], next) {
1011
1012 if (!cl->qdisc->ops->drop)
1013 continue;
1014
1015 len = cl->qdisc->ops->drop(cl->qdisc);
1016 if (len > 0) {
1017 sch->q.qlen--;
1018 if (!cl->qdisc->q.qlen)
1019 qfq_deactivate_class(q, cl);
1020
1021 return len;
1022 }
1023 }
1024 }
1025 }
1026
1027 return 0;
1028}
1029
1030static int qfq_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
1031{
1032 struct qfq_sched *q = qdisc_priv(sch);
1033 struct qfq_group *grp;
1034 int i, j, err;
1035
1036 err = qdisc_class_hash_init(&q->clhash);
1037 if (err < 0)
1038 return err;
1039
1040 for (i = 0; i <= QFQ_MAX_INDEX; i++) {
1041 grp = &q->groups[i];
1042 grp->index = i;
1043 grp->slot_shift = QFQ_MTU_SHIFT + FRAC_BITS
1044 - (QFQ_MAX_INDEX - i);
1045 for (j = 0; j < QFQ_MAX_SLOTS; j++)
1046 INIT_HLIST_HEAD(&grp->slots[j]);
1047 }
1048
1049 return 0;
1050}
1051
1052static void qfq_reset_qdisc(struct Qdisc *sch)
1053{
1054 struct qfq_sched *q = qdisc_priv(sch);
1055 struct qfq_group *grp;
1056 struct qfq_class *cl;
1057 struct hlist_node *n, *tmp;
1058 unsigned int i, j;
1059
1060 for (i = 0; i <= QFQ_MAX_INDEX; i++) {
1061 grp = &q->groups[i];
1062 for (j = 0; j < QFQ_MAX_SLOTS; j++) {
1063 hlist_for_each_entry_safe(cl, n, tmp,
1064 &grp->slots[j], next) {
1065 qfq_deactivate_class(q, cl);
1066 }
1067 }
1068 }
1069
1070 for (i = 0; i < q->clhash.hashsize; i++) {
1071 hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode)
1072 qdisc_reset(cl->qdisc);
1073 }
1074 sch->q.qlen = 0;
1075}
1076
1077static void qfq_destroy_qdisc(struct Qdisc *sch)
1078{
1079 struct qfq_sched *q = qdisc_priv(sch);
1080 struct qfq_class *cl;
1081 struct hlist_node *n, *next;
1082 unsigned int i;
1083
1084 tcf_destroy_chain(&q->filter_list);
1085
1086 for (i = 0; i < q->clhash.hashsize; i++) {
1087 hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i],
1088 common.hnode) {
1089 qfq_destroy_class(sch, cl);
1090 }
1091 }
1092 qdisc_class_hash_destroy(&q->clhash);
1093}
1094
1095static const struct Qdisc_class_ops qfq_class_ops = {
1096 .change = qfq_change_class,
1097 .delete = qfq_delete_class,
1098 .get = qfq_get_class,
1099 .put = qfq_put_class,
1100 .tcf_chain = qfq_tcf_chain,
1101 .bind_tcf = qfq_bind_tcf,
1102 .unbind_tcf = qfq_unbind_tcf,
1103 .graft = qfq_graft_class,
1104 .leaf = qfq_class_leaf,
1105 .qlen_notify = qfq_qlen_notify,
1106 .dump = qfq_dump_class,
1107 .dump_stats = qfq_dump_class_stats,
1108 .walk = qfq_walk,
1109};
1110
1111static struct Qdisc_ops qfq_qdisc_ops __read_mostly = {
1112 .cl_ops = &qfq_class_ops,
1113 .id = "qfq",
1114 .priv_size = sizeof(struct qfq_sched),
1115 .enqueue = qfq_enqueue,
1116 .dequeue = qfq_dequeue,
1117 .peek = qdisc_peek_dequeued,
1118 .drop = qfq_drop,
1119 .init = qfq_init_qdisc,
1120 .reset = qfq_reset_qdisc,
1121 .destroy = qfq_destroy_qdisc,
1122 .owner = THIS_MODULE,
1123};
1124
1125static int __init qfq_init(void)
1126{
1127 return register_qdisc(&qfq_qdisc_ops);
1128}
1129
1130static void __exit qfq_exit(void)
1131{
1132 unregister_qdisc(&qfq_qdisc_ops);
1133}
1134
1135module_init(qfq_init);
1136module_exit(qfq_exit);
1137MODULE_LICENSE("GPL");
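A minimal configuration sketch, assuming an iproute2 build with matching QFQ support (the weight and maxpkt options map to the TCA_QFQ_WEIGHT and TCA_QFQ_LMAX attributes parsed above):

	tc qdisc add dev eth0 root handle 1: qfq
	tc class add dev eth0 parent 1: classid 1:1 qfq weight 10 maxpkt 1514
	tc filter add dev eth0 parent 1: protocol ip prio 1 u32 \
		match ip dport 80 0xffff classid 1:1

Weights must lie in [1, 1 << QFQ_MAX_WSHIFT] and sum to at most QFQ_MAX_WSUM; maxpkt is capped at 1 << QFQ_MTU_SHIFT = 2048 bytes.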
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index c2e628dfaacc..b6ea6afa55b0 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -169,7 +169,7 @@ static unsigned int sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
169 } 169 }
170 case htons(ETH_P_IPV6): 170 case htons(ETH_P_IPV6):
171 { 171 {
172 struct ipv6hdr *iph; 172 const struct ipv6hdr *iph;
173 int poff; 173 int poff;
174 174
175 if (!pskb_network_may_pull(skb, sizeof(*iph))) 175 if (!pskb_network_may_pull(skb, sizeof(*iph)))
@@ -361,7 +361,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
361{ 361{
362 struct sfq_sched_data *q = qdisc_priv(sch); 362 struct sfq_sched_data *q = qdisc_priv(sch);
363 unsigned int hash; 363 unsigned int hash;
364 sfq_index x; 364 sfq_index x, qlen;
365 struct sfq_slot *slot; 365 struct sfq_slot *slot;
366 int uninitialized_var(ret); 366 int uninitialized_var(ret);
367 367
@@ -405,20 +405,12 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
405 if (++sch->q.qlen <= q->limit) 405 if (++sch->q.qlen <= q->limit)
406 return NET_XMIT_SUCCESS; 406 return NET_XMIT_SUCCESS;
407 407
408 qlen = slot->qlen;
408 sfq_drop(sch); 409 sfq_drop(sch);
409 return NET_XMIT_CN; 410 /* Return Congestion Notification only if we dropped a packet
410} 411 * from this flow.
411 412 */
412static struct sk_buff * 413 return (qlen != slot->qlen) ? NET_XMIT_CN : NET_XMIT_SUCCESS;
413sfq_peek(struct Qdisc *sch)
414{
415 struct sfq_sched_data *q = qdisc_priv(sch);
416
417 /* No active slots */
418 if (q->tail == NULL)
419 return NULL;
420
421 return q->slots[q->tail->next].skblist_next;
422} 414}
423 415
424static struct sk_buff * 416static struct sk_buff *
@@ -702,7 +694,7 @@ static struct Qdisc_ops sfq_qdisc_ops __read_mostly = {
702 .priv_size = sizeof(struct sfq_sched_data), 694 .priv_size = sizeof(struct sfq_sched_data),
703 .enqueue = sfq_enqueue, 695 .enqueue = sfq_enqueue,
704 .dequeue = sfq_dequeue, 696 .dequeue = sfq_dequeue,
705 .peek = sfq_peek, 697 .peek = qdisc_peek_dequeued,
706 .drop = sfq_drop, 698 .drop = sfq_drop,
707 .init = sfq_init, 699 .init = sfq_init,
708 .reset = sfq_reset, 700 .reset = sfq_reset,
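The NET_XMIT_CN change matters because sfq_drop() evicts from the longest queue, which is not necessarily the flow that just enqueued: a local sender treats NET_XMIT_CN as a congestion signal and backs off, so it should only be returned when the drop actually hit the enqueuing slot, which the qlen snapshot detects. Dropping the private sfq_peek() is safe because the generic qdisc_peek_dequeued() dequeues the head skb once and caches it for the following dequeue, giving the same head-of-queue answer.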
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 1a21c571aa03..4a62888f2e43 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -64,6 +64,7 @@
64/* Forward declarations for internal functions. */ 64/* Forward declarations for internal functions. */
65static void sctp_assoc_bh_rcv(struct work_struct *work); 65static void sctp_assoc_bh_rcv(struct work_struct *work);
66static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc); 66static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc);
67static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc);
67 68
68/* Keep track of the new idr low so that we don't re-use association id 69/* Keep track of the new idr low so that we don't re-use association id
69 * numbers too fast. It is protected by the idr spin lock and is in the 70 * numbers too fast. It is protected by the idr spin lock and is in the
@@ -443,12 +444,7 @@ void sctp_association_free(struct sctp_association *asoc)
443 444
444 asoc->peer.transport_count = 0; 445 asoc->peer.transport_count = 0;
445 446
446 /* Free any cached ASCONF_ACK chunk. */ 447 sctp_asconf_queue_teardown(asoc);
447 sctp_assoc_free_asconf_acks(asoc);
448
449 /* Free any cached ASCONF chunk. */
450 if (asoc->addip_last_asconf)
451 sctp_chunk_free(asoc->addip_last_asconf);
452 448
453 /* AUTH - Free the endpoint shared keys */ 449 /* AUTH - Free the endpoint shared keys */
454 sctp_auth_destroy_keys(&asoc->endpoint_shared_keys); 450 sctp_auth_destroy_keys(&asoc->endpoint_shared_keys);
@@ -1578,6 +1574,18 @@ retry:
1578 return error; 1574 return error;
1579} 1575}
1580 1576
1577/* Free the ASCONF queue */
1578static void sctp_assoc_free_asconf_queue(struct sctp_association *asoc)
1579{
1580 struct sctp_chunk *asconf;
1581 struct sctp_chunk *tmp;
1582
1583 list_for_each_entry_safe(asconf, tmp, &asoc->addip_chunk_list, list) {
1584 list_del_init(&asconf->list);
1585 sctp_chunk_free(asconf);
1586 }
1587}
1588
1581/* Free asconf_ack cache */ 1589/* Free asconf_ack cache */
1582static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc) 1590static void sctp_assoc_free_asconf_acks(struct sctp_association *asoc)
1583{ 1591{
@@ -1630,3 +1638,16 @@ struct sctp_chunk *sctp_assoc_lookup_asconf_ack(
1630 1638
1631 return NULL; 1639 return NULL;
1632} 1640}
1641
1642void sctp_asconf_queue_teardown(struct sctp_association *asoc)
1643{
1644 /* Free any cached ASCONF_ACK chunk. */
1645 sctp_assoc_free_asconf_acks(asoc);
1646
1647 /* Free the ASCONF queue. */
1648 sctp_assoc_free_asconf_queue(asoc);
1649
1650 /* Free any cached ASCONF chunk. */
1651 if (asoc->addip_last_asconf)
1652 sctp_chunk_free(asoc->addip_last_asconf);
1653}
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index faf71d179e46..83e3011c19ca 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -140,14 +140,12 @@ void sctp_bind_addr_init(struct sctp_bind_addr *bp, __u16 port)
140/* Dispose of the address list. */ 140/* Dispose of the address list. */
141static void sctp_bind_addr_clean(struct sctp_bind_addr *bp) 141static void sctp_bind_addr_clean(struct sctp_bind_addr *bp)
142{ 142{
143 struct sctp_sockaddr_entry *addr; 143 struct sctp_sockaddr_entry *addr, *temp;
144 struct list_head *pos, *temp;
145 144
146 /* Empty the bind address list. */ 145 /* Empty the bind address list. */
147 list_for_each_safe(pos, temp, &bp->address_list) { 146 list_for_each_entry_safe(addr, temp, &bp->address_list, list) {
148 addr = list_entry(pos, struct sctp_sockaddr_entry, list); 147 list_del_rcu(&addr->list);
149 list_del(pos); 148 kfree_rcu(addr, rcu);
150 kfree(addr);
151 SCTP_DBG_OBJCNT_DEC(addr); 149 SCTP_DBG_OBJCNT_DEC(addr);
152 } 150 }
153} 151}
@@ -219,7 +217,7 @@ int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr)
219 } 217 }
220 218
221 if (found) { 219 if (found) {
222 call_rcu(&addr->rcu, sctp_local_addr_free); 220 kfree_rcu(addr, rcu);
223 SCTP_DBG_OBJCNT_DEC(addr); 221 SCTP_DBG_OBJCNT_DEC(addr);
224 return 0; 222 return 0;
225 } 223 }
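Both hunks are the standard RCU list teardown: unlink with list_del_rcu() so concurrent readers can finish traversing, then defer the free past the grace period. A minimal sketch, assuming an entry type with an embedded rcu_head as sctp_sockaddr_entry has:

	#include <linux/list.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct entry {
		struct list_head list;
		struct rcu_head rcu;
	};

	static void entry_remove(struct entry *e)
	{
		list_del_rcu(&e->list);	/* readers already in the list stay safe */
		kfree_rcu(e, rcu);	/* kfree() runs after the grace period */
	}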
diff --git a/net/sctp/debug.c b/net/sctp/debug.c
index bf24fa697de2..ec997cfe0a7e 100644
--- a/net/sctp/debug.c
+++ b/net/sctp/debug.c
@@ -98,7 +98,6 @@ const char *sctp_cname(const sctp_subtype_t cid)
98 98
99/* These are printable forms of the states. */ 99/* These are printable forms of the states. */
100const char *const sctp_state_tbl[SCTP_STATE_NUM_STATES] = { 100const char *const sctp_state_tbl[SCTP_STATE_NUM_STATES] = {
101 "STATE_EMPTY",
102 "STATE_CLOSED", 101 "STATE_CLOSED",
103 "STATE_COOKIE_WAIT", 102 "STATE_COOKIE_WAIT",
104 "STATE_COOKIE_ECHOED", 103 "STATE_COOKIE_ECHOED",
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index e10acc01c75f..c8cc24e282c3 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -325,6 +325,7 @@ static struct sctp_association *__sctp_endpoint_lookup_assoc(
325 struct sctp_transport **transport) 325 struct sctp_transport **transport)
326{ 326{
327 struct sctp_association *asoc = NULL; 327 struct sctp_association *asoc = NULL;
328 struct sctp_association *tmp;
328 struct sctp_transport *t = NULL; 329 struct sctp_transport *t = NULL;
329 struct sctp_hashbucket *head; 330 struct sctp_hashbucket *head;
330 struct sctp_ep_common *epb; 331 struct sctp_ep_common *epb;
@@ -333,25 +334,32 @@ static struct sctp_association *__sctp_endpoint_lookup_assoc(
333 int rport; 334 int rport;
334 335
335 *transport = NULL; 336 *transport = NULL;
337
338 /* If the local port is not set, there can't be any associations
339 * on this endpoint.
340 */
341 if (!ep->base.bind_addr.port)
342 goto out;
343
336 rport = ntohs(paddr->v4.sin_port); 344 rport = ntohs(paddr->v4.sin_port);
337 345
338 hash = sctp_assoc_hashfn(ep->base.bind_addr.port, rport); 346 hash = sctp_assoc_hashfn(ep->base.bind_addr.port, rport);
339 head = &sctp_assoc_hashtable[hash]; 347 head = &sctp_assoc_hashtable[hash];
340 read_lock(&head->lock); 348 read_lock(&head->lock);
341 sctp_for_each_hentry(epb, node, &head->chain) { 349 sctp_for_each_hentry(epb, node, &head->chain) {
342 asoc = sctp_assoc(epb); 350 tmp = sctp_assoc(epb);
343 if (asoc->ep != ep || rport != asoc->peer.port) 351 if (tmp->ep != ep || rport != tmp->peer.port)
344 goto next; 352 continue;
345 353
346 t = sctp_assoc_lookup_paddr(asoc, paddr); 354 t = sctp_assoc_lookup_paddr(tmp, paddr);
347 if (t) { 355 if (t) {
356 asoc = tmp;
348 *transport = t; 357 *transport = t;
349 break; 358 break;
350 } 359 }
351next:
352 asoc = NULL;
353 } 360 }
354 read_unlock(&head->lock); 361 read_unlock(&head->lock);
362out:
355 return asoc; 363 return asoc;
356} 364}
357 365
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 5436c6921167..741ed1648838 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -565,7 +565,7 @@ void sctp_err_finish(struct sock *sk, struct sctp_association *asoc)
565 */ 565 */
566void sctp_v4_err(struct sk_buff *skb, __u32 info) 566void sctp_v4_err(struct sk_buff *skb, __u32 info)
567{ 567{
568 struct iphdr *iph = (struct iphdr *)skb->data; 568 const struct iphdr *iph = (const struct iphdr *)skb->data;
569 const int ihlen = iph->ihl * 4; 569 const int ihlen = iph->ihl * 4;
570 const int type = icmp_hdr(skb)->type; 570 const int type = icmp_hdr(skb)->type;
571 const int code = icmp_hdr(skb)->code; 571 const int code = icmp_hdr(skb)->code;
@@ -661,7 +661,6 @@ static int sctp_rcv_ootb(struct sk_buff *skb)
661{ 661{
662 sctp_chunkhdr_t *ch; 662 sctp_chunkhdr_t *ch;
663 __u8 *ch_end; 663 __u8 *ch_end;
664 sctp_errhdr_t *err;
665 664
666 ch = (sctp_chunkhdr_t *) skb->data; 665 ch = (sctp_chunkhdr_t *) skb->data;
667 666
@@ -697,20 +696,6 @@ static int sctp_rcv_ootb(struct sk_buff *skb)
697 if (SCTP_CID_INIT == ch->type && (void *)ch != skb->data) 696 if (SCTP_CID_INIT == ch->type && (void *)ch != skb->data)
698 goto discard; 697 goto discard;
699 698
700 /* RFC 8.4, 7) If the packet contains a "Stale cookie" ERROR
701 * or a COOKIE ACK the SCTP Packet should be silently
702 * discarded.
703 */
704 if (SCTP_CID_COOKIE_ACK == ch->type)
705 goto discard;
706
707 if (SCTP_CID_ERROR == ch->type) {
708 sctp_walk_errors(err, ch) {
709 if (SCTP_ERROR_STALE_COOKIE == err->cause)
710 goto discard;
711 }
712 }
713
714 ch = (sctp_chunkhdr_t *) ch_end; 699 ch = (sctp_chunkhdr_t *) ch_end;
715 } while (ch_end < skb_tail_pointer(skb)); 700 } while (ch_end < skb_tail_pointer(skb));
716 701
@@ -1017,7 +1002,7 @@ static struct sctp_association *__sctp_rcv_asconf_lookup(
1017 /* Skip over the ADDIP header and find the Address parameter */ 1002 /* Skip over the ADDIP header and find the Address parameter */
1018 param = (union sctp_addr_param *)(asconf + 1); 1003 param = (union sctp_addr_param *)(asconf + 1);
1019 1004
1020 af = sctp_get_af_specific(param_type2af(param->v4.param_hdr.type)); 1005 af = sctp_get_af_specific(param_type2af(param->p.type));
1021 if (unlikely(!af)) 1006 if (unlikely(!af))
1022 return NULL; 1007 return NULL;
1023 1008
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 865ce7ba4e14..0bb0d7cb9f10 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -80,6 +80,13 @@
80 80
81#include <asm/uaccess.h> 81#include <asm/uaccess.h>
82 82
83static inline int sctp_v6_addr_match_len(union sctp_addr *s1,
84 union sctp_addr *s2);
85static void sctp_v6_to_addr(union sctp_addr *addr, struct in6_addr *saddr,
86 __be16 port);
87static int sctp_v6_cmp_addr(const union sctp_addr *addr1,
88 const union sctp_addr *addr2);
89
83/* Event handler for inet6 address addition/deletion events. 90/* Event handler for inet6 address addition/deletion events.
84 * The sctp_local_addr_list needs to be protected by a spin lock since 91 * The sctp_local_addr_list needs to be protected by a spin lock since
85 * multiple notifiers (say IPv4 and IPv6) may be running at the same 92 * multiple notifiers (say IPv4 and IPv6) may be running at the same
@@ -123,7 +130,7 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
123 } 130 }
124 spin_unlock_bh(&sctp_local_addr_lock); 131 spin_unlock_bh(&sctp_local_addr_lock);
125 if (found) 132 if (found)
126 call_rcu(&addr->rcu, sctp_local_addr_free); 133 kfree_rcu(addr, rcu);
127 break; 134 break;
128 } 135 }
129 136
@@ -240,37 +247,107 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
240/* Returns the dst cache entry for the given source and destination ip 247/* Returns the dst cache entry for the given source and destination ip
241 * addresses. 248 * addresses.
242 */ 249 */
243static struct dst_entry *sctp_v6_get_dst(struct sctp_association *asoc, 250static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
244 union sctp_addr *daddr, 251 struct flowi *fl, struct sock *sk)
245 union sctp_addr *saddr)
246{ 252{
247 struct dst_entry *dst; 253 struct sctp_association *asoc = t->asoc;
248 struct flowi6 fl6; 254 struct dst_entry *dst = NULL;
255 struct flowi6 *fl6 = &fl->u.ip6;
256 struct sctp_bind_addr *bp;
257 struct sctp_sockaddr_entry *laddr;
258 union sctp_addr *baddr = NULL;
259 union sctp_addr *daddr = &t->ipaddr;
260 union sctp_addr dst_saddr;
261 __u8 matchlen = 0;
262 __u8 bmatchlen;
263 sctp_scope_t scope;
249 264
250 memset(&fl6, 0, sizeof(fl6)); 265 memset(fl6, 0, sizeof(struct flowi6));
251 ipv6_addr_copy(&fl6.daddr, &daddr->v6.sin6_addr); 266 ipv6_addr_copy(&fl6->daddr, &daddr->v6.sin6_addr);
267 fl6->fl6_dport = daddr->v6.sin6_port;
268 fl6->flowi6_proto = IPPROTO_SCTP;
252 if (ipv6_addr_type(&daddr->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) 269 if (ipv6_addr_type(&daddr->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL)
253 fl6.flowi6_oif = daddr->v6.sin6_scope_id; 270 fl6->flowi6_oif = daddr->v6.sin6_scope_id;
254 271
272 SCTP_DEBUG_PRINTK("%s: DST=%pI6 ", __func__, &fl6->daddr);
255 273
256 SCTP_DEBUG_PRINTK("%s: DST=%pI6 ", __func__, &fl6.daddr); 274 if (asoc)
275 fl6->fl6_sport = htons(asoc->base.bind_addr.port);
257 276
258 if (saddr) { 277 if (saddr) {
259 ipv6_addr_copy(&fl6.saddr, &saddr->v6.sin6_addr); 278 ipv6_addr_copy(&fl6->saddr, &saddr->v6.sin6_addr);
260 SCTP_DEBUG_PRINTK("SRC=%pI6 - ", &fl6.saddr); 279 fl6->fl6_sport = saddr->v6.sin6_port;
280 SCTP_DEBUG_PRINTK("SRC=%pI6 - ", &fl6->saddr);
281 }
282
283 dst = ip6_dst_lookup_flow(sk, fl6, NULL, false);
284 if (!asoc || saddr)
285 goto out;
286
287 bp = &asoc->base.bind_addr;
288 scope = sctp_scope(daddr);
289 /* ip6_dst_lookup has filled in the fl6->saddr for us. Check
290 * to see if we can use it.
291 */
292 if (!IS_ERR(dst)) {
293 /* Walk through the bind address list and look for a bind
294 * address that matches the source address of the returned dst.
295 */
296 sctp_v6_to_addr(&dst_saddr, &fl6->saddr, htons(bp->port));
297 rcu_read_lock();
298 list_for_each_entry_rcu(laddr, &bp->address_list, list) {
299 if (!laddr->valid || (laddr->state != SCTP_ADDR_SRC))
300 continue;
301
302 /* Do not compare against v4 addrs */
303 if ((laddr->a.sa.sa_family == AF_INET6) &&
304 (sctp_v6_cmp_addr(&dst_saddr, &laddr->a))) {
305 rcu_read_unlock();
306 goto out;
307 }
308 }
309 rcu_read_unlock();
310 /* None of the bound addresses match the source address of the
311 * dst. So release it.
312 */
313 dst_release(dst);
314 dst = NULL;
261 } 315 }
262 316
263 dst = ip6_route_output(&init_net, NULL, &fl6); 317 /* Walk through the bind address list and try to get the
264 if (!dst->error) { 318 * best source address for a given destination.
319 */
320 rcu_read_lock();
321 list_for_each_entry_rcu(laddr, &bp->address_list, list) {
 322 if (!laddr->valid || laddr->state != SCTP_ADDR_SRC)
323 continue;
324 if ((laddr->a.sa.sa_family == AF_INET6) &&
325 (scope <= sctp_scope(&laddr->a))) {
326 bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a);
327 if (!baddr || (matchlen < bmatchlen)) {
328 baddr = &laddr->a;
329 matchlen = bmatchlen;
330 }
331 }
332 }
333 rcu_read_unlock();
334 if (baddr) {
335 ipv6_addr_copy(&fl6->saddr, &baddr->v6.sin6_addr);
336 fl6->fl6_sport = baddr->v6.sin6_port;
337 dst = ip6_dst_lookup_flow(sk, fl6, NULL, false);
338 }
339
340out:
341 if (!IS_ERR(dst)) {
265 struct rt6_info *rt; 342 struct rt6_info *rt;
266 rt = (struct rt6_info *)dst; 343 rt = (struct rt6_info *)dst;
344 t->dst = dst;
267 SCTP_DEBUG_PRINTK("rt6_dst:%pI6 rt6_src:%pI6\n", 345 SCTP_DEBUG_PRINTK("rt6_dst:%pI6 rt6_src:%pI6\n",
268 &rt->rt6i_dst.addr, &rt->rt6i_src.addr); 346 &rt->rt6i_dst.addr, &fl6->saddr);
269 return dst; 347 } else {
348 t->dst = NULL;
349 SCTP_DEBUG_PRINTK("NO ROUTE\n");
270 } 350 }
271 SCTP_DEBUG_PRINTK("NO ROUTE\n");
272 dst_release(dst);
273 return NULL;
274} 351}
275 352
276/* Returns the number of consecutive initial bits that match in the 2 ipv6 353/* Returns the number of consecutive initial bits that match in the 2 ipv6
@@ -286,64 +363,18 @@ static inline int sctp_v6_addr_match_len(union sctp_addr *s1,
286 * and asoc's bind address list. 363 * and asoc's bind address list.
287 */ 364 */
288static void sctp_v6_get_saddr(struct sctp_sock *sk, 365static void sctp_v6_get_saddr(struct sctp_sock *sk,
289 struct sctp_association *asoc, 366 struct sctp_transport *t,
290 struct dst_entry *dst, 367 struct flowi *fl)
291 union sctp_addr *daddr,
292 union sctp_addr *saddr)
293{ 368{
294 struct sctp_bind_addr *bp; 369 struct flowi6 *fl6 = &fl->u.ip6;
295 struct sctp_sockaddr_entry *laddr; 370 union sctp_addr *saddr = &t->saddr;
296 sctp_scope_t scope;
297 union sctp_addr *baddr = NULL;
298 __u8 matchlen = 0;
299 __u8 bmatchlen;
300 371
301 SCTP_DEBUG_PRINTK("%s: asoc:%p dst:%p daddr:%pI6 ", 372 SCTP_DEBUG_PRINTK("%s: asoc:%p dst:%p\n", __func__, t->asoc, t->dst);
302 __func__, asoc, dst, &daddr->v6.sin6_addr);
303
304 if (!asoc) {
305 ipv6_dev_get_saddr(sock_net(sctp_opt2sk(sk)),
306 dst ? ip6_dst_idev(dst)->dev : NULL,
307 &daddr->v6.sin6_addr,
308 inet6_sk(&sk->inet.sk)->srcprefs,
309 &saddr->v6.sin6_addr);
310 SCTP_DEBUG_PRINTK("saddr from ipv6_get_saddr: %pI6\n",
311 &saddr->v6.sin6_addr);
312 return;
313 }
314
315 scope = sctp_scope(daddr);
316
317 bp = &asoc->base.bind_addr;
318 373
319 /* Go through the bind address list and find the best source address 374 if (t->dst) {
320 * that matches the scope of the destination address. 375 saddr->v6.sin6_family = AF_INET6;
321 */ 376 ipv6_addr_copy(&saddr->v6.sin6_addr, &fl6->saddr);
322 rcu_read_lock();
323 list_for_each_entry_rcu(laddr, &bp->address_list, list) {
324 if (!laddr->valid)
325 continue;
326 if ((laddr->state == SCTP_ADDR_SRC) &&
327 (laddr->a.sa.sa_family == AF_INET6) &&
328 (scope <= sctp_scope(&laddr->a))) {
329 bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a);
330 if (!baddr || (matchlen < bmatchlen)) {
331 baddr = &laddr->a;
332 matchlen = bmatchlen;
333 }
334 }
335 }
336
337 if (baddr) {
338 memcpy(saddr, baddr, sizeof(union sctp_addr));
339 SCTP_DEBUG_PRINTK("saddr: %pI6\n", &saddr->v6.sin6_addr);
340 } else {
341 pr_err("%s: asoc:%p Could not find a valid source "
342 "address for the dest:%pI6\n",
343 __func__, asoc, &daddr->v6.sin6_addr);
344 } 377 }
345
346 rcu_read_unlock();
347} 378}
348 379
349/* Make a copy of all potential local addresses. */ 380/* Make a copy of all potential local addresses. */
@@ -465,14 +496,13 @@ static int sctp_v6_to_addr_param(const union sctp_addr *addr,
465 return length; 496 return length;
466} 497}
467 498
468/* Initialize a sctp_addr from a dst_entry. */ 499/* Initialize a sctp_addr from struct in6_addr. */
469static void sctp_v6_dst_saddr(union sctp_addr *addr, struct dst_entry *dst, 500static void sctp_v6_to_addr(union sctp_addr *addr, struct in6_addr *saddr,
470 __be16 port) 501 __be16 port)
471{ 502{
472 struct rt6_info *rt = (struct rt6_info *)dst;
473 addr->sa.sa_family = AF_INET6; 503 addr->sa.sa_family = AF_INET6;
474 addr->v6.sin6_port = port; 504 addr->v6.sin6_port = port;
475 ipv6_addr_copy(&addr->v6.sin6_addr, &rt->rt6i_src.addr); 505 ipv6_addr_copy(&addr->v6.sin6_addr, saddr);
476} 506}
477 507
478/* Compare addresses exactly. 508/* Compare addresses exactly.
@@ -531,7 +561,7 @@ static int sctp_v6_is_any(const union sctp_addr *addr)
531static int sctp_v6_available(union sctp_addr *addr, struct sctp_sock *sp) 561static int sctp_v6_available(union sctp_addr *addr, struct sctp_sock *sp)
532{ 562{
533 int type; 563 int type;
534 struct in6_addr *in6 = (struct in6_addr *)&addr->v6.sin6_addr; 564 const struct in6_addr *in6 = (const struct in6_addr *)&addr->v6.sin6_addr;
535 565
536 type = ipv6_addr_type(in6); 566 type = ipv6_addr_type(in6);
537 if (IPV6_ADDR_ANY == type) 567 if (IPV6_ADDR_ANY == type)
@@ -959,7 +989,6 @@ static struct sctp_af sctp_af_inet6 = {
959 .to_sk_daddr = sctp_v6_to_sk_daddr, 989 .to_sk_daddr = sctp_v6_to_sk_daddr,
960 .from_addr_param = sctp_v6_from_addr_param, 990 .from_addr_param = sctp_v6_from_addr_param,
961 .to_addr_param = sctp_v6_to_addr_param, 991 .to_addr_param = sctp_v6_to_addr_param,
962 .dst_saddr = sctp_v6_dst_saddr,
963 .cmp_addr = sctp_v6_cmp_addr, 992 .cmp_addr = sctp_v6_cmp_addr,
964 .scope = sctp_v6_scope, 993 .scope = sctp_v6_scope,
965 .addr_valid = sctp_v6_addr_valid, 994 .addr_valid = sctp_v6_addr_valid,
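The fallback path picks the bind address with the longest prefix match against the destination. For reference, a hedged sketch of what sctp_v6_addr_match_len() computes (the in-tree helper is forward-declared at the top of this file; this standalone version is illustrative only):

	#include <linux/in6.h>
	#include <asm/byteorder.h>

	static int v6_match_len(const struct in6_addr *a, const struct in6_addr *b)
	{
		int i, j;

		for (i = 0; i < 4; i++) {
			__u32 d = ntohl(a->s6_addr32[i] ^ b->s6_addr32[i]);

			if (d) {
				for (j = 0; j < 32; j++)
					if (d & (1U << (31 - j)))
						break;
				return i * 32 + j;	/* leading bits matched */
			}
		}
		return 128;	/* addresses are identical */
	}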
diff --git a/net/sctp/output.c b/net/sctp/output.c
index b4f3cf06d8da..08b3cead6503 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -500,23 +500,20 @@ int sctp_packet_transmit(struct sctp_packet *packet)
500 * Note: Adler-32 is no longer applicable, as has been replaced 500 * Note: Adler-32 is no longer applicable, as has been replaced
501 * by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>. 501 * by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>.
502 */ 502 */
503 if (!sctp_checksum_disable && 503 if (!sctp_checksum_disable) {
504 !(dst->dev->features & (NETIF_F_NO_CSUM | NETIF_F_SCTP_CSUM))) { 504 if (!(dst->dev->features & NETIF_F_SCTP_CSUM)) {
505 __u32 crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len); 505 __u32 crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len);
506 506
507 /* 3) Put the resultant value into the checksum field in the 507 /* 3) Put the resultant value into the checksum field in the
508 * common header, and leave the rest of the bits unchanged. 508 * common header, and leave the rest of the bits unchanged.
509 */ 509 */
510 sh->checksum = sctp_end_cksum(crc32); 510 sh->checksum = sctp_end_cksum(crc32);
511 } else { 511 } else {
512 if (dst->dev->features & NETIF_F_SCTP_CSUM) {
513 /* no need to seed pseudo checksum for SCTP */ 512 /* no need to seed pseudo checksum for SCTP */
514 nskb->ip_summed = CHECKSUM_PARTIAL; 513 nskb->ip_summed = CHECKSUM_PARTIAL;
515 nskb->csum_start = (skb_transport_header(nskb) - 514 nskb->csum_start = (skb_transport_header(nskb) -
516 nskb->head); 515 nskb->head);
517 nskb->csum_offset = offsetof(struct sctphdr, checksum); 516 nskb->csum_offset = offsetof(struct sctphdr, checksum);
518 } else {
519 nskb->ip_summed = CHECKSUM_UNNECESSARY;
520 } 517 }
521 } 518 }
522 519
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index bf92a5b68f8b..d03682109b7a 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -131,7 +131,8 @@ static inline int sctp_cacc_skip_3_1_d(struct sctp_transport *primary,
131static inline int sctp_cacc_skip_3_1_f(struct sctp_transport *transport, 131static inline int sctp_cacc_skip_3_1_f(struct sctp_transport *transport,
132 int count_of_newacks) 132 int count_of_newacks)
133{ 133{
134 if (count_of_newacks < 2 && !transport->cacc.cacc_saw_newack) 134 if (count_of_newacks < 2 &&
135 (transport && !transport->cacc.cacc_saw_newack))
135 return 1; 136 return 1;
136 return 0; 137 return 0;
137} 138}
@@ -319,7 +320,6 @@ int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
319 * chunk. 320 * chunk.
320 */ 321 */
321 switch (q->asoc->state) { 322 switch (q->asoc->state) {
322 case SCTP_STATE_EMPTY:
323 case SCTP_STATE_CLOSED: 323 case SCTP_STATE_CLOSED:
324 case SCTP_STATE_SHUTDOWN_PENDING: 324 case SCTP_STATE_SHUTDOWN_PENDING:
325 case SCTP_STATE_SHUTDOWN_SENT: 325 case SCTP_STATE_SHUTDOWN_SENT:
@@ -577,6 +577,13 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
577 * try to send as much as possible. 577 * try to send as much as possible.
578 */ 578 */
579 list_for_each_entry_safe(chunk, chunk1, lqueue, transmitted_list) { 579 list_for_each_entry_safe(chunk, chunk1, lqueue, transmitted_list) {
580 /* If the chunk is abandoned, move it to abandoned list. */
581 if (sctp_chunk_abandoned(chunk)) {
582 list_del_init(&chunk->transmitted_list);
583 sctp_insert_list(&q->abandoned,
584 &chunk->transmitted_list);
585 continue;
586 }
580 587
581 /* Make sure that Gap Acked TSNs are not retransmitted. A 588 /* Make sure that Gap Acked TSNs are not retransmitted. A
582 * simple approach is just to move such TSNs out of the 589 * simple approach is just to move such TSNs out of the
@@ -618,9 +625,12 @@ redo:
618 625
619 /* If we are retransmitting, we should only 626 /* If we are retransmitting, we should only
620 * send a single packet. 627 * send a single packet.
628 * Otherwise, try appending this chunk again.
621 */ 629 */
622 if (rtx_timeout || fast_rtx) 630 if (rtx_timeout || fast_rtx)
623 done = 1; 631 done = 1;
632 else
633 goto redo;
624 634
625 /* Bundle next chunk in the next round. */ 635 /* Bundle next chunk in the next round. */
626 break; 636 break;
@@ -1572,6 +1582,8 @@ static void sctp_check_transmitted(struct sctp_outq *q,
1572#endif /* SCTP_DEBUG */ 1582#endif /* SCTP_DEBUG */
1573 if (transport) { 1583 if (transport) {
1574 if (bytes_acked) { 1584 if (bytes_acked) {
1585 struct sctp_association *asoc = transport->asoc;
1586
1575 /* We may have counted DATA that was migrated 1587 /* We may have counted DATA that was migrated
1576 * to this transport due to DEL-IP operation. 1588 * to this transport due to DEL-IP operation.
1577 * Subtract those bytes, since they were never 1589 * Subtract those bytes, since they were never
@@ -1590,6 +1602,17 @@ static void sctp_check_transmitted(struct sctp_outq *q,
1590 transport->error_count = 0; 1602 transport->error_count = 0;
1591 transport->asoc->overall_error_count = 0; 1603 transport->asoc->overall_error_count = 0;
1592 1604
1605 /*
1606 * While in SHUTDOWN PENDING, we may have started
1607 * the T5 shutdown guard timer after reaching the
1608 * retransmission limit. Stop that timer as soon
1609 * as the receiver acknowledges any data.
1610 */
1611 if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING &&
1612 del_timer(&asoc->timers
1613 [SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]))
1614 sctp_association_put(asoc);
1615
1593 /* Mark the destination transport address as 1616 /* Mark the destination transport address as
1594 * active if it is not so marked. 1617 * active if it is not so marked.
1595 */ 1618 */
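
The del_timer()/sctp_association_put() pairing above follows the usual kernel idiom: a pending timer holds a reference on its owning object, and only the caller that actually deactivates the timer (del_timer() returns nonzero) may drop that reference. A toy stand-alone sketch of the idiom, with illustrative names only:

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy model of "a pending timer holds a reference": names are
     * illustrative, not kernel API. */
    struct toy_assoc {
        int refcnt;
        bool timer_pending;
    };

    static void toy_put(struct toy_assoc *a)
    {
        if (--a->refcnt == 0)
            printf("association freed\n");
    }

    /* Mirrors del_timer(): true only for the caller that actually
     * deactivated a pending timer, so at most one reference is put. */
    static bool toy_del_timer(struct toy_assoc *a)
    {
        if (!a->timer_pending)
            return false;
        a->timer_pending = false;
        return true;
    }

    int main(void)
    {
        struct toy_assoc a = { .refcnt = 2, .timer_pending = true };

        if (toy_del_timer(&a))   /* wins: drop the timer's reference */
            toy_put(&a);
        if (toy_del_timer(&a))   /* loses: no double put */
            toy_put(&a);
        toy_put(&a);             /* drop the caller's own reference */
        return 0;
    }
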
@@ -1619,10 +1642,15 @@ static void sctp_check_transmitted(struct sctp_outq *q,
1619 * A sender is doing zero window probing when the 1642 * A sender is doing zero window probing when the
1620 * receiver's advertised window is zero, and there is 1643 * receiver's advertised window is zero, and there is
1621 * only one data chunk in flight to the receiver. 1644 * only one data chunk in flight to the receiver.
1645 *
1646 * Allow the association to timeout while in SHUTDOWN
1647 * PENDING or SHUTDOWN RECEIVED in case the receiver
1648 * stays in zero window mode forever.
1622 */ 1649 */
1623 if (!q->asoc->peer.rwnd && 1650 if (!q->asoc->peer.rwnd &&
1624 !list_empty(&tlist) && 1651 !list_empty(&tlist) &&
1625 (sack_ctsn+2 == q->asoc->next_tsn)) { 1652 (sack_ctsn+2 == q->asoc->next_tsn) &&
1653 q->asoc->state < SCTP_STATE_SHUTDOWN_PENDING) {
1626 SCTP_DEBUG_PRINTK("%s: SACK received for zero " 1654 SCTP_DEBUG_PRINTK("%s: SACK received for zero "
1627 "window probe: %u\n", 1655 "window probe: %u\n",
1628 __func__, sack_ctsn); 1656 __func__, sack_ctsn);
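
The probe test reads a little tersely: next_tsn is the next TSN to be assigned, so the highest TSN ever sent is next_tsn - 1, and sack_ctsn + 2 == next_tsn therefore means exactly one chunk (TSN sack_ctsn + 1) is outstanding. A tiny sketch of that arithmetic:

    #include <assert.h>
    #include <stdint.h>

    /* sack_ctsn + 2 == next_tsn <=> exactly one chunk outstanding:
     * the lone probe carries TSN sack_ctsn + 1, and next_tsn is one
     * past the highest TSN ever sent. */
    int main(void)
    {
        uint32_t sack_ctsn = 1000;             /* peer acked ..1000 */
        uint32_t probe_tsn = sack_ctsn + 1;    /* the single probe  */
        uint32_t next_tsn  = probe_tsn + 1;    /* next to assign    */

        assert(sack_ctsn + 2 == next_tsn);
        return 0;
    }
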
@@ -1683,8 +1711,9 @@ static void sctp_mark_missing(struct sctp_outq *q,
1683 /* SFR-CACC may require us to skip marking 1711 /* SFR-CACC may require us to skip marking
1684 * this chunk as missing. 1712 * this chunk as missing.
1685 */ 1713 */
1686 if (!transport || !sctp_cacc_skip(primary, transport, 1714 if (!transport || !sctp_cacc_skip(primary,
1687 count_of_newacks, tsn)) { 1715 chunk->transport,
1716 count_of_newacks, tsn)) {
1688 chunk->tsn_missing_report++; 1717 chunk->tsn_missing_report++;
1689 1718
1690 SCTP_DEBUG_PRINTK( 1719 SCTP_DEBUG_PRINTK(
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index 61aacfbbaa92..05a6ce214714 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -212,7 +212,7 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
212 sctp_for_each_hentry(epb, node, &head->chain) { 212 sctp_for_each_hentry(epb, node, &head->chain) {
213 ep = sctp_ep(epb); 213 ep = sctp_ep(epb);
214 sk = epb->sk; 214 sk = epb->sk;
215 seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk, 215 seq_printf(seq, "%8pK %8pK %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
216 sctp_sk(sk)->type, sk->sk_state, hash, 216 sctp_sk(sk)->type, sk->sk_state, hash,
217 epb->bind_addr.port, 217 epb->bind_addr.port,
218 sock_i_uid(sk), sock_i_ino(sk)); 218 sock_i_uid(sk), sock_i_ino(sk));
@@ -316,7 +316,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
316 assoc = sctp_assoc(epb); 316 assoc = sctp_assoc(epb);
317 sk = epb->sk; 317 sk = epb->sk;
318 seq_printf(seq, 318 seq_printf(seq,
319 "%8p %8p %-3d %-3d %-2d %-4d " 319 "%8pK %8pK %-3d %-3d %-2d %-4d "
320 "%4d %8d %8d %7d %5lu %-5d %5d ", 320 "%4d %8d %8d %7d %5lu %-5d %5d ",
321 assoc, sk, sctp_sk(sk)->type, sk->sk_state, 321 assoc, sk, sctp_sk(sk)->type, sk->sk_state,
322 assoc->state, hash, 322 assoc->state, hash,
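
The %p to %pK switch in the two seq_printf() calls above is a hardening change: %pK honours the kptr_restrict sysctl and shows zeroed pointers to readers that are not allowed to learn kernel addresses. A small user-space illustration of the visible effect (the restriction policy itself lives in the kernel):

    #include <stdio.h>

    /* What %pK changes for a reader: with kptr_restrict in effect,
     * unprivileged reads of /proc/net/sctp/* see zeros where the raw
     * endpoint/socket addresses used to be. */
    static void print_ptr(const void *p, int restricted)
    {
        unsigned long v = restricted ? 0UL : (unsigned long)p;

        printf("%016lx\n", v);
    }

    int main(void)
    {
        int x;

        print_ptr(&x, 0);   /* privileged view: the real address    */
        print_ptr(&x, 1);   /* restricted view: what %pK would show */
        return 0;
    }
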
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index d5bf91d04f63..207175b2f40a 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -230,13 +230,6 @@ static void sctp_free_local_addr_list(void)
230 } 230 }
231} 231}
232 232
233void sctp_local_addr_free(struct rcu_head *head)
234{
235 struct sctp_sockaddr_entry *e = container_of(head,
236 struct sctp_sockaddr_entry, rcu);
237 kfree(e);
238}
239
240/* Copy the local addresses which are valid for 'scope' into 'bp'. */ 233/* Copy the local addresses which are valid for 'scope' into 'bp'. */
241int sctp_copy_local_addr_list(struct sctp_bind_addr *bp, sctp_scope_t scope, 234int sctp_copy_local_addr_list(struct sctp_bind_addr *bp, sctp_scope_t scope,
242 gfp_t gfp, int copy_flags) 235 gfp_t gfp, int copy_flags)
@@ -339,13 +332,12 @@ static int sctp_v4_to_addr_param(const union sctp_addr *addr,
339} 332}
340 333
341/* Initialize a sctp_addr from a dst_entry. */ 334/* Initialize a sctp_addr from a dst_entry. */
342static void sctp_v4_dst_saddr(union sctp_addr *saddr, struct dst_entry *dst, 335static void sctp_v4_dst_saddr(union sctp_addr *saddr, struct flowi4 *fl4,
343 __be16 port) 336 __be16 port)
344{ 337{
345 struct rtable *rt = (struct rtable *)dst;
346 saddr->v4.sin_family = AF_INET; 338 saddr->v4.sin_family = AF_INET;
347 saddr->v4.sin_port = port; 339 saddr->v4.sin_port = port;
348 saddr->v4.sin_addr.s_addr = rt->rt_src; 340 saddr->v4.sin_addr.s_addr = fl4->saddr;
349} 341}
350 342
351/* Compare two addresses exactly. */ 343/* Compare two addresses exactly. */
@@ -463,35 +455,36 @@ static sctp_scope_t sctp_v4_scope(union sctp_addr *addr)
463 * addresses. If an association is passed, tries to get a dst entry with a 455 * addresses. If an association is passed, tries to get a dst entry with a
464 * source address that matches an address in the bind address list. 456 * source address that matches an address in the bind address list.
465 */ 457 */
466static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc, 458static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
467 union sctp_addr *daddr, 459 struct flowi *fl, struct sock *sk)
468 union sctp_addr *saddr)
469{ 460{
461 struct sctp_association *asoc = t->asoc;
470 struct rtable *rt; 462 struct rtable *rt;
471 struct flowi4 fl4; 463 struct flowi4 *fl4 = &fl->u.ip4;
472 struct sctp_bind_addr *bp; 464 struct sctp_bind_addr *bp;
473 struct sctp_sockaddr_entry *laddr; 465 struct sctp_sockaddr_entry *laddr;
474 struct dst_entry *dst = NULL; 466 struct dst_entry *dst = NULL;
467 union sctp_addr *daddr = &t->ipaddr;
475 union sctp_addr dst_saddr; 468 union sctp_addr dst_saddr;
476 469
477 memset(&fl4, 0x0, sizeof(struct flowi4)); 470 memset(fl4, 0x0, sizeof(struct flowi4));
478 fl4.daddr = daddr->v4.sin_addr.s_addr; 471 fl4->daddr = daddr->v4.sin_addr.s_addr;
479 fl4.fl4_dport = daddr->v4.sin_port; 472 fl4->fl4_dport = daddr->v4.sin_port;
480 fl4.flowi4_proto = IPPROTO_SCTP; 473 fl4->flowi4_proto = IPPROTO_SCTP;
481 if (asoc) { 474 if (asoc) {
482 fl4.flowi4_tos = RT_CONN_FLAGS(asoc->base.sk); 475 fl4->flowi4_tos = RT_CONN_FLAGS(asoc->base.sk);
483 fl4.flowi4_oif = asoc->base.sk->sk_bound_dev_if; 476 fl4->flowi4_oif = asoc->base.sk->sk_bound_dev_if;
484 fl4.fl4_sport = htons(asoc->base.bind_addr.port); 477 fl4->fl4_sport = htons(asoc->base.bind_addr.port);
485 } 478 }
486 if (saddr) { 479 if (saddr) {
487 fl4.saddr = saddr->v4.sin_addr.s_addr; 480 fl4->saddr = saddr->v4.sin_addr.s_addr;
488 fl4.fl4_sport = saddr->v4.sin_port; 481 fl4->fl4_sport = saddr->v4.sin_port;
489 } 482 }
490 483
491 SCTP_DEBUG_PRINTK("%s: DST:%pI4, SRC:%pI4 - ", 484 SCTP_DEBUG_PRINTK("%s: DST:%pI4, SRC:%pI4 - ",
492 __func__, &fl4.daddr, &fl4.saddr); 485 __func__, &fl4->daddr, &fl4->saddr);
493 486
494 rt = ip_route_output_key(&init_net, &fl4); 487 rt = ip_route_output_key(&init_net, fl4);
495 if (!IS_ERR(rt)) 488 if (!IS_ERR(rt))
496 dst = &rt->dst; 489 dst = &rt->dst;
497 490
@@ -507,7 +500,7 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
507 /* Walk through the bind address list and look for a bind 500 /* Walk through the bind address list and look for a bind
508 * address that matches the source address of the returned dst. 501 * address that matches the source address of the returned dst.
509 */ 502 */
510 sctp_v4_dst_saddr(&dst_saddr, dst, htons(bp->port)); 503 sctp_v4_dst_saddr(&dst_saddr, fl4, htons(bp->port));
511 rcu_read_lock(); 504 rcu_read_lock();
512 list_for_each_entry_rcu(laddr, &bp->address_list, list) { 505 list_for_each_entry_rcu(laddr, &bp->address_list, list) {
513 if (!laddr->valid || (laddr->state != SCTP_ADDR_SRC)) 506 if (!laddr->valid || (laddr->state != SCTP_ADDR_SRC))
@@ -533,9 +526,9 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
533 continue; 526 continue;
534 if ((laddr->state == SCTP_ADDR_SRC) && 527 if ((laddr->state == SCTP_ADDR_SRC) &&
535 (AF_INET == laddr->a.sa.sa_family)) { 528 (AF_INET == laddr->a.sa.sa_family)) {
536 fl4.saddr = laddr->a.v4.sin_addr.s_addr; 529 fl4->saddr = laddr->a.v4.sin_addr.s_addr;
537 fl4.fl4_sport = laddr->a.v4.sin_port; 530 fl4->fl4_sport = laddr->a.v4.sin_port;
538 rt = ip_route_output_key(&init_net, &fl4); 531 rt = ip_route_output_key(&init_net, fl4);
539 if (!IS_ERR(rt)) { 532 if (!IS_ERR(rt)) {
540 dst = &rt->dst; 533 dst = &rt->dst;
541 goto out_unlock; 534 goto out_unlock;
@@ -546,33 +539,27 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
546out_unlock: 539out_unlock:
547 rcu_read_unlock(); 540 rcu_read_unlock();
548out: 541out:
542 t->dst = dst;
549 if (dst) 543 if (dst)
550 SCTP_DEBUG_PRINTK("rt_dst:%pI4, rt_src:%pI4\n", 544 SCTP_DEBUG_PRINTK("rt_dst:%pI4, rt_src:%pI4\n",
551 &rt->rt_dst, &rt->rt_src); 545 &fl4->daddr, &fl4->saddr);
552 else 546 else
553 SCTP_DEBUG_PRINTK("NO ROUTE\n"); 547 SCTP_DEBUG_PRINTK("NO ROUTE\n");
554
555 return dst;
556} 548}
557 549
558/* For v4, the source address is cached in the route entry (dst). So no need 550/* For v4, the source address is cached in the route entry (dst). So no need
559 * to cache it separately and hence this is an empty routine. 551 * to cache it separately and hence this is an empty routine.
560 */ 552 */
561static void sctp_v4_get_saddr(struct sctp_sock *sk, 553static void sctp_v4_get_saddr(struct sctp_sock *sk,
562 struct sctp_association *asoc, 554 struct sctp_transport *t,
563 struct dst_entry *dst, 555 struct flowi *fl)
564 union sctp_addr *daddr,
565 union sctp_addr *saddr)
566{ 556{
567 struct rtable *rt = (struct rtable *)dst; 557 union sctp_addr *saddr = &t->saddr;
568 558 struct rtable *rt = (struct rtable *)t->dst;
569 if (!asoc)
570 return;
571 559
572 if (rt) { 560 if (rt) {
573 saddr->v4.sin_family = AF_INET; 561 saddr->v4.sin_family = AF_INET;
574 saddr->v4.sin_port = htons(asoc->base.bind_addr.port); 562 saddr->v4.sin_addr.s_addr = fl->u.ip4.saddr;
575 saddr->v4.sin_addr.s_addr = rt->rt_src;
576 } 563 }
577} 564}
578 565
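
The refactor above threads a caller-owned struct flowi through the route lookup so that the transport keeps the dst and the flow together; the chosen source address can then be read straight back out of the flow, which is how the slimmed-down sctp_v4_get_saddr() works via fl->u.ip4.saddr. A toy sketch of the fill-key/read-back pattern, with stand-in types:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Stand-in for struct flowi4: the routing key that the rewritten
     * sctp_v4_get_dst() now fills in caller-owned storage, so the
     * source address survives the lookup and can be read back later. */
    struct toy_flow4 {
        uint32_t daddr, saddr;
        uint16_t dport, sport;
        uint8_t  proto;
    };

    int main(void)
    {
        struct toy_flow4 fl4;

        memset(&fl4, 0, sizeof(fl4));        /* as the kernel code does */
        fl4.daddr = inet_addr("192.0.2.1");
        fl4.dport = htons(9899);
        fl4.proto = 132;                     /* IPPROTO_SCTP */

        /* A real route lookup would fill fl4.saddr; afterwards the
         * source address is recovered from the flow, not the route. */
        fl4.saddr = inet_addr("198.51.100.7");
        printf("cached source: %s\n",
               inet_ntoa((struct in_addr){ .s_addr = fl4.saddr }));
        return 0;
    }
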
@@ -681,7 +668,7 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
681 } 668 }
682 spin_unlock_bh(&sctp_local_addr_lock); 669 spin_unlock_bh(&sctp_local_addr_lock);
683 if (found) 670 if (found)
684 call_rcu(&addr->rcu, sctp_local_addr_free); 671 kfree_rcu(addr, rcu);
685 break; 672 break;
686 } 673 }
687 674
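
The kfree_rcu() conversion above (together with the removal of sctp_local_addr_free() earlier in this file) is mechanical: when a call_rcu() callback only does container_of() plus kfree(), the offset of the rcu_head inside the object is the only information it encodes, and kfree_rcu(ptr, field) carries that same offset. A user-space toy showing the equivalence (no real RCU grace period here):

    #include <stddef.h>
    #include <stdlib.h>

    struct toy_rcu_head { void *next; };    /* stand-in */

    struct toy_addr_entry {
        int addr;
        struct toy_rcu_head rcu;
    };

    /* What the removed callback did, generically: container_of() on
     * the embedded head, then free. kfree_rcu(e, rcu) encodes the
     * same offsetof() and needs no hand-written function. */
    static void toy_free_cb(struct toy_rcu_head *head)
    {
        struct toy_addr_entry *e = (struct toy_addr_entry *)
            ((char *)head - offsetof(struct toy_addr_entry, rcu));

        free(e);
    }

    int main(void)
    {
        struct toy_addr_entry *e = malloc(sizeof(*e));

        if (!e)
            return 1;
        /* call_rcu(&e->rcu, toy_free_cb) would run this after a grace
         * period; here we invoke it directly for illustration. */
        toy_free_cb(&e->rcu);
        return 0;
    }
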
@@ -854,14 +841,14 @@ static inline int sctp_v4_xmit(struct sk_buff *skb,
854 841
855 SCTP_DEBUG_PRINTK("%s: skb:%p, len:%d, src:%pI4, dst:%pI4\n", 842 SCTP_DEBUG_PRINTK("%s: skb:%p, len:%d, src:%pI4, dst:%pI4\n",
856 __func__, skb, skb->len, 843 __func__, skb, skb->len,
857 &skb_rtable(skb)->rt_src, 844 &transport->fl.u.ip4.saddr,
858 &skb_rtable(skb)->rt_dst); 845 &transport->fl.u.ip4.daddr);
859 846
860 inet->pmtudisc = transport->param_flags & SPP_PMTUD_ENABLE ? 847 inet->pmtudisc = transport->param_flags & SPP_PMTUD_ENABLE ?
861 IP_PMTUDISC_DO : IP_PMTUDISC_DONT; 848 IP_PMTUDISC_DO : IP_PMTUDISC_DONT;
862 849
863 SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS); 850 SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS);
864 return ip_queue_xmit(skb); 851 return ip_queue_xmit(skb, &transport->fl);
865} 852}
866 853
867static struct sctp_af sctp_af_inet; 854static struct sctp_af sctp_af_inet;
@@ -950,7 +937,6 @@ static struct sctp_af sctp_af_inet = {
950 .to_sk_daddr = sctp_v4_to_sk_daddr, 937 .to_sk_daddr = sctp_v4_to_sk_daddr,
951 .from_addr_param = sctp_v4_from_addr_param, 938 .from_addr_param = sctp_v4_from_addr_param,
952 .to_addr_param = sctp_v4_to_addr_param, 939 .to_addr_param = sctp_v4_to_addr_param,
953 .dst_saddr = sctp_v4_dst_saddr,
954 .cmp_addr = sctp_v4_cmp_addr, 940 .cmp_addr = sctp_v4_cmp_addr,
955 .addr_valid = sctp_v4_addr_valid, 941 .addr_valid = sctp_v4_addr_valid,
956 .inaddr_any = sctp_v4_inaddr_any, 942 .inaddr_any = sctp_v4_inaddr_any,
@@ -1072,7 +1058,6 @@ SCTP_STATIC __init int sctp_init(void)
1072 int status = -EINVAL; 1058 int status = -EINVAL;
1073 unsigned long goal; 1059 unsigned long goal;
1074 unsigned long limit; 1060 unsigned long limit;
1075 unsigned long nr_pages;
1076 int max_share; 1061 int max_share;
1077 int order; 1062 int order;
1078 1063
@@ -1162,15 +1147,7 @@ SCTP_STATIC __init int sctp_init(void)
1162 /* Initialize handle used for association ids. */ 1147 /* Initialize handle used for association ids. */
1163 idr_init(&sctp_assocs_id); 1148 idr_init(&sctp_assocs_id);
1164 1149
1165 /* Set the pressure threshold to be a fraction of global memory that 1150 limit = nr_free_buffer_pages() / 8;
1166 * is up to 1/2 at 256 MB, decreasing toward zero with the amount of
1167 * memory, with a floor of 128 pages.
1168 * Note this initializes the data in sctpv6_prot too
1169 * Unabashedly stolen from tcp_init
1170 */
1171 nr_pages = totalram_pages - totalhigh_pages;
1172 limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
1173 limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
1174 limit = max(limit, 128UL); 1151 limit = max(limit, 128UL);
1175 sysctl_sctp_mem[0] = limit / 4 * 3; 1152 sysctl_sctp_mem[0] = limit / 4 * 3;
1176 sysctl_sctp_mem[1] = limit; 1153 sysctl_sctp_mem[1] = limit;
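
The replacement collapses the borrowed tcp_init() pressure formula into nr_free_buffer_pages() / 8 with a floor of 128 pages. A worked example of the resulting thresholds, assuming 262144 free buffer pages (1 GiB at 4 KiB pages); the real input comes from nr_free_buffer_pages():

    #include <stdio.h>

    int main(void)
    {
        /* Assumed input: 262144 free buffer pages; the kernel reads
         * this from nr_free_buffer_pages(). */
        unsigned long nr_pages = 262144;
        unsigned long limit = nr_pages / 8;

        if (limit < 128)
            limit = 128;    /* the max(limit, 128UL) floor */

        printf("sysctl_sctp_mem[0] = %lu pages\n", limit / 4 * 3);
        printf("sysctl_sctp_mem[1] = %lu pages\n", limit);
        return 0;
    }
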
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index b3434cc7d0cf..58eb27fed4b4 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1075,20 +1075,28 @@ nodata:
1075 1075
1076/* Make a HEARTBEAT chunk. */ 1076/* Make a HEARTBEAT chunk. */
1077struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc, 1077struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc,
1078 const struct sctp_transport *transport, 1078 const struct sctp_transport *transport)
1079 const void *payload, const size_t paylen)
1080{ 1079{
1081 struct sctp_chunk *retval = sctp_make_chunk(asoc, SCTP_CID_HEARTBEAT, 1080 struct sctp_chunk *retval;
1082 0, paylen); 1081 sctp_sender_hb_info_t hbinfo;
1082
1083 retval = sctp_make_chunk(asoc, SCTP_CID_HEARTBEAT, 0, sizeof(hbinfo));
1083 1084
1084 if (!retval) 1085 if (!retval)
1085 goto nodata; 1086 goto nodata;
1086 1087
1088 hbinfo.param_hdr.type = SCTP_PARAM_HEARTBEAT_INFO;
1089 hbinfo.param_hdr.length = htons(sizeof(sctp_sender_hb_info_t));
1090 hbinfo.daddr = transport->ipaddr;
1091 hbinfo.sent_at = jiffies;
1092 hbinfo.hb_nonce = transport->hb_nonce;
1093
1087 /* Cast away the 'const', as this is just telling the chunk 1094 /* Cast away the 'const', as this is just telling the chunk
1088 * what transport it belongs to. 1095 * what transport it belongs to.
1089 */ 1096 */
1090 retval->transport = (struct sctp_transport *) transport; 1097 retval->transport = (struct sctp_transport *) transport;
1091 retval->subh.hbs_hdr = sctp_addto_chunk(retval, paylen, payload); 1098 retval->subh.hbs_hdr = sctp_addto_chunk(retval, sizeof(hbinfo),
1099 &hbinfo);
1092 1100
1093nodata: 1101nodata:
1094 return retval; 1102 return retval;
@@ -2242,14 +2250,17 @@ int sctp_verify_init(const struct sctp_association *asoc,
2242 * Returns 0 on failure, else success. 2250 * Returns 0 on failure, else success.
2243 * FIXME: This is an association method. 2251 * FIXME: This is an association method.
2244 */ 2252 */
2245int sctp_process_init(struct sctp_association *asoc, sctp_cid_t cid, 2253int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
2246 const union sctp_addr *peer_addr, 2254 const union sctp_addr *peer_addr,
2247 sctp_init_chunk_t *peer_init, gfp_t gfp) 2255 sctp_init_chunk_t *peer_init, gfp_t gfp)
2248{ 2256{
2249 union sctp_params param; 2257 union sctp_params param;
2250 struct sctp_transport *transport; 2258 struct sctp_transport *transport;
2251 struct list_head *pos, *temp; 2259 struct list_head *pos, *temp;
2260 struct sctp_af *af;
2261 union sctp_addr addr;
2252 char *cookie; 2262 char *cookie;
2263 int src_match = 0;
2253 2264
2254 /* We must include the address that the INIT packet came from. 2265 /* We must include the address that the INIT packet came from.
2255 * This is the only address that matters for an INIT packet. 2266 * This is the only address that matters for an INIT packet.
@@ -2261,18 +2272,31 @@ int sctp_process_init(struct sctp_association *asoc, sctp_cid_t cid,
2261 * added as the primary transport. The source address seems to 2272 * added as the primary transport. The source address seems to
2262 * be a better choice than any of the embedded addresses. 2273 * be a better choice than any of the embedded addresses.
2263 */ 2274 */
2264 if (peer_addr) { 2275 if(!sctp_assoc_add_peer(asoc, peer_addr, gfp, SCTP_ACTIVE))
2265 if(!sctp_assoc_add_peer(asoc, peer_addr, gfp, SCTP_ACTIVE)) 2276 goto nomem;
2266 goto nomem; 2277
2267 } 2278 if (sctp_cmp_addr_exact(sctp_source(chunk), peer_addr))
2279 src_match = 1;
2268 2280
2269 /* Process the initialization parameters. */ 2281 /* Process the initialization parameters. */
2270 sctp_walk_params(param, peer_init, init_hdr.params) { 2282 sctp_walk_params(param, peer_init, init_hdr.params) {
2283 if (!src_match && (param.p->type == SCTP_PARAM_IPV4_ADDRESS ||
2284 param.p->type == SCTP_PARAM_IPV6_ADDRESS)) {
2285 af = sctp_get_af_specific(param_type2af(param.p->type));
2286 af->from_addr_param(&addr, param.addr,
2287 chunk->sctp_hdr->source, 0);
2288 if (sctp_cmp_addr_exact(sctp_source(chunk), &addr))
2289 src_match = 1;
2290 }
2271 2291
2272 if (!sctp_process_param(asoc, param, peer_addr, gfp)) 2292 if (!sctp_process_param(asoc, param, peer_addr, gfp))
2273 goto clean_up; 2293 goto clean_up;
2274 } 2294 }
2275 2295
2296 /* source address of chunk may not match any valid address */
2297 if (!src_match)
2298 goto clean_up;
2299
2276 /* AUTH: After processing the parameters, make sure that we 2300 /* AUTH: After processing the parameters, make sure that we
2277 * have all the required info to potentially do authentications. 2301 * have all the required info to potentially do authentications.
2278 */ 2302 */
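
The new src_match logic rejects an INIT whose source address is neither the address the packet arrived from nor one of the IPv4/IPv6 addresses embedded as parameters. A toy sketch of the acceptance test, with illustrative types in place of union sctp_addr:

    #include <stdio.h>
    #include <string.h>

    /* Toy acceptance test mirroring the new src_match logic: the
     * INIT's source must be the address it arrived from or one of the
     * addresses listed as parameters, else processing is abandoned. */
    struct toy_addr { char s[16]; };

    static int addr_eq(const struct toy_addr *a, const struct toy_addr *b)
    {
        return strcmp(a->s, b->s) == 0;
    }

    int main(void)
    {
        struct toy_addr source   = { "198.51.100.7" };  /* packet src */
        struct toy_addr peer     = { "192.0.2.1" };     /* primary    */
        struct toy_addr params[] = { { "192.0.2.1" },
                                     { "198.51.100.7" } };
        int src_match = addr_eq(&source, &peer);

        for (size_t i = 0; !src_match && i < 2; i++)
            if (addr_eq(&source, &params[i]))
                src_match = 1;

        puts(src_match ? "accept INIT" : "clean_up: source not listed");
        return 0;
    }
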
@@ -2923,7 +2947,7 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
2923 asconf_param->param_hdr.type != SCTP_PARAM_SET_PRIMARY) 2947 asconf_param->param_hdr.type != SCTP_PARAM_SET_PRIMARY)
2924 return SCTP_ERROR_UNKNOWN_PARAM; 2948 return SCTP_ERROR_UNKNOWN_PARAM;
2925 2949
2926 switch (addr_param->v4.param_hdr.type) { 2950 switch (addr_param->p.type) {
2927 case SCTP_PARAM_IPV6_ADDRESS: 2951 case SCTP_PARAM_IPV6_ADDRESS:
2928 if (!asoc->peer.ipv6_address) 2952 if (!asoc->peer.ipv6_address)
2929 return SCTP_ERROR_DNS_FAILED; 2953 return SCTP_ERROR_DNS_FAILED;
@@ -2936,7 +2960,7 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
2936 return SCTP_ERROR_DNS_FAILED; 2960 return SCTP_ERROR_DNS_FAILED;
2937 } 2961 }
2938 2962
2939 af = sctp_get_af_specific(param_type2af(addr_param->v4.param_hdr.type)); 2963 af = sctp_get_af_specific(param_type2af(addr_param->p.type));
2940 if (unlikely(!af)) 2964 if (unlikely(!af))
2941 return SCTP_ERROR_DNS_FAILED; 2965 return SCTP_ERROR_DNS_FAILED;
2942 2966
@@ -3100,7 +3124,7 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
3100 /* Skip the address parameter and store a pointer to the first 3124 /* Skip the address parameter and store a pointer to the first
3101 * asconf parameter. 3125 * asconf parameter.
3102 */ 3126 */
3103 length = ntohs(addr_param->v4.param_hdr.length); 3127 length = ntohs(addr_param->p.length);
3104 asconf_param = (sctp_addip_param_t *)((void *)addr_param + length); 3128 asconf_param = (sctp_addip_param_t *)((void *)addr_param + length);
3105 chunk_len -= length; 3129 chunk_len -= length;
3106 3130
@@ -3177,7 +3201,7 @@ static void sctp_asconf_param_success(struct sctp_association *asoc,
3177 ((void *)asconf_param + sizeof(sctp_addip_param_t)); 3201 ((void *)asconf_param + sizeof(sctp_addip_param_t));
3178 3202
3179 /* We have checked the packet before, so we do not check again. */ 3203 /* We have checked the packet before, so we do not check again. */
3180 af = sctp_get_af_specific(param_type2af(addr_param->v4.param_hdr.type)); 3204 af = sctp_get_af_specific(param_type2af(addr_param->p.type));
3181 af->from_addr_param(&addr, addr_param, htons(bp->port), 0); 3205 af->from_addr_param(&addr, addr_param, htons(bp->port), 0);
3182 3206
3183 switch (asconf_param->param_hdr.type) { 3207 switch (asconf_param->param_hdr.type) {
@@ -3193,11 +3217,8 @@ static void sctp_asconf_param_success(struct sctp_association *asoc,
3193 local_bh_enable(); 3217 local_bh_enable();
3194 list_for_each_entry(transport, &asoc->peer.transport_addr_list, 3218 list_for_each_entry(transport, &asoc->peer.transport_addr_list,
3195 transports) { 3219 transports) {
3196 if (transport->state == SCTP_ACTIVE)
3197 continue;
3198 dst_release(transport->dst); 3220 dst_release(transport->dst);
3199 sctp_transport_route(transport, NULL, 3221 transport->dst = NULL;
3200 sctp_sk(asoc->base.sk));
3201 } 3222 }
3202 break; 3223 break;
3203 case SCTP_PARAM_DEL_IP: 3224 case SCTP_PARAM_DEL_IP:
@@ -3207,8 +3228,7 @@ static void sctp_asconf_param_success(struct sctp_association *asoc,
3207 list_for_each_entry(transport, &asoc->peer.transport_addr_list, 3228 list_for_each_entry(transport, &asoc->peer.transport_addr_list,
3208 transports) { 3229 transports) {
3209 dst_release(transport->dst); 3230 dst_release(transport->dst);
3210 sctp_transport_route(transport, NULL, 3231 transport->dst = NULL;
3211 sctp_sk(asoc->base.sk));
3212 } 3232 }
3213 break; 3233 break;
3214 default: 3234 default:
@@ -3304,7 +3324,7 @@ int sctp_process_asconf_ack(struct sctp_association *asoc,
3304 /* Skip the address parameter in the last asconf sent and store a 3324 /* Skip the address parameter in the last asconf sent and store a
3305 * pointer to the first asconf parameter. 3325 * pointer to the first asconf parameter.
3306 */ 3326 */
3307 length = ntohs(addr_param->v4.param_hdr.length); 3327 length = ntohs(addr_param->p.length);
3308 asconf_param = (sctp_addip_param_t *)((void *)addr_param + length); 3328 asconf_param = (sctp_addip_param_t *)((void *)addr_param + length);
3309 asconf_len -= length; 3329 asconf_len -= length;
3310 3330
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 5f86ee4b54c1..6e0f88295aaf 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -595,8 +595,7 @@ static int sctp_cmd_process_init(sctp_cmd_seq_t *commands,
595 * fail during INIT processing (due to malloc problems), 595 * fail during INIT processing (due to malloc problems),
596 * just return the error and stop processing the stack. 596 * just return the error and stop processing the stack.
597 */ 597 */
598 if (!sctp_process_init(asoc, chunk->chunk_hdr->type, 598 if (!sctp_process_init(asoc, chunk, sctp_source(chunk), peer_init, gfp))
599 sctp_source(chunk), peer_init, gfp))
600 error = -ENOMEM; 599 error = -ENOMEM;
601 else 600 else
602 error = 0; 601 error = 0;
@@ -671,10 +670,19 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
671 /* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of the 670 /* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of the
672 * HEARTBEAT should clear the error counter of the destination 671 * HEARTBEAT should clear the error counter of the destination
673 * transport address to which the HEARTBEAT was sent. 672 * transport address to which the HEARTBEAT was sent.
674 * The association's overall error count is also cleared.
675 */ 673 */
676 t->error_count = 0; 674 t->error_count = 0;
677 t->asoc->overall_error_count = 0; 675
676 /*
677 * Although RFC4960 specifies that the overall error count must
678 * be cleared when a HEARTBEAT ACK is received, we make an
679 * exception while in SHUTDOWN PENDING. If the peer keeps its
680 * window shut forever, we may never be able to transmit our
 681 * outstanding data and rely on the retransmission limit being reached
 682 * to shut down the association.
683 */
684 if (t->asoc->state != SCTP_STATE_SHUTDOWN_PENDING)
685 t->asoc->overall_error_count = 0;
678 686
679 /* Clear the hb_sent flag to signal that we had a good 687 /* Clear the hb_sent flag to signal that we had a good
680 * acknowledgement. 688 * acknowledgement.
@@ -1415,12 +1423,6 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1415 SCTP_RTXR_T3_RTX); 1423 SCTP_RTXR_T3_RTX);
1416 break; 1424 break;
1417 1425
1418 case SCTP_CMD_TRANSMIT:
1419 /* Kick start transmission. */
1420 error = sctp_outq_uncork(&asoc->outqueue);
1421 local_cork = 0;
1422 break;
1423
1424 case SCTP_CMD_ECN_CE: 1426 case SCTP_CMD_ECN_CE:
1425 /* Do delayed CE processing. */ 1427 /* Do delayed CE processing. */
1426 sctp_do_ecn_ce_work(asoc, cmd->obj.u32); 1428 sctp_do_ecn_ce_work(asoc, cmd->obj.u32);
@@ -1444,6 +1446,13 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1444 sctp_cmd_setup_t2(commands, asoc, cmd->obj.ptr); 1446 sctp_cmd_setup_t2(commands, asoc, cmd->obj.ptr);
1445 break; 1447 break;
1446 1448
1449 case SCTP_CMD_TIMER_START_ONCE:
1450 timer = &asoc->timers[cmd->obj.to];
1451
1452 if (timer_pending(timer))
1453 break;
1454 /* fall through */
1455
1447 case SCTP_CMD_TIMER_START: 1456 case SCTP_CMD_TIMER_START:
1448 timer = &asoc->timers[cmd->obj.to]; 1457 timer = &asoc->timers[cmd->obj.to];
1449 timeout = asoc->timeouts[cmd->obj.to]; 1458 timeout = asoc->timeouts[cmd->obj.to];
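
SCTP_CMD_TIMER_START_ONCE is TIMER_START with an idempotence guard: if the timer is already pending it is left alone, otherwise control falls through into the normal start path. A small stand-alone sketch of the guarded fall-through:

    #include <stdbool.h>
    #include <stdio.h>

    enum toy_cmd { TIMER_START_ONCE, TIMER_START };

    /* Guarded fall-through, as in the new command handling: START_ONCE
     * is a no-op for a pending timer and otherwise behaves as START. */
    static void toy_run(enum toy_cmd c, bool *pending)
    {
        switch (c) {
        case TIMER_START_ONCE:
            if (*pending)
                break;          /* already armed: leave it alone */
            /* fall through */
        case TIMER_START:
            *pending = true;    /* (re)arm */
            printf("timer armed\n");
            break;
        }
    }

    int main(void)
    {
        bool pending = false;

        toy_run(TIMER_START_ONCE, &pending);  /* arms            */
        toy_run(TIMER_START_ONCE, &pending);  /* no-op           */
        toy_run(TIMER_START, &pending);       /* always (re)arms */
        return 0;
    }
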
@@ -1677,6 +1686,9 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1677 case SCTP_CMD_SEND_NEXT_ASCONF: 1686 case SCTP_CMD_SEND_NEXT_ASCONF:
1678 sctp_cmd_send_asconf(asoc); 1687 sctp_cmd_send_asconf(asoc);
1679 break; 1688 break;
1689 case SCTP_CMD_PURGE_ASCONF_QUEUE:
1690 sctp_asconf_queue_teardown(asoc);
1691 break;
1680 default: 1692 default:
1681 pr_warn("Impossible command: %u, %p\n", 1693 pr_warn("Impossible command: %u, %p\n",
1682 cmd->verb, cmd->obj.ptr); 1694 cmd->verb, cmd->obj.ptr);
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 76792083c379..246117142b5c 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -393,8 +393,7 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
393 goto nomem_init; 393 goto nomem_init;
394 394
395 /* The call, sctp_process_init(), can fail on memory allocation. */ 395 /* The call, sctp_process_init(), can fail on memory allocation. */
396 if (!sctp_process_init(new_asoc, chunk->chunk_hdr->type, 396 if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk),
397 sctp_source(chunk),
398 (sctp_init_chunk_t *)chunk->chunk_hdr, 397 (sctp_init_chunk_t *)chunk->chunk_hdr,
399 GFP_ATOMIC)) 398 GFP_ATOMIC))
400 goto nomem_init; 399 goto nomem_init;
@@ -725,7 +724,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
725 */ 724 */
726 peer_init = &chunk->subh.cookie_hdr->c.peer_init[0]; 725 peer_init = &chunk->subh.cookie_hdr->c.peer_init[0];
727 726
728 if (!sctp_process_init(new_asoc, chunk->chunk_hdr->type, 727 if (!sctp_process_init(new_asoc, chunk,
729 &chunk->subh.cookie_hdr->c.peer_addr, 728 &chunk->subh.cookie_hdr->c.peer_addr,
730 peer_init, GFP_ATOMIC)) 729 peer_init, GFP_ATOMIC))
731 goto nomem_init; 730 goto nomem_init;
@@ -942,18 +941,9 @@ static sctp_disposition_t sctp_sf_heartbeat(const struct sctp_endpoint *ep,
942{ 941{
943 struct sctp_transport *transport = (struct sctp_transport *) arg; 942 struct sctp_transport *transport = (struct sctp_transport *) arg;
944 struct sctp_chunk *reply; 943 struct sctp_chunk *reply;
945 sctp_sender_hb_info_t hbinfo;
946 size_t paylen = 0;
947
948 hbinfo.param_hdr.type = SCTP_PARAM_HEARTBEAT_INFO;
949 hbinfo.param_hdr.length = htons(sizeof(sctp_sender_hb_info_t));
950 hbinfo.daddr = transport->ipaddr;
951 hbinfo.sent_at = jiffies;
952 hbinfo.hb_nonce = transport->hb_nonce;
953 944
954 /* Send a heartbeat to our peer. */ 945 /* Send a heartbeat to our peer. */
955 paylen = sizeof(sctp_sender_hb_info_t); 946 reply = sctp_make_heartbeat(asoc, transport);
956 reply = sctp_make_heartbeat(asoc, transport, &hbinfo, paylen);
957 if (!reply) 947 if (!reply)
958 return SCTP_DISPOSITION_NOMEM; 948 return SCTP_DISPOSITION_NOMEM;
959 949
@@ -1464,8 +1454,7 @@ static sctp_disposition_t sctp_sf_do_unexpected_init(
1464 * Verification Tag and Peers Verification tag into a reserved 1454 * Verification Tag and Peers Verification tag into a reserved
1465 * place (local tie-tag and per tie-tag) within the state cookie. 1455 * place (local tie-tag and per tie-tag) within the state cookie.
1466 */ 1456 */
1467 if (!sctp_process_init(new_asoc, chunk->chunk_hdr->type, 1457 if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk),
1468 sctp_source(chunk),
1469 (sctp_init_chunk_t *)chunk->chunk_hdr, 1458 (sctp_init_chunk_t *)chunk->chunk_hdr,
1470 GFP_ATOMIC)) 1459 GFP_ATOMIC))
1471 goto nomem; 1460 goto nomem;
@@ -1694,8 +1683,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(const struct sctp_endpoint *ep,
1694 */ 1683 */
1695 peer_init = &chunk->subh.cookie_hdr->c.peer_init[0]; 1684 peer_init = &chunk->subh.cookie_hdr->c.peer_init[0];
1696 1685
1697 if (!sctp_process_init(new_asoc, chunk->chunk_hdr->type, 1686 if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk), peer_init,
1698 sctp_source(chunk), peer_init,
1699 GFP_ATOMIC)) 1687 GFP_ATOMIC))
1700 goto nomem; 1688 goto nomem;
1701 1689
@@ -1730,11 +1718,21 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(const struct sctp_endpoint *ep,
1730 return SCTP_DISPOSITION_CONSUME; 1718 return SCTP_DISPOSITION_CONSUME;
1731 } 1719 }
1732 1720
1733 /* For now, fail any unsent/unacked data. Consider the optional 1721 /* For now, stop pending T3-rtx and SACK timers, fail any unsent/unacked
1734 * choice of resending of this data. 1722 * data. Consider the optional choice of resending of this data.
1735 */ 1723 */
1724 sctp_add_cmd_sf(commands, SCTP_CMD_T3_RTX_TIMERS_STOP, SCTP_NULL());
1725 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
1726 SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
1736 sctp_add_cmd_sf(commands, SCTP_CMD_PURGE_OUTQUEUE, SCTP_NULL()); 1727 sctp_add_cmd_sf(commands, SCTP_CMD_PURGE_OUTQUEUE, SCTP_NULL());
1737 1728
1729 /* Stop pending T4-rto timer, teardown ASCONF queue, ASCONF-ACK queue
1730 * and ASCONF-ACK cache.
1731 */
1732 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
1733 SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
1734 sctp_add_cmd_sf(commands, SCTP_CMD_PURGE_ASCONF_QUEUE, SCTP_NULL());
1735
1738 repl = sctp_make_cookie_ack(new_asoc, chunk); 1736 repl = sctp_make_cookie_ack(new_asoc, chunk);
1739 if (!repl) 1737 if (!repl)
1740 goto nomem; 1738 goto nomem;
@@ -1780,8 +1778,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_b(const struct sctp_endpoint *ep,
1780 * side effects--it is safe to run them here. 1778 * side effects--it is safe to run them here.
1781 */ 1779 */
1782 peer_init = &chunk->subh.cookie_hdr->c.peer_init[0]; 1780 peer_init = &chunk->subh.cookie_hdr->c.peer_init[0];
1783 if (!sctp_process_init(new_asoc, chunk->chunk_hdr->type, 1781 if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk), peer_init,
1784 sctp_source(chunk), peer_init,
1785 GFP_ATOMIC)) 1782 GFP_ATOMIC))
1786 goto nomem; 1783 goto nomem;
1787 1784
@@ -2412,8 +2409,15 @@ static sctp_disposition_t __sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
2412 2409
2413 /* See if we have an error cause code in the chunk. */ 2410 /* See if we have an error cause code in the chunk. */
2414 len = ntohs(chunk->chunk_hdr->length); 2411 len = ntohs(chunk->chunk_hdr->length);
2415 if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr)) 2412 if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr)) {
2413
2414 sctp_errhdr_t *err;
2415 sctp_walk_errors(err, chunk->chunk_hdr);
2416 if ((void *)err != (void *)chunk->chunk_end)
2417 return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
2418
2416 error = ((sctp_errhdr_t *)chunk->skb->data)->cause; 2419 error = ((sctp_errhdr_t *)chunk->skb->data)->cause;
2420 }
2417 2421
2418 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNRESET)); 2422 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNRESET));
2419 /* ASSOC_FAILED will DELETE_TCB. */ 2423 /* ASSOC_FAILED will DELETE_TCB. */
@@ -3204,6 +3208,7 @@ sctp_disposition_t sctp_sf_operr_notify(const struct sctp_endpoint *ep,
3204 sctp_cmd_seq_t *commands) 3208 sctp_cmd_seq_t *commands)
3205{ 3209{
3206 struct sctp_chunk *chunk = arg; 3210 struct sctp_chunk *chunk = arg;
3211 sctp_errhdr_t *err;
3207 3212
3208 if (!sctp_vtag_verify(chunk, asoc)) 3213 if (!sctp_vtag_verify(chunk, asoc))
3209 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 3214 return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
@@ -3212,6 +3217,10 @@ sctp_disposition_t sctp_sf_operr_notify(const struct sctp_endpoint *ep,
3212 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_operr_chunk_t))) 3217 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_operr_chunk_t)))
3213 return sctp_sf_violation_chunklen(ep, asoc, type, arg, 3218 return sctp_sf_violation_chunklen(ep, asoc, type, arg,
3214 commands); 3219 commands);
3220 sctp_walk_errors(err, chunk->chunk_hdr);
3221 if ((void *)err != (void *)chunk->chunk_end)
3222 return sctp_sf_violation_paramlen(ep, asoc, type, arg,
3223 (void *)err, commands);
3215 3224
3216 sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_OPERR, 3225 sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_OPERR,
3217 SCTP_CHUNK(chunk)); 3226 SCTP_CHUNK(chunk));
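
Both validations above lean on the same invariant: sctp_walk_errors() advances through the error-cause TLVs and stops early on a bogus length, so a cursor that did not reach chunk_end marks a malformed chunk. A self-contained sketch of that walk-and-compare check (toy buffer, not the kernel macro):

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    struct toy_errhdr { uint16_t cause, length; };

    int main(void)
    {
        /* Two well-formed causes, header-only, 4 bytes each. */
        uint8_t buf[8] = { 0, 1, 0, 4,  0, 2, 0, 4 };
        uint8_t *p = buf, *end = buf + sizeof(buf);

        while (p + sizeof(struct toy_errhdr) <= end) {
            struct toy_errhdr *err = (struct toy_errhdr *)p;
            uint16_t len = ntohs(err->length);

            if (len < sizeof(*err) || p + len > end)
                break;              /* bogus length: stop early */
            p += (len + 3) & ~3;    /* causes are 4-byte aligned */
        }

        puts(p == end ? "all causes valid" : "protocol violation");
        return 0;
    }
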
@@ -3320,8 +3329,10 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
3320 struct sctp_chunk *chunk = arg; 3329 struct sctp_chunk *chunk = arg;
3321 struct sk_buff *skb = chunk->skb; 3330 struct sk_buff *skb = chunk->skb;
3322 sctp_chunkhdr_t *ch; 3331 sctp_chunkhdr_t *ch;
3332 sctp_errhdr_t *err;
3323 __u8 *ch_end; 3333 __u8 *ch_end;
3324 int ootb_shut_ack = 0; 3334 int ootb_shut_ack = 0;
3335 int ootb_cookie_ack = 0;
3325 3336
3326 SCTP_INC_STATS(SCTP_MIB_OUTOFBLUES); 3337 SCTP_INC_STATS(SCTP_MIB_OUTOFBLUES);
3327 3338
@@ -3346,6 +3357,23 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
3346 if (SCTP_CID_ABORT == ch->type) 3357 if (SCTP_CID_ABORT == ch->type)
3347 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 3358 return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
3348 3359
 3360 /* RFC 4960, 8.4, 7) If the packet contains a "Stale cookie" ERROR
3361 * or a COOKIE ACK the SCTP Packet should be silently
3362 * discarded.
3363 */
3364
3365 if (SCTP_CID_COOKIE_ACK == ch->type)
3366 ootb_cookie_ack = 1;
3367
3368 if (SCTP_CID_ERROR == ch->type) {
3369 sctp_walk_errors(err, ch) {
3370 if (SCTP_ERROR_STALE_COOKIE == err->cause) {
3371 ootb_cookie_ack = 1;
3372 break;
3373 }
3374 }
3375 }
3376
3349 /* Report violation if chunk len overflows */ 3377 /* Report violation if chunk len overflows */
3350 ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length)); 3378 ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
3351 if (ch_end > skb_tail_pointer(skb)) 3379 if (ch_end > skb_tail_pointer(skb))
@@ -3357,6 +3385,8 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
3357 3385
3358 if (ootb_shut_ack) 3386 if (ootb_shut_ack)
3359 return sctp_sf_shut_8_4_5(ep, asoc, type, arg, commands); 3387 return sctp_sf_shut_8_4_5(ep, asoc, type, arg, commands);
3388 else if (ootb_cookie_ack)
3389 return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
3360 else 3390 else
3361 return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands); 3391 return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
3362} 3392}
@@ -4343,8 +4373,9 @@ static sctp_disposition_t sctp_sf_violation_chunklen(
4343 4373
4344/* 4374/*
4345 * Handle a protocol violation when the parameter length is invalid. 4375 * Handle a protocol violation when the parameter length is invalid.
4346 * "Invalid" length is identified as smaller than the minimal length a 4376 * If the length is smaller than the minimum length of a given parameter,
 4347 * given parameter can be. 4377 * or the accumulated length of multiple parameters exceeds the end of the chunk,
 4378 * the length is considered invalid.
4348 */ 4379 */
4349static sctp_disposition_t sctp_sf_violation_paramlen( 4380static sctp_disposition_t sctp_sf_violation_paramlen(
4350 const struct sctp_endpoint *ep, 4381 const struct sctp_endpoint *ep,
@@ -5056,6 +5087,30 @@ sctp_disposition_t sctp_sf_ignore_primitive(
5056 ***************************************************************************/ 5087 ***************************************************************************/
5057 5088
5058/* 5089/*
5090 * When the SCTP stack has no more user data to send or retransmit, this
5091 * notification is given to the user. Also, at the time when a user app
5092 * subscribes to this event, if there is no data to be sent or
 5093 * retransmitted, the stack will immediately send up this notification.
5094 */
5095sctp_disposition_t sctp_sf_do_no_pending_tsn(
5096 const struct sctp_endpoint *ep,
5097 const struct sctp_association *asoc,
5098 const sctp_subtype_t type,
5099 void *arg,
5100 sctp_cmd_seq_t *commands)
5101{
5102 struct sctp_ulpevent *event;
5103
5104 event = sctp_ulpevent_make_sender_dry_event(asoc, GFP_ATOMIC);
5105 if (!event)
5106 return SCTP_DISPOSITION_NOMEM;
5107
5108 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(event));
5109
5110 return SCTP_DISPOSITION_CONSUME;
5111}
5112
5113/*
5059 * Start the shutdown negotiation. 5114 * Start the shutdown negotiation.
5060 * 5115 *
5061 * From Section 9.2: 5116 * From Section 9.2:
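
From user space, the sender-dry path added above is consumed through the SCTP_EVENTS socket option: subscribing to sctp_sender_dry_event makes the stack deliver an SCTP_SENDER_DRY_EVENT notification once nothing is left to send or retransmit. A hedged sketch, assuming the lksctp-tools <netinet/sctp.h> header is available:

    #include <netinet/in.h>
    #include <netinet/sctp.h>    /* lksctp-tools header; assumed */
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
        struct sctp_event_subscribe ev;

        if (fd < 0) {
            perror("socket");
            return 1;
        }
        memset(&ev, 0, sizeof(ev));
        ev.sctp_sender_dry_event = 1;
        if (setsockopt(fd, IPPROTO_SCTP, SCTP_EVENTS, &ev, sizeof(ev)) < 0)
            perror("setsockopt(SCTP_EVENTS)");
        /* recvmsg() then delivers MSG_NOTIFICATION messages of type
         * SCTP_SENDER_DRY_EVENT once the send/retransmit queues drain. */
        return 0;
    }
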
@@ -5099,7 +5154,7 @@ sctp_disposition_t sctp_sf_do_9_2_start_shutdown(
5099 * The sender of the SHUTDOWN MAY also start an overall guard timer 5154 * The sender of the SHUTDOWN MAY also start an overall guard timer
5100 * 'T5-shutdown-guard' to bound the overall time for shutdown sequence. 5155 * 'T5-shutdown-guard' to bound the overall time for shutdown sequence.
5101 */ 5156 */
5102 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START, 5157 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
5103 SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD)); 5158 SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
5104 5159
5105 if (asoc->autoclose) 5160 if (asoc->autoclose)
@@ -5244,14 +5299,28 @@ sctp_disposition_t sctp_sf_do_6_3_3_rtx(const struct sctp_endpoint *ep,
5244 SCTP_INC_STATS(SCTP_MIB_T3_RTX_EXPIREDS); 5299 SCTP_INC_STATS(SCTP_MIB_T3_RTX_EXPIREDS);
5245 5300
5246 if (asoc->overall_error_count >= asoc->max_retrans) { 5301 if (asoc->overall_error_count >= asoc->max_retrans) {
5247 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, 5302 if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING) {
5248 SCTP_ERROR(ETIMEDOUT)); 5303 /*
5249 /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */ 5304 * We are here likely because the receiver had its rwnd
5250 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, 5305 * closed for a while and we have not been able to
5251 SCTP_PERR(SCTP_ERROR_NO_ERROR)); 5306 * transmit the locally queued data within the maximum
5252 SCTP_INC_STATS(SCTP_MIB_ABORTEDS); 5307 * retransmission attempts limit. Start the T5
5253 SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); 5308 * shutdown guard timer to give the receiver one last
5254 return SCTP_DISPOSITION_DELETE_TCB; 5309 * chance and some additional time to recover before
5310 * aborting.
5311 */
5312 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START_ONCE,
5313 SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
5314 } else {
5315 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
5316 SCTP_ERROR(ETIMEDOUT));
5317 /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
5318 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
5319 SCTP_PERR(SCTP_ERROR_NO_ERROR));
5320 SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
5321 SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
5322 return SCTP_DISPOSITION_DELETE_TCB;
5323 }
5255 } 5324 }
5256 5325
5257 /* E1) For the destination address for which the timer 5326 /* E1) For the destination address for which the timer
diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c
index 546d4387fb3c..7c211a7f90f4 100644
--- a/net/sctp/sm_statetable.c
+++ b/net/sctp/sm_statetable.c
@@ -107,8 +107,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
107#define TYPE_SCTP_FUNC(func) {.fn = func, .name = #func} 107#define TYPE_SCTP_FUNC(func) {.fn = func, .name = #func}
108 108
109#define TYPE_SCTP_DATA { \ 109#define TYPE_SCTP_DATA { \
110 /* SCTP_STATE_EMPTY */ \
111 TYPE_SCTP_FUNC(sctp_sf_ootb), \
112 /* SCTP_STATE_CLOSED */ \ 110 /* SCTP_STATE_CLOSED */ \
113 TYPE_SCTP_FUNC(sctp_sf_ootb), \ 111 TYPE_SCTP_FUNC(sctp_sf_ootb), \
114 /* SCTP_STATE_COOKIE_WAIT */ \ 112 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -128,8 +126,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
128} /* TYPE_SCTP_DATA */ 126} /* TYPE_SCTP_DATA */
129 127
130#define TYPE_SCTP_INIT { \ 128#define TYPE_SCTP_INIT { \
131 /* SCTP_STATE_EMPTY */ \
132 TYPE_SCTP_FUNC(sctp_sf_bug), \
133 /* SCTP_STATE_CLOSED */ \ 129 /* SCTP_STATE_CLOSED */ \
134 TYPE_SCTP_FUNC(sctp_sf_do_5_1B_init), \ 130 TYPE_SCTP_FUNC(sctp_sf_do_5_1B_init), \
135 /* SCTP_STATE_COOKIE_WAIT */ \ 131 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -149,8 +145,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
149} /* TYPE_SCTP_INIT */ 145} /* TYPE_SCTP_INIT */
150 146
151#define TYPE_SCTP_INIT_ACK { \ 147#define TYPE_SCTP_INIT_ACK { \
152 /* SCTP_STATE_EMPTY */ \
153 TYPE_SCTP_FUNC(sctp_sf_ootb), \
154 /* SCTP_STATE_CLOSED */ \ 148 /* SCTP_STATE_CLOSED */ \
155 TYPE_SCTP_FUNC(sctp_sf_do_5_2_3_initack), \ 149 TYPE_SCTP_FUNC(sctp_sf_do_5_2_3_initack), \
156 /* SCTP_STATE_COOKIE_WAIT */ \ 150 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -170,8 +164,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
170} /* TYPE_SCTP_INIT_ACK */ 164} /* TYPE_SCTP_INIT_ACK */
171 165
172#define TYPE_SCTP_SACK { \ 166#define TYPE_SCTP_SACK { \
173 /* SCTP_STATE_EMPTY */ \
174 TYPE_SCTP_FUNC(sctp_sf_ootb), \
175 /* SCTP_STATE_CLOSED */ \ 167 /* SCTP_STATE_CLOSED */ \
176 TYPE_SCTP_FUNC(sctp_sf_ootb), \ 168 TYPE_SCTP_FUNC(sctp_sf_ootb), \
177 /* SCTP_STATE_COOKIE_WAIT */ \ 169 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -191,8 +183,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
191} /* TYPE_SCTP_SACK */ 183} /* TYPE_SCTP_SACK */
192 184
193#define TYPE_SCTP_HEARTBEAT { \ 185#define TYPE_SCTP_HEARTBEAT { \
194 /* SCTP_STATE_EMPTY */ \
195 TYPE_SCTP_FUNC(sctp_sf_ootb), \
196 /* SCTP_STATE_CLOSED */ \ 186 /* SCTP_STATE_CLOSED */ \
197 TYPE_SCTP_FUNC(sctp_sf_ootb), \ 187 TYPE_SCTP_FUNC(sctp_sf_ootb), \
198 /* SCTP_STATE_COOKIE_WAIT */ \ 188 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -213,8 +203,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
213} /* TYPE_SCTP_HEARTBEAT */ 203} /* TYPE_SCTP_HEARTBEAT */
214 204
215#define TYPE_SCTP_HEARTBEAT_ACK { \ 205#define TYPE_SCTP_HEARTBEAT_ACK { \
216 /* SCTP_STATE_EMPTY */ \
217 TYPE_SCTP_FUNC(sctp_sf_ootb), \
218 /* SCTP_STATE_CLOSED */ \ 206 /* SCTP_STATE_CLOSED */ \
219 TYPE_SCTP_FUNC(sctp_sf_ootb), \ 207 TYPE_SCTP_FUNC(sctp_sf_ootb), \
220 /* SCTP_STATE_COOKIE_WAIT */ \ 208 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -234,8 +222,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
234} /* TYPE_SCTP_HEARTBEAT_ACK */ 222} /* TYPE_SCTP_HEARTBEAT_ACK */
235 223
236#define TYPE_SCTP_ABORT { \ 224#define TYPE_SCTP_ABORT { \
237 /* SCTP_STATE_EMPTY */ \
238 TYPE_SCTP_FUNC(sctp_sf_ootb), \
239 /* SCTP_STATE_CLOSED */ \ 225 /* SCTP_STATE_CLOSED */ \
240 TYPE_SCTP_FUNC(sctp_sf_pdiscard), \ 226 TYPE_SCTP_FUNC(sctp_sf_pdiscard), \
241 /* SCTP_STATE_COOKIE_WAIT */ \ 227 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -255,8 +241,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
255} /* TYPE_SCTP_ABORT */ 241} /* TYPE_SCTP_ABORT */
256 242
257#define TYPE_SCTP_SHUTDOWN { \ 243#define TYPE_SCTP_SHUTDOWN { \
258 /* SCTP_STATE_EMPTY */ \
259 TYPE_SCTP_FUNC(sctp_sf_ootb), \
260 /* SCTP_STATE_CLOSED */ \ 244 /* SCTP_STATE_CLOSED */ \
261 TYPE_SCTP_FUNC(sctp_sf_ootb), \ 245 TYPE_SCTP_FUNC(sctp_sf_ootb), \
262 /* SCTP_STATE_COOKIE_WAIT */ \ 246 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -276,8 +260,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
276} /* TYPE_SCTP_SHUTDOWN */ 260} /* TYPE_SCTP_SHUTDOWN */
277 261
278#define TYPE_SCTP_SHUTDOWN_ACK { \ 262#define TYPE_SCTP_SHUTDOWN_ACK { \
279 /* SCTP_STATE_EMPTY */ \
280 TYPE_SCTP_FUNC(sctp_sf_ootb), \
281 /* SCTP_STATE_CLOSED */ \ 263 /* SCTP_STATE_CLOSED */ \
282 TYPE_SCTP_FUNC(sctp_sf_ootb), \ 264 TYPE_SCTP_FUNC(sctp_sf_ootb), \
283 /* SCTP_STATE_COOKIE_WAIT */ \ 265 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -297,8 +279,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
297} /* TYPE_SCTP_SHUTDOWN_ACK */ 279} /* TYPE_SCTP_SHUTDOWN_ACK */
298 280
299#define TYPE_SCTP_ERROR { \ 281#define TYPE_SCTP_ERROR { \
300 /* SCTP_STATE_EMPTY */ \
301 TYPE_SCTP_FUNC(sctp_sf_ootb), \
302 /* SCTP_STATE_CLOSED */ \ 282 /* SCTP_STATE_CLOSED */ \
303 TYPE_SCTP_FUNC(sctp_sf_ootb), \ 283 TYPE_SCTP_FUNC(sctp_sf_ootb), \
304 /* SCTP_STATE_COOKIE_WAIT */ \ 284 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -318,8 +298,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
318} /* TYPE_SCTP_ERROR */ 298} /* TYPE_SCTP_ERROR */
319 299
320#define TYPE_SCTP_COOKIE_ECHO { \ 300#define TYPE_SCTP_COOKIE_ECHO { \
321 /* SCTP_STATE_EMPTY */ \
322 TYPE_SCTP_FUNC(sctp_sf_bug), \
323 /* SCTP_STATE_CLOSED */ \ 301 /* SCTP_STATE_CLOSED */ \
324 TYPE_SCTP_FUNC(sctp_sf_do_5_1D_ce), \ 302 TYPE_SCTP_FUNC(sctp_sf_do_5_1D_ce), \
325 /* SCTP_STATE_COOKIE_WAIT */ \ 303 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -339,8 +317,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
339} /* TYPE_SCTP_COOKIE_ECHO */ 317} /* TYPE_SCTP_COOKIE_ECHO */
340 318
341#define TYPE_SCTP_COOKIE_ACK { \ 319#define TYPE_SCTP_COOKIE_ACK { \
342 /* SCTP_STATE_EMPTY */ \
343 TYPE_SCTP_FUNC(sctp_sf_ootb), \
344 /* SCTP_STATE_CLOSED */ \ 320 /* SCTP_STATE_CLOSED */ \
345 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ 321 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
346 /* SCTP_STATE_COOKIE_WAIT */ \ 322 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -360,8 +336,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
360} /* TYPE_SCTP_COOKIE_ACK */ 336} /* TYPE_SCTP_COOKIE_ACK */
361 337
362#define TYPE_SCTP_ECN_ECNE { \ 338#define TYPE_SCTP_ECN_ECNE { \
363 /* SCTP_STATE_EMPTY */ \
364 TYPE_SCTP_FUNC(sctp_sf_ootb), \
365 /* SCTP_STATE_CLOSED */ \ 339 /* SCTP_STATE_CLOSED */ \
366 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ 340 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
367 /* SCTP_STATE_COOKIE_WAIT */ \ 341 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -381,8 +355,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
381} /* TYPE_SCTP_ECN_ECNE */ 355} /* TYPE_SCTP_ECN_ECNE */
382 356
383#define TYPE_SCTP_ECN_CWR { \ 357#define TYPE_SCTP_ECN_CWR { \
384 /* SCTP_STATE_EMPTY */ \
385 TYPE_SCTP_FUNC(sctp_sf_ootb), \
386 /* SCTP_STATE_CLOSED */ \ 358 /* SCTP_STATE_CLOSED */ \
387 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ 359 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
388 /* SCTP_STATE_COOKIE_WAIT */ \ 360 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -402,8 +374,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
402} /* TYPE_SCTP_ECN_CWR */ 374} /* TYPE_SCTP_ECN_CWR */
403 375
404#define TYPE_SCTP_SHUTDOWN_COMPLETE { \ 376#define TYPE_SCTP_SHUTDOWN_COMPLETE { \
405 /* SCTP_STATE_EMPTY */ \
406 TYPE_SCTP_FUNC(sctp_sf_ootb), \
407 /* SCTP_STATE_CLOSED */ \ 377 /* SCTP_STATE_CLOSED */ \
408 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ 378 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
409 /* SCTP_STATE_COOKIE_WAIT */ \ 379 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -446,8 +416,6 @@ static const sctp_sm_table_entry_t chunk_event_table[SCTP_NUM_BASE_CHUNK_TYPES][
446}; /* state_fn_t chunk_event_table[][] */ 416}; /* state_fn_t chunk_event_table[][] */
447 417
448#define TYPE_SCTP_ASCONF { \ 418#define TYPE_SCTP_ASCONF { \
449 /* SCTP_STATE_EMPTY */ \
450 TYPE_SCTP_FUNC(sctp_sf_ootb), \
451 /* SCTP_STATE_CLOSED */ \ 419 /* SCTP_STATE_CLOSED */ \
452 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ 420 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
453 /* SCTP_STATE_COOKIE_WAIT */ \ 421 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -467,8 +435,6 @@ static const sctp_sm_table_entry_t chunk_event_table[SCTP_NUM_BASE_CHUNK_TYPES][
467} /* TYPE_SCTP_ASCONF */ 435} /* TYPE_SCTP_ASCONF */
468 436
469#define TYPE_SCTP_ASCONF_ACK { \ 437#define TYPE_SCTP_ASCONF_ACK { \
470 /* SCTP_STATE_EMPTY */ \
471 TYPE_SCTP_FUNC(sctp_sf_ootb), \
472 /* SCTP_STATE_CLOSED */ \ 438 /* SCTP_STATE_CLOSED */ \
473 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ 439 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
474 /* SCTP_STATE_COOKIE_WAIT */ \ 440 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -496,8 +462,6 @@ static const sctp_sm_table_entry_t addip_chunk_event_table[SCTP_NUM_ADDIP_CHUNK_
496}; /*state_fn_t addip_chunk_event_table[][] */ 462}; /*state_fn_t addip_chunk_event_table[][] */
497 463
498#define TYPE_SCTP_FWD_TSN { \ 464#define TYPE_SCTP_FWD_TSN { \
499 /* SCTP_STATE_EMPTY */ \
500 TYPE_SCTP_FUNC(sctp_sf_ootb), \
501 /* SCTP_STATE_CLOSED */ \ 465 /* SCTP_STATE_CLOSED */ \
502 TYPE_SCTP_FUNC(sctp_sf_ootb), \ 466 TYPE_SCTP_FUNC(sctp_sf_ootb), \
503 /* SCTP_STATE_COOKIE_WAIT */ \ 467 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -524,8 +488,6 @@ static const sctp_sm_table_entry_t prsctp_chunk_event_table[SCTP_NUM_PRSCTP_CHUN
524}; /*state_fn_t prsctp_chunk_event_table[][] */ 488}; /*state_fn_t prsctp_chunk_event_table[][] */
525 489
526#define TYPE_SCTP_AUTH { \ 490#define TYPE_SCTP_AUTH { \
527 /* SCTP_STATE_EMPTY */ \
528 TYPE_SCTP_FUNC(sctp_sf_ootb), \
529 /* SCTP_STATE_CLOSED */ \ 491 /* SCTP_STATE_CLOSED */ \
530 TYPE_SCTP_FUNC(sctp_sf_ootb), \ 492 TYPE_SCTP_FUNC(sctp_sf_ootb), \
531 /* SCTP_STATE_COOKIE_WAIT */ \ 493 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -553,8 +515,6 @@ static const sctp_sm_table_entry_t auth_chunk_event_table[SCTP_NUM_AUTH_CHUNK_TY
553 515
554static const sctp_sm_table_entry_t 516static const sctp_sm_table_entry_t
555chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = { 517chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = {
556 /* SCTP_STATE_EMPTY */
557 TYPE_SCTP_FUNC(sctp_sf_ootb),
558 /* SCTP_STATE_CLOSED */ 518 /* SCTP_STATE_CLOSED */
559 TYPE_SCTP_FUNC(sctp_sf_ootb), 519 TYPE_SCTP_FUNC(sctp_sf_ootb),
560 /* SCTP_STATE_COOKIE_WAIT */ 520 /* SCTP_STATE_COOKIE_WAIT */
@@ -575,8 +535,6 @@ chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = {
575 535
576 536
577#define TYPE_SCTP_PRIMITIVE_ASSOCIATE { \ 537#define TYPE_SCTP_PRIMITIVE_ASSOCIATE { \
578 /* SCTP_STATE_EMPTY */ \
579 TYPE_SCTP_FUNC(sctp_sf_bug), \
580 /* SCTP_STATE_CLOSED */ \ 538 /* SCTP_STATE_CLOSED */ \
581 TYPE_SCTP_FUNC(sctp_sf_do_prm_asoc), \ 539 TYPE_SCTP_FUNC(sctp_sf_do_prm_asoc), \
582 /* SCTP_STATE_COOKIE_WAIT */ \ 540 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -596,8 +554,6 @@ chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = {
596} /* TYPE_SCTP_PRIMITIVE_ASSOCIATE */ 554} /* TYPE_SCTP_PRIMITIVE_ASSOCIATE */
597 555
598#define TYPE_SCTP_PRIMITIVE_SHUTDOWN { \ 556#define TYPE_SCTP_PRIMITIVE_SHUTDOWN { \
599 /* SCTP_STATE_EMPTY */ \
600 TYPE_SCTP_FUNC(sctp_sf_bug), \
601 /* SCTP_STATE_CLOSED */ \ 557 /* SCTP_STATE_CLOSED */ \
602 TYPE_SCTP_FUNC(sctp_sf_error_closed), \ 558 TYPE_SCTP_FUNC(sctp_sf_error_closed), \
603 /* SCTP_STATE_COOKIE_WAIT */ \ 559 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -617,8 +573,6 @@ chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = {
617} /* TYPE_SCTP_PRIMITIVE_SHUTDOWN */ 573} /* TYPE_SCTP_PRIMITIVE_SHUTDOWN */
618 574
619#define TYPE_SCTP_PRIMITIVE_ABORT { \ 575#define TYPE_SCTP_PRIMITIVE_ABORT { \
620 /* SCTP_STATE_EMPTY */ \
621 TYPE_SCTP_FUNC(sctp_sf_bug), \
622 /* SCTP_STATE_CLOSED */ \ 576 /* SCTP_STATE_CLOSED */ \
623 TYPE_SCTP_FUNC(sctp_sf_error_closed), \ 577 TYPE_SCTP_FUNC(sctp_sf_error_closed), \
624 /* SCTP_STATE_COOKIE_WAIT */ \ 578 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -638,8 +592,6 @@ chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = {
638} /* TYPE_SCTP_PRIMITIVE_ABORT */ 592} /* TYPE_SCTP_PRIMITIVE_ABORT */
639 593
640#define TYPE_SCTP_PRIMITIVE_SEND { \ 594#define TYPE_SCTP_PRIMITIVE_SEND { \
641 /* SCTP_STATE_EMPTY */ \
642 TYPE_SCTP_FUNC(sctp_sf_bug), \
643 /* SCTP_STATE_CLOSED */ \ 595 /* SCTP_STATE_CLOSED */ \
644 TYPE_SCTP_FUNC(sctp_sf_error_closed), \ 596 TYPE_SCTP_FUNC(sctp_sf_error_closed), \
645 /* SCTP_STATE_COOKIE_WAIT */ \ 597 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -659,8 +611,6 @@ chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = {
659} /* TYPE_SCTP_PRIMITIVE_SEND */ 611} /* TYPE_SCTP_PRIMITIVE_SEND */
660 612
661#define TYPE_SCTP_PRIMITIVE_REQUESTHEARTBEAT { \ 613#define TYPE_SCTP_PRIMITIVE_REQUESTHEARTBEAT { \
662 /* SCTP_STATE_EMPTY */ \
663 TYPE_SCTP_FUNC(sctp_sf_bug), \
664 /* SCTP_STATE_CLOSED */ \ 614 /* SCTP_STATE_CLOSED */ \
665 TYPE_SCTP_FUNC(sctp_sf_error_closed), \ 615 TYPE_SCTP_FUNC(sctp_sf_error_closed), \
666 /* SCTP_STATE_COOKIE_WAIT */ \ 616 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -680,8 +630,6 @@ chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = {
680} /* TYPE_SCTP_PRIMITIVE_REQUESTHEARTBEAT */ 630} /* TYPE_SCTP_PRIMITIVE_REQUESTHEARTBEAT */
681 631
682#define TYPE_SCTP_PRIMITIVE_ASCONF { \ 632#define TYPE_SCTP_PRIMITIVE_ASCONF { \
683 /* SCTP_STATE_EMPTY */ \
684 TYPE_SCTP_FUNC(sctp_sf_bug), \
685 /* SCTP_STATE_CLOSED */ \ 633 /* SCTP_STATE_CLOSED */ \
686 TYPE_SCTP_FUNC(sctp_sf_error_closed), \ 634 TYPE_SCTP_FUNC(sctp_sf_error_closed), \
687 /* SCTP_STATE_COOKIE_WAIT */ \ 635 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -713,8 +661,6 @@ static const sctp_sm_table_entry_t primitive_event_table[SCTP_NUM_PRIMITIVE_TYPE
713}; 661};
714 662
715#define TYPE_SCTP_OTHER_NO_PENDING_TSN { \ 663#define TYPE_SCTP_OTHER_NO_PENDING_TSN { \
716 /* SCTP_STATE_EMPTY */ \
717 TYPE_SCTP_FUNC(sctp_sf_bug), \
718 /* SCTP_STATE_CLOSED */ \ 664 /* SCTP_STATE_CLOSED */ \
719 TYPE_SCTP_FUNC(sctp_sf_ignore_other), \ 665 TYPE_SCTP_FUNC(sctp_sf_ignore_other), \
720 /* SCTP_STATE_COOKIE_WAIT */ \ 666 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -722,7 +668,7 @@ static const sctp_sm_table_entry_t primitive_event_table[SCTP_NUM_PRIMITIVE_TYPE
722 /* SCTP_STATE_COOKIE_ECHOED */ \ 668 /* SCTP_STATE_COOKIE_ECHOED */ \
723 TYPE_SCTP_FUNC(sctp_sf_ignore_other), \ 669 TYPE_SCTP_FUNC(sctp_sf_ignore_other), \
724 /* SCTP_STATE_ESTABLISHED */ \ 670 /* SCTP_STATE_ESTABLISHED */ \
725 TYPE_SCTP_FUNC(sctp_sf_ignore_other), \ 671 TYPE_SCTP_FUNC(sctp_sf_do_no_pending_tsn), \
726 /* SCTP_STATE_SHUTDOWN_PENDING */ \ 672 /* SCTP_STATE_SHUTDOWN_PENDING */ \
727 TYPE_SCTP_FUNC(sctp_sf_do_9_2_start_shutdown), \ 673 TYPE_SCTP_FUNC(sctp_sf_do_9_2_start_shutdown), \
728 /* SCTP_STATE_SHUTDOWN_SENT */ \ 674 /* SCTP_STATE_SHUTDOWN_SENT */ \
@@ -734,8 +680,6 @@ static const sctp_sm_table_entry_t primitive_event_table[SCTP_NUM_PRIMITIVE_TYPE
734} 680}
735 681
736#define TYPE_SCTP_OTHER_ICMP_PROTO_UNREACH { \ 682#define TYPE_SCTP_OTHER_ICMP_PROTO_UNREACH { \
737 /* SCTP_STATE_EMPTY */ \
738 TYPE_SCTP_FUNC(sctp_sf_bug), \
739 /* SCTP_STATE_CLOSED */ \ 683 /* SCTP_STATE_CLOSED */ \
740 TYPE_SCTP_FUNC(sctp_sf_ignore_other), \ 684 TYPE_SCTP_FUNC(sctp_sf_ignore_other), \
741 /* SCTP_STATE_COOKIE_WAIT */ \ 685 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -760,8 +704,6 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_
760}; 704};
761 705
762#define TYPE_SCTP_EVENT_TIMEOUT_NONE { \ 706#define TYPE_SCTP_EVENT_TIMEOUT_NONE { \
763 /* SCTP_STATE_EMPTY */ \
764 TYPE_SCTP_FUNC(sctp_sf_bug), \
765 /* SCTP_STATE_CLOSED */ \ 707 /* SCTP_STATE_CLOSED */ \
766 TYPE_SCTP_FUNC(sctp_sf_bug), \ 708 TYPE_SCTP_FUNC(sctp_sf_bug), \
767 /* SCTP_STATE_COOKIE_WAIT */ \ 709 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -781,8 +723,6 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_
781} 723}
782 724
783#define TYPE_SCTP_EVENT_TIMEOUT_T1_COOKIE { \ 725#define TYPE_SCTP_EVENT_TIMEOUT_T1_COOKIE { \
784 /* SCTP_STATE_EMPTY */ \
785 TYPE_SCTP_FUNC(sctp_sf_bug), \
786 /* SCTP_STATE_CLOSED */ \ 726 /* SCTP_STATE_CLOSED */ \
787 TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ 727 TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
788 /* SCTP_STATE_COOKIE_WAIT */ \ 728 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -802,8 +742,6 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_
802} 742}
803 743
804#define TYPE_SCTP_EVENT_TIMEOUT_T1_INIT { \ 744#define TYPE_SCTP_EVENT_TIMEOUT_T1_INIT { \
805 /* SCTP_STATE_EMPTY */ \
806 TYPE_SCTP_FUNC(sctp_sf_bug), \
807 /* SCTP_STATE_CLOSED */ \ 745 /* SCTP_STATE_CLOSED */ \
808 TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ 746 TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
809 /* SCTP_STATE_COOKIE_WAIT */ \ 747 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -823,8 +761,6 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_
823} 761}
824 762
825#define TYPE_SCTP_EVENT_TIMEOUT_T2_SHUTDOWN { \ 763#define TYPE_SCTP_EVENT_TIMEOUT_T2_SHUTDOWN { \
826 /* SCTP_STATE_EMPTY */ \
827 TYPE_SCTP_FUNC(sctp_sf_bug), \
828 /* SCTP_STATE_CLOSED */ \ 764 /* SCTP_STATE_CLOSED */ \
829 TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ 765 TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
830 /* SCTP_STATE_COOKIE_WAIT */ \ 766 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -844,8 +780,6 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_
844} 780}
845 781
846#define TYPE_SCTP_EVENT_TIMEOUT_T3_RTX { \ 782#define TYPE_SCTP_EVENT_TIMEOUT_T3_RTX { \
847 /* SCTP_STATE_EMPTY */ \
848 TYPE_SCTP_FUNC(sctp_sf_bug), \
849 /* SCTP_STATE_CLOSED */ \ 783 /* SCTP_STATE_CLOSED */ \
850 TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ 784 TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
851 /* SCTP_STATE_COOKIE_WAIT */ \ 785 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -865,8 +799,6 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_
865} 799}
866 800
867#define TYPE_SCTP_EVENT_TIMEOUT_T4_RTO { \ 801#define TYPE_SCTP_EVENT_TIMEOUT_T4_RTO { \
868 /* SCTP_STATE_EMPTY */ \
869 TYPE_SCTP_FUNC(sctp_sf_bug), \
870 /* SCTP_STATE_CLOSED */ \ 802 /* SCTP_STATE_CLOSED */ \
871 TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ 803 TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
872 /* SCTP_STATE_COOKIE_WAIT */ \ 804 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -886,8 +818,6 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_
886} 818}
887 819
888#define TYPE_SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD { \ 820#define TYPE_SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD { \
889 /* SCTP_STATE_EMPTY */ \
890 TYPE_SCTP_FUNC(sctp_sf_bug), \
891 /* SCTP_STATE_CLOSED */ \ 821 /* SCTP_STATE_CLOSED */ \
892 TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ 822 TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
893 /* SCTP_STATE_COOKIE_WAIT */ \ 823 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -897,7 +827,7 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_
897 /* SCTP_STATE_ESTABLISHED */ \ 827 /* SCTP_STATE_ESTABLISHED */ \
898 TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ 828 TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
899 /* SCTP_STATE_SHUTDOWN_PENDING */ \ 829 /* SCTP_STATE_SHUTDOWN_PENDING */ \
900 TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ 830 TYPE_SCTP_FUNC(sctp_sf_t5_timer_expire), \
901 /* SCTP_STATE_SHUTDOWN_SENT */ \ 831 /* SCTP_STATE_SHUTDOWN_SENT */ \
902 TYPE_SCTP_FUNC(sctp_sf_t5_timer_expire), \ 832 TYPE_SCTP_FUNC(sctp_sf_t5_timer_expire), \
903 /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ 833 /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
@@ -907,8 +837,6 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_
907} 837}
908 838
909#define TYPE_SCTP_EVENT_TIMEOUT_HEARTBEAT { \ 839#define TYPE_SCTP_EVENT_TIMEOUT_HEARTBEAT { \
910 /* SCTP_STATE_EMPTY */ \
911 TYPE_SCTP_FUNC(sctp_sf_bug), \
912 /* SCTP_STATE_CLOSED */ \ 840 /* SCTP_STATE_CLOSED */ \
913 TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ 841 TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
914 /* SCTP_STATE_COOKIE_WAIT */ \ 842 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -928,8 +856,6 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_
928} 856}
929 857
930#define TYPE_SCTP_EVENT_TIMEOUT_SACK { \ 858#define TYPE_SCTP_EVENT_TIMEOUT_SACK { \
931 /* SCTP_STATE_EMPTY */ \
932 TYPE_SCTP_FUNC(sctp_sf_bug), \
933 /* SCTP_STATE_CLOSED */ \ 859 /* SCTP_STATE_CLOSED */ \
934 TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ 860 TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
935 /* SCTP_STATE_COOKIE_WAIT */ \ 861 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -949,8 +875,6 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_
949} 875}
950 876
951#define TYPE_SCTP_EVENT_TIMEOUT_AUTOCLOSE { \ 877#define TYPE_SCTP_EVENT_TIMEOUT_AUTOCLOSE { \
952 /* SCTP_STATE_EMPTY */ \
953 TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
954 /* SCTP_STATE_CLOSED */ \ 878 /* SCTP_STATE_CLOSED */ \
955 TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ 879 TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
956 /* SCTP_STATE_COOKIE_WAIT */ \ 880 /* SCTP_STATE_COOKIE_WAIT */ \
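[editor's note] The sm_statetable.c hunks above are almost entirely mechanical: with SCTP_STATE_EMPTY removed from the state machine, every primitive/other/timeout table drops its leading sctp_sf_bug row and now begins at SCTP_STATE_CLOSED. Two rows change behavior rather than position: TYPE_SCTP_OTHER_NO_PENDING_TSN in ESTABLISHED now maps to sctp_sf_do_no_pending_tsn, and the T5 shutdown guard timer now fires sctp_sf_t5_timer_expire in SHUTDOWN_PENDING as well as SHUTDOWN_SENT. A minimal standalone sketch of the dispatch pattern these macros build (hypothetical names, simplified types):

	/* Designated-initializer state table, one handler per state;
	 * dropping a state shrinks every table by exactly one row. */
	typedef int (*sm_func_t)(void *arg);

	static int sf_bug(void *arg)    { (void)arg; return -1; }
	static int sf_closed(void *arg) { (void)arg; return 0; }

	enum sm_state { ST_CLOSED, ST_COOKIE_WAIT, ST_NUM }; /* no ST_EMPTY */

	static const sm_func_t table[ST_NUM] = {
		[ST_CLOSED]      = sf_closed,
		[ST_COOKIE_WAIT] = sf_bug,
	};

	static int dispatch(enum sm_state s, void *arg)
	{
		return table[s](arg);
	}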
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index deb82e35a107..d3ccf7973c59 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -658,11 +658,15 @@ static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt)
658 goto err_bindx_rem; 658 goto err_bindx_rem;
659 } 659 }
660 660
661 if (sa_addr->v4.sin_port != htons(bp->port)) { 661 if (sa_addr->v4.sin_port &&
662 sa_addr->v4.sin_port != htons(bp->port)) {
662 retval = -EINVAL; 663 retval = -EINVAL;
663 goto err_bindx_rem; 664 goto err_bindx_rem;
664 } 665 }
665 666
667 if (!sa_addr->v4.sin_port)
668 sa_addr->v4.sin_port = htons(bp->port);
669
666 /* FIXME - There is probably a need to check if sk->sk_saddr and 670 /* FIXME - There is probably a need to check if sk->sk_saddr and
667 * sk->sk_rcv_addr are currently set to one of the addresses to 671 * sk->sk_rcv_addr are currently set to one of the addresses to
668 * be removed. This is something which needs to be looked into 672 * be removed. This is something which needs to be looked into
@@ -1380,6 +1384,7 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
1380 struct sctp_endpoint *ep; 1384 struct sctp_endpoint *ep;
1381 struct sctp_association *asoc; 1385 struct sctp_association *asoc;
1382 struct list_head *pos, *temp; 1386 struct list_head *pos, *temp;
1387 unsigned int data_was_unread;
1383 1388
1384 SCTP_DEBUG_PRINTK("sctp_close(sk: 0x%p, timeout:%ld)\n", sk, timeout); 1389 SCTP_DEBUG_PRINTK("sctp_close(sk: 0x%p, timeout:%ld)\n", sk, timeout);
1385 1390
@@ -1389,6 +1394,10 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
1389 1394
1390 ep = sctp_sk(sk)->ep; 1395 ep = sctp_sk(sk)->ep;
1391 1396
1397 /* Clean up any skbs sitting on the receive queue. */
1398 data_was_unread = sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
1399 data_was_unread += sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);
1400
1392 /* Walk all associations on an endpoint. */ 1401 /* Walk all associations on an endpoint. */
1393 list_for_each_safe(pos, temp, &ep->asocs) { 1402 list_for_each_safe(pos, temp, &ep->asocs) {
1394 asoc = list_entry(pos, struct sctp_association, asocs); 1403 asoc = list_entry(pos, struct sctp_association, asocs);
@@ -1406,7 +1415,9 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
1406 } 1415 }
1407 } 1416 }
1408 1417
1409 if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) { 1418 if (data_was_unread || !skb_queue_empty(&asoc->ulpq.lobby) ||
1419 !skb_queue_empty(&asoc->ulpq.reasm) ||
1420 (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) {
1410 struct sctp_chunk *chunk; 1421 struct sctp_chunk *chunk;
1411 1422
1412 chunk = sctp_make_abort_user(asoc, NULL, 0); 1423 chunk = sctp_make_abort_user(asoc, NULL, 0);
@@ -1416,10 +1427,6 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
1416 sctp_primitive_SHUTDOWN(asoc, NULL); 1427 sctp_primitive_SHUTDOWN(asoc, NULL);
1417 } 1428 }
1418 1429
1419 /* Clean up any skbs sitting on the receive queue. */
1420 sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
1421 sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);
1422
1423 /* On a TCP-style socket, block for at most linger_time if set. */ 1430 /* On a TCP-style socket, block for at most linger_time if set. */
1424 if (sctp_style(sk, TCP) && timeout) 1431 if (sctp_style(sk, TCP) && timeout)
1425 sctp_wait_for_close(sk, timeout); 1432 sctp_wait_for_close(sk, timeout);
@@ -1492,7 +1499,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
1492 struct sctp_chunk *chunk; 1499 struct sctp_chunk *chunk;
1493 union sctp_addr to; 1500 union sctp_addr to;
1494 struct sockaddr *msg_name = NULL; 1501 struct sockaddr *msg_name = NULL;
1495 struct sctp_sndrcvinfo default_sinfo = { 0 }; 1502 struct sctp_sndrcvinfo default_sinfo;
1496 struct sctp_sndrcvinfo *sinfo; 1503 struct sctp_sndrcvinfo *sinfo;
1497 struct sctp_initmsg *sinit; 1504 struct sctp_initmsg *sinit;
1498 sctp_assoc_t associd = 0; 1505 sctp_assoc_t associd = 0;
@@ -1756,6 +1763,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
1756 /* If the user didn't specify SNDRCVINFO, make up one with 1763 /* If the user didn't specify SNDRCVINFO, make up one with
1757 * some defaults. 1764 * some defaults.
1758 */ 1765 */
1766 memset(&default_sinfo, 0, sizeof(default_sinfo));
1759 default_sinfo.sinfo_stream = asoc->default_stream; 1767 default_sinfo.sinfo_stream = asoc->default_stream;
1760 default_sinfo.sinfo_flags = asoc->default_flags; 1768 default_sinfo.sinfo_flags = asoc->default_flags;
1761 default_sinfo.sinfo_ppid = asoc->default_ppid; 1769 default_sinfo.sinfo_ppid = asoc->default_ppid;
@@ -1786,12 +1794,10 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
1786 goto out_free; 1794 goto out_free;
1787 } 1795 }
1788 1796
1789 if (sinfo) { 1797 /* Check for invalid stream. */
1790 /* Check for invalid stream. */ 1798 if (sinfo->sinfo_stream >= asoc->c.sinit_num_ostreams) {
1791 if (sinfo->sinfo_stream >= asoc->c.sinit_num_ostreams) { 1799 err = -EINVAL;
1792 err = -EINVAL; 1800 goto out_free;
1793 goto out_free;
1794 }
1795 } 1801 }
1796 1802
1797 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); 1803 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
@@ -2070,10 +2076,33 @@ static int sctp_setsockopt_disable_fragments(struct sock *sk,
2070static int sctp_setsockopt_events(struct sock *sk, char __user *optval, 2076static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
2071 unsigned int optlen) 2077 unsigned int optlen)
2072{ 2078{
2079 struct sctp_association *asoc;
2080 struct sctp_ulpevent *event;
2081
2073 if (optlen > sizeof(struct sctp_event_subscribe)) 2082 if (optlen > sizeof(struct sctp_event_subscribe))
2074 return -EINVAL; 2083 return -EINVAL;
2075 if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen)) 2084 if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen))
2076 return -EFAULT; 2085 return -EFAULT;
2086
2087 /*
2088 * At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT,
2089 * if there is no data to be sent or retransmit, the stack will
2090 * immediately send up this notification.
2091 */
2092 if (sctp_ulpevent_type_enabled(SCTP_SENDER_DRY_EVENT,
2093 &sctp_sk(sk)->subscribe)) {
2094 asoc = sctp_id2assoc(sk, 0);
2095
2096 if (asoc && sctp_outq_is_empty(&asoc->outqueue)) {
2097 event = sctp_ulpevent_make_sender_dry_event(asoc,
2098 GFP_ATOMIC);
2099 if (!event)
2100 return -ENOMEM;
2101
2102 sctp_ulpq_tail_event(&asoc->ulpq, event);
2103 }
2104 }
2105
2077 return 0; 2106 return 0;
2078} 2107}
2079 2108
@@ -2283,7 +2312,7 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
2283 trans->param_flags = 2312 trans->param_flags =
2284 (trans->param_flags & ~SPP_PMTUD) | pmtud_change; 2313 (trans->param_flags & ~SPP_PMTUD) | pmtud_change;
2285 if (update) { 2314 if (update) {
2286 sctp_transport_pmtu(trans); 2315 sctp_transport_pmtu(trans, sctp_opt2sk(sp));
2287 sctp_assoc_sync_pmtu(asoc); 2316 sctp_assoc_sync_pmtu(asoc);
2288 } 2317 }
2289 } else if (asoc) { 2318 } else if (asoc) {
@@ -3215,14 +3244,9 @@ static int sctp_setsockopt_hmac_ident(struct sock *sk,
3215 if (optlen < sizeof(struct sctp_hmacalgo)) 3244 if (optlen < sizeof(struct sctp_hmacalgo))
3216 return -EINVAL; 3245 return -EINVAL;
3217 3246
 3218 hmacs = kmalloc(optlen, GFP_KERNEL); 3247 hmacs = memdup_user(optval, optlen);
3219 if (!hmacs) 3248 if (IS_ERR(hmacs))
3220 return -ENOMEM; 3249 return PTR_ERR(hmacs);
3221
3222 if (copy_from_user(hmacs, optval, optlen)) {
3223 err = -EFAULT;
3224 goto out;
3225 }
3226 3250
3227 idents = hmacs->shmac_num_idents; 3251 idents = hmacs->shmac_num_idents;
3228 if (idents == 0 || idents > SCTP_AUTH_NUM_HMACS || 3252 if (idents == 0 || idents > SCTP_AUTH_NUM_HMACS ||
@@ -3257,14 +3281,9 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
3257 if (optlen <= sizeof(struct sctp_authkey)) 3281 if (optlen <= sizeof(struct sctp_authkey))
3258 return -EINVAL; 3282 return -EINVAL;
3259 3283
 3260 authkey = kmalloc(optlen, GFP_KERNEL); 3284 authkey = memdup_user(optval, optlen);
3261 if (!authkey) 3285 if (IS_ERR(authkey))
3262 return -ENOMEM; 3286 return PTR_ERR(authkey);
3263
3264 if (copy_from_user(authkey, optval, optlen)) {
3265 ret = -EFAULT;
3266 goto out;
3267 }
3268 3287
3269 if (authkey->sca_keylength > optlen - sizeof(struct sctp_authkey)) { 3288 if (authkey->sca_keylength > optlen - sizeof(struct sctp_authkey)) {
3270 ret = -EINVAL; 3289 ret = -EINVAL;
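[editor's note] Both setsockopt paths above collapse a kmalloc() + copy_from_user() + error-unwind sequence into a single memdup_user() call, which reports failure via an ERR_PTR. A userspace analogue of the pattern (the kernel version returns ERR_PTR rather than NULL on failure):

	#include <stdlib.h>
	#include <string.h>

	/* allocate-and-copy in one step; NULL on allocation failure */
	static void *memdup(const void *src, size_t len)
	{
		void *p = malloc(len);

		if (p)
			memcpy(p, src, len);
		return p;
	}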
@@ -5283,6 +5302,55 @@ static int sctp_getsockopt_assoc_number(struct sock *sk, int len,
5283 return 0; 5302 return 0;
5284} 5303}
5285 5304
5305/*
5306 * 8.2.6. Get the Current Identifiers of Associations
5307 * (SCTP_GET_ASSOC_ID_LIST)
5308 *
5309 * This option gets the current list of SCTP association identifiers of
5310 * the SCTP associations handled by a one-to-many style socket.
5311 */
5312static int sctp_getsockopt_assoc_ids(struct sock *sk, int len,
5313 char __user *optval, int __user *optlen)
5314{
5315 struct sctp_sock *sp = sctp_sk(sk);
5316 struct sctp_association *asoc;
5317 struct sctp_assoc_ids *ids;
5318 u32 num = 0;
5319
5320 if (sctp_style(sk, TCP))
5321 return -EOPNOTSUPP;
5322
5323 if (len < sizeof(struct sctp_assoc_ids))
5324 return -EINVAL;
5325
5326 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) {
5327 num++;
5328 }
5329
5330 if (len < sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num)
5331 return -EINVAL;
5332
5333 len = sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num;
5334
5335 ids = kmalloc(len, GFP_KERNEL);
5336 if (unlikely(!ids))
5337 return -ENOMEM;
5338
5339 ids->gaids_number_of_ids = num;
5340 num = 0;
5341 list_for_each_entry(asoc, &(sp->ep->asocs), asocs) {
5342 ids->gaids_assoc_id[num++] = asoc->assoc_id;
5343 }
5344
5345 if (put_user(len, optlen) || copy_to_user(optval, ids, len)) {
5346 kfree(ids);
5347 return -EFAULT;
5348 }
5349
5350 kfree(ids);
5351 return 0;
5352}
5353
5286SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname, 5354SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
5287 char __user *optval, int __user *optlen) 5355 char __user *optval, int __user *optlen)
5288{ 5356{
@@ -5415,6 +5483,9 @@ SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
5415 case SCTP_GET_ASSOC_NUMBER: 5483 case SCTP_GET_ASSOC_NUMBER:
5416 retval = sctp_getsockopt_assoc_number(sk, len, optval, optlen); 5484 retval = sctp_getsockopt_assoc_number(sk, len, optval, optlen);
5417 break; 5485 break;
5486 case SCTP_GET_ASSOC_ID_LIST:
5487 retval = sctp_getsockopt_assoc_ids(sk, len, optval, optlen);
5488 break;
5418 default: 5489 default:
5419 retval = -ENOPROTOOPT; 5490 retval = -ENOPROTOOPT;
5420 break; 5491 break;
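[editor's note] A userspace sketch of driving the new SCTP_GET_ASSOC_ID_LIST option (structure and field names as added above); callers typically size the buffer from SCTP_GET_ASSOC_NUMBER first, since the kernel returns EINVAL when the guess is too small:

	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/socket.h>
	#include <netinet/sctp.h>

	static int print_assoc_ids(int sd, unsigned int max)
	{
		socklen_t len = sizeof(struct sctp_assoc_ids)
				+ max * sizeof(sctp_assoc_t);
		struct sctp_assoc_ids *ids = malloc(len);
		unsigned int i;

		if (!ids)
			return -1;
		if (getsockopt(sd, IPPROTO_SCTP, SCTP_GET_ASSOC_ID_LIST,
			       ids, &len) < 0) {
			free(ids);	/* e.g. EINVAL if max was too small */
			return -1;
		}
		for (i = 0; i < ids->gaids_number_of_ids; i++)
			printf("assoc id %d\n", (int)ids->gaids_assoc_id[i]);
		free(ids);
		return 0;
	}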
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index d3ae493d234a..394c57ca2f54 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -211,15 +211,17 @@ void sctp_transport_set_owner(struct sctp_transport *transport,
211} 211}
212 212
213/* Initialize the pmtu of a transport. */ 213/* Initialize the pmtu of a transport. */
214void sctp_transport_pmtu(struct sctp_transport *transport) 214void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
215{ 215{
216 struct dst_entry *dst; 216 /* If we don't have a fresh route, look one up */
217 217 if (!transport->dst || transport->dst->obsolete > 1) {
218 dst = transport->af_specific->get_dst(NULL, &transport->ipaddr, NULL); 218 dst_release(transport->dst);
219 transport->af_specific->get_dst(transport, &transport->saddr,
220 &transport->fl, sk);
221 }
219 222
220 if (dst) { 223 if (transport->dst) {
221 transport->pathmtu = dst_mtu(dst); 224 transport->pathmtu = dst_mtu(transport->dst);
222 dst_release(dst);
223 } else 225 } else
224 transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT; 226 transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
225} 227}
@@ -270,22 +272,19 @@ void sctp_transport_route(struct sctp_transport *transport,
270{ 272{
271 struct sctp_association *asoc = transport->asoc; 273 struct sctp_association *asoc = transport->asoc;
272 struct sctp_af *af = transport->af_specific; 274 struct sctp_af *af = transport->af_specific;
273 union sctp_addr *daddr = &transport->ipaddr;
274 struct dst_entry *dst;
275 275
276 dst = af->get_dst(asoc, daddr, saddr); 276 af->get_dst(transport, saddr, &transport->fl, sctp_opt2sk(opt));
277 277
278 if (saddr) 278 if (saddr)
279 memcpy(&transport->saddr, saddr, sizeof(union sctp_addr)); 279 memcpy(&transport->saddr, saddr, sizeof(union sctp_addr));
280 else 280 else
281 af->get_saddr(opt, asoc, dst, daddr, &transport->saddr); 281 af->get_saddr(opt, transport, &transport->fl);
282 282
283 transport->dst = dst;
284 if ((transport->param_flags & SPP_PMTUD_DISABLE) && transport->pathmtu) { 283 if ((transport->param_flags & SPP_PMTUD_DISABLE) && transport->pathmtu) {
285 return; 284 return;
286 } 285 }
287 if (dst) { 286 if (transport->dst) {
288 transport->pathmtu = dst_mtu(dst); 287 transport->pathmtu = dst_mtu(transport->dst);
289 288
290 /* Initialize sk->sk_rcv_saddr, if the transport is the 289 /* Initialize sk->sk_rcv_saddr, if the transport is the
291 * association's active path for getsockname(). 290 * association's active path for getsockname().
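[editor's note] The transport now owns its cached route (transport->dst) and flow (transport->fl), so sctp_transport_pmtu() only performs a fresh lookup when the cached entry is missing or obsolete. A self-contained sketch of that caching rule, with hypothetical types and helpers standing in for dst_entry and the af_specific callbacks:

	struct route { int obsolete; int mtu; };

	struct transport { struct route *dst; int pathmtu; };

	#define DEFAULT_MAXSEG 1500

	static struct route good_route = { .obsolete = 0, .mtu = 1400 };

	static void release_route(struct route *rt) { (void)rt; }
	static struct route *lookup_route(void) { return &good_route; }

	static void transport_pmtu(struct transport *t)
	{
		/* reuse the cached route unless it has gone stale */
		if (!t->dst || t->dst->obsolete > 1) {
			release_route(t->dst);
			t->dst = lookup_route();
		}
		t->pathmtu = t->dst ? t->dst->mtu : DEFAULT_MAXSEG;
	}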
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 61b1f5ada96a..8a84017834c2 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -843,7 +843,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_authkey(
843 ak = (struct sctp_authkey_event *) 843 ak = (struct sctp_authkey_event *)
844 skb_put(skb, sizeof(struct sctp_authkey_event)); 844 skb_put(skb, sizeof(struct sctp_authkey_event));
845 845
846 ak->auth_type = SCTP_AUTHENTICATION_INDICATION; 846 ak->auth_type = SCTP_AUTHENTICATION_EVENT;
847 ak->auth_flags = 0; 847 ak->auth_flags = 0;
848 ak->auth_length = sizeof(struct sctp_authkey_event); 848 ak->auth_length = sizeof(struct sctp_authkey_event);
849 849
@@ -862,6 +862,34 @@ fail:
862 return NULL; 862 return NULL;
863} 863}
864 864
865/*
866 * Socket Extensions for SCTP
867 * 6.3.10. SCTP_SENDER_DRY_EVENT
868 */
869struct sctp_ulpevent *sctp_ulpevent_make_sender_dry_event(
870 const struct sctp_association *asoc, gfp_t gfp)
871{
872 struct sctp_ulpevent *event;
873 struct sctp_sender_dry_event *sdry;
874 struct sk_buff *skb;
875
876 event = sctp_ulpevent_new(sizeof(struct sctp_sender_dry_event),
877 MSG_NOTIFICATION, gfp);
878 if (!event)
879 return NULL;
880
881 skb = sctp_event2skb(event);
882 sdry = (struct sctp_sender_dry_event *)
883 skb_put(skb, sizeof(struct sctp_sender_dry_event));
884
885 sdry->sender_dry_type = SCTP_SENDER_DRY_EVENT;
886 sdry->sender_dry_flags = 0;
887 sdry->sender_dry_length = sizeof(struct sctp_sender_dry_event);
888 sctp_ulpevent_set_owner(event, asoc);
889 sdry->sender_dry_assoc_id = sctp_assoc2id(asoc);
890
891 return event;
892}
865 893
866/* Return the notification type, assuming this is a notification 894/* Return the notification type, assuming this is a notification
867 * event. 895 * event.
@@ -1053,9 +1081,19 @@ void sctp_ulpevent_free(struct sctp_ulpevent *event)
1053} 1081}
1054 1082
1055/* Purge the skb lists holding ulpevents. */ 1083/* Purge the skb lists holding ulpevents. */
1056void sctp_queue_purge_ulpevents(struct sk_buff_head *list) 1084unsigned int sctp_queue_purge_ulpevents(struct sk_buff_head *list)
1057{ 1085{
1058 struct sk_buff *skb; 1086 struct sk_buff *skb;
1059 while ((skb = skb_dequeue(list)) != NULL) 1087 unsigned int data_unread = 0;
1060 sctp_ulpevent_free(sctp_skb2event(skb)); 1088
1089 while ((skb = skb_dequeue(list)) != NULL) {
1090 struct sctp_ulpevent *event = sctp_skb2event(skb);
1091
1092 if (!sctp_ulpevent_is_notification(event))
1093 data_unread += skb->len;
1094
1095 sctp_ulpevent_free(event);
1096 }
1097
1098 return data_unread;
1061} 1099}
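[editor's note] The purge helper now reports how many bytes of user data (as opposed to notifications) it discarded, which is what the reworked sctp_close() above uses to decide between a graceful SHUTDOWN and an ABORT. The counting pattern in isolation, with a simplified event type and the real free helper elided:

	#include <stddef.h>

	struct ev { struct ev *next; int is_notification; size_t len; };

	static size_t purge_events(struct ev **head)
	{
		size_t data_unread = 0;
		struct ev *e;

		while ((e = *head) != NULL) {
			*head = e->next;
			if (!e->is_notification)
				data_unread += e->len;	/* unread user data */
			/* the real code frees the event here */
		}
		return data_unread;
	}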
diff --git a/net/socket.c b/net/socket.c
index 310d16b1b3c9..02dc82db3d23 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -263,15 +263,6 @@ static struct inode *sock_alloc_inode(struct super_block *sb)
263 return &ei->vfs_inode; 263 return &ei->vfs_inode;
264} 264}
265 265
266
267
268static void wq_free_rcu(struct rcu_head *head)
269{
270 struct socket_wq *wq = container_of(head, struct socket_wq, rcu);
271
272 kfree(wq);
273}
274
275static void sock_destroy_inode(struct inode *inode) 266static void sock_destroy_inode(struct inode *inode)
276{ 267{
277 struct socket_alloc *ei; 268 struct socket_alloc *ei;
@@ -279,7 +270,7 @@ static void sock_destroy_inode(struct inode *inode)
279 270
280 ei = container_of(inode, struct socket_alloc, vfs_inode); 271 ei = container_of(inode, struct socket_alloc, vfs_inode);
281 wq = rcu_dereference_protected(ei->socket.wq, 1); 272 wq = rcu_dereference_protected(ei->socket.wq, 1);
282 call_rcu(&wq->rcu, wq_free_rcu); 273 kfree_rcu(wq, rcu);
283 kmem_cache_free(sock_inode_cachep, ei); 274 kmem_cache_free(sock_inode_cachep, ei);
284} 275}
285 276
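[editor's note] kfree_rcu() replaces a call_rcu() whose callback did nothing but kfree(); the two-argument form names the structure pointer and its rcu_head member. A kernel-style fragment, not buildable standalone, showing the before/after:

	/* before: a bespoke callback */
	static void wq_free_rcu(struct rcu_head *head)
	{
		kfree(container_of(head, struct socket_wq, rcu));
	}
	/* ... call_rcu(&wq->rcu, wq_free_rcu); */

	/* after: one line, same grace-period semantics */
	/* ... kfree_rcu(wq, rcu); */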
@@ -551,11 +542,10 @@ int sock_tx_timestamp(struct sock *sk, __u8 *tx_flags)
551} 542}
552EXPORT_SYMBOL(sock_tx_timestamp); 543EXPORT_SYMBOL(sock_tx_timestamp);
553 544
554static inline int __sock_sendmsg(struct kiocb *iocb, struct socket *sock, 545static inline int __sock_sendmsg_nosec(struct kiocb *iocb, struct socket *sock,
555 struct msghdr *msg, size_t size) 546 struct msghdr *msg, size_t size)
556{ 547{
557 struct sock_iocb *si = kiocb_to_siocb(iocb); 548 struct sock_iocb *si = kiocb_to_siocb(iocb);
558 int err;
559 549
560 sock_update_classid(sock->sk); 550 sock_update_classid(sock->sk);
561 551
@@ -564,13 +554,17 @@ static inline int __sock_sendmsg(struct kiocb *iocb, struct socket *sock,
564 si->msg = msg; 554 si->msg = msg;
565 si->size = size; 555 si->size = size;
566 556
567 err = security_socket_sendmsg(sock, msg, size);
568 if (err)
569 return err;
570
571 return sock->ops->sendmsg(iocb, sock, msg, size); 557 return sock->ops->sendmsg(iocb, sock, msg, size);
572} 558}
573 559
560static inline int __sock_sendmsg(struct kiocb *iocb, struct socket *sock,
561 struct msghdr *msg, size_t size)
562{
563 int err = security_socket_sendmsg(sock, msg, size);
564
565 return err ?: __sock_sendmsg_nosec(iocb, sock, msg, size);
566}
567
574int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) 568int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
575{ 569{
576 struct kiocb iocb; 570 struct kiocb iocb;
@@ -586,6 +580,20 @@ int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
586} 580}
587EXPORT_SYMBOL(sock_sendmsg); 581EXPORT_SYMBOL(sock_sendmsg);
588 582
583int sock_sendmsg_nosec(struct socket *sock, struct msghdr *msg, size_t size)
584{
585 struct kiocb iocb;
586 struct sock_iocb siocb;
587 int ret;
588
589 init_sync_kiocb(&iocb, NULL);
590 iocb.private = &siocb;
591 ret = __sock_sendmsg_nosec(&iocb, sock, msg, size);
592 if (-EIOCBQUEUED == ret)
593 ret = wait_on_sync_kiocb(&iocb);
594 return ret;
595}
596
589int kernel_sendmsg(struct socket *sock, struct msghdr *msg, 597int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
590 struct kvec *vec, size_t num, size_t size) 598 struct kvec *vec, size_t num, size_t size)
591{ 599{
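[editor's note] The refactoring splits the send path so the LSM hook runs once per call chain: __sock_sendmsg() performs the security check and then delegates to __sock_sendmsg_nosec(), which the sendmmsg() code below reuses for every datagram after the first. The shape of the split reduced to a standalone sketch, with stubs standing in for the hook and the protocol sendmsg:

	static int proto_sendmsg(void) { return 0; } /* sock->ops->sendmsg */
	static int lsm_check(void)     { return 0; } /* security hook */

	static int sendmsg_nosec(void)
	{
		return proto_sendmsg();
	}

	static int sendmsg_checked(void)
	{
		int err = lsm_check();

		return err ?: sendmsg_nosec(); /* GNU ?:, as in the patch */
	}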
@@ -1863,57 +1871,47 @@ SYSCALL_DEFINE2(shutdown, int, fd, int, how)
1863#define COMPAT_NAMELEN(msg) COMPAT_MSG(msg, msg_namelen) 1871#define COMPAT_NAMELEN(msg) COMPAT_MSG(msg, msg_namelen)
1864#define COMPAT_FLAGS(msg) COMPAT_MSG(msg, msg_flags) 1872#define COMPAT_FLAGS(msg) COMPAT_MSG(msg, msg_flags)
1865 1873
1866/* 1874static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
1867 * BSD sendmsg interface 1875 struct msghdr *msg_sys, unsigned flags, int nosec)
1868 */
1869
1870SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags)
1871{ 1876{
1872 struct compat_msghdr __user *msg_compat = 1877 struct compat_msghdr __user *msg_compat =
1873 (struct compat_msghdr __user *)msg; 1878 (struct compat_msghdr __user *)msg;
1874 struct socket *sock;
1875 struct sockaddr_storage address; 1879 struct sockaddr_storage address;
1876 struct iovec iovstack[UIO_FASTIOV], *iov = iovstack; 1880 struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
1877 unsigned char ctl[sizeof(struct cmsghdr) + 20] 1881 unsigned char ctl[sizeof(struct cmsghdr) + 20]
1878 __attribute__ ((aligned(sizeof(__kernel_size_t)))); 1882 __attribute__ ((aligned(sizeof(__kernel_size_t))));
1879 /* 20 is size of ipv6_pktinfo */ 1883 /* 20 is size of ipv6_pktinfo */
1880 unsigned char *ctl_buf = ctl; 1884 unsigned char *ctl_buf = ctl;
1881 struct msghdr msg_sys;
1882 int err, ctl_len, iov_size, total_len; 1885 int err, ctl_len, iov_size, total_len;
1883 int fput_needed;
1884 1886
1885 err = -EFAULT; 1887 err = -EFAULT;
1886 if (MSG_CMSG_COMPAT & flags) { 1888 if (MSG_CMSG_COMPAT & flags) {
1887 if (get_compat_msghdr(&msg_sys, msg_compat)) 1889 if (get_compat_msghdr(msg_sys, msg_compat))
1888 return -EFAULT; 1890 return -EFAULT;
1889 } else if (copy_from_user(&msg_sys, msg, sizeof(struct msghdr))) 1891 } else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr)))
1890 return -EFAULT; 1892 return -EFAULT;
1891 1893
1892 sock = sockfd_lookup_light(fd, &err, &fput_needed);
1893 if (!sock)
1894 goto out;
1895
1896 /* do not move before msg_sys is valid */ 1894 /* do not move before msg_sys is valid */
1897 err = -EMSGSIZE; 1895 err = -EMSGSIZE;
1898 if (msg_sys.msg_iovlen > UIO_MAXIOV) 1896 if (msg_sys->msg_iovlen > UIO_MAXIOV)
1899 goto out_put; 1897 goto out;
1900 1898
1901 /* Check whether to allocate the iovec area */ 1899 /* Check whether to allocate the iovec area */
1902 err = -ENOMEM; 1900 err = -ENOMEM;
1903 iov_size = msg_sys.msg_iovlen * sizeof(struct iovec); 1901 iov_size = msg_sys->msg_iovlen * sizeof(struct iovec);
1904 if (msg_sys.msg_iovlen > UIO_FASTIOV) { 1902 if (msg_sys->msg_iovlen > UIO_FASTIOV) {
1905 iov = sock_kmalloc(sock->sk, iov_size, GFP_KERNEL); 1903 iov = sock_kmalloc(sock->sk, iov_size, GFP_KERNEL);
1906 if (!iov) 1904 if (!iov)
1907 goto out_put; 1905 goto out;
1908 } 1906 }
1909 1907
1910 /* This will also move the address data into kernel space */ 1908 /* This will also move the address data into kernel space */
1911 if (MSG_CMSG_COMPAT & flags) { 1909 if (MSG_CMSG_COMPAT & flags) {
1912 err = verify_compat_iovec(&msg_sys, iov, 1910 err = verify_compat_iovec(msg_sys, iov,
1913 (struct sockaddr *)&address, 1911 (struct sockaddr *)&address,
1914 VERIFY_READ); 1912 VERIFY_READ);
1915 } else 1913 } else
1916 err = verify_iovec(&msg_sys, iov, 1914 err = verify_iovec(msg_sys, iov,
1917 (struct sockaddr *)&address, 1915 (struct sockaddr *)&address,
1918 VERIFY_READ); 1916 VERIFY_READ);
1919 if (err < 0) 1917 if (err < 0)
@@ -1922,17 +1920,17 @@ SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags)
1922 1920
1923 err = -ENOBUFS; 1921 err = -ENOBUFS;
1924 1922
1925 if (msg_sys.msg_controllen > INT_MAX) 1923 if (msg_sys->msg_controllen > INT_MAX)
1926 goto out_freeiov; 1924 goto out_freeiov;
1927 ctl_len = msg_sys.msg_controllen; 1925 ctl_len = msg_sys->msg_controllen;
1928 if ((MSG_CMSG_COMPAT & flags) && ctl_len) { 1926 if ((MSG_CMSG_COMPAT & flags) && ctl_len) {
1929 err = 1927 err =
1930 cmsghdr_from_user_compat_to_kern(&msg_sys, sock->sk, ctl, 1928 cmsghdr_from_user_compat_to_kern(msg_sys, sock->sk, ctl,
1931 sizeof(ctl)); 1929 sizeof(ctl));
1932 if (err) 1930 if (err)
1933 goto out_freeiov; 1931 goto out_freeiov;
1934 ctl_buf = msg_sys.msg_control; 1932 ctl_buf = msg_sys->msg_control;
1935 ctl_len = msg_sys.msg_controllen; 1933 ctl_len = msg_sys->msg_controllen;
1936 } else if (ctl_len) { 1934 } else if (ctl_len) {
1937 if (ctl_len > sizeof(ctl)) { 1935 if (ctl_len > sizeof(ctl)) {
1938 ctl_buf = sock_kmalloc(sock->sk, ctl_len, GFP_KERNEL); 1936 ctl_buf = sock_kmalloc(sock->sk, ctl_len, GFP_KERNEL);
@@ -1941,21 +1939,22 @@ SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags)
1941 } 1939 }
1942 err = -EFAULT; 1940 err = -EFAULT;
1943 /* 1941 /*
1944 * Careful! Before this, msg_sys.msg_control contains a user pointer. 1942 * Careful! Before this, msg_sys->msg_control contains a user pointer.
1945 * Afterwards, it will be a kernel pointer. Thus the compiler-assisted 1943 * Afterwards, it will be a kernel pointer. Thus the compiler-assisted
1946 * checking falls down on this. 1944 * checking falls down on this.
1947 */ 1945 */
1948 if (copy_from_user(ctl_buf, 1946 if (copy_from_user(ctl_buf,
1949 (void __user __force *)msg_sys.msg_control, 1947 (void __user __force *)msg_sys->msg_control,
1950 ctl_len)) 1948 ctl_len))
1951 goto out_freectl; 1949 goto out_freectl;
1952 msg_sys.msg_control = ctl_buf; 1950 msg_sys->msg_control = ctl_buf;
1953 } 1951 }
1954 msg_sys.msg_flags = flags; 1952 msg_sys->msg_flags = flags;
1955 1953
1956 if (sock->file->f_flags & O_NONBLOCK) 1954 if (sock->file->f_flags & O_NONBLOCK)
1957 msg_sys.msg_flags |= MSG_DONTWAIT; 1955 msg_sys->msg_flags |= MSG_DONTWAIT;
1958 err = sock_sendmsg(sock, &msg_sys, total_len); 1956 err = (nosec ? sock_sendmsg_nosec : sock_sendmsg)(sock, msg_sys,
1957 total_len);
1959 1958
1960out_freectl: 1959out_freectl:
1961 if (ctl_buf != ctl) 1960 if (ctl_buf != ctl)
@@ -1963,12 +1962,114 @@ out_freectl:
1963out_freeiov: 1962out_freeiov:
1964 if (iov != iovstack) 1963 if (iov != iovstack)
1965 sock_kfree_s(sock->sk, iov, iov_size); 1964 sock_kfree_s(sock->sk, iov, iov_size);
1966out_put: 1965out:
1966 return err;
1967}
1968
1969/*
1970 * BSD sendmsg interface
1971 */
1972
1973SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags)
1974{
1975 int fput_needed, err;
1976 struct msghdr msg_sys;
1977 struct socket *sock = sockfd_lookup_light(fd, &err, &fput_needed);
1978
1979 if (!sock)
1980 goto out;
1981
1982 err = __sys_sendmsg(sock, msg, &msg_sys, flags, 0);
1983
1967 fput_light(sock->file, fput_needed); 1984 fput_light(sock->file, fput_needed);
1968out: 1985out:
1969 return err; 1986 return err;
1970} 1987}
1971 1988
1989/*
1990 * Linux sendmmsg interface
1991 */
1992
1993int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
1994 unsigned int flags)
1995{
1996 int fput_needed, err, datagrams;
1997 struct socket *sock;
1998 struct mmsghdr __user *entry;
1999 struct compat_mmsghdr __user *compat_entry;
2000 struct msghdr msg_sys;
2001
2002 datagrams = 0;
2003
2004 sock = sockfd_lookup_light(fd, &err, &fput_needed);
2005 if (!sock)
2006 return err;
2007
2008 err = sock_error(sock->sk);
2009 if (err)
2010 goto out_put;
2011
2012 entry = mmsg;
2013 compat_entry = (struct compat_mmsghdr __user *)mmsg;
2014
2015 while (datagrams < vlen) {
2016 /*
2017 * No need to ask LSM for more than the first datagram.
2018 */
2019 if (MSG_CMSG_COMPAT & flags) {
2020 err = __sys_sendmsg(sock, (struct msghdr __user *)compat_entry,
2021 &msg_sys, flags, datagrams);
2022 if (err < 0)
2023 break;
2024 err = __put_user(err, &compat_entry->msg_len);
2025 ++compat_entry;
2026 } else {
2027 err = __sys_sendmsg(sock, (struct msghdr __user *)entry,
2028 &msg_sys, flags, datagrams);
2029 if (err < 0)
2030 break;
2031 err = put_user(err, &entry->msg_len);
2032 ++entry;
2033 }
2034
2035 if (err)
2036 break;
2037 ++datagrams;
2038 }
2039
2040out_put:
2041 fput_light(sock->file, fput_needed);
2042
2043 if (err == 0)
2044 return datagrams;
2045
2046 if (datagrams != 0) {
2047 /*
 2049 * We may send fewer entries than requested (vlen) if the
 2050 * sock is non-blocking...
2050 */
2051 if (err != -EAGAIN) {
2052 /*
2053 * ... or if sendmsg returns an error after we
2054 * send some datagrams, where we record the
2055 * error to return on the next call or if the
2056 * app asks about it using getsockopt(SO_ERROR).
2057 */
2058 sock->sk->sk_err = -err;
2059 }
2060
2061 return datagrams;
2062 }
2063
2064 return err;
2065}
2066
2067SYSCALL_DEFINE4(sendmmsg, int, fd, struct mmsghdr __user *, mmsg,
2068 unsigned int, vlen, unsigned int, flags)
2069{
2070 return __sys_sendmmsg(fd, mmsg, vlen, flags);
2071}
2072
1972static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg, 2073static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
1973 struct msghdr *msg_sys, unsigned flags, int nosec) 2074 struct msghdr *msg_sys, unsigned flags, int nosec)
1974{ 2075{
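[editor's note] From userspace the new syscall batches several datagrams per call and returns how many were actually sent; a sketch assuming a libc wrapper is available (otherwise invoke it via syscall(2)):

	#define _GNU_SOURCE
	#include <string.h>
	#include <sys/socket.h>
	#include <sys/uio.h>

	static int send_two(int sd)
	{
		struct mmsghdr msgs[2];
		struct iovec iov[2] = {
			{ .iov_base = (void *)"one", .iov_len = 3 },
			{ .iov_base = (void *)"two", .iov_len = 3 },
		};

		memset(msgs, 0, sizeof(msgs));
		msgs[0].msg_hdr.msg_iov = &iov[0];
		msgs[0].msg_hdr.msg_iovlen = 1;
		msgs[1].msg_hdr.msg_iov = &iov[1];
		msgs[1].msg_hdr.msg_iovlen = 1;

		/* returns the number of messages sent, possibly < 2 */
		return sendmmsg(sd, msgs, 2, 0);
	}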
@@ -2122,14 +2223,16 @@ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
2122 */ 2223 */
2123 if (MSG_CMSG_COMPAT & flags) { 2224 if (MSG_CMSG_COMPAT & flags) {
2124 err = __sys_recvmsg(sock, (struct msghdr __user *)compat_entry, 2225 err = __sys_recvmsg(sock, (struct msghdr __user *)compat_entry,
2125 &msg_sys, flags, datagrams); 2226 &msg_sys, flags & ~MSG_WAITFORONE,
2227 datagrams);
2126 if (err < 0) 2228 if (err < 0)
2127 break; 2229 break;
2128 err = __put_user(err, &compat_entry->msg_len); 2230 err = __put_user(err, &compat_entry->msg_len);
2129 ++compat_entry; 2231 ++compat_entry;
2130 } else { 2232 } else {
2131 err = __sys_recvmsg(sock, (struct msghdr __user *)entry, 2233 err = __sys_recvmsg(sock, (struct msghdr __user *)entry,
2132 &msg_sys, flags, datagrams); 2234 &msg_sys, flags & ~MSG_WAITFORONE,
2235 datagrams);
2133 if (err < 0) 2236 if (err < 0)
2134 break; 2237 break;
2135 err = put_user(err, &entry->msg_len); 2238 err = put_user(err, &entry->msg_len);
@@ -2214,11 +2317,11 @@ SYSCALL_DEFINE5(recvmmsg, int, fd, struct mmsghdr __user *, mmsg,
2214#ifdef __ARCH_WANT_SYS_SOCKETCALL 2317#ifdef __ARCH_WANT_SYS_SOCKETCALL
2215/* Argument list sizes for sys_socketcall */ 2318/* Argument list sizes for sys_socketcall */
2216#define AL(x) ((x) * sizeof(unsigned long)) 2319#define AL(x) ((x) * sizeof(unsigned long))
2217static const unsigned char nargs[20] = { 2320static const unsigned char nargs[21] = {
2218 AL(0), AL(3), AL(3), AL(3), AL(2), AL(3), 2321 AL(0), AL(3), AL(3), AL(3), AL(2), AL(3),
2219 AL(3), AL(3), AL(4), AL(4), AL(4), AL(6), 2322 AL(3), AL(3), AL(4), AL(4), AL(4), AL(6),
2220 AL(6), AL(2), AL(5), AL(5), AL(3), AL(3), 2323 AL(6), AL(2), AL(5), AL(5), AL(3), AL(3),
2221 AL(4), AL(5) 2324 AL(4), AL(5), AL(4)
2222}; 2325};
2223 2326
2224#undef AL 2327#undef AL
@@ -2238,7 +2341,7 @@ SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args)
2238 int err; 2341 int err;
2239 unsigned int len; 2342 unsigned int len;
2240 2343
2241 if (call < 1 || call > SYS_RECVMMSG) 2344 if (call < 1 || call > SYS_SENDMMSG)
2242 return -EINVAL; 2345 return -EINVAL;
2243 2346
2244 len = nargs[call]; 2347 len = nargs[call];
@@ -2313,6 +2416,9 @@ SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args)
2313 case SYS_SENDMSG: 2416 case SYS_SENDMSG:
2314 err = sys_sendmsg(a0, (struct msghdr __user *)a1, a[2]); 2417 err = sys_sendmsg(a0, (struct msghdr __user *)a1, a[2]);
2315 break; 2418 break;
2419 case SYS_SENDMMSG:
2420 err = sys_sendmmsg(a0, (struct mmsghdr __user *)a1, a[2], a[3]);
2421 break;
2316 case SYS_RECVMSG: 2422 case SYS_RECVMSG:
2317 err = sys_recvmsg(a0, (struct msghdr __user *)a1, a[2]); 2423 err = sys_recvmsg(a0, (struct msghdr __user *)a1, a[2]);
2318 break; 2424 break;
@@ -2643,13 +2749,13 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
2643 return -EFAULT; 2749 return -EFAULT;
2644 2750
2645 if (convert_in) { 2751 if (convert_in) {
2646 /* We expect there to be holes between fs.m_u and 2752 /* We expect there to be holes between fs.m_ext and
2647 * fs.ring_cookie and at the end of fs, but nowhere else. 2753 * fs.ring_cookie and at the end of fs, but nowhere else.
2648 */ 2754 */
2649 BUILD_BUG_ON(offsetof(struct compat_ethtool_rxnfc, fs.m_u) + 2755 BUILD_BUG_ON(offsetof(struct compat_ethtool_rxnfc, fs.m_ext) +
2650 sizeof(compat_rxnfc->fs.m_u) != 2756 sizeof(compat_rxnfc->fs.m_ext) !=
2651 offsetof(struct ethtool_rxnfc, fs.m_u) + 2757 offsetof(struct ethtool_rxnfc, fs.m_ext) +
2652 sizeof(rxnfc->fs.m_u)); 2758 sizeof(rxnfc->fs.m_ext));
2653 BUILD_BUG_ON( 2759 BUILD_BUG_ON(
2654 offsetof(struct compat_ethtool_rxnfc, fs.location) - 2760 offsetof(struct compat_ethtool_rxnfc, fs.location) -
2655 offsetof(struct compat_ethtool_rxnfc, fs.ring_cookie) != 2761 offsetof(struct compat_ethtool_rxnfc, fs.ring_cookie) !=
@@ -2657,7 +2763,7 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
2657 offsetof(struct ethtool_rxnfc, fs.ring_cookie)); 2763 offsetof(struct ethtool_rxnfc, fs.ring_cookie));
2658 2764
2659 if (copy_in_user(rxnfc, compat_rxnfc, 2765 if (copy_in_user(rxnfc, compat_rxnfc,
2660 (void *)(&rxnfc->fs.m_u + 1) - 2766 (void *)(&rxnfc->fs.m_ext + 1) -
2661 (void *)rxnfc) || 2767 (void *)rxnfc) ||
2662 copy_in_user(&rxnfc->fs.ring_cookie, 2768 copy_in_user(&rxnfc->fs.ring_cookie,
2663 &compat_rxnfc->fs.ring_cookie, 2769 &compat_rxnfc->fs.ring_cookie,
@@ -2674,7 +2780,7 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
2674 2780
2675 if (convert_out) { 2781 if (convert_out) {
2676 if (copy_in_user(compat_rxnfc, rxnfc, 2782 if (copy_in_user(compat_rxnfc, rxnfc,
2677 (const void *)(&rxnfc->fs.m_u + 1) - 2783 (const void *)(&rxnfc->fs.m_ext + 1) -
2678 (const void *)rxnfc) || 2784 (const void *)rxnfc) ||
2679 copy_in_user(&compat_rxnfc->fs.ring_cookie, 2785 copy_in_user(&compat_rxnfc->fs.ring_cookie,
2680 &rxnfc->fs.ring_cookie, 2786 &rxnfc->fs.ring_cookie,
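[editor's note] The ethtool compat fix updates the BUILD_BUG_ON checks to reference fs.m_ext rather than fs.m_u, matching the grown flow-spec structure. The technique, pinning layout assumptions at compile time, has a direct userspace counterpart in C11 static_assert; the structures below are hypothetical, for illustration only:

	#include <assert.h>
	#include <stddef.h>

	struct native { int cmd; long ring_cookie; };
	struct compat { int cmd; long ring_cookie; };

	/* fails to compile if the layouts ever diverge */
	static_assert(offsetof(struct native, ring_cookie) ==
		      offsetof(struct compat, ring_cookie),
		      "native and compat layouts must agree");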
diff --git a/net/sunrpc/addr.c b/net/sunrpc/addr.c
index 1419d0cdbbac..4195233c4914 100644
--- a/net/sunrpc/addr.c
+++ b/net/sunrpc/addr.c
@@ -151,7 +151,7 @@ static size_t rpc_pton4(const char *buf, const size_t buflen,
151 return 0; 151 return 0;
152 152
153 sin->sin_family = AF_INET; 153 sin->sin_family = AF_INET;
154 return sizeof(struct sockaddr_in);; 154 return sizeof(struct sockaddr_in);
155} 155}
156 156
157#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 157#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 67e31276682a..cd6e4aa19dbf 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -326,10 +326,12 @@ rpcauth_prune_expired(struct list_head *free, int nr_to_scan)
326 * Run memory cache shrinker. 326 * Run memory cache shrinker.
327 */ 327 */
328static int 328static int
329rpcauth_cache_shrinker(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask) 329rpcauth_cache_shrinker(struct shrinker *shrink, struct shrink_control *sc)
330{ 330{
331 LIST_HEAD(free); 331 LIST_HEAD(free);
332 int res; 332 int res;
333 int nr_to_scan = sc->nr_to_scan;
334 gfp_t gfp_mask = sc->gfp_mask;
333 335
334 if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL) 336 if ((gfp_mask & GFP_KERNEL) != GFP_KERNEL)
335 return (nr_to_scan == 0) ? 0 : -1; 337 return (nr_to_scan == 0) ? 0 : -1;
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 339ba64cce1e..5daf6cc4faea 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -577,13 +577,13 @@ retry:
577 } 577 }
578 inode = &gss_msg->inode->vfs_inode; 578 inode = &gss_msg->inode->vfs_inode;
579 for (;;) { 579 for (;;) {
580 prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_INTERRUPTIBLE); 580 prepare_to_wait(&gss_msg->waitqueue, &wait, TASK_KILLABLE);
581 spin_lock(&inode->i_lock); 581 spin_lock(&inode->i_lock);
582 if (gss_msg->ctx != NULL || gss_msg->msg.errno < 0) { 582 if (gss_msg->ctx != NULL || gss_msg->msg.errno < 0) {
583 break; 583 break;
584 } 584 }
585 spin_unlock(&inode->i_lock); 585 spin_unlock(&inode->i_lock);
586 if (signalled()) { 586 if (fatal_signal_pending(current)) {
587 err = -ERESTARTSYS; 587 err = -ERESTARTSYS;
588 goto out_intr; 588 goto out_intr;
589 } 589 }
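[editor's note] Switching the upcall wait from TASK_INTERRUPTIBLE/signalled() to TASK_KILLABLE/fatal_signal_pending() means only a fatal signal (SIGKILL) breaks the wait; ordinary signals no longer abort a GSS context upcall midway. The canonical killable-wait shape, as a kernel-style fragment (not buildable standalone):

	for (;;) {
		prepare_to_wait(&waitqueue, &wait, TASK_KILLABLE);
		if (condition)
			break;
		if (fatal_signal_pending(current))
			return -ERESTARTSYS;	/* only SIGKILL gets here */
		schedule();
	}
	finish_wait(&waitqueue, &wait);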
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 0a9a2ec2e469..c3b75333b821 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -43,6 +43,7 @@
43#include <linux/sunrpc/gss_krb5.h> 43#include <linux/sunrpc/gss_krb5.h>
44#include <linux/sunrpc/xdr.h> 44#include <linux/sunrpc/xdr.h>
45#include <linux/crypto.h> 45#include <linux/crypto.h>
46#include <linux/sunrpc/gss_krb5_enctypes.h>
46 47
47#ifdef RPC_DEBUG 48#ifdef RPC_DEBUG
48# define RPCDBG_FACILITY RPCDBG_AUTH 49# define RPCDBG_FACILITY RPCDBG_AUTH
@@ -750,7 +751,7 @@ static struct gss_api_mech gss_kerberos_mech = {
750 .gm_ops = &gss_kerberos_ops, 751 .gm_ops = &gss_kerberos_ops,
751 .gm_pf_num = ARRAY_SIZE(gss_kerberos_pfs), 752 .gm_pf_num = ARRAY_SIZE(gss_kerberos_pfs),
752 .gm_pfs = gss_kerberos_pfs, 753 .gm_pfs = gss_kerberos_pfs,
753 .gm_upcall_enctypes = "18,17,16,23,3,1,2", 754 .gm_upcall_enctypes = KRB5_SUPPORTED_ENCTYPES,
754}; 755};
755 756
756static int __init init_kerberos_module(void) 757static int __init init_kerberos_module(void)
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 8d83f9d48713..8c9141583d6f 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -13,10 +13,6 @@
13 * and need to be refreshed, or when a packet was damaged in transit. 13 * and need to be refreshed, or when a packet was damaged in transit.
 14 * This may have to be moved to the VFS layer. 14 * This may have to be moved to the VFS layer.
15 * 15 *
16 * NB: BSD uses a more intelligent approach to guessing when a request
17 * or reply has been lost by keeping the RTO estimate for each procedure.
18 * We currently make do with a constant timeout value.
19 *
20 * Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com> 16 * Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
21 * Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de> 17 * Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
22 */ 18 */
@@ -32,7 +28,9 @@
32#include <linux/slab.h> 28#include <linux/slab.h>
33#include <linux/utsname.h> 29#include <linux/utsname.h>
34#include <linux/workqueue.h> 30#include <linux/workqueue.h>
31#include <linux/in.h>
35#include <linux/in6.h> 32#include <linux/in6.h>
33#include <linux/un.h>
36 34
37#include <linux/sunrpc/clnt.h> 35#include <linux/sunrpc/clnt.h>
38#include <linux/sunrpc/rpc_pipe_fs.h> 36#include <linux/sunrpc/rpc_pipe_fs.h>
@@ -298,22 +296,27 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
298 * up a string representation of the passed-in address. 296 * up a string representation of the passed-in address.
299 */ 297 */
300 if (args->servername == NULL) { 298 if (args->servername == NULL) {
299 struct sockaddr_un *sun =
300 (struct sockaddr_un *)args->address;
301 struct sockaddr_in *sin =
302 (struct sockaddr_in *)args->address;
303 struct sockaddr_in6 *sin6 =
304 (struct sockaddr_in6 *)args->address;
305
301 servername[0] = '\0'; 306 servername[0] = '\0';
302 switch (args->address->sa_family) { 307 switch (args->address->sa_family) {
303 case AF_INET: { 308 case AF_LOCAL:
304 struct sockaddr_in *sin = 309 snprintf(servername, sizeof(servername), "%s",
305 (struct sockaddr_in *)args->address; 310 sun->sun_path);
311 break;
312 case AF_INET:
306 snprintf(servername, sizeof(servername), "%pI4", 313 snprintf(servername, sizeof(servername), "%pI4",
307 &sin->sin_addr.s_addr); 314 &sin->sin_addr.s_addr);
308 break; 315 break;
309 } 316 case AF_INET6:
310 case AF_INET6: {
311 struct sockaddr_in6 *sin =
312 (struct sockaddr_in6 *)args->address;
313 snprintf(servername, sizeof(servername), "%pI6", 317 snprintf(servername, sizeof(servername), "%pI6",
314 &sin->sin6_addr); 318 &sin6->sin6_addr);
315 break; 319 break;
316 }
317 default: 320 default:
318 /* caller wants default server name, but 321 /* caller wants default server name, but
319 * address family isn't recognized. */ 322 * address family isn't recognized. */
@@ -1058,7 +1061,7 @@ call_allocate(struct rpc_task *task)
1058 1061
1059 dprintk("RPC: %5u rpc_buffer allocation failed\n", task->tk_pid); 1062 dprintk("RPC: %5u rpc_buffer allocation failed\n", task->tk_pid);
1060 1063
1061 if (RPC_IS_ASYNC(task) || !signalled()) { 1064 if (RPC_IS_ASYNC(task) || !fatal_signal_pending(current)) {
1062 task->tk_action = call_allocate; 1065 task->tk_action = call_allocate;
1063 rpc_delay(task, HZ>>4); 1066 rpc_delay(task, HZ>>4);
1064 return; 1067 return;
@@ -1172,6 +1175,9 @@ call_bind_status(struct rpc_task *task)
1172 status = -EOPNOTSUPP; 1175 status = -EOPNOTSUPP;
1173 break; 1176 break;
1174 } 1177 }
1178 if (task->tk_rebind_retry == 0)
1179 break;
1180 task->tk_rebind_retry--;
1175 rpc_delay(task, 3*HZ); 1181 rpc_delay(task, 3*HZ);
1176 goto retry_timeout; 1182 goto retry_timeout;
1177 case -ETIMEDOUT: 1183 case -ETIMEDOUT:
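[editor's note] With AF_LOCAL transports in the mix, rpc_create() must be able to stringify a sockaddr_un as well; the socket path becomes the server name. A runnable userspace equivalent of the switch added above:

	#include <stdio.h>
	#include <sys/socket.h>
	#include <sys/un.h>
	#include <netinet/in.h>
	#include <arpa/inet.h>

	static void name_of(const struct sockaddr *sa, char *buf, size_t len)
	{
		switch (sa->sa_family) {
		case AF_LOCAL:
			snprintf(buf, len, "%s",
				 ((const struct sockaddr_un *)sa)->sun_path);
			break;
		case AF_INET: {
			const struct sockaddr_in *sin = (const void *)sa;
			inet_ntop(AF_INET, &sin->sin_addr, buf, len);
			break;
		}
		case AF_INET6: {
			const struct sockaddr_in6 *sin6 = (const void *)sa;
			inet_ntop(AF_INET6, &sin6->sin6_addr, buf, len);
			break;
		}
		default:
			buf[0] = '\0';	/* unrecognized family */
		}
	}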
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index c652e4cc9fe9..e45d2fbbe5a8 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -16,6 +16,7 @@
16 16
17#include <linux/types.h> 17#include <linux/types.h>
18#include <linux/socket.h> 18#include <linux/socket.h>
19#include <linux/un.h>
19#include <linux/in.h> 20#include <linux/in.h>
20#include <linux/in6.h> 21#include <linux/in6.h>
21#include <linux/kernel.h> 22#include <linux/kernel.h>
@@ -32,6 +33,8 @@
32# define RPCDBG_FACILITY RPCDBG_BIND 33# define RPCDBG_FACILITY RPCDBG_BIND
33#endif 34#endif
34 35
36#define RPCBIND_SOCK_PATHNAME "/var/run/rpcbind.sock"
37
35#define RPCBIND_PROGRAM (100000u) 38#define RPCBIND_PROGRAM (100000u)
36#define RPCBIND_PORT (111u) 39#define RPCBIND_PORT (111u)
37 40
@@ -158,20 +161,69 @@ static void rpcb_map_release(void *data)
158 kfree(map); 161 kfree(map);
159} 162}
160 163
161static const struct sockaddr_in rpcb_inaddr_loopback = { 164/*
162 .sin_family = AF_INET, 165 * Returns zero on success, otherwise a negative errno value
163 .sin_addr.s_addr = htonl(INADDR_LOOPBACK), 166 * is returned.
164 .sin_port = htons(RPCBIND_PORT), 167 */
165}; 168static int rpcb_create_local_unix(void)
169{
170 static const struct sockaddr_un rpcb_localaddr_rpcbind = {
171 .sun_family = AF_LOCAL,
172 .sun_path = RPCBIND_SOCK_PATHNAME,
173 };
174 struct rpc_create_args args = {
175 .net = &init_net,
176 .protocol = XPRT_TRANSPORT_LOCAL,
177 .address = (struct sockaddr *)&rpcb_localaddr_rpcbind,
178 .addrsize = sizeof(rpcb_localaddr_rpcbind),
179 .servername = "localhost",
180 .program = &rpcb_program,
181 .version = RPCBVERS_2,
182 .authflavor = RPC_AUTH_NULL,
183 };
184 struct rpc_clnt *clnt, *clnt4;
185 int result = 0;
186
187 /*
188 * Because we requested an RPC PING at transport creation time,
189 * this works only if the user space portmapper is rpcbind, and
190 * it's listening on AF_LOCAL on the named socket.
191 */
192 clnt = rpc_create(&args);
193 if (IS_ERR(clnt)) {
194 dprintk("RPC: failed to create AF_LOCAL rpcbind "
195 "client (errno %ld).\n", PTR_ERR(clnt));
196 result = -PTR_ERR(clnt);
197 goto out;
198 }
199
200 clnt4 = rpc_bind_new_program(clnt, &rpcb_program, RPCBVERS_4);
201 if (IS_ERR(clnt4)) {
202 dprintk("RPC: failed to bind second program to "
203 "rpcbind v4 client (errno %ld).\n",
204 PTR_ERR(clnt4));
205 clnt4 = NULL;
206 }
207
208 /* Protected by rpcb_create_local_mutex */
209 rpcb_local_clnt = clnt;
210 rpcb_local_clnt4 = clnt4;
166 211
167static DEFINE_MUTEX(rpcb_create_local_mutex); 212out:
213 return result;
214}
168 215
169/* 216/*
170 * Returns zero on success, otherwise a negative errno value 217 * Returns zero on success, otherwise a negative errno value
171 * is returned. 218 * is returned.
172 */ 219 */
173static int rpcb_create_local(void) 220static int rpcb_create_local_net(void)
174{ 221{
222 static const struct sockaddr_in rpcb_inaddr_loopback = {
223 .sin_family = AF_INET,
224 .sin_addr.s_addr = htonl(INADDR_LOOPBACK),
225 .sin_port = htons(RPCBIND_PORT),
226 };
175 struct rpc_create_args args = { 227 struct rpc_create_args args = {
176 .net = &init_net, 228 .net = &init_net,
177 .protocol = XPRT_TRANSPORT_TCP, 229 .protocol = XPRT_TRANSPORT_TCP,
@@ -186,13 +238,6 @@ static int rpcb_create_local(void)
186 struct rpc_clnt *clnt, *clnt4; 238 struct rpc_clnt *clnt, *clnt4;
187 int result = 0; 239 int result = 0;
188 240
189 if (rpcb_local_clnt)
190 return result;
191
192 mutex_lock(&rpcb_create_local_mutex);
193 if (rpcb_local_clnt)
194 goto out;
195
196 clnt = rpc_create(&args); 241 clnt = rpc_create(&args);
197 if (IS_ERR(clnt)) { 242 if (IS_ERR(clnt)) {
198 dprintk("RPC: failed to create local rpcbind " 243 dprintk("RPC: failed to create local rpcbind "
@@ -214,10 +259,34 @@ static int rpcb_create_local(void)
214 clnt4 = NULL; 259 clnt4 = NULL;
215 } 260 }
216 261
262 /* Protected by rpcb_create_local_mutex */
217 rpcb_local_clnt = clnt; 263 rpcb_local_clnt = clnt;
218 rpcb_local_clnt4 = clnt4; 264 rpcb_local_clnt4 = clnt4;
219 265
220out: 266out:
267 return result;
268}
269
270/*
271 * Returns zero on success, otherwise a negative errno value
272 * is returned.
273 */
274static int rpcb_create_local(void)
275{
276 static DEFINE_MUTEX(rpcb_create_local_mutex);
277 int result = 0;
278
279 if (rpcb_local_clnt)
280 return result;
281
282 mutex_lock(&rpcb_create_local_mutex);
283 if (rpcb_local_clnt)
284 goto out;
285
286 if (rpcb_create_local_unix() != 0)
287 result = rpcb_create_local_net();
288
289out:
221 mutex_unlock(&rpcb_create_local_mutex); 290 mutex_unlock(&rpcb_create_local_mutex);
222 return result; 291 return result;
223} 292}
@@ -528,7 +597,7 @@ void rpcb_getport_async(struct rpc_task *task)
528 u32 bind_version; 597 u32 bind_version;
529 struct rpc_xprt *xprt; 598 struct rpc_xprt *xprt;
530 struct rpc_clnt *rpcb_clnt; 599 struct rpc_clnt *rpcb_clnt;
531 static struct rpcbind_args *map; 600 struct rpcbind_args *map;
532 struct rpc_task *child; 601 struct rpc_task *child;
533 struct sockaddr_storage addr; 602 struct sockaddr_storage addr;
534 struct sockaddr *sap = (struct sockaddr *)&addr; 603 struct sockaddr *sap = (struct sockaddr *)&addr;
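[editor's note] rpcb_create_local() now tries the AF_LOCAL rpcbind socket first and only falls back to the TCP loopback transport when that fails, keeping the double-checked locking around both attempts. The control flow in miniature, with stub creators and hypothetical names:

	#include <pthread.h>

	static int have_client;
	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	static int create_unix(void) { return -1; } /* no rpcbind socket */
	static int create_net(void)  { return 0; }

	static int create_local(void)
	{
		int result = 0;

		if (have_client)
			return 0;	/* unlocked fast path */

		pthread_mutex_lock(&lock);
		if (!have_client) {
			if (create_unix() != 0)
				result = create_net();
			if (result == 0)
				have_client = 1;
		}
		pthread_mutex_unlock(&lock);
		return result;
	}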
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 6b43ee7221d5..4814e246a874 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -616,30 +616,25 @@ static void __rpc_execute(struct rpc_task *task)
616 BUG_ON(RPC_IS_QUEUED(task)); 616 BUG_ON(RPC_IS_QUEUED(task));
617 617
618 for (;;) { 618 for (;;) {
619 void (*do_action)(struct rpc_task *);
619 620
620 /* 621 /*
621 * Execute any pending callback. 622 * Execute any pending callback first.
622 */ 623 */
623 if (task->tk_callback) { 624 do_action = task->tk_callback;
624 void (*save_callback)(struct rpc_task *); 625 task->tk_callback = NULL;
625 626 if (do_action == NULL) {
626 /*
627 * We set tk_callback to NULL before calling it,
628 * in case it sets the tk_callback field itself:
629 */
630 save_callback = task->tk_callback;
631 task->tk_callback = NULL;
632 save_callback(task);
633 } else {
634 /* 627 /*
635 * Perform the next FSM step. 628 * Perform the next FSM step.
636 * tk_action may be NULL when the task has been killed 629 * tk_action may be NULL if the task has been killed.
637 * by someone else. 630 * In particular, note that rpc_killall_tasks may
631 * do this at any time, so beware when dereferencing.
638 */ 632 */
639 if (task->tk_action == NULL) 633 do_action = task->tk_action;
634 if (do_action == NULL)
640 break; 635 break;
641 task->tk_action(task);
642 } 636 }
637 do_action(task);
643 638
644 /* 639 /*
645 * Lockless check for whether task is sleeping or not. 640 * Lockless check for whether task is sleeping or not.
@@ -792,6 +787,7 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta
792 /* Initialize retry counters */ 787 /* Initialize retry counters */
793 task->tk_garb_retry = 2; 788 task->tk_garb_retry = 2;
794 task->tk_cred_retry = 2; 789 task->tk_cred_retry = 2;
790 task->tk_rebind_retry = 2;
795 791
796 task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW; 792 task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
797 task->tk_owner = current->tgid; 793 task->tk_owner = current->tgid;
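[editor's note] The __rpc_execute() rework folds the callback and FSM-action branches into one do_action pointer: the pending callback is consumed first (cleared before the call so it may re-arm itself), then the FSM action, and a NULL action, which rpc_killall_tasks can set at any time, ends the loop. The restructured loop as a standalone sketch with simplified types:

	typedef void (*action_fn)(void *task);

	struct task { action_fn callback; action_fn action; };

	static void execute(struct task *t)
	{
		for (;;) {
			/* consume any one-shot callback first; clear it
			 * before the call in case it re-arms itself */
			action_fn do_action = t->callback;

			t->callback = NULL;
			if (do_action == NULL) {
				do_action = t->action;
				if (do_action == NULL)
					break;	/* task killed, nothing left */
			}
			do_action(t);
		}
	}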
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 08e05a8ce025..2b90292e9505 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -942,6 +942,8 @@ static void svc_unregister(const struct svc_serv *serv)
942 if (progp->pg_vers[i]->vs_hidden) 942 if (progp->pg_vers[i]->vs_hidden)
943 continue; 943 continue;
944 944
945 dprintk("svc: attempting to unregister %sv%u\n",
946 progp->pg_name, i);
945 __svc_unregister(progp->pg_prog, i, progp->pg_name); 947 __svc_unregister(progp->pg_prog, i, progp->pg_name);
946 } 948 }
947 } 949 }
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index b7d435c3f19e..af04f779ce9f 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -387,6 +387,33 @@ static int svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov, int nr,
387 return len; 387 return len;
388} 388}
389 389
390static int svc_partial_recvfrom(struct svc_rqst *rqstp,
391 struct kvec *iov, int nr,
392 int buflen, unsigned int base)
393{
394 size_t save_iovlen;
395 void __user *save_iovbase;
396 unsigned int i;
397 int ret;
398
399 if (base == 0)
400 return svc_recvfrom(rqstp, iov, nr, buflen);
401
402 for (i = 0; i < nr; i++) {
403 if (iov[i].iov_len > base)
404 break;
405 base -= iov[i].iov_len;
406 }
407 save_iovlen = iov[i].iov_len;
408 save_iovbase = iov[i].iov_base;
409 iov[i].iov_len -= base;
410 iov[i].iov_base += base;
411 ret = svc_recvfrom(rqstp, &iov[i], nr - i, buflen);
412 iov[i].iov_len = save_iovlen;
413 iov[i].iov_base = save_iovbase;
414 return ret;
415}
416
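[editor's note] svc_partial_recvfrom() resumes a receive in the middle of an iovec array: it skips whole elements already filled, temporarily shortens the element the offset lands in, and restores it after the call. The skip-and-adjust step on its own, assuming base is smaller than the total iovec length, as the kernel caller guarantees:

	#include <stddef.h>
	#include <sys/uio.h>

	/* returns the index to resume from; iov[i] is adjusted in place */
	static int iov_advance(struct iovec *iov, int nr, size_t base)
	{
		int i = 0;

		while (i < nr && iov[i].iov_len <= base) {
			base -= iov[i].iov_len;
			i++;	/* element fully consumed, skip it */
		}
		iov[i].iov_base = (char *)iov[i].iov_base + base;
		iov[i].iov_len -= base;
		return i;
	}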
390/* 417/*
391 * Set socket snd and rcv buffer lengths 418 * Set socket snd and rcv buffer lengths
392 */ 419 */
@@ -409,7 +436,6 @@ static void svc_sock_setbufsize(struct socket *sock, unsigned int snd,
409 lock_sock(sock->sk); 436 lock_sock(sock->sk);
410 sock->sk->sk_sndbuf = snd * 2; 437 sock->sk->sk_sndbuf = snd * 2;
411 sock->sk->sk_rcvbuf = rcv * 2; 438 sock->sk->sk_rcvbuf = rcv * 2;
412 sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK;
413 sock->sk->sk_write_space(sock->sk); 439 sock->sk->sk_write_space(sock->sk);
414 release_sock(sock->sk); 440 release_sock(sock->sk);
415#endif 441#endif
@@ -884,6 +910,56 @@ failed:
884 return NULL; 910 return NULL;
885} 911}
886 912
913static unsigned int svc_tcp_restore_pages(struct svc_sock *svsk, struct svc_rqst *rqstp)
914{
915 unsigned int i, len, npages;
916
917 if (svsk->sk_tcplen <= sizeof(rpc_fraghdr))
918 return 0;
919 len = svsk->sk_tcplen - sizeof(rpc_fraghdr);
920 npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
921 for (i = 0; i < npages; i++) {
922 if (rqstp->rq_pages[i] != NULL)
923 put_page(rqstp->rq_pages[i]);
924 BUG_ON(svsk->sk_pages[i] == NULL);
925 rqstp->rq_pages[i] = svsk->sk_pages[i];
926 svsk->sk_pages[i] = NULL;
927 }
928 rqstp->rq_arg.head[0].iov_base = page_address(rqstp->rq_pages[0]);
929 return len;
930}
931
932static void svc_tcp_save_pages(struct svc_sock *svsk, struct svc_rqst *rqstp)
933{
934 unsigned int i, len, npages;
935
936 if (svsk->sk_tcplen <= sizeof(rpc_fraghdr))
937 return;
938 len = svsk->sk_tcplen - sizeof(rpc_fraghdr);
939 npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
940 for (i = 0; i < npages; i++) {
941 svsk->sk_pages[i] = rqstp->rq_pages[i];
942 rqstp->rq_pages[i] = NULL;
943 }
944}
945
946static void svc_tcp_clear_pages(struct svc_sock *svsk)
947{
948 unsigned int i, len, npages;
949
950 if (svsk->sk_tcplen <= sizeof(rpc_fraghdr))
951 goto out;
952 len = svsk->sk_tcplen - sizeof(rpc_fraghdr);
953 npages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
954 for (i = 0; i < npages; i++) {
955 BUG_ON(svsk->sk_pages[i] == NULL);
956 put_page(svsk->sk_pages[i]);
957 svsk->sk_pages[i] = NULL;
958 }
959out:
960 svsk->sk_tcplen = 0;
961}
962
887/* 963/*
888 * Receive data. 964 * Receive data.
889 * If we haven't gotten the record length yet, get the next four bytes. 965 * If we haven't gotten the record length yet, get the next four bytes.
@@ -893,31 +969,15 @@ failed:
893static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp) 969static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp)
894{ 970{
895 struct svc_serv *serv = svsk->sk_xprt.xpt_server; 971 struct svc_serv *serv = svsk->sk_xprt.xpt_server;
972 unsigned int want;
896 int len; 973 int len;
897 974
898 if (test_and_clear_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags))
899 /* sndbuf needs to have room for one request
900 * per thread, otherwise we can stall even when the
901 * network isn't a bottleneck.
902 *
903 * We count all threads rather than threads in a
904 * particular pool, which provides an upper bound
905 * on the number of threads which will access the socket.
906 *
907 * rcvbuf just needs to be able to hold a few requests.
908 * Normally they will be removed from the queue
 909 * as soon as a complete request arrives.
910 */
911 svc_sock_setbufsize(svsk->sk_sock,
912 (serv->sv_nrthreads+3) * serv->sv_max_mesg,
913 3 * serv->sv_max_mesg);
914
915 clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); 975 clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
916 976
917 if (svsk->sk_tcplen < sizeof(rpc_fraghdr)) { 977 if (svsk->sk_tcplen < sizeof(rpc_fraghdr)) {
918 int want = sizeof(rpc_fraghdr) - svsk->sk_tcplen;
919 struct kvec iov; 978 struct kvec iov;
920 979
980 want = sizeof(rpc_fraghdr) - svsk->sk_tcplen;
921 iov.iov_base = ((char *) &svsk->sk_reclen) + svsk->sk_tcplen; 981 iov.iov_base = ((char *) &svsk->sk_reclen) + svsk->sk_tcplen;
922 iov.iov_len = want; 982 iov.iov_len = want;
923 if ((len = svc_recvfrom(rqstp, &iov, 1, want)) < 0) 983 if ((len = svc_recvfrom(rqstp, &iov, 1, want)) < 0)
@@ -927,7 +987,7 @@ static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp)
927 if (len < want) { 987 if (len < want) {
928 dprintk("svc: short recvfrom while reading record " 988 dprintk("svc: short recvfrom while reading record "
929 "length (%d of %d)\n", len, want); 989 "length (%d of %d)\n", len, want);
930 goto err_again; /* record header not complete */ 990 return -EAGAIN;
931 } 991 }
932 992
933 svsk->sk_reclen = ntohl(svsk->sk_reclen); 993 svsk->sk_reclen = ntohl(svsk->sk_reclen);
@@ -954,83 +1014,75 @@ static int svc_tcp_recv_record(struct svc_sock *svsk, struct svc_rqst *rqstp)
954 } 1014 }
955 } 1015 }
956 1016
957 /* Check whether enough data is available */ 1017 if (svsk->sk_reclen < 8)
958 len = svc_recv_available(svsk); 1018 goto err_delete; /* client is nuts. */
959 if (len < 0)
960 goto error;
961 1019
962 if (len < svsk->sk_reclen) {
963 dprintk("svc: incomplete TCP record (%d of %d)\n",
964 len, svsk->sk_reclen);
965 goto err_again; /* record not complete */
966 }
967 len = svsk->sk_reclen; 1020 len = svsk->sk_reclen;
968 set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
969 1021
970 return len; 1022 return len;
971 error: 1023error:
972 if (len == -EAGAIN) 1024 dprintk("RPC: TCP recv_record got %d\n", len);
973 dprintk("RPC: TCP recv_record got EAGAIN\n");
974 return len; 1025 return len;
975 err_delete: 1026err_delete:
976 set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags); 1027 set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
977 err_again:
978 return -EAGAIN; 1028 return -EAGAIN;
979} 1029}
980 1030
981static int svc_process_calldir(struct svc_sock *svsk, struct svc_rqst *rqstp, 1031static int receive_cb_reply(struct svc_sock *svsk, struct svc_rqst *rqstp)
982 struct rpc_rqst **reqpp, struct kvec *vec)
983{ 1032{
1033 struct rpc_xprt *bc_xprt = svsk->sk_xprt.xpt_bc_xprt;
984 struct rpc_rqst *req = NULL; 1034 struct rpc_rqst *req = NULL;
985 u32 *p; 1035 struct kvec *src, *dst;
986 u32 xid; 1036 __be32 *p = (__be32 *)rqstp->rq_arg.head[0].iov_base;
987 u32 calldir; 1037 __be32 xid;
988 int len; 1038 __be32 calldir;
989
990 len = svc_recvfrom(rqstp, vec, 1, 8);
991 if (len < 0)
992 goto error;
993 1039
994 p = (u32 *)rqstp->rq_arg.head[0].iov_base;
995 xid = *p++; 1040 xid = *p++;
996 calldir = *p; 1041 calldir = *p;
997 1042
998 if (calldir == 0) { 1043 if (bc_xprt)
999 /* REQUEST is the most common case */ 1044 req = xprt_lookup_rqst(bc_xprt, xid);
1000 vec[0] = rqstp->rq_arg.head[0];
1001 } else {
1002 /* REPLY */
1003 struct rpc_xprt *bc_xprt = svsk->sk_xprt.xpt_bc_xprt;
1004
1005 if (bc_xprt)
1006 req = xprt_lookup_rqst(bc_xprt, xid);
1007
1008 if (!req) {
1009 printk(KERN_NOTICE
1010 "%s: Got unrecognized reply: "
1011 "calldir 0x%x xpt_bc_xprt %p xid %08x\n",
1012 __func__, ntohl(calldir),
1013 bc_xprt, xid);
1014 vec[0] = rqstp->rq_arg.head[0];
1015 goto out;
1016 }
1017 1045
1018 memcpy(&req->rq_private_buf, &req->rq_rcv_buf, 1046 if (!req) {
1019 sizeof(struct xdr_buf)); 1047 printk(KERN_NOTICE
1020 /* copy the xid and call direction */ 1048 "%s: Got unrecognized reply: "
1021 memcpy(req->rq_private_buf.head[0].iov_base, 1049 "calldir 0x%x xpt_bc_xprt %p xid %08x\n",
1022 rqstp->rq_arg.head[0].iov_base, 8); 1050 __func__, ntohl(calldir),
1023 vec[0] = req->rq_private_buf.head[0]; 1051 bc_xprt, xid);
1052 return -EAGAIN;
1024 } 1053 }
1025 out: 1054
1026 vec[0].iov_base += 8; 1055 memcpy(&req->rq_private_buf, &req->rq_rcv_buf, sizeof(struct xdr_buf));
1027 vec[0].iov_len -= 8; 1056 /*
1028 len = svsk->sk_reclen - 8; 1057 * XXX!: cheating for now! Only copying HEAD.
1029 error: 1058 * But we know this is good enough for now (in fact, for any
 1030 *reqpp = req; 1059 * callback reply in the foreseeable future).
1031 return len; 1060 */
1061 dst = &req->rq_private_buf.head[0];
1062 src = &rqstp->rq_arg.head[0];
1063 if (dst->iov_len < src->iov_len)
1064 return -EAGAIN; /* whatever; just giving up. */
1065 memcpy(dst->iov_base, src->iov_base, src->iov_len);
1066 xprt_complete_rqst(req->rq_task, svsk->sk_reclen);
1067 rqstp->rq_arg.len = 0;
1068 return 0;
1032} 1069}
1033 1070
1071static int copy_pages_to_kvecs(struct kvec *vec, struct page **pages, int len)
1072{
1073 int i = 0;
1074 int t = 0;
1075
1076 while (t < len) {
1077 vec[i].iov_base = page_address(pages[i]);
1078 vec[i].iov_len = PAGE_SIZE;
1079 i++;
1080 t += PAGE_SIZE;
1081 }
1082 return i;
1083}
1084
1085
1034/* 1086/*
1035 * Receive data from a TCP socket. 1087 * Receive data from a TCP socket.
1036 */ 1088 */
@@ -1041,8 +1093,10 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
1041 struct svc_serv *serv = svsk->sk_xprt.xpt_server; 1093 struct svc_serv *serv = svsk->sk_xprt.xpt_server;
1042 int len; 1094 int len;
1043 struct kvec *vec; 1095 struct kvec *vec;
1044 int pnum, vlen; 1096 unsigned int want, base;
1045 struct rpc_rqst *req = NULL; 1097 __be32 *p;
1098 __be32 calldir;
1099 int pnum;
1046 1100
1047 dprintk("svc: tcp_recv %p data %d conn %d close %d\n", 1101 dprintk("svc: tcp_recv %p data %d conn %d close %d\n",
1048 svsk, test_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags), 1102 svsk, test_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags),
@@ -1053,87 +1107,73 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
1053 if (len < 0) 1107 if (len < 0)
1054 goto error; 1108 goto error;
1055 1109
1110 base = svc_tcp_restore_pages(svsk, rqstp);
1111 want = svsk->sk_reclen - base;
1112
1056 vec = rqstp->rq_vec; 1113 vec = rqstp->rq_vec;
1057 vec[0] = rqstp->rq_arg.head[0];
1058 vlen = PAGE_SIZE;
1059 1114
1060 /* 1115 pnum = copy_pages_to_kvecs(&vec[0], &rqstp->rq_pages[0],
1061 * We have enough data for the whole tcp record. Let's try and read the 1116 svsk->sk_reclen);
1062 * first 8 bytes to get the xid and the call direction. We can use this
1063 * to figure out if this is a call or a reply to a callback. If
1064 * sk_reclen is < 8 (xid and calldir), then this is a malformed packet.
1065 * In that case, don't bother with the calldir and just read the data.
1066 * It will be rejected in svc_process.
1067 */
1068 if (len >= 8) {
1069 len = svc_process_calldir(svsk, rqstp, &req, vec);
1070 if (len < 0)
1071 goto err_again;
1072 vlen -= 8;
1073 }
1074 1117
1075 pnum = 1;
1076 while (vlen < len) {
1077 vec[pnum].iov_base = (req) ?
1078 page_address(req->rq_private_buf.pages[pnum - 1]) :
1079 page_address(rqstp->rq_pages[pnum]);
1080 vec[pnum].iov_len = PAGE_SIZE;
1081 pnum++;
1082 vlen += PAGE_SIZE;
1083 }
1084 rqstp->rq_respages = &rqstp->rq_pages[pnum]; 1118 rqstp->rq_respages = &rqstp->rq_pages[pnum];
1085 1119
1086 /* Now receive data */ 1120 /* Now receive data */
1087 len = svc_recvfrom(rqstp, vec, pnum, len); 1121 len = svc_partial_recvfrom(rqstp, vec, pnum, want, base);
1088 if (len < 0) 1122 if (len >= 0)
1089 goto err_again; 1123 svsk->sk_tcplen += len;
1090 1124 if (len != want) {
1091 /* 1125 if (len < 0 && len != -EAGAIN)
1092 * Account for the 8 bytes we read earlier 1126 goto err_other;
1093 */ 1127 svc_tcp_save_pages(svsk, rqstp);
1094 len += 8; 1128 dprintk("svc: incomplete TCP record (%d of %d)\n",
1095 1129 svsk->sk_tcplen, svsk->sk_reclen);
1096 if (req) { 1130 goto err_noclose;
1097 xprt_complete_rqst(req->rq_task, len);
1098 len = 0;
1099 goto out;
1100 } 1131 }
1101 dprintk("svc: TCP complete record (%d bytes)\n", len); 1132
1102 rqstp->rq_arg.len = len; 1133 rqstp->rq_arg.len = svsk->sk_reclen;
1103 rqstp->rq_arg.page_base = 0; 1134 rqstp->rq_arg.page_base = 0;
1104 if (len <= rqstp->rq_arg.head[0].iov_len) { 1135 if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len) {
1105 rqstp->rq_arg.head[0].iov_len = len; 1136 rqstp->rq_arg.head[0].iov_len = rqstp->rq_arg.len;
1106 rqstp->rq_arg.page_len = 0; 1137 rqstp->rq_arg.page_len = 0;
1107 } else { 1138 } else
1108 rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len; 1139 rqstp->rq_arg.page_len = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
1109 }
1110 1140
1111 rqstp->rq_xprt_ctxt = NULL; 1141 rqstp->rq_xprt_ctxt = NULL;
1112 rqstp->rq_prot = IPPROTO_TCP; 1142 rqstp->rq_prot = IPPROTO_TCP;
1113 1143
1114out: 1144 p = (__be32 *)rqstp->rq_arg.head[0].iov_base;
1145 calldir = p[1];
1146 if (calldir)
1147 len = receive_cb_reply(svsk, rqstp);
1148
1115 /* Reset TCP read info */ 1149 /* Reset TCP read info */
1116 svsk->sk_reclen = 0; 1150 svsk->sk_reclen = 0;
1117 svsk->sk_tcplen = 0; 1151 svsk->sk_tcplen = 0;
1152 /* If we have more data, signal svc_xprt_enqueue() to try again */
1153 if (svc_recv_available(svsk) > sizeof(rpc_fraghdr))
1154 set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
1155
1156 if (len < 0)
1157 goto error;
1118 1158
1119 svc_xprt_copy_addrs(rqstp, &svsk->sk_xprt); 1159 svc_xprt_copy_addrs(rqstp, &svsk->sk_xprt);
1120 if (serv->sv_stats) 1160 if (serv->sv_stats)
1121 serv->sv_stats->nettcpcnt++; 1161 serv->sv_stats->nettcpcnt++;
1122 1162
1123 return len; 1163 dprintk("svc: TCP complete record (%d bytes)\n", rqstp->rq_arg.len);
1164 return rqstp->rq_arg.len;
1124 1165
1125err_again:
1126 if (len == -EAGAIN) {
1127 dprintk("RPC: TCP recvfrom got EAGAIN\n");
1128 return len;
1129 }
1130error: 1166error:
1131 if (len != -EAGAIN) { 1167 if (len != -EAGAIN)
1132 printk(KERN_NOTICE "%s: recvfrom returned errno %d\n", 1168 goto err_other;
1133 svsk->sk_xprt.xpt_server->sv_name, -len); 1169 dprintk("RPC: TCP recvfrom got EAGAIN\n");
1134 set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
1135 }
1136 return -EAGAIN; 1170 return -EAGAIN;
1171err_other:
1172 printk(KERN_NOTICE "%s: recvfrom returned errno %d\n",
1173 svsk->sk_xprt.xpt_server->sv_name, -len);
1174 set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
1175err_noclose:
1176 return -EAGAIN; /* record not complete */
1137} 1177}
1138 1178
1139/* 1179/*
@@ -1304,18 +1344,10 @@ static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv)
1304 1344
1305 svsk->sk_reclen = 0; 1345 svsk->sk_reclen = 0;
1306 svsk->sk_tcplen = 0; 1346 svsk->sk_tcplen = 0;
1347 memset(&svsk->sk_pages[0], 0, sizeof(svsk->sk_pages));
1307 1348
1308 tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF; 1349 tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;
1309 1350
 1310 /* initial setting must have enough space to
1311 * receive and respond to one request.
1312 * svc_tcp_recvfrom will re-adjust if necessary
1313 */
1314 svc_sock_setbufsize(svsk->sk_sock,
1315 3 * svsk->sk_xprt.xpt_server->sv_max_mesg,
1316 3 * svsk->sk_xprt.xpt_server->sv_max_mesg);
1317
1318 set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
1319 set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); 1351 set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
1320 if (sk->sk_state != TCP_ESTABLISHED) 1352 if (sk->sk_state != TCP_ESTABLISHED)
1321 set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags); 1353 set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
@@ -1379,8 +1411,14 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
1379 /* Initialize the socket */ 1411 /* Initialize the socket */
1380 if (sock->type == SOCK_DGRAM) 1412 if (sock->type == SOCK_DGRAM)
1381 svc_udp_init(svsk, serv); 1413 svc_udp_init(svsk, serv);
1382 else 1414 else {
1415 /* initialise setting must have enough space to
1416 * receive and respond to one request.
1417 */
1418 svc_sock_setbufsize(svsk->sk_sock, 4 * serv->sv_max_mesg,
1419 4 * serv->sv_max_mesg);
1383 svc_tcp_init(svsk, serv); 1420 svc_tcp_init(svsk, serv);
1421 }
1384 1422
1385 dprintk("svc: svc_setup_socket created %p (inet %p)\n", 1423 dprintk("svc: svc_setup_socket created %p (inet %p)\n",
1386 svsk, svsk->sk_sk); 1424 svsk, svsk->sk_sk);
@@ -1562,8 +1600,10 @@ static void svc_tcp_sock_detach(struct svc_xprt *xprt)
1562 1600
1563 svc_sock_detach(xprt); 1601 svc_sock_detach(xprt);
1564 1602
1565 if (!test_bit(XPT_LISTENER, &xprt->xpt_flags)) 1603 if (!test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
1604 svc_tcp_clear_pages(svsk);
1566 kernel_sock_shutdown(svsk->sk_sock, SHUT_RDWR); 1605 kernel_sock_shutdown(svsk->sk_sock, SHUT_RDWR);
1606 }
1567} 1607}
1568 1608
1569/* 1609/*
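The new svc_partial_recvfrom() above resumes a partially received TCP record: it walks the kvec array past elements fully covered by the already-received offset, temporarily trims the first partially filled element, calls the normal receive path, and then restores the element. A minimal userspace sketch of the same iovec arithmetic, assuming base is strictly smaller than the total iovec length (the helper and its names are illustrative):

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

/* Receive into iov[0..nr), resuming at byte offset 'base'.
 * Assumes base < total length described by the iovec array. */
static ssize_t recv_from_offset(int fd, struct iovec *iov, int nr, size_t base)
{
	struct iovec save;
	struct msghdr msg;
	ssize_t ret;
	int i;

	for (i = 0; i < nr && iov[i].iov_len <= base; i++)
		base -= iov[i].iov_len;	/* skip fully received elements */

	save = iov[i];			/* trim the partial element... */
	iov[i].iov_base = (char *)iov[i].iov_base + base;
	iov[i].iov_len -= base;

	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = &iov[i];
	msg.msg_iovlen = nr - i;
	ret = recvmsg(fd, &msg, MSG_DONTWAIT);

	iov[i] = save;			/* ...and restore it afterwards */
	return ret;
}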
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 679cd674b81d..f008c14ad34c 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -638,6 +638,25 @@ void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
638} 638}
639EXPORT_SYMBOL_GPL(xdr_init_decode); 639EXPORT_SYMBOL_GPL(xdr_init_decode);
640 640
641/**
 642 * xdr_init_decode_pages - Initialize an xdr_stream for decoding into pages
643 * @xdr: pointer to xdr_stream struct
644 * @buf: pointer to XDR buffer from which to decode data
645 * @pages: list of pages to decode into
646 * @len: length in bytes of buffer in pages
647 */
648void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
649 struct page **pages, unsigned int len)
650{
651 memset(buf, 0, sizeof(*buf));
652 buf->pages = pages;
653 buf->page_len = len;
654 buf->buflen = len;
655 buf->len = len;
656 xdr_init_decode(xdr, buf, NULL);
657}
658EXPORT_SYMBOL_GPL(xdr_init_decode_pages);
659
641static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes) 660static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
642{ 661{
643 __be32 *p = xdr->p; 662 __be32 *p = xdr->p;
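A hedged usage sketch for the new helper added above: it zeroes the xdr_buf, points it at a page array, and delegates to xdr_init_decode() with no inline head data, so callers no longer open-code that setup. The names reply_pages and reply_len below are illustrative placeholders for whatever the transport hands back:

	struct xdr_stream xdr;
	struct xdr_buf buf;

	/* buf is fully initialized by the helper: pages, page_len,
	 * buflen and len all describe the page array; head[] is empty. */
	xdr_init_decode_pages(&xdr, &buf, reply_pages, reply_len);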
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 1a10dcd999ea..c3c232a88d94 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -333,7 +333,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
333} 333}
334 334
335/* 335/*
336 * Processs a completion context 336 * Process a completion context
337 */ 337 */
338static void process_context(struct svcxprt_rdma *xprt, 338static void process_context(struct svcxprt_rdma *xprt,
339 struct svc_rdma_op_ctxt *ctxt) 339 struct svc_rdma_op_ctxt *ctxt)
@@ -695,7 +695,8 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
695 return ERR_PTR(-ENOMEM); 695 return ERR_PTR(-ENOMEM);
696 xprt = &cma_xprt->sc_xprt; 696 xprt = &cma_xprt->sc_xprt;
697 697
698 listen_id = rdma_create_id(rdma_listen_handler, cma_xprt, RDMA_PS_TCP); 698 listen_id = rdma_create_id(rdma_listen_handler, cma_xprt, RDMA_PS_TCP,
699 IB_QPT_RC);
699 if (IS_ERR(listen_id)) { 700 if (IS_ERR(listen_id)) {
700 ret = PTR_ERR(listen_id); 701 ret = PTR_ERR(listen_id);
701 dprintk("svcrdma: rdma_create_id failed = %d\n", ret); 702 dprintk("svcrdma: rdma_create_id failed = %d\n", ret);
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index d4297dc43dc4..80f8da344df5 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -387,7 +387,7 @@ rpcrdma_create_id(struct rpcrdma_xprt *xprt,
387 387
388 init_completion(&ia->ri_done); 388 init_completion(&ia->ri_done);
389 389
390 id = rdma_create_id(rpcrdma_conn_upcall, xprt, RDMA_PS_TCP); 390 id = rdma_create_id(rpcrdma_conn_upcall, xprt, RDMA_PS_TCP, IB_QPT_RC);
391 if (IS_ERR(id)) { 391 if (IS_ERR(id)) {
392 rc = PTR_ERR(id); 392 rc = PTR_ERR(id);
393 dprintk("RPC: %s: rdma_create_id() failed %i\n", 393 dprintk("RPC: %s: rdma_create_id() failed %i\n",
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index bf005d3c65ef..72abb7358933 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -19,6 +19,7 @@
19 */ 19 */
20 20
21#include <linux/types.h> 21#include <linux/types.h>
22#include <linux/string.h>
22#include <linux/slab.h> 23#include <linux/slab.h>
23#include <linux/module.h> 24#include <linux/module.h>
24#include <linux/capability.h> 25#include <linux/capability.h>
@@ -28,6 +29,7 @@
28#include <linux/in.h> 29#include <linux/in.h>
29#include <linux/net.h> 30#include <linux/net.h>
30#include <linux/mm.h> 31#include <linux/mm.h>
32#include <linux/un.h>
31#include <linux/udp.h> 33#include <linux/udp.h>
32#include <linux/tcp.h> 34#include <linux/tcp.h>
33#include <linux/sunrpc/clnt.h> 35#include <linux/sunrpc/clnt.h>
@@ -45,6 +47,9 @@
45#include <net/tcp.h> 47#include <net/tcp.h>
46 48
47#include "sunrpc.h" 49#include "sunrpc.h"
50
51static void xs_close(struct rpc_xprt *xprt);
52
48/* 53/*
49 * xprtsock tunables 54 * xprtsock tunables
50 */ 55 */
@@ -261,6 +266,11 @@ static inline struct sockaddr *xs_addr(struct rpc_xprt *xprt)
261 return (struct sockaddr *) &xprt->addr; 266 return (struct sockaddr *) &xprt->addr;
262} 267}
263 268
269static inline struct sockaddr_un *xs_addr_un(struct rpc_xprt *xprt)
270{
271 return (struct sockaddr_un *) &xprt->addr;
272}
273
264static inline struct sockaddr_in *xs_addr_in(struct rpc_xprt *xprt) 274static inline struct sockaddr_in *xs_addr_in(struct rpc_xprt *xprt)
265{ 275{
266 return (struct sockaddr_in *) &xprt->addr; 276 return (struct sockaddr_in *) &xprt->addr;
@@ -276,23 +286,34 @@ static void xs_format_common_peer_addresses(struct rpc_xprt *xprt)
276 struct sockaddr *sap = xs_addr(xprt); 286 struct sockaddr *sap = xs_addr(xprt);
277 struct sockaddr_in6 *sin6; 287 struct sockaddr_in6 *sin6;
278 struct sockaddr_in *sin; 288 struct sockaddr_in *sin;
289 struct sockaddr_un *sun;
279 char buf[128]; 290 char buf[128];
280 291
281 (void)rpc_ntop(sap, buf, sizeof(buf));
282 xprt->address_strings[RPC_DISPLAY_ADDR] = kstrdup(buf, GFP_KERNEL);
283
284 switch (sap->sa_family) { 292 switch (sap->sa_family) {
293 case AF_LOCAL:
294 sun = xs_addr_un(xprt);
295 strlcpy(buf, sun->sun_path, sizeof(buf));
296 xprt->address_strings[RPC_DISPLAY_ADDR] =
297 kstrdup(buf, GFP_KERNEL);
298 break;
285 case AF_INET: 299 case AF_INET:
300 (void)rpc_ntop(sap, buf, sizeof(buf));
301 xprt->address_strings[RPC_DISPLAY_ADDR] =
302 kstrdup(buf, GFP_KERNEL);
286 sin = xs_addr_in(xprt); 303 sin = xs_addr_in(xprt);
287 snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr)); 304 snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
288 break; 305 break;
289 case AF_INET6: 306 case AF_INET6:
307 (void)rpc_ntop(sap, buf, sizeof(buf));
308 xprt->address_strings[RPC_DISPLAY_ADDR] =
309 kstrdup(buf, GFP_KERNEL);
290 sin6 = xs_addr_in6(xprt); 310 sin6 = xs_addr_in6(xprt);
291 snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr); 311 snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr);
292 break; 312 break;
293 default: 313 default:
294 BUG(); 314 BUG();
295 } 315 }
316
296 xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL); 317 xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);
297} 318}
298 319
@@ -495,6 +516,70 @@ static int xs_nospace(struct rpc_task *task)
495 return ret; 516 return ret;
496} 517}
497 518
519/*
520 * Construct a stream transport record marker in @buf.
521 */
522static inline void xs_encode_stream_record_marker(struct xdr_buf *buf)
523{
524 u32 reclen = buf->len - sizeof(rpc_fraghdr);
525 rpc_fraghdr *base = buf->head[0].iov_base;
526 *base = cpu_to_be32(RPC_LAST_STREAM_FRAGMENT | reclen);
527}
528
529/**
530 * xs_local_send_request - write an RPC request to an AF_LOCAL socket
531 * @task: RPC task that manages the state of an RPC request
532 *
533 * Return values:
534 * 0: The request has been sent
535 * EAGAIN: The socket was blocked, please call again later to
536 * complete the request
537 * ENOTCONN: Caller needs to invoke connect logic then call again
 538 * other: Some other error occurred, the request was not sent
539 */
540static int xs_local_send_request(struct rpc_task *task)
541{
542 struct rpc_rqst *req = task->tk_rqstp;
543 struct rpc_xprt *xprt = req->rq_xprt;
544 struct sock_xprt *transport =
545 container_of(xprt, struct sock_xprt, xprt);
546 struct xdr_buf *xdr = &req->rq_snd_buf;
547 int status;
548
549 xs_encode_stream_record_marker(&req->rq_snd_buf);
550
551 xs_pktdump("packet data:",
552 req->rq_svec->iov_base, req->rq_svec->iov_len);
553
554 status = xs_sendpages(transport->sock, NULL, 0,
555 xdr, req->rq_bytes_sent);
556 dprintk("RPC: %s(%u) = %d\n",
557 __func__, xdr->len - req->rq_bytes_sent, status);
558 if (likely(status >= 0)) {
559 req->rq_bytes_sent += status;
560 req->rq_xmit_bytes_sent += status;
561 if (likely(req->rq_bytes_sent >= req->rq_slen)) {
562 req->rq_bytes_sent = 0;
563 return 0;
564 }
565 status = -EAGAIN;
566 }
567
568 switch (status) {
569 case -EAGAIN:
570 status = xs_nospace(task);
571 break;
572 default:
573 dprintk("RPC: sendmsg returned unrecognized error %d\n",
574 -status);
575 case -EPIPE:
576 xs_close(xprt);
577 status = -ENOTCONN;
578 }
579
580 return status;
581}
582
498/** 583/**
499 * xs_udp_send_request - write an RPC request to a UDP socket 584 * xs_udp_send_request - write an RPC request to a UDP socket
500 * @task: address of RPC task that manages the state of an RPC request 585 * @task: address of RPC task that manages the state of an RPC request
@@ -574,13 +659,6 @@ static void xs_tcp_shutdown(struct rpc_xprt *xprt)
574 kernel_sock_shutdown(sock, SHUT_WR); 659 kernel_sock_shutdown(sock, SHUT_WR);
575} 660}
576 661
577static inline void xs_encode_tcp_record_marker(struct xdr_buf *buf)
578{
579 u32 reclen = buf->len - sizeof(rpc_fraghdr);
580 rpc_fraghdr *base = buf->head[0].iov_base;
581 *base = htonl(RPC_LAST_STREAM_FRAGMENT | reclen);
582}
583
584/** 662/**
585 * xs_tcp_send_request - write an RPC request to a TCP socket 663 * xs_tcp_send_request - write an RPC request to a TCP socket
586 * @task: address of RPC task that manages the state of an RPC request 664 * @task: address of RPC task that manages the state of an RPC request
@@ -603,7 +681,7 @@ static int xs_tcp_send_request(struct rpc_task *task)
603 struct xdr_buf *xdr = &req->rq_snd_buf; 681 struct xdr_buf *xdr = &req->rq_snd_buf;
604 int status; 682 int status;
605 683
606 xs_encode_tcp_record_marker(&req->rq_snd_buf); 684 xs_encode_stream_record_marker(&req->rq_snd_buf);
607 685
608 xs_pktdump("packet data:", 686 xs_pktdump("packet data:",
609 req->rq_svec->iov_base, 687 req->rq_svec->iov_base,
@@ -785,6 +863,88 @@ static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
785 return (struct rpc_xprt *) sk->sk_user_data; 863 return (struct rpc_xprt *) sk->sk_user_data;
786} 864}
787 865
866static int xs_local_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
867{
868 struct xdr_skb_reader desc = {
869 .skb = skb,
870 .offset = sizeof(rpc_fraghdr),
871 .count = skb->len - sizeof(rpc_fraghdr),
872 };
873
874 if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_bits) < 0)
875 return -1;
876 if (desc.count)
877 return -1;
878 return 0;
879}
880
881/**
882 * xs_local_data_ready - "data ready" callback for AF_LOCAL sockets
883 * @sk: socket with data to read
884 * @len: how much data to read
885 *
886 * Currently this assumes we can read the whole reply in a single gulp.
887 */
888static void xs_local_data_ready(struct sock *sk, int len)
889{
890 struct rpc_task *task;
891 struct rpc_xprt *xprt;
892 struct rpc_rqst *rovr;
893 struct sk_buff *skb;
894 int err, repsize, copied;
895 u32 _xid;
896 __be32 *xp;
897
898 read_lock_bh(&sk->sk_callback_lock);
899 dprintk("RPC: %s...\n", __func__);
900 xprt = xprt_from_sock(sk);
901 if (xprt == NULL)
902 goto out;
903
904 skb = skb_recv_datagram(sk, 0, 1, &err);
905 if (skb == NULL)
906 goto out;
907
908 if (xprt->shutdown)
909 goto dropit;
910
911 repsize = skb->len - sizeof(rpc_fraghdr);
912 if (repsize < 4) {
913 dprintk("RPC: impossible RPC reply size %d\n", repsize);
914 goto dropit;
915 }
916
917 /* Copy the XID from the skb... */
918 xp = skb_header_pointer(skb, sizeof(rpc_fraghdr), sizeof(_xid), &_xid);
919 if (xp == NULL)
920 goto dropit;
921
922 /* Look up and lock the request corresponding to the given XID */
923 spin_lock(&xprt->transport_lock);
924 rovr = xprt_lookup_rqst(xprt, *xp);
925 if (!rovr)
926 goto out_unlock;
927 task = rovr->rq_task;
928
929 copied = rovr->rq_private_buf.buflen;
930 if (copied > repsize)
931 copied = repsize;
932
933 if (xs_local_copy_to_xdr(&rovr->rq_private_buf, skb)) {
934 dprintk("RPC: sk_buff copy failed\n");
935 goto out_unlock;
936 }
937
938 xprt_complete_rqst(task, copied);
939
940 out_unlock:
941 spin_unlock(&xprt->transport_lock);
942 dropit:
943 skb_free_datagram(sk, skb);
944 out:
945 read_unlock_bh(&sk->sk_callback_lock);
946}
947
788/** 948/**
789 * xs_udp_data_ready - "data ready" callback for UDP sockets 949 * xs_udp_data_ready - "data ready" callback for UDP sockets
790 * @sk: socket with data to read 950 * @sk: socket with data to read
@@ -1344,7 +1504,6 @@ static void xs_tcp_state_change(struct sock *sk)
1344 case TCP_CLOSE_WAIT: 1504 case TCP_CLOSE_WAIT:
1345 /* The server initiated a shutdown of the socket */ 1505 /* The server initiated a shutdown of the socket */
1346 xprt_force_disconnect(xprt); 1506 xprt_force_disconnect(xprt);
1347 case TCP_SYN_SENT:
1348 xprt->connect_cookie++; 1507 xprt->connect_cookie++;
1349 case TCP_CLOSING: 1508 case TCP_CLOSING:
1350 /* 1509 /*
@@ -1571,11 +1730,31 @@ static int xs_bind(struct sock_xprt *transport, struct socket *sock)
1571 return err; 1730 return err;
1572} 1731}
1573 1732
1733/*
1734 * We don't support autobind on AF_LOCAL sockets
1735 */
1736static void xs_local_rpcbind(struct rpc_task *task)
1737{
1738 xprt_set_bound(task->tk_xprt);
1739}
1740
1741static void xs_local_set_port(struct rpc_xprt *xprt, unsigned short port)
1742{
1743}
1574 1744
1575#ifdef CONFIG_DEBUG_LOCK_ALLOC 1745#ifdef CONFIG_DEBUG_LOCK_ALLOC
1576static struct lock_class_key xs_key[2]; 1746static struct lock_class_key xs_key[2];
1577static struct lock_class_key xs_slock_key[2]; 1747static struct lock_class_key xs_slock_key[2];
1578 1748
1749static inline void xs_reclassify_socketu(struct socket *sock)
1750{
1751 struct sock *sk = sock->sk;
1752
1753 BUG_ON(sock_owned_by_user(sk));
1754 sock_lock_init_class_and_name(sk, "slock-AF_LOCAL-RPC",
1755 &xs_slock_key[1], "sk_lock-AF_LOCAL-RPC", &xs_key[1]);
1756}
1757
1579static inline void xs_reclassify_socket4(struct socket *sock) 1758static inline void xs_reclassify_socket4(struct socket *sock)
1580{ 1759{
1581 struct sock *sk = sock->sk; 1760 struct sock *sk = sock->sk;
@@ -1597,6 +1776,9 @@ static inline void xs_reclassify_socket6(struct socket *sock)
1597static inline void xs_reclassify_socket(int family, struct socket *sock) 1776static inline void xs_reclassify_socket(int family, struct socket *sock)
1598{ 1777{
1599 switch (family) { 1778 switch (family) {
1779 case AF_LOCAL:
1780 xs_reclassify_socketu(sock);
1781 break;
1600 case AF_INET: 1782 case AF_INET:
1601 xs_reclassify_socket4(sock); 1783 xs_reclassify_socket4(sock);
1602 break; 1784 break;
@@ -1606,6 +1788,10 @@ static inline void xs_reclassify_socket(int family, struct socket *sock)
1606 } 1788 }
1607} 1789}
1608#else 1790#else
1791static inline void xs_reclassify_socketu(struct socket *sock)
1792{
1793}
1794
1609static inline void xs_reclassify_socket4(struct socket *sock) 1795static inline void xs_reclassify_socket4(struct socket *sock)
1610{ 1796{
1611} 1797}
@@ -1644,6 +1830,94 @@ out:
1644 return ERR_PTR(err); 1830 return ERR_PTR(err);
1645} 1831}
1646 1832
1833static int xs_local_finish_connecting(struct rpc_xprt *xprt,
1834 struct socket *sock)
1835{
1836 struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
1837 xprt);
1838
1839 if (!transport->inet) {
1840 struct sock *sk = sock->sk;
1841
1842 write_lock_bh(&sk->sk_callback_lock);
1843
1844 xs_save_old_callbacks(transport, sk);
1845
1846 sk->sk_user_data = xprt;
1847 sk->sk_data_ready = xs_local_data_ready;
1848 sk->sk_write_space = xs_udp_write_space;
1849 sk->sk_error_report = xs_error_report;
1850 sk->sk_allocation = GFP_ATOMIC;
1851
1852 xprt_clear_connected(xprt);
1853
1854 /* Reset to new socket */
1855 transport->sock = sock;
1856 transport->inet = sk;
1857
1858 write_unlock_bh(&sk->sk_callback_lock);
1859 }
1860
1861 /* Tell the socket layer to start connecting... */
1862 xprt->stat.connect_count++;
1863 xprt->stat.connect_start = jiffies;
1864 return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, 0);
1865}
1866
1867/**
1868 * xs_local_setup_socket - create AF_LOCAL socket, connect to a local endpoint
 1870 * @work: queued work item embedded in the struct sock_xprt
 1871 * that identifies the AF_LOCAL transport to set up and
 1872 * connect
 1873 *
 1874 * Invoked from a workqueue.
1874 */
1875static void xs_local_setup_socket(struct work_struct *work)
1876{
1877 struct sock_xprt *transport =
1878 container_of(work, struct sock_xprt, connect_worker.work);
1879 struct rpc_xprt *xprt = &transport->xprt;
1880 struct socket *sock;
1881 int status = -EIO;
1882
1883 if (xprt->shutdown)
1884 goto out;
1885
1886 clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
1887 status = __sock_create(xprt->xprt_net, AF_LOCAL,
1888 SOCK_STREAM, 0, &sock, 1);
1889 if (status < 0) {
1890 dprintk("RPC: can't create AF_LOCAL "
1891 "transport socket (%d).\n", -status);
1892 goto out;
1893 }
1894 xs_reclassify_socketu(sock);
1895
1896 dprintk("RPC: worker connecting xprt %p via AF_LOCAL to %s\n",
1897 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
1898
1899 status = xs_local_finish_connecting(xprt, sock);
1900 switch (status) {
1901 case 0:
1902 dprintk("RPC: xprt %p connected to %s\n",
1903 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
1904 xprt_set_connected(xprt);
1905 break;
1906 case -ENOENT:
1907 dprintk("RPC: xprt %p: socket %s does not exist\n",
1908 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
1909 break;
1910 default:
1911 printk(KERN_ERR "%s: unhandled error (%d) connecting to %s\n",
1912 __func__, -status,
1913 xprt->address_strings[RPC_DISPLAY_ADDR]);
1914 }
1915
1916out:
1917 xprt_clear_connecting(xprt);
1918 xprt_wake_pending_tasks(xprt, status);
1919}
1920
1647static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) 1921static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
1648{ 1922{
1649 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 1923 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
@@ -1758,6 +2032,7 @@ static void xs_tcp_reuse_connection(struct sock_xprt *transport)
1758static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) 2032static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
1759{ 2033{
1760 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 2034 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2035 int ret = -ENOTCONN;
1761 2036
1762 if (!transport->inet) { 2037 if (!transport->inet) {
1763 struct sock *sk = sock->sk; 2038 struct sock *sk = sock->sk;
@@ -1789,12 +2064,22 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
1789 } 2064 }
1790 2065
1791 if (!xprt_bound(xprt)) 2066 if (!xprt_bound(xprt))
1792 return -ENOTCONN; 2067 goto out;
1793 2068
1794 /* Tell the socket layer to start connecting... */ 2069 /* Tell the socket layer to start connecting... */
1795 xprt->stat.connect_count++; 2070 xprt->stat.connect_count++;
1796 xprt->stat.connect_start = jiffies; 2071 xprt->stat.connect_start = jiffies;
1797 return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK); 2072 ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK);
2073 switch (ret) {
2074 case 0:
2075 case -EINPROGRESS:
2076 /* SYN_SENT! */
2077 xprt->connect_cookie++;
2078 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
2079 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2080 }
2081out:
2082 return ret;
1798} 2083}
1799 2084
1800/** 2085/**
@@ -1917,6 +2202,32 @@ static void xs_connect(struct rpc_task *task)
1917} 2202}
1918 2203
1919/** 2204/**
 2205 * xs_local_print_stats - display AF_LOCAL socket-specific stats
2206 * @xprt: rpc_xprt struct containing statistics
2207 * @seq: output file
2208 *
2209 */
2210static void xs_local_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2211{
2212 long idle_time = 0;
2213
2214 if (xprt_connected(xprt))
2215 idle_time = (long)(jiffies - xprt->last_used) / HZ;
2216
2217 seq_printf(seq, "\txprt:\tlocal %lu %lu %lu %ld %lu %lu %lu "
2218 "%llu %llu\n",
2219 xprt->stat.bind_count,
2220 xprt->stat.connect_count,
2221 xprt->stat.connect_time,
2222 idle_time,
2223 xprt->stat.sends,
2224 xprt->stat.recvs,
2225 xprt->stat.bad_xids,
2226 xprt->stat.req_u,
2227 xprt->stat.bklog_u);
2228}
2229
2230/**
 1920 * xs_udp_print_stats - display UDP socket-specific stats 2231 * xs_udp_print_stats - display UDP socket-specific stats
1921 * @xprt: rpc_xprt struct containing statistics 2232 * @xprt: rpc_xprt struct containing statistics
1922 * @seq: output file 2233 * @seq: output file
@@ -2014,10 +2325,7 @@ static int bc_sendto(struct rpc_rqst *req)
2014 unsigned long headoff; 2325 unsigned long headoff;
2015 unsigned long tailoff; 2326 unsigned long tailoff;
2016 2327
2017 /* 2328 xs_encode_stream_record_marker(xbufp);
2018 * Set up the rpc header and record marker stuff
2019 */
2020 xs_encode_tcp_record_marker(xbufp);
2021 2329
2022 tailoff = (unsigned long)xbufp->tail[0].iov_base & ~PAGE_MASK; 2330 tailoff = (unsigned long)xbufp->tail[0].iov_base & ~PAGE_MASK;
2023 headoff = (unsigned long)xbufp->head[0].iov_base & ~PAGE_MASK; 2331 headoff = (unsigned long)xbufp->head[0].iov_base & ~PAGE_MASK;
@@ -2089,6 +2397,21 @@ static void bc_destroy(struct rpc_xprt *xprt)
2089{ 2397{
2090} 2398}
2091 2399
2400static struct rpc_xprt_ops xs_local_ops = {
2401 .reserve_xprt = xprt_reserve_xprt,
2402 .release_xprt = xs_tcp_release_xprt,
2403 .rpcbind = xs_local_rpcbind,
2404 .set_port = xs_local_set_port,
2405 .connect = xs_connect,
2406 .buf_alloc = rpc_malloc,
2407 .buf_free = rpc_free,
2408 .send_request = xs_local_send_request,
2409 .set_retrans_timeout = xprt_set_retrans_timeout_def,
2410 .close = xs_close,
2411 .destroy = xs_destroy,
2412 .print_stats = xs_local_print_stats,
2413};
2414
2092static struct rpc_xprt_ops xs_udp_ops = { 2415static struct rpc_xprt_ops xs_udp_ops = {
2093 .set_buffer_size = xs_udp_set_buffer_size, 2416 .set_buffer_size = xs_udp_set_buffer_size,
2094 .reserve_xprt = xprt_reserve_xprt_cong, 2417 .reserve_xprt = xprt_reserve_xprt_cong,
@@ -2150,6 +2473,8 @@ static int xs_init_anyaddr(const int family, struct sockaddr *sap)
2150 }; 2473 };
2151 2474
2152 switch (family) { 2475 switch (family) {
2476 case AF_LOCAL:
2477 break;
2153 case AF_INET: 2478 case AF_INET:
2154 memcpy(sap, &sin, sizeof(sin)); 2479 memcpy(sap, &sin, sizeof(sin));
2155 break; 2480 break;
@@ -2197,6 +2522,70 @@ static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args,
2197 return xprt; 2522 return xprt;
2198} 2523}
2199 2524
2525static const struct rpc_timeout xs_local_default_timeout = {
2526 .to_initval = 10 * HZ,
2527 .to_maxval = 10 * HZ,
2528 .to_retries = 2,
2529};
2530
2531/**
2532 * xs_setup_local - Set up transport to use an AF_LOCAL socket
2533 * @args: rpc transport creation arguments
2534 *
2535 * AF_LOCAL is a "tpi_cots_ord" transport, just like TCP
2536 */
2537static struct rpc_xprt *xs_setup_local(struct xprt_create *args)
2538{
2539 struct sockaddr_un *sun = (struct sockaddr_un *)args->dstaddr;
2540 struct sock_xprt *transport;
2541 struct rpc_xprt *xprt;
2542 struct rpc_xprt *ret;
2543
2544 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries);
2545 if (IS_ERR(xprt))
2546 return xprt;
2547 transport = container_of(xprt, struct sock_xprt, xprt);
2548
2549 xprt->prot = 0;
2550 xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
2551 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
2552
2553 xprt->bind_timeout = XS_BIND_TO;
2554 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2555 xprt->idle_timeout = XS_IDLE_DISC_TO;
2556
2557 xprt->ops = &xs_local_ops;
2558 xprt->timeout = &xs_local_default_timeout;
2559
2560 switch (sun->sun_family) {
2561 case AF_LOCAL:
2562 if (sun->sun_path[0] != '/') {
2563 dprintk("RPC: bad AF_LOCAL address: %s\n",
2564 sun->sun_path);
2565 ret = ERR_PTR(-EINVAL);
2566 goto out_err;
2567 }
2568 xprt_set_bound(xprt);
2569 INIT_DELAYED_WORK(&transport->connect_worker,
2570 xs_local_setup_socket);
2571 xs_format_peer_addresses(xprt, "local", RPCBIND_NETID_LOCAL);
2572 break;
2573 default:
2574 ret = ERR_PTR(-EAFNOSUPPORT);
2575 goto out_err;
2576 }
2577
2578 dprintk("RPC: set up xprt to %s via AF_LOCAL\n",
2579 xprt->address_strings[RPC_DISPLAY_ADDR]);
2580
2581 if (try_module_get(THIS_MODULE))
2582 return xprt;
2583 ret = ERR_PTR(-EINVAL);
2584out_err:
2585 xprt_free(xprt);
2586 return ret;
2587}
2588
2200static const struct rpc_timeout xs_udp_default_timeout = { 2589static const struct rpc_timeout xs_udp_default_timeout = {
2201 .to_initval = 5 * HZ, 2590 .to_initval = 5 * HZ,
2202 .to_maxval = 30 * HZ, 2591 .to_maxval = 30 * HZ,
@@ -2438,6 +2827,14 @@ out_err:
2438 return ret; 2827 return ret;
2439} 2828}
2440 2829
2830static struct xprt_class xs_local_transport = {
2831 .list = LIST_HEAD_INIT(xs_local_transport.list),
2832 .name = "named UNIX socket",
2833 .owner = THIS_MODULE,
2834 .ident = XPRT_TRANSPORT_LOCAL,
2835 .setup = xs_setup_local,
2836};
2837
2441static struct xprt_class xs_udp_transport = { 2838static struct xprt_class xs_udp_transport = {
2442 .list = LIST_HEAD_INIT(xs_udp_transport.list), 2839 .list = LIST_HEAD_INIT(xs_udp_transport.list),
2443 .name = "udp", 2840 .name = "udp",
@@ -2473,6 +2870,7 @@ int init_socket_xprt(void)
2473 sunrpc_table_header = register_sysctl_table(sunrpc_table); 2870 sunrpc_table_header = register_sysctl_table(sunrpc_table);
2474#endif 2871#endif
2475 2872
2873 xprt_register_transport(&xs_local_transport);
2476 xprt_register_transport(&xs_udp_transport); 2874 xprt_register_transport(&xs_udp_transport);
2477 xprt_register_transport(&xs_tcp_transport); 2875 xprt_register_transport(&xs_tcp_transport);
2478 xprt_register_transport(&xs_bc_tcp_transport); 2876 xprt_register_transport(&xs_bc_tcp_transport);
@@ -2493,6 +2891,7 @@ void cleanup_socket_xprt(void)
2493 } 2891 }
2494#endif 2892#endif
2495 2893
2894 xprt_unregister_transport(&xs_local_transport);
2496 xprt_unregister_transport(&xs_udp_transport); 2895 xprt_unregister_transport(&xs_udp_transport);
2497 xprt_unregister_transport(&xs_tcp_transport); 2896 xprt_unregister_transport(&xs_tcp_transport);
2498 xprt_unregister_transport(&xs_bc_tcp_transport); 2897 xprt_unregister_transport(&xs_bc_tcp_transport);
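In the xprtsock.c changes above, xs_encode_tcp_record_marker() is renamed to xs_encode_stream_record_marker() because AF_LOCAL uses the same ONC RPC record marking as TCP: each fragment is preceded by a 4-byte big-endian marker whose high bit flags the final fragment and whose low 31 bits carry the fragment length. A standalone sketch of the marker computation (the constant matches RPC_LAST_STREAM_FRAGMENT; the helper itself is illustrative):

#include <arpa/inet.h>
#include <stdint.h>

static uint32_t rpc_record_marker(uint32_t frag_len, int is_last)
{
	uint32_t marker = frag_len & 0x7fffffffu;	/* 31-bit length */

	if (is_last)
		marker |= 0x80000000u;	/* RPC_LAST_STREAM_FRAGMENT */
	return htonl(marker);		/* record markers are big-endian */
}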
diff --git a/net/tipc/addr.h b/net/tipc/addr.h
index 8971aba99aea..e4f35afe3207 100644
--- a/net/tipc/addr.h
+++ b/net/tipc/addr.h
@@ -37,14 +37,17 @@
37#ifndef _TIPC_ADDR_H 37#ifndef _TIPC_ADDR_H
38#define _TIPC_ADDR_H 38#define _TIPC_ADDR_H
39 39
40#define TIPC_ZONE_MASK 0xff000000u
41#define TIPC_CLUSTER_MASK 0xfffff000u
42
40static inline u32 tipc_zone_mask(u32 addr) 43static inline u32 tipc_zone_mask(u32 addr)
41{ 44{
42 return addr & 0xff000000u; 45 return addr & TIPC_ZONE_MASK;
43} 46}
44 47
45static inline u32 tipc_cluster_mask(u32 addr) 48static inline u32 tipc_cluster_mask(u32 addr)
46{ 49{
47 return addr & 0xfffff000u; 50 return addr & TIPC_CLUSTER_MASK;
48} 51}
49 52
50static inline int in_own_cluster(u32 addr) 53static inline int in_own_cluster(u32 addr)
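The new mask constants make the <zone.cluster.node> layout of a TIPC address explicit: 8 bits of zone, 12 of cluster, 12 of node. A small illustrative fragment (the address value is an example):

	u32 addr = 0x01001001;			/* TIPC address <1.1.1> */
	u32 zone = tipc_zone_mask(addr);	/* 0x01000000: zone prefix */
	u32 cluster = tipc_cluster_mask(addr);	/* 0x01001000: zone + cluster */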
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 7dc1dc7151ea..fa68d1e9ff4b 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -44,13 +44,6 @@
44 44
45#define BCLINK_WIN_DEFAULT 20 /* bcast link window size (default) */ 45#define BCLINK_WIN_DEFAULT 20 /* bcast link window size (default) */
46 46
47/*
48 * Loss rate for incoming broadcast frames; used to test retransmission code.
49 * Set to N to cause every N'th frame to be discarded; 0 => don't discard any.
50 */
51
52#define TIPC_BCAST_LOSS_RATE 0
53
54/** 47/**
55 * struct bcbearer_pair - a pair of bearers used by broadcast link 48 * struct bcbearer_pair - a pair of bearers used by broadcast link
56 * @primary: pointer to primary bearer 49 * @primary: pointer to primary bearer
@@ -414,9 +407,7 @@ int tipc_bclink_send_msg(struct sk_buff *buf)
414 spin_lock_bh(&bc_lock); 407 spin_lock_bh(&bc_lock);
415 408
416 res = tipc_link_send_buf(bcl, buf); 409 res = tipc_link_send_buf(bcl, buf);
417 if (unlikely(res == -ELINKCONG)) 410 if (likely(res > 0))
418 buf_discard(buf);
419 else
420 bclink_set_last_sent(); 411 bclink_set_last_sent();
421 412
422 bcl->stats.queue_sz_counts++; 413 bcl->stats.queue_sz_counts++;
@@ -434,9 +425,6 @@ int tipc_bclink_send_msg(struct sk_buff *buf)
434 425
435void tipc_bclink_recv_pkt(struct sk_buff *buf) 426void tipc_bclink_recv_pkt(struct sk_buff *buf)
436{ 427{
437#if (TIPC_BCAST_LOSS_RATE)
438 static int rx_count;
439#endif
440 struct tipc_msg *msg = buf_msg(buf); 428 struct tipc_msg *msg = buf_msg(buf);
441 struct tipc_node *node = tipc_node_find(msg_prevnode(msg)); 429 struct tipc_node *node = tipc_node_find(msg_prevnode(msg));
442 u32 next_in; 430 u32 next_in;
@@ -470,14 +458,6 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf)
470 return; 458 return;
471 } 459 }
472 460
473#if (TIPC_BCAST_LOSS_RATE)
474 if (++rx_count == TIPC_BCAST_LOSS_RATE) {
475 rx_count = 0;
476 buf_discard(buf);
477 return;
478 }
479#endif
480
481 tipc_node_lock(node); 461 tipc_node_lock(node);
482receive: 462receive:
483 deferred = node->bclink.deferred_head; 463 deferred = node->bclink.deferred_head;
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 411719feb803..85209eadfae6 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -46,6 +46,8 @@ static u32 media_count;
46 46
47struct tipc_bearer tipc_bearers[MAX_BEARERS]; 47struct tipc_bearer tipc_bearers[MAX_BEARERS];
48 48
49static void bearer_disable(struct tipc_bearer *b_ptr);
50
49/** 51/**
50 * media_name_valid - validate media name 52 * media_name_valid - validate media name
51 * 53 *
@@ -342,15 +344,15 @@ struct sk_buff *tipc_bearer_get_names(void)
342void tipc_bearer_add_dest(struct tipc_bearer *b_ptr, u32 dest) 344void tipc_bearer_add_dest(struct tipc_bearer *b_ptr, u32 dest)
343{ 345{
344 tipc_nmap_add(&b_ptr->nodes, dest); 346 tipc_nmap_add(&b_ptr->nodes, dest);
345 tipc_disc_update_link_req(b_ptr->link_req);
346 tipc_bcbearer_sort(); 347 tipc_bcbearer_sort();
348 tipc_disc_add_dest(b_ptr->link_req);
347} 349}
348 350
349void tipc_bearer_remove_dest(struct tipc_bearer *b_ptr, u32 dest) 351void tipc_bearer_remove_dest(struct tipc_bearer *b_ptr, u32 dest)
350{ 352{
351 tipc_nmap_remove(&b_ptr->nodes, dest); 353 tipc_nmap_remove(&b_ptr->nodes, dest);
352 tipc_disc_update_link_req(b_ptr->link_req);
353 tipc_bcbearer_sort(); 354 tipc_bcbearer_sort();
355 tipc_disc_remove_dest(b_ptr->link_req);
354} 356}
355 357
356/* 358/*
@@ -493,8 +495,15 @@ int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
493 warn("Bearer <%s> rejected, illegal name\n", name); 495 warn("Bearer <%s> rejected, illegal name\n", name);
494 return -EINVAL; 496 return -EINVAL;
495 } 497 }
496 if (!tipc_addr_domain_valid(disc_domain) || 498 if (tipc_addr_domain_valid(disc_domain) &&
497 !tipc_in_scope(disc_domain, tipc_own_addr)) { 499 (disc_domain != tipc_own_addr)) {
500 if (tipc_in_scope(disc_domain, tipc_own_addr)) {
501 disc_domain = tipc_own_addr & TIPC_CLUSTER_MASK;
502 res = 0; /* accept any node in own cluster */
503 } else if (in_own_cluster(disc_domain))
504 res = 0; /* accept specified node in own cluster */
505 }
506 if (res) {
498 warn("Bearer <%s> rejected, illegal discovery domain\n", name); 507 warn("Bearer <%s> rejected, illegal discovery domain\n", name);
499 return -EINVAL; 508 return -EINVAL;
500 } 509 }
@@ -511,7 +520,7 @@ int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
511 if (!m_ptr) { 520 if (!m_ptr) {
512 warn("Bearer <%s> rejected, media <%s> not registered\n", name, 521 warn("Bearer <%s> rejected, media <%s> not registered\n", name,
513 b_name.media_name); 522 b_name.media_name);
514 goto failed; 523 goto exit;
515 } 524 }
516 525
517 if (priority == TIPC_MEDIA_LINK_PRI) 526 if (priority == TIPC_MEDIA_LINK_PRI)
@@ -527,14 +536,14 @@ restart:
527 } 536 }
528 if (!strcmp(name, tipc_bearers[i].name)) { 537 if (!strcmp(name, tipc_bearers[i].name)) {
529 warn("Bearer <%s> rejected, already enabled\n", name); 538 warn("Bearer <%s> rejected, already enabled\n", name);
530 goto failed; 539 goto exit;
531 } 540 }
532 if ((tipc_bearers[i].priority == priority) && 541 if ((tipc_bearers[i].priority == priority) &&
533 (++with_this_prio > 2)) { 542 (++with_this_prio > 2)) {
534 if (priority-- == 0) { 543 if (priority-- == 0) {
535 warn("Bearer <%s> rejected, duplicate priority\n", 544 warn("Bearer <%s> rejected, duplicate priority\n",
536 name); 545 name);
537 goto failed; 546 goto exit;
538 } 547 }
539 warn("Bearer <%s> priority adjustment required %u->%u\n", 548 warn("Bearer <%s> priority adjustment required %u->%u\n",
540 name, priority + 1, priority); 549 name, priority + 1, priority);
@@ -544,7 +553,7 @@ restart:
544 if (bearer_id >= MAX_BEARERS) { 553 if (bearer_id >= MAX_BEARERS) {
545 warn("Bearer <%s> rejected, bearer limit reached (%u)\n", 554 warn("Bearer <%s> rejected, bearer limit reached (%u)\n",
546 name, MAX_BEARERS); 555 name, MAX_BEARERS);
547 goto failed; 556 goto exit;
548 } 557 }
549 558
550 b_ptr = &tipc_bearers[bearer_id]; 559 b_ptr = &tipc_bearers[bearer_id];
@@ -552,7 +561,7 @@ restart:
552 res = m_ptr->enable_bearer(b_ptr); 561 res = m_ptr->enable_bearer(b_ptr);
553 if (res) { 562 if (res) {
554 warn("Bearer <%s> rejected, enable failure (%d)\n", name, -res); 563 warn("Bearer <%s> rejected, enable failure (%d)\n", name, -res);
555 goto failed; 564 goto exit;
556 } 565 }
557 566
558 b_ptr->identity = bearer_id; 567 b_ptr->identity = bearer_id;
@@ -562,14 +571,18 @@ restart:
562 b_ptr->priority = priority; 571 b_ptr->priority = priority;
563 INIT_LIST_HEAD(&b_ptr->cong_links); 572 INIT_LIST_HEAD(&b_ptr->cong_links);
564 INIT_LIST_HEAD(&b_ptr->links); 573 INIT_LIST_HEAD(&b_ptr->links);
565 b_ptr->link_req = tipc_disc_init_link_req(b_ptr, &m_ptr->bcast_addr,
566 disc_domain);
567 spin_lock_init(&b_ptr->lock); 574 spin_lock_init(&b_ptr->lock);
568 write_unlock_bh(&tipc_net_lock); 575
576 res = tipc_disc_create(b_ptr, &m_ptr->bcast_addr, disc_domain);
577 if (res) {
578 bearer_disable(b_ptr);
579 warn("Bearer <%s> rejected, discovery object creation failed\n",
580 name);
581 goto exit;
582 }
569 info("Enabled bearer <%s>, discovery domain %s, priority %u\n", 583 info("Enabled bearer <%s>, discovery domain %s, priority %u\n",
570 name, tipc_addr_string_fill(addr_string, disc_domain), priority); 584 name, tipc_addr_string_fill(addr_string, disc_domain), priority);
571 return 0; 585exit:
572failed:
573 write_unlock_bh(&tipc_net_lock); 586 write_unlock_bh(&tipc_net_lock);
574 return res; 587 return res;
575} 588}
@@ -620,14 +633,14 @@ static void bearer_disable(struct tipc_bearer *b_ptr)
620 struct link *temp_l_ptr; 633 struct link *temp_l_ptr;
621 634
622 info("Disabling bearer <%s>\n", b_ptr->name); 635 info("Disabling bearer <%s>\n", b_ptr->name);
623 tipc_disc_stop_link_req(b_ptr->link_req);
624 spin_lock_bh(&b_ptr->lock); 636 spin_lock_bh(&b_ptr->lock);
625 b_ptr->link_req = NULL;
626 b_ptr->blocked = 1; 637 b_ptr->blocked = 1;
627 b_ptr->media->disable_bearer(b_ptr); 638 b_ptr->media->disable_bearer(b_ptr);
628 list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) { 639 list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
629 tipc_link_delete(l_ptr); 640 tipc_link_delete(l_ptr);
630 } 641 }
642 if (b_ptr->link_req)
643 tipc_disc_delete(b_ptr->link_req);
631 spin_unlock_bh(&b_ptr->lock); 644 spin_unlock_bh(&b_ptr->lock);
632 memset(b_ptr, 0, sizeof(struct tipc_bearer)); 645 memset(b_ptr, 0, sizeof(struct tipc_bearer));
633} 646}
diff --git a/net/tipc/core.c b/net/tipc/core.c
index c9a73e7763f6..943b6af84265 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -179,8 +179,7 @@ static int __init tipc_init(void)
179 if (tipc_log_resize(CONFIG_TIPC_LOG) != 0) 179 if (tipc_log_resize(CONFIG_TIPC_LOG) != 0)
180 warn("Unable to create log buffer\n"); 180 warn("Unable to create log buffer\n");
181 181
182 info("Activated (version " TIPC_MOD_VER 182 info("Activated (version " TIPC_MOD_VER ")\n");
183 " compiled " __DATE__ " " __TIME__ ")\n");
184 183
185 tipc_own_addr = 0; 184 tipc_own_addr = 0;
186 tipc_remote_management = 1; 185 tipc_remote_management = 1;
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index 491eff56b9da..0987933155b9 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -39,19 +39,17 @@
39#include "discover.h" 39#include "discover.h"
40 40
41#define TIPC_LINK_REQ_INIT 125 /* min delay during bearer start up */ 41#define TIPC_LINK_REQ_INIT 125 /* min delay during bearer start up */
42#define TIPC_LINK_REQ_FAST 2000 /* normal delay if bearer has no links */ 42#define TIPC_LINK_REQ_FAST 1000 /* max delay if bearer has no links */
43#define TIPC_LINK_REQ_SLOW 600000 /* normal delay if bearer has links */ 43#define TIPC_LINK_REQ_SLOW 60000 /* max delay if bearer has links */
44 44#define TIPC_LINK_REQ_INACTIVE 0xffffffff /* indicates no timer in use */
45/*
46 * TODO: Most of the inter-cluster setup stuff should be
47 * rewritten, and be made conformant with specification.
48 */
49 45
50 46
51/** 47/**
52 * struct link_req - information about an ongoing link setup request 48 * struct link_req - information about an ongoing link setup request
53 * @bearer: bearer issuing requests 49 * @bearer: bearer issuing requests
54 * @dest: destination address for request messages 50 * @dest: destination address for request messages
51 * @domain: network domain to which links can be established
52 * @num_nodes: number of nodes currently discovered (i.e. with an active link)
55 * @buf: request message to be (repeatedly) sent 53 * @buf: request message to be (repeatedly) sent
56 * @timer: timer governing period between requests 54 * @timer: timer governing period between requests
57 * @timer_intv: current interval between requests (in ms) 55 * @timer_intv: current interval between requests (in ms)
@@ -59,6 +57,8 @@
59struct link_req { 57struct link_req {
60 struct tipc_bearer *bearer; 58 struct tipc_bearer *bearer;
61 struct tipc_media_addr dest; 59 struct tipc_media_addr dest;
60 u32 domain;
61 int num_nodes;
62 struct sk_buff *buf; 62 struct sk_buff *buf;
63 struct timer_list timer; 63 struct timer_list timer;
64 unsigned int timer_intv; 64 unsigned int timer_intv;
@@ -147,7 +147,7 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr)
147 } 147 }
148 if (!tipc_in_scope(dest, tipc_own_addr)) 148 if (!tipc_in_scope(dest, tipc_own_addr))
149 return; 149 return;
150 if (!in_own_cluster(orig)) 150 if (!tipc_in_scope(b_ptr->link_req->domain, orig))
151 return; 151 return;
152 152
153 /* Locate structure corresponding to requesting node */ 153 /* Locate structure corresponding to requesting node */
@@ -214,44 +214,54 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr)
214} 214}
215 215
216/** 216/**
217 * tipc_disc_stop_link_req - stop sending periodic link setup requests 217 * disc_update - update frequency of periodic link setup requests
218 * @req: ptr to link request structure 218 * @req: ptr to link request structure
219 *
220 * Reinitiates discovery process if discovery object has no associated nodes
221 * and is either not currently searching or is searching at a slow rate
219 */ 222 */
220 223
221void tipc_disc_stop_link_req(struct link_req *req) 224static void disc_update(struct link_req *req)
222{ 225{
223 if (!req) 226 if (!req->num_nodes) {
224 return; 227 if ((req->timer_intv == TIPC_LINK_REQ_INACTIVE) ||
228 (req->timer_intv > TIPC_LINK_REQ_FAST)) {
229 req->timer_intv = TIPC_LINK_REQ_INIT;
230 k_start_timer(&req->timer, req->timer_intv);
231 }
232 }
233}
225 234
226 k_cancel_timer(&req->timer); 235/**
227 k_term_timer(&req->timer); 236 * tipc_disc_add_dest - increment set of discovered nodes
228 buf_discard(req->buf); 237 * @req: ptr to link request structure
229 kfree(req); 238 */
239
240void tipc_disc_add_dest(struct link_req *req)
241{
242 req->num_nodes++;
230} 243}
231 244
232/** 245/**
233 * tipc_disc_update_link_req - update frequency of periodic link setup requests 246 * tipc_disc_remove_dest - decrement set of discovered nodes
234 * @req: ptr to link request structure 247 * @req: ptr to link request structure
235 */ 248 */
236 249
237void tipc_disc_update_link_req(struct link_req *req) 250void tipc_disc_remove_dest(struct link_req *req)
238{ 251{
239 if (!req) 252 req->num_nodes--;
240 return; 253 disc_update(req);
254}
241 255
242 if (req->timer_intv == TIPC_LINK_REQ_SLOW) { 256/**
243 if (!req->bearer->nodes.count) { 257 * disc_send_msg - send link setup request message
244 req->timer_intv = TIPC_LINK_REQ_FAST; 258 * @req: ptr to link request structure
245 k_start_timer(&req->timer, req->timer_intv); 259 */
246 } 260
247 } else if (req->timer_intv == TIPC_LINK_REQ_FAST) { 261static void disc_send_msg(struct link_req *req)
248 if (req->bearer->nodes.count) { 262{
249 req->timer_intv = TIPC_LINK_REQ_SLOW; 263 if (!req->bearer->blocked)
250 k_start_timer(&req->timer, req->timer_intv); 264 tipc_bearer_send(req->bearer, req->buf, &req->dest);
251 }
252 } else {
253 /* leave timer "as is" if haven't yet reached a "normal" rate */
254 }
255} 265}
256 266
257/** 267/**
@@ -263,56 +273,86 @@ void tipc_disc_update_link_req(struct link_req *req)
263 273
264static void disc_timeout(struct link_req *req) 274static void disc_timeout(struct link_req *req)
265{ 275{
276 int max_delay;
277
266 spin_lock_bh(&req->bearer->lock); 278 spin_lock_bh(&req->bearer->lock);
267 279
268 req->bearer->media->send_msg(req->buf, req->bearer, &req->dest); 280 /* Stop searching if only desired node has been found */
269 281
270 if ((req->timer_intv == TIPC_LINK_REQ_SLOW) || 282 if (tipc_node(req->domain) && req->num_nodes) {
271 (req->timer_intv == TIPC_LINK_REQ_FAST)) { 283 req->timer_intv = TIPC_LINK_REQ_INACTIVE;
272 /* leave timer interval "as is" if already at a "normal" rate */ 284 goto exit;
273 } else {
274 req->timer_intv *= 2;
275 if (req->timer_intv > TIPC_LINK_REQ_FAST)
276 req->timer_intv = TIPC_LINK_REQ_FAST;
277 if ((req->timer_intv == TIPC_LINK_REQ_FAST) &&
278 (req->bearer->nodes.count))
279 req->timer_intv = TIPC_LINK_REQ_SLOW;
280 } 285 }
281 k_start_timer(&req->timer, req->timer_intv);
282 286
287 /*
288 * Send discovery message, then update discovery timer
289 *
290 * Keep doubling time between requests until limit is reached;
 291 * hold at fast polling rate if we don't have any associated nodes,
292 * otherwise hold at slow polling rate
293 */
294
295 disc_send_msg(req);
296
297 req->timer_intv *= 2;
298 if (req->num_nodes)
299 max_delay = TIPC_LINK_REQ_SLOW;
300 else
301 max_delay = TIPC_LINK_REQ_FAST;
302 if (req->timer_intv > max_delay)
303 req->timer_intv = max_delay;
304
305 k_start_timer(&req->timer, req->timer_intv);
306exit:
283 spin_unlock_bh(&req->bearer->lock); 307 spin_unlock_bh(&req->bearer->lock);
284} 308}
285 309
286/** 310/**
287 * tipc_disc_init_link_req - start sending periodic link setup requests 311 * tipc_disc_create - create object to send periodic link setup requests
288 * @b_ptr: ptr to bearer issuing requests 312 * @b_ptr: ptr to bearer issuing requests
289 * @dest: destination address for request messages 313 * @dest: destination address for request messages
290 * @dest_domain: network domain of node(s) which should respond to message 314 * @dest_domain: network domain to which links can be established
291 * 315 *
292 * Returns pointer to link request structure, or NULL if unable to create. 316 * Returns 0 if successful, otherwise -errno.
293 */ 317 */
294 318
295struct link_req *tipc_disc_init_link_req(struct tipc_bearer *b_ptr, 319int tipc_disc_create(struct tipc_bearer *b_ptr,
296 const struct tipc_media_addr *dest, 320 struct tipc_media_addr *dest, u32 dest_domain)
297 u32 dest_domain)
298{ 321{
299 struct link_req *req; 322 struct link_req *req;
300 323
301 req = kmalloc(sizeof(*req), GFP_ATOMIC); 324 req = kmalloc(sizeof(*req), GFP_ATOMIC);
302 if (!req) 325 if (!req)
303 return NULL; 326 return -ENOMEM;
304 327
305 req->buf = tipc_disc_init_msg(DSC_REQ_MSG, dest_domain, b_ptr); 328 req->buf = tipc_disc_init_msg(DSC_REQ_MSG, dest_domain, b_ptr);
306 if (!req->buf) { 329 if (!req->buf) {
307 kfree(req); 330 kfree(req);
308 return NULL; 331 return -ENOMSG;
309 } 332 }
310 333
311 memcpy(&req->dest, dest, sizeof(*dest)); 334 memcpy(&req->dest, dest, sizeof(*dest));
312 req->bearer = b_ptr; 335 req->bearer = b_ptr;
336 req->domain = dest_domain;
337 req->num_nodes = 0;
313 req->timer_intv = TIPC_LINK_REQ_INIT; 338 req->timer_intv = TIPC_LINK_REQ_INIT;
314 k_init_timer(&req->timer, (Handler)disc_timeout, (unsigned long)req); 339 k_init_timer(&req->timer, (Handler)disc_timeout, (unsigned long)req);
315 k_start_timer(&req->timer, req->timer_intv); 340 k_start_timer(&req->timer, req->timer_intv);
316 return req; 341 b_ptr->link_req = req;
342 disc_send_msg(req);
343 return 0;
344}
345
346/**
347 * tipc_disc_delete - destroy object sending periodic link setup requests
348 * @req: ptr to link request structure
349 */
350
351void tipc_disc_delete(struct link_req *req)
352{
353 k_cancel_timer(&req->timer);
354 k_term_timer(&req->timer);
355 buf_discard(req->buf);
356 kfree(req);
317} 357}
318 358
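The rewritten disc_timeout() above replaces the old three-way rate check with a single rule: double the interval on every firing, then clamp it at TIPC_LINK_REQ_FAST while no nodes have been discovered, or at TIPC_LINK_REQ_SLOW once at least one has (disc_update() drops back to TIPC_LINK_REQ_INIT when a destination goes away). A standalone sketch of that backoff, using invented interval constants rather than TIPC's real values:

#include <stdio.h>

#define REQ_INIT   125          /* assumed initial interval (ms) */
#define REQ_FAST  1000          /* assumed cap while num_nodes == 0 */
#define REQ_SLOW 60000          /* assumed cap once nodes are known */

struct fake_req {
        unsigned int timer_intv;
        unsigned int num_nodes;
};

static void next_interval(struct fake_req *req)
{
        unsigned int max_delay;

        req->timer_intv *= 2;   /* exponential backoff between requests */
        max_delay = req->num_nodes ? REQ_SLOW : REQ_FAST;
        if (req->timer_intv > max_delay)
                req->timer_intv = max_delay;
}

int main(void)
{
        struct fake_req req = { REQ_INIT, 0 };
        int i;

        for (i = 0; i < 5; i++) {
                next_interval(&req);
                printf("request #%d after %u ms\n", i + 1, req.timer_intv);
        }
        return 0;
}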
diff --git a/net/tipc/discover.h b/net/tipc/discover.h
index e48a167e47b2..a3af595b86cb 100644
--- a/net/tipc/discover.h
+++ b/net/tipc/discover.h
@@ -39,12 +39,11 @@
39 39
40struct link_req; 40struct link_req;
41 41
42struct link_req *tipc_disc_init_link_req(struct tipc_bearer *b_ptr, 42int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest,
43 const struct tipc_media_addr *dest, 43 u32 dest_domain);
44 u32 dest_domain); 44void tipc_disc_delete(struct link_req *req);
45void tipc_disc_update_link_req(struct link_req *req); 45void tipc_disc_add_dest(struct link_req *req);
46void tipc_disc_stop_link_req(struct link_req *req); 46void tipc_disc_remove_dest(struct link_req *req);
47
48void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr); 47void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr);
49 48
50#endif 49#endif
diff --git a/net/tipc/link.c b/net/tipc/link.c
index ebf338f7b14e..5ed4b4f7452d 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -92,7 +92,8 @@ static int link_recv_changeover_msg(struct link **l_ptr, struct sk_buff **buf);
92static void link_set_supervision_props(struct link *l_ptr, u32 tolerance); 92static void link_set_supervision_props(struct link *l_ptr, u32 tolerance);
93static int link_send_sections_long(struct tipc_port *sender, 93static int link_send_sections_long(struct tipc_port *sender,
94 struct iovec const *msg_sect, 94 struct iovec const *msg_sect,
95 u32 num_sect, u32 destnode); 95 u32 num_sect, unsigned int total_len,
96 u32 destnode);
96static void link_check_defragm_bufs(struct link *l_ptr); 97static void link_check_defragm_bufs(struct link *l_ptr);
97static void link_state_event(struct link *l_ptr, u32 event); 98static void link_state_event(struct link *l_ptr, u32 event);
98static void link_reset_statistics(struct link *l_ptr); 99static void link_reset_statistics(struct link *l_ptr);
@@ -842,6 +843,25 @@ static void link_add_to_outqueue(struct link *l_ptr,
842 l_ptr->stats.max_queue_sz = l_ptr->out_queue_size; 843 l_ptr->stats.max_queue_sz = l_ptr->out_queue_size;
843} 844}
844 845
846static void link_add_chain_to_outqueue(struct link *l_ptr,
847 struct sk_buff *buf_chain,
848 u32 long_msgno)
849{
850 struct sk_buff *buf;
851 struct tipc_msg *msg;
852
853 if (!l_ptr->next_out)
854 l_ptr->next_out = buf_chain;
855 while (buf_chain) {
856 buf = buf_chain;
857 buf_chain = buf_chain->next;
858
859 msg = buf_msg(buf);
860 msg_set_long_msgno(msg, long_msgno);
861 link_add_to_outqueue(l_ptr, buf, msg);
862 }
863}
864
845/* 865/*
846 * tipc_link_send_buf() is the 'full path' for messages, called from 866 * tipc_link_send_buf() is the 'full path' for messages, called from
847 * inside TIPC when the 'fast path' in tipc_send_buf 867 * inside TIPC when the 'fast path' in tipc_send_buf
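The new link_add_chain_to_outqueue() helper factors out a pattern both fragmenting send paths now share: walk a singly linked fragment chain, stamp every buffer with the same long-message number, and queue it. A userspace model of that walk (fake_buf stands in for sk_buff; nothing here is kernel API):

#include <stdio.h>
#include <stdlib.h>

struct fake_buf {
        struct fake_buf *next;
        unsigned int long_msgno;
};

static void enqueue(struct fake_buf *buf)
{
        printf("queued fragment, msgno %u\n", buf->long_msgno);
        free(buf);
}

static void add_chain_to_outqueue(struct fake_buf *chain, unsigned int msgno)
{
        while (chain) {
                struct fake_buf *buf = chain;

                chain = chain->next;            /* advance before buf is consumed */
                buf->long_msgno = msgno;        /* same msgno on every fragment */
                enqueue(buf);
        }
}

int main(void)
{
        struct fake_buf *head = NULL;
        int i;

        for (i = 0; i < 3; i++) {
                struct fake_buf *b = calloc(1, sizeof(*b));

                if (!b)
                        return 1;
                b->next = head;
                head = b;
        }
        add_chain_to_outqueue(head, 42);
        return 0;
}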
@@ -864,8 +884,9 @@ int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf)
864 884
865 if (unlikely(queue_size >= queue_limit)) { 885 if (unlikely(queue_size >= queue_limit)) {
866 if (imp <= TIPC_CRITICAL_IMPORTANCE) { 886 if (imp <= TIPC_CRITICAL_IMPORTANCE) {
867 return link_schedule_port(l_ptr, msg_origport(msg), 887 link_schedule_port(l_ptr, msg_origport(msg), size);
868 size); 888 buf_discard(buf);
889 return -ELINKCONG;
869 } 890 }
870 buf_discard(buf); 891 buf_discard(buf);
871 if (imp > CONN_MANAGER) { 892 if (imp > CONN_MANAGER) {
@@ -1042,6 +1063,7 @@ int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode)
1042int tipc_link_send_sections_fast(struct tipc_port *sender, 1063int tipc_link_send_sections_fast(struct tipc_port *sender,
1043 struct iovec const *msg_sect, 1064 struct iovec const *msg_sect,
1044 const u32 num_sect, 1065 const u32 num_sect,
1066 unsigned int total_len,
1045 u32 destaddr) 1067 u32 destaddr)
1046{ 1068{
1047 struct tipc_msg *hdr = &sender->phdr; 1069 struct tipc_msg *hdr = &sender->phdr;
@@ -1057,8 +1079,8 @@ again:
1057 * (Must not hold any locks while building message.) 1079 * (Must not hold any locks while building message.)
1058 */ 1080 */
1059 1081
1060 res = tipc_msg_build(hdr, msg_sect, num_sect, sender->max_pkt, 1082 res = tipc_msg_build(hdr, msg_sect, num_sect, total_len,
1061 !sender->user_port, &buf); 1083 sender->max_pkt, !sender->user_port, &buf);
1062 1084
1063 read_lock_bh(&tipc_net_lock); 1085 read_lock_bh(&tipc_net_lock);
1064 node = tipc_node_find(destaddr); 1086 node = tipc_node_find(destaddr);
@@ -1069,8 +1091,6 @@ again:
1069 if (likely(buf)) { 1091 if (likely(buf)) {
1070 res = link_send_buf_fast(l_ptr, buf, 1092 res = link_send_buf_fast(l_ptr, buf,
1071 &sender->max_pkt); 1093 &sender->max_pkt);
1072 if (unlikely(res < 0))
1073 buf_discard(buf);
1074exit: 1094exit:
1075 tipc_node_unlock(node); 1095 tipc_node_unlock(node);
1076 read_unlock_bh(&tipc_net_lock); 1096 read_unlock_bh(&tipc_net_lock);
@@ -1105,7 +1125,8 @@ exit:
1105 goto again; 1125 goto again;
1106 1126
1107 return link_send_sections_long(sender, msg_sect, 1127 return link_send_sections_long(sender, msg_sect,
1108 num_sect, destaddr); 1128 num_sect, total_len,
1129 destaddr);
1109 } 1130 }
1110 tipc_node_unlock(node); 1131 tipc_node_unlock(node);
1111 } 1132 }
@@ -1117,7 +1138,7 @@ exit:
1117 return tipc_reject_msg(buf, TIPC_ERR_NO_NODE); 1138 return tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
1118 if (res >= 0) 1139 if (res >= 0)
1119 return tipc_port_reject_sections(sender, hdr, msg_sect, num_sect, 1140 return tipc_port_reject_sections(sender, hdr, msg_sect, num_sect,
1120 TIPC_ERR_NO_NODE); 1141 total_len, TIPC_ERR_NO_NODE);
1121 return res; 1142 return res;
1122} 1143}
1123 1144
@@ -1138,12 +1159,13 @@ exit:
1138static int link_send_sections_long(struct tipc_port *sender, 1159static int link_send_sections_long(struct tipc_port *sender,
1139 struct iovec const *msg_sect, 1160 struct iovec const *msg_sect,
1140 u32 num_sect, 1161 u32 num_sect,
1162 unsigned int total_len,
1141 u32 destaddr) 1163 u32 destaddr)
1142{ 1164{
1143 struct link *l_ptr; 1165 struct link *l_ptr;
1144 struct tipc_node *node; 1166 struct tipc_node *node;
1145 struct tipc_msg *hdr = &sender->phdr; 1167 struct tipc_msg *hdr = &sender->phdr;
1146 u32 dsz = msg_data_sz(hdr); 1168 u32 dsz = total_len;
1147 u32 max_pkt, fragm_sz, rest; 1169 u32 max_pkt, fragm_sz, rest;
1148 struct tipc_msg fragm_hdr; 1170 struct tipc_msg fragm_hdr;
1149 struct sk_buff *buf, *buf_chain, *prev; 1171 struct sk_buff *buf, *buf_chain, *prev;
@@ -1169,7 +1191,6 @@ again:
1169 1191
1170 tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT, 1192 tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
1171 INT_H_SIZE, msg_destnode(hdr)); 1193 INT_H_SIZE, msg_destnode(hdr));
1172 msg_set_link_selector(&fragm_hdr, sender->ref);
1173 msg_set_size(&fragm_hdr, max_pkt); 1194 msg_set_size(&fragm_hdr, max_pkt);
1174 msg_set_fragm_no(&fragm_hdr, 1); 1195 msg_set_fragm_no(&fragm_hdr, 1);
1175 1196
@@ -1271,28 +1292,15 @@ reject:
1271 buf_discard(buf_chain); 1292 buf_discard(buf_chain);
1272 } 1293 }
1273 return tipc_port_reject_sections(sender, hdr, msg_sect, num_sect, 1294 return tipc_port_reject_sections(sender, hdr, msg_sect, num_sect,
1274 TIPC_ERR_NO_NODE); 1295 total_len, TIPC_ERR_NO_NODE);
1275 } 1296 }
1276 1297
1277 /* Append whole chain to send queue: */ 1298 /* Append chain of fragments to send queue & send them */
1278 1299
1279 buf = buf_chain; 1300 l_ptr->long_msg_seq_no++;
1280 l_ptr->long_msg_seq_no = mod(l_ptr->long_msg_seq_no + 1); 1301 link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
1281 if (!l_ptr->next_out) 1302 l_ptr->stats.sent_fragments += fragm_no;
1282 l_ptr->next_out = buf_chain;
1283 l_ptr->stats.sent_fragmented++; 1303 l_ptr->stats.sent_fragmented++;
1284 while (buf) {
1285 struct sk_buff *next = buf->next;
1286 struct tipc_msg *msg = buf_msg(buf);
1287
1288 l_ptr->stats.sent_fragments++;
1289 msg_set_long_msgno(msg, l_ptr->long_msg_seq_no);
1290 link_add_to_outqueue(l_ptr, buf, msg);
1291 buf = next;
1292 }
1293
1294 /* Send it, if possible: */
1295
1296 tipc_link_push_queue(l_ptr); 1304 tipc_link_push_queue(l_ptr);
1297 tipc_node_unlock(node); 1305 tipc_node_unlock(node);
1298 return dsz; 1306 return dsz;
@@ -2407,6 +2415,8 @@ void tipc_link_recv_bundle(struct sk_buff *buf)
2407 */ 2415 */
2408static int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf) 2416static int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
2409{ 2417{
2418 struct sk_buff *buf_chain = NULL;
2419 struct sk_buff *buf_chain_tail = (struct sk_buff *)&buf_chain;
2410 struct tipc_msg *inmsg = buf_msg(buf); 2420 struct tipc_msg *inmsg = buf_msg(buf);
2411 struct tipc_msg fragm_hdr; 2421 struct tipc_msg fragm_hdr;
2412 u32 insize = msg_size(inmsg); 2422 u32 insize = msg_size(inmsg);
@@ -2415,7 +2425,7 @@ static int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
2415 u32 rest = insize; 2425 u32 rest = insize;
2416 u32 pack_sz = l_ptr->max_pkt; 2426 u32 pack_sz = l_ptr->max_pkt;
2417 u32 fragm_sz = pack_sz - INT_H_SIZE; 2427 u32 fragm_sz = pack_sz - INT_H_SIZE;
2418 u32 fragm_no = 1; 2428 u32 fragm_no = 0;
2419 u32 destaddr; 2429 u32 destaddr;
2420 2430
2421 if (msg_short(inmsg)) 2431 if (msg_short(inmsg))
@@ -2427,10 +2437,6 @@ static int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
2427 2437
2428 tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT, 2438 tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
2429 INT_H_SIZE, destaddr); 2439 INT_H_SIZE, destaddr);
2430 msg_set_link_selector(&fragm_hdr, msg_link_selector(inmsg));
2431 msg_set_long_msgno(&fragm_hdr, mod(l_ptr->long_msg_seq_no++));
2432 msg_set_fragm_no(&fragm_hdr, fragm_no);
2433 l_ptr->stats.sent_fragmented++;
2434 2440
2435 /* Chop up message: */ 2441 /* Chop up message: */
2436 2442
@@ -2443,27 +2449,37 @@ static int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
2443 } 2449 }
2444 fragm = tipc_buf_acquire(fragm_sz + INT_H_SIZE); 2450 fragm = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
2445 if (fragm == NULL) { 2451 if (fragm == NULL) {
2446 warn("Link unable to fragment message\n"); 2452 buf_discard(buf);
2447 dsz = -ENOMEM; 2453 while (buf_chain) {
2448 goto exit; 2454 buf = buf_chain;
2455 buf_chain = buf_chain->next;
2456 buf_discard(buf);
2457 }
2458 return -ENOMEM;
2449 } 2459 }
2450 msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE); 2460 msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
2461 fragm_no++;
2462 msg_set_fragm_no(&fragm_hdr, fragm_no);
2451 skb_copy_to_linear_data(fragm, &fragm_hdr, INT_H_SIZE); 2463 skb_copy_to_linear_data(fragm, &fragm_hdr, INT_H_SIZE);
2452 skb_copy_to_linear_data_offset(fragm, INT_H_SIZE, crs, 2464 skb_copy_to_linear_data_offset(fragm, INT_H_SIZE, crs,
2453 fragm_sz); 2465 fragm_sz);
2454 /* Send queued messages first, if any: */ 2466 buf_chain_tail->next = fragm;
2467 buf_chain_tail = fragm;
2455 2468
2456 l_ptr->stats.sent_fragments++;
2457 tipc_link_send_buf(l_ptr, fragm);
2458 if (!tipc_link_is_up(l_ptr))
2459 return dsz;
2460 msg_set_fragm_no(&fragm_hdr, ++fragm_no);
2461 rest -= fragm_sz; 2469 rest -= fragm_sz;
2462 crs += fragm_sz; 2470 crs += fragm_sz;
2463 msg_set_type(&fragm_hdr, FRAGMENT); 2471 msg_set_type(&fragm_hdr, FRAGMENT);
2464 } 2472 }
2465exit:
2466 buf_discard(buf); 2473 buf_discard(buf);
2474
2475 /* Append chain of fragments to send queue & send them */
2476
2477 l_ptr->long_msg_seq_no++;
2478 link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
2479 l_ptr->stats.sent_fragments += fragm_no;
2480 l_ptr->stats.sent_fragmented++;
2481 tipc_link_push_queue(l_ptr);
2482
2467 return dsz; 2483 return dsz;
2468} 2484}
2469 2485
diff --git a/net/tipc/link.h b/net/tipc/link.h
index e6a30dbe1aaa..74fbecab1ea0 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -228,6 +228,7 @@ u32 tipc_link_get_max_pkt(u32 dest, u32 selector);
228int tipc_link_send_sections_fast(struct tipc_port *sender, 228int tipc_link_send_sections_fast(struct tipc_port *sender,
229 struct iovec const *msg_sect, 229 struct iovec const *msg_sect,
230 const u32 num_sect, 230 const u32 num_sect,
231 unsigned int total_len,
231 u32 destnode); 232 u32 destnode);
232void tipc_link_recv_bundle(struct sk_buff *buf); 233void tipc_link_recv_bundle(struct sk_buff *buf);
233int tipc_link_recv_fragment(struct sk_buff **pending, 234int tipc_link_recv_fragment(struct sk_buff **pending,
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 6d92d17e7fb5..03e57bf92c73 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -68,20 +68,6 @@ void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type,
68} 68}
69 69
70/** 70/**
71 * tipc_msg_calc_data_size - determine total data size for message
72 */
73
74int tipc_msg_calc_data_size(struct iovec const *msg_sect, u32 num_sect)
75{
76 int dsz = 0;
77 int i;
78
79 for (i = 0; i < num_sect; i++)
80 dsz += msg_sect[i].iov_len;
81 return dsz;
82}
83
84/**
85 * tipc_msg_build - create message using specified header and data 71 * tipc_msg_build - create message using specified header and data
86 * 72 *
87 * Note: Caller must not hold any locks in case copy_from_user() is interrupted! 73 * Note: Caller must not hold any locks in case copy_from_user() is interrupted!
@@ -89,18 +75,13 @@ int tipc_msg_calc_data_size(struct iovec const *msg_sect, u32 num_sect)
89 * Returns message data size or errno 75 * Returns message data size or errno
90 */ 76 */
91 77
92int tipc_msg_build(struct tipc_msg *hdr, 78int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
93 struct iovec const *msg_sect, u32 num_sect, 79 u32 num_sect, unsigned int total_len,
94 int max_size, int usrmem, struct sk_buff **buf) 80 int max_size, int usrmem, struct sk_buff **buf)
95{ 81{
96 int dsz, sz, hsz, pos, res, cnt; 82 int dsz, sz, hsz, pos, res, cnt;
97 83
98 dsz = tipc_msg_calc_data_size(msg_sect, num_sect); 84 dsz = total_len;
99 if (unlikely(dsz > TIPC_MAX_USER_MSG_SIZE)) {
100 *buf = NULL;
101 return -EINVAL;
102 }
103
104 pos = hsz = msg_hdr_sz(hdr); 85 pos = hsz = msg_hdr_sz(hdr);
105 sz = hsz + dsz; 86 sz = hsz + dsz;
106 msg_set_size(hdr, sz); 87 msg_set_size(hdr, sz);
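tipc_msg_build() no longer sums the iovec itself; callers compute total_len once at the socket boundary, validate it there, and thread it down, which is what the deleted tipc_msg_calc_data_size() used to do on every (re)build. The computation itself is just a sum over the vector, e.g.:

#include <stdio.h>
#include <sys/uio.h>

static unsigned int iov_total(const struct iovec *iov, unsigned int n)
{
        unsigned int total = 0, i;

        for (i = 0; i < n; i++)
                total += iov[i].iov_len;
        return total;
}

int main(void)
{
        char a[100], b[50];
        struct iovec iov[2] = {
                { .iov_base = a, .iov_len = sizeof(a) },
                { .iov_base = b, .iov_len = sizeof(b) },
        };

        printf("total_len = %u\n", iov_total(iov, 2));
        return 0;
}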
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index de02339fc175..8452454731fa 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -39,41 +39,24 @@
39 39
40#include "bearer.h" 40#include "bearer.h"
41 41
42/*
43 * Constants and routines used to read and write TIPC payload message headers
44 *
45 * Note: Some items are also used with TIPC internal message headers
46 */
47
42#define TIPC_VERSION 2 48#define TIPC_VERSION 2
43 49
44/* 50/*
45 * TIPC user data message header format, version 2: 51 * Payload message users are defined in TIPC's public API:
46 * 52 * - TIPC_LOW_IMPORTANCE
47 * 53 * - TIPC_MEDIUM_IMPORTANCE
48 * 1 0 9 8 7 6 5 4|3 2 1 0 9 8 7 6|5 4 3 2 1 0 9 8|7 6 5 4 3 2 1 0 54 * - TIPC_HIGH_IMPORTANCE
49 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 55 * - TIPC_CRITICAL_IMPORTANCE
50 * w0:|vers | user |hdr sz |n|d|s|-| message size | 56 */
51 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 57
52 * w1:|mstyp| error |rer cnt|lsc|opt p| broadcast ack no | 58/*
53 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 59 * Payload message types
54 * w2:| link level ack no | broadcast/link level seq no |
55 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
56 * w3:| previous node |
57 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
58 * w4:| originating port |
59 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
60 * w5:| destination port |
61 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
62 * w6:| originating node |
63 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
64 * w7:| destination node |
65 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
66 * w8:| name type / transport sequence number |
67 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
68 * w9:| name instance/multicast lower bound |
69 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
70 * wA:| multicast upper bound |
71 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
72 * / /
73 * \ options \
74 * / /
75 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
76 *
77 */ 60 */
78 61
79#define TIPC_CONN_MSG 0 62#define TIPC_CONN_MSG 0
@@ -81,6 +64,9 @@
81#define TIPC_NAMED_MSG 2 64#define TIPC_NAMED_MSG 2
82#define TIPC_DIRECT_MSG 3 65#define TIPC_DIRECT_MSG 3
83 66
67/*
68 * Message header sizes
69 */
84 70
85#define SHORT_H_SIZE 24 /* Connected, in-cluster messages */ 71#define SHORT_H_SIZE 24 /* Connected, in-cluster messages */
86#define DIR_MSG_H_SIZE 32 /* Directly addressed messages */ 72#define DIR_MSG_H_SIZE 32 /* Directly addressed messages */
@@ -473,40 +459,11 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
473 459
474 460
475/* 461/*
476 TIPC internal message header format, version 2 462 * Constants and routines used to read and write TIPC internal message headers
477 463 */
478 1 0 9 8 7 6 5 4|3 2 1 0 9 8 7 6|5 4 3 2 1 0 9 8|7 6 5 4 3 2 1 0
479 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
480 w0:|vers |msg usr|hdr sz |n|resrv| packet size |
481 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
482 w1:|m typ| sequence gap | broadcast ack no |
483 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
484 w2:| link level ack no/bc_gap_from | seq no / bcast_gap_to |
485 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
486 w3:| previous node |
487 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
488 w4:| next sent broadcast/fragm no | next sent pkt/ fragm msg no |
489 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
490 w5:| session no |rsv=0|r|berid|link prio|netpl|p|
491 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
492 w6:| originating node |
493 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
494 w7:| destination node |
495 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
496 w8:| transport sequence number |
497 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
498 w9:| msg count / bcast tag | link tolerance |
499 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
500 \ \
501 / User Specific Data /
502 \ \
503 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
504
505 NB: CONN_MANAGER use data message format. LINK_CONFIG has own format.
506*/
507 464
508/* 465/*
509 * Internal users 466 * Internal message users
510 */ 467 */
511 468
512#define BCAST_PROTOCOL 5 469#define BCAST_PROTOCOL 5
@@ -520,7 +477,7 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
520#define LINK_CONFIG 13 477#define LINK_CONFIG 13
521 478
522/* 479/*
523 * Connection management protocol messages 480 * Connection management protocol message types
524 */ 481 */
525 482
526#define CONN_PROBE 0 483#define CONN_PROBE 0
@@ -528,12 +485,41 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
528#define CONN_ACK 2 485#define CONN_ACK 2
529 486
530/* 487/*
531 * Name distributor messages 488 * Name distributor message types
532 */ 489 */
533 490
534#define PUBLICATION 0 491#define PUBLICATION 0
535#define WITHDRAWAL 1 492#define WITHDRAWAL 1
536 493
494/*
495 * Segmentation message types
496 */
497
498#define FIRST_FRAGMENT 0
499#define FRAGMENT 1
500#define LAST_FRAGMENT 2
501
502/*
503 * Link management protocol message types
504 */
505
506#define STATE_MSG 0
507#define RESET_MSG 1
508#define ACTIVATE_MSG 2
509
510/*
511 * Changeover tunnel message types
512 */
513#define DUPLICATE_MSG 0
514#define ORIGINAL_MSG 1
515
516/*
517 * Config protocol message types
518 */
519
520#define DSC_REQ_MSG 0
521#define DSC_RESP_MSG 1
522
537 523
538/* 524/*
539 * Word 1 525 * Word 1
@@ -761,50 +747,11 @@ static inline void msg_set_link_tolerance(struct tipc_msg *m, u32 n)
761 msg_set_bits(m, 9, 0, 0xffff, n); 747 msg_set_bits(m, 9, 0, 0xffff, n);
762} 748}
763 749
764/*
765 * Segmentation message types
766 */
767
768#define FIRST_FRAGMENT 0
769#define FRAGMENT 1
770#define LAST_FRAGMENT 2
771
772/*
773 * Link management protocol message types
774 */
775
776#define STATE_MSG 0
777#define RESET_MSG 1
778#define ACTIVATE_MSG 2
779
780/*
781 * Changeover tunnel message types
782 */
783#define DUPLICATE_MSG 0
784#define ORIGINAL_MSG 1
785
786/*
787 * Routing table message types
788 */
789#define EXT_ROUTING_TABLE 0
790#define LOCAL_ROUTING_TABLE 1 /* obsoleted */
791#define SLAVE_ROUTING_TABLE 2
792#define ROUTE_ADDITION 3
793#define ROUTE_REMOVAL 4
794
795/*
796 * Config protocol message types
797 */
798
799#define DSC_REQ_MSG 0
800#define DSC_RESP_MSG 1
801
802u32 tipc_msg_tot_importance(struct tipc_msg *m); 750u32 tipc_msg_tot_importance(struct tipc_msg *m);
803void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, 751void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type,
804 u32 hsize, u32 destnode); 752 u32 hsize, u32 destnode);
805int tipc_msg_calc_data_size(struct iovec const *msg_sect, u32 num_sect); 753int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
806int tipc_msg_build(struct tipc_msg *hdr, 754 u32 num_sect, unsigned int total_len,
807 struct iovec const *msg_sect, u32 num_sect,
808 int max_size, int usrmem, struct sk_buff **buf); 755 int max_size, int usrmem, struct sk_buff **buf);
809 756
810static inline void msg_set_media_addr(struct tipc_msg *m, struct tipc_media_addr *a) 757static inline void msg_set_media_addr(struct tipc_msg *m, struct tipc_media_addr *a)
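The message-type constants regrouped above all land in 32-bit header words that are manipulated through (word, position, mask) accessors, such as the msg_set_bits() call visible in the hunk. A simplified model of those accessors (the real msg.h also converts each word to and from network byte order, omitted here):

#include <stdint.h>
#include <stdio.h>

static void set_bits(uint32_t *hdr, int w, int pos, uint32_t mask, uint32_t val)
{
        hdr[w] &= ~(mask << pos);               /* clear the field */
        hdr[w] |= (val & mask) << pos;          /* install the new value */
}

static uint32_t get_bits(const uint32_t *hdr, int w, int pos, uint32_t mask)
{
        return (hdr[w] >> pos) & mask;
}

int main(void)
{
        uint32_t hdr[10] = { 0 };

        set_bits(hdr, 9, 0, 0xffff, 1500);      /* cf. msg_set_link_tolerance() */
        printf("w9 tolerance field = %u\n", get_bits(hdr, 9, 0, 0xffff));
        return 0;
}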
diff --git a/net/tipc/port.c b/net/tipc/port.c
index 6ff78f9c7d65..c68dc956a423 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -74,7 +74,8 @@ static u32 port_peerport(struct tipc_port *p_ptr)
74 */ 74 */
75 75
76int tipc_multicast(u32 ref, struct tipc_name_seq const *seq, 76int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
77 u32 num_sect, struct iovec const *msg_sect) 77 u32 num_sect, struct iovec const *msg_sect,
78 unsigned int total_len)
78{ 79{
79 struct tipc_msg *hdr; 80 struct tipc_msg *hdr;
80 struct sk_buff *buf; 81 struct sk_buff *buf;
@@ -91,11 +92,14 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
91 92
92 hdr = &oport->phdr; 93 hdr = &oport->phdr;
93 msg_set_type(hdr, TIPC_MCAST_MSG); 94 msg_set_type(hdr, TIPC_MCAST_MSG);
95 msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE);
96 msg_set_destport(hdr, 0);
97 msg_set_destnode(hdr, 0);
94 msg_set_nametype(hdr, seq->type); 98 msg_set_nametype(hdr, seq->type);
95 msg_set_namelower(hdr, seq->lower); 99 msg_set_namelower(hdr, seq->lower);
96 msg_set_nameupper(hdr, seq->upper); 100 msg_set_nameupper(hdr, seq->upper);
97 msg_set_hdr_sz(hdr, MCAST_H_SIZE); 101 msg_set_hdr_sz(hdr, MCAST_H_SIZE);
98 res = tipc_msg_build(hdr, msg_sect, num_sect, MAX_MSG_SIZE, 102 res = tipc_msg_build(hdr, msg_sect, num_sect, total_len, MAX_MSG_SIZE,
99 !oport->user_port, &buf); 103 !oport->user_port, &buf);
100 if (unlikely(!buf)) 104 if (unlikely(!buf))
101 return res; 105 return res;
@@ -161,6 +165,7 @@ void tipc_port_recv_mcast(struct sk_buff *buf, struct port_list *dp)
161 /* Deliver a copy of message to each destination port */ 165 /* Deliver a copy of message to each destination port */
162 166
163 if (dp->count != 0) { 167 if (dp->count != 0) {
168 msg_set_destnode(msg, tipc_own_addr);
164 if (dp->count == 1) { 169 if (dp->count == 1) {
165 msg_set_destport(msg, dp->ports[0]); 170 msg_set_destport(msg, dp->ports[0]);
166 tipc_port_recv_msg(buf); 171 tipc_port_recv_msg(buf);
@@ -414,12 +419,12 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err)
414 419
415int tipc_port_reject_sections(struct tipc_port *p_ptr, struct tipc_msg *hdr, 420int tipc_port_reject_sections(struct tipc_port *p_ptr, struct tipc_msg *hdr,
416 struct iovec const *msg_sect, u32 num_sect, 421 struct iovec const *msg_sect, u32 num_sect,
417 int err) 422 unsigned int total_len, int err)
418{ 423{
419 struct sk_buff *buf; 424 struct sk_buff *buf;
420 int res; 425 int res;
421 426
422 res = tipc_msg_build(hdr, msg_sect, num_sect, MAX_MSG_SIZE, 427 res = tipc_msg_build(hdr, msg_sect, num_sect, total_len, MAX_MSG_SIZE,
423 !p_ptr->user_port, &buf); 428 !p_ptr->user_port, &buf);
424 if (!buf) 429 if (!buf)
425 return res; 430 return res;
@@ -1065,6 +1070,7 @@ int tipc_connect2port(u32 ref, struct tipc_portid const *peer)
1065 msg_set_orignode(msg, tipc_own_addr); 1070 msg_set_orignode(msg, tipc_own_addr);
1066 msg_set_origport(msg, p_ptr->ref); 1071 msg_set_origport(msg, p_ptr->ref);
1067 msg_set_type(msg, TIPC_CONN_MSG); 1072 msg_set_type(msg, TIPC_CONN_MSG);
1073 msg_set_lookup_scope(msg, 0);
1068 msg_set_hdr_sz(msg, SHORT_H_SIZE); 1074 msg_set_hdr_sz(msg, SHORT_H_SIZE);
1069 1075
1070 p_ptr->probing_interval = PROBING_INTERVAL; 1076 p_ptr->probing_interval = PROBING_INTERVAL;
@@ -1158,12 +1164,13 @@ int tipc_shutdown(u32 ref)
1158 */ 1164 */
1159 1165
1160static int tipc_port_recv_sections(struct tipc_port *sender, unsigned int num_sect, 1166static int tipc_port_recv_sections(struct tipc_port *sender, unsigned int num_sect,
1161 struct iovec const *msg_sect) 1167 struct iovec const *msg_sect,
1168 unsigned int total_len)
1162{ 1169{
1163 struct sk_buff *buf; 1170 struct sk_buff *buf;
1164 int res; 1171 int res;
1165 1172
1166 res = tipc_msg_build(&sender->phdr, msg_sect, num_sect, 1173 res = tipc_msg_build(&sender->phdr, msg_sect, num_sect, total_len,
1167 MAX_MSG_SIZE, !sender->user_port, &buf); 1174 MAX_MSG_SIZE, !sender->user_port, &buf);
1168 if (likely(buf)) 1175 if (likely(buf))
1169 tipc_port_recv_msg(buf); 1176 tipc_port_recv_msg(buf);
@@ -1174,7 +1181,8 @@ static int tipc_port_recv_sections(struct tipc_port *sender, unsigned int num_se
1174 * tipc_send - send message sections on connection 1181 * tipc_send - send message sections on connection
1175 */ 1182 */
1176 1183
1177int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect) 1184int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect,
1185 unsigned int total_len)
1178{ 1186{
1179 struct tipc_port *p_ptr; 1187 struct tipc_port *p_ptr;
1180 u32 destnode; 1188 u32 destnode;
@@ -1189,9 +1197,10 @@ int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect)
1189 destnode = port_peernode(p_ptr); 1197 destnode = port_peernode(p_ptr);
1190 if (likely(destnode != tipc_own_addr)) 1198 if (likely(destnode != tipc_own_addr))
1191 res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect, 1199 res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect,
1192 destnode); 1200 total_len, destnode);
1193 else 1201 else
1194 res = tipc_port_recv_sections(p_ptr, num_sect, msg_sect); 1202 res = tipc_port_recv_sections(p_ptr, num_sect, msg_sect,
1203 total_len);
1195 1204
1196 if (likely(res != -ELINKCONG)) { 1205 if (likely(res != -ELINKCONG)) {
1197 p_ptr->congested = 0; 1206 p_ptr->congested = 0;
@@ -1202,8 +1211,7 @@ int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect)
1202 } 1211 }
1203 if (port_unreliable(p_ptr)) { 1212 if (port_unreliable(p_ptr)) {
1204 p_ptr->congested = 0; 1213 p_ptr->congested = 0;
1205 /* Just calculate msg length and return */ 1214 return total_len;
1206 return tipc_msg_calc_data_size(msg_sect, num_sect);
1207 } 1215 }
1208 return -ELINKCONG; 1216 return -ELINKCONG;
1209} 1217}
@@ -1213,7 +1221,8 @@ int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect)
1213 */ 1221 */
1214 1222
1215int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain, 1223int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain,
1216 unsigned int num_sect, struct iovec const *msg_sect) 1224 unsigned int num_sect, struct iovec const *msg_sect,
1225 unsigned int total_len)
1217{ 1226{
1218 struct tipc_port *p_ptr; 1227 struct tipc_port *p_ptr;
1219 struct tipc_msg *msg; 1228 struct tipc_msg *msg;
@@ -1240,23 +1249,23 @@ int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain,
1240 if (likely(destport)) { 1249 if (likely(destport)) {
1241 if (likely(destnode == tipc_own_addr)) 1250 if (likely(destnode == tipc_own_addr))
1242 res = tipc_port_recv_sections(p_ptr, num_sect, 1251 res = tipc_port_recv_sections(p_ptr, num_sect,
1243 msg_sect); 1252 msg_sect, total_len);
1244 else 1253 else
1245 res = tipc_link_send_sections_fast(p_ptr, msg_sect, 1254 res = tipc_link_send_sections_fast(p_ptr, msg_sect,
1246 num_sect, destnode); 1255 num_sect, total_len,
1256 destnode);
1247 if (likely(res != -ELINKCONG)) { 1257 if (likely(res != -ELINKCONG)) {
1248 if (res > 0) 1258 if (res > 0)
1249 p_ptr->sent++; 1259 p_ptr->sent++;
1250 return res; 1260 return res;
1251 } 1261 }
1252 if (port_unreliable(p_ptr)) { 1262 if (port_unreliable(p_ptr)) {
1253 /* Just calculate msg length and return */ 1263 return total_len;
1254 return tipc_msg_calc_data_size(msg_sect, num_sect);
1255 } 1264 }
1256 return -ELINKCONG; 1265 return -ELINKCONG;
1257 } 1266 }
1258 return tipc_port_reject_sections(p_ptr, msg, msg_sect, num_sect, 1267 return tipc_port_reject_sections(p_ptr, msg, msg_sect, num_sect,
1259 TIPC_ERR_NO_NAME); 1268 total_len, TIPC_ERR_NO_NAME);
1260} 1269}
1261 1270
1262/** 1271/**
@@ -1264,7 +1273,8 @@ int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain,
1264 */ 1273 */
1265 1274
1266int tipc_send2port(u32 ref, struct tipc_portid const *dest, 1275int tipc_send2port(u32 ref, struct tipc_portid const *dest,
1267 unsigned int num_sect, struct iovec const *msg_sect) 1276 unsigned int num_sect, struct iovec const *msg_sect,
1277 unsigned int total_len)
1268{ 1278{
1269 struct tipc_port *p_ptr; 1279 struct tipc_port *p_ptr;
1270 struct tipc_msg *msg; 1280 struct tipc_msg *msg;
@@ -1276,6 +1286,7 @@ int tipc_send2port(u32 ref, struct tipc_portid const *dest,
1276 1286
1277 msg = &p_ptr->phdr; 1287 msg = &p_ptr->phdr;
1278 msg_set_type(msg, TIPC_DIRECT_MSG); 1288 msg_set_type(msg, TIPC_DIRECT_MSG);
1289 msg_set_lookup_scope(msg, 0);
1279 msg_set_orignode(msg, tipc_own_addr); 1290 msg_set_orignode(msg, tipc_own_addr);
1280 msg_set_origport(msg, ref); 1291 msg_set_origport(msg, ref);
1281 msg_set_destnode(msg, dest->node); 1292 msg_set_destnode(msg, dest->node);
@@ -1283,18 +1294,18 @@ int tipc_send2port(u32 ref, struct tipc_portid const *dest,
1283 msg_set_hdr_sz(msg, DIR_MSG_H_SIZE); 1294 msg_set_hdr_sz(msg, DIR_MSG_H_SIZE);
1284 1295
1285 if (dest->node == tipc_own_addr) 1296 if (dest->node == tipc_own_addr)
1286 res = tipc_port_recv_sections(p_ptr, num_sect, msg_sect); 1297 res = tipc_port_recv_sections(p_ptr, num_sect, msg_sect,
1298 total_len);
1287 else 1299 else
1288 res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect, 1300 res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect,
1289 dest->node); 1301 total_len, dest->node);
1290 if (likely(res != -ELINKCONG)) { 1302 if (likely(res != -ELINKCONG)) {
1291 if (res > 0) 1303 if (res > 0)
1292 p_ptr->sent++; 1304 p_ptr->sent++;
1293 return res; 1305 return res;
1294 } 1306 }
1295 if (port_unreliable(p_ptr)) { 1307 if (port_unreliable(p_ptr)) {
1296 /* Just calculate msg length and return */ 1308 return total_len;
1297 return tipc_msg_calc_data_size(msg_sect, num_sect);
1298 } 1309 }
1299 return -ELINKCONG; 1310 return -ELINKCONG;
1300} 1311}
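All of the port.c send paths above follow one return convention, which this patch simplifies: success yields the byte count (now just total_len), congestion yields -ELINKCONG unless the port is unreliable, in which case the message is dropped but still reported as sent. A compact model of that contract (ELINKCONG below is a stand-in define, not the kernel's value):

#include <errno.h>
#include <stdio.h>

#define ELINKCONG EAGAIN        /* stand-in; TIPC defines its own code */

static int do_send(unsigned int total_len, int congested, int unreliable)
{
        if (!congested)
                return total_len;       /* bytes accepted for delivery */
        if (unreliable)
                return total_len;       /* dropped, but reported as sent */
        return -ELINKCONG;              /* caller must back off and retry */
}

int main(void)
{
        printf("reliable + congested:   %d\n", do_send(100, 1, 0));
        printf("unreliable + congested: %d\n", do_send(100, 1, 1));
        return 0;
}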
diff --git a/net/tipc/port.h b/net/tipc/port.h
index 87b9424ae0ec..b9aa34195aec 100644
--- a/net/tipc/port.h
+++ b/net/tipc/port.h
@@ -205,23 +205,27 @@ int tipc_disconnect_port(struct tipc_port *tp_ptr);
205/* 205/*
206 * TIPC messaging routines 206 * TIPC messaging routines
207 */ 207 */
208int tipc_send(u32 portref, unsigned int num_sect, struct iovec const *msg_sect); 208int tipc_send(u32 portref, unsigned int num_sect, struct iovec const *msg_sect,
209 unsigned int total_len);
209 210
210int tipc_send2name(u32 portref, struct tipc_name const *name, u32 domain, 211int tipc_send2name(u32 portref, struct tipc_name const *name, u32 domain,
211 unsigned int num_sect, struct iovec const *msg_sect); 212 unsigned int num_sect, struct iovec const *msg_sect,
213 unsigned int total_len);
212 214
213int tipc_send2port(u32 portref, struct tipc_portid const *dest, 215int tipc_send2port(u32 portref, struct tipc_portid const *dest,
214 unsigned int num_sect, struct iovec const *msg_sect); 216 unsigned int num_sect, struct iovec const *msg_sect,
217 unsigned int total_len);
215 218
216int tipc_send_buf2port(u32 portref, struct tipc_portid const *dest, 219int tipc_send_buf2port(u32 portref, struct tipc_portid const *dest,
217 struct sk_buff *buf, unsigned int dsz); 220 struct sk_buff *buf, unsigned int dsz);
218 221
219int tipc_multicast(u32 portref, struct tipc_name_seq const *seq, 222int tipc_multicast(u32 portref, struct tipc_name_seq const *seq,
220 unsigned int section_count, struct iovec const *msg); 223 unsigned int section_count, struct iovec const *msg,
224 unsigned int total_len);
221 225
222int tipc_port_reject_sections(struct tipc_port *p_ptr, struct tipc_msg *hdr, 226int tipc_port_reject_sections(struct tipc_port *p_ptr, struct tipc_msg *hdr,
223 struct iovec const *msg_sect, u32 num_sect, 227 struct iovec const *msg_sect, u32 num_sect,
224 int err); 228 unsigned int total_len, int err);
225struct sk_buff *tipc_port_get_ports(void); 229struct sk_buff *tipc_port_get_ports(void);
226void tipc_port_recv_proto_msg(struct sk_buff *buf); 230void tipc_port_recv_proto_msg(struct sk_buff *buf);
227void tipc_port_recv_mcast(struct sk_buff *buf, struct port_list *dp); 231void tipc_port_recv_mcast(struct sk_buff *buf, struct port_list *dp);
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 29d94d53198d..338837396642 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -535,6 +535,9 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
535 if (unlikely((m->msg_namelen < sizeof(*dest)) || 535 if (unlikely((m->msg_namelen < sizeof(*dest)) ||
536 (dest->family != AF_TIPC))) 536 (dest->family != AF_TIPC)))
537 return -EINVAL; 537 return -EINVAL;
538 if ((total_len > TIPC_MAX_USER_MSG_SIZE) ||
539 (m->msg_iovlen > (unsigned)INT_MAX))
540 return -EMSGSIZE;
538 541
539 if (iocb) 542 if (iocb)
540 lock_sock(sk); 543 lock_sock(sk);
@@ -573,12 +576,14 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
573 &dest->addr.name.name, 576 &dest->addr.name.name,
574 dest->addr.name.domain, 577 dest->addr.name.domain,
575 m->msg_iovlen, 578 m->msg_iovlen,
576 m->msg_iov); 579 m->msg_iov,
580 total_len);
577 } else if (dest->addrtype == TIPC_ADDR_ID) { 581 } else if (dest->addrtype == TIPC_ADDR_ID) {
578 res = tipc_send2port(tport->ref, 582 res = tipc_send2port(tport->ref,
579 &dest->addr.id, 583 &dest->addr.id,
580 m->msg_iovlen, 584 m->msg_iovlen,
581 m->msg_iov); 585 m->msg_iov,
586 total_len);
582 } else if (dest->addrtype == TIPC_ADDR_MCAST) { 587 } else if (dest->addrtype == TIPC_ADDR_MCAST) {
583 if (needs_conn) { 588 if (needs_conn) {
584 res = -EOPNOTSUPP; 589 res = -EOPNOTSUPP;
@@ -590,7 +595,8 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
590 res = tipc_multicast(tport->ref, 595 res = tipc_multicast(tport->ref,
591 &dest->addr.nameseq, 596 &dest->addr.nameseq,
592 m->msg_iovlen, 597 m->msg_iovlen,
593 m->msg_iov); 598 m->msg_iov,
599 total_len);
594 } 600 }
595 if (likely(res != -ELINKCONG)) { 601 if (likely(res != -ELINKCONG)) {
596 if (needs_conn && (res >= 0)) 602 if (needs_conn && (res >= 0))
@@ -640,6 +646,10 @@ static int send_packet(struct kiocb *iocb, struct socket *sock,
640 if (unlikely(dest)) 646 if (unlikely(dest))
641 return send_msg(iocb, sock, m, total_len); 647 return send_msg(iocb, sock, m, total_len);
642 648
649 if ((total_len > TIPC_MAX_USER_MSG_SIZE) ||
650 (m->msg_iovlen > (unsigned)INT_MAX))
651 return -EMSGSIZE;
652
643 if (iocb) 653 if (iocb)
644 lock_sock(sk); 654 lock_sock(sk);
645 655
@@ -652,7 +662,8 @@ static int send_packet(struct kiocb *iocb, struct socket *sock,
652 break; 662 break;
653 } 663 }
654 664
655 res = tipc_send(tport->ref, m->msg_iovlen, m->msg_iov); 665 res = tipc_send(tport->ref, m->msg_iovlen, m->msg_iov,
666 total_len);
656 if (likely(res != -ELINKCONG)) 667 if (likely(res != -ELINKCONG))
657 break; 668 break;
658 if (m->msg_flags & MSG_DONTWAIT) { 669 if (m->msg_flags & MSG_DONTWAIT) {
@@ -723,6 +734,12 @@ static int send_stream(struct kiocb *iocb, struct socket *sock,
723 goto exit; 734 goto exit;
724 } 735 }
725 736
737 if ((total_len > (unsigned)INT_MAX) ||
738 (m->msg_iovlen > (unsigned)INT_MAX)) {
739 res = -EMSGSIZE;
740 goto exit;
741 }
742
726 /* 743 /*
727 * Send each iovec entry using one or more messages 744 * Send each iovec entry using one or more messages
728 * 745 *
@@ -753,7 +770,7 @@ static int send_stream(struct kiocb *iocb, struct socket *sock,
753 bytes_to_send = curr_left; 770 bytes_to_send = curr_left;
754 my_iov.iov_base = curr_start; 771 my_iov.iov_base = curr_start;
755 my_iov.iov_len = bytes_to_send; 772 my_iov.iov_len = bytes_to_send;
756 res = send_packet(NULL, sock, &my_msg, 0); 773 res = send_packet(NULL, sock, &my_msg, bytes_to_send);
757 if (res < 0) { 774 if (res < 0) {
758 if (bytes_sent) 775 if (bytes_sent)
759 res = bytes_sent; 776 res = bytes_sent;
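send_msg() and send_packet() now reject oversized requests before taking the socket lock, so an over-limit send fails fast with -EMSGSIZE. The shape of that guard, with an assumed placeholder value for TIPC_MAX_USER_MSG_SIZE:

#include <errno.h>
#include <limits.h>
#include <stddef.h>

#define TIPC_MAX_USER_MSG_SIZE 66000u   /* assumed value for illustration */

static int validate_send(size_t total_len, size_t msg_iovlen)
{
        if (total_len > TIPC_MAX_USER_MSG_SIZE ||
            msg_iovlen > (size_t)INT_MAX)
                return -EMSGSIZE;       /* fail before lock_sock() */
        return 0;
}

int main(void)
{
        return validate_send(70000, 2) == -EMSGSIZE ? 0 : 1;
}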
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index aae9eae13404..6cf726863485 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -109,7 +109,7 @@ static void subscr_send_event(struct subscription *sub,
109 sub->evt.found_upper = htohl(found_upper, sub->swap); 109 sub->evt.found_upper = htohl(found_upper, sub->swap);
110 sub->evt.port.ref = htohl(port_ref, sub->swap); 110 sub->evt.port.ref = htohl(port_ref, sub->swap);
111 sub->evt.port.node = htohl(node, sub->swap); 111 sub->evt.port.node = htohl(node, sub->swap);
112 tipc_send(sub->server_ref, 1, &msg_sect); 112 tipc_send(sub->server_ref, 1, &msg_sect, msg_sect.iov_len);
113} 113}
114 114
115/** 115/**
@@ -521,7 +521,7 @@ static void subscr_named_msg_event(void *usr_handle,
521 521
522 /* Send an ACK- to complete connection handshaking */ 522 /* Send an ACK- to complete connection handshaking */
523 523
524 tipc_send(server_port_ref, 0, NULL); 524 tipc_send(server_port_ref, 0, NULL, 0);
525 525
526 /* Handle optional subscription request */ 526 /* Handle optional subscription request */
527 527
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index b1d75beb7e20..0722a25a3a33 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -2254,7 +2254,7 @@ static int unix_seq_show(struct seq_file *seq, void *v)
2254 struct unix_sock *u = unix_sk(s); 2254 struct unix_sock *u = unix_sk(s);
2255 unix_state_lock(s); 2255 unix_state_lock(s);
2256 2256
2257 seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu", 2257 seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
2258 s, 2258 s,
2259 atomic_read(&s->sk_refcnt), 2259 atomic_read(&s->sk_refcnt),
2260 0, 2260 0,
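The %p to %pK change keeps socket addresses out of /proc/net/unix for unprivileged readers: under kptr_restrict, %pK prints a zeroed pointer unless the reader is privileged. A rough userspace model of that decision (the real logic lives in the kernel's vsprintf):

#include <stdio.h>

static void print_kptr(const void *p, int reader_privileged, int kptr_restrict)
{
        if (kptr_restrict && !reader_privileged)
                p = NULL;               /* shown as a zero/nil pointer */
        printf("%p\n", p);
}

int main(void)
{
        int obj;

        print_kptr(&obj, 0, 1);         /* unprivileged reader: (nil) */
        print_kptr(&obj, 1, 1);         /* privileged reader: real address */
        return 0;
}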
diff --git a/net/wireless/core.c b/net/wireless/core.c
index fe01de29bfe8..880dbe2e6f94 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -46,6 +46,11 @@ static struct dentry *ieee80211_debugfs_dir;
46/* for the cleanup, scan and event works */ 46/* for the cleanup, scan and event works */
47struct workqueue_struct *cfg80211_wq; 47struct workqueue_struct *cfg80211_wq;
48 48
49static bool cfg80211_disable_40mhz_24ghz;
50module_param(cfg80211_disable_40mhz_24ghz, bool, 0644);
51MODULE_PARM_DESC(cfg80211_disable_40mhz_24ghz,
52 "Disable 40MHz support in the 2.4GHz band");
53
49/* requires cfg80211_mutex to be held! */ 54/* requires cfg80211_mutex to be held! */
50struct cfg80211_registered_device *cfg80211_rdev_by_wiphy_idx(int wiphy_idx) 55struct cfg80211_registered_device *cfg80211_rdev_by_wiphy_idx(int wiphy_idx)
51{ 56{
@@ -361,11 +366,12 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
361 366
362 mutex_init(&rdev->mtx); 367 mutex_init(&rdev->mtx);
363 mutex_init(&rdev->devlist_mtx); 368 mutex_init(&rdev->devlist_mtx);
369 mutex_init(&rdev->sched_scan_mtx);
364 INIT_LIST_HEAD(&rdev->netdev_list); 370 INIT_LIST_HEAD(&rdev->netdev_list);
365 spin_lock_init(&rdev->bss_lock); 371 spin_lock_init(&rdev->bss_lock);
366 INIT_LIST_HEAD(&rdev->bss_list); 372 INIT_LIST_HEAD(&rdev->bss_list);
367 INIT_WORK(&rdev->scan_done_wk, __cfg80211_scan_done); 373 INIT_WORK(&rdev->scan_done_wk, __cfg80211_scan_done);
368 374 INIT_WORK(&rdev->sched_scan_results_wk, __cfg80211_sched_scan_results);
369#ifdef CONFIG_CFG80211_WEXT 375#ifdef CONFIG_CFG80211_WEXT
370 rdev->wiphy.wext = &cfg80211_wext_handler; 376 rdev->wiphy.wext = &cfg80211_wext_handler;
371#endif 377#endif
@@ -411,6 +417,67 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
411} 417}
412EXPORT_SYMBOL(wiphy_new); 418EXPORT_SYMBOL(wiphy_new);
413 419
420static int wiphy_verify_combinations(struct wiphy *wiphy)
421{
422 const struct ieee80211_iface_combination *c;
423 int i, j;
424
425 /* If we have combinations enforce them */
426 if (wiphy->n_iface_combinations)
427 wiphy->flags |= WIPHY_FLAG_ENFORCE_COMBINATIONS;
428
429 for (i = 0; i < wiphy->n_iface_combinations; i++) {
430 u32 cnt = 0;
431 u16 all_iftypes = 0;
432
433 c = &wiphy->iface_combinations[i];
434
435 /* Combinations with just one interface aren't real */
436 if (WARN_ON(c->max_interfaces < 2))
437 return -EINVAL;
438
439 /* Need at least one channel */
440 if (WARN_ON(!c->num_different_channels))
441 return -EINVAL;
442
443 if (WARN_ON(!c->n_limits))
444 return -EINVAL;
445
446 for (j = 0; j < c->n_limits; j++) {
447 u16 types = c->limits[j].types;
448
449 /*
450 * interface types shouldn't overlap, this is
451 * used in cfg80211_can_change_interface()
452 */
453 if (WARN_ON(types & all_iftypes))
454 return -EINVAL;
455 all_iftypes |= types;
456
457 if (WARN_ON(!c->limits[j].max))
458 return -EINVAL;
459
460 /* Shouldn't list software iftypes in combinations! */
461 if (WARN_ON(wiphy->software_iftypes & types))
462 return -EINVAL;
463
464 cnt += c->limits[j].max;
465 /*
466 * Don't advertise an unsupported type
467 * in a combination.
468 */
469 if (WARN_ON((wiphy->interface_modes & types) != types))
470 return -EINVAL;
471 }
472
473 /* You can't even choose that many! */
474 if (WARN_ON(cnt < c->max_interfaces))
475 return -EINVAL;
476 }
477
478 return 0;
479}
480
414int wiphy_register(struct wiphy *wiphy) 481int wiphy_register(struct wiphy *wiphy)
415{ 482{
416 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy); 483 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
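For wiphy_verify_combinations() above to pass, a driver advertises its allowed concurrency as limit/combination tables. A hypothetical declaration matching the fields the check walks (the numbers are invented; this fragment assumes the usual cfg80211 headers and is not standalone):

static const struct ieee80211_iface_limit demo_limits[] = {
        { .max = 1, .types = BIT(NL80211_IFTYPE_AP) },
        { .max = 2, .types = BIT(NL80211_IFTYPE_STATION) |
                             BIT(NL80211_IFTYPE_P2P_CLIENT) },
};

static const struct ieee80211_iface_combination demo_comb = {
        .limits = demo_limits,
        .n_limits = ARRAY_SIZE(demo_limits),
        .max_interfaces = 3,            /* >= 2, as the check demands */
        .num_different_channels = 1,    /* must be non-zero */
};

/* at wiphy setup time:
 *      wiphy->iface_combinations = &demo_comb;
 *      wiphy->n_iface_combinations = 1;
 */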
@@ -439,6 +506,10 @@ int wiphy_register(struct wiphy *wiphy)
439 if (WARN_ON(ifmodes != wiphy->interface_modes)) 506 if (WARN_ON(ifmodes != wiphy->interface_modes))
440 wiphy->interface_modes = ifmodes; 507 wiphy->interface_modes = ifmodes;
441 508
509 res = wiphy_verify_combinations(wiphy);
510 if (res)
511 return res;
512
442 /* sanity check supported bands/channels */ 513 /* sanity check supported bands/channels */
443 for (band = 0; band < IEEE80211_NUM_BANDS; band++) { 514 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
444 sband = wiphy->bands[band]; 515 sband = wiphy->bands[band];
@@ -451,6 +522,18 @@ int wiphy_register(struct wiphy *wiphy)
451 return -EINVAL; 522 return -EINVAL;
452 523
453 /* 524 /*
525 * Since cfg80211_disable_40mhz_24ghz is global, we can
526 * modify the sband's ht data even if the driver uses a
527 * global structure for that.
528 */
529 if (cfg80211_disable_40mhz_24ghz &&
530 band == IEEE80211_BAND_2GHZ &&
531 sband->ht_cap.ht_supported) {
532 sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
533 sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SGI_40;
534 }
535
536 /*
454 * Since we use a u32 for rate bitmaps in 537 * Since we use a u32 for rate bitmaps in
455 * ieee80211_get_response_rate, we cannot 538 * ieee80211_get_response_rate, we cannot
456 * have more than 32 legacy rates. 539 * have more than 32 legacy rates.
@@ -476,6 +559,13 @@ int wiphy_register(struct wiphy *wiphy)
476 return -EINVAL; 559 return -EINVAL;
477 } 560 }
478 561
562 if (rdev->wiphy.wowlan.n_patterns) {
563 if (WARN_ON(!rdev->wiphy.wowlan.pattern_min_len ||
564 rdev->wiphy.wowlan.pattern_min_len >
565 rdev->wiphy.wowlan.pattern_max_len))
566 return -EINVAL;
567 }
568
479 /* check and set up bitrates */ 569 /* check and set up bitrates */
480 ieee80211_set_bitrate_flags(wiphy); 570 ieee80211_set_bitrate_flags(wiphy);
481 571
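The new WoWLAN check only sanity-tests pattern lengths; each pattern itself pairs a byte string with a bitmask, one bit per byte, marking which bytes must match the incoming packet. A standalone model of that matching rule (fake_pattern is illustrative, not the cfg80211 struct):

#include <stdio.h>

struct fake_pattern {
        const unsigned char *mask;      /* one bit per pattern byte */
        const unsigned char *pattern;
        int len;
};

static int pattern_matches(const struct fake_pattern *p,
                           const unsigned char *pkt, int pkt_len)
{
        int i;

        if (pkt_len < p->len)
                return 0;
        for (i = 0; i < p->len; i++) {
                if (!(p->mask[i / 8] & (1 << (i % 8))))
                        continue;               /* "don't care" byte */
                if (pkt[i] != p->pattern[i])
                        return 0;
        }
        return 1;
}

int main(void)
{
        static const unsigned char pat[]  = { 0xaa, 0x00, 0xcc };
        static const unsigned char mask[] = { 0x05 };   /* bytes 0 and 2 */
        static const unsigned char pkt[]  = { 0xaa, 0xff, 0xcc, 0x01 };
        struct fake_pattern p = { mask, pat, 3 };

        printf("match: %d\n", pattern_matches(&p, pkt, (int)sizeof(pkt)));
        return 0;
}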
@@ -612,8 +702,10 @@ void cfg80211_dev_free(struct cfg80211_registered_device *rdev)
612 rfkill_destroy(rdev->rfkill); 702 rfkill_destroy(rdev->rfkill);
613 mutex_destroy(&rdev->mtx); 703 mutex_destroy(&rdev->mtx);
614 mutex_destroy(&rdev->devlist_mtx); 704 mutex_destroy(&rdev->devlist_mtx);
705 mutex_destroy(&rdev->sched_scan_mtx);
615 list_for_each_entry_safe(scan, tmp, &rdev->bss_list, list) 706 list_for_each_entry_safe(scan, tmp, &rdev->bss_list, list)
616 cfg80211_put_bss(&scan->pub); 707 cfg80211_put_bss(&scan->pub);
708 cfg80211_rdev_free_wowlan(rdev);
617 kfree(rdev); 709 kfree(rdev);
618} 710}
619 711
@@ -649,6 +741,15 @@ static void wdev_cleanup_work(struct work_struct *work)
649 741
650 cfg80211_unlock_rdev(rdev); 742 cfg80211_unlock_rdev(rdev);
651 743
744 mutex_lock(&rdev->sched_scan_mtx);
745
746 if (WARN_ON(rdev->sched_scan_req &&
747 rdev->sched_scan_req->dev == wdev->netdev)) {
748 __cfg80211_stop_sched_scan(rdev, false);
749 }
750
751 mutex_unlock(&rdev->sched_scan_mtx);
752
652 mutex_lock(&rdev->devlist_mtx); 753 mutex_lock(&rdev->devlist_mtx);
653 rdev->opencount--; 754 rdev->opencount--;
654 mutex_unlock(&rdev->devlist_mtx); 755 mutex_unlock(&rdev->devlist_mtx);
@@ -668,6 +769,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
668 struct net_device *dev = ndev; 769 struct net_device *dev = ndev;
669 struct wireless_dev *wdev = dev->ieee80211_ptr; 770 struct wireless_dev *wdev = dev->ieee80211_ptr;
670 struct cfg80211_registered_device *rdev; 771 struct cfg80211_registered_device *rdev;
772 int ret;
671 773
672 if (!wdev) 774 if (!wdev)
673 return NOTIFY_DONE; 775 return NOTIFY_DONE;
@@ -734,6 +836,10 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
734 break; 836 break;
735 case NL80211_IFTYPE_P2P_CLIENT: 837 case NL80211_IFTYPE_P2P_CLIENT:
736 case NL80211_IFTYPE_STATION: 838 case NL80211_IFTYPE_STATION:
839 mutex_lock(&rdev->sched_scan_mtx);
840 __cfg80211_stop_sched_scan(rdev, false);
841 mutex_unlock(&rdev->sched_scan_mtx);
842
737 wdev_lock(wdev); 843 wdev_lock(wdev);
738#ifdef CONFIG_CFG80211_WEXT 844#ifdef CONFIG_CFG80211_WEXT
739 kfree(wdev->wext.ie); 845 kfree(wdev->wext.ie);
@@ -752,6 +858,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
752 default: 858 default:
753 break; 859 break;
754 } 860 }
861 wdev->beacon_interval = 0;
755 break; 862 break;
756 case NETDEV_DOWN: 863 case NETDEV_DOWN:
757 dev_hold(dev); 864 dev_hold(dev);
@@ -858,6 +965,9 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
858 return notifier_from_errno(-EOPNOTSUPP); 965 return notifier_from_errno(-EOPNOTSUPP);
859 if (rfkill_blocked(rdev->rfkill)) 966 if (rfkill_blocked(rdev->rfkill))
860 return notifier_from_errno(-ERFKILL); 967 return notifier_from_errno(-ERFKILL);
968 ret = cfg80211_can_add_interface(rdev, wdev->iftype);
969 if (ret)
970 return notifier_from_errno(ret);
861 break; 971 break;
862 } 972 }
863 973
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 26a0a084e16b..a570ff9214ec 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -60,8 +60,12 @@ struct cfg80211_registered_device {
60 struct rb_root bss_tree; 60 struct rb_root bss_tree;
61 u32 bss_generation; 61 u32 bss_generation;
62 struct cfg80211_scan_request *scan_req; /* protected by RTNL */ 62 struct cfg80211_scan_request *scan_req; /* protected by RTNL */
63 struct cfg80211_sched_scan_request *sched_scan_req;
63 unsigned long suspend_at; 64 unsigned long suspend_at;
64 struct work_struct scan_done_wk; 65 struct work_struct scan_done_wk;
66 struct work_struct sched_scan_results_wk;
67
68 struct mutex sched_scan_mtx;
65 69
66#ifdef CONFIG_NL80211_TESTMODE 70#ifdef CONFIG_NL80211_TESTMODE
67 struct genl_info *testmode_info; 71 struct genl_info *testmode_info;
@@ -70,6 +74,8 @@ struct cfg80211_registered_device {
70 struct work_struct conn_work; 74 struct work_struct conn_work;
71 struct work_struct event_work; 75 struct work_struct event_work;
72 76
77 struct cfg80211_wowlan *wowlan;
78
73 /* must be last because of the way we do wiphy_priv(), 79 /* must be last because of the way we do wiphy_priv(),
74 * and it should at least be aligned to NETDEV_ALIGN */ 80 * and it should at least be aligned to NETDEV_ALIGN */
75 struct wiphy wiphy __attribute__((__aligned__(NETDEV_ALIGN))); 81 struct wiphy wiphy __attribute__((__aligned__(NETDEV_ALIGN)));
@@ -89,6 +95,18 @@ bool wiphy_idx_valid(int wiphy_idx)
89 return wiphy_idx >= 0; 95 return wiphy_idx >= 0;
90} 96}
91 97
98static inline void
99cfg80211_rdev_free_wowlan(struct cfg80211_registered_device *rdev)
100{
101 int i;
102
103 if (!rdev->wowlan)
104 return;
105 for (i = 0; i < rdev->wowlan->n_patterns; i++)
106 kfree(rdev->wowlan->patterns[i].mask);
107 kfree(rdev->wowlan->patterns);
108 kfree(rdev->wowlan);
109}
92 110
93extern struct workqueue_struct *cfg80211_wq; 111extern struct workqueue_struct *cfg80211_wq;
94extern struct mutex cfg80211_mutex; 112extern struct mutex cfg80211_mutex;
@@ -229,6 +247,7 @@ struct cfg80211_event {
229 u16 status; 247 u16 status;
230 } cr; 248 } cr;
231 struct { 249 struct {
250 struct ieee80211_channel *channel;
232 u8 bssid[ETH_ALEN]; 251 u8 bssid[ETH_ALEN];
233 const u8 *req_ie; 252 const u8 *req_ie;
234 const u8 *resp_ie; 253 const u8 *resp_ie;
@@ -376,7 +395,9 @@ int __cfg80211_disconnect(struct cfg80211_registered_device *rdev,
376int cfg80211_disconnect(struct cfg80211_registered_device *rdev, 395int cfg80211_disconnect(struct cfg80211_registered_device *rdev,
377 struct net_device *dev, u16 reason, 396 struct net_device *dev, u16 reason,
378 bool wextev); 397 bool wextev);
379void __cfg80211_roamed(struct wireless_dev *wdev, const u8 *bssid, 398void __cfg80211_roamed(struct wireless_dev *wdev,
399 struct ieee80211_channel *channel,
400 const u8 *bssid,
380 const u8 *req_ie, size_t req_ie_len, 401 const u8 *req_ie, size_t req_ie_len,
381 const u8 *resp_ie, size_t resp_ie_len); 402 const u8 *resp_ie, size_t resp_ie_len);
382int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev, 403int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev,
@@ -397,12 +418,26 @@ void cfg80211_sme_rx_auth(struct net_device *dev, const u8 *buf, size_t len);
397void cfg80211_sme_disassoc(struct net_device *dev, int idx); 418void cfg80211_sme_disassoc(struct net_device *dev, int idx);
398void __cfg80211_scan_done(struct work_struct *wk); 419void __cfg80211_scan_done(struct work_struct *wk);
399void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak); 420void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak);
421void __cfg80211_sched_scan_results(struct work_struct *wk);
422int __cfg80211_stop_sched_scan(struct cfg80211_registered_device *rdev,
423 bool driver_initiated);
400void cfg80211_upload_connect_keys(struct wireless_dev *wdev); 424void cfg80211_upload_connect_keys(struct wireless_dev *wdev);
401int cfg80211_change_iface(struct cfg80211_registered_device *rdev, 425int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
402 struct net_device *dev, enum nl80211_iftype ntype, 426 struct net_device *dev, enum nl80211_iftype ntype,
403 u32 *flags, struct vif_params *params); 427 u32 *flags, struct vif_params *params);
404void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev); 428void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev);
405 429
430int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
431 struct wireless_dev *wdev,
432 enum nl80211_iftype iftype);
433
434static inline int
435cfg80211_can_add_interface(struct cfg80211_registered_device *rdev,
436 enum nl80211_iftype iftype)
437{
438 return cfg80211_can_change_interface(rdev, NULL, iftype);
439}
440
406struct ieee80211_channel * 441struct ieee80211_channel *
407rdev_freq_to_chan(struct cfg80211_registered_device *rdev, 442rdev_freq_to_chan(struct cfg80211_registered_device *rdev,
408 int freq, enum nl80211_channel_type channel_type); 443 int freq, enum nl80211_channel_type channel_type);
@@ -412,6 +447,9 @@ int cfg80211_set_freq(struct cfg80211_registered_device *rdev,
412 447
413u16 cfg80211_calculate_bitrate(struct rate_info *rate); 448u16 cfg80211_calculate_bitrate(struct rate_info *rate);
414 449
450int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
451 u32 beacon_int);
452
415#ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS 453#ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS
416#define CFG80211_DEV_WARN_ON(cond) WARN_ON(cond) 454#define CFG80211_DEV_WARN_ON(cond) WARN_ON(cond)
417#else 455#else
diff --git a/net/wireless/lib80211_crypt_wep.c b/net/wireless/lib80211_crypt_wep.c
index e2e88878ba35..2f265e033ae2 100644
--- a/net/wireless/lib80211_crypt_wep.c
+++ b/net/wireless/lib80211_crypt_wep.c
@@ -96,13 +96,12 @@ static int lib80211_wep_build_iv(struct sk_buff *skb, int hdr_len,
96 u8 *key, int keylen, void *priv) 96 u8 *key, int keylen, void *priv)
97{ 97{
98 struct lib80211_wep_data *wep = priv; 98 struct lib80211_wep_data *wep = priv;
99 u32 klen, len; 99 u32 klen;
100 u8 *pos; 100 u8 *pos;
101 101
102 if (skb_headroom(skb) < 4 || skb->len < hdr_len) 102 if (skb_headroom(skb) < 4 || skb->len < hdr_len)
103 return -1; 103 return -1;
104 104
105 len = skb->len - hdr_len;
106 pos = skb_push(skb, 4); 105 pos = skb_push(skb, 4);
107 memmove(pos, pos + 4, hdr_len); 106 memmove(pos, pos + 4, hdr_len);
108 pos += hdr_len; 107 pos += hdr_len;
diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c
index 73e39c171ffb..5c116083eeca 100644
--- a/net/wireless/mesh.c
+++ b/net/wireless/mesh.c
@@ -1,5 +1,6 @@
 #include <linux/ieee80211.h>
 #include <net/cfg80211.h>
+#include "nl80211.h"
 #include "core.h"
 
 /* Default values, timeouts in ms */
@@ -53,8 +54,9 @@ const struct mesh_config default_mesh_config = {
 const struct mesh_setup default_mesh_setup = {
 	.path_sel_proto = IEEE80211_PATH_PROTOCOL_HWMP,
 	.path_metric = IEEE80211_PATH_METRIC_AIRTIME,
-	.vendor_ie = NULL,
-	.vendor_ie_len = 0,
+	.ie = NULL,
+	.ie_len = 0,
+	.is_secure = false,
 };
 
 int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
@@ -72,6 +74,10 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
 	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT)
 		return -EOPNOTSUPP;
 
+	if (!(rdev->wiphy.flags & WIPHY_FLAG_MESH_AUTH) &&
+	    setup->is_secure)
+		return -EOPNOTSUPP;
+
 	if (wdev->mesh_id_len)
 		return -EALREADY;
 
@@ -105,6 +111,19 @@ int cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
 	return err;
 }
 
+void cfg80211_notify_new_peer_candidate(struct net_device *dev,
+		const u8 *macaddr, const u8* ie, u8 ie_len, gfp_t gfp)
+{
+	struct wireless_dev *wdev = dev->ieee80211_ptr;
+
+	if (WARN_ON(wdev->iftype != NL80211_IFTYPE_MESH_POINT))
+		return;
+
+	nl80211_send_new_peer_candidate(wiphy_to_dev(wdev->wiphy), dev,
+			macaddr, ie, ie_len, gfp);
+}
+EXPORT_SYMBOL(cfg80211_notify_new_peer_candidate);
+
 static int __cfg80211_leave_mesh(struct cfg80211_registered_device *rdev,
 				 struct net_device *dev)
 {
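
The cfg80211_notify_new_peer_candidate() export added above is the driver-facing half of userspace mesh peering: a driver that set WIPHY_FLAG_MESH_AUTH calls it when a frame from an unknown mesh peer arrives, and cfg80211 forwards the candidate to userspace via NL80211_CMD_NEW_PEER_CANDIDATE. A minimal caller sketch; drv_priv, drv_rx_unknown_peer() and the surrounding RX plumbing are assumptions, not part of this patch:

	/*
	 * Hypothetical driver snippet: report a beacon from an unknown
	 * mesh peer so userspace (e.g. a peering daemon) can decide
	 * whether to open a peer link. Only the cfg80211 call is real.
	 */
	static void drv_rx_unknown_peer(struct drv_priv *priv, const u8 *sa,
					const u8 *ies, u8 ies_len)
	{
		/* may run from the RX path, hence GFP_ATOMIC */
		cfg80211_notify_new_peer_candidate(priv->ndev, sa,
						   ies, ies_len, GFP_ATOMIC);
	}
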
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index aa5df8865ff7..493b939970cd 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -770,6 +770,15 @@ void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr,
 }
 EXPORT_SYMBOL(cfg80211_new_sta);
 
+void cfg80211_del_sta(struct net_device *dev, const u8 *mac_addr, gfp_t gfp)
+{
+	struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
+	struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+
+	nl80211_send_sta_del_event(rdev, dev, mac_addr, gfp);
+}
+EXPORT_SYMBOL(cfg80211_del_sta);
+
 struct cfg80211_mgmt_registration {
 	struct list_head list;
 
@@ -954,6 +963,16 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
 		if (memcmp(mgmt->bssid, dev->dev_addr, ETH_ALEN))
 			err = -EINVAL;
 		break;
+	case NL80211_IFTYPE_MESH_POINT:
+		if (memcmp(mgmt->sa, mgmt->bssid, ETH_ALEN)) {
+			err = -EINVAL;
+			break;
+		}
+		/*
+		 * check for mesh DA must be done by driver as
+		 * cfg80211 doesn't track the stations
+		 */
+		break;
 	default:
 		err = -EOPNOTSUPP;
 		break;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 4ebce4284e9d..cea338150d05 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -124,6 +124,7 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
 	[NL80211_ATTR_BSS_HT_OPMODE] = { .type = NLA_U16 },
 
 	[NL80211_ATTR_MESH_CONFIG] = { .type = NLA_NESTED },
+	[NL80211_ATTR_SUPPORT_MESH_AUTH] = { .type = NLA_FLAG },
 
 	[NL80211_ATTR_HT_CAPABILITY] = { .type = NLA_BINARY,
 					 .len = NL80211_HT_CAPABILITY_LEN },
@@ -172,6 +173,9 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
 	[NL80211_ATTR_MCAST_RATE] = { .type = NLA_U32 },
 	[NL80211_ATTR_OFFCHANNEL_TX_OK] = { .type = NLA_FLAG },
 	[NL80211_ATTR_KEY_DEFAULT_TYPES] = { .type = NLA_NESTED },
+	[NL80211_ATTR_WOWLAN_TRIGGERS] = { .type = NLA_NESTED },
+	[NL80211_ATTR_STA_PLINK_STATE] = { .type = NLA_U8 },
+	[NL80211_ATTR_SCHED_SCAN_INTERVAL] = { .type = NLA_U32 },
 };
 
 /* policy for the key attributes */
@@ -193,6 +197,15 @@ nl80211_key_default_policy[NUM_NL80211_KEY_DEFAULT_TYPES] = {
 	[NL80211_KEY_DEFAULT_TYPE_MULTICAST] = { .type = NLA_FLAG },
 };
 
+/* policy for WoWLAN attributes */
+static const struct nla_policy
+nl80211_wowlan_policy[NUM_NL80211_WOWLAN_TRIG] = {
+	[NL80211_WOWLAN_TRIG_ANY] = { .type = NLA_FLAG },
+	[NL80211_WOWLAN_TRIG_DISCONNECT] = { .type = NLA_FLAG },
+	[NL80211_WOWLAN_TRIG_MAGIC_PKT] = { .type = NLA_FLAG },
+	[NL80211_WOWLAN_TRIG_PKT_PATTERN] = { .type = NLA_NESTED },
+};
+
 /* ifidx get helper */
 static int nl80211_get_ifidx(struct netlink_callback *cb)
 {
@@ -533,6 +546,7 @@ static int nl80211_key_allowed(struct wireless_dev *wdev)
 	case NL80211_IFTYPE_AP:
 	case NL80211_IFTYPE_AP_VLAN:
 	case NL80211_IFTYPE_P2P_GO:
+	case NL80211_IFTYPE_MESH_POINT:
 		break;
 	case NL80211_IFTYPE_ADHOC:
 		if (!wdev->current_bss)
@@ -550,6 +564,88 @@ static int nl80211_key_allowed(struct wireless_dev *wdev)
 	return 0;
 }
 
+static int nl80211_put_iftypes(struct sk_buff *msg, u32 attr, u16 ifmodes)
+{
+	struct nlattr *nl_modes = nla_nest_start(msg, attr);
+	int i;
+
+	if (!nl_modes)
+		goto nla_put_failure;
+
+	i = 0;
+	while (ifmodes) {
+		if (ifmodes & 1)
+			NLA_PUT_FLAG(msg, i);
+		ifmodes >>= 1;
+		i++;
+	}
+
+	nla_nest_end(msg, nl_modes);
+	return 0;
+
+nla_put_failure:
+	return -ENOBUFS;
+}
+
+static int nl80211_put_iface_combinations(struct wiphy *wiphy,
+					  struct sk_buff *msg)
+{
+	struct nlattr *nl_combis;
+	int i, j;
+
+	nl_combis = nla_nest_start(msg,
+				NL80211_ATTR_INTERFACE_COMBINATIONS);
+	if (!nl_combis)
+		goto nla_put_failure;
+
+	for (i = 0; i < wiphy->n_iface_combinations; i++) {
+		const struct ieee80211_iface_combination *c;
+		struct nlattr *nl_combi, *nl_limits;
+
+		c = &wiphy->iface_combinations[i];
+
+		nl_combi = nla_nest_start(msg, i + 1);
+		if (!nl_combi)
+			goto nla_put_failure;
+
+		nl_limits = nla_nest_start(msg, NL80211_IFACE_COMB_LIMITS);
+		if (!nl_limits)
+			goto nla_put_failure;
+
+		for (j = 0; j < c->n_limits; j++) {
+			struct nlattr *nl_limit;
+
+			nl_limit = nla_nest_start(msg, j + 1);
+			if (!nl_limit)
+				goto nla_put_failure;
+			NLA_PUT_U32(msg, NL80211_IFACE_LIMIT_MAX,
+				    c->limits[j].max);
+			if (nl80211_put_iftypes(msg, NL80211_IFACE_LIMIT_TYPES,
+						c->limits[j].types))
+				goto nla_put_failure;
+			nla_nest_end(msg, nl_limit);
+		}
+
+		nla_nest_end(msg, nl_limits);
+
+		if (c->beacon_int_infra_match)
+			NLA_PUT_FLAG(msg,
+				     NL80211_IFACE_COMB_STA_AP_BI_MATCH);
+		NLA_PUT_U32(msg, NL80211_IFACE_COMB_NUM_CHANNELS,
+			    c->num_different_channels);
+		NLA_PUT_U32(msg, NL80211_IFACE_COMB_MAXNUM,
+			    c->max_interfaces);
+
+		nla_nest_end(msg, nl_combi);
+	}
+
+	nla_nest_end(msg, nl_combis);
+
+	return 0;
+nla_put_failure:
+	return -ENOBUFS;
+}
+
 static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
 			      struct cfg80211_registered_device *dev)
 {
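
nl80211_put_iface_combinations() above just serializes what the driver declared; the fields it reads (limits[], n_limits, max_interfaces, num_different_channels, beacon_int_infra_match) are filled in by the driver before registering the wiphy. An illustrative table; the numbers are invented, only the field names come from the code above:

	/*
	 * Hedged sketch of a driver-side declaration: at most one AP and
	 * two stations, three interfaces total, all on one channel, with
	 * matching infrastructure beacon intervals.
	 */
	static const struct ieee80211_iface_limit drv_limits[] = {
		{ .max = 1, .types = BIT(NL80211_IFTYPE_AP) },
		{ .max = 2, .types = BIT(NL80211_IFTYPE_STATION) },
	};

	static const struct ieee80211_iface_combination drv_comb = {
		.limits = drv_limits,
		.n_limits = ARRAY_SIZE(drv_limits),
		.max_interfaces = 3,
		.num_different_channels = 1,
		.beacon_int_infra_match = true,
	};
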
@@ -557,13 +653,11 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
 	struct nlattr *nl_bands, *nl_band;
 	struct nlattr *nl_freqs, *nl_freq;
 	struct nlattr *nl_rates, *nl_rate;
-	struct nlattr *nl_modes;
 	struct nlattr *nl_cmds;
 	enum ieee80211_band band;
 	struct ieee80211_channel *chan;
 	struct ieee80211_rate *rate;
 	int i;
-	u16 ifmodes = dev->wiphy.interface_modes;
 	const struct ieee80211_txrx_stypes *mgmt_stypes =
 		dev->wiphy.mgmt_stypes;
 
@@ -594,6 +688,8 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
 
 	if (dev->wiphy.flags & WIPHY_FLAG_IBSS_RSN)
 		NLA_PUT_FLAG(msg, NL80211_ATTR_SUPPORT_IBSS_RSN);
+	if (dev->wiphy.flags & WIPHY_FLAG_MESH_AUTH)
+		NLA_PUT_FLAG(msg, NL80211_ATTR_SUPPORT_MESH_AUTH);
 
 	NLA_PUT(msg, NL80211_ATTR_CIPHER_SUITES,
 		sizeof(u32) * dev->wiphy.n_cipher_suites,
@@ -621,20 +717,10 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
 		}
 	}
 
-	nl_modes = nla_nest_start(msg, NL80211_ATTR_SUPPORTED_IFTYPES);
-	if (!nl_modes)
+	if (nl80211_put_iftypes(msg, NL80211_ATTR_SUPPORTED_IFTYPES,
+				dev->wiphy.interface_modes))
 		goto nla_put_failure;
 
-	i = 0;
-	while (ifmodes) {
-		if (ifmodes & 1)
-			NLA_PUT_FLAG(msg, i);
-		ifmodes >>= 1;
-		i++;
-	}
-
-	nla_nest_end(msg, nl_modes);
-
 	nl_bands = nla_nest_start(msg, NL80211_ATTR_WIPHY_BANDS);
 	if (!nl_bands)
 		goto nla_put_failure;
@@ -746,6 +832,8 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
 	}
 	CMD(set_channel, SET_CHANNEL);
 	CMD(set_wds_peer, SET_WDS_PEER);
+	if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN)
+		CMD(sched_scan_start, START_SCHED_SCAN);
 
 #undef CMD
 
@@ -818,6 +906,42 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
 		nla_nest_end(msg, nl_ifs);
 	}
 
+	if (dev->wiphy.wowlan.flags || dev->wiphy.wowlan.n_patterns) {
+		struct nlattr *nl_wowlan;
+
+		nl_wowlan = nla_nest_start(msg,
+				NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED);
+		if (!nl_wowlan)
+			goto nla_put_failure;
+
+		if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_ANY)
+			NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_ANY);
+		if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_DISCONNECT)
+			NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_DISCONNECT);
+		if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_MAGIC_PKT)
+			NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT);
+		if (dev->wiphy.wowlan.n_patterns) {
+			struct nl80211_wowlan_pattern_support pat = {
+				.max_patterns = dev->wiphy.wowlan.n_patterns,
+				.min_pattern_len =
+					dev->wiphy.wowlan.pattern_min_len,
+				.max_pattern_len =
+					dev->wiphy.wowlan.pattern_max_len,
+			};
+			NLA_PUT(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN,
+				sizeof(pat), &pat);
+		}
+
+		nla_nest_end(msg, nl_wowlan);
+	}
+
+	if (nl80211_put_iftypes(msg, NL80211_ATTR_SOFTWARE_IFTYPES,
+				dev->wiphy.software_iftypes))
+		goto nla_put_failure;
+
+	if (nl80211_put_iface_combinations(&dev->wiphy, msg))
+		goto nla_put_failure;
+
 	return genlmsg_end(msg, hdr);
 
 nla_put_failure:
@@ -1679,14 +1803,6 @@ static int nl80211_set_key(struct sk_buff *skb, struct genl_info *info)
 	if (err)
 		goto out;
 
-	if (!(rdev->wiphy.flags &
-			WIPHY_FLAG_SUPPORTS_SEPARATE_DEFAULT_KEYS)) {
-		if (!key.def_uni || !key.def_multi) {
-			err = -EOPNOTSUPP;
-			goto out;
-		}
-	}
-
 	err = rdev->ops->set_default_key(&rdev->wiphy, dev, key.idx,
 					 key.def_uni, key.def_multi);
 
@@ -1837,8 +1953,9 @@ static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info)
 		    struct beacon_parameters *info);
 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
 	struct net_device *dev = info->user_ptr[1];
+	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	struct beacon_parameters params;
-	int haveinfo = 0;
+	int haveinfo = 0, err;
 
 	if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_BEACON_TAIL]))
 		return -EINVAL;
@@ -1847,6 +1964,8 @@ static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info)
 	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
 		return -EOPNOTSUPP;
 
+	memset(&params, 0, sizeof(params));
+
 	switch (info->genlhdr->cmd) {
 	case NL80211_CMD_NEW_BEACON:
 		/* these are required for NEW_BEACON */
@@ -1855,6 +1974,15 @@ static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info)
 		    !info->attrs[NL80211_ATTR_BEACON_HEAD])
 			return -EINVAL;
 
+		params.interval =
+			nla_get_u32(info->attrs[NL80211_ATTR_BEACON_INTERVAL]);
+		params.dtim_period =
+			nla_get_u32(info->attrs[NL80211_ATTR_DTIM_PERIOD]);
+
+		err = cfg80211_validate_beacon_int(rdev, params.interval);
+		if (err)
+			return err;
+
 		call = rdev->ops->add_beacon;
 		break;
 	case NL80211_CMD_SET_BEACON:
@@ -1868,20 +1996,6 @@ static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info)
 	if (!call)
 		return -EOPNOTSUPP;
 
-	memset(&params, 0, sizeof(params));
-
-	if (info->attrs[NL80211_ATTR_BEACON_INTERVAL]) {
-		params.interval =
-		    nla_get_u32(info->attrs[NL80211_ATTR_BEACON_INTERVAL]);
-		haveinfo = 1;
-	}
-
-	if (info->attrs[NL80211_ATTR_DTIM_PERIOD]) {
-		params.dtim_period =
-		    nla_get_u32(info->attrs[NL80211_ATTR_DTIM_PERIOD]);
-		haveinfo = 1;
-	}
-
 	if (info->attrs[NL80211_ATTR_BEACON_HEAD]) {
 		params.head = nla_data(info->attrs[NL80211_ATTR_BEACON_HEAD]);
 		params.head_len =
@@ -1899,13 +2013,18 @@ static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info)
 	if (!haveinfo)
 		return -EINVAL;
 
-	return call(&rdev->wiphy, dev, &params);
+	err = call(&rdev->wiphy, dev, &params);
+	if (!err && params.interval)
+		wdev->beacon_interval = params.interval;
+	return err;
 }
 
 static int nl80211_del_beacon(struct sk_buff *skb, struct genl_info *info)
 {
 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
 	struct net_device *dev = info->user_ptr[1];
+	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	int err;
 
 	if (!rdev->ops->del_beacon)
 		return -EOPNOTSUPP;
@@ -1914,7 +2033,10 @@ static int nl80211_del_beacon(struct sk_buff *skb, struct genl_info *info)
 	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
 		return -EOPNOTSUPP;
 
-	return rdev->ops->del_beacon(&rdev->wiphy, dev);
+	err = rdev->ops->del_beacon(&rdev->wiphy, dev);
+	if (!err)
+		wdev->beacon_interval = 0;
+	return err;
 }
 
 static const struct nla_policy sta_flags_policy[NL80211_STA_FLAG_MAX + 1] = {
@@ -1922,6 +2044,7 @@ static const struct nla_policy sta_flags_policy[NL80211_STA_FLAG_MAX + 1] = {
 	[NL80211_STA_FLAG_SHORT_PREAMBLE] = { .type = NLA_FLAG },
 	[NL80211_STA_FLAG_WME] = { .type = NLA_FLAG },
 	[NL80211_STA_FLAG_MFP] = { .type = NLA_FLAG },
+	[NL80211_STA_FLAG_AUTHENTICATED] = { .type = NLA_FLAG },
 };
 
 static int parse_station_flags(struct genl_info *info,
@@ -2002,7 +2125,7 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
 				const u8 *mac_addr, struct station_info *sinfo)
 {
 	void *hdr;
-	struct nlattr *sinfoattr;
+	struct nlattr *sinfoattr, *bss_param;
 
 	hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_STATION);
 	if (!hdr)
@@ -2016,6 +2139,9 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
 	sinfoattr = nla_nest_start(msg, NL80211_ATTR_STA_INFO);
 	if (!sinfoattr)
 		goto nla_put_failure;
+	if (sinfo->filled & STATION_INFO_CONNECTED_TIME)
+		NLA_PUT_U32(msg, NL80211_STA_INFO_CONNECTED_TIME,
+			    sinfo->connected_time);
 	if (sinfo->filled & STATION_INFO_INACTIVE_TIME)
 		NLA_PUT_U32(msg, NL80211_STA_INFO_INACTIVE_TIME,
 			    sinfo->inactive_time);
@@ -2062,6 +2188,25 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
 	if (sinfo->filled & STATION_INFO_TX_FAILED)
 		NLA_PUT_U32(msg, NL80211_STA_INFO_TX_FAILED,
 			    sinfo->tx_failed);
+	if (sinfo->filled & STATION_INFO_BSS_PARAM) {
+		bss_param = nla_nest_start(msg, NL80211_STA_INFO_BSS_PARAM);
+		if (!bss_param)
+			goto nla_put_failure;
+
+		if (sinfo->bss_param.flags & BSS_PARAM_FLAGS_CTS_PROT)
+			NLA_PUT_FLAG(msg, NL80211_STA_BSS_PARAM_CTS_PROT);
+		if (sinfo->bss_param.flags & BSS_PARAM_FLAGS_SHORT_PREAMBLE)
+			NLA_PUT_FLAG(msg, NL80211_STA_BSS_PARAM_SHORT_PREAMBLE);
+		if (sinfo->bss_param.flags & BSS_PARAM_FLAGS_SHORT_SLOT_TIME)
+			NLA_PUT_FLAG(msg,
+				     NL80211_STA_BSS_PARAM_SHORT_SLOT_TIME);
+		NLA_PUT_U8(msg, NL80211_STA_BSS_PARAM_DTIM_PERIOD,
+			   sinfo->bss_param.dtim_period);
+		NLA_PUT_U16(msg, NL80211_STA_BSS_PARAM_BEACON_INTERVAL,
+			    sinfo->bss_param.beacon_interval);
+
+		nla_nest_end(msg, bss_param);
+	}
 	nla_nest_end(msg, sinfoattr);
 
 	return genlmsg_end(msg, hdr);
@@ -2190,6 +2335,7 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
 	memset(&params, 0, sizeof(params));
 
 	params.listen_interval = -1;
+	params.plink_state = -1;
 
 	if (info->attrs[NL80211_ATTR_STA_AID])
 		return -EINVAL;
@@ -2221,6 +2367,10 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
 		params.plink_action =
 		    nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_ACTION]);
 
+	if (info->attrs[NL80211_ATTR_STA_PLINK_STATE])
+		params.plink_state =
+		    nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_STATE]);
+
 	err = get_vlan(info, rdev, &params.vlan);
 	if (err)
 		goto out;
@@ -2260,9 +2410,10 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
 			err = -EINVAL;
 		if (params.listen_interval >= 0)
 			err = -EINVAL;
-		if (params.supported_rates)
-			err = -EINVAL;
-		if (params.sta_flags_mask)
+		if (params.sta_flags_mask &
+		    ~(BIT(NL80211_STA_FLAG_AUTHENTICATED) |
+		      BIT(NL80211_STA_FLAG_MFP) |
+		      BIT(NL80211_STA_FLAG_AUTHORIZED)))
 			err = -EINVAL;
 		break;
 	default:
@@ -2324,11 +2475,16 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
 		params.ht_capa =
 			nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]);
 
+	if (info->attrs[NL80211_ATTR_STA_PLINK_ACTION])
+		params.plink_action =
+		    nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_ACTION]);
+
 	if (parse_station_flags(info, &params))
 		return -EINVAL;
 
 	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
 	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN &&
+	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT &&
 	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
 		return -EINVAL;
 
@@ -2804,8 +2960,10 @@ static const struct nla_policy
 nl80211_mesh_setup_params_policy[NL80211_MESH_SETUP_ATTR_MAX+1] = {
 	[NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL] = { .type = NLA_U8 },
 	[NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC] = { .type = NLA_U8 },
-	[NL80211_MESH_SETUP_VENDOR_PATH_SEL_IE] = { .type = NLA_BINARY,
+	[NL80211_MESH_SETUP_USERSPACE_AUTH] = { .type = NLA_FLAG },
+	[NL80211_MESH_SETUP_IE] = { .type = NLA_BINARY,
 		.len = IEEE80211_MAX_DATA_LEN },
+	[NL80211_MESH_SETUP_USERSPACE_AMPE] = { .type = NLA_FLAG },
 };
 
 static int nl80211_parse_mesh_config(struct genl_info *info,
@@ -2906,14 +3064,17 @@ static int nl80211_parse_mesh_setup(struct genl_info *info,
 		IEEE80211_PATH_METRIC_VENDOR :
 		IEEE80211_PATH_METRIC_AIRTIME;
 
-	if (tb[NL80211_MESH_SETUP_VENDOR_PATH_SEL_IE]) {
+
+	if (tb[NL80211_MESH_SETUP_IE]) {
 		struct nlattr *ieattr =
-			tb[NL80211_MESH_SETUP_VENDOR_PATH_SEL_IE];
+			tb[NL80211_MESH_SETUP_IE];
 		if (!is_valid_ie_attr(ieattr))
 			return -EINVAL;
-		setup->vendor_ie = nla_data(ieattr);
-		setup->vendor_ie_len = nla_len(ieattr);
+		setup->ie = nla_data(ieattr);
+		setup->ie_len = nla_len(ieattr);
 	}
+	setup->is_authenticated = nla_get_flag(tb[NL80211_MESH_SETUP_USERSPACE_AUTH]);
+	setup->is_secure = nla_get_flag(tb[NL80211_MESH_SETUP_USERSPACE_AMPE]);
 
 	return 0;
 }
@@ -3133,8 +3294,6 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
 	struct net_device *dev = info->user_ptr[1];
 	struct cfg80211_scan_request *request;
-	struct cfg80211_ssid *ssid;
-	struct ieee80211_channel *channel;
 	struct nlattr *attr;
 	struct wiphy *wiphy;
 	int err, tmp, n_ssids = 0, n_channels, i;
@@ -3181,8 +3340,8 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
 		return -EINVAL;
 
 	request = kzalloc(sizeof(*request)
-			+ sizeof(*ssid) * n_ssids
-			+ sizeof(channel) * n_channels
+			+ sizeof(*request->ssids) * n_ssids
+			+ sizeof(*request->channels) * n_channels
 			+ ie_len, GFP_KERNEL);
 	if (!request)
 		return -ENOMEM;
@@ -3247,12 +3406,12 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
 	i = 0;
 	if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) {
 		nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp) {
-			if (request->ssids[i].ssid_len > IEEE80211_MAX_SSID_LEN) {
+			if (nla_len(attr) > IEEE80211_MAX_SSID_LEN) {
 				err = -EINVAL;
 				goto out_free;
 			}
-			memcpy(request->ssids[i].ssid, nla_data(attr), nla_len(attr));
 			request->ssids[i].ssid_len = nla_len(attr);
+			memcpy(request->ssids[i].ssid, nla_data(attr), nla_len(attr));
 			i++;
 		}
 	}
@@ -3282,6 +3441,197 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
 	return err;
 }
 
+static int nl80211_start_sched_scan(struct sk_buff *skb,
+				    struct genl_info *info)
+{
+	struct cfg80211_sched_scan_request *request;
+	struct cfg80211_registered_device *rdev = info->user_ptr[0];
+	struct net_device *dev = info->user_ptr[1];
+	struct nlattr *attr;
+	struct wiphy *wiphy;
+	int err, tmp, n_ssids = 0, n_channels, i;
+	u32 interval;
+	enum ieee80211_band band;
+	size_t ie_len;
+
+	if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) ||
+	    !rdev->ops->sched_scan_start)
+		return -EOPNOTSUPP;
+
+	if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
+		return -EINVAL;
+
+	if (!info->attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL])
+		return -EINVAL;
+
+	interval = nla_get_u32(info->attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL]);
+	if (interval == 0)
+		return -EINVAL;
+
+	wiphy = &rdev->wiphy;
+
+	if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) {
+		n_channels = validate_scan_freqs(
+				info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]);
+		if (!n_channels)
+			return -EINVAL;
+	} else {
+		n_channels = 0;
+
+		for (band = 0; band < IEEE80211_NUM_BANDS; band++)
+			if (wiphy->bands[band])
+				n_channels += wiphy->bands[band]->n_channels;
+	}
+
+	if (info->attrs[NL80211_ATTR_SCAN_SSIDS])
+		nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS],
+				    tmp)
+			n_ssids++;
+
+	if (n_ssids > wiphy->max_scan_ssids)
+		return -EINVAL;
+
+	if (info->attrs[NL80211_ATTR_IE])
+		ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
+	else
+		ie_len = 0;
+
+	if (ie_len > wiphy->max_scan_ie_len)
+		return -EINVAL;
+
+	mutex_lock(&rdev->sched_scan_mtx);
+
+	if (rdev->sched_scan_req) {
+		err = -EINPROGRESS;
+		goto out;
+	}
+
+	request = kzalloc(sizeof(*request)
+			+ sizeof(*request->ssids) * n_ssids
+			+ sizeof(*request->channels) * n_channels
+			+ ie_len, GFP_KERNEL);
+	if (!request) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	if (n_ssids)
+		request->ssids = (void *)&request->channels[n_channels];
+	request->n_ssids = n_ssids;
+	if (ie_len) {
+		if (request->ssids)
+			request->ie = (void *)(request->ssids + n_ssids);
+		else
+			request->ie = (void *)(request->channels + n_channels);
+	}
+
+	i = 0;
+	if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) {
+		/* user specified, bail out if channel not found */
+		nla_for_each_nested(attr,
+				    info->attrs[NL80211_ATTR_SCAN_FREQUENCIES],
+				    tmp) {
+			struct ieee80211_channel *chan;
+
+			chan = ieee80211_get_channel(wiphy, nla_get_u32(attr));
+
+			if (!chan) {
+				err = -EINVAL;
+				goto out_free;
+			}
+
+			/* ignore disabled channels */
+			if (chan->flags & IEEE80211_CHAN_DISABLED)
+				continue;
+
+			request->channels[i] = chan;
+			i++;
+		}
+	} else {
+		/* all channels */
+		for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+			int j;
+			if (!wiphy->bands[band])
+				continue;
+			for (j = 0; j < wiphy->bands[band]->n_channels; j++) {
+				struct ieee80211_channel *chan;
+
+				chan = &wiphy->bands[band]->channels[j];
+
+				if (chan->flags & IEEE80211_CHAN_DISABLED)
+					continue;
+
+				request->channels[i] = chan;
+				i++;
+			}
+		}
+	}
+
+	if (!i) {
+		err = -EINVAL;
+		goto out_free;
+	}
+
+	request->n_channels = i;
+
+	i = 0;
+	if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) {
+		nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS],
+				    tmp) {
+			if (nla_len(attr) > IEEE80211_MAX_SSID_LEN) {
+				err = -EINVAL;
+				goto out_free;
+			}
+			request->ssids[i].ssid_len = nla_len(attr);
+			memcpy(request->ssids[i].ssid, nla_data(attr),
+			       nla_len(attr));
+			i++;
+		}
+	}
+
+	if (info->attrs[NL80211_ATTR_IE]) {
+		request->ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
+		memcpy((void *)request->ie,
+		       nla_data(info->attrs[NL80211_ATTR_IE]),
+		       request->ie_len);
+	}
+
+	request->dev = dev;
+	request->wiphy = &rdev->wiphy;
+	request->interval = interval;
+
+	err = rdev->ops->sched_scan_start(&rdev->wiphy, dev, request);
+	if (!err) {
+		rdev->sched_scan_req = request;
+		nl80211_send_sched_scan(rdev, dev,
+					NL80211_CMD_START_SCHED_SCAN);
+		goto out;
+	}
+
+out_free:
+	kfree(request);
+out:
+	mutex_unlock(&rdev->sched_scan_mtx);
+	return err;
+}
+
+static int nl80211_stop_sched_scan(struct sk_buff *skb,
+				   struct genl_info *info)
+{
+	struct cfg80211_registered_device *rdev = info->user_ptr[0];
+	int err;
+
+	if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) ||
+	    !rdev->ops->sched_scan_stop)
+		return -EOPNOTSUPP;
+
+	mutex_lock(&rdev->sched_scan_mtx);
+	err = __cfg80211_stop_sched_scan(rdev, false);
+	mutex_unlock(&rdev->sched_scan_mtx);
+
+	return err;
+}
+
 static int nl80211_send_bss(struct sk_buff *msg, u32 pid, u32 seq, int flags,
 			    struct cfg80211_registered_device *rdev,
 			    struct wireless_dev *wdev,
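
Note how nl80211_start_sched_scan() above makes a single kzalloc() and then carves it up: the channel pointer array sits directly behind the request, request->ssids points behind the channels, and request->ie behind the SSIDs (or behind the channels when there are none). Schematically, as a comment; the layout is inferred from the pointer arithmetic above:

	/*
	 * One allocation, four regions (sizes depend on the request):
	 *
	 *   request
	 *   request->channels[0..n_channels-1]  (tail array of the struct)
	 *   request->ssids = (void *)&request->channels[n_channels]
	 *   request->ie    = (void *)(request->ssids + n_ssids)
	 *
	 * The kfree() in __cfg80211_stop_sched_scan() therefore releases
	 * the channels, SSIDs and IEs in one go.
	 */
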
@@ -4780,6 +5130,194 @@ static int nl80211_leave_mesh(struct sk_buff *skb, struct genl_info *info)
 	return cfg80211_leave_mesh(rdev, dev);
 }
 
+static int nl80211_get_wowlan(struct sk_buff *skb, struct genl_info *info)
+{
+	struct cfg80211_registered_device *rdev = info->user_ptr[0];
+	struct sk_buff *msg;
+	void *hdr;
+
+	if (!rdev->wiphy.wowlan.flags && !rdev->wiphy.wowlan.n_patterns)
+		return -EOPNOTSUPP;
+
+	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	if (!msg)
+		return -ENOMEM;
+
+	hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
+			     NL80211_CMD_GET_WOWLAN);
+	if (!hdr)
+		goto nla_put_failure;
+
+	if (rdev->wowlan) {
+		struct nlattr *nl_wowlan;
+
+		nl_wowlan = nla_nest_start(msg, NL80211_ATTR_WOWLAN_TRIGGERS);
+		if (!nl_wowlan)
+			goto nla_put_failure;
+
+		if (rdev->wowlan->any)
+			NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_ANY);
+		if (rdev->wowlan->disconnect)
+			NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_DISCONNECT);
+		if (rdev->wowlan->magic_pkt)
+			NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT);
+		if (rdev->wowlan->n_patterns) {
+			struct nlattr *nl_pats, *nl_pat;
+			int i, pat_len;
+
+			nl_pats = nla_nest_start(msg,
+					NL80211_WOWLAN_TRIG_PKT_PATTERN);
+			if (!nl_pats)
+				goto nla_put_failure;
+
+			for (i = 0; i < rdev->wowlan->n_patterns; i++) {
+				nl_pat = nla_nest_start(msg, i + 1);
+				if (!nl_pat)
+					goto nla_put_failure;
+				pat_len = rdev->wowlan->patterns[i].pattern_len;
+				NLA_PUT(msg, NL80211_WOWLAN_PKTPAT_MASK,
+					DIV_ROUND_UP(pat_len, 8),
+					rdev->wowlan->patterns[i].mask);
+				NLA_PUT(msg, NL80211_WOWLAN_PKTPAT_PATTERN,
+					pat_len,
+					rdev->wowlan->patterns[i].pattern);
+				nla_nest_end(msg, nl_pat);
+			}
+			nla_nest_end(msg, nl_pats);
+		}
+
+		nla_nest_end(msg, nl_wowlan);
+	}
+
+	genlmsg_end(msg, hdr);
+	return genlmsg_reply(msg, info);
+
+nla_put_failure:
+	nlmsg_free(msg);
+	return -ENOBUFS;
+}
+
+static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
+{
+	struct cfg80211_registered_device *rdev = info->user_ptr[0];
+	struct nlattr *tb[NUM_NL80211_WOWLAN_TRIG];
+	struct cfg80211_wowlan no_triggers = {};
+	struct cfg80211_wowlan new_triggers = {};
+	struct wiphy_wowlan_support *wowlan = &rdev->wiphy.wowlan;
+	int err, i;
+
+	if (!rdev->wiphy.wowlan.flags && !rdev->wiphy.wowlan.n_patterns)
+		return -EOPNOTSUPP;
+
+	if (!info->attrs[NL80211_ATTR_WOWLAN_TRIGGERS])
+		goto no_triggers;
+
+	err = nla_parse(tb, MAX_NL80211_WOWLAN_TRIG,
+			nla_data(info->attrs[NL80211_ATTR_WOWLAN_TRIGGERS]),
+			nla_len(info->attrs[NL80211_ATTR_WOWLAN_TRIGGERS]),
+			nl80211_wowlan_policy);
+	if (err)
+		return err;
+
+	if (tb[NL80211_WOWLAN_TRIG_ANY]) {
+		if (!(wowlan->flags & WIPHY_WOWLAN_ANY))
+			return -EINVAL;
+		new_triggers.any = true;
+	}
+
+	if (tb[NL80211_WOWLAN_TRIG_DISCONNECT]) {
+		if (!(wowlan->flags & WIPHY_WOWLAN_DISCONNECT))
+			return -EINVAL;
+		new_triggers.disconnect = true;
+	}
+
+	if (tb[NL80211_WOWLAN_TRIG_MAGIC_PKT]) {
+		if (!(wowlan->flags & WIPHY_WOWLAN_MAGIC_PKT))
+			return -EINVAL;
+		new_triggers.magic_pkt = true;
+	}
+
+	if (tb[NL80211_WOWLAN_TRIG_PKT_PATTERN]) {
+		struct nlattr *pat;
+		int n_patterns = 0;
+		int rem, pat_len, mask_len;
+		struct nlattr *pat_tb[NUM_NL80211_WOWLAN_PKTPAT];
+
+		nla_for_each_nested(pat, tb[NL80211_WOWLAN_TRIG_PKT_PATTERN],
+				    rem)
+			n_patterns++;
+		if (n_patterns > wowlan->n_patterns)
+			return -EINVAL;
+
+		new_triggers.patterns = kcalloc(n_patterns,
+						sizeof(new_triggers.patterns[0]),
+						GFP_KERNEL);
+		if (!new_triggers.patterns)
+			return -ENOMEM;
+
+		new_triggers.n_patterns = n_patterns;
+		i = 0;
+
+		nla_for_each_nested(pat, tb[NL80211_WOWLAN_TRIG_PKT_PATTERN],
+				    rem) {
+			nla_parse(pat_tb, MAX_NL80211_WOWLAN_PKTPAT,
+				  nla_data(pat), nla_len(pat), NULL);
+			err = -EINVAL;
+			if (!pat_tb[NL80211_WOWLAN_PKTPAT_MASK] ||
+			    !pat_tb[NL80211_WOWLAN_PKTPAT_PATTERN])
+				goto error;
+			pat_len = nla_len(pat_tb[NL80211_WOWLAN_PKTPAT_PATTERN]);
+			mask_len = DIV_ROUND_UP(pat_len, 8);
+			if (nla_len(pat_tb[NL80211_WOWLAN_PKTPAT_MASK]) !=
+			    mask_len)
+				goto error;
+			if (pat_len > wowlan->pattern_max_len ||
+			    pat_len < wowlan->pattern_min_len)
+				goto error;
+
+			new_triggers.patterns[i].mask =
+				kmalloc(mask_len + pat_len, GFP_KERNEL);
+			if (!new_triggers.patterns[i].mask) {
+				err = -ENOMEM;
+				goto error;
+			}
+			new_triggers.patterns[i].pattern =
+				new_triggers.patterns[i].mask + mask_len;
+			memcpy(new_triggers.patterns[i].mask,
+			       nla_data(pat_tb[NL80211_WOWLAN_PKTPAT_MASK]),
+			       mask_len);
+			new_triggers.patterns[i].pattern_len = pat_len;
+			memcpy(new_triggers.patterns[i].pattern,
+			       nla_data(pat_tb[NL80211_WOWLAN_PKTPAT_PATTERN]),
+			       pat_len);
+			i++;
+		}
+	}
+
+	if (memcmp(&new_triggers, &no_triggers, sizeof(new_triggers))) {
+		struct cfg80211_wowlan *ntrig;
+		ntrig = kmemdup(&new_triggers, sizeof(new_triggers),
+				GFP_KERNEL);
+		if (!ntrig) {
+			err = -ENOMEM;
+			goto error;
+		}
+		cfg80211_rdev_free_wowlan(rdev);
+		rdev->wowlan = ntrig;
+	} else {
+ no_triggers:
+		cfg80211_rdev_free_wowlan(rdev);
+		rdev->wowlan = NULL;
+	}
+
+	return 0;
+ error:
+	for (i = 0; i < new_triggers.n_patterns; i++)
+		kfree(new_triggers.patterns[i].mask);
+	kfree(new_triggers.patterns);
+	return err;
+}
+
 #define NL80211_FLAG_NEED_WIPHY		0x01
 #define NL80211_FLAG_NEED_NETDEV	0x02
 #define NL80211_FLAG_NEED_RTNL		0x04
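
In nl80211_set_wowlan() above, each pattern is stored as one kmalloc(mask_len + pat_len) block with the mask in front and the pattern behind it, and mask_len = DIV_ROUND_UP(pat_len, 8) suggests one mask bit per pattern byte. A hypothetical matcher showing that interpretation; actual matching happens in the driver or firmware, and drv_pattern_matches() is not from this patch:

	/*
	 * Hedged sketch: bit i of the mask (LSB-first within each byte)
	 * is assumed to select whether byte i of the pattern must match.
	 */
	static bool drv_pattern_matches(const u8 *mask, const u8 *pattern,
					int pattern_len,
					const u8 *pkt, int pkt_len)
	{
		int i;

		if (pkt_len < pattern_len)
			return false;
		for (i = 0; i < pattern_len; i++) {
			if (!(mask[i / 8] & BIT(i % 8)))
				continue;	/* byte ignored by mask */
			if (pkt[i] != pattern[i])
				return false;
		}
		return true;
	}
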
@@ -5064,6 +5602,22 @@ static struct genl_ops nl80211_ops[] = {
 		.dumpit = nl80211_dump_scan,
 	},
 	{
+		.cmd = NL80211_CMD_START_SCHED_SCAN,
+		.doit = nl80211_start_sched_scan,
+		.policy = nl80211_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+				  NL80211_FLAG_NEED_RTNL,
+	},
+	{
+		.cmd = NL80211_CMD_STOP_SCHED_SCAN,
+		.doit = nl80211_stop_sched_scan,
+		.policy = nl80211_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+				  NL80211_FLAG_NEED_RTNL,
+	},
+	{
 		.cmd = NL80211_CMD_AUTHENTICATE,
 		.doit = nl80211_authenticate,
 		.policy = nl80211_policy,
@@ -5278,6 +5832,22 @@ static struct genl_ops nl80211_ops[] = {
 		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
 				  NL80211_FLAG_NEED_RTNL,
 	},
+	{
+		.cmd = NL80211_CMD_GET_WOWLAN,
+		.doit = nl80211_get_wowlan,
+		.policy = nl80211_policy,
+		/* can be retrieved by unprivileged users */
+		.internal_flags = NL80211_FLAG_NEED_WIPHY |
+				  NL80211_FLAG_NEED_RTNL,
+	},
+	{
+		.cmd = NL80211_CMD_SET_WOWLAN,
+		.doit = nl80211_set_wowlan,
+		.policy = nl80211_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL80211_FLAG_NEED_WIPHY |
+				  NL80211_FLAG_NEED_RTNL,
+	},
 };
 
 static struct genl_multicast_group nl80211_mlme_mcgrp = {
@@ -5373,6 +5943,28 @@ static int nl80211_send_scan_msg(struct sk_buff *msg,
 	return -EMSGSIZE;
 }
 
+static int
+nl80211_send_sched_scan_msg(struct sk_buff *msg,
+			    struct cfg80211_registered_device *rdev,
+			    struct net_device *netdev,
+			    u32 pid, u32 seq, int flags, u32 cmd)
+{
+	void *hdr;
+
+	hdr = nl80211hdr_put(msg, pid, seq, flags, cmd);
+	if (!hdr)
+		return -1;
+
+	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
+	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
+
+	return genlmsg_end(msg, hdr);
+
+ nla_put_failure:
+	genlmsg_cancel(msg, hdr);
+	return -EMSGSIZE;
+}
+
 void nl80211_send_scan_start(struct cfg80211_registered_device *rdev,
 			     struct net_device *netdev)
 {
@@ -5430,6 +6022,43 @@ void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev,
 				nl80211_scan_mcgrp.id, GFP_KERNEL);
 }
 
+void nl80211_send_sched_scan_results(struct cfg80211_registered_device *rdev,
+				     struct net_device *netdev)
+{
+	struct sk_buff *msg;
+
+	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	if (!msg)
+		return;
+
+	if (nl80211_send_sched_scan_msg(msg, rdev, netdev, 0, 0, 0,
+					NL80211_CMD_SCHED_SCAN_RESULTS) < 0) {
+		nlmsg_free(msg);
+		return;
+	}
+
+	genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
+				nl80211_scan_mcgrp.id, GFP_KERNEL);
+}
+
+void nl80211_send_sched_scan(struct cfg80211_registered_device *rdev,
+			     struct net_device *netdev, u32 cmd)
+{
+	struct sk_buff *msg;
+
+	msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+	if (!msg)
+		return;
+
+	if (nl80211_send_sched_scan_msg(msg, rdev, netdev, 0, 0, 0, cmd) < 0) {
+		nlmsg_free(msg);
+		return;
+	}
+
+	genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
+				nl80211_scan_mcgrp.id, GFP_KERNEL);
+}
+
 /*
  * This can happen on global regulatory changes or device specific settings
  * based on custom world regulatory domains.
@@ -5785,6 +6414,44 @@ void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev,
 	nlmsg_free(msg);
 }
 
+void nl80211_send_new_peer_candidate(struct cfg80211_registered_device *rdev,
+		struct net_device *netdev,
+		const u8 *macaddr, const u8* ie, u8 ie_len,
+		gfp_t gfp)
+{
+	struct sk_buff *msg;
+	void *hdr;
+
+	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+	if (!msg)
+		return;
+
+	hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_NEW_PEER_CANDIDATE);
+	if (!hdr) {
+		nlmsg_free(msg);
+		return;
+	}
+
+	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
+	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
+	NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, macaddr);
+	if (ie_len && ie)
+		NLA_PUT(msg, NL80211_ATTR_IE, ie_len , ie);
+
+	if (genlmsg_end(msg, hdr) < 0) {
+		nlmsg_free(msg);
+		return;
+	}
+
+	genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
+				nl80211_mlme_mcgrp.id, gfp);
+	return;
+
+ nla_put_failure:
+	genlmsg_cancel(msg, hdr);
+	nlmsg_free(msg);
+}
+
 void nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev,
 				 struct net_device *netdev, const u8 *addr,
 				 enum nl80211_key_type key_type, int key_id,
@@ -5808,7 +6475,8 @@ void nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev,
 	if (addr)
 		NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr);
 	NLA_PUT_U32(msg, NL80211_ATTR_KEY_TYPE, key_type);
-	NLA_PUT_U8(msg, NL80211_ATTR_KEY_IDX, key_id);
+	if (key_id != -1)
+		NLA_PUT_U8(msg, NL80211_ATTR_KEY_IDX, key_id);
 	if (tsc)
 		NLA_PUT(msg, NL80211_ATTR_KEY_SEQ, 6, tsc);
 
@@ -5966,6 +6634,40 @@ void nl80211_send_sta_event(struct cfg80211_registered_device *rdev,
 			    nl80211_mlme_mcgrp.id, gfp);
 }
 
+void nl80211_send_sta_del_event(struct cfg80211_registered_device *rdev,
+				struct net_device *dev, const u8 *mac_addr,
+				gfp_t gfp)
+{
+	struct sk_buff *msg;
+	void *hdr;
+
+	msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
+	if (!msg)
+		return;
+
+	hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_DEL_STATION);
+	if (!hdr) {
+		nlmsg_free(msg);
+		return;
+	}
+
+	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
+	NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr);
+
+	if (genlmsg_end(msg, hdr) < 0) {
+		nlmsg_free(msg);
+		return;
+	}
+
+	genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
+				nl80211_mlme_mcgrp.id, gfp);
+	return;
+
+ nla_put_failure:
+	genlmsg_cancel(msg, hdr);
+	nlmsg_free(msg);
+}
+
 int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
 		      struct net_device *netdev, u32 nlpid,
 		      int freq, const u8 *buf, size_t len, gfp_t gfp)
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index e3f7fa886966..2f1bfb87a651 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -12,6 +12,10 @@ void nl80211_send_scan_done(struct cfg80211_registered_device *rdev,
 			    struct net_device *netdev);
 void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev,
 			       struct net_device *netdev);
+void nl80211_send_sched_scan(struct cfg80211_registered_device *rdev,
+			     struct net_device *netdev, u32 cmd);
+void nl80211_send_sched_scan_results(struct cfg80211_registered_device *rdev,
+				     struct net_device *netdev);
 void nl80211_send_reg_change_event(struct regulatory_request *request);
 void nl80211_send_rx_auth(struct cfg80211_registered_device *rdev,
 			  struct net_device *netdev,
@@ -50,6 +54,10 @@ void nl80211_send_disconnected(struct cfg80211_registered_device *rdev,
 			       struct net_device *netdev, u16 reason,
 			       const u8 *ie, size_t ie_len, bool from_ap);
 
+void nl80211_send_new_peer_candidate(struct cfg80211_registered_device *rdev,
+		struct net_device *netdev,
+		const u8 *macaddr, const u8* ie, u8 ie_len,
+		gfp_t gfp);
 void
 nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev,
 			    struct net_device *netdev, const u8 *addr,
@@ -79,6 +87,9 @@ void nl80211_send_remain_on_channel_cancel(
 void nl80211_send_sta_event(struct cfg80211_registered_device *rdev,
 			    struct net_device *dev, const u8 *mac_addr,
 			    struct station_info *sinfo, gfp_t gfp);
+void nl80211_send_sta_del_event(struct cfg80211_registered_device *rdev,
+				struct net_device *dev, const u8 *mac_addr,
+				gfp_t gfp);
 
 int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
 		      struct net_device *netdev, u32 nlpid, int freq,
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index ab801a1097b2..1ad0f39fe091 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -106,6 +106,9 @@ struct reg_beacon {
 static void reg_todo(struct work_struct *work);
 static DECLARE_WORK(reg_work, reg_todo);
 
+static void reg_timeout_work(struct work_struct *work);
+static DECLARE_DELAYED_WORK(reg_timeout, reg_timeout_work);
+
 /* We keep a static world regulatory domain in case of the absence of CRDA */
 static const struct ieee80211_regdomain world_regdom = {
 	.n_reg_rules = 5,
@@ -669,11 +672,9 @@ static int freq_reg_info_regd(struct wiphy *wiphy,
 	for (i = 0; i < regd->n_reg_rules; i++) {
 		const struct ieee80211_reg_rule *rr;
 		const struct ieee80211_freq_range *fr = NULL;
-		const struct ieee80211_power_rule *pr = NULL;
 
 		rr = &regd->reg_rules[i];
 		fr = &rr->freq_range;
-		pr = &rr->power_rule;
 
 		/*
 		 * We only need to know if one frequency rule was
@@ -1330,6 +1331,9 @@ static void reg_set_request_processed(void)
 		need_more_processing = true;
 	spin_unlock(&reg_requests_lock);
 
+	if (last_request->initiator == NL80211_REGDOM_SET_BY_USER)
+		cancel_delayed_work_sync(&reg_timeout);
+
 	if (need_more_processing)
 		schedule_work(&reg_work);
 }
@@ -1440,8 +1444,18 @@ static void reg_process_hint(struct regulatory_request *reg_request)
 	r = __regulatory_hint(wiphy, reg_request);
 	/* This is required so that the orig_* parameters are saved */
 	if (r == -EALREADY && wiphy &&
-	    wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY)
+	    wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY) {
 		wiphy_update_regulatory(wiphy, initiator);
+		return;
+	}
+
+	/*
+	 * We only time out user hints, given that they should be the only
+	 * source of bogus requests.
+	 */
+	if (r != -EALREADY &&
+	    reg_request->initiator == NL80211_REGDOM_SET_BY_USER)
+		schedule_delayed_work(&reg_timeout, msecs_to_jiffies(3142));
 }
 
 /*
@@ -1744,6 +1758,8 @@ static void restore_regulatory_settings(bool reset_user)
 {
 	char alpha2[2];
 	struct reg_beacon *reg_beacon, *btmp;
+	struct regulatory_request *reg_request, *tmp;
+	LIST_HEAD(tmp_reg_req_list);
 
 	mutex_lock(&cfg80211_mutex);
 	mutex_lock(&reg_mutex);
@@ -1751,6 +1767,25 @@ static void restore_regulatory_settings(bool reset_user)
 	reset_regdomains();
 	restore_alpha2(alpha2, reset_user);
 
+	/*
+	 * If there's any pending requests we simply
+	 * stash them to a temporary pending queue and
+	 * add then after we've restored regulatory
+	 * settings.
+	 */
+	spin_lock(&reg_requests_lock);
+	if (!list_empty(&reg_requests_list)) {
+		list_for_each_entry_safe(reg_request, tmp,
+					 &reg_requests_list, list) {
+			if (reg_request->initiator !=
+			    NL80211_REGDOM_SET_BY_USER)
+				continue;
+			list_del(&reg_request->list);
+			list_add_tail(&reg_request->list, &tmp_reg_req_list);
+		}
+	}
+	spin_unlock(&reg_requests_lock);
+
 	/* Clear beacon hints */
 	spin_lock_bh(&reg_pending_beacons_lock);
 	if (!list_empty(&reg_pending_beacons)) {
@@ -1785,8 +1820,31 @@ static void restore_regulatory_settings(bool reset_user)
 	 */
 	if (is_an_alpha2(alpha2))
 		regulatory_hint_user(user_alpha2);
-}
 
+	if (list_empty(&tmp_reg_req_list))
+		return;
+
+	mutex_lock(&cfg80211_mutex);
+	mutex_lock(&reg_mutex);
+
+	spin_lock(&reg_requests_lock);
+	list_for_each_entry_safe(reg_request, tmp, &tmp_reg_req_list, list) {
+		REG_DBG_PRINT("Adding request for country %c%c back "
+			      "into the queue\n",
+			      reg_request->alpha2[0],
+			      reg_request->alpha2[1]);
+		list_del(&reg_request->list);
+		list_add_tail(&reg_request->list, &reg_requests_list);
+	}
+	spin_unlock(&reg_requests_lock);
+
+	mutex_unlock(&reg_mutex);
+	mutex_unlock(&cfg80211_mutex);
+
+	REG_DBG_PRINT("Kicking the queue\n");
+
+	schedule_work(&reg_work);
+}
 
 void regulatory_hint_disconnect(void)
 {
@@ -2125,6 +2183,13 @@ out:
 	mutex_unlock(&reg_mutex);
 }
 
+static void reg_timeout_work(struct work_struct *work)
+{
+	REG_DBG_PRINT("Timeout while waiting for CRDA to reply, "
+		      "restoring regulatory settings");
+	restore_regulatory_settings(true);
+}
+
 int __init regulatory_init(void)
 {
 	int err = 0;
@@ -2178,6 +2243,7 @@ void /* __init_or_exit */ regulatory_exit(void)
 	struct reg_beacon *reg_beacon, *btmp;
 
 	cancel_work_sync(&reg_work);
+	cancel_delayed_work_sync(&reg_timeout);
 
 	mutex_lock(&cfg80211_mutex);
 	mutex_lock(&reg_mutex);
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index fbf6f33ae4d0..ae0c2256ba3b 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -93,6 +93,69 @@ void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted)
93} 93}
94EXPORT_SYMBOL(cfg80211_scan_done); 94EXPORT_SYMBOL(cfg80211_scan_done);
95 95
96void __cfg80211_sched_scan_results(struct work_struct *wk)
97{
98 struct cfg80211_registered_device *rdev;
99
100 rdev = container_of(wk, struct cfg80211_registered_device,
101 sched_scan_results_wk);
102
103 mutex_lock(&rdev->sched_scan_mtx);
104
105 /* we don't have sched_scan_req anymore if the scan is stopping */
106 if (rdev->sched_scan_req)
107 nl80211_send_sched_scan_results(rdev,
108 rdev->sched_scan_req->dev);
109
110 mutex_unlock(&rdev->sched_scan_mtx);
111}
112
113void cfg80211_sched_scan_results(struct wiphy *wiphy)
114{
115 /* ignore if we're not scanning */
116 if (wiphy_to_dev(wiphy)->sched_scan_req)
117 queue_work(cfg80211_wq,
118 &wiphy_to_dev(wiphy)->sched_scan_results_wk);
119}
120EXPORT_SYMBOL(cfg80211_sched_scan_results);
121
122void cfg80211_sched_scan_stopped(struct wiphy *wiphy)
123{
124 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
125
126 mutex_lock(&rdev->sched_scan_mtx);
127 __cfg80211_stop_sched_scan(rdev, true);
128 mutex_unlock(&rdev->sched_scan_mtx);
129}
130EXPORT_SYMBOL(cfg80211_sched_scan_stopped);
131
132int __cfg80211_stop_sched_scan(struct cfg80211_registered_device *rdev,
133 bool driver_initiated)
134{
135 int err;
136 struct net_device *dev;
137
138 lockdep_assert_held(&rdev->sched_scan_mtx);
139
140 if (!rdev->sched_scan_req)
141 return 0;
142
143 dev = rdev->sched_scan_req->dev;
144
145 if (!driver_initiated) {
146 err = rdev->ops->sched_scan_stop(&rdev->wiphy, dev);
147 if (err)
148 return err;
149 }
150
151 nl80211_send_sched_scan(rdev, dev, NL80211_CMD_SCHED_SCAN_STOPPED);
152
153 kfree(rdev->sched_scan_req);
154 rdev->sched_scan_req = NULL;
155
156 return err;
157}
158
96static void bss_release(struct kref *ref) 159static void bss_release(struct kref *ref)
97{ 160{
98 struct cfg80211_internal_bss *bss; 161 struct cfg80211_internal_bss *bss;
@@ -204,13 +267,35 @@ static bool is_bss(struct cfg80211_bss *a,
 		return memcmp(ssidie + 2, ssid, ssid_len) == 0;
 }
 
+static bool is_mesh_bss(struct cfg80211_bss *a)
+{
+	const u8 *ie;
+
+	if (!WLAN_CAPABILITY_IS_STA_BSS(a->capability))
+		return false;
+
+	ie = cfg80211_find_ie(WLAN_EID_MESH_ID,
+			      a->information_elements,
+			      a->len_information_elements);
+	if (!ie)
+		return false;
+
+	ie = cfg80211_find_ie(WLAN_EID_MESH_CONFIG,
+			      a->information_elements,
+			      a->len_information_elements);
+	if (!ie)
+		return false;
+
+	return true;
+}
+
 static bool is_mesh(struct cfg80211_bss *a,
 		    const u8 *meshid, size_t meshidlen,
 		    const u8 *meshcfg)
 {
 	const u8 *ie;
 
-	if (!is_zero_ether_addr(a->bssid))
+	if (!WLAN_CAPABILITY_IS_STA_BSS(a->capability))
 		return false;
 
 	ie = cfg80211_find_ie(WLAN_EID_MESH_ID,
@@ -248,11 +333,7 @@ static int cmp_bss(struct cfg80211_bss *a,
 	if (a->channel != b->channel)
 		return b->channel->center_freq - a->channel->center_freq;
 
-	r = memcmp(a->bssid, b->bssid, ETH_ALEN);
-	if (r)
-		return r;
-
-	if (is_zero_ether_addr(a->bssid)) {
+	if (is_mesh_bss(a) && is_mesh_bss(b)) {
 		r = cmp_ies(WLAN_EID_MESH_ID,
 			    a->information_elements,
 			    a->len_information_elements,
@@ -267,6 +348,10 @@ static int cmp_bss(struct cfg80211_bss *a,
 			    b->len_information_elements);
 	}
 
+	r = memcmp(a->bssid, b->bssid, ETH_ALEN);
+	if (r)
+		return r;
+
 	return cmp_ies(WLAN_EID_SSID,
 		       a->information_elements,
 		       a->len_information_elements,
@@ -394,7 +479,6 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
 		    struct cfg80211_internal_bss *res)
 {
 	struct cfg80211_internal_bss *found = NULL;
-	const u8 *meshid, *meshcfg;
 
 	/*
 	 * The reference to "res" is donated to this function.
@@ -407,22 +491,6 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
 
 	res->ts = jiffies;
 
-	if (is_zero_ether_addr(res->pub.bssid)) {
-		/* must be mesh, verify */
-		meshid = cfg80211_find_ie(WLAN_EID_MESH_ID,
-					  res->pub.information_elements,
-					  res->pub.len_information_elements);
-		meshcfg = cfg80211_find_ie(WLAN_EID_MESH_CONFIG,
-					   res->pub.information_elements,
-					   res->pub.len_information_elements);
-		if (!meshid || !meshcfg ||
-		    meshcfg[1] != sizeof(struct ieee80211_meshconf_ie)) {
-			/* bogus mesh */
-			kref_put(&res->ref, bss_release);
-			return NULL;
-		}
-	}
-
 	spin_lock_bh(&dev->bss_lock);
 
 	found = rb_find_bss(dev, res);
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index e17b0bee6bdc..b7b6ff8be553 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -250,7 +250,8 @@ static struct cfg80211_bss *cfg80211_get_conn_bss(struct wireless_dev *wdev)
 	if (wdev->conn->params.privacy)
 		capa |= WLAN_CAPABILITY_PRIVACY;
 
-	bss = cfg80211_get_bss(wdev->wiphy, NULL, wdev->conn->params.bssid,
+	bss = cfg80211_get_bss(wdev->wiphy, wdev->conn->params.channel,
+			       wdev->conn->params.bssid,
 			       wdev->conn->params.ssid,
 			       wdev->conn->params.ssid_len,
 			       WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_PRIVACY,
@@ -470,7 +471,10 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
 	}
 
 	if (!bss)
-		bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid,
+		bss = cfg80211_get_bss(wdev->wiphy,
+				       wdev->conn ? wdev->conn->params.channel :
+				       NULL,
+				       bssid,
 				       wdev->ssid, wdev->ssid_len,
 				       WLAN_CAPABILITY_ESS,
 				       WLAN_CAPABILITY_ESS);
@@ -538,7 +542,9 @@ void cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
 }
 EXPORT_SYMBOL(cfg80211_connect_result);
 
-void __cfg80211_roamed(struct wireless_dev *wdev, const u8 *bssid,
+void __cfg80211_roamed(struct wireless_dev *wdev,
+		       struct ieee80211_channel *channel,
+		       const u8 *bssid,
 		       const u8 *req_ie, size_t req_ie_len,
 		       const u8 *resp_ie, size_t resp_ie_len)
 {
@@ -565,7 +571,7 @@ void __cfg80211_roamed(struct wireless_dev *wdev, const u8 *bssid,
 	cfg80211_put_bss(&wdev->current_bss->pub);
 	wdev->current_bss = NULL;
 
-	bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid,
+	bss = cfg80211_get_bss(wdev->wiphy, channel, bssid,
 			       wdev->ssid, wdev->ssid_len,
 			       WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
 
@@ -603,7 +609,9 @@ void __cfg80211_roamed(struct wireless_dev *wdev, const u8 *bssid,
 #endif
 }
 
-void cfg80211_roamed(struct net_device *dev, const u8 *bssid,
+void cfg80211_roamed(struct net_device *dev,
+		     struct ieee80211_channel *channel,
+		     const u8 *bssid,
 		     const u8 *req_ie, size_t req_ie_len,
 		     const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp)
 {
@@ -619,6 +627,7 @@ void cfg80211_roamed(struct net_device *dev, const u8 *bssid,
 		return;
 
 	ev->type = EVENT_ROAMED;
+	ev->rm.channel = channel;
 	memcpy(ev->rm.bssid, bssid, ETH_ALEN);
 	ev->rm.req_ie = ((u8 *)ev) + sizeof(*ev);
 	ev->rm.req_ie_len = req_ie_len;
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index 4294fa22bb2d..c6e4ca6a7d2e 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -93,7 +93,7 @@ static int wiphy_suspend(struct device *dev, pm_message_t state)
 
 	if (rdev->ops->suspend) {
 		rtnl_lock();
-		ret = rdev->ops->suspend(&rdev->wiphy);
+		ret = rdev->ops->suspend(&rdev->wiphy, rdev->wowlan);
 		rtnl_unlock();
 	}
 
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 6a750bc6bcfe..4d7b83fbc32f 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -544,7 +544,8 @@ EXPORT_SYMBOL(ieee80211_data_from_8023);
 
 void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
 			      const u8 *addr, enum nl80211_iftype iftype,
-			      const unsigned int extra_headroom)
+			      const unsigned int extra_headroom,
+			      bool has_80211_header)
 {
 	struct sk_buff *frame = NULL;
 	u16 ethertype;
@@ -553,14 +554,18 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
 	int remaining, err;
 	u8 dst[ETH_ALEN], src[ETH_ALEN];
 
-	err = ieee80211_data_to_8023(skb, addr, iftype);
-	if (err)
-		goto out;
+	if (has_80211_header) {
+		err = ieee80211_data_to_8023(skb, addr, iftype);
+		if (err)
+			goto out;
 
-	/* skip the wrapping header */
-	eth = (struct ethhdr *) skb_pull(skb, sizeof(struct ethhdr));
-	if (!eth)
-		goto out;
+		/* skip the wrapping header */
+		eth = (struct ethhdr *) skb_pull(skb, sizeof(struct ethhdr));
+		if (!eth)
+			goto out;
+	} else {
+		eth = (struct ethhdr *) skb->data;
+	}
 
 	while (skb != frame) {
 		u8 padding;
@@ -741,7 +746,7 @@ static void cfg80211_process_wdev_events(struct wireless_dev *wdev)
 					 NULL);
 			break;
 		case EVENT_ROAMED:
-			__cfg80211_roamed(wdev, ev->rm.bssid,
+			__cfg80211_roamed(wdev, ev->rm.channel, ev->rm.bssid,
 					  ev->rm.req_ie, ev->rm.req_ie_len,
 					  ev->rm.resp_ie, ev->rm.resp_ie_len);
 			break;
@@ -803,6 +808,11 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
 		return -EBUSY;
 
 	if (ntype != otype) {
+		err = cfg80211_can_change_interface(rdev, dev->ieee80211_ptr,
+						    ntype);
+		if (err)
+			return err;
+
 		dev->ieee80211_ptr->use_4addr = false;
 		dev->ieee80211_ptr->mesh_id_up_len = 0;
 
@@ -896,3 +906,103 @@ u16 cfg80211_calculate_bitrate(struct rate_info *rate)
 	/* do NOT round down here */
 	return (bitrate + 50000) / 100000;
 }
+
+int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
+				 u32 beacon_int)
+{
+	struct wireless_dev *wdev;
+	int res = 0;
+
+	if (!beacon_int)
+		return -EINVAL;
+
+	mutex_lock(&rdev->devlist_mtx);
+
+	list_for_each_entry(wdev, &rdev->netdev_list, list) {
+		if (!wdev->beacon_interval)
+			continue;
+		if (wdev->beacon_interval != beacon_int) {
+			res = -EINVAL;
+			break;
+		}
+	}
+
+	mutex_unlock(&rdev->devlist_mtx);
+
+	return res;
+}
+
+int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
+				  struct wireless_dev *wdev,
+				  enum nl80211_iftype iftype)
+{
+	struct wireless_dev *wdev_iter;
+	int num[NUM_NL80211_IFTYPES];
+	int total = 1;
+	int i, j;
+
+	ASSERT_RTNL();
+
+	/* Always allow software iftypes */
+	if (rdev->wiphy.software_iftypes & BIT(iftype))
+		return 0;
+
+	/*
+	 * Drivers will gradually all set this flag, until all
+	 * have it we only enforce for those that set it.
+	 */
+	if (!(rdev->wiphy.flags & WIPHY_FLAG_ENFORCE_COMBINATIONS))
+		return 0;
+
+	memset(num, 0, sizeof(num));
+
+	num[iftype] = 1;
+
+	mutex_lock(&rdev->devlist_mtx);
+	list_for_each_entry(wdev_iter, &rdev->netdev_list, list) {
+		if (wdev_iter == wdev)
+			continue;
+		if (!netif_running(wdev_iter->netdev))
+			continue;
+
+		if (rdev->wiphy.software_iftypes & BIT(wdev_iter->iftype))
+			continue;
+
+		num[wdev_iter->iftype]++;
+		total++;
+	}
+	mutex_unlock(&rdev->devlist_mtx);
+
+	for (i = 0; i < rdev->wiphy.n_iface_combinations; i++) {
+		const struct ieee80211_iface_combination *c;
+		struct ieee80211_iface_limit *limits;
+
+		c = &rdev->wiphy.iface_combinations[i];
+
+		limits = kmemdup(c->limits, sizeof(limits[0]) * c->n_limits,
+				 GFP_KERNEL);
+		if (!limits)
+			return -ENOMEM;
+		if (total > c->max_interfaces)
+			goto cont;
+
+		for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) {
+			if (rdev->wiphy.software_iftypes & BIT(iftype))
+				continue;
+			for (j = 0; j < c->n_limits; j++) {
+				if (!(limits[j].types & BIT(iftype)))
+					continue;
+				if (limits[j].max < num[iftype])
+					goto cont;
+				limits[j].max -= num[iftype];
+			}
+		}
+		/* yay, it fits */
+		kfree(limits);
+		return 0;
+ cont:
+		kfree(limits);
+	}
+
+	return -EBUSY;
+}
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index b4d745ea8ee1..5ce74a385525 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -50,7 +50,7 @@ static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
 static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);
 static void xfrm_init_pmtu(struct dst_entry *dst);
 static int stale_bundle(struct dst_entry *dst);
-static int xfrm_bundle_ok(struct xfrm_dst *xdst, int family);
+static int xfrm_bundle_ok(struct xfrm_dst *xdst);
 
 
 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
@@ -1348,7 +1348,8 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
 	default:
 		BUG();
 	}
-	xdst = dst_alloc(dst_ops, 0);
+	xdst = dst_alloc(dst_ops, NULL, 0, 0, 0);
+	memset(&xdst->u.rt6.rt6i_table, 0, sizeof(*xdst) - sizeof(struct dst_entry));
 	xfrm_policy_put_afinfo(afinfo);
 
 	if (likely(xdst))
@@ -2240,7 +2241,7 @@ static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
 
 static int stale_bundle(struct dst_entry *dst)
 {
-	return !xfrm_bundle_ok((struct xfrm_dst *)dst, AF_UNSPEC);
+	return !xfrm_bundle_ok((struct xfrm_dst *)dst);
 }
 
 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
@@ -2312,7 +2313,7 @@ static void xfrm_init_pmtu(struct dst_entry *dst)
  * still valid.
  */
 
-static int xfrm_bundle_ok(struct xfrm_dst *first, int family)
+static int xfrm_bundle_ok(struct xfrm_dst *first)
 {
 	struct dst_entry *dst = &first->u.dst;
 	struct xfrm_dst *last;
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
index 47f1b8638df9..b11ea692bd7d 100644
--- a/net/xfrm/xfrm_replay.c
+++ b/net/xfrm/xfrm_replay.c
@@ -265,7 +265,7 @@ static void xfrm_replay_advance_bmp(struct xfrm_state *x, __be32 net_seq)
 		bitnr = bitnr & 0x1F;
 		replay_esn->bmp[nr] |= (1U << bitnr);
 	} else {
-		nr = replay_esn->replay_window >> 5;
+		nr = (replay_esn->replay_window - 1) >> 5;
 		for (i = 0; i <= nr; i++)
 			replay_esn->bmp[i] = 0;
 
@@ -471,7 +471,7 @@ static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq)
 		bitnr = bitnr & 0x1F;
 		replay_esn->bmp[nr] |= (1U << bitnr);
 	} else {
-		nr = replay_esn->replay_window >> 5;
+		nr = (replay_esn->replay_window - 1) >> 5;
 		for (i = 0; i <= nr; i++)
 			replay_esn->bmp[i] = 0;
 
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index dd78536d40de..9414b9c5b1e4 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1036,15 +1036,15 @@ static struct xfrm_state *__find_acq_core(struct net *net, struct xfrm_mark *m,
 
 	case AF_INET6:
 		ipv6_addr_copy((struct in6_addr *)x->sel.daddr.a6,
-			       (struct in6_addr *)daddr);
+			       (const struct in6_addr *)daddr);
 		ipv6_addr_copy((struct in6_addr *)x->sel.saddr.a6,
-			       (struct in6_addr *)saddr);
+			       (const struct in6_addr *)saddr);
 		x->sel.prefixlen_d = 128;
 		x->sel.prefixlen_s = 128;
 		ipv6_addr_copy((struct in6_addr *)x->props.saddr.a6,
-			       (struct in6_addr *)saddr);
+			       (const struct in6_addr *)saddr);
 		ipv6_addr_copy((struct in6_addr *)x->id.daddr.a6,
-			       (struct in6_addr *)daddr);
+			       (const struct in6_addr *)daddr);
 		break;
 	}
 
@@ -1345,6 +1345,8 @@ out:
 		xfrm_state_check_expire(x1);
 
 		err = 0;
+		x->km.state = XFRM_STATE_DEAD;
+		__xfrm_state_put(x);
 	}
 	spin_unlock_bh(&x1->lock);
 
@@ -2092,8 +2094,8 @@ static void xfrm_audit_helper_sainfo(struct xfrm_state *x,
 static void xfrm_audit_helper_pktinfo(struct sk_buff *skb, u16 family,
 				      struct audit_buffer *audit_buf)
 {
-	struct iphdr *iph4;
-	struct ipv6hdr *iph6;
+	const struct iphdr *iph4;
+	const struct ipv6hdr *iph6;
 
 	switch (family) {
 	case AF_INET: