path: root/net
author	Stephen Rothwell <sfr@canb.auug.org.au>	2011-07-25 13:59:46 -0400
committer	Trond Myklebust <Trond.Myklebust@netapp.com>	2011-07-25 14:53:52 -0400
commit	5f00bcb38ef9a980a33c6dbdc0044964b05f22dd (patch)
tree	3175fb9375aecb50bde1be0bf4fa8aa8155131d6 /net
parent	34006cee28f7344f9557a4be3816c7891b1bbab1 (diff)
parent	b6844e8f64920cdee620157252169ba63afb0c89 (diff)
Merge branch 'master' into devel and apply fixup from Stephen Rothwell:

vfs/nfs: fixup for nfs_open_context change

Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Diffstat (limited to 'net')
-rw-r--r--	net/8021q/vlan.c	19
-rw-r--r--	net/8021q/vlan.h	31
-rw-r--r--	net/8021q/vlan_core.c	46
-rw-r--r--	net/8021q/vlan_dev.c	14
-rw-r--r--	net/8021q/vlanproc.c	6
-rw-r--r--	net/9p/client.c	155
-rw-r--r--	net/9p/mod.c	4
-rw-r--r--	net/9p/protocol.c	44
-rw-r--r--	net/9p/trans_virtio.c	4
-rw-r--r--	net/Kconfig	1
-rw-r--r--	net/Makefile	1
-rw-r--r--	net/TUNABLE	50
-rw-r--r--	net/appletalk/aarp.c	138
-rw-r--r--	net/appletalk/ddp.c	380
-rw-r--r--	net/atm/clip.c	22
-rw-r--r--	net/atm/mpc.c	2
-rw-r--r--	net/atm/pppoatm.c	1
-rw-r--r--	net/batman-adv/Kconfig	1
-rw-r--r--	net/batman-adv/aggregation.c	73
-rw-r--r--	net/batman-adv/aggregation.h	13
-rw-r--r--	net/batman-adv/bat_debugfs.c	11
-rw-r--r--	net/batman-adv/bat_sysfs.c	102
-rw-r--r--	net/batman-adv/bat_sysfs.h	2
-rw-r--r--	net/batman-adv/bitarray.c	12
-rw-r--r--	net/batman-adv/bitarray.h	10
-rw-r--r--	net/batman-adv/gateway_client.c	268
-rw-r--r--	net/batman-adv/gateway_client.h	3
-rw-r--r--	net/batman-adv/gateway_common.c	25
-rw-r--r--	net/batman-adv/hard-interface.c	46
-rw-r--r--	net/batman-adv/hard-interface.h	20
-rw-r--r--	net/batman-adv/hash.c	7
-rw-r--r--	net/batman-adv/hash.h	6
-rw-r--r--	net/batman-adv/icmp_socket.c	4
-rw-r--r--	net/batman-adv/main.c	31
-rw-r--r--	net/batman-adv/main.h	85
-rw-r--r--	net/batman-adv/originator.c	36
-rw-r--r--	net/batman-adv/originator.h	18
-rw-r--r--	net/batman-adv/packet.h	142
-rw-r--r--	net/batman-adv/ring_buffer.c	4
-rw-r--r--	net/batman-adv/ring_buffer.h	2
-rw-r--r--	net/batman-adv/routing.c	376
-rw-r--r--	net/batman-adv/routing.h	15
-rw-r--r--	net/batman-adv/send.c	147
-rw-r--r--	net/batman-adv/send.h	14
-rw-r--r--	net/batman-adv/soft-interface.c	66
-rw-r--r--	net/batman-adv/soft-interface.h	5
-rw-r--r--	net/batman-adv/translation-table.c	1566
-rw-r--r--	net/batman-adv/translation-table.h	49
-rw-r--r--	net/batman-adv/types.h	74
-rw-r--r--	net/batman-adv/unicast.c	33
-rw-r--r--	net/batman-adv/unicast.h	8
-rw-r--r--	net/batman-adv/vis.c	104
-rw-r--r--	net/bluetooth/Kconfig	9
-rw-r--r--	net/bluetooth/Makefile	2
-rw-r--r--	net/bluetooth/cmtp/capi.c	3
-rw-r--r--	net/bluetooth/hci_conn.c	88
-rw-r--r--	net/bluetooth/hci_core.c	264
-rw-r--r--	net/bluetooth/hci_event.c	289
-rw-r--r--	net/bluetooth/hci_sock.c	70
-rw-r--r--	net/bluetooth/hidp/core.c	18
-rw-r--r--	net/bluetooth/hidp/hidp.h	1
-rw-r--r--	net/bluetooth/l2cap_core.c	1077
-rw-r--r--	net/bluetooth/l2cap_sock.c	442
-rw-r--r--	net/bluetooth/lib.c	23
-rw-r--r--	net/bluetooth/mgmt.c	281
-rw-r--r--	net/bluetooth/rfcomm/sock.c	6
-rw-r--r--	net/bluetooth/sco.c	4
-rw-r--r--	net/bluetooth/smp.c	702
-rw-r--r--	net/bridge/br_device.c	4
-rw-r--r--	net/bridge/br_input.c	6
-rw-r--r--	net/bridge/br_multicast.c	5
-rw-r--r--	net/bridge/br_netfilter.c	16
-rw-r--r--	net/bridge/br_netlink.c	15
-rw-r--r--	net/caif/caif_dev.c	1
-rw-r--r--	net/caif/chnl_net.c	2
-rw-r--r--	net/can/af_can.c	5
-rw-r--r--	net/can/bcm.c	1
-rw-r--r--	net/ceph/ceph_fs.c	17
-rw-r--r--	net/ceph/crypto.c	2
-rw-r--r--	net/ceph/osd_client.c	10
-rw-r--r--	net/core/dev.c	26
-rw-r--r--	net/core/dst.c	23
-rw-r--r--	net/core/ethtool.c	313
-rw-r--r--	net/core/fib_rules.c	6
-rw-r--r--	net/core/neighbour.c	191
-rw-r--r--	net/core/net-sysfs.c	2
-rw-r--r--	net/core/net-traces.c	2
-rw-r--r--	net/core/net_namespace.c	1
-rw-r--r--	net/core/netpoll.c	13
-rw-r--r--	net/core/rtnetlink.c	64
-rw-r--r--	net/core/skbuff.c	84
-rw-r--r--	net/core/sock.c	11
-rw-r--r--	net/core/timestamping.c	2
-rw-r--r--	net/dcb/dcbnl.c	677
-rw-r--r--	net/dccp/ccid.c	4
-rw-r--r--	net/dccp/ccids/ccid2.c	109
-rw-r--r--	net/dccp/ccids/ccid2.h	25
-rw-r--r--	net/dccp/input.c	61
-rw-r--r--	net/dccp/output.c	14
-rw-r--r--	net/decnet/af_decnet.c	697
-rw-r--r--	net/decnet/dn_dev.c	78
-rw-r--r--	net/decnet/dn_fib.c	81
-rw-r--r--	net/decnet/dn_neigh.c	42
-rw-r--r--	net/decnet/dn_nsp_in.c	182
-rw-r--r--	net/decnet/dn_route.c	143
-rw-r--r--	net/decnet/dn_table.c	23
-rw-r--r--	net/decnet/netfilter/dn_rtmsg.c	18
-rw-r--r--	net/decnet/sysctl_net_decnet.c	17
-rw-r--r--	net/dsa/mv88e6131.c	8
-rw-r--r--	net/econet/af_econet.c	172
-rw-r--r--	net/ethernet/eth.c	3
-rw-r--r--	net/ieee802154/af_ieee802154.c	2
-rw-r--r--	net/ieee802154/dgram.c	2
-rw-r--r--	net/ieee802154/nl-phy.c	31
-rw-r--r--	net/ipv4/af_inet.c	56
-rw-r--r--	net/ipv4/arp.c	41
-rw-r--r--	net/ipv4/devinet.c	6
-rw-r--r--	net/ipv4/fib_frontend.c	6
-rw-r--r--	net/ipv4/fib_trie.c	12
-rw-r--r--	net/ipv4/gre.c	1
-rw-r--r--	net/ipv4/icmp.c	14
-rw-r--r--	net/ipv4/inet_diag.c	2
-rw-r--r--	net/ipv4/inet_lro.c	74
-rw-r--r--	net/ipv4/inetpeer.c	293
-rw-r--r--	net/ipv4/ip_fragment.c	5
-rw-r--r--	net/ipv4/ip_gre.c	2
-rw-r--r--	net/ipv4/ip_input.c	4
-rw-r--r--	net/ipv4/ip_output.c	35
-rw-r--r--	net/ipv4/ipconfig.c	75
-rw-r--r--	net/ipv4/ipmr.c	3
-rw-r--r--	net/ipv4/netfilter.c	60
-rw-r--r--	net/ipv4/netfilter/ipt_CLUSTERIP.c	26
-rw-r--r--	net/ipv4/netfilter/ipt_REJECT.c	14
-rw-r--r--	net/ipv4/netfilter/nf_defrag_ipv4.c	2
-rw-r--r--	net/ipv4/netfilter/nf_nat_snmp_basic.c	210
-rw-r--r--	net/ipv4/netfilter/nf_nat_standalone.c	2
-rw-r--r--	net/ipv4/raw.c	36
-rw-r--r--	net/ipv4/route.c	94
-rw-r--r--	net/ipv4/syncookies.c	1
-rw-r--r--	net/ipv4/sysctl_net_ipv4.c	14
-rw-r--r--	net/ipv4/tcp.c	10
-rw-r--r--	net/ipv4/tcp_input.c	46
-rw-r--r--	net/ipv4/tcp_ipv4.c	11
-rw-r--r--	net/ipv4/tcp_minisocks.c	6
-rw-r--r--	net/ipv4/udp.c	15
-rw-r--r--	net/ipv4/xfrm4_output.c	7
-rw-r--r--	net/ipv4/xfrm4_policy.c	2
-rw-r--r--	net/ipv6/addrconf.c	55
-rw-r--r--	net/ipv6/addrlabel.c	9
-rw-r--r--	net/ipv6/af_inet6.c	2
-rw-r--r--	net/ipv6/ip6_fib.c	5
-rw-r--r--	net/ipv6/ip6_output.c	58
-rw-r--r--	net/ipv6/ip6mr.c	3
-rw-r--r--	net/ipv6/ndisc.c	16
-rw-r--r--	net/ipv6/raw.c	145
-rw-r--r--	net/ipv6/route.c	110
-rw-r--r--	net/ipv6/sit.c	4
-rw-r--r--	net/ipv6/syncookies.c	1
-rw-r--r--	net/ipv6/tcp_ipv6.c	5
-rw-r--r--	net/ipv6/udp.c	7
-rw-r--r--	net/irda/af_irda.c	4
-rw-r--r--	net/irda/ircomm/ircomm_tty_attach.c	2
-rw-r--r--	net/irda/irda_device.c	2
-rw-r--r--	net/irda/iriap.c	8
-rw-r--r--	net/irda/irlan/irlan_client.c	10
-rw-r--r--	net/irda/irlan/irlan_common.c	10
-rw-r--r--	net/irda/irlan/irlan_eth.c	2
-rw-r--r--	net/irda/irlan/irlan_provider.c	10
-rw-r--r--	net/irda/irqueue.c	4
-rw-r--r--	net/irda/irttp.c	18
-rw-r--r--	net/iucv/iucv.c	7
-rw-r--r--	net/key/af_key.c	20
-rw-r--r--	net/l2tp/l2tp_ip.c	19
-rw-r--r--	net/lapb/lapb_iface.c	30
-rw-r--r--	net/lapb/lapb_in.c	881
-rw-r--r--	net/mac80211/aes_ccm.c	37
-rw-r--r--	net/mac80211/aes_ccm.h	2
-rw-r--r--	net/mac80211/aes_cmac.c	10
-rw-r--r--	net/mac80211/aes_cmac.h	2
-rw-r--r--	net/mac80211/agg-rx.c	21
-rw-r--r--	net/mac80211/cfg.c	62
-rw-r--r--	net/mac80211/debugfs_key.c	13
-rw-r--r--	net/mac80211/driver-ops.h	28
-rw-r--r--	net/mac80211/driver-trace.h	101
-rw-r--r--	net/mac80211/ht.c	6
-rw-r--r--	net/mac80211/ieee80211_i.h	36
-rw-r--r--	net/mac80211/iface.c	7
-rw-r--r--	net/mac80211/key.c	171
-rw-r--r--	net/mac80211/key.h	32
-rw-r--r--	net/mac80211/mesh_pathtbl.c	4
-rw-r--r--	net/mac80211/mlme.c	111
-rw-r--r--	net/mac80211/pm.c	51
-rw-r--r--	net/mac80211/rc80211_minstrel.c	9
-rw-r--r--	net/mac80211/rc80211_minstrel.h	12
-rw-r--r--	net/mac80211/rc80211_minstrel_ht.c	7
-rw-r--r--	net/mac80211/rx.c	38
-rw-r--r--	net/mac80211/scan.c	48
-rw-r--r--	net/mac80211/sta_info.h	6
-rw-r--r--	net/mac80211/tkip.c	108
-rw-r--r--	net/mac80211/tkip.h	8
-rw-r--r--	net/mac80211/tx.c	17
-rw-r--r--	net/mac80211/util.c	90
-rw-r--r--	net/mac80211/wme.c	3
-rw-r--r--	net/mac80211/wme.h	5
-rw-r--r--	net/mac80211/work.c	2
-rw-r--r--	net/mac80211/wpa.c	92
-rw-r--r--	net/netfilter/ipset/Kconfig	10
-rw-r--r--	net/netfilter/ipset/Makefile	1
-rw-r--r--	net/netfilter/ipset/ip_set_bitmap_ip.c	35
-rw-r--r--	net/netfilter/ipset/ip_set_bitmap_ipmac.c	38
-rw-r--r--	net/netfilter/ipset/ip_set_bitmap_port.c	31
-rw-r--r--	net/netfilter/ipset/ip_set_core.c	166
-rw-r--r--	net/netfilter/ipset/ip_set_hash_ip.c	49
-rw-r--r--	net/netfilter/ipset/ip_set_hash_ipport.c	69
-rw-r--r--	net/netfilter/ipset/ip_set_hash_ipportip.c	73
-rw-r--r--	net/netfilter/ipset/ip_set_hash_ipportnet.c	137
-rw-r--r--	net/netfilter/ipset/ip_set_hash_net.c	88
-rw-r--r--	net/netfilter/ipset/ip_set_hash_netiface.c	786
-rw-r--r--	net/netfilter/ipset/ip_set_hash_netport.c	119
-rw-r--r--	net/netfilter/ipset/ip_set_list_set.c	106
-rw-r--r--	net/netfilter/ipset/pfxlen.c	23
-rw-r--r--	net/netfilter/ipvs/ip_vs_app.c	14
-rw-r--r--	net/netfilter/ipvs/ip_vs_conn.c	4
-rw-r--r--	net/netfilter/ipvs/ip_vs_core.c	78
-rw-r--r--	net/netfilter/ipvs/ip_vs_ctl.c	28
-rw-r--r--	net/netfilter/ipvs/ip_vs_est.c	13
-rw-r--r--	net/netfilter/ipvs/ip_vs_ftp.c	52
-rw-r--r--	net/netfilter/ipvs/ip_vs_proto.c	4
-rw-r--r--	net/netfilter/ipvs/ip_vs_sync.c	13
-rw-r--r--	net/netfilter/nf_conntrack_netlink.c	4
-rw-r--r--	net/netfilter/nfnetlink.c	40
-rw-r--r--	net/netfilter/nfnetlink_queue.c	170
-rw-r--r--	net/netfilter/xt_AUDIT.c	5
-rw-r--r--	net/netfilter/xt_CT.c	11
-rw-r--r--	net/netfilter/xt_HL.c	64
-rw-r--r--	net/netfilter/xt_RATEEST.c	8
-rw-r--r--	net/netfilter/xt_hl.c	32
-rw-r--r--	net/netfilter/xt_set.c	153
-rw-r--r--	net/netlabel/netlabel_unlabeled.c	10
-rw-r--r--	net/netlink/af_netlink.c	20
-rw-r--r--	net/netlink/genetlink.c	2
-rw-r--r--	net/netrom/nr_route.c	22
-rw-r--r--	net/nfc/Kconfig	16
-rw-r--r--	net/nfc/Makefile	7
-rw-r--r--	net/nfc/af_nfc.c	98
-rw-r--r--	net/nfc/core.c	468
-rw-r--r--	net/nfc/netlink.c	537
-rw-r--r--	net/nfc/nfc.h	117
-rw-r--r--	net/nfc/rawsock.c	354
-rw-r--r--	net/packet/af_packet.c	438
-rw-r--r--	net/phonet/pn_netlink.c	13
-rw-r--r--	net/rds/bind.c	4
-rw-r--r--	net/rds/ib.h	1
-rw-r--r--	net/rds/ib_cm.c	6
-rw-r--r--	net/rds/ib_send.c	4
-rw-r--r--	net/rds/iw.h	1
-rw-r--r--	net/rds/iw_cm.c	9
-rw-r--r--	net/rds/iw_rdma.c	9
-rw-r--r--	net/rds/iw_send.c	4
-rw-r--r--	net/rds/send.c	7
-rw-r--r--	net/rds/tcp_stats.c	2
-rw-r--r--	net/rose/rose_link.c	7
-rw-r--r--	net/rose/rose_route.c	5
-rw-r--r--	net/sched/act_api.c	11
-rw-r--r--	net/sched/act_csum.c	2
-rw-r--r--	net/sched/act_gact.c	3
-rw-r--r--	net/sched/act_ipt.c	2
-rw-r--r--	net/sched/act_mirred.c	2
-rw-r--r--	net/sched/act_nat.c	2
-rw-r--r--	net/sched/act_pedit.c	2
-rw-r--r--	net/sched/act_police.c	2
-rw-r--r--	net/sched/act_simple.c	3
-rw-r--r--	net/sched/act_skbedit.c	2
-rw-r--r--	net/sched/cls_api.c	12
-rw-r--r--	net/sched/cls_basic.c	2
-rw-r--r--	net/sched/cls_cgroup.c	2
-rw-r--r--	net/sched/cls_flow.c	6
-rw-r--r--	net/sched/cls_fw.c	2
-rw-r--r--	net/sched/cls_route.c	2
-rw-r--r--	net/sched/cls_rsvp.h	4
-rw-r--r--	net/sched/cls_tcindex.c	2
-rw-r--r--	net/sched/cls_u32.c	2
-rw-r--r--	net/sched/em_meta.c	7
-rw-r--r--	net/sched/sch_api.c	18
-rw-r--r--	net/sched/sch_atm.c	1
-rw-r--r--	net/sched/sch_choke.c	2
-rw-r--r--	net/sched/sch_generic.c	10
-rw-r--r--	net/sched/sch_netem.c	1
-rw-r--r--	net/sched/sch_sfq.c	2
-rw-r--r--	net/sched/sch_teql.c	4
-rw-r--r--	net/sctp/associola.c	6
-rw-r--r--	net/sctp/bind_addr.c	17
-rw-r--r--	net/sctp/input.c	3
-rw-r--r--	net/sctp/ipv6.c	2
-rw-r--r--	net/sctp/output.c	19
-rw-r--r--	net/sctp/outqueue.c	33
-rw-r--r--	net/sctp/protocol.c	163
-rw-r--r--	net/sctp/sm_make_chunk.c	55
-rw-r--r--	net/sctp/sm_sideeffect.c	22
-rw-r--r--	net/sctp/sm_statefuns.c	83
-rw-r--r--	net/sctp/sm_statetable.c	2
-rw-r--r--	net/sctp/socket.c	236
-rw-r--r--	net/sctp/sysctl.c	7
-rw-r--r--	net/sctp/ulpevent.c	16
-rw-r--r--	net/sunrpc/auth.c	2
-rw-r--r--	net/sunrpc/auth_gss/auth_gss.c	46
-rw-r--r--	net/sunrpc/clnt.c	36
-rw-r--r--	net/sunrpc/rpc_pipe.c	14
-rw-r--r--	net/sunrpc/xprtrdma/svc_rdma_transport.c	1
-rw-r--r--	net/sunrpc/xprtrdma/verbs.c	1
-rw-r--r--	net/tipc/bcast.c	6
-rw-r--r--	net/tipc/bearer.c	1
-rw-r--r--	net/tipc/bearer.h	2
-rw-r--r--	net/tipc/core.h	6
-rw-r--r--	net/tipc/link.c	6
-rw-r--r--	net/tipc/msg.c	6
-rw-r--r--	net/tipc/msg.h	34
-rw-r--r--	net/tipc/name_distr.c	6
-rw-r--r--	net/tipc/name_table.c	289
-rw-r--r--	net/tipc/name_table.h	14
-rw-r--r--	net/tipc/port.c	284
-rw-r--r--	net/tipc/socket.c	3
-rw-r--r--	net/unix/af_unix.c	38
-rw-r--r--	net/wireless/core.c	12
-rw-r--r--	net/wireless/core.h	2
-rw-r--r--	net/wireless/mlme.c	15
-rw-r--r--	net/wireless/nl80211.c	259
-rw-r--r--	net/wireless/nl80211.h	4
-rw-r--r--	net/wireless/scan.c	17
-rw-r--r--	net/x25/af_x25.c	471
-rw-r--r--	net/x25/x25_dev.c	44
-rw-r--r--	net/x25/x25_in.c	118
-rw-r--r--	net/x25/x25_link.c	87
-rw-r--r--	net/x25/x25_subr.c	78
-rw-r--r--	net/xfrm/xfrm_policy.c	15
-rw-r--r--	net/xfrm/xfrm_state.c	2
-rw-r--r--	net/xfrm/xfrm_user.c	3
337 files changed, 14814 insertions, 7024 deletions
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 917ecb93ea2..8970ba139d7 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -18,6 +18,8 @@
  * 2 of the License, or (at your option) any later version.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/capability.h>
 #include <linux/module.h>
 #include <linux/netdevice.h>
@@ -132,8 +134,6 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
 		vlan_gvrp_uninit_applicant(real_dev);
 
 	rcu_assign_pointer(real_dev->vlgrp, NULL);
-	if (ops->ndo_vlan_rx_register)
-		ops->ndo_vlan_rx_register(real_dev, NULL);
 
 	/* Free the group, after all cpu's are done. */
 	call_rcu(&grp->rcu, vlan_rcu_free);
@@ -149,13 +149,13 @@ int vlan_check_real_dev(struct net_device *real_dev, u16 vlan_id)
 	const struct net_device_ops *ops = real_dev->netdev_ops;
 
 	if (real_dev->features & NETIF_F_VLAN_CHALLENGED) {
-		pr_info("8021q: VLANs not supported on %s\n", name);
+		pr_info("VLANs not supported on %s\n", name);
 		return -EOPNOTSUPP;
 	}
 
 	if ((real_dev->features & NETIF_F_HW_VLAN_FILTER) &&
 	    (!ops->ndo_vlan_rx_add_vid || !ops->ndo_vlan_rx_kill_vid)) {
-		pr_info("8021q: Device %s has buggy VLAN hw accel\n", name);
+		pr_info("Device %s has buggy VLAN hw accel\n", name);
 		return -EOPNOTSUPP;
 	}
 
@@ -205,8 +205,6 @@ int register_vlan_dev(struct net_device *dev)
 	grp->nr_vlans++;
 
 	if (ngrp) {
-		if (ops->ndo_vlan_rx_register && (real_dev->features & NETIF_F_HW_VLAN_RX))
-			ops->ndo_vlan_rx_register(real_dev, ngrp);
 		rcu_assign_pointer(real_dev->vlgrp, ngrp);
 	}
 	if (real_dev->features & NETIF_F_HW_VLAN_FILTER)
@@ -344,13 +342,12 @@ static void __vlan_device_event(struct net_device *dev, unsigned long event)
 	case NETDEV_CHANGENAME:
 		vlan_proc_rem_dev(dev);
 		if (vlan_proc_add_dev(dev) < 0)
-			pr_warning("8021q: failed to change proc name for %s\n",
-				   dev->name);
+			pr_warn("failed to change proc name for %s\n",
+				dev->name);
 		break;
 	case NETDEV_REGISTER:
 		if (vlan_proc_add_dev(dev) < 0)
-			pr_warning("8021q: failed to add proc entry for %s\n",
-				   dev->name);
+			pr_warn("failed to add proc entry for %s\n", dev->name);
 		break;
 	case NETDEV_UNREGISTER:
 		vlan_proc_rem_dev(dev);
@@ -374,7 +371,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 	if ((event == NETDEV_UP) &&
 	    (dev->features & NETIF_F_HW_VLAN_FILTER) &&
 	    dev->netdev_ops->ndo_vlan_rx_add_vid) {
-		pr_info("8021q: adding VLAN 0 to HW filter on device %s\n",
+		pr_info("adding VLAN 0 to HW filter on device %s\n",
 			dev->name);
 		dev->netdev_ops->ndo_vlan_rx_add_vid(dev, 0);
 	}
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index 9da07e30d1a..9fd45f3571f 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -74,6 +74,37 @@ static inline struct vlan_dev_info *vlan_dev_info(const struct net_device *dev)
 	return netdev_priv(dev);
 }
 
+static inline struct net_device *vlan_group_get_device(struct vlan_group *vg,
+						       u16 vlan_id)
+{
+	struct net_device **array;
+	array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
+	return array ? array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] : NULL;
+}
+
+static inline void vlan_group_set_device(struct vlan_group *vg,
+					 u16 vlan_id,
+					 struct net_device *dev)
+{
+	struct net_device **array;
+	if (!vg)
+		return;
+	array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
+	array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] = dev;
+}
+
+/* Must be invoked with rcu_read_lock or with RTNL. */
+static inline struct net_device *vlan_find_dev(struct net_device *real_dev,
+					       u16 vlan_id)
+{
+	struct vlan_group *grp = rcu_dereference_rtnl(real_dev->vlgrp);
+
+	if (grp)
+		return vlan_group_get_device(grp, vlan_id);
+
+	return NULL;
+}
+
 /* found in vlan_dev.c */
 void vlan_dev_set_ingress_priority(const struct net_device *dev,
 				   u32 skb_prio, u16 vlan_prio);
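
The vlan_find_dev() helper added above resolves a VLAN id to its net_device through the real device's vlan_group. A minimal usage sketch, assuming a hypothetical caller and a made-up VLAN id of 100 (not part of this commit):

static struct net_device *example_lookup(struct net_device *real_dev)
{
	struct net_device *vlan_dev;

	/* vlan_find_dev() must run under rcu_read_lock() or with RTNL held */
	rcu_read_lock();
	vlan_dev = vlan_find_dev(real_dev, 100);
	if (vlan_dev)
		dev_hold(vlan_dev);	/* take a reference before dropping RCU */
	rcu_read_unlock();

	return vlan_dev;		/* caller drops it with dev_put() */
}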
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index fcc684678af..5f27f8e3025 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -63,6 +63,27 @@ bool vlan_do_receive(struct sk_buff **skbp)
 	return true;
 }
 
+/* Must be invoked with rcu_read_lock or with RTNL. */
+struct net_device *__vlan_find_dev_deep(struct net_device *real_dev,
+					u16 vlan_id)
+{
+	struct vlan_group *grp = rcu_dereference_rtnl(real_dev->vlgrp);
+
+	if (grp) {
+		return vlan_group_get_device(grp, vlan_id);
+	} else {
+		/*
+		 * Bonding slaves do not have grp assigned to themselves.
+		 * Grp is assigned to bonding master instead.
+		 */
+		if (netif_is_bond_slave(real_dev))
+			return __vlan_find_dev_deep(real_dev->master, vlan_id);
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL(__vlan_find_dev_deep);
+
 struct net_device *vlan_dev_real_dev(const struct net_device *dev)
 {
 	return vlan_dev_info(dev)->real_dev;
@@ -75,31 +96,6 @@ u16 vlan_dev_vlan_id(const struct net_device *dev)
 }
 EXPORT_SYMBOL(vlan_dev_vlan_id);
 
-/* VLAN rx hw acceleration helper.  This acts like netif_{rx,receive_skb}(). */
-int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
-		      u16 vlan_tci, int polling)
-{
-	__vlan_hwaccel_put_tag(skb, vlan_tci);
-	return polling ? netif_receive_skb(skb) : netif_rx(skb);
-}
-EXPORT_SYMBOL(__vlan_hwaccel_rx);
-
-gro_result_t vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
-			      unsigned int vlan_tci, struct sk_buff *skb)
-{
-	__vlan_hwaccel_put_tag(skb, vlan_tci);
-	return napi_gro_receive(napi, skb);
-}
-EXPORT_SYMBOL(vlan_gro_receive);
-
-gro_result_t vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
-			    unsigned int vlan_tci)
-{
-	__vlan_hwaccel_put_tag(napi->skb, vlan_tci);
-	return napi_gro_frags(napi);
-}
-EXPORT_SYMBOL(vlan_gro_frags);
-
 static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
 {
 	if (skb_cow(skb, skb_headroom(skb)) < 0)
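
The deleted __vlan_hwaccel_rx()/vlan_gro_receive()/vlan_gro_frags() wrappers show their own replacement in their bodies: callers tag the skb and use the regular receive entry points directly. An illustrative driver-side sketch of that pattern (the function and parameter names here are assumptions, not part of this commit):

static void example_rx(struct napi_struct *napi, struct sk_buff *skb,
		       u16 vlan_tci)
{
	/* record the VLAN tag on the skb, exactly as the old wrappers did... */
	__vlan_hwaccel_put_tag(skb, vlan_tci);
	/* ...then hand it to the stack through the normal GRO path */
	napi_gro_receive(napi, skb);
}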
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 7ea5cf9ea08..934e221c1d0 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -20,6 +20,8 @@
  * 2 of the License, or (at your option) any later version.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/skbuff.h>
@@ -55,7 +57,7 @@ static int vlan_dev_rebuild_header(struct sk_buff *skb)
 		return arp_find(veth->h_dest, skb);
 #endif
 	default:
-		pr_debug("%s: unable to resolve type %X addresses.\n",
+		pr_debug("%s: unable to resolve type %X addresses\n",
 			 dev->name, ntohs(veth->h_vlan_encapsulated_proto));
 
 		memcpy(veth->h_source, dev->dev_addr, ETH_ALEN);
@@ -528,7 +530,11 @@ static int vlan_dev_init(struct net_device *dev)
 			  (1<<__LINK_STATE_DORMANT))) |
 		   (1<<__LINK_STATE_PRESENT);
 
-	dev->hw_features = NETIF_F_ALL_TX_OFFLOADS;
+	dev->hw_features = NETIF_F_ALL_CSUM | NETIF_F_SG |
+			   NETIF_F_FRAGLIST | NETIF_F_ALL_TSO |
+			   NETIF_F_HIGHDMA | NETIF_F_SCTP_CSUM |
+			   NETIF_F_ALL_FCOE;
+
 	dev->features |= real_dev->vlan_features | NETIF_F_LLTX;
 	dev->gso_max_size = real_dev->gso_max_size;
 
@@ -586,9 +592,13 @@ static void vlan_dev_uninit(struct net_device *dev)
 static u32 vlan_dev_fix_features(struct net_device *dev, u32 features)
 {
 	struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+	u32 old_features = features;
 
 	features &= real_dev->features;
 	features &= real_dev->vlan_features;
+
+	features |= old_features & NETIF_F_SOFT_FEATURES;
+
 	if (dev_ethtool_get_rx_csum(real_dev))
 		features |= NETIF_F_RXCSUM;
 	features |= NETIF_F_LLTX;
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c
index d940c49d168..d34b6daf893 100644
--- a/net/8021q/vlanproc.c
+++ b/net/8021q/vlanproc.c
@@ -17,6 +17,8 @@
  *		Jan 20, 1998	Ben Greear	Initial Version
  *****************************************************************************/
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/kernel.h>
@@ -155,7 +157,7 @@ int __net_init vlan_proc_init(struct net *net)
 	return 0;
 
 err:
-	pr_err("%s: can't create entry in proc filesystem!\n", __func__);
+	pr_err("can't create entry in proc filesystem!\n");
 	vlan_proc_cleanup(net);
 	return -ENOBUFS;
 }
@@ -229,7 +231,7 @@ static void *vlan_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 
 	++*pos;
 
-	dev = (struct net_device *)v;
+	dev = v;
 	if (v == SEQ_START_TOKEN)
 		dev = net_device_entry(&net->dev_base_head);
 
diff --git a/net/9p/client.c b/net/9p/client.c
index 9e3b0e640da..0505a03c374 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -72,23 +72,22 @@ inline int p9_is_proto_dotu(struct p9_client *clnt)
 EXPORT_SYMBOL(p9_is_proto_dotu);
 
 /* Interpret mount option for protocol version */
-static int get_protocol_version(const substring_t *name)
+static int get_protocol_version(char *s)
 {
 	int version = -EINVAL;
 
-	if (!strncmp("9p2000", name->from, name->to-name->from)) {
+	if (!strcmp(s, "9p2000")) {
 		version = p9_proto_legacy;
 		P9_DPRINTK(P9_DEBUG_9P, "Protocol version: Legacy\n");
-	} else if (!strncmp("9p2000.u", name->from, name->to-name->from)) {
+	} else if (!strcmp(s, "9p2000.u")) {
 		version = p9_proto_2000u;
 		P9_DPRINTK(P9_DEBUG_9P, "Protocol version: 9P2000.u\n");
-	} else if (!strncmp("9p2000.L", name->from, name->to-name->from)) {
+	} else if (!strcmp(s, "9p2000.L")) {
 		version = p9_proto_2000L;
 		P9_DPRINTK(P9_DEBUG_9P, "Protocol version: 9P2000.L\n");
-	} else {
-		P9_DPRINTK(P9_DEBUG_ERROR, "Unknown protocol version %s. ",
-			   name->from);
-	}
+	} else
+		printk(KERN_INFO "9p: Unknown protocol version %s.\n", s);
+
 	return version;
 }
 
@@ -106,6 +105,7 @@ static int parse_opts(char *opts, struct p9_client *clnt)
 	char *p;
 	substring_t args[MAX_OPT_ARGS];
 	int option;
+	char *s;
 	int ret = 0;
 
 	clnt->proto_version = p9_proto_2000u;
@@ -141,22 +141,41 @@ static int parse_opts(char *opts, struct p9_client *clnt)
 			clnt->msize = option;
 			break;
 		case Opt_trans:
-			clnt->trans_mod = v9fs_get_trans_by_name(&args[0]);
-			if(clnt->trans_mod == NULL) {
+			s = match_strdup(&args[0]);
+			if (!s) {
+				ret = -ENOMEM;
 				P9_DPRINTK(P9_DEBUG_ERROR,
-					"Could not find request transport: %s\n",
-					(char *) &args[0]);
+					   "problem allocating copy of trans arg\n");
+				goto free_and_return;
+			}
+			clnt->trans_mod = v9fs_get_trans_by_name(s);
+			if (clnt->trans_mod == NULL) {
+				printk(KERN_INFO
+					"9p: Could not find "
+					"request transport: %s\n", s);
 				ret = -EINVAL;
+				kfree(s);
 				goto free_and_return;
 			}
+			kfree(s);
 			break;
 		case Opt_legacy:
 			clnt->proto_version = p9_proto_legacy;
 			break;
 		case Opt_version:
-			ret = get_protocol_version(&args[0]);
-			if (ret == -EINVAL)
+			s = match_strdup(&args[0]);
+			if (!s) {
+				ret = -ENOMEM;
+				P9_DPRINTK(P9_DEBUG_ERROR,
+					"problem allocating copy of version arg\n");
 				goto free_and_return;
+			}
+			ret = get_protocol_version(s);
+			if (ret == -EINVAL) {
+				kfree(s);
+				goto free_and_return;
+			}
+			kfree(s);
 			clnt->proto_version = ret;
 			break;
 		default:
@@ -280,7 +299,8 @@ struct p9_req_t *p9_tag_lookup(struct p9_client *c, u16 tag)
 	 * buffer to read the data into */
 	tag++;
 
-	BUG_ON(tag >= c->max_tag);
+	if(tag >= c->max_tag)
+		return NULL;
 
 	row = tag / P9_ROW_MAXTAG;
 	col = tag % P9_ROW_MAXTAG;
@@ -749,7 +769,7 @@ static int p9_client_version(struct p9_client *c)
 	err = p9pdu_readf(req->rc, c->proto_version, "ds", &msize, &version);
 	if (err) {
 		P9_DPRINTK(P9_DEBUG_9P, "version error %d\n", err);
-		p9pdu_dump(1, req->rc);
+		P9_DUMP_PKT(1, req->rc);
 		goto error;
 	}
 
@@ -821,8 +841,8 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
 	if (err)
 		goto destroy_fidpool;
 
-	if ((clnt->msize+P9_IOHDRSZ) > clnt->trans_mod->maxsize)
-		clnt->msize = clnt->trans_mod->maxsize-P9_IOHDRSZ;
+	if (clnt->msize > clnt->trans_mod->maxsize)
+		clnt->msize = clnt->trans_mod->maxsize;
 
 	err = p9_client_version(clnt);
 	if (err)
@@ -911,7 +931,7 @@ struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid,
 
 	err = p9pdu_readf(req->rc, clnt->proto_version, "Q", &qid);
 	if (err) {
-		p9pdu_dump(1, req->rc);
+		P9_DUMP_PKT(1, req->rc);
 		p9_free_req(clnt, req);
 		goto error;
 	}
@@ -971,7 +991,7 @@ struct p9_fid *p9_client_walk(struct p9_fid *oldfid, uint16_t nwname,
 
 	err = p9pdu_readf(req->rc, clnt->proto_version, "R", &nwqids, &wqids);
 	if (err) {
-		p9pdu_dump(1, req->rc);
+		P9_DUMP_PKT(1, req->rc);
 		p9_free_req(clnt, req);
 		goto clunk_fid;
 	}
@@ -1038,7 +1058,7 @@ int p9_client_open(struct p9_fid *fid, int mode)
 
 	err = p9pdu_readf(req->rc, clnt->proto_version, "Qd", &qid, &iounit);
 	if (err) {
-		p9pdu_dump(1, req->rc);
+		P9_DUMP_PKT(1, req->rc);
 		goto free_and_error;
 	}
 
@@ -1081,7 +1101,7 @@ int p9_client_create_dotl(struct p9_fid *ofid, char *name, u32 flags, u32 mode,
 
 	err = p9pdu_readf(req->rc, clnt->proto_version, "Qd", qid, &iounit);
 	if (err) {
-		p9pdu_dump(1, req->rc);
+		P9_DUMP_PKT(1, req->rc);
 		goto free_and_error;
 	}
 
@@ -1126,7 +1146,7 @@ int p9_client_fcreate(struct p9_fid *fid, char *name, u32 perm, int mode,
 
 	err = p9pdu_readf(req->rc, clnt->proto_version, "Qd", &qid, &iounit);
 	if (err) {
-		p9pdu_dump(1, req->rc);
+		P9_DUMP_PKT(1, req->rc);
 		goto free_and_error;
 	}
 
@@ -1165,7 +1185,7 @@ int p9_client_symlink(struct p9_fid *dfid, char *name, char *symtgt, gid_t gid,
 
 	err = p9pdu_readf(req->rc, clnt->proto_version, "Q", qid);
 	if (err) {
-		p9pdu_dump(1, req->rc);
+		P9_DUMP_PKT(1, req->rc);
 		goto free_and_error;
 	}
 
@@ -1249,9 +1269,11 @@ int p9_client_clunk(struct p9_fid *fid)
 	P9_DPRINTK(P9_DEBUG_9P, "<<< RCLUNK fid %d\n", fid->fid);
 
 	p9_free_req(clnt, req);
-	p9_fid_destroy(fid);
-
 error:
+	/*
+	 * Fid is not valid even after a failed clunk
+	 */
+	p9_fid_destroy(fid);
 	return err;
 }
 EXPORT_SYMBOL(p9_client_clunk);
@@ -1281,6 +1303,29 @@ error:
 }
 EXPORT_SYMBOL(p9_client_remove);
 
+int p9_client_unlinkat(struct p9_fid *dfid, const char *name, int flags)
+{
+	int err = 0;
+	struct p9_req_t *req;
+	struct p9_client *clnt;
+
+	P9_DPRINTK(P9_DEBUG_9P, ">>> TUNLINKAT fid %d %s %d\n",
+		   dfid->fid, name, flags);
+
+	clnt = dfid->clnt;
+	req = p9_client_rpc(clnt, P9_TUNLINKAT, "dsd", dfid->fid, name, flags);
+	if (IS_ERR(req)) {
+		err = PTR_ERR(req);
+		goto error;
+	}
+	P9_DPRINTK(P9_DEBUG_9P, "<<< RUNLINKAT fid %d %s\n", dfid->fid, name);
+
+	p9_free_req(clnt, req);
+error:
+	return err;
+}
+EXPORT_SYMBOL(p9_client_unlinkat);
+
 int
 p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
 	       u32 count)
@@ -1318,11 +1363,12 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
 
 	err = p9pdu_readf(req->rc, clnt->proto_version, "D", &count, &dataptr);
 	if (err) {
-		p9pdu_dump(1, req->rc);
+		P9_DUMP_PKT(1, req->rc);
 		goto free_and_error;
 	}
 
 	P9_DPRINTK(P9_DEBUG_9P, "<<< RREAD count %d\n", count);
+	P9_DUMP_PKT(1, req->rc);
 
 	if (!req->tc->pbuf_size) {
 		if (data) {
@@ -1386,7 +1432,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
 
 	err = p9pdu_readf(req->rc, clnt->proto_version, "d", &count);
 	if (err) {
-		p9pdu_dump(1, req->rc);
+		P9_DUMP_PKT(1, req->rc);
 		goto free_and_error;
 	}
 
@@ -1426,7 +1472,7 @@ struct p9_wstat *p9_client_stat(struct p9_fid *fid)
 
 	err = p9pdu_readf(req->rc, clnt->proto_version, "wS", &ignored, ret);
 	if (err) {
-		p9pdu_dump(1, req->rc);
+		P9_DUMP_PKT(1, req->rc);
 		p9_free_req(clnt, req);
 		goto error;
 	}
@@ -1477,7 +1523,7 @@ struct p9_stat_dotl *p9_client_getattr_dotl(struct p9_fid *fid,
 
 	err = p9pdu_readf(req->rc, clnt->proto_version, "A", ret);
 	if (err) {
-		p9pdu_dump(1, req->rc);
+		P9_DUMP_PKT(1, req->rc);
 		p9_free_req(clnt, req);
 		goto error;
 	}
@@ -1625,7 +1671,7 @@ int p9_client_statfs(struct p9_fid *fid, struct p9_rstatfs *sb)
 		&sb->bsize, &sb->blocks, &sb->bfree, &sb->bavail,
 		&sb->files, &sb->ffree, &sb->fsid, &sb->namelen);
 	if (err) {
-		p9pdu_dump(1, req->rc);
+		P9_DUMP_PKT(1, req->rc);
 		p9_free_req(clnt, req);
 		goto error;
 	}
@@ -1643,7 +1689,8 @@ error:
 }
 EXPORT_SYMBOL(p9_client_statfs);
 
-int p9_client_rename(struct p9_fid *fid, struct p9_fid *newdirfid, char *name)
+int p9_client_rename(struct p9_fid *fid,
+		     struct p9_fid *newdirfid, const char *name)
 {
 	int err;
 	struct p9_req_t *req;
@@ -1670,6 +1717,36 @@ error:
 }
 EXPORT_SYMBOL(p9_client_rename);
 
+int p9_client_renameat(struct p9_fid *olddirfid, const char *old_name,
+		       struct p9_fid *newdirfid, const char *new_name)
+{
+	int err;
+	struct p9_req_t *req;
+	struct p9_client *clnt;
+
+	err = 0;
+	clnt = olddirfid->clnt;
+
+	P9_DPRINTK(P9_DEBUG_9P, ">>> TRENAMEAT olddirfid %d old name %s"
+		   " newdirfid %d new name %s\n", olddirfid->fid, old_name,
+		   newdirfid->fid, new_name);
+
+	req = p9_client_rpc(clnt, P9_TRENAMEAT, "dsds", olddirfid->fid,
+			    old_name, newdirfid->fid, new_name);
+	if (IS_ERR(req)) {
+		err = PTR_ERR(req);
+		goto error;
+	}
+
+	P9_DPRINTK(P9_DEBUG_9P, "<<< RRENAMEAT newdirfid %d new name %s\n",
+		   newdirfid->fid, new_name);
+
+	p9_free_req(clnt, req);
+error:
+	return err;
+}
+EXPORT_SYMBOL(p9_client_renameat);
+
 /*
  * An xattrwalk without @attr_name gives the fid for the lisxattr namespace
  */
@@ -1701,7 +1778,7 @@ struct p9_fid *p9_client_xattrwalk(struct p9_fid *file_fid,
 	}
 	err = p9pdu_readf(req->rc, clnt->proto_version, "q", attr_size);
 	if (err) {
-		p9pdu_dump(1, req->rc);
+		P9_DUMP_PKT(1, req->rc);
 		p9_free_req(clnt, req);
 		goto clunk_fid;
 	}
@@ -1780,7 +1857,7 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
 
 	err = p9pdu_readf(req->rc, clnt->proto_version, "D", &count, &dataptr);
 	if (err) {
-		p9pdu_dump(1, req->rc);
+		P9_DUMP_PKT(1, req->rc);
 		goto free_and_error;
 	}
 
@@ -1817,7 +1894,7 @@ int p9_client_mknod_dotl(struct p9_fid *fid, char *name, int mode,
 
 	err = p9pdu_readf(req->rc, clnt->proto_version, "Q", qid);
 	if (err) {
-		p9pdu_dump(1, req->rc);
+		P9_DUMP_PKT(1, req->rc);
 		goto error;
 	}
 	P9_DPRINTK(P9_DEBUG_9P, "<<< RMKNOD qid %x.%llx.%x\n", qid->type,
@@ -1848,7 +1925,7 @@ int p9_client_mkdir_dotl(struct p9_fid *fid, char *name, int mode,
 
 	err = p9pdu_readf(req->rc, clnt->proto_version, "Q", qid);
 	if (err) {
-		p9pdu_dump(1, req->rc);
+		P9_DUMP_PKT(1, req->rc);
 		goto error;
 	}
 	P9_DPRINTK(P9_DEBUG_9P, "<<< RMKDIR qid %x.%llx.%x\n", qid->type,
@@ -1883,7 +1960,7 @@ int p9_client_lock_dotl(struct p9_fid *fid, struct p9_flock *flock, u8 *status)
 
 	err = p9pdu_readf(req->rc, clnt->proto_version, "b", status);
 	if (err) {
-		p9pdu_dump(1, req->rc);
+		P9_DUMP_PKT(1, req->rc);
 		goto error;
 	}
 	P9_DPRINTK(P9_DEBUG_9P, "<<< RLOCK status %i\n", *status);
@@ -1916,7 +1993,7 @@ int p9_client_getlock_dotl(struct p9_fid *fid, struct p9_getlock *glock)
 		&glock->start, &glock->length, &glock->proc_id,
 		&glock->client_id);
 	if (err) {
-		p9pdu_dump(1, req->rc);
+		P9_DUMP_PKT(1, req->rc);
 		goto error;
 	}
 	P9_DPRINTK(P9_DEBUG_9P, "<<< RGETLOCK type %i start %lld length %lld "
@@ -1944,7 +2021,7 @@ int p9_client_readlink(struct p9_fid *fid, char **target)
 
 	err = p9pdu_readf(req->rc, clnt->proto_version, "s", target);
 	if (err) {
-		p9pdu_dump(1, req->rc);
+		P9_DUMP_PKT(1, req->rc);
 		goto error;
 	}
 	P9_DPRINTK(P9_DEBUG_9P, "<<< RREADLINK target %s\n", *target);
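
The new p9_client_renameat() and p9_client_unlinkat() calls above map to the 9P2000.L TRENAMEAT and TUNLINKAT requests. A hedged usage sketch against the signatures added in this diff (the fids, file names, and flags value below are invented for illustration):

static int example_move_then_remove(struct p9_fid *srcdir, struct p9_fid *dstdir)
{
	int err;

	/* rename "old.txt" in srcdir to "new.txt" in dstdir */
	err = p9_client_renameat(srcdir, "old.txt", dstdir, "new.txt");
	if (err)
		return err;

	/* then unlink it; flags 0 here assumes a plain file, not a directory */
	return p9_client_unlinkat(dstdir, "new.txt", 0);
}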
diff --git a/net/9p/mod.c b/net/9p/mod.c
index 72c39827505..2664d129229 100644
--- a/net/9p/mod.c
+++ b/net/9p/mod.c
@@ -80,14 +80,14 @@ EXPORT_SYMBOL(v9fs_unregister_trans);
  * @name: string identifying transport
  *
  */
-struct p9_trans_module *v9fs_get_trans_by_name(const substring_t *name)
+struct p9_trans_module *v9fs_get_trans_by_name(char *s)
 {
 	struct p9_trans_module *t, *found = NULL;
 
 	spin_lock(&v9fs_trans_lock);
 
 	list_for_each_entry(t, &v9fs_trans_list, list)
-		if (strncmp(t->name, name->from, name->to-name->from) == 0 &&
+		if (strcmp(t->name, s) == 0 &&
 		    try_module_get(t->owner)) {
 			found = t;
 			break;
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index a873277cb99..df58375ea6b 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -44,30 +44,24 @@ p9pdu_writef(struct p9_fcall *pdu, int proto_version, const char *fmt, ...);
 void
 p9pdu_dump(int way, struct p9_fcall *pdu)
 {
-	int i, n;
-	u8 *data = pdu->sdata;
-	int datalen = pdu->size;
-	char buf[255];
-	int buflen = 255;
-
-	i = n = 0;
-	if (datalen > (buflen-16))
-		datalen = buflen-16;
-	while (i < datalen) {
-		n += scnprintf(buf + n, buflen - n, "%02x ", data[i]);
-		if (i%4 == 3)
-			n += scnprintf(buf + n, buflen - n, " ");
-		if (i%32 == 31)
-			n += scnprintf(buf + n, buflen - n, "\n");
-
-		i++;
+	int len = pdu->size;
+
+	if ((p9_debug_level & P9_DEBUG_VPKT) != P9_DEBUG_VPKT) {
+		if ((p9_debug_level & P9_DEBUG_PKT) == P9_DEBUG_PKT) {
+			if (len > 32)
+				len = 32;
+		} else {
+			/* shouldn't happen */
+			return;
+		}
 	}
-	n += scnprintf(buf + n, buflen - n, "\n");
 
 	if (way)
-		P9_DPRINTK(P9_DEBUG_PKT, "[[[(%d) %s\n", datalen, buf);
+		print_hex_dump_bytes("[9P] ", DUMP_PREFIX_OFFSET, pdu->sdata,
+				     len);
 	else
-		P9_DPRINTK(P9_DEBUG_PKT, "]]](%d) %s\n", datalen, buf);
+		print_hex_dump_bytes("]9P[ ", DUMP_PREFIX_OFFSET, pdu->sdata,
+				     len);
 }
 #else
 void
@@ -610,7 +604,7 @@ int p9stat_read(char *buf, int len, struct p9_wstat *st, int proto_version)
 	ret = p9pdu_readf(&fake_pdu, proto_version, "S", st);
 	if (ret) {
 		P9_DPRINTK(P9_DEBUG_9P, "<<< p9stat_read failed: %d\n", ret);
-		p9pdu_dump(1, &fake_pdu);
+		P9_DUMP_PKT(0, &fake_pdu);
 	}
 
 	return ret;
@@ -632,11 +626,7 @@ int p9pdu_finalize(struct p9_fcall *pdu)
 	err = p9pdu_writef(pdu, 0, "d", size);
 	pdu->size = size;
 
-#ifdef CONFIG_NET_9P_DEBUG
-	if ((p9_debug_level & P9_DEBUG_PKT) == P9_DEBUG_PKT)
-		p9pdu_dump(0, pdu);
-#endif
-
+	P9_DUMP_PKT(0, pdu);
 	P9_DPRINTK(P9_DEBUG_9P, ">>> size=%d type: %d tag: %d\n", pdu->size,
 		   pdu->id, pdu->tag);
 
@@ -669,7 +659,7 @@ int p9dirent_read(char *buf, int len, struct p9_dirent *dirent,
 			  &dirent->d_off, &dirent->d_type, &nameptr);
 	if (ret) {
 		P9_DPRINTK(P9_DEBUG_9P, "<<< p9dirent_read failed: %d\n", ret);
-		p9pdu_dump(1, &fake_pdu);
+		P9_DUMP_PKT(1, &fake_pdu);
 		goto out;
 	}
 
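
The rewritten p9pdu_dump() drops the hand-rolled scnprintf() hex loop in favor of the kernel's print_hex_dump_bytes() helper, capping non-verbose dumps at 32 bytes. A sketch of the same technique applied to an arbitrary buffer (the prefix string and caller are made up, not from this patch):

static void example_dump(const void *buf, size_t len)
{
	if (len > 32)	/* cap output like the new non-verbose path */
		len = 32;
	print_hex_dump_bytes("demo: ", DUMP_PREFIX_OFFSET, buf, len);
}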
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 244e7074218..175b5135bdc 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -367,7 +367,7 @@ req_retry_pinned:
 		in += inp;
 	} else {
 		in = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, rdata,
-				  client->msize);
+				  req->rc->capacity);
 	}
 
 	err = virtqueue_add_buf(chan->vq, chan->sg, out, in, req->tc);
@@ -592,7 +592,7 @@ static struct p9_trans_module p9_virtio_trans = {
 	.close = p9_virtio_close,
 	.request = p9_virtio_request,
 	.cancel = p9_virtio_cancel,
-	.maxsize = PAGE_SIZE*16,
+	.maxsize = PAGE_SIZE*VIRTQUEUE_NUM,
 	.pref = P9_TRANS_PREF_PAYLOAD_SEP,
 	.def = 0,
 	.owner = THIS_MODULE,
diff --git a/net/Kconfig b/net/Kconfig
index 878151c772c..a0731484423 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -322,6 +322,7 @@ source "net/rfkill/Kconfig"
 source "net/9p/Kconfig"
 source "net/caif/Kconfig"
 source "net/ceph/Kconfig"
+source "net/nfc/Kconfig"
 
 
 endif   # if NET
diff --git a/net/Makefile b/net/Makefile
index a51d9465e62..acdde4950de 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -68,3 +68,4 @@ obj-$(CONFIG_WIMAX) += wimax/
 obj-$(CONFIG_DNS_RESOLVER)	+= dns_resolver/
 obj-$(CONFIG_CEPH_LIB)		+= ceph/
 obj-$(CONFIG_BATMAN_ADV)	+= batman-adv/
+obj-$(CONFIG_NFC)		+= nfc/
diff --git a/net/TUNABLE b/net/TUNABLE
deleted file mode 100644
index 9913211f07a..00000000000
--- a/net/TUNABLE
+++ /dev/null
@@ -1,50 +0,0 @@
-The following parameters should be tunable at compile time. Some of them
-exist as sysctls too.
-
-This is far from complete
-
-Item			Description
-----------------------------------------------------------------------------
-MAX_LINKS		Maximum number of netlink minor devices. (1-32)
-RIF_TABLE_SIZE		Token ring RIF cache size (tunable)
-AARP_HASH_SIZE		Size of Appletalk hash table (tunable)
-AX25_DEF_T1		AX.25 parameters. These are all tunable via
-AX25_DEF_T2		SIOCAX25SETPARMS
-AX25_DEF_T3		T1-T3,N2 have the meanings in the specification
-AX25_DEF_N2
-AX25_DEF_AXDEFMODE	8 = normal 128 is PE1CHL extended
-AX25_DEF_IPDEFMODE	'D' - datagram 'V' - virtual connection
-AX25_DEF_BACKOFF	'E'xponential 'L'inear
-AX25_DEF_NETROM		Allow netrom 1=Y
-AX25_DF_TEXT		Allow PID=Text 1=Y
-AX25_DEF_WINDOW		Window for normal mode
-AX25_DEF_EWINDOW	Window for PE1CHL mode
-AX25_DEF_DIGI		1 for inband 2 for cross band 3 for both
-AX25_DEF_CONMODE	Allow connected modes 1=Yes
-AX25_ROUTE_MAX		AX.25 route cache size - no currently tunable
-Unnamed (16)		Number of protocol hash slots (tunable)
-DEV_NUMBUFFS		Number of priority levels (not easily tunable)
-Unnamed (300)		Maximum packet backlog queue (tunable)
-MAX_IOVEC		Maximum number of iovecs in a message (tunable)
-MIN_WINDOW		Offered minimum window (tunable)
-MAX_WINDOW		Offered maximum window (tunable)
-MAX_HEADER		Largest physical header (tunable)
-MAX_ADDR_LEN		Largest physical address (tunable)
-SOCK_ARRAY_SIZE		IP socket array hash size (tunable)
-IP_MAX_MEMBERSHIPS	Largest number of groups per socket (BSD style) (tunable)
-16			Hard coded constant for amount of room allowed for
-			cache align and faster forwarding (tunable)
-IP_FRAG_TIME		Time we hold a fragment for. (tunable)
-PORT_MASQ_BEGIN		First port reserved for masquerade (tunable)
-PORT_MASQ_END		Last port used for masquerade (tunable)
-MASQUERADE_EXPIRE_TCP_FIN	Time we keep a masquerade for after a FIN
-MASQUERADE_EXPIRE_UDP	Time we keep a UDP masquerade for (tunable)
-MAXVIFS			Maximum mrouted vifs (1-32)
-MFC_LINES		Lines in the multicast router cache (tunable)
-
-NetROM parameters are tunable via an ioctl passing a struct
-
-4000			Size a Unix domain socket malloc falls back to
-			(tunable) should be 8K - a bit for 8K machines like
-			the ALPHA
-
diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c
index 50dce798132..1acc69576df 100644
--- a/net/appletalk/aarp.c
+++ b/net/appletalk/aarp.c
@@ -779,87 +779,87 @@ static int aarp_rcv(struct sk_buff *skb, struct net_device *dev,
 	}
 
 	switch (function) {
-		case AARP_REPLY:
-			if (!unresolved_count)	/* Speed up */
-				break;
-
-			/* Find the entry. */
-			a = __aarp_find_entry(unresolved[hash], dev, &sa);
-			if (!a || dev != a->dev)
-				break;
+	case AARP_REPLY:
+		if (!unresolved_count)	/* Speed up */
+			break;
 
-			/* We can fill one in - this is good. */
-			memcpy(a->hwaddr, ea->hw_src, ETH_ALEN);
-			__aarp_resolved(&unresolved[hash], a, hash);
-			if (!unresolved_count)
-				mod_timer(&aarp_timer,
-					  jiffies + sysctl_aarp_expiry_time);
-			break;
+		/* Find the entry. */
+		a = __aarp_find_entry(unresolved[hash], dev, &sa);
+		if (!a || dev != a->dev)
+			break;
 
-		case AARP_REQUEST:
-		case AARP_PROBE:
+		/* We can fill one in - this is good. */
+		memcpy(a->hwaddr, ea->hw_src, ETH_ALEN);
+		__aarp_resolved(&unresolved[hash], a, hash);
+		if (!unresolved_count)
+			mod_timer(&aarp_timer,
+				  jiffies + sysctl_aarp_expiry_time);
+		break;
+
+	case AARP_REQUEST:
+	case AARP_PROBE:
+
+		/*
+		 * If it is my address set ma to my address and reply.
+		 * We can treat probe and request the same. Probe
+		 * simply means we shouldn't cache the querying host,
+		 * as in a probe they are proposing an address not
+		 * using one.
+		 *
+		 * Support for proxy-AARP added. We check if the
+		 * address is one of our proxies before we toss the
+		 * packet out.
+		 */
+
+		sa.s_node = ea->pa_dst_node;
+		sa.s_net = ea->pa_dst_net;
+
+		/* See if we have a matching proxy. */
+		ma = __aarp_proxy_find(dev, &sa);
+		if (!ma)
+			ma = &ifa->address;
+		else { /* We need to make a copy of the entry. */
+			da.s_node = sa.s_node;
+			da.s_net = sa.s_net;
+			ma = &da;
+		}
 
+		if (function == AARP_PROBE) {
 			/*
-			 * If it is my address set ma to my address and reply.
-			 * We can treat probe and request the same. Probe
-			 * simply means we shouldn't cache the querying host,
-			 * as in a probe they are proposing an address not
-			 * using one.
-			 *
-			 * Support for proxy-AARP added. We check if the
-			 * address is one of our proxies before we toss the
-			 * packet out.
+			 * A probe implies someone trying to get an
+			 * address. So as a precaution flush any
+			 * entries we have for this address.
 			 */
+			a = __aarp_find_entry(resolved[sa.s_node %
+						       (AARP_HASH_SIZE - 1)],
+					      skb->dev, &sa);
 
-			sa.s_node = ea->pa_dst_node;
-			sa.s_net = ea->pa_dst_net;
-
-			/* See if we have a matching proxy. */
-			ma = __aarp_proxy_find(dev, &sa);
-			if (!ma)
-				ma = &ifa->address;
-			else { /* We need to make a copy of the entry. */
-				da.s_node = sa.s_node;
-				da.s_net = sa.s_net;
-				ma = &da;
-			}
-
-			if (function == AARP_PROBE) {
-				/*
-				 * A probe implies someone trying to get an
-				 * address. So as a precaution flush any
-				 * entries we have for this address.
-				 */
-				a = __aarp_find_entry(resolved[sa.s_node %
-							  (AARP_HASH_SIZE - 1)],
-						      skb->dev, &sa);
-
-				/*
-				 * Make it expire next tick - that avoids us
-				 * getting into a probe/flush/learn/probe/
-				 * flush/learn cycle during probing of a slow
-				 * to respond host addr.
-				 */
-				if (a) {
-					a->expires_at = jiffies - 1;
-					mod_timer(&aarp_timer, jiffies +
-							sysctl_aarp_tick_time);
-				}
+			/*
+			 * Make it expire next tick - that avoids us
+			 * getting into a probe/flush/learn/probe/
+			 * flush/learn cycle during probing of a slow
+			 * to respond host addr.
+			 */
+			if (a) {
+				a->expires_at = jiffies - 1;
+				mod_timer(&aarp_timer, jiffies +
+					  sysctl_aarp_tick_time);
 			}
+		}
 
-			if (sa.s_node != ma->s_node)
-				break;
+		if (sa.s_node != ma->s_node)
+			break;
 
-			if (sa.s_net && ma->s_net && sa.s_net != ma->s_net)
-				break;
+		if (sa.s_net && ma->s_net && sa.s_net != ma->s_net)
+			break;
 
-			sa.s_node = ea->pa_src_node;
-			sa.s_net = ea->pa_src_net;
+		sa.s_node = ea->pa_src_node;
+		sa.s_net = ea->pa_src_net;
 
-			/* aarp_my_address has found the address to use for us.
-			 */
-			aarp_send_reply(dev, ma, &sa, ea->hw_src);
-			break;
+		/* aarp_my_address has found the address to use for us.
+		 */
+		aarp_send_reply(dev, ma, &sa, ea->hw_src);
+		break;
 	}
 
 unlock:
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 956a5302002..b1fe7c35e8d 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -684,192 +684,192 @@ static int atif_ioctl(int cmd, void __user *arg)
 	atif = atalk_find_dev(dev);
 
 	switch (cmd) {
-		case SIOCSIFADDR:
-			if (!capable(CAP_NET_ADMIN))
-				return -EPERM;
-			if (sa->sat_family != AF_APPLETALK)
-				return -EINVAL;
-			if (dev->type != ARPHRD_ETHER &&
-			    dev->type != ARPHRD_LOOPBACK &&
-			    dev->type != ARPHRD_LOCALTLK &&
-			    dev->type != ARPHRD_PPP)
-				return -EPROTONOSUPPORT;
-
-			nr = (struct atalk_netrange *)&sa->sat_zero[0];
-			add_route = 1;
-
-			/*
-			 * if this is a point-to-point iface, and we already
-			 * have an iface for this AppleTalk address, then we
-			 * should not add a route
-			 */
-			if ((dev->flags & IFF_POINTOPOINT) &&
-			    atalk_find_interface(sa->sat_addr.s_net,
-						 sa->sat_addr.s_node)) {
-				printk(KERN_DEBUG "AppleTalk: point-to-point "
-				       "interface added with "
-				       "existing address\n");
-				add_route = 0;
-			}
-
-			/*
-			 * Phase 1 is fine on LocalTalk but we don't do
-			 * EtherTalk phase 1. Anyone wanting to add it go ahead.
-			 */
-			if (dev->type == ARPHRD_ETHER && nr->nr_phase != 2)
-				return -EPROTONOSUPPORT;
-			if (sa->sat_addr.s_node == ATADDR_BCAST ||
-			    sa->sat_addr.s_node == 254)
-				return -EINVAL;
-			if (atif) {
-				/* Already setting address */
-				if (atif->status & ATIF_PROBE)
-					return -EBUSY;
-
-				atif->address.s_net = sa->sat_addr.s_net;
-				atif->address.s_node = sa->sat_addr.s_node;
-				atrtr_device_down(dev);	/* Flush old routes */
-			} else {
-				atif = atif_add_device(dev, &sa->sat_addr);
-				if (!atif)
-					return -ENOMEM;
-			}
-			atif->nets = *nr;
-
-			/*
-			 * Check if the chosen address is used. If so we
-			 * error and atalkd will try another.
-			 */
-
-			if (!(dev->flags & IFF_LOOPBACK) &&
-			    !(dev->flags & IFF_POINTOPOINT) &&
-			    atif_probe_device(atif) < 0) {
-				atif_drop_device(dev);
-				return -EADDRINUSE;
-			}
-
-			/* Hey it worked - add the direct routes */
-			sa = (struct sockaddr_at *)&rtdef.rt_gateway;
-			sa->sat_family = AF_APPLETALK;
-			sa->sat_addr.s_net = atif->address.s_net;
-			sa->sat_addr.s_node = atif->address.s_node;
-			sa = (struct sockaddr_at *)&rtdef.rt_dst;
-			rtdef.rt_flags = RTF_UP;
-			sa->sat_family = AF_APPLETALK;
-			sa->sat_addr.s_node = ATADDR_ANYNODE;
-			if (dev->flags & IFF_LOOPBACK ||
-			    dev->flags & IFF_POINTOPOINT)
-				rtdef.rt_flags |= RTF_HOST;
-
-			/* Routerless initial state */
-			if (nr->nr_firstnet == htons(0) &&
-			    nr->nr_lastnet == htons(0xFFFE)) {
-				sa->sat_addr.s_net = atif->address.s_net;
-				atrtr_create(&rtdef, dev);
-				atrtr_set_default(dev);
-			} else {
-				limit = ntohs(nr->nr_lastnet);
-				if (limit - ntohs(nr->nr_firstnet) > 4096) {
-					printk(KERN_WARNING "Too many routes/"
-					       "iface.\n");
-					return -EINVAL;
-				}
-				if (add_route)
-					for (ct = ntohs(nr->nr_firstnet);
-					     ct <= limit; ct++) {
-						sa->sat_addr.s_net = htons(ct);
-						atrtr_create(&rtdef, dev);
-					}
-			}
-			dev_mc_add_global(dev, aarp_mcast);
-			return 0;
+	case SIOCSIFADDR:
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		if (sa->sat_family != AF_APPLETALK)
+			return -EINVAL;
+		if (dev->type != ARPHRD_ETHER &&
+		    dev->type != ARPHRD_LOOPBACK &&
+		    dev->type != ARPHRD_LOCALTLK &&
+		    dev->type != ARPHRD_PPP)
+			return -EPROTONOSUPPORT;
+
+		nr = (struct atalk_netrange *)&sa->sat_zero[0];
+		add_route = 1;
+
+		/*
+		 * if this is a point-to-point iface, and we already
+		 * have an iface for this AppleTalk address, then we
+		 * should not add a route
+		 */
+		if ((dev->flags & IFF_POINTOPOINT) &&
+		    atalk_find_interface(sa->sat_addr.s_net,
+					 sa->sat_addr.s_node)) {
+			printk(KERN_DEBUG "AppleTalk: point-to-point "
+			       "interface added with "
+			       "existing address\n");
+			add_route = 0;
+		}
+
+		/*
+		 * Phase 1 is fine on LocalTalk but we don't do
+		 * EtherTalk phase 1. Anyone wanting to add it go ahead.
+		 */
+		if (dev->type == ARPHRD_ETHER && nr->nr_phase != 2)
+			return -EPROTONOSUPPORT;
+		if (sa->sat_addr.s_node == ATADDR_BCAST ||
+		    sa->sat_addr.s_node == 254)
+			return -EINVAL;
+		if (atif) {
+			/* Already setting address */
+			if (atif->status & ATIF_PROBE)
+				return -EBUSY;
+
+			atif->address.s_net = sa->sat_addr.s_net;
+			atif->address.s_node = sa->sat_addr.s_node;
+			atrtr_device_down(dev);	/* Flush old routes */
+		} else {
+			atif = atif_add_device(dev, &sa->sat_addr);
+			if (!atif)
+				return -ENOMEM;
+		}
+		atif->nets = *nr;
+
+		/*
+		 * Check if the chosen address is used. If so we
+		 * error and atalkd will try another.
+		 */
+
+		if (!(dev->flags & IFF_LOOPBACK) &&
+		    !(dev->flags & IFF_POINTOPOINT) &&
+		    atif_probe_device(atif) < 0) {
+			atif_drop_device(dev);
+			return -EADDRINUSE;
+		}
+
+		/* Hey it worked - add the direct routes */
+		sa = (struct sockaddr_at *)&rtdef.rt_gateway;
+		sa->sat_family = AF_APPLETALK;
+		sa->sat_addr.s_net = atif->address.s_net;
+		sa->sat_addr.s_node = atif->address.s_node;
+		sa = (struct sockaddr_at *)&rtdef.rt_dst;
+		rtdef.rt_flags = RTF_UP;
+		sa->sat_family = AF_APPLETALK;
+		sa->sat_addr.s_node = ATADDR_ANYNODE;
+		if (dev->flags & IFF_LOOPBACK ||
+		    dev->flags & IFF_POINTOPOINT)
+			rtdef.rt_flags |= RTF_HOST;
+
+		/* Routerless initial state */
+		if (nr->nr_firstnet == htons(0) &&
+		    nr->nr_lastnet == htons(0xFFFE)) {
+			sa->sat_addr.s_net = atif->address.s_net;
+			atrtr_create(&rtdef, dev);
+			atrtr_set_default(dev);
+		} else {
+			limit = ntohs(nr->nr_lastnet);
+			if (limit - ntohs(nr->nr_firstnet) > 4096) {
+				printk(KERN_WARNING "Too many routes/"
+				       "iface.\n");
+				return -EINVAL;
+			}
+			if (add_route)
+				for (ct = ntohs(nr->nr_firstnet);
+				     ct <= limit; ct++) {
+					sa->sat_addr.s_net = htons(ct);
+					atrtr_create(&rtdef, dev);
+				}
+		}
+		dev_mc_add_global(dev, aarp_mcast);
+		return 0;
 
-		case SIOCGIFADDR:
-			if (!atif)
-				return -EADDRNOTAVAIL;
-
-			sa->sat_family = AF_APPLETALK;
-			sa->sat_addr = atif->address;
-			break;
-
-		case SIOCGIFBRDADDR:
-			if (!atif)
-				return -EADDRNOTAVAIL;
-
-			sa->sat_family = AF_APPLETALK;
-			sa->sat_addr.s_net = atif->address.s_net;
-			sa->sat_addr.s_node = ATADDR_BCAST;
-			break;
-
-		case SIOCATALKDIFADDR:
-		case SIOCDIFADDR:
-			if (!capable(CAP_NET_ADMIN))
-				return -EPERM;
-			if (sa->sat_family != AF_APPLETALK)
-				return -EINVAL;
-			atalk_dev_down(dev);
-			break;
-
-		case SIOCSARP:
-			if (!capable(CAP_NET_ADMIN))
-				return -EPERM;
-			if (sa->sat_family != AF_APPLETALK)
-				return -EINVAL;
-			/*
-			 * for now, we only support proxy AARP on ELAP;
-			 * we should be able to do it for LocalTalk, too.
-			 */
-			if (dev->type != ARPHRD_ETHER)
-				return -EPROTONOSUPPORT;
-
-			/*
-			 * atif points to the current interface on this network;
-			 * we aren't concerned about its current status (at
-			 * least for now), but it has all the settings about
-			 * the network we're going to probe. Consequently, it
-			 * must exist.
-			 */
-			if (!atif)
-				return -EADDRNOTAVAIL;
+	case SIOCGIFADDR:
+		if (!atif)
+			return -EADDRNOTAVAIL;
+
+		sa->sat_family = AF_APPLETALK;
+		sa->sat_addr = atif->address;
+		break;
+
+	case SIOCGIFBRDADDR:
+		if (!atif)
+			return -EADDRNOTAVAIL;
+
+		sa->sat_family = AF_APPLETALK;
+		sa->sat_addr.s_net = atif->address.s_net;
801 sa->sat_addr.s_node = ATADDR_BCAST;
802 break;
803
804 case SIOCATALKDIFADDR:
805 case SIOCDIFADDR:
806 if (!capable(CAP_NET_ADMIN))
807 return -EPERM;
808 if (sa->sat_family != AF_APPLETALK)
809 return -EINVAL;
810 atalk_dev_down(dev);
811 break;
834 812
835 nr = (struct atalk_netrange *)&(atif->nets); 813 case SIOCSARP:
836 /* 814 if (!capable(CAP_NET_ADMIN))
837 * Phase 1 is fine on Localtalk but we don't do 815 return -EPERM;
838 * Ethertalk phase 1. Anyone wanting to add it go ahead. 816 if (sa->sat_family != AF_APPLETALK)
839 */ 817 return -EINVAL;
840 if (dev->type == ARPHRD_ETHER && nr->nr_phase != 2) 818 /*
841 return -EPROTONOSUPPORT; 819 * for now, we only support proxy AARP on ELAP;
820 * we should be able to do it for LocalTalk, too.
821 */
822 if (dev->type != ARPHRD_ETHER)
823 return -EPROTONOSUPPORT;
842 824
843 if (sa->sat_addr.s_node == ATADDR_BCAST || 825 /*
844 sa->sat_addr.s_node == 254) 826 * atif points to the current interface on this network;
845 return -EINVAL; 827 * we aren't concerned about its current status (at
828 * least for now), but it has all the settings about
829 * the network we're going to probe. Consequently, it
830 * must exist.
831 */
832 if (!atif)
833 return -EADDRNOTAVAIL;
846 834
847 /* 835 nr = (struct atalk_netrange *)&(atif->nets);
848 * Check if the chosen address is used. If so we 836 /*
849 * error and ATCP will try another. 837 * Phase 1 is fine on Localtalk but we don't do
850 */ 838 * Ethertalk phase 1. Anyone wanting to add it go ahead.
851 if (atif_proxy_probe_device(atif, &(sa->sat_addr)) < 0) 839 */
852 return -EADDRINUSE; 840 if (dev->type == ARPHRD_ETHER && nr->nr_phase != 2)
841 return -EPROTONOSUPPORT;
853 842
854 /* 843 if (sa->sat_addr.s_node == ATADDR_BCAST ||
855 * We now have an address on the local network, and 844 sa->sat_addr.s_node == 254)
856 * the AARP code will defend it for us until we take it 845 return -EINVAL;
857 * down. We don't set up any routes right now, because
858 * ATCP will install them manually via SIOCADDRT.
859 */
860 break;
861 846
862 case SIOCDARP: 847 /*
863 if (!capable(CAP_NET_ADMIN)) 848 * Check if the chosen address is used. If so we
864 return -EPERM; 849 * error and ATCP will try another.
865 if (sa->sat_family != AF_APPLETALK) 850 */
866 return -EINVAL; 851 if (atif_proxy_probe_device(atif, &(sa->sat_addr)) < 0)
867 if (!atif) 852 return -EADDRINUSE;
868 return -EADDRNOTAVAIL;
869 853
870 /* give to aarp module to remove proxy entry */ 854 /*
871 aarp_proxy_remove(atif->dev, &(sa->sat_addr)); 855 * We now have an address on the local network, and
872 return 0; 856 * the AARP code will defend it for us until we take it
857 * down. We don't set up any routes right now, because
858 * ATCP will install them manually via SIOCADDRT.
859 */
860 break;
861
862 case SIOCDARP:
863 if (!capable(CAP_NET_ADMIN))
864 return -EPERM;
865 if (sa->sat_family != AF_APPLETALK)
866 return -EINVAL;
867 if (!atif)
868 return -EADDRNOTAVAIL;
869
870 /* give to aarp module to remove proxy entry */
871 aarp_proxy_remove(atif->dev, &(sa->sat_addr));
872 return 0;
873 } 873 }
874 874
875 return copy_to_user(arg, &atreq, sizeof(atreq)) ? -EFAULT : 0; 875 return copy_to_user(arg, &atreq, sizeof(atreq)) ? -EFAULT : 0;
@@ -884,25 +884,25 @@ static int atrtr_ioctl(unsigned int cmd, void __user *arg)
884 return -EFAULT; 884 return -EFAULT;
885 885
886 switch (cmd) { 886 switch (cmd) {
887 case SIOCDELRT: 887 case SIOCDELRT:
888 if (rt.rt_dst.sa_family != AF_APPLETALK) 888 if (rt.rt_dst.sa_family != AF_APPLETALK)
889 return -EINVAL; 889 return -EINVAL;
890 return atrtr_delete(&((struct sockaddr_at *) 890 return atrtr_delete(&((struct sockaddr_at *)
891 &rt.rt_dst)->sat_addr); 891 &rt.rt_dst)->sat_addr);
892 892
893 case SIOCADDRT: { 893 case SIOCADDRT: {
894 struct net_device *dev = NULL; 894 struct net_device *dev = NULL;
895 if (rt.rt_dev) { 895 if (rt.rt_dev) {
896 char name[IFNAMSIZ]; 896 char name[IFNAMSIZ];
897 if (copy_from_user(name, rt.rt_dev, IFNAMSIZ-1)) 897 if (copy_from_user(name, rt.rt_dev, IFNAMSIZ-1))
898 return -EFAULT; 898 return -EFAULT;
899 name[IFNAMSIZ-1] = '\0'; 899 name[IFNAMSIZ-1] = '\0';
900 dev = __dev_get_by_name(&init_net, name); 900 dev = __dev_get_by_name(&init_net, name);
901 if (!dev) 901 if (!dev)
902 return -ENODEV; 902 return -ENODEV;
903 }
904 return atrtr_create(&rt, dev);
905 } 903 }
904 return atrtr_create(&rt, dev);
905 }
906 } 906 }
907 return -EINVAL; 907 return -EINVAL;
908} 908}
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 1d4be60e139..4bc8c67ecb1 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -271,10 +271,8 @@ static const struct neigh_ops clip_neigh_ops = {
271 .family = AF_INET, 271 .family = AF_INET,
272 .solicit = clip_neigh_solicit, 272 .solicit = clip_neigh_solicit,
273 .error_report = clip_neigh_error, 273 .error_report = clip_neigh_error,
274 .output = dev_queue_xmit, 274 .output = neigh_direct_output,
275 .connected_output = dev_queue_xmit, 275 .connected_output = neigh_direct_output,
276 .hh_output = dev_queue_xmit,
277 .queue_xmit = dev_queue_xmit,
278}; 276};
279 277
280static int clip_constructor(struct neighbour *neigh) 278static int clip_constructor(struct neighbour *neigh)
@@ -364,33 +362,37 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
364 struct net_device *dev) 362 struct net_device *dev)
365{ 363{
366 struct clip_priv *clip_priv = PRIV(dev); 364 struct clip_priv *clip_priv = PRIV(dev);
365 struct dst_entry *dst = skb_dst(skb);
367 struct atmarp_entry *entry; 366 struct atmarp_entry *entry;
367 struct neighbour *n;
368 struct atm_vcc *vcc; 368 struct atm_vcc *vcc;
369 int old; 369 int old;
370 unsigned long flags; 370 unsigned long flags;
371 371
372 pr_debug("(skb %p)\n", skb); 372 pr_debug("(skb %p)\n", skb);
373 if (!skb_dst(skb)) { 373 if (!dst) {
374 pr_err("skb_dst(skb) == NULL\n"); 374 pr_err("skb_dst(skb) == NULL\n");
375 dev_kfree_skb(skb); 375 dev_kfree_skb(skb);
376 dev->stats.tx_dropped++; 376 dev->stats.tx_dropped++;
377 return NETDEV_TX_OK; 377 return NETDEV_TX_OK;
378 } 378 }
379 if (!skb_dst(skb)->neighbour) { 379 n = dst_get_neighbour(dst);
380 if (!n) {
380#if 0 381#if 0
381 skb_dst(skb)->neighbour = clip_find_neighbour(skb_dst(skb), 1); 382 n = clip_find_neighbour(skb_dst(skb), 1);
382 if (!skb_dst(skb)->neighbour) { 383 if (!n) {
383 dev_kfree_skb(skb); /* lost that one */ 384 dev_kfree_skb(skb); /* lost that one */
384 dev->stats.tx_dropped++; 385 dev->stats.tx_dropped++;
385 return 0; 386 return 0;
386 } 387 }
388 dst_set_neighbour(dst, n);
387#endif 389#endif
388 pr_err("NO NEIGHBOUR !\n"); 390 pr_err("NO NEIGHBOUR !\n");
389 dev_kfree_skb(skb); 391 dev_kfree_skb(skb);
390 dev->stats.tx_dropped++; 392 dev->stats.tx_dropped++;
391 return NETDEV_TX_OK; 393 return NETDEV_TX_OK;
392 } 394 }
393 entry = NEIGH2ENTRY(skb_dst(skb)->neighbour); 395 entry = NEIGH2ENTRY(n);
394 if (!entry->vccs) { 396 if (!entry->vccs) {
395 if (time_after(jiffies, entry->expires)) { 397 if (time_after(jiffies, entry->expires)) {
396 /* should be resolved */ 398 /* should be resolved */
@@ -407,7 +409,7 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
407 } 409 }
408 pr_debug("neigh %p, vccs %p\n", entry, entry->vccs); 410 pr_debug("neigh %p, vccs %p\n", entry, entry->vccs);
409 ATM_SKB(skb)->vcc = vcc = entry->vccs->vcc; 411 ATM_SKB(skb)->vcc = vcc = entry->vccs->vcc;
410 pr_debug("using neighbour %p, vcc %p\n", skb_dst(skb)->neighbour, vcc); 412 pr_debug("using neighbour %p, vcc %p\n", n, vcc);
411 if (entry->vccs->encap) { 413 if (entry->vccs->encap) {
412 void *here; 414 void *here;
413 415
diff --git a/net/atm/mpc.c b/net/atm/mpc.c
index 3ccca42e6f9..aa972409f09 100644
--- a/net/atm/mpc.c
+++ b/net/atm/mpc.c
@@ -1005,7 +1005,7 @@ static int mpoa_event_listener(struct notifier_block *mpoa_notifier,
1005 struct mpoa_client *mpc; 1005 struct mpoa_client *mpc;
1006 struct lec_priv *priv; 1006 struct lec_priv *priv;
1007 1007
1008 dev = (struct net_device *)dev_ptr; 1008 dev = dev_ptr;
1009 1009
1010 if (!net_eq(dev_net(dev), &init_net)) 1010 if (!net_eq(dev_net(dev), &init_net))
1011 return NOTIFY_DONE; 1011 return NOTIFY_DONE;
diff --git a/net/atm/pppoatm.c b/net/atm/pppoatm.c
index e9aced0ec56..db4a11c61d1 100644
--- a/net/atm/pppoatm.c
+++ b/net/atm/pppoatm.c
@@ -37,6 +37,7 @@
37 37
38#include <linux/module.h> 38#include <linux/module.h>
39#include <linux/init.h> 39#include <linux/init.h>
40#include <linux/interrupt.h>
40#include <linux/skbuff.h> 41#include <linux/skbuff.h>
41#include <linux/slab.h> 42#include <linux/slab.h>
42#include <linux/atm.h> 43#include <linux/atm.h>
diff --git a/net/batman-adv/Kconfig b/net/batman-adv/Kconfig
index 6c051ad833e..2b68d068eaf 100644
--- a/net/batman-adv/Kconfig
+++ b/net/batman-adv/Kconfig
@@ -5,6 +5,7 @@
5config BATMAN_ADV 5config BATMAN_ADV
6 tristate "B.A.T.M.A.N. Advanced Meshing Protocol" 6 tristate "B.A.T.M.A.N. Advanced Meshing Protocol"
7 depends on NET 7 depends on NET
8 select CRC16
8 default n 9 default n
9 ---help--- 10 ---help---
10 11
diff --git a/net/batman-adv/aggregation.c b/net/batman-adv/aggregation.c
index a8c32030527..69467fe71ff 100644
--- a/net/batman-adv/aggregation.c
+++ b/net/batman-adv/aggregation.c
@@ -20,28 +20,26 @@
20 */ 20 */
21 21
22#include "main.h" 22#include "main.h"
23#include "translation-table.h"
23#include "aggregation.h" 24#include "aggregation.h"
24#include "send.h" 25#include "send.h"
25#include "routing.h" 26#include "routing.h"
26#include "hard-interface.h" 27#include "hard-interface.h"
27 28
28/* calculate the size of the tt information for a given packet */
29static int tt_len(struct batman_packet *batman_packet)
30{
31 return batman_packet->num_tt * ETH_ALEN;
32}
33
34/* return true if new_packet can be aggregated with forw_packet */ 29/* return true if new_packet can be aggregated with forw_packet */
35static bool can_aggregate_with(struct batman_packet *new_batman_packet, 30static bool can_aggregate_with(const struct batman_packet *new_batman_packet,
31 struct bat_priv *bat_priv,
36 int packet_len, 32 int packet_len,
37 unsigned long send_time, 33 unsigned long send_time,
38 bool directlink, 34 bool directlink,
39 struct hard_iface *if_incoming, 35 const struct hard_iface *if_incoming,
40 struct forw_packet *forw_packet) 36 const struct forw_packet *forw_packet)
41{ 37{
42 struct batman_packet *batman_packet = 38 struct batman_packet *batman_packet =
43 (struct batman_packet *)forw_packet->skb->data; 39 (struct batman_packet *)forw_packet->skb->data;
44 int aggregated_bytes = forw_packet->packet_len + packet_len; 40 int aggregated_bytes = forw_packet->packet_len + packet_len;
41 struct hard_iface *primary_if = NULL;
42 bool res = false;
45 43
46 /** 44 /**
47 * we can aggregate the current packet to this aggregated packet 45 * we can aggregate the current packet to this aggregated packet
@@ -66,6 +64,10 @@ static bool can_aggregate_with(struct batman_packet *new_batman_packet,
66 * packet 64 * packet
67 */ 65 */
68 66
67 primary_if = primary_if_get_selected(bat_priv);
68 if (!primary_if)
69 goto out;
70
69 /* packets without direct link flag and high TTL 71 /* packets without direct link flag and high TTL
70 * are flooded through the net */ 72 * are flooded through the net */
71 if ((!directlink) && 73 if ((!directlink) &&
@@ -75,8 +77,10 @@ static bool can_aggregate_with(struct batman_packet *new_batman_packet,
75 /* own packets originating non-primary 77 /* own packets originating non-primary
76 * interfaces leave only that interface */ 78 * interfaces leave only that interface */
77 ((!forw_packet->own) || 79 ((!forw_packet->own) ||
78 (forw_packet->if_incoming->if_num == 0))) 80 (forw_packet->if_incoming == primary_if))) {
79 return true; 81 res = true;
82 goto out;
83 }
80 84
81 /* if the incoming packet is sent via this one 85 /* if the incoming packet is sent via this one
82 * interface only - we still can aggregate */ 86 * interface only - we still can aggregate */
@@ -89,16 +93,22 @@ static bool can_aggregate_with(struct batman_packet *new_batman_packet,
89 * (= secondary interface packets in general) */ 93 * (= secondary interface packets in general) */
90 (batman_packet->flags & DIRECTLINK || 94 (batman_packet->flags & DIRECTLINK ||
91 (forw_packet->own && 95 (forw_packet->own &&
92 forw_packet->if_incoming->if_num != 0))) 96 forw_packet->if_incoming != primary_if))) {
93 return true; 97 res = true;
98 goto out;
99 }
94 } 100 }
95 101
96 return false; 102out:
103 if (primary_if)
104 hardif_free_ref(primary_if);
105 return res;
97} 106}
98 107
99/* create a new aggregated packet and add this packet to it */ 108/* create a new aggregated packet and add this packet to it */
100static void new_aggregated_packet(unsigned char *packet_buff, int packet_len, 109static void new_aggregated_packet(const unsigned char *packet_buff,
101 unsigned long send_time, bool direct_link, 110 int packet_len, unsigned long send_time,
111 bool direct_link,
102 struct hard_iface *if_incoming, 112 struct hard_iface *if_incoming,
103 int own_packet) 113 int own_packet)
104{ 114{
@@ -118,7 +128,7 @@ static void new_aggregated_packet(unsigned char *packet_buff, int packet_len,
118 } 128 }
119 } 129 }
120 130
121 forw_packet_aggr = kmalloc(sizeof(struct forw_packet), GFP_ATOMIC); 131 forw_packet_aggr = kmalloc(sizeof(*forw_packet_aggr), GFP_ATOMIC);
122 if (!forw_packet_aggr) { 132 if (!forw_packet_aggr) {
123 if (!own_packet) 133 if (!own_packet)
124 atomic_inc(&bat_priv->batman_queue_left); 134 atomic_inc(&bat_priv->batman_queue_left);
@@ -150,7 +160,7 @@ static void new_aggregated_packet(unsigned char *packet_buff, int packet_len,
150 forw_packet_aggr->own = own_packet; 160 forw_packet_aggr->own = own_packet;
151 forw_packet_aggr->if_incoming = if_incoming; 161 forw_packet_aggr->if_incoming = if_incoming;
152 forw_packet_aggr->num_packets = 0; 162 forw_packet_aggr->num_packets = 0;
153 forw_packet_aggr->direct_link_flags = 0; 163 forw_packet_aggr->direct_link_flags = NO_FLAGS;
154 forw_packet_aggr->send_time = send_time; 164 forw_packet_aggr->send_time = send_time;
155 165
156 /* save packet direct link flag status */ 166 /* save packet direct link flag status */
@@ -176,8 +186,7 @@ out:
176 186
177/* aggregate a new packet into the existing aggregation */ 187/* aggregate a new packet into the existing aggregation */
178static void aggregate(struct forw_packet *forw_packet_aggr, 188static void aggregate(struct forw_packet *forw_packet_aggr,
179 unsigned char *packet_buff, 189 const unsigned char *packet_buff, int packet_len,
180 int packet_len,
181 bool direct_link) 190 bool direct_link)
182{ 191{
183 unsigned char *skb_buff; 192 unsigned char *skb_buff;
@@ -195,7 +204,7 @@ static void aggregate(struct forw_packet *forw_packet_aggr,
195 204
196void add_bat_packet_to_list(struct bat_priv *bat_priv, 205void add_bat_packet_to_list(struct bat_priv *bat_priv,
197 unsigned char *packet_buff, int packet_len, 206 unsigned char *packet_buff, int packet_len,
198 struct hard_iface *if_incoming, char own_packet, 207 struct hard_iface *if_incoming, int own_packet,
199 unsigned long send_time) 208 unsigned long send_time)
200{ 209{
201 /** 210 /**
@@ -215,6 +224,7 @@ void add_bat_packet_to_list(struct bat_priv *bat_priv,
215 hlist_for_each_entry(forw_packet_pos, tmp_node, 224 hlist_for_each_entry(forw_packet_pos, tmp_node,
216 &bat_priv->forw_bat_list, list) { 225 &bat_priv->forw_bat_list, list) {
217 if (can_aggregate_with(batman_packet, 226 if (can_aggregate_with(batman_packet,
227 bat_priv,
218 packet_len, 228 packet_len,
219 send_time, 229 send_time,
220 direct_link, 230 direct_link,
@@ -253,8 +263,9 @@ void add_bat_packet_to_list(struct bat_priv *bat_priv,
253} 263}
254 264
255/* unpack the aggregated packets and process them one by one */ 265/* unpack the aggregated packets and process them one by one */
256void receive_aggr_bat_packet(struct ethhdr *ethhdr, unsigned char *packet_buff, 266void receive_aggr_bat_packet(const struct ethhdr *ethhdr,
257 int packet_len, struct hard_iface *if_incoming) 267 unsigned char *packet_buff, int packet_len,
268 struct hard_iface *if_incoming)
258{ 269{
259 struct batman_packet *batman_packet; 270 struct batman_packet *batman_packet;
260 int buff_pos = 0; 271 int buff_pos = 0;
@@ -263,18 +274,20 @@ void receive_aggr_bat_packet(struct ethhdr *ethhdr, unsigned char *packet_buff,
263 batman_packet = (struct batman_packet *)packet_buff; 274 batman_packet = (struct batman_packet *)packet_buff;
264 275
265 do { 276 do {
266 /* network to host order for our 32bit seqno, and the 277 /* network to host order for our 32bit seqno and the
267 orig_interval. */ 278 orig_interval */
268 batman_packet->seqno = ntohl(batman_packet->seqno); 279 batman_packet->seqno = ntohl(batman_packet->seqno);
280 batman_packet->tt_crc = ntohs(batman_packet->tt_crc);
269 281
270 tt_buff = packet_buff + buff_pos + BAT_PACKET_LEN; 282 tt_buff = packet_buff + buff_pos + BAT_PACKET_LEN;
271 receive_bat_packet(ethhdr, batman_packet,
272 tt_buff, tt_len(batman_packet),
273 if_incoming);
274 283
275 buff_pos += BAT_PACKET_LEN + tt_len(batman_packet); 284 receive_bat_packet(ethhdr, batman_packet, tt_buff, if_incoming);
285
286 buff_pos += BAT_PACKET_LEN +
287 tt_len(batman_packet->tt_num_changes);
288
276 batman_packet = (struct batman_packet *) 289 batman_packet = (struct batman_packet *)
277 (packet_buff + buff_pos); 290 (packet_buff + buff_pos);
278 } while (aggregated_packet(buff_pos, packet_len, 291 } while (aggregated_packet(buff_pos, packet_len,
279 batman_packet->num_tt)); 292 batman_packet->tt_num_changes));
280} 293}
diff --git a/net/batman-adv/aggregation.h b/net/batman-adv/aggregation.h
index 7e6d72fbf54..216337bb841 100644
--- a/net/batman-adv/aggregation.h
+++ b/net/batman-adv/aggregation.h
@@ -25,9 +25,11 @@
25#include "main.h" 25#include "main.h"
26 26
27/* is there another aggregated packet here? */ 27/* is there another aggregated packet here? */
28static inline int aggregated_packet(int buff_pos, int packet_len, int num_tt) 28static inline int aggregated_packet(int buff_pos, int packet_len,
29 int tt_num_changes)
29{ 30{
30 int next_buff_pos = buff_pos + BAT_PACKET_LEN + (num_tt * ETH_ALEN); 31 int next_buff_pos = buff_pos + BAT_PACKET_LEN + (tt_num_changes *
32 sizeof(struct tt_change));
31 33
32 return (next_buff_pos <= packet_len) && 34 return (next_buff_pos <= packet_len) &&
33 (next_buff_pos <= MAX_AGGREGATION_BYTES); 35 (next_buff_pos <= MAX_AGGREGATION_BYTES);
@@ -35,9 +37,10 @@ static inline int aggregated_packet(int buff_pos, int packet_len, int num_tt)
35 37
36void add_bat_packet_to_list(struct bat_priv *bat_priv, 38void add_bat_packet_to_list(struct bat_priv *bat_priv,
37 unsigned char *packet_buff, int packet_len, 39 unsigned char *packet_buff, int packet_len,
38 struct hard_iface *if_incoming, char own_packet, 40 struct hard_iface *if_incoming, int own_packet,
39 unsigned long send_time); 41 unsigned long send_time);
40void receive_aggr_bat_packet(struct ethhdr *ethhdr, unsigned char *packet_buff, 42void receive_aggr_bat_packet(const struct ethhdr *ethhdr,
41 int packet_len, struct hard_iface *if_incoming); 43 unsigned char *packet_buff, int packet_len,
44 struct hard_iface *if_incoming);
42 45
43#endif /* _NET_BATMAN_ADV_AGGREGATION_H_ */ 46#endif /* _NET_BATMAN_ADV_AGGREGATION_H_ */
diff --git a/net/batman-adv/bat_debugfs.c b/net/batman-adv/bat_debugfs.c
index abaeec5f624..d0af9bf69e4 100644
--- a/net/batman-adv/bat_debugfs.c
+++ b/net/batman-adv/bat_debugfs.c
@@ -50,7 +50,8 @@ static void emit_log_char(struct debug_log *debug_log, char c)
50 debug_log->log_start = debug_log->log_end - log_buff_len; 50 debug_log->log_start = debug_log->log_end - log_buff_len;
51} 51}
52 52
53static int fdebug_log(struct debug_log *debug_log, char *fmt, ...) 53__printf(2, 3)
54static int fdebug_log(struct debug_log *debug_log, const char *fmt, ...)
54{ 55{
55 va_list args; 56 va_list args;
56 static char debug_log_buf[256]; 57 static char debug_log_buf[256];
@@ -74,14 +75,14 @@ static int fdebug_log(struct debug_log *debug_log, char *fmt, ...)
74 return 0; 75 return 0;
75} 76}
76 77
77int debug_log(struct bat_priv *bat_priv, char *fmt, ...) 78int debug_log(struct bat_priv *bat_priv, const char *fmt, ...)
78{ 79{
79 va_list args; 80 va_list args;
80 char tmp_log_buf[256]; 81 char tmp_log_buf[256];
81 82
82 va_start(args, fmt); 83 va_start(args, fmt);
83 vscnprintf(tmp_log_buf, sizeof(tmp_log_buf), fmt, args); 84 vscnprintf(tmp_log_buf, sizeof(tmp_log_buf), fmt, args);
84 fdebug_log(bat_priv->debug_log, "[%10u] %s", 85 fdebug_log(bat_priv->debug_log, "[%10lu] %s",
85 (jiffies / HZ), tmp_log_buf); 86 (jiffies / HZ), tmp_log_buf);
86 va_end(args); 87 va_end(args);
87 88
@@ -114,7 +115,7 @@ static ssize_t log_read(struct file *file, char __user *buf,
114 !(debug_log->log_end - debug_log->log_start)) 115 !(debug_log->log_end - debug_log->log_start))
115 return -EAGAIN; 116 return -EAGAIN;
116 117
117 if ((!buf) || (count < 0)) 118 if (!buf)
118 return -EINVAL; 119 return -EINVAL;
119 120
120 if (count == 0) 121 if (count == 0)
@@ -184,7 +185,7 @@ static int debug_log_setup(struct bat_priv *bat_priv)
184 if (!bat_priv->debug_dir) 185 if (!bat_priv->debug_dir)
185 goto err; 186 goto err;
186 187
187 bat_priv->debug_log = kzalloc(sizeof(struct debug_log), GFP_ATOMIC); 188 bat_priv->debug_log = kzalloc(sizeof(*bat_priv->debug_log), GFP_ATOMIC);
188 if (!bat_priv->debug_log) 189 if (!bat_priv->debug_log)
189 goto err; 190 goto err;
190 191
diff --git a/net/batman-adv/bat_sysfs.c b/net/batman-adv/bat_sysfs.c
index 497a0700cc3..cd15deba60a 100644
--- a/net/batman-adv/bat_sysfs.c
+++ b/net/batman-adv/bat_sysfs.c
@@ -28,9 +28,31 @@
28#include "gateway_client.h" 28#include "gateway_client.h"
29#include "vis.h" 29#include "vis.h"
30 30
31#define to_dev(obj) container_of(obj, struct device, kobj) 31static struct net_device *kobj_to_netdev(struct kobject *obj)
32#define kobj_to_netdev(obj) to_net_dev(to_dev(obj->parent)) 32{
33#define kobj_to_batpriv(obj) netdev_priv(kobj_to_netdev(obj)) 33 struct device *dev = container_of(obj->parent, struct device, kobj);
34 return to_net_dev(dev);
35}
36
37static struct bat_priv *kobj_to_batpriv(struct kobject *obj)
38{
39 struct net_device *net_dev = kobj_to_netdev(obj);
40 return netdev_priv(net_dev);
41}
42
43#define UEV_TYPE_VAR "BATTYPE="
44#define UEV_ACTION_VAR "BATACTION="
45#define UEV_DATA_VAR "BATDATA="
46
47static char *uev_action_str[] = {
48 "add",
49 "del",
50 "change"
51};
52
53static char *uev_type_str[] = {
54 "gw"
55};
34 56
35/* Use this, if you have customized show and store functions */ 57/* Use this, if you have customized show and store functions */
36#define BAT_ATTR(_name, _mode, _show, _store) \ 58#define BAT_ATTR(_name, _mode, _show, _store) \
@@ -96,7 +118,7 @@ ssize_t show_##_name(struct kobject *kobj, struct attribute *attr, \
96 118
97static int store_bool_attr(char *buff, size_t count, 119static int store_bool_attr(char *buff, size_t count,
98 struct net_device *net_dev, 120 struct net_device *net_dev,
99 char *attr_name, atomic_t *attr) 121 const char *attr_name, atomic_t *attr)
100{ 122{
101 int enabled = -1; 123 int enabled = -1;
102 124
@@ -138,16 +160,15 @@ static inline ssize_t __store_bool_attr(char *buff, size_t count,
138{ 160{
139 int ret; 161 int ret;
140 162
141 ret = store_bool_attr(buff, count, net_dev, (char *)attr->name, 163 ret = store_bool_attr(buff, count, net_dev, attr->name, attr_store);
142 attr_store);
143 if (post_func && ret) 164 if (post_func && ret)
144 post_func(net_dev); 165 post_func(net_dev);
145 166
146 return ret; 167 return ret;
147} 168}
148 169
149static int store_uint_attr(char *buff, size_t count, 170static int store_uint_attr(const char *buff, size_t count,
150 struct net_device *net_dev, char *attr_name, 171 struct net_device *net_dev, const char *attr_name,
151 unsigned int min, unsigned int max, atomic_t *attr) 172 unsigned int min, unsigned int max, atomic_t *attr)
152{ 173{
153 unsigned long uint_val; 174 unsigned long uint_val;
@@ -183,15 +204,15 @@ static int store_uint_attr(char *buff, size_t count,
183 return count; 204 return count;
184} 205}
185 206
186static inline ssize_t __store_uint_attr(char *buff, size_t count, 207static inline ssize_t __store_uint_attr(const char *buff, size_t count,
187 int min, int max, 208 int min, int max,
188 void (*post_func)(struct net_device *), 209 void (*post_func)(struct net_device *),
189 struct attribute *attr, 210 const struct attribute *attr,
190 atomic_t *attr_store, struct net_device *net_dev) 211 atomic_t *attr_store, struct net_device *net_dev)
191{ 212{
192 int ret; 213 int ret;
193 214
194 ret = store_uint_attr(buff, count, net_dev, (char *)attr->name, 215 ret = store_uint_attr(buff, count, net_dev, attr->name,
195 min, max, attr_store); 216 min, max, attr_store);
196 if (post_func && ret) 217 if (post_func && ret)
197 post_func(net_dev); 218 post_func(net_dev);
@@ -368,7 +389,7 @@ BAT_ATTR_UINT(gw_sel_class, S_IRUGO | S_IWUSR, 1, TQ_MAX_VALUE,
368static BAT_ATTR(gw_bandwidth, S_IRUGO | S_IWUSR, show_gw_bwidth, 389static BAT_ATTR(gw_bandwidth, S_IRUGO | S_IWUSR, show_gw_bwidth,
369 store_gw_bwidth); 390 store_gw_bwidth);
370#ifdef CONFIG_BATMAN_ADV_DEBUG 391#ifdef CONFIG_BATMAN_ADV_DEBUG
371BAT_ATTR_UINT(log_level, S_IRUGO | S_IWUSR, 0, 3, NULL); 392BAT_ATTR_UINT(log_level, S_IRUGO | S_IWUSR, 0, 7, NULL);
372#endif 393#endif
373 394
374static struct bat_attribute *mesh_attrs[] = { 395static struct bat_attribute *mesh_attrs[] = {
@@ -594,3 +615,60 @@ void sysfs_del_hardif(struct kobject **hardif_obj)
594 kobject_put(*hardif_obj); 615 kobject_put(*hardif_obj);
595 *hardif_obj = NULL; 616 *hardif_obj = NULL;
596} 617}
618
619int throw_uevent(struct bat_priv *bat_priv, enum uev_type type,
620 enum uev_action action, const char *data)
621{
622 int ret = -1;
623 struct hard_iface *primary_if = NULL;
624 struct kobject *bat_kobj;
625 char *uevent_env[4] = { NULL, NULL, NULL, NULL };
626
627 primary_if = primary_if_get_selected(bat_priv);
628 if (!primary_if)
629 goto out;
630
631 bat_kobj = &primary_if->soft_iface->dev.kobj;
632
633 uevent_env[0] = kmalloc(strlen(UEV_TYPE_VAR) +
634 strlen(uev_type_str[type]) + 1,
635 GFP_ATOMIC);
636 if (!uevent_env[0])
637 goto out;
638
639 sprintf(uevent_env[0], "%s%s", UEV_TYPE_VAR, uev_type_str[type]);
640
641 uevent_env[1] = kmalloc(strlen(UEV_ACTION_VAR) +
642 strlen(uev_action_str[action]) + 1,
643 GFP_ATOMIC);
644 if (!uevent_env[1])
645 goto out;
646
647 sprintf(uevent_env[1], "%s%s", UEV_ACTION_VAR, uev_action_str[action]);
648
649 /* If the event is DEL, ignore the data field */
650 if (action != UEV_DEL) {
651 uevent_env[2] = kmalloc(strlen(UEV_DATA_VAR) +
652 strlen(data) + 1, GFP_ATOMIC);
653 if (!uevent_env[2])
654 goto out;
655
656 sprintf(uevent_env[2], "%s%s", UEV_DATA_VAR, data);
657 }
658
659 ret = kobject_uevent_env(bat_kobj, KOBJ_CHANGE, uevent_env);
660out:
661 kfree(uevent_env[0]);
662 kfree(uevent_env[1]);
663 kfree(uevent_env[2]);
664
665 if (primary_if)
666 hardif_free_ref(primary_if);
667
668 if (ret)
669 bat_dbg(DBG_BATMAN, bat_priv, "Impossible to send "
670 "uevent for (%s,%s,%s) event (err: %d)\n",
671 uev_type_str[type], uev_action_str[action],
672 (action == UEV_DEL ? "NULL" : data), ret);
673 return ret;
674}
diff --git a/net/batman-adv/bat_sysfs.h b/net/batman-adv/bat_sysfs.h
index 02f1fa7aadf..a3f75a723c5 100644
--- a/net/batman-adv/bat_sysfs.h
+++ b/net/batman-adv/bat_sysfs.h
@@ -38,5 +38,7 @@ int sysfs_add_meshif(struct net_device *dev);
38void sysfs_del_meshif(struct net_device *dev); 38void sysfs_del_meshif(struct net_device *dev);
39int sysfs_add_hardif(struct kobject **hardif_obj, struct net_device *dev); 39int sysfs_add_hardif(struct kobject **hardif_obj, struct net_device *dev);
40void sysfs_del_hardif(struct kobject **hardif_obj); 40void sysfs_del_hardif(struct kobject **hardif_obj);
41int throw_uevent(struct bat_priv *bat_priv, enum uev_type type,
42 enum uev_action action, const char *data);
41 43
42#endif /* _NET_BATMAN_ADV_SYSFS_H_ */ 44#endif /* _NET_BATMAN_ADV_SYSFS_H_ */
diff --git a/net/batman-adv/bitarray.c b/net/batman-adv/bitarray.c
index ad2ca925b3e..c1f4bfc09cc 100644
--- a/net/batman-adv/bitarray.c
+++ b/net/batman-adv/bitarray.c
@@ -26,8 +26,8 @@
26 26
27/* returns true if the corresponding bit in the given seq_bits indicates true 27/* returns true if the corresponding bit in the given seq_bits indicates true
28 * and curr_seqno is within range of last_seqno */ 28 * and curr_seqno is within range of last_seqno */
29uint8_t get_bit_status(unsigned long *seq_bits, uint32_t last_seqno, 29int get_bit_status(const unsigned long *seq_bits, uint32_t last_seqno,
30 uint32_t curr_seqno) 30 uint32_t curr_seqno)
31{ 31{
32 int32_t diff, word_offset, word_num; 32 int32_t diff, word_offset, word_num;
33 33
@@ -127,10 +127,10 @@ static void bit_reset_window(unsigned long *seq_bits)
127 * 1 if the window was moved (either new or very old) 127 * 1 if the window was moved (either new or very old)
128 * 0 if the window was not moved/shifted. 128 * 0 if the window was not moved/shifted.
129 */ 129 */
130char bit_get_packet(void *priv, unsigned long *seq_bits, 130int bit_get_packet(void *priv, unsigned long *seq_bits,
131 int32_t seq_num_diff, int8_t set_mark) 131 int32_t seq_num_diff, int set_mark)
132{ 132{
133 struct bat_priv *bat_priv = (struct bat_priv *)priv; 133 struct bat_priv *bat_priv = priv;
134 134
135 /* sequence number is slightly older. We already got a sequence number 135 /* sequence number is slightly older. We already got a sequence number
136 * higher than this one, so we just mark it. */ 136 * higher than this one, so we just mark it. */
@@ -190,7 +190,7 @@ char bit_get_packet(void *priv, unsigned long *seq_bits,
190/* count the hamming weight, how many good packets did we receive? just count 190/* count the hamming weight, how many good packets did we receive? just count
191 * the 1's. 191 * the 1's.
192 */ 192 */
193int bit_packet_count(unsigned long *seq_bits) 193int bit_packet_count(const unsigned long *seq_bits)
194{ 194{
195 int i, hamming = 0; 195 int i, hamming = 0;
196 196
diff --git a/net/batman-adv/bitarray.h b/net/batman-adv/bitarray.h
index 769c246d1fc..9c04422aeb0 100644
--- a/net/batman-adv/bitarray.h
+++ b/net/batman-adv/bitarray.h
@@ -26,8 +26,8 @@
26 26
27/* returns true if the corresponding bit in the given seq_bits indicates true 27/* returns true if the corresponding bit in the given seq_bits indicates true
28 * and curr_seqno is within range of last_seqno */ 28 * and curr_seqno is within range of last_seqno */
29uint8_t get_bit_status(unsigned long *seq_bits, uint32_t last_seqno, 29int get_bit_status(const unsigned long *seq_bits, uint32_t last_seqno,
30 uint32_t curr_seqno); 30 uint32_t curr_seqno);
31 31
32/* turn corresponding bit on, so we can remember that we got the packet */ 32/* turn corresponding bit on, so we can remember that we got the packet */
33void bit_mark(unsigned long *seq_bits, int32_t n); 33void bit_mark(unsigned long *seq_bits, int32_t n);
@@ -35,10 +35,10 @@ void bit_mark(unsigned long *seq_bits, int32_t n);
35 35
36/* receive and process one packet, returns 1 if received seq_num is considered 36/* receive and process one packet, returns 1 if received seq_num is considered
37 * new, 0 if old */ 37 * new, 0 if old */
38char bit_get_packet(void *priv, unsigned long *seq_bits, 38int bit_get_packet(void *priv, unsigned long *seq_bits,
39 int32_t seq_num_diff, int8_t set_mark); 39 int32_t seq_num_diff, int set_mark);
40 40
41/* count the hamming weight, how many good packets did we receive? */ 41/* count the hamming weight, how many good packets did we receive? */
42int bit_packet_count(unsigned long *seq_bits); 42int bit_packet_count(const unsigned long *seq_bits);
43 43
44#endif /* _NET_BATMAN_ADV_BITARRAY_H_ */ 44#endif /* _NET_BATMAN_ADV_BITARRAY_H_ */
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 61605a0f3f3..056180ef9e1 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -20,15 +20,22 @@
20 */ 20 */
21 21
22#include "main.h" 22#include "main.h"
23#include "bat_sysfs.h"
23#include "gateway_client.h" 24#include "gateway_client.h"
24#include "gateway_common.h" 25#include "gateway_common.h"
25#include "hard-interface.h" 26#include "hard-interface.h"
26#include "originator.h" 27#include "originator.h"
28#include "routing.h"
27#include <linux/ip.h> 29#include <linux/ip.h>
28#include <linux/ipv6.h> 30#include <linux/ipv6.h>
29#include <linux/udp.h> 31#include <linux/udp.h>
30#include <linux/if_vlan.h> 32#include <linux/if_vlan.h>
31 33
34/* This is the offset of the options field in a dhcp packet starting at
35 * the beginning of the dhcp header */
36#define DHCP_OPTIONS_OFFSET 240
37#define DHCP_REQUEST 3
38
32static void gw_node_free_ref(struct gw_node *gw_node) 39static void gw_node_free_ref(struct gw_node *gw_node)
33{ 40{
34 if (atomic_dec_and_test(&gw_node->refcount)) 41 if (atomic_dec_and_test(&gw_node->refcount))
@@ -86,7 +93,7 @@ static void gw_select(struct bat_priv *bat_priv, struct gw_node *new_gw_node)
86 if (new_gw_node && !atomic_inc_not_zero(&new_gw_node->refcount)) 93 if (new_gw_node && !atomic_inc_not_zero(&new_gw_node->refcount))
87 new_gw_node = NULL; 94 new_gw_node = NULL;
88 95
89 curr_gw_node = bat_priv->curr_gw; 96 curr_gw_node = rcu_dereference_protected(bat_priv->curr_gw, 1);
90 rcu_assign_pointer(bat_priv->curr_gw, new_gw_node); 97 rcu_assign_pointer(bat_priv->curr_gw, new_gw_node);
91 98
92 if (curr_gw_node) 99 if (curr_gw_node)
@@ -97,40 +104,19 @@ static void gw_select(struct bat_priv *bat_priv, struct gw_node *new_gw_node)
97 104
98void gw_deselect(struct bat_priv *bat_priv) 105void gw_deselect(struct bat_priv *bat_priv)
99{ 106{
100 gw_select(bat_priv, NULL); 107 atomic_set(&bat_priv->gw_reselect, 1);
101} 108}
102 109
103void gw_election(struct bat_priv *bat_priv) 110static struct gw_node *gw_get_best_gw_node(struct bat_priv *bat_priv)
104{ 111{
105 struct hlist_node *node;
106 struct gw_node *gw_node, *curr_gw = NULL, *curr_gw_tmp = NULL;
107 struct neigh_node *router; 112 struct neigh_node *router;
108 uint8_t max_tq = 0; 113 struct hlist_node *node;
114 struct gw_node *gw_node, *curr_gw = NULL;
109 uint32_t max_gw_factor = 0, tmp_gw_factor = 0; 115 uint32_t max_gw_factor = 0, tmp_gw_factor = 0;
116 uint8_t max_tq = 0;
110 int down, up; 117 int down, up;
111 118
112 /**
113 * The batman daemon checks here if we already passed a full originator
114 * cycle in order to make sure we don't choose the first gateway we
115 * hear about. This check is based on the daemon's uptime which we
116 * don't have.
117 **/
118 if (atomic_read(&bat_priv->gw_mode) != GW_MODE_CLIENT)
119 return;
120
121 curr_gw = gw_get_selected_gw_node(bat_priv);
122 if (curr_gw)
123 goto out;
124
125 rcu_read_lock(); 119 rcu_read_lock();
126 if (hlist_empty(&bat_priv->gw_list)) {
127 bat_dbg(DBG_BATMAN, bat_priv,
128 "Removing selected gateway - "
129 "no gateway in range\n");
130 gw_deselect(bat_priv);
131 goto unlock;
132 }
133
134 hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) { 120 hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
135 if (gw_node->deleted) 121 if (gw_node->deleted)
136 continue; 122 continue;
@@ -139,6 +125,9 @@ void gw_election(struct bat_priv *bat_priv)
139 if (!router) 125 if (!router)
140 continue; 126 continue;
141 127
128 if (!atomic_inc_not_zero(&gw_node->refcount))
129 goto next;
130
142 switch (atomic_read(&bat_priv->gw_sel_class)) { 131 switch (atomic_read(&bat_priv->gw_sel_class)) {
143 case 1: /* fast connection */ 132 case 1: /* fast connection */
144 gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags, 133 gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags,
@@ -151,8 +140,12 @@ void gw_election(struct bat_priv *bat_priv)
151 140
152 if ((tmp_gw_factor > max_gw_factor) || 141 if ((tmp_gw_factor > max_gw_factor) ||
153 ((tmp_gw_factor == max_gw_factor) && 142 ((tmp_gw_factor == max_gw_factor) &&
154 (router->tq_avg > max_tq))) 143 (router->tq_avg > max_tq))) {
155 curr_gw_tmp = gw_node; 144 if (curr_gw)
145 gw_node_free_ref(curr_gw);
146 curr_gw = gw_node;
147 atomic_inc(&curr_gw->refcount);
148 }
156 break; 149 break;
157 150
158 default: /** 151 default: /**
@@ -163,8 +156,12 @@ void gw_election(struct bat_priv *bat_priv)
163 * soon as a better gateway appears which has 156 * soon as a better gateway appears which has
164 * $routing_class more tq points) 157 * $routing_class more tq points)
165 **/ 158 **/
166 if (router->tq_avg > max_tq) 159 if (router->tq_avg > max_tq) {
167 curr_gw_tmp = gw_node; 160 if (curr_gw)
161 gw_node_free_ref(curr_gw);
162 curr_gw = gw_node;
163 atomic_inc(&curr_gw->refcount);
164 }
168 break; 165 break;
169 } 166 }
170 167
@@ -174,42 +171,81 @@ void gw_election(struct bat_priv *bat_priv)
174 if (tmp_gw_factor > max_gw_factor) 171 if (tmp_gw_factor > max_gw_factor)
175 max_gw_factor = tmp_gw_factor; 172 max_gw_factor = tmp_gw_factor;
176 173
174 gw_node_free_ref(gw_node);
175
176next:
177 neigh_node_free_ref(router); 177 neigh_node_free_ref(router);
178 } 178 }
179 rcu_read_unlock();
179 180
180 if (curr_gw != curr_gw_tmp) { 181 return curr_gw;
181 router = orig_node_get_router(curr_gw_tmp->orig_node); 182}
182 if (!router)
183 goto unlock;
184 183
185 if ((curr_gw) && (!curr_gw_tmp)) 184void gw_election(struct bat_priv *bat_priv)
186 bat_dbg(DBG_BATMAN, bat_priv, 185{
187 "Removing selected gateway - " 186 struct gw_node *curr_gw = NULL, *next_gw = NULL;
188 "no gateway in range\n"); 187 struct neigh_node *router = NULL;
189 else if ((!curr_gw) && (curr_gw_tmp)) 188 char gw_addr[18] = { '\0' };
190 bat_dbg(DBG_BATMAN, bat_priv,
191 "Adding route to gateway %pM "
192 "(gw_flags: %i, tq: %i)\n",
193 curr_gw_tmp->orig_node->orig,
194 curr_gw_tmp->orig_node->gw_flags,
195 router->tq_avg);
196 else
197 bat_dbg(DBG_BATMAN, bat_priv,
198 "Changing route to gateway %pM "
199 "(gw_flags: %i, tq: %i)\n",
200 curr_gw_tmp->orig_node->orig,
201 curr_gw_tmp->orig_node->gw_flags,
202 router->tq_avg);
203 189
204 neigh_node_free_ref(router); 190 /**
205 gw_select(bat_priv, curr_gw_tmp); 191 * The batman daemon checks here if we already passed a full originator
192 * cycle in order to make sure we don't choose the first gateway we
193 * hear about. This check is based on the daemon's uptime which we
194 * don't have.
195 **/
196 if (atomic_read(&bat_priv->gw_mode) != GW_MODE_CLIENT)
197 goto out;
198
199 if (!atomic_dec_not_zero(&bat_priv->gw_reselect))
200 goto out;
201
202 curr_gw = gw_get_selected_gw_node(bat_priv);
203
204 next_gw = gw_get_best_gw_node(bat_priv);
205
206 if (curr_gw == next_gw)
207 goto out;
208
209 if (next_gw) {
210 sprintf(gw_addr, "%pM", next_gw->orig_node->orig);
211
212 router = orig_node_get_router(next_gw->orig_node);
213 if (!router) {
214 gw_deselect(bat_priv);
215 goto out;
216 }
206 } 217 }
207 218
208unlock: 219 if ((curr_gw) && (!next_gw)) {
209 rcu_read_unlock(); 220 bat_dbg(DBG_BATMAN, bat_priv,
221 "Removing selected gateway - no gateway in range\n");
222 throw_uevent(bat_priv, UEV_GW, UEV_DEL, NULL);
223 } else if ((!curr_gw) && (next_gw)) {
224 bat_dbg(DBG_BATMAN, bat_priv,
225 "Adding route to gateway %pM (gw_flags: %i, tq: %i)\n",
226 next_gw->orig_node->orig,
227 next_gw->orig_node->gw_flags,
228 router->tq_avg);
229 throw_uevent(bat_priv, UEV_GW, UEV_ADD, gw_addr);
230 } else {
231 bat_dbg(DBG_BATMAN, bat_priv,
232 "Changing route to gateway %pM "
233 "(gw_flags: %i, tq: %i)\n",
234 next_gw->orig_node->orig,
235 next_gw->orig_node->gw_flags,
236 router->tq_avg);
237 throw_uevent(bat_priv, UEV_GW, UEV_CHANGE, gw_addr);
238 }
239
240 gw_select(bat_priv, next_gw);
241
210out: 242out:
211 if (curr_gw) 243 if (curr_gw)
212 gw_node_free_ref(curr_gw); 244 gw_node_free_ref(curr_gw);
245 if (next_gw)
246 gw_node_free_ref(next_gw);
247 if (router)
248 neigh_node_free_ref(router);
213} 249}
214 250
215void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node) 251void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node)
@@ -273,11 +309,10 @@ static void gw_node_add(struct bat_priv *bat_priv,
273 struct gw_node *gw_node; 309 struct gw_node *gw_node;
274 int down, up; 310 int down, up;
275 311
276 gw_node = kmalloc(sizeof(struct gw_node), GFP_ATOMIC); 312 gw_node = kzalloc(sizeof(*gw_node), GFP_ATOMIC);
277 if (!gw_node) 313 if (!gw_node)
278 return; 314 return;
279 315
280 memset(gw_node, 0, sizeof(struct gw_node));
281 INIT_HLIST_NODE(&gw_node->list); 316 INIT_HLIST_NODE(&gw_node->list);
282 gw_node->orig_node = orig_node; 317 gw_node->orig_node = orig_node;
283 atomic_set(&gw_node->refcount, 1); 318 atomic_set(&gw_node->refcount, 1);
@@ -323,7 +358,7 @@ void gw_node_update(struct bat_priv *bat_priv,
323 358
324 gw_node->deleted = 0; 359 gw_node->deleted = 0;
325 360
326 if (new_gwflags == 0) { 361 if (new_gwflags == NO_FLAGS) {
327 gw_node->deleted = jiffies; 362 gw_node->deleted = jiffies;
328 bat_dbg(DBG_BATMAN, bat_priv, 363 bat_dbg(DBG_BATMAN, bat_priv,
329 "Gateway %pM removed from gateway list\n", 364 "Gateway %pM removed from gateway list\n",
@@ -336,7 +371,7 @@ void gw_node_update(struct bat_priv *bat_priv,
336 goto unlock; 371 goto unlock;
337 } 372 }
338 373
339 if (new_gwflags == 0) 374 if (new_gwflags == NO_FLAGS)
340 goto unlock; 375 goto unlock;
341 376
342 gw_node_add(bat_priv, orig_node, new_gwflags); 377 gw_node_add(bat_priv, orig_node, new_gwflags);
@@ -353,7 +388,7 @@ unlock:
353 388
354void gw_node_delete(struct bat_priv *bat_priv, struct orig_node *orig_node) 389void gw_node_delete(struct bat_priv *bat_priv, struct orig_node *orig_node)
355{ 390{
356 return gw_node_update(bat_priv, orig_node, 0); 391 gw_node_update(bat_priv, orig_node, 0);
357} 392}
358 393
359void gw_node_purge(struct bat_priv *bat_priv) 394void gw_node_purge(struct bat_priv *bat_priv)
@@ -361,7 +396,7 @@ void gw_node_purge(struct bat_priv *bat_priv)
361 struct gw_node *gw_node, *curr_gw; 396 struct gw_node *gw_node, *curr_gw;
362 struct hlist_node *node, *node_tmp; 397 struct hlist_node *node, *node_tmp;
363 unsigned long timeout = 2 * PURGE_TIMEOUT * HZ; 398 unsigned long timeout = 2 * PURGE_TIMEOUT * HZ;
364 char do_deselect = 0; 399 int do_deselect = 0;
365 400
366 curr_gw = gw_get_selected_gw_node(bat_priv); 401 curr_gw = gw_get_selected_gw_node(bat_priv);
367 402
@@ -394,8 +429,8 @@ void gw_node_purge(struct bat_priv *bat_priv)
394/** 429/**
395 * fails if orig_node has no router 430 * fails if orig_node has no router
396 */ 431 */
397static int _write_buffer_text(struct bat_priv *bat_priv, 432static int _write_buffer_text(struct bat_priv *bat_priv, struct seq_file *seq,
398 struct seq_file *seq, struct gw_node *gw_node) 433 const struct gw_node *gw_node)
399{ 434{
400 struct gw_node *curr_gw; 435 struct gw_node *curr_gw;
401 struct neigh_node *router; 436 struct neigh_node *router;
@@ -452,10 +487,9 @@ int gw_client_seq_print_text(struct seq_file *seq, void *offset)
452 } 487 }
453 488
454 seq_printf(seq, " %-12s (%s/%i) %17s [%10s]: gw_class ... " 489 seq_printf(seq, " %-12s (%s/%i) %17s [%10s]: gw_class ... "
455 "[B.A.T.M.A.N. adv %s%s, MainIF/MAC: %s/%pM (%s)]\n", 490 "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n",
456 "Gateway", "#", TQ_MAX_VALUE, "Nexthop", 491 "Gateway", "#", TQ_MAX_VALUE, "Nexthop",
457 "outgoingIF", SOURCE_VERSION, REVISION_VERSION_STR, 492 "outgoingIF", SOURCE_VERSION, primary_if->net_dev->name,
458 primary_if->net_dev->name,
459 primary_if->net_dev->dev_addr, net_dev->name); 493 primary_if->net_dev->dev_addr, net_dev->name);
460 494
461 rcu_read_lock(); 495 rcu_read_lock();
@@ -480,14 +514,75 @@ out:
480 return ret; 514 return ret;
481} 515}
482 516
483int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb) 517static bool is_type_dhcprequest(struct sk_buff *skb, int header_len)
518{
519 int ret = false;
520 unsigned char *p;
521 int pkt_len;
522
523 if (skb_linearize(skb) < 0)
524 goto out;
525
526 pkt_len = skb_headlen(skb);
527
528 if (pkt_len < header_len + DHCP_OPTIONS_OFFSET + 1)
529 goto out;
530
531 p = skb->data + header_len + DHCP_OPTIONS_OFFSET;
532 pkt_len -= header_len + DHCP_OPTIONS_OFFSET + 1;
533
534 /* Access the dhcp option lists. Each entry is made up by:
535 * - octect 1: option type
536 * - octect 2: option data len (only if type != 255 and 0)
537 * - octect 3: option data */
538 while (*p != 255 && !ret) {
539 /* p now points to the first octect: option type */
540 if (*p == 53) {
541 /* type 53 is the message type option.
542 * Jump the len octect and go to the data octect */
543 if (pkt_len < 2)
544 goto out;
545 p += 2;
546
547 /* check if the message type is what we need */
548 if (*p == DHCP_REQUEST)
549 ret = true;
550 break;
551 } else if (*p == 0) {
552 /* option type 0 (padding), just go forward */
553 if (pkt_len < 1)
554 goto out;
555 pkt_len--;
556 p++;
557 } else {
558 /* This is any other option. So we get the length... */
559 if (pkt_len < 1)
560 goto out;
561 pkt_len--;
562 p++;
563
564 /* ...and then we jump over the data */
565 if (pkt_len < *p)
566 goto out;
567 pkt_len -= *p;
568 p += (*p);
569 }
570 }
571out:
572 return ret;
573}
574
575int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb,
576 struct orig_node *old_gw)
484{ 577{
485 struct ethhdr *ethhdr; 578 struct ethhdr *ethhdr;
486 struct iphdr *iphdr; 579 struct iphdr *iphdr;
487 struct ipv6hdr *ipv6hdr; 580 struct ipv6hdr *ipv6hdr;
488 struct udphdr *udphdr; 581 struct udphdr *udphdr;
489 struct gw_node *curr_gw; 582 struct gw_node *curr_gw;
583 struct neigh_node *neigh_curr = NULL, *neigh_old = NULL;
490 unsigned int header_len = 0; 584 unsigned int header_len = 0;
585 int ret = 1;
491 586
492 if (atomic_read(&bat_priv->gw_mode) == GW_MODE_OFF) 587 if (atomic_read(&bat_priv->gw_mode) == GW_MODE_OFF)
493 return 0; 588 return 0;
@@ -509,7 +604,7 @@ int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb)
509 /* check for ip header */ 604 /* check for ip header */
510 switch (ntohs(ethhdr->h_proto)) { 605 switch (ntohs(ethhdr->h_proto)) {
511 case ETH_P_IP: 606 case ETH_P_IP:
512 if (!pskb_may_pull(skb, header_len + sizeof(struct iphdr))) 607 if (!pskb_may_pull(skb, header_len + sizeof(*iphdr)))
513 return 0; 608 return 0;
514 iphdr = (struct iphdr *)(skb->data + header_len); 609 iphdr = (struct iphdr *)(skb->data + header_len);
515 header_len += iphdr->ihl * 4; 610 header_len += iphdr->ihl * 4;
@@ -520,10 +615,10 @@ int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb)
520 615
521 break; 616 break;
522 case ETH_P_IPV6: 617 case ETH_P_IPV6:
523 if (!pskb_may_pull(skb, header_len + sizeof(struct ipv6hdr))) 618 if (!pskb_may_pull(skb, header_len + sizeof(*ipv6hdr)))
524 return 0; 619 return 0;
525 ipv6hdr = (struct ipv6hdr *)(skb->data + header_len); 620 ipv6hdr = (struct ipv6hdr *)(skb->data + header_len);
526 header_len += sizeof(struct ipv6hdr); 621 header_len += sizeof(*ipv6hdr);
527 622
528 /* check for udp header */ 623 /* check for udp header */
529 if (ipv6hdr->nexthdr != IPPROTO_UDP) 624 if (ipv6hdr->nexthdr != IPPROTO_UDP)
@@ -534,10 +629,10 @@ int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb)
534 return 0; 629 return 0;
535 } 630 }
536 631
537 if (!pskb_may_pull(skb, header_len + sizeof(struct udphdr))) 632 if (!pskb_may_pull(skb, header_len + sizeof(*udphdr)))
538 return 0; 633 return 0;
539 udphdr = (struct udphdr *)(skb->data + header_len); 634 udphdr = (struct udphdr *)(skb->data + header_len);
540 header_len += sizeof(struct udphdr); 635 header_len += sizeof(*udphdr);
541 636
542 /* check for bootp port */ 637 /* check for bootp port */
543 if ((ntohs(ethhdr->h_proto) == ETH_P_IP) && 638 if ((ntohs(ethhdr->h_proto) == ETH_P_IP) &&
@@ -555,7 +650,30 @@ int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb)
555 if (!curr_gw) 650 if (!curr_gw)
556 return 0; 651 return 0;
557 652
653 /* If old_gw != NULL then this packet is unicast.
654 * So, at this point we have to check the message type: if it is a
655 * DHCPREQUEST we have to decide whether to drop it or not */
656 if (old_gw && curr_gw->orig_node != old_gw) {
657 if (is_type_dhcprequest(skb, header_len)) {
658 /* If the dhcp packet has been sent to a different gw,
659 * we have to evaluate whether the old gw is still
660 * reliable enough */
661 neigh_curr = find_router(bat_priv, curr_gw->orig_node,
662 NULL);
663 neigh_old = find_router(bat_priv, old_gw, NULL);
664 if (!neigh_curr || !neigh_old)
665 goto free_neigh;
666 if (neigh_curr->tq_avg - neigh_old->tq_avg <
667 GW_THRESHOLD)
668 ret = -1;
669 }
670 }
671free_neigh:
672 if (neigh_old)
673 neigh_node_free_ref(neigh_old);
674 if (neigh_curr)
675 neigh_node_free_ref(neigh_curr);
558 if (curr_gw) 676 if (curr_gw)
559 gw_node_free_ref(curr_gw); 677 gw_node_free_ref(curr_gw);
560 return 1; 678 return ret;
561} 679}
diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h
index 1ce8c6066da..b9b983c07fe 100644
--- a/net/batman-adv/gateway_client.h
+++ b/net/batman-adv/gateway_client.h
@@ -31,6 +31,7 @@ void gw_node_update(struct bat_priv *bat_priv,
31void gw_node_delete(struct bat_priv *bat_priv, struct orig_node *orig_node); 31void gw_node_delete(struct bat_priv *bat_priv, struct orig_node *orig_node);
32void gw_node_purge(struct bat_priv *bat_priv); 32void gw_node_purge(struct bat_priv *bat_priv);
33int gw_client_seq_print_text(struct seq_file *seq, void *offset); 33int gw_client_seq_print_text(struct seq_file *seq, void *offset);
34int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb); 34int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb,
35 struct orig_node *old_gw);
35 36
36#endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */ 37#endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */
diff --git a/net/batman-adv/gateway_common.c b/net/batman-adv/gateway_common.c
index 50d3a59a3d7..18661af0bc3 100644
--- a/net/batman-adv/gateway_common.c
+++ b/net/batman-adv/gateway_common.c
@@ -61,9 +61,9 @@ static void kbit_to_gw_bandwidth(int down, int up, long *gw_srv_class)
 /* returns the up and downspeeds in kbit, calculated from the class */
 void gw_bandwidth_to_kbit(uint8_t gw_srv_class, int *down, int *up)
 {
-	char sbit = (gw_srv_class & 0x80) >> 7;
-	char dpart = (gw_srv_class & 0x78) >> 3;
-	char upart = (gw_srv_class & 0x07);
+	int sbit = (gw_srv_class & 0x80) >> 7;
+	int dpart = (gw_srv_class & 0x78) >> 3;
+	int upart = (gw_srv_class & 0x07);
 
 	if (!gw_srv_class) {
 		*down = 0;
@@ -76,10 +76,11 @@ void gw_bandwidth_to_kbit(uint8_t gw_srv_class, int *down, int *up)
 }
 
 static bool parse_gw_bandwidth(struct net_device *net_dev, char *buff,
-			       long *up, long *down)
+			       int *up, int *down)
 {
 	int ret, multi = 1;
 	char *slash_ptr, *tmp_ptr;
+	long ldown, lup;
 
 	slash_ptr = strchr(buff, '/');
 	if (slash_ptr)
@@ -96,7 +97,7 @@ static bool parse_gw_bandwidth(struct net_device *net_dev, char *buff,
 		*tmp_ptr = '\0';
 	}
 
-	ret = strict_strtoul(buff, 10, down);
+	ret = strict_strtol(buff, 10, &ldown);
 	if (ret) {
 		bat_err(net_dev,
 			"Download speed of gateway mode invalid: %s\n",
@@ -104,7 +105,7 @@ static bool parse_gw_bandwidth(struct net_device *net_dev, char *buff,
 		return false;
 	}
 
-	*down *= multi;
+	*down = ldown * multi;
 
 	/* we also got some upload info */
 	if (slash_ptr) {
@@ -121,7 +122,7 @@ static bool parse_gw_bandwidth(struct net_device *net_dev, char *buff,
 			*tmp_ptr = '\0';
 		}
 
-		ret = strict_strtoul(slash_ptr + 1, 10, up);
+		ret = strict_strtol(slash_ptr + 1, 10, &lup);
 		if (ret) {
 			bat_err(net_dev,
 				"Upload speed of gateway mode invalid: "
@@ -129,7 +130,7 @@ static bool parse_gw_bandwidth(struct net_device *net_dev, char *buff,
 			return false;
 		}
 
-		*up *= multi;
+		*up = lup * multi;
 	}
 
 	return true;
@@ -138,7 +139,8 @@ static bool parse_gw_bandwidth(struct net_device *net_dev, char *buff,
 ssize_t gw_bandwidth_set(struct net_device *net_dev, char *buff, size_t count)
 {
 	struct bat_priv *bat_priv = netdev_priv(net_dev);
-	long gw_bandwidth_tmp = 0, up = 0, down = 0;
+	long gw_bandwidth_tmp = 0;
+	int up = 0, down = 0;
 	bool ret;
 
 	ret = parse_gw_bandwidth(net_dev, buff, &up, &down);
@@ -158,12 +160,11 @@ ssize_t gw_bandwidth_set(struct net_device *net_dev, char *buff, size_t count)
 	 * speeds, hence we need to calculate it back to show the number
 	 * that is going to be propagated
 	 **/
-	gw_bandwidth_to_kbit((uint8_t)gw_bandwidth_tmp,
-			     (int *)&down, (int *)&up);
+	gw_bandwidth_to_kbit((uint8_t)gw_bandwidth_tmp, &down, &up);
 
 	gw_deselect(bat_priv);
 	bat_info(net_dev, "Changing gateway bandwidth from: '%i' to: '%ld' "
-		 "(propagating: %ld%s/%ld%s)\n",
+		 "(propagating: %d%s/%d%s)\n",
 		 atomic_read(&bat_priv->gw_bandwidth), gw_bandwidth_tmp,
 		 (down > 2048 ? down / 1024 : down),
 		 (down > 2048 ? "MBit" : "KBit"),
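The hunks above only show the bit extraction; the arithmetic that turns sbit/dpart/upart into kbit values lives in the unchanged middle of gw_bandwidth_to_kbit(). A self-contained sketch of the decode path, with the kbit formulas reproduced from the surrounding file as best we recall them; decode_gw_class() and the main() harness are illustrative, not batman-adv code:

#include <stdint.h>
#include <stdio.h>

/* The 8-bit gateway class packs a scale bit, a 4-bit download part and
 * a 3-bit upload part. */
static void decode_gw_class(uint8_t gw_srv_class, int *down, int *up)
{
	int sbit = (gw_srv_class & 0x80) >> 7;	/* scale bit */
	int dpart = (gw_srv_class & 0x78) >> 3;	/* download part */
	int upart = (gw_srv_class & 0x07);	/* upload part */

	if (!gw_srv_class) {			/* class 0: no gateway */
		*down = 0;
		*up = 0;
		return;
	}

	*down = 32 * (sbit + 2) * (1 << dpart);	/* kbit */
	*up = ((upart + 1) * (*down)) / 8;	/* kbit */
}

int main(void)
{
	int down, up;

	decode_gw_class(49, &down, &up);	/* arbitrary example class */
	printf("%d kbit down / %d kbit up\n", down, up);
	return 0;
}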
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index dfbfccc9fe4..db7aacf1e09 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -46,7 +46,7 @@ void hardif_free_rcu(struct rcu_head *rcu)
 	kfree(hard_iface);
 }
 
-struct hard_iface *hardif_get_by_netdev(struct net_device *net_dev)
+struct hard_iface *hardif_get_by_netdev(const struct net_device *net_dev)
 {
 	struct hard_iface *hard_iface;
 
@@ -64,7 +64,7 @@ out:
 	return hard_iface;
 }
 
-static int is_valid_iface(struct net_device *net_dev)
+static int is_valid_iface(const struct net_device *net_dev)
 {
 	if (net_dev->flags & IFF_LOOPBACK)
 		return 0;
@@ -86,7 +86,7 @@ static int is_valid_iface(struct net_device *net_dev)
 	return 1;
 }
 
-static struct hard_iface *hardif_get_active(struct net_device *soft_iface)
+static struct hard_iface *hardif_get_active(const struct net_device *soft_iface)
 {
 	struct hard_iface *hard_iface;
 
@@ -138,7 +138,7 @@ static void primary_if_select(struct bat_priv *bat_priv,
 	if (new_hard_iface && !atomic_inc_not_zero(&new_hard_iface->refcount))
 		new_hard_iface = NULL;
 
-	curr_hard_iface = bat_priv->primary_if;
+	curr_hard_iface = rcu_dereference_protected(bat_priv->primary_if, 1);
 	rcu_assign_pointer(bat_priv->primary_if, new_hard_iface);
 
 	if (curr_hard_iface)
@@ -152,15 +152,9 @@ static void primary_if_select(struct bat_priv *bat_priv,
 	batman_packet->ttl = TTL;
 
 	primary_if_update_addr(bat_priv);
-
-	/***
-	 * hacky trick to make sure that we send the TT information via
-	 * our new primary interface
-	 */
-	atomic_set(&bat_priv->tt_local_changed, 1);
 }
 
-static bool hardif_is_iface_up(struct hard_iface *hard_iface)
+static bool hardif_is_iface_up(const struct hard_iface *hard_iface)
 {
 	if (hard_iface->net_dev->flags & IFF_UP)
 		return true;
@@ -176,9 +170,9 @@ static void update_mac_addresses(struct hard_iface *hard_iface)
 	       hard_iface->net_dev->dev_addr, ETH_ALEN);
 }
 
-static void check_known_mac_addr(struct net_device *net_dev)
+static void check_known_mac_addr(const struct net_device *net_dev)
 {
-	struct hard_iface *hard_iface;
+	const struct hard_iface *hard_iface;
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
@@ -204,8 +198,8 @@ static void check_known_mac_addr(struct net_device *net_dev)
 
 int hardif_min_mtu(struct net_device *soft_iface)
 {
-	struct bat_priv *bat_priv = netdev_priv(soft_iface);
-	struct hard_iface *hard_iface;
+	const struct bat_priv *bat_priv = netdev_priv(soft_iface);
+	const struct hard_iface *hard_iface;
 	/* allow big frames if all devices are capable to do so
 	 * (have MTU > 1500 + BAT_HEADER_LEN) */
 	int min_mtu = ETH_DATA_LEN;
@@ -285,7 +279,8 @@ static void hardif_deactivate_interface(struct hard_iface *hard_iface)
 	update_min_mtu(hard_iface->soft_iface);
 }
 
-int hardif_enable_interface(struct hard_iface *hard_iface, char *iface_name)
+int hardif_enable_interface(struct hard_iface *hard_iface,
+			    const char *iface_name)
 {
 	struct bat_priv *bat_priv;
 	struct batman_packet *batman_packet;
@@ -336,10 +331,11 @@ int hardif_enable_interface(struct hard_iface *hard_iface, char *iface_name)
 	batman_packet = (struct batman_packet *)(hard_iface->packet_buff);
 	batman_packet->packet_type = BAT_PACKET;
 	batman_packet->version = COMPAT_VERSION;
-	batman_packet->flags = 0;
+	batman_packet->flags = NO_FLAGS;
 	batman_packet->ttl = 2;
 	batman_packet->tq = TQ_MAX_VALUE;
-	batman_packet->num_tt = 0;
+	batman_packet->tt_num_changes = 0;
+	batman_packet->ttvn = 0;
 
 	hard_iface->if_num = bat_priv->num_ifaces;
 	bat_priv->num_ifaces++;
@@ -458,7 +454,7 @@ static struct hard_iface *hardif_add_interface(struct net_device *net_dev)
 
 	dev_hold(net_dev);
 
-	hard_iface = kmalloc(sizeof(struct hard_iface), GFP_ATOMIC);
+	hard_iface = kmalloc(sizeof(*hard_iface), GFP_ATOMIC);
 	if (!hard_iface) {
 		pr_err("Can't add interface (%s): out of memory\n",
 		       net_dev->name);
@@ -522,7 +518,7 @@ void hardif_remove_interfaces(void)
 static int hard_if_event(struct notifier_block *this,
 			 unsigned long event, void *ptr)
 {
-	struct net_device *net_dev = (struct net_device *)ptr;
+	struct net_device *net_dev = ptr;
 	struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev);
 	struct hard_iface *primary_if = NULL;
 	struct bat_priv *bat_priv;
@@ -567,7 +563,7 @@ static int hard_if_event(struct notifier_block *this,
 		break;
 	default:
 		break;
-	};
+	}
 
 hardif_put:
 	hardif_free_ref(hard_iface);
@@ -658,6 +654,14 @@ static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
 	case BAT_VIS:
 		ret = recv_vis_packet(skb, hard_iface);
 		break;
+	/* Translation table query (request or response) */
+	case BAT_TT_QUERY:
+		ret = recv_tt_query(skb, hard_iface);
+		break;
+	/* Roaming advertisement */
+	case BAT_ROAM_ADV:
+		ret = recv_roam_adv(skb, hard_iface);
+		break;
 	default:
 		ret = NET_RX_DROP;
 	}
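The primary_if_select() hunk above replaces a plain pointer read with rcu_dereference_protected() paired with rcu_assign_pointer(). A minimal kernel-side sketch of that writer pattern under invented names (my_priv, my_primary_swap); note the patch itself passes the constant 1 as the protection condition rather than a lockdep expression:

/* Writer already serialized by its own lock: it may fetch the old
 * pointer with rcu_dereference_protected() (no RCU read section
 * needed), then publish the replacement with rcu_assign_pointer(). */
struct my_priv {
	struct hard_iface __rcu *primary_if;
	spinlock_t my_lock;		/* serializes writers */
};

static void my_primary_swap(struct my_priv *priv,
			    struct hard_iface *new_iface)
{
	struct hard_iface *old;

	spin_lock_bh(&priv->my_lock);
	old = rcu_dereference_protected(priv->primary_if,
					lockdep_is_held(&priv->my_lock));
	rcu_assign_pointer(priv->primary_if, new_iface);
	spin_unlock_bh(&priv->my_lock);

	/* readers still holding 'old' are allowed to finish */
	if (old)
		hardif_free_ref(old);	/* drop ref; freeing is RCU-deferred */
}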
diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h
index 64265991460..442eacbc9e3 100644
--- a/net/batman-adv/hard-interface.h
+++ b/net/batman-adv/hard-interface.h
@@ -22,17 +22,21 @@
 #ifndef _NET_BATMAN_ADV_HARD_INTERFACE_H_
 #define _NET_BATMAN_ADV_HARD_INTERFACE_H_
 
-#define IF_NOT_IN_USE 0
-#define IF_TO_BE_REMOVED 1
-#define IF_INACTIVE 2
-#define IF_ACTIVE 3
-#define IF_TO_BE_ACTIVATED 4
-#define IF_I_WANT_YOU 5
+enum hard_if_state {
+	IF_NOT_IN_USE,
+	IF_TO_BE_REMOVED,
+	IF_INACTIVE,
+	IF_ACTIVE,
+	IF_TO_BE_ACTIVATED,
+	IF_I_WANT_YOU
+};
 
 extern struct notifier_block hard_if_notifier;
 
-struct hard_iface *hardif_get_by_netdev(struct net_device *net_dev);
-int hardif_enable_interface(struct hard_iface *hard_iface, char *iface_name);
+struct hard_iface*
+hardif_get_by_netdev(const struct net_device *net_dev);
+int hardif_enable_interface(struct hard_iface *hard_iface,
+			    const char *iface_name);
 void hardif_disable_interface(struct hard_iface *hard_iface);
 void hardif_remove_interfaces(void);
 int hardif_min_mtu(struct net_device *soft_iface);
diff --git a/net/batman-adv/hash.c b/net/batman-adv/hash.c
index c5213d8f2cc..2a172505f51 100644
--- a/net/batman-adv/hash.c
+++ b/net/batman-adv/hash.c
@@ -46,15 +46,16 @@ struct hashtable_t *hash_new(int size)
 {
 	struct hashtable_t *hash;
 
-	hash = kmalloc(sizeof(struct hashtable_t), GFP_ATOMIC);
+	hash = kmalloc(sizeof(*hash), GFP_ATOMIC);
 	if (!hash)
 		return NULL;
 
-	hash->table = kmalloc(sizeof(struct element_t *) * size, GFP_ATOMIC);
+	hash->table = kmalloc(sizeof(*hash->table) * size, GFP_ATOMIC);
 	if (!hash->table)
 		goto free_hash;
 
-	hash->list_locks = kmalloc(sizeof(spinlock_t) * size, GFP_ATOMIC);
+	hash->list_locks = kmalloc(sizeof(*hash->list_locks) * size,
+				   GFP_ATOMIC);
 	if (!hash->list_locks)
 		goto free_table;
 
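hash_new() now sizes each allocation from the pointer it assigns to rather than from a spelled-out type. An illustrative userspace rendition of the idiom (struct foo and do_alloc() are hypothetical, not batman-adv symbols):

#include <stdlib.h>

struct foo {
	int a;
	double b;
};

static struct foo *do_alloc(void)
{
	struct foo *p;

	/* sizeof(*p) tracks p's pointee type: if 'p' is ever retyped,
	 * the allocation size follows automatically, whereas
	 * sizeof(struct foo) would silently go stale. */
	p = malloc(sizeof(*p));
	return p;			/* caller checks for NULL */
}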
diff --git a/net/batman-adv/hash.h b/net/batman-adv/hash.h
index 434822b2747..dd5c9fd7a90 100644
--- a/net/batman-adv/hash.h
+++ b/net/batman-adv/hash.h
@@ -28,12 +28,12 @@
  * compare 2 element datas for their keys,
  * return 0 if same and not 0 if not
  * same */
-typedef int (*hashdata_compare_cb)(struct hlist_node *, void *);
+typedef int (*hashdata_compare_cb)(const struct hlist_node *, const void *);
 
 /* the hashfunction, should return an index
  * based on the key in the data of the first
  * argument and the size the second */
-typedef int (*hashdata_choose_cb)(void *, int);
+typedef int (*hashdata_choose_cb)(const void *, int);
 typedef void (*hashdata_free_cb)(struct hlist_node *, void *);
 
 struct hashtable_t {
@@ -80,7 +80,7 @@ static inline void hash_delete(struct hashtable_t *hash,
 static inline int hash_add(struct hashtable_t *hash,
 			   hashdata_compare_cb compare,
 			   hashdata_choose_cb choose,
-			   void *data, struct hlist_node *data_node)
+			   const void *data, struct hlist_node *data_node)
 {
 	int index;
 	struct hlist_head *head;
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index fa22ba2bb83..ac3520e057c 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -46,7 +46,7 @@ static int bat_socket_open(struct inode *inode, struct file *file)
 
 	nonseekable_open(inode, file);
 
-	socket_client = kmalloc(sizeof(struct socket_client), GFP_KERNEL);
+	socket_client = kmalloc(sizeof(*socket_client), GFP_KERNEL);
 
 	if (!socket_client)
 		return -ENOMEM;
@@ -310,7 +310,7 @@ static void bat_socket_add_packet(struct socket_client *socket_client,
 {
 	struct socket_packet *socket_packet;
 
-	socket_packet = kmalloc(sizeof(struct socket_packet), GFP_ATOMIC);
+	socket_packet = kmalloc(sizeof(*socket_packet), GFP_ATOMIC);
 
 	if (!socket_packet)
 		return;
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 0a7cee0076f..b0f9068ade5 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -58,9 +58,8 @@ static int __init batman_init(void)
 
 	register_netdevice_notifier(&hard_if_notifier);
 
-	pr_info("B.A.T.M.A.N. advanced %s%s (compatibility version %i) "
-		"loaded\n", SOURCE_VERSION, REVISION_VERSION_STR,
-		COMPAT_VERSION);
+	pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) "
+		"loaded\n", SOURCE_VERSION, COMPAT_VERSION);
 
 	return 0;
 }
@@ -84,8 +83,10 @@ int mesh_init(struct net_device *soft_iface)
 
 	spin_lock_init(&bat_priv->forw_bat_list_lock);
 	spin_lock_init(&bat_priv->forw_bcast_list_lock);
-	spin_lock_init(&bat_priv->tt_lhash_lock);
-	spin_lock_init(&bat_priv->tt_ghash_lock);
+	spin_lock_init(&bat_priv->tt_changes_list_lock);
+	spin_lock_init(&bat_priv->tt_req_list_lock);
+	spin_lock_init(&bat_priv->tt_roam_list_lock);
+	spin_lock_init(&bat_priv->tt_buff_lock);
 	spin_lock_init(&bat_priv->gw_list_lock);
 	spin_lock_init(&bat_priv->vis_hash_lock);
 	spin_lock_init(&bat_priv->vis_list_lock);
@@ -96,14 +97,14 @@ int mesh_init(struct net_device *soft_iface)
 	INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
 	INIT_HLIST_HEAD(&bat_priv->gw_list);
 	INIT_HLIST_HEAD(&bat_priv->softif_neigh_vids);
+	INIT_LIST_HEAD(&bat_priv->tt_changes_list);
+	INIT_LIST_HEAD(&bat_priv->tt_req_list);
+	INIT_LIST_HEAD(&bat_priv->tt_roam_list);
 
 	if (originator_init(bat_priv) < 1)
 		goto err;
 
-	if (tt_local_init(bat_priv) < 1)
-		goto err;
-
-	if (tt_global_init(bat_priv) < 1)
+	if (tt_init(bat_priv) < 1)
 		goto err;
 
 	tt_local_add(soft_iface, soft_iface->dev_addr);
@@ -111,6 +112,7 @@ int mesh_init(struct net_device *soft_iface)
 	if (vis_init(bat_priv) < 1)
 		goto err;
 
+	atomic_set(&bat_priv->gw_reselect, 0);
 	atomic_set(&bat_priv->mesh_state, MESH_ACTIVE);
 	goto end;
 
@@ -137,8 +139,7 @@ void mesh_free(struct net_device *soft_iface)
 	gw_node_purge(bat_priv);
 	originator_free(bat_priv);
 
-	tt_local_free(bat_priv);
-	tt_global_free(bat_priv);
+	tt_free(bat_priv);
 
 	softif_neigh_purge(bat_priv);
 
@@ -155,9 +156,9 @@ void dec_module_count(void)
 	module_put(THIS_MODULE);
 }
 
-int is_my_mac(uint8_t *addr)
+int is_my_mac(const uint8_t *addr)
 {
-	struct hard_iface *hard_iface;
+	const struct hard_iface *hard_iface;
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
@@ -182,8 +183,4 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
 MODULE_SUPPORTED_DEVICE(DRIVER_DEVICE);
-#ifdef REVISION_VERSION
-MODULE_VERSION(SOURCE_VERSION "-" REVISION_VERSION);
-#else
 MODULE_VERSION(SOURCE_VERSION);
-#endif
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 148b49e0264..a6df61a6933 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -27,8 +27,9 @@
 #define DRIVER_DESC   "B.A.T.M.A.N. advanced"
 #define DRIVER_DEVICE "batman-adv"
 
-#define SOURCE_VERSION "next"
-
+#ifndef SOURCE_VERSION
+#define SOURCE_VERSION "2011.3.0"
+#endif
 
 /* B.A.T.M.A.N. parameters */
 
@@ -42,15 +43,25 @@
  * -> TODO: check influence on TQ_LOCAL_WINDOW_SIZE */
 #define PURGE_TIMEOUT 200
 #define TT_LOCAL_TIMEOUT 3600 /* in seconds */
-
+#define TT_CLIENT_ROAM_TIMEOUT 600
 /* sliding packet range of received originator messages in squence numbers
  * (should be a multiple of our word size) */
 #define TQ_LOCAL_WINDOW_SIZE 64
+#define TT_REQUEST_TIMEOUT 3 /* seconds we have to keep pending tt_req */
+
 #define TQ_GLOBAL_WINDOW_SIZE 5
 #define TQ_LOCAL_BIDRECT_SEND_MINIMUM 1
 #define TQ_LOCAL_BIDRECT_RECV_MINIMUM 1
 #define TQ_TOTAL_BIDRECT_LIMIT 1
 
+#define TT_OGM_APPEND_MAX 3 /* number of OGMs sent with the last tt diff */
+
+#define ROAMING_MAX_TIME 20 /* Time in which a client can roam at most
+			     * ROAMING_MAX_COUNT times */
+#define ROAMING_MAX_COUNT 5
+
+#define NO_FLAGS 0
+
 #define NUM_WORDS (TQ_LOCAL_WINDOW_SIZE / WORD_BIT_SIZE)
 
 #define LOG_BUF_LEN 8192 /* has to be a power of 2 */
@@ -72,13 +83,27 @@
 #define RESET_PROTECTION_MS 30000
 #define EXPECTED_SEQNO_RANGE 65536
 
-#define MESH_INACTIVE 0
-#define MESH_ACTIVE 1
-#define MESH_DEACTIVATING 2
+enum mesh_state {
+	MESH_INACTIVE,
+	MESH_ACTIVE,
+	MESH_DEACTIVATING
+};
 
 #define BCAST_QUEUE_LEN 256
 #define BATMAN_QUEUE_LEN 256
 
+enum uev_action {
+	UEV_ADD = 0,
+	UEV_DEL,
+	UEV_CHANGE
+};
+
+enum uev_type {
+	UEV_GW = 0
+};
+
+#define GW_THRESHOLD 50
+
 /*
  * Debug Messages
  */
@@ -89,10 +114,12 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 /* all messages related to routing / flooding / broadcasting / etc */
-#define DBG_BATMAN 1
-/* route or tt entry added / changed / deleted */
-#define DBG_ROUTES 2
-#define DBG_ALL 3
+enum dbg_level {
+	DBG_BATMAN = 1 << 0,
+	DBG_ROUTES = 1 << 1, /* route added / changed / deleted */
+	DBG_TT	   = 1 << 2, /* translation table operations */
+	DBG_ALL    = 7
+};
 
 
 /*
@@ -118,12 +145,6 @@
 #include <linux/seq_file.h>
 #include "types.h"
 
-#ifndef REVISION_VERSION
-#define REVISION_VERSION_STR ""
-#else
-#define REVISION_VERSION_STR " "REVISION_VERSION
-#endif
-
 extern struct list_head hardif_list;
 
 extern unsigned char broadcast_addr[];
@@ -133,10 +154,10 @@ int mesh_init(struct net_device *soft_iface);
 void mesh_free(struct net_device *soft_iface);
 void inc_module_count(void);
 void dec_module_count(void);
-int is_my_mac(uint8_t *addr);
+int is_my_mac(const uint8_t *addr);
 
 #ifdef CONFIG_BATMAN_ADV_DEBUG
-int debug_log(struct bat_priv *bat_priv, char *fmt, ...);
+int debug_log(struct bat_priv *bat_priv, const char *fmt, ...) __printf(2, 3);
 
 #define bat_dbg(type, bat_priv, fmt, arg...) \
 	do {					\
@@ -145,9 +166,10 @@ int debug_log(struct bat_priv *bat_priv, const char *fmt, ...) __printf(2, 3);
 	}					\
 	while (0)
 #else /* !CONFIG_BATMAN_ADV_DEBUG */
-static inline void bat_dbg(char type __always_unused,
+__printf(3, 4)
+static inline void bat_dbg(int type __always_unused,
 			   struct bat_priv *bat_priv __always_unused,
-			   char *fmt __always_unused, ...)
+			   const char *fmt __always_unused, ...)
 {
 }
 #endif
@@ -172,11 +194,32 @@ static inline void bat_dbg(char type __always_unused,
  *
  * note: can't use compare_ether_addr() as it requires aligned memory
  */
-static inline int compare_eth(void *data1, void *data2)
+
+static inline int compare_eth(const void *data1, const void *data2)
 {
 	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
 }
 
+
 #define atomic_dec_not_zero(v)	atomic_add_unless((v), -1, 0)
 
+/* Returns the smallest signed integer in two's complement with the sizeof x */
+#define smallest_signed_int(x) (1u << (7u + 8u * (sizeof(x) - 1u)))
+
+/* Checks if a sequence number x is a predecessor/successor of y.
+ * they handle overflows/underflows and can correctly check for a
+ * predecessor/successor unless the variable sequence number has grown by
+ * more then 2**(bitwidth(x)-1)-1.
+ * This means that for a uint8_t with the maximum value 255, it would think:
+ *  - when adding nothing - it is neither a predecessor nor a successor
+ *  - before adding more than 127 to the starting value - it is a predecessor,
+ *  - when adding 128 - it is neither a predecessor nor a successor,
+ *  - after adding more than 127 to the starting value - it is a successor */
+#define seq_before(x, y) ({typeof(x) _d1 = (x); \
+			  typeof(y) _d2 = (y); \
+			  typeof(x) _dummy = (_d1 - _d2); \
+			  (void) (&_d1 == &_d2); \
+			  _dummy > smallest_signed_int(_dummy); })
+#define seq_after(x, y) seq_before(y, x)
+
 #endif /* _NET_BATMAN_ADV_MAIN_H_ */
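The seq_before()/seq_after() macros added above rely on unsigned wrap-around. A standalone harness (GCC extensions needed for typeof and statement expressions) confirming the behaviour the source comment describes; the test values are illustrative:

#include <stdint.h>
#include <stdio.h>

#define smallest_signed_int(x) (1u << (7u + 8u * (sizeof(x) - 1u)))
#define seq_before(x, y) ({typeof(x) _d1 = (x); \
			  typeof(y) _d2 = (y); \
			  typeof(x) _dummy = (_d1 - _d2); \
			  (void) (&_d1 == &_d2); \
			  _dummy > smallest_signed_int(_dummy); })
#define seq_after(x, y) seq_before(y, x)

int main(void)
{
	uint8_t a = 250, b = 3;	/* b wrapped past 255 back to 3 */

	/* 250 is still "before" 3: (uint8_t)(250 - 3) = 247 > 128 */
	printf("seq_before(250, 3) = %d\n", (int)seq_before(a, b)); /* 1 */
	printf("seq_after(3, 250)  = %d\n", (int)seq_after(b, a));  /* 1 */
	/* the other direction: (uint8_t)(3 - 250) = 9, not > 128 */
	printf("seq_before(3, 250) = %d\n", (int)seq_before(b, a)); /* 0 */
	return 0;
}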
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 40a30bbcd14..f3c3f620d19 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -37,6 +37,14 @@ static void start_purge_timer(struct bat_priv *bat_priv)
 	queue_delayed_work(bat_event_workqueue, &bat_priv->orig_work, 1 * HZ);
 }
 
+/* returns 1 if they are the same originator */
+static int compare_orig(const struct hlist_node *node, const void *data2)
+{
+	const void *data1 = container_of(node, struct orig_node, hash_entry);
+
+	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
+}
+
 int originator_init(struct bat_priv *bat_priv)
 {
 	if (bat_priv->orig_hash)
@@ -77,7 +85,7 @@ struct neigh_node *orig_node_get_router(struct orig_node *orig_node)
 
 struct neigh_node *create_neighbor(struct orig_node *orig_node,
 				   struct orig_node *orig_neigh_node,
-				   uint8_t *neigh,
+				   const uint8_t *neigh,
 				   struct hard_iface *if_incoming)
 {
 	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
@@ -86,7 +94,7 @@ struct neigh_node *create_neighbor(struct orig_node *orig_node,
 	bat_dbg(DBG_BATMAN, bat_priv,
 		"Creating new last-hop neighbor of originator\n");
 
-	neigh_node = kzalloc(sizeof(struct neigh_node), GFP_ATOMIC);
+	neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
 	if (!neigh_node)
 		return NULL;
 
@@ -137,6 +145,7 @@ static void orig_node_free_rcu(struct rcu_head *rcu)
 	tt_global_del_orig(orig_node->bat_priv, orig_node,
 			   "originator timed out");
 
+	kfree(orig_node->tt_buff);
 	kfree(orig_node->bcast_own);
 	kfree(orig_node->bcast_own_sum);
 	kfree(orig_node);
@@ -183,7 +192,7 @@ void originator_free(struct bat_priv *bat_priv)
 
 /* this function finds or creates an originator entry for the given
  * address if it does not exits */
-struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr)
+struct orig_node *get_orig_node(struct bat_priv *bat_priv, const uint8_t *addr)
 {
 	struct orig_node *orig_node;
 	int size;
@@ -196,7 +205,7 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr)
 	bat_dbg(DBG_BATMAN, bat_priv,
 		"Creating new originator: %pM\n", addr);
 
-	orig_node = kzalloc(sizeof(struct orig_node), GFP_ATOMIC);
+	orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC);
 	if (!orig_node)
 		return NULL;
 
@@ -205,14 +214,20 @@ struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr)
 	spin_lock_init(&orig_node->ogm_cnt_lock);
 	spin_lock_init(&orig_node->bcast_seqno_lock);
 	spin_lock_init(&orig_node->neigh_list_lock);
+	spin_lock_init(&orig_node->tt_buff_lock);
 
 	/* extra reference for return */
 	atomic_set(&orig_node->refcount, 2);
 
+	orig_node->tt_poss_change = false;
 	orig_node->bat_priv = bat_priv;
 	memcpy(orig_node->orig, addr, ETH_ALEN);
 	orig_node->router = NULL;
+	orig_node->tt_crc = 0;
+	atomic_set(&orig_node->last_ttvn, 0);
 	orig_node->tt_buff = NULL;
+	orig_node->tt_buff_len = 0;
+	atomic_set(&orig_node->tt_size, 0);
 	orig_node->bcast_seqno_reset = jiffies - 1
 		- msecs_to_jiffies(RESET_PROTECTION_MS);
 	orig_node->batman_seqno_reset = jiffies - 1
@@ -322,9 +337,7 @@ static bool purge_orig_node(struct bat_priv *bat_priv,
 			if (purge_orig_neighbors(bat_priv, orig_node,
 						 &best_neigh_node)) {
 				update_routes(bat_priv, orig_node,
-					      best_neigh_node,
-					      orig_node->tt_buff,
-					      orig_node->tt_buff_len);
+					      best_neigh_node);
 			}
 		}
 
@@ -419,9 +432,8 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
 		goto out;
 	}
 
-	seq_printf(seq, "[B.A.T.M.A.N. adv %s%s, MainIF/MAC: %s/%pM (%s)]\n",
-		   SOURCE_VERSION, REVISION_VERSION_STR,
-		   primary_if->net_dev->name,
+	seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s)]\n",
+		   SOURCE_VERSION, primary_if->net_dev->name,
 		   primary_if->net_dev->dev_addr, net_dev->name);
 	seq_printf(seq, "  %-15s %s (%s/%i) %17s [%10s]: %20s ...\n",
 		   "Originator", "last-seen", "#", TQ_MAX_VALUE, "Nexthop",
@@ -559,7 +571,7 @@ static int orig_node_del_if(struct orig_node *orig_node,
 	memcpy(data_ptr, orig_node->bcast_own, del_if_num * chunk_size);
 
 	/* copy second part */
-	memcpy(data_ptr + del_if_num * chunk_size,
+	memcpy((char *)data_ptr + del_if_num * chunk_size,
 	       orig_node->bcast_own + ((del_if_num + 1) * chunk_size),
 	       (max_if_num - del_if_num) * chunk_size);
 
@@ -579,7 +591,7 @@ free_bcast_own:
 	memcpy(data_ptr, orig_node->bcast_own_sum,
 	       del_if_num * sizeof(uint8_t));
 
-	memcpy(data_ptr + del_if_num * sizeof(uint8_t),
+	memcpy((char *)data_ptr + del_if_num * sizeof(uint8_t),
 	       orig_node->bcast_own_sum + ((del_if_num + 1) * sizeof(uint8_t)),
 	       (max_if_num - del_if_num) * sizeof(uint8_t));
 
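The two memcpy() hunks above cast data_ptr to char * before adding a byte offset: arithmetic on void * is a GCC extension (treated as if the element size were 1), not ISO C, so the cast makes the intent explicit and portable. A one-function illustration (copy_tail() is a hypothetical helper, not batman-adv code):

#include <string.h>

static void copy_tail(void *dst, const char *src, size_t skip, size_t len)
{
	/* standard-conforming: convert to char * before offsetting */
	memcpy((char *)dst + skip, src + skip, len);
}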
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index e1d641f27aa..cfc1f60a96a 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -28,10 +28,10 @@ int originator_init(struct bat_priv *bat_priv);
 void originator_free(struct bat_priv *bat_priv);
 void purge_orig_ref(struct bat_priv *bat_priv);
 void orig_node_free_ref(struct orig_node *orig_node);
-struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr);
+struct orig_node *get_orig_node(struct bat_priv *bat_priv, const uint8_t *addr);
 struct neigh_node *create_neighbor(struct orig_node *orig_node,
 				   struct orig_node *orig_neigh_node,
-				   uint8_t *neigh,
+				   const uint8_t *neigh,
 				   struct hard_iface *if_incoming);
 void neigh_node_free_ref(struct neigh_node *neigh_node);
 struct neigh_node *orig_node_get_router(struct orig_node *orig_node);
@@ -40,19 +40,11 @@ int orig_hash_add_if(struct hard_iface *hard_iface, int max_if_num);
 int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num);
 
 
-/* returns 1 if they are the same originator */
-static inline int compare_orig(struct hlist_node *node, void *data2)
-{
-	void *data1 = container_of(node, struct orig_node, hash_entry);
-
-	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
-}
-
 /* hashfunction to choose an entry in a hash table of given size */
 /* hash algorithm from http://en.wikipedia.org/wiki/Hash_table */
-static inline int choose_orig(void *data, int32_t size)
+static inline int choose_orig(const void *data, int32_t size)
 {
-	unsigned char *key = data;
+	const unsigned char *key = data;
 	uint32_t hash = 0;
 	size_t i;
 
@@ -70,7 +62,7 @@ static inline int choose_orig(void *data, int32_t size)
 }
 
 static inline struct orig_node *orig_hash_find(struct bat_priv *bat_priv,
-					       void *data)
+					       const void *data)
 {
 	struct hashtable_t *hash = bat_priv->orig_hash;
 	struct hlist_head *head;
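choose_orig() applies the one-at-a-time hash from the Wikipedia page cited in the source to the 6-byte MAC address. Its loop body falls outside the hunk context above, so the following userspace version is reconstructed from that algorithm rather than copied from the file:

#include <stdint.h>
#include <stddef.h>

static int choose_mac(const void *data, int32_t size)
{
	const unsigned char *key = data;
	uint32_t hash = 0;
	size_t i;

	for (i = 0; i < 6; i++) {	/* ETH_ALEN bytes */
		hash += key[i];
		hash += (hash << 10);
		hash ^= (hash >> 6);
	}

	hash += (hash << 3);		/* final avalanche */
	hash ^= (hash >> 11);
	hash += (hash << 15);

	return hash % size;		/* bucket index */
}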
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index eda99650e9f..b76b4be10b9 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -24,46 +24,84 @@
 
 #define ETH_P_BATMAN 0x4305 /* unofficial/not registered Ethertype */
 
-#define BAT_PACKET 0x01
-#define BAT_ICMP 0x02
-#define BAT_UNICAST 0x03
-#define BAT_BCAST 0x04
-#define BAT_VIS 0x05
-#define BAT_UNICAST_FRAG 0x06
+enum bat_packettype {
+	BAT_PACKET       = 0x01,
+	BAT_ICMP         = 0x02,
+	BAT_UNICAST      = 0x03,
+	BAT_BCAST        = 0x04,
+	BAT_VIS          = 0x05,
+	BAT_UNICAST_FRAG = 0x06,
+	BAT_TT_QUERY     = 0x07,
+	BAT_ROAM_ADV     = 0x08
+};
 
 /* this file is included by batctl which needs these defines */
-#define COMPAT_VERSION 12
-#define DIRECTLINK 0x40
-#define VIS_SERVER 0x20
-#define PRIMARIES_FIRST_HOP 0x10
+#define COMPAT_VERSION 14
+
+enum batman_flags {
+	PRIMARIES_FIRST_HOP = 1 << 4,
+	VIS_SERVER          = 1 << 5,
+	DIRECTLINK          = 1 << 6
+};
 
 /* ICMP message types */
-#define ECHO_REPLY 0
-#define DESTINATION_UNREACHABLE 3
-#define ECHO_REQUEST 8
-#define TTL_EXCEEDED 11
-#define PARAMETER_PROBLEM 12
+enum icmp_packettype {
+	ECHO_REPLY              = 0,
+	DESTINATION_UNREACHABLE = 3,
+	ECHO_REQUEST            = 8,
+	TTL_EXCEEDED            = 11,
+	PARAMETER_PROBLEM       = 12
+};
 
 /* vis defines */
-#define VIS_TYPE_SERVER_SYNC 0
-#define VIS_TYPE_CLIENT_UPDATE 1
+enum vis_packettype {
+	VIS_TYPE_SERVER_SYNC   = 0,
+	VIS_TYPE_CLIENT_UPDATE = 1
+};
 
 /* fragmentation defines */
-#define UNI_FRAG_HEAD 0x01
-#define UNI_FRAG_LARGETAIL 0x02
+enum unicast_frag_flags {
+	UNI_FRAG_HEAD      = 1 << 0,
+	UNI_FRAG_LARGETAIL = 1 << 1
+};
+
+/* TT_QUERY subtypes */
+#define TT_QUERY_TYPE_MASK 0x3
+
+enum tt_query_packettype {
+	TT_REQUEST  = 0,
+	TT_RESPONSE = 1
+};
+
+/* TT_QUERY flags */
+enum tt_query_flags {
+	TT_FULL_TABLE = 1 << 2
+};
+
+/* TT_CLIENT flags.
+ * Flags from 1 to 1 << 7 are sent on the wire, while flags from 1 << 8 to
+ * 1 << 15 are used for local computation only */
+enum tt_client_flags {
+	TT_CLIENT_DEL     = 1 << 0,
+	TT_CLIENT_ROAM    = 1 << 1,
+	TT_CLIENT_NOPURGE = 1 << 8,
+	TT_CLIENT_NEW     = 1 << 9,
+	TT_CLIENT_PENDING = 1 << 10
+};
 
 struct batman_packet {
 	uint8_t  packet_type;
 	uint8_t  version;  /* batman version field */
+	uint8_t  ttl;
 	uint8_t  flags;    /* 0x40: DIRECTLINK flag, 0x20 VIS_SERVER flag... */
-	uint8_t  tq;
 	uint32_t seqno;
 	uint8_t  orig[6];
 	uint8_t  prev_sender[6];
-	uint8_t  ttl;
-	uint8_t  num_tt;
 	uint8_t  gw_flags;  /* flags related to gateway class */
-	uint8_t  align;
+	uint8_t  tq;
+	uint8_t  tt_num_changes;
+	uint8_t  ttvn; /* translation table version number */
+	uint16_t tt_crc;
 } __packed;
 
 #define BAT_PACKET_LEN sizeof(struct batman_packet)
@@ -71,12 +109,13 @@ struct batman_packet {
 struct icmp_packet {
 	uint8_t  packet_type;
 	uint8_t  version;  /* batman version field */
-	uint8_t  msg_type; /* see ICMP message types above */
 	uint8_t  ttl;
+	uint8_t  msg_type; /* see ICMP message types above */
 	uint8_t  dst[6];
 	uint8_t  orig[6];
 	uint16_t seqno;
 	uint8_t  uid;
+	uint8_t  reserved;
 } __packed;
 
 #define BAT_RR_LEN 16
@@ -86,8 +125,8 @@ struct icmp_packet {
 struct icmp_packet_rr {
 	uint8_t  packet_type;
 	uint8_t  version;  /* batman version field */
-	uint8_t  msg_type; /* see ICMP message types above */
 	uint8_t  ttl;
+	uint8_t  msg_type; /* see ICMP message types above */
 	uint8_t  dst[6];
 	uint8_t  orig[6];
 	uint16_t seqno;
@@ -99,16 +138,19 @@ struct icmp_packet_rr {
 struct unicast_packet {
 	uint8_t  packet_type;
 	uint8_t  version;  /* batman version field */
-	uint8_t  dest[6];
 	uint8_t  ttl;
+	uint8_t  ttvn; /* destination translation table version number */
+	uint8_t  dest[6];
 } __packed;
 
 struct unicast_frag_packet {
 	uint8_t  packet_type;
 	uint8_t  version;  /* batman version field */
-	uint8_t  dest[6];
 	uint8_t  ttl;
+	uint8_t  ttvn; /* destination translation table version number */
+	uint8_t  dest[6];
 	uint8_t  flags;
+	uint8_t  align;
 	uint8_t  orig[6];
 	uint16_t seqno;
 } __packed;
@@ -116,21 +158,61 @@ struct unicast_frag_packet {
 struct bcast_packet {
 	uint8_t  packet_type;
 	uint8_t  version;  /* batman version field */
-	uint8_t  orig[6];
 	uint8_t  ttl;
+	uint8_t  reserved;
 	uint32_t seqno;
+	uint8_t  orig[6];
 } __packed;
 
 struct vis_packet {
 	uint8_t  packet_type;
 	uint8_t  version;        /* batman version field */
+	uint8_t  ttl;		 /* TTL */
 	uint8_t  vis_type;	 /* which type of vis-participant sent this? */
-	uint8_t  entries;	 /* number of entries behind this struct */
 	uint32_t seqno;		 /* sequence number */
-	uint8_t  ttl;		 /* TTL */
+	uint8_t  entries;	 /* number of entries behind this struct */
+	uint8_t  reserved;
 	uint8_t  vis_orig[6];	 /* originator that announces its neighbors */
 	uint8_t  target_orig[6]; /* who should receive this packet */
 	uint8_t  sender_orig[6]; /* who sent or rebroadcasted this packet */
 } __packed;
 
+struct tt_query_packet {
+	uint8_t  packet_type;
+	uint8_t  version;  /* batman version field */
+	uint8_t  ttl;
+	/* the flag field is a combination of:
+	 * - TT_REQUEST or TT_RESPONSE
+	 * - TT_FULL_TABLE */
+	uint8_t  flags;
+	uint8_t  dst[ETH_ALEN];
+	uint8_t  src[ETH_ALEN];
+	/* the ttvn field is:
+	 * if TT_REQUEST: ttvn that triggered the
+	 *                request
+	 * if TT_RESPONSE: new ttvn for the src
+	 *                 orig_node */
+	uint8_t  ttvn;
+	/* tt_data field is:
+	 * if TT_REQUEST: crc associated with the
+	 *                ttvn
+	 * if TT_RESPONSE: table_size */
+	uint16_t tt_data;
+} __packed;
+
+struct roam_adv_packet {
+	uint8_t  packet_type;
+	uint8_t  version;
+	uint8_t  ttl;
+	uint8_t  reserved;
+	uint8_t  dst[ETH_ALEN];
+	uint8_t  src[ETH_ALEN];
+	uint8_t  client[ETH_ALEN];
+} __packed;
+
+struct tt_change {
+	uint8_t flags;
+	uint8_t addr[ETH_ALEN];
+} __packed;
+
 #endif /* _NET_BATMAN_ADV_PACKET_H_ */
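The header rework above moves ttl to byte 2 of every packet type and inserts reserved/align bytes where the new field order calls for them. A small userspace check of the on-wire layout the new struct tt_query_packet implies; the harness is illustrative, and the offsets follow mechanically from the field order under __packed:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define ETH_ALEN 6

struct tt_query_packet {
	uint8_t  packet_type;
	uint8_t  version;
	uint8_t  ttl;
	uint8_t  flags;
	uint8_t  dst[ETH_ALEN];
	uint8_t  src[ETH_ALEN];
	uint8_t  ttvn;
	uint16_t tt_data;
} __attribute__((packed));

int main(void)
{
	/* ttl sits at byte 2, as in every reworked batman-adv header */
	assert(offsetof(struct tt_query_packet, ttl) == 2);
	/* 4 header bytes + 6 + 6 + 1 = 17 bytes before tt_data */
	assert(offsetof(struct tt_query_packet, tt_data) == 17);
	assert(sizeof(struct tt_query_packet) == 19);
	return 0;
}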
diff --git a/net/batman-adv/ring_buffer.c b/net/batman-adv/ring_buffer.c
index 5bb6a619afe..f1ccfa76ce8 100644
--- a/net/batman-adv/ring_buffer.c
+++ b/net/batman-adv/ring_buffer.c
@@ -28,9 +28,9 @@ void ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index, uint8_t value)
 	*lq_index = (*lq_index + 1) % TQ_GLOBAL_WINDOW_SIZE;
 }
 
-uint8_t ring_buffer_avg(uint8_t lq_recv[])
+uint8_t ring_buffer_avg(const uint8_t lq_recv[])
 {
-	uint8_t *ptr;
+	const uint8_t *ptr;
 	uint16_t count = 0, i = 0, sum = 0;
 
 	ptr = lq_recv;
diff --git a/net/batman-adv/ring_buffer.h b/net/batman-adv/ring_buffer.h
index 0395b274186..7cdfe62b657 100644
--- a/net/batman-adv/ring_buffer.h
+++ b/net/batman-adv/ring_buffer.h
@@ -23,6 +23,6 @@
 #define _NET_BATMAN_ADV_RING_BUFFER_H_
 
 void ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index, uint8_t value);
-uint8_t ring_buffer_avg(uint8_t lq_recv[]);
+uint8_t ring_buffer_avg(const uint8_t lq_recv[]);
 
 #endif /* _NET_BATMAN_ADV_RING_BUFFER_H_ */
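ring_buffer_set()/ring_buffer_avg() keep a small circular window of link-quality samples. Only their signatures appear in the hunks above, so this userspace model reconstructs the bodies from their apparent behaviour (store into a circular slot, average the non-zero entries) and may differ from the file in detail:

#include <stdint.h>
#include <stdio.h>

#define TQ_GLOBAL_WINDOW_SIZE 5

static void ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index,
			    uint8_t value)
{
	lq_recv[*lq_index] = value;
	*lq_index = (*lq_index + 1) % TQ_GLOBAL_WINDOW_SIZE;
}

static uint8_t ring_buffer_avg(const uint8_t lq_recv[])
{
	const uint8_t *ptr = lq_recv;
	uint16_t count = 0, i = 0, sum = 0;

	while (i < TQ_GLOBAL_WINDOW_SIZE) {
		if (*ptr != 0) {	/* zero means "no sample yet" */
			count++;
			sum += *ptr;
		}
		i++;
		ptr++;
	}

	return (count == 0) ? 0 : (uint8_t)(sum / count);
}

int main(void)
{
	uint8_t win[TQ_GLOBAL_WINDOW_SIZE] = {0}, idx = 0;

	ring_buffer_set(win, &idx, 200);
	ring_buffer_set(win, &idx, 100);
	printf("avg = %u\n", ring_buffer_avg(win));	/* 150 */
	return 0;
}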
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index bb1c3ec7e3f..0f32c818874 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -64,28 +64,69 @@ void slide_own_bcast_window(struct hard_iface *hard_iface)
 	}
 }
 
-static void update_TT(struct bat_priv *bat_priv, struct orig_node *orig_node,
-		      unsigned char *tt_buff, int tt_buff_len)
+static void update_transtable(struct bat_priv *bat_priv,
+			      struct orig_node *orig_node,
+			      const unsigned char *tt_buff,
+			      uint8_t tt_num_changes, uint8_t ttvn,
+			      uint16_t tt_crc)
 {
-	if ((tt_buff_len != orig_node->tt_buff_len) ||
-	    ((tt_buff_len > 0) &&
-	     (orig_node->tt_buff_len > 0) &&
-	     (memcmp(orig_node->tt_buff, tt_buff, tt_buff_len) != 0))) {
-
-		if (orig_node->tt_buff_len > 0)
-			tt_global_del_orig(bat_priv, orig_node,
-					   "originator changed tt");
-
-		if ((tt_buff_len > 0) && (tt_buff))
-			tt_global_add_orig(bat_priv, orig_node,
-					   tt_buff, tt_buff_len);
+	uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
+	bool full_table = true;
+
+	/* the ttvn increased by one -> we can apply the attached changes */
+	if (ttvn - orig_ttvn == 1) {
+		/* the OGM could not contain the changes because they were too
+		 * many to fit in one frame or because they have already been
+		 * sent TT_OGM_APPEND_MAX times. In this case send a tt
+		 * request */
+		if (!tt_num_changes) {
+			full_table = false;
+			goto request_table;
+		}
+
+		tt_update_changes(bat_priv, orig_node, tt_num_changes, ttvn,
+				  (struct tt_change *)tt_buff);
+
+		/* Even if we received the crc into the OGM, we prefer
+		 * to recompute it to spot any possible inconsistency
+		 * in the global table */
+		orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);
+
+		/* The ttvn alone is not enough to guarantee consistency
+		 * because a single value could repesent different states
+		 * (due to the wrap around). Thus a node has to check whether
+		 * the resulting table (after applying the changes) is still
+		 * consistent or not. E.g. a node could disconnect while its
+		 * ttvn is X and reconnect on ttvn = X + TTVN_MAX: in this case
+		 * checking the CRC value is mandatory to detect the
+		 * inconsistency */
+		if (orig_node->tt_crc != tt_crc)
+			goto request_table;
+
+		/* Roaming phase is over: tables are in sync again. I can
+		 * unset the flag */
+		orig_node->tt_poss_change = false;
+	} else {
+		/* if we missed more than one change or our tables are not
+		 * in sync anymore -> request fresh tt data */
+		if (ttvn != orig_ttvn || orig_node->tt_crc != tt_crc) {
+request_table:
+			bat_dbg(DBG_TT, bat_priv, "TT inconsistency for %pM. "
+				"Need to retrieve the correct information "
+				"(ttvn: %u last_ttvn: %u crc: %u last_crc: "
+				"%u num_changes: %u)\n", orig_node->orig, ttvn,
+				orig_ttvn, tt_crc, orig_node->tt_crc,
+				tt_num_changes);
+			send_tt_request(bat_priv, orig_node, ttvn, tt_crc,
+					full_table);
+			return;
+		}
 	}
 }
 
 static void update_route(struct bat_priv *bat_priv,
 			 struct orig_node *orig_node,
-			 struct neigh_node *neigh_node,
-			 unsigned char *tt_buff, int tt_buff_len)
+			 struct neigh_node *neigh_node)
 {
 	struct neigh_node *curr_router;
 
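The control flow of the new update_transtable() condenses to: apply the attached diff only when the ttvn advanced by exactly one and changes are attached, verify the CRC afterwards, and otherwise ask the originator for fresh data. A compact model of that decision tree; tt_state, tt_decide() and the result enum are invented for the sketch, only the control flow mirrors the patch:

#include <stdbool.h>
#include <stdint.h>

struct tt_state {
	uint8_t  last_ttvn;	/* last version we applied */
	uint16_t crc;		/* CRC of our copy of the table */
};

enum tt_decision {
	TT_APPLY_DIFF,		/* apply changes, then re-check CRC */
	TT_REQUEST_DIFF,	/* one step ahead but no changes attached */
	TT_REQUEST_FULL,	/* out of sync: need the full table */
	TT_IN_SYNC
};

static enum tt_decision tt_decide(const struct tt_state *st,
				  uint8_t ttvn, uint16_t tt_crc,
				  uint8_t tt_num_changes)
{
	if ((uint8_t)(ttvn - st->last_ttvn) == 1) {
		/* exactly one step ahead: the diff applies, unless the
		 * OGM carried no changes (too big, or already sent
		 * TT_OGM_APPEND_MAX times) */
		if (!tt_num_changes)
			return TT_REQUEST_DIFF;
		return TT_APPLY_DIFF;
	}

	/* missed several versions, or same ttvn with diverging CRC:
	 * only a full-table response resynchronizes us */
	if (ttvn != st->last_ttvn || st->crc != tt_crc)
		return TT_REQUEST_FULL;

	return TT_IN_SYNC;
}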
@@ -93,11 +134,10 @@ static void update_route(struct bat_priv *bat_priv,
 
 	/* route deleted */
 	if ((curr_router) && (!neigh_node)) {
-
 		bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n",
 			orig_node->orig);
 		tt_global_del_orig(bat_priv, orig_node,
-				   "originator timed out");
+				   "Deleted route towards originator");
 
 	/* route added */
 	} else if ((!curr_router) && (neigh_node)) {
@@ -105,11 +145,8 @@ static void update_route(struct bat_priv *bat_priv,
 		bat_dbg(DBG_ROUTES, bat_priv,
 			"Adding route towards: %pM (via %pM)\n",
 			orig_node->orig, neigh_node->addr);
-		tt_global_add_orig(bat_priv, orig_node,
-				   tt_buff, tt_buff_len);
-
 	/* route changed */
-	} else {
+	} else if (neigh_node && curr_router) {
 		bat_dbg(DBG_ROUTES, bat_priv,
 			"Changing route towards: %pM "
 			"(now via %pM - was via %pM)\n",
@@ -133,10 +170,8 @@ static void update_route(struct bat_priv *bat_priv,
 	neigh_node_free_ref(curr_router);
 }
 
-
 void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
-		   struct neigh_node *neigh_node, unsigned char *tt_buff,
-		   int tt_buff_len)
+		   struct neigh_node *neigh_node)
 {
 	struct neigh_node *router = NULL;
 
@@ -146,11 +181,7 @@ void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
 	router = orig_node_get_router(orig_node);
 
 	if (router != neigh_node)
-		update_route(bat_priv, orig_node, neigh_node,
-			     tt_buff, tt_buff_len);
-	/* may be just TT changed */
-	else
-		update_TT(bat_priv, orig_node, tt_buff, tt_buff_len);
+		update_route(bat_priv, orig_node, neigh_node);
 
 out:
 	if (router)
@@ -165,7 +196,7 @@ static int is_bidirectional_neigh(struct orig_node *orig_node,
 	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
 	struct neigh_node *neigh_node = NULL, *tmp_neigh_node;
 	struct hlist_node *node;
-	unsigned char total_count;
+	uint8_t total_count;
 	uint8_t orig_eq_count, neigh_rq_count, tq_own;
 	int tq_asym_penalty, ret = 0;
 
@@ -348,9 +379,9 @@ out:
 }
 
 /* copy primary address for bonding */
-static void bonding_save_primary(struct orig_node *orig_node,
+static void bonding_save_primary(const struct orig_node *orig_node,
 				 struct orig_node *orig_neigh_node,
-				 struct batman_packet *batman_packet)
+				 const struct batman_packet *batman_packet)
 {
 	if (!(batman_packet->flags & PRIMARIES_FIRST_HOP))
 		return;
@@ -358,19 +389,16 @@ static void bonding_save_primary(const struct orig_node *orig_node,
 	memcpy(orig_neigh_node->primary_addr, orig_node->orig, ETH_ALEN);
 }
 
-static void update_orig(struct bat_priv *bat_priv,
-			struct orig_node *orig_node,
-			struct ethhdr *ethhdr,
-			struct batman_packet *batman_packet,
+static void update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
+			const struct ethhdr *ethhdr,
+			const struct batman_packet *batman_packet,
 			struct hard_iface *if_incoming,
-			unsigned char *tt_buff, int tt_buff_len,
-			char is_duplicate)
+			const unsigned char *tt_buff, int is_duplicate)
 {
 	struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
 	struct neigh_node *router = NULL;
 	struct orig_node *orig_node_tmp;
 	struct hlist_node *node;
-	int tmp_tt_buff_len;
 	uint8_t bcast_own_sum_orig, bcast_own_sum_neigh;
 
 	bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): "
@@ -435,9 +463,6 @@ static void update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
 
 	bonding_candidate_add(orig_node, neigh_node);
 
-	tmp_tt_buff_len = (tt_buff_len > batman_packet->num_tt * ETH_ALEN ?
-			   batman_packet->num_tt * ETH_ALEN : tt_buff_len);
-
 	/* if this neighbor already is our next hop there is nothing
 	 * to change */
 	router = orig_node_get_router(orig_node);
@@ -467,15 +492,19 @@ static void update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
 		goto update_tt;
 	}
 
-	update_routes(bat_priv, orig_node, neigh_node,
-		      tt_buff, tmp_tt_buff_len);
-	goto update_gw;
+	update_routes(bat_priv, orig_node, neigh_node);
 
 update_tt:
-	update_routes(bat_priv, orig_node, router,
-		      tt_buff, tmp_tt_buff_len);
+	/* I have to check for transtable changes only if the OGM has been
+	 * sent through a primary interface */
+	if (((batman_packet->orig != ethhdr->h_source) &&
+	     (batman_packet->ttl > 2)) ||
+	    (batman_packet->flags & PRIMARIES_FIRST_HOP))
+		update_transtable(bat_priv, orig_node, tt_buff,
+				  batman_packet->tt_num_changes,
+				  batman_packet->ttvn,
+				  batman_packet->tt_crc);
 
-update_gw:
 	if (orig_node->gw_flags != batman_packet->gw_flags)
 		gw_node_update(bat_priv, orig_node, batman_packet->gw_flags);
 
@@ -531,15 +560,15 @@ static int window_protected(struct bat_priv *bat_priv,
  * -1 the packet is old and has been received while the seqno window
  *  was protected. Caller should drop it.
  */
-static char count_real_packets(struct ethhdr *ethhdr,
-			       struct batman_packet *batman_packet,
-			       struct hard_iface *if_incoming)
+static int count_real_packets(const struct ethhdr *ethhdr,
+			      const struct batman_packet *batman_packet,
+			      const struct hard_iface *if_incoming)
 {
 	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
 	struct orig_node *orig_node;
 	struct neigh_node *tmp_neigh_node;
 	struct hlist_node *node;
-	char is_duplicate = 0;
+	int is_duplicate = 0;
 	int32_t seq_diff;
 	int need_update = 0;
 	int set_mark, ret = -1;
@@ -595,9 +624,9 @@ out:
 	return ret;
 }
 
-void receive_bat_packet(struct ethhdr *ethhdr,
+void receive_bat_packet(const struct ethhdr *ethhdr,
 			struct batman_packet *batman_packet,
-			unsigned char *tt_buff, int tt_buff_len,
+			const unsigned char *tt_buff,
 			struct hard_iface *if_incoming)
 {
 	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
@@ -605,10 +634,10 @@ void receive_bat_packet(const struct ethhdr *ethhdr,
 	struct orig_node *orig_neigh_node, *orig_node;
 	struct neigh_node *router = NULL, *router_router = NULL;
 	struct neigh_node *orig_neigh_router = NULL;
-	char has_directlink_flag;
-	char is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
-	char is_broadcast = 0, is_bidirectional, is_single_hop_neigh;
-	char is_duplicate;
+	int has_directlink_flag;
+	int is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
+	int is_broadcast = 0, is_bidirectional, is_single_hop_neigh;
+	int is_duplicate;
 	uint32_t if_incoming_seqno;
 
 	/* Silently drop when the batman packet is actually not a
@@ -636,12 +665,14 @@ void receive_bat_packet(const struct ethhdr *ethhdr,
 
 	bat_dbg(DBG_BATMAN, bat_priv,
 		"Received BATMAN packet via NB: %pM, IF: %s [%pM] "
-		"(from OG: %pM, via prev OG: %pM, seqno %d, tq %d, "
-		"TTL %d, V %d, IDF %d)\n",
+		"(from OG: %pM, via prev OG: %pM, seqno %d, ttvn %u, "
+		"crc %u, changes %u, td %d, TTL %d, V %d, IDF %d)\n",
 		ethhdr->h_source, if_incoming->net_dev->name,
 		if_incoming->net_dev->dev_addr, batman_packet->orig,
 		batman_packet->prev_sender, batman_packet->seqno,
-		batman_packet->tq, batman_packet->ttl, batman_packet->version,
+		batman_packet->ttvn, batman_packet->tt_crc,
+		batman_packet->tt_num_changes, batman_packet->tq,
+		batman_packet->ttl, batman_packet->version,
 		has_directlink_flag);
 
 	rcu_read_lock();
@@ -664,7 +695,7 @@ void receive_bat_packet(const struct ethhdr *ethhdr,
 				 hard_iface->net_dev->dev_addr))
 			is_my_oldorig = 1;
 
-		if (compare_eth(ethhdr->h_source, broadcast_addr))
+		if (is_broadcast_ether_addr(ethhdr->h_source))
 			is_broadcast = 1;
 	}
 	rcu_read_unlock();
@@ -701,17 +732,16 @@ void receive_bat_packet(const struct ethhdr *ethhdr,
 
 		/* neighbor has to indicate direct link and it has to
 		 * come via the corresponding interface */
-		/* if received seqno equals last send seqno save new
-		 * seqno for bidirectional check */
+		/* save packet seqno for bidirectional check */
 		if (has_directlink_flag &&
 		    compare_eth(if_incoming->net_dev->dev_addr,
-				batman_packet->orig) &&
-		    (batman_packet->seqno - if_incoming_seqno + 2 == 0)) {
+				batman_packet->orig)) {
 			offset = if_incoming->if_num * NUM_WORDS;
 
 			spin_lock_bh(&orig_neigh_node->ogm_cnt_lock);
 			word = &(orig_neigh_node->bcast_own[offset]);
-			bit_mark(word, 0);
+			bit_mark(word,
+				 if_incoming_seqno - batman_packet->seqno - 2);
 			orig_neigh_node->bcast_own_sum[if_incoming->if_num] =
 				bit_packet_count(word);
 			spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock);
@@ -794,14 +824,14 @@ void receive_bat_packet(struct ethhdr *ethhdr,
794 ((orig_node->last_real_seqno == batman_packet->seqno) && 824 ((orig_node->last_real_seqno == batman_packet->seqno) &&
795 (orig_node->last_ttl - 3 <= batman_packet->ttl)))) 825 (orig_node->last_ttl - 3 <= batman_packet->ttl))))
796 update_orig(bat_priv, orig_node, ethhdr, batman_packet, 826 update_orig(bat_priv, orig_node, ethhdr, batman_packet,
797 if_incoming, tt_buff, tt_buff_len, is_duplicate); 827 if_incoming, tt_buff, is_duplicate);
798 828
799 /* is single hop (direct) neighbor */ 829 /* is single hop (direct) neighbor */
800 if (is_single_hop_neigh) { 830 if (is_single_hop_neigh) {
801 831
802 /* mark direct link on incoming interface */ 832 /* mark direct link on incoming interface */
803 schedule_forward_packet(orig_node, ethhdr, batman_packet, 833 schedule_forward_packet(orig_node, ethhdr, batman_packet,
804 1, tt_buff_len, if_incoming); 834 1, if_incoming);
805 835
806 bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: " 836 bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: "
807 "rebroadcast neighbor packet with direct link flag\n"); 837 "rebroadcast neighbor packet with direct link flag\n");
@@ -824,7 +854,7 @@ void receive_bat_packet(struct ethhdr *ethhdr,
824 bat_dbg(DBG_BATMAN, bat_priv, 854 bat_dbg(DBG_BATMAN, bat_priv,
825 "Forwarding packet: rebroadcast originator packet\n"); 855 "Forwarding packet: rebroadcast originator packet\n");
826 schedule_forward_packet(orig_node, ethhdr, batman_packet, 856 schedule_forward_packet(orig_node, ethhdr, batman_packet,
827 0, tt_buff_len, if_incoming); 857 0, if_incoming);
828 858
829out_neigh: 859out_neigh:
830 if ((orig_neigh_node) && (!is_single_hop_neigh)) 860 if ((orig_neigh_node) && (!is_single_hop_neigh))
@@ -1077,7 +1107,7 @@ out:
1077 * This method rotates the bonding list and increases the 1107 * This method rotates the bonding list and increases the
1078 * returned router's refcount. */ 1108 * returned router's refcount. */
1079static struct neigh_node *find_bond_router(struct orig_node *primary_orig, 1109static struct neigh_node *find_bond_router(struct orig_node *primary_orig,
1080 struct hard_iface *recv_if) 1110 const struct hard_iface *recv_if)
1081{ 1111{
1082 struct neigh_node *tmp_neigh_node; 1112 struct neigh_node *tmp_neigh_node;
1083 struct neigh_node *router = NULL, *first_candidate = NULL; 1113 struct neigh_node *router = NULL, *first_candidate = NULL;
@@ -1128,7 +1158,7 @@ out:
1128 * 1158 *
1129 * Increases the returned router's refcount */ 1159 * Increases the returned router's refcount */
1130static struct neigh_node *find_ifalter_router(struct orig_node *primary_orig, 1160static struct neigh_node *find_ifalter_router(struct orig_node *primary_orig,
1131 struct hard_iface *recv_if) 1161 const struct hard_iface *recv_if)
1132{ 1162{
1133 struct neigh_node *tmp_neigh_node; 1163 struct neigh_node *tmp_neigh_node;
1134 struct neigh_node *router = NULL, *first_candidate = NULL; 1164 struct neigh_node *router = NULL, *first_candidate = NULL;
@@ -1171,12 +1201,124 @@ static struct neigh_node *find_ifalter_router(struct orig_node *primary_orig,
1171 return router; 1201 return router;
1172} 1202}
1173 1203
1204int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if)
1205{
1206 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
1207 struct tt_query_packet *tt_query;
1208 struct ethhdr *ethhdr;
1209
1210 /* drop packet if it has not necessary minimum size */
1211 if (unlikely(!pskb_may_pull(skb, sizeof(struct tt_query_packet))))
1212 goto out;
1213
1214 /* I could need to modify it */
1215 if (skb_cow(skb, sizeof(struct tt_query_packet)) < 0)
1216 goto out;
1217
1218 ethhdr = (struct ethhdr *)skb_mac_header(skb);
1219
1220 /* packet with unicast indication but broadcast recipient */
1221 if (is_broadcast_ether_addr(ethhdr->h_dest))
1222 goto out;
1223
1224 /* packet with broadcast sender address */
1225 if (is_broadcast_ether_addr(ethhdr->h_source))
1226 goto out;
1227
1228 tt_query = (struct tt_query_packet *)skb->data;
1229
1230 tt_query->tt_data = ntohs(tt_query->tt_data);
1231
1232 switch (tt_query->flags & TT_QUERY_TYPE_MASK) {
1233 case TT_REQUEST:
1234 /* If we cannot provide an answer the tt_request is
1235 * forwarded */
1236 if (!send_tt_response(bat_priv, tt_query)) {
1237 bat_dbg(DBG_TT, bat_priv,
1238 "Routing TT_REQUEST to %pM [%c]\n",
1239 tt_query->dst,
1240 (tt_query->flags & TT_FULL_TABLE ? 'F' : '.'));
1241 tt_query->tt_data = htons(tt_query->tt_data);
1242 return route_unicast_packet(skb, recv_if);
1243 }
1244 break;
1245 case TT_RESPONSE:
1246 /* packet needs to be linearised to access the TT changes */
1247 if (skb_linearize(skb) < 0)
1248 goto out;
1249
1250 if (is_my_mac(tt_query->dst))
1251 handle_tt_response(bat_priv, tt_query);
1252 else {
1253 bat_dbg(DBG_TT, bat_priv,
1254 "Routing TT_RESPONSE to %pM [%c]\n",
1255 tt_query->dst,
1256 (tt_query->flags & TT_FULL_TABLE ? 'F' : '.'));
1257 tt_query->tt_data = htons(tt_query->tt_data);
1258 return route_unicast_packet(skb, recv_if);
1259 }
1260 break;
1261 }
1262
1263out:
1264 /* returning NET_RX_DROP will make the caller function kfree the skb */
1265 return NET_RX_DROP;
1266}
1267
1268int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if)
1269{
1270 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
1271 struct roam_adv_packet *roam_adv_packet;
1272 struct orig_node *orig_node;
1273 struct ethhdr *ethhdr;
1274
1275 /* drop packet if it has not necessary minimum size */
1276 if (unlikely(!pskb_may_pull(skb, sizeof(struct roam_adv_packet))))
1277 goto out;
1278
1279 ethhdr = (struct ethhdr *)skb_mac_header(skb);
1280
1281 /* packet with unicast indication but broadcast recipient */
1282 if (is_broadcast_ether_addr(ethhdr->h_dest))
1283 goto out;
1284
1285 /* packet with broadcast sender address */
1286 if (is_broadcast_ether_addr(ethhdr->h_source))
1287 goto out;
1288
1289 roam_adv_packet = (struct roam_adv_packet *)skb->data;
1290
1291 if (!is_my_mac(roam_adv_packet->dst))
1292 return route_unicast_packet(skb, recv_if);
1293
1294 orig_node = orig_hash_find(bat_priv, roam_adv_packet->src);
1295 if (!orig_node)
1296 goto out;
1297
1298 bat_dbg(DBG_TT, bat_priv, "Received ROAMING_ADV from %pM "
1299 "(client %pM)\n", roam_adv_packet->src,
1300 roam_adv_packet->client);
1301
1302 tt_global_add(bat_priv, orig_node, roam_adv_packet->client,
1303 atomic_read(&orig_node->last_ttvn) + 1, true);
1304
1305 /* Roaming phase starts: I have new information but the ttvn has not
1306 * been incremented yet. This flag will make me check all the incoming
1307 * packets for the correct destination. */
1308 bat_priv->tt_poss_change = true;
1309
1310 orig_node_free_ref(orig_node);
1311out:
1312 /* returning NET_RX_DROP will make the caller function kfree the skb */
1313 return NET_RX_DROP;
1314}
1315
1174/* find a suitable router for this originator, and use 1316/* find a suitable router for this originator, and use
1175 * bonding if possible. increases the found neighbors 1317 * bonding if possible. increases the found neighbors
1176 * refcount.*/ 1318 * refcount.*/
1177struct neigh_node *find_router(struct bat_priv *bat_priv, 1319struct neigh_node *find_router(struct bat_priv *bat_priv,
1178 struct orig_node *orig_node, 1320 struct orig_node *orig_node,
1179 struct hard_iface *recv_if) 1321 const struct hard_iface *recv_if)
1180{ 1322{
1181 struct orig_node *primary_orig_node; 1323 struct orig_node *primary_orig_node;
1182 struct orig_node *router_orig; 1324 struct orig_node *router_orig;
@@ -1240,6 +1382,9 @@ struct neigh_node *find_router(struct bat_priv *bat_priv,
1240 router = find_ifalter_router(primary_orig_node, recv_if); 1382 router = find_ifalter_router(primary_orig_node, recv_if);
1241 1383
1242return_router: 1384return_router:
1385 if (router && router->if_incoming->if_status != IF_ACTIVE)
1386 goto err_unlock;
1387
1243 rcu_read_unlock(); 1388 rcu_read_unlock();
1244 return router; 1389 return router;
1245err_unlock: 1390err_unlock:
@@ -1354,14 +1499,84 @@ out:
1354 return ret; 1499 return ret;
1355} 1500}
1356 1501
1502static int check_unicast_ttvn(struct bat_priv *bat_priv,
1503 struct sk_buff *skb) {
1504 uint8_t curr_ttvn;
1505 struct orig_node *orig_node;
1506 struct ethhdr *ethhdr;
1507 struct hard_iface *primary_if;
1508 struct unicast_packet *unicast_packet;
1509 bool tt_poss_change;
1510
1511 /* I could need to modify it */
1512 if (skb_cow(skb, sizeof(struct unicast_packet)) < 0)
1513 return 0;
1514
1515 unicast_packet = (struct unicast_packet *)skb->data;
1516
1517 if (is_my_mac(unicast_packet->dest)) {
1518 tt_poss_change = bat_priv->tt_poss_change;
1519 curr_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
1520 } else {
1521 orig_node = orig_hash_find(bat_priv, unicast_packet->dest);
1522
1523 if (!orig_node)
1524 return 0;
1525
1526 curr_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
1527 tt_poss_change = orig_node->tt_poss_change;
1528 orig_node_free_ref(orig_node);
1529 }
1530
1531 /* Check whether I have to reroute the packet */
1532 if (seq_before(unicast_packet->ttvn, curr_ttvn) || tt_poss_change) {
1533 /* Linearize the skb before accessing it */
1534 if (skb_linearize(skb) < 0)
1535 return 0;
1536
1537 ethhdr = (struct ethhdr *)(skb->data +
1538 sizeof(struct unicast_packet));
1539 orig_node = transtable_search(bat_priv, ethhdr->h_dest);
1540
1541 if (!orig_node) {
1542 if (!is_my_client(bat_priv, ethhdr->h_dest))
1543 return 0;
1544 primary_if = primary_if_get_selected(bat_priv);
1545 if (!primary_if)
1546 return 0;
1547 memcpy(unicast_packet->dest,
1548 primary_if->net_dev->dev_addr, ETH_ALEN);
1549 hardif_free_ref(primary_if);
1550 } else {
1551 memcpy(unicast_packet->dest, orig_node->orig,
1552 ETH_ALEN);
1553 curr_ttvn = (uint8_t)
1554 atomic_read(&orig_node->last_ttvn);
1555 orig_node_free_ref(orig_node);
1556 }
1557
1558 bat_dbg(DBG_ROUTES, bat_priv, "TTVN mismatch (old_ttvn %u "
1559 "new_ttvn %u)! Rerouting unicast packet (for %pM) to "
1560 "%pM\n", unicast_packet->ttvn, curr_ttvn,
1561 ethhdr->h_dest, unicast_packet->dest);
1562
1563 unicast_packet->ttvn = curr_ttvn;
1564 }
1565 return 1;
1566}
1567
1357int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if) 1568int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
1358{ 1569{
1570 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
1359 struct unicast_packet *unicast_packet; 1571 struct unicast_packet *unicast_packet;
1360 int hdr_size = sizeof(struct unicast_packet); 1572 int hdr_size = sizeof(*unicast_packet);
1361 1573
1362 if (check_unicast_packet(skb, hdr_size) < 0) 1574 if (check_unicast_packet(skb, hdr_size) < 0)
1363 return NET_RX_DROP; 1575 return NET_RX_DROP;
1364 1576
1577 if (!check_unicast_ttvn(bat_priv, skb))
1578 return NET_RX_DROP;
1579
1365 unicast_packet = (struct unicast_packet *)skb->data; 1580 unicast_packet = (struct unicast_packet *)skb->data;
1366 1581
1367 /* packet for me */ 1582 /* packet for me */
@@ -1377,13 +1592,16 @@ int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if)
1377{ 1592{
1378 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); 1593 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
1379 struct unicast_frag_packet *unicast_packet; 1594 struct unicast_frag_packet *unicast_packet;
1380 int hdr_size = sizeof(struct unicast_frag_packet); 1595 int hdr_size = sizeof(*unicast_packet);
1381 struct sk_buff *new_skb = NULL; 1596 struct sk_buff *new_skb = NULL;
1382 int ret; 1597 int ret;
1383 1598
1384 if (check_unicast_packet(skb, hdr_size) < 0) 1599 if (check_unicast_packet(skb, hdr_size) < 0)
1385 return NET_RX_DROP; 1600 return NET_RX_DROP;
1386 1601
1602 if (!check_unicast_ttvn(bat_priv, skb))
1603 return NET_RX_DROP;
1604
1387 unicast_packet = (struct unicast_frag_packet *)skb->data; 1605 unicast_packet = (struct unicast_frag_packet *)skb->data;
1388 1606
1389 /* packet for me */ 1607 /* packet for me */
@@ -1413,7 +1631,7 @@ int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
1413 struct orig_node *orig_node = NULL; 1631 struct orig_node *orig_node = NULL;
1414 struct bcast_packet *bcast_packet; 1632 struct bcast_packet *bcast_packet;
1415 struct ethhdr *ethhdr; 1633 struct ethhdr *ethhdr;
1416 int hdr_size = sizeof(struct bcast_packet); 1634 int hdr_size = sizeof(*bcast_packet);
1417 int ret = NET_RX_DROP; 1635 int ret = NET_RX_DROP;
1418 int32_t seq_diff; 1636 int32_t seq_diff;
1419 1637
@@ -1471,7 +1689,7 @@ int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
1471 spin_unlock_bh(&orig_node->bcast_seqno_lock); 1689 spin_unlock_bh(&orig_node->bcast_seqno_lock);
1472 1690
1473 /* rebroadcast packet */ 1691 /* rebroadcast packet */
1474 add_bcast_packet_to_list(bat_priv, skb); 1692 add_bcast_packet_to_list(bat_priv, skb, 1);
1475 1693
1476 /* broadcast for me */ 1694 /* broadcast for me */
1477 interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size); 1695 interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
@@ -1491,7 +1709,7 @@ int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if)
1491 struct vis_packet *vis_packet; 1709 struct vis_packet *vis_packet;
1492 struct ethhdr *ethhdr; 1710 struct ethhdr *ethhdr;
1493 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); 1711 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
1494 int hdr_size = sizeof(struct vis_packet); 1712 int hdr_size = sizeof(*vis_packet);
1495 1713
1496 /* keep skb linear */ 1714 /* keep skb linear */
1497 if (skb_linearize(skb) < 0) 1715 if (skb_linearize(skb) < 0)
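The rerouting in check_unicast_ttvn() above hinges on seq_before(), a wrap-around-safe comparison of the 8-bit translation-table version number (ttvn) carried in every unicast header. Below is a minimal standalone sketch of that comparison; seq_before() is reimplemented here purely for illustration, so treat the definition and the little test harness as assumptions rather than the kernel's own macro:

#include <stdint.h>
#include <stdio.h>

/* x is "before" y when the signed 8-bit distance from y to x is
 * negative; the cast keeps the answer right across the 255 -> 0 wrap */
static int seq_before(uint8_t x, uint8_t y)
{
	return (int8_t)(x - y) < 0;
}

int main(void)
{
	printf("%d\n", seq_before(3, 4));   /* 1: packet ttvn is stale */
	printf("%d\n", seq_before(4, 4));   /* 0: up to date */
	printf("%d\n", seq_before(254, 2)); /* 1: stale even after wrap */
	return 0;
}

A stale ttvn (or a raised tt_poss_change flag) is what triggers the transtable_search() lookup and the destination rewrite seen in the hunk above.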
diff --git a/net/batman-adv/routing.h b/net/batman-adv/routing.h
index 870f29842b2..fb14e9579b1 100644
--- a/net/batman-adv/routing.h
+++ b/net/batman-adv/routing.h
@@ -23,13 +23,12 @@
 #define _NET_BATMAN_ADV_ROUTING_H_

 void slide_own_bcast_window(struct hard_iface *hard_iface);
-void receive_bat_packet(struct ethhdr *ethhdr,
+void receive_bat_packet(const struct ethhdr *ethhdr,
 			struct batman_packet *batman_packet,
-			unsigned char *tt_buff, int tt_buff_len,
+			const unsigned char *tt_buff,
 			struct hard_iface *if_incoming);
 void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
-		   struct neigh_node *neigh_node, unsigned char *tt_buff,
-		   int tt_buff_len);
+		   struct neigh_node *neigh_node);
 int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
 int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if);
 int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
@@ -37,9 +36,11 @@ int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if);
 int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
 int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if);
 int recv_bat_packet(struct sk_buff *skb, struct hard_iface *recv_if);
+int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if);
+int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if);
 struct neigh_node *find_router(struct bat_priv *bat_priv,
 			       struct orig_node *orig_node,
-			       struct hard_iface *recv_if);
+			       const struct hard_iface *recv_if);
 void bonding_candidate_del(struct orig_node *orig_node,
 			   struct neigh_node *neigh_node);

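recv_tt_query(), declared above, multiplexes two packet kinds over a single flags byte: a type field selected with TT_QUERY_TYPE_MASK plus an independent TT_FULL_TABLE bit. The toy dispatcher below mirrors only that masking pattern; the constant values are invented for the example (the real definitions live in packet.h):

#include <stdint.h>
#include <stdio.h>

/* illustrative values only -- see packet.h for the real definitions */
#define TT_QUERY_TYPE_MASK	0x3
#define TT_REQUEST		0x0
#define TT_RESPONSE		0x1
#define TT_FULL_TABLE		0x4

static void tt_query_dispatch(uint8_t flags)
{
	char full = (flags & TT_FULL_TABLE) ? 'F' : '.';

	/* same shape as the switch in recv_tt_query() */
	switch (flags & TT_QUERY_TYPE_MASK) {
	case TT_REQUEST:
		printf("TT_REQUEST [%c]\n", full);
		break;
	case TT_RESPONSE:
		printf("TT_RESPONSE [%c]\n", full);
		break;
	}
}

int main(void)
{
	tt_query_dispatch(TT_REQUEST | TT_FULL_TABLE);	/* TT_REQUEST [F] */
	tt_query_dispatch(TT_RESPONSE);			/* TT_RESPONSE [.] */
	return 0;
}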
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index 33779278f1b..58d14472068 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -33,14 +33,14 @@
 static void send_outstanding_bcast_packet(struct work_struct *work);

 /* apply hop penalty for a normal link */
-static uint8_t hop_penalty(const uint8_t tq, struct bat_priv *bat_priv)
+static uint8_t hop_penalty(uint8_t tq, const struct bat_priv *bat_priv)
 {
 	int hop_penalty = atomic_read(&bat_priv->hop_penalty);
 	return (tq * (TQ_MAX_VALUE - hop_penalty)) / (TQ_MAX_VALUE);
 }

 /* when do we schedule our own packet to be sent */
-static unsigned long own_send_time(struct bat_priv *bat_priv)
+static unsigned long own_send_time(const struct bat_priv *bat_priv)
 {
 	return jiffies + msecs_to_jiffies(
 		   atomic_read(&bat_priv->orig_interval) -
@@ -55,9 +55,8 @@ static unsigned long forward_send_time(void)

 /* send out an already prepared packet to the given address via the
  * specified batman interface */
-int send_skb_packet(struct sk_buff *skb,
-		    struct hard_iface *hard_iface,
-		    uint8_t *dst_addr)
+int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
+		    const uint8_t *dst_addr)
 {
 	struct ethhdr *ethhdr;

@@ -74,7 +73,7 @@ int send_skb_packet(struct sk_buff *skb,
 	}

 	/* push to the ethernet header. */
-	if (my_skb_head_push(skb, sizeof(struct ethhdr)) < 0)
+	if (my_skb_head_push(skb, sizeof(*ethhdr)) < 0)
 		goto send_skb_err;

 	skb_reset_mac_header(skb);
@@ -121,7 +120,7 @@ static void send_packet_to_if(struct forw_packet *forw_packet,
 	/* adjust all flags and log packets */
 	while (aggregated_packet(buff_pos,
 				 forw_packet->packet_len,
-				 batman_packet->num_tt)) {
+				 batman_packet->tt_num_changes)) {

 		/* we might have aggregated direct link packets with an
 		 * ordinary base packet */
@@ -136,17 +135,17 @@
 			  "Forwarding"));
 		bat_dbg(DBG_BATMAN, bat_priv,
 			"%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d,"
-			" IDF %s) on interface %s [%pM]\n",
+			" IDF %s, hvn %d) on interface %s [%pM]\n",
 			fwd_str, (packet_num > 0 ? "aggregated " : ""),
 			batman_packet->orig, ntohl(batman_packet->seqno),
 			batman_packet->tq, batman_packet->ttl,
 			(batman_packet->flags & DIRECTLINK ?
 			 "on" : "off"),
-			hard_iface->net_dev->name,
+			batman_packet->ttvn, hard_iface->net_dev->name,
 			hard_iface->net_dev->dev_addr);

-		buff_pos += sizeof(struct batman_packet) +
-			(batman_packet->num_tt * ETH_ALEN);
+		buff_pos += sizeof(*batman_packet) +
+			tt_len(batman_packet->tt_num_changes);
 		packet_num++;
 		batman_packet = (struct batman_packet *)
 			(forw_packet->skb->data + buff_pos);
@@ -164,26 +163,31 @@ static void send_packet(struct forw_packet *forw_packet)
 	struct hard_iface *hard_iface;
 	struct net_device *soft_iface;
 	struct bat_priv *bat_priv;
+	struct hard_iface *primary_if = NULL;
 	struct batman_packet *batman_packet =
 		(struct batman_packet *)(forw_packet->skb->data);
-	unsigned char directlink = (batman_packet->flags & DIRECTLINK ? 1 : 0);
+	int directlink = (batman_packet->flags & DIRECTLINK ? 1 : 0);

 	if (!forw_packet->if_incoming) {
 		pr_err("Error - can't forward packet: incoming iface not "
 		       "specified\n");
-		return;
+		goto out;
 	}

 	soft_iface = forw_packet->if_incoming->soft_iface;
 	bat_priv = netdev_priv(soft_iface);

 	if (forw_packet->if_incoming->if_status != IF_ACTIVE)
-		return;
+		goto out;
+
+	primary_if = primary_if_get_selected(bat_priv);
+	if (!primary_if)
+		goto out;

 	/* multihomed peer assumed */
 	/* non-primary OGMs are only broadcasted on their interface */
 	if ((directlink && (batman_packet->ttl == 1)) ||
-	    (forw_packet->own && (forw_packet->if_incoming->if_num > 0))) {
+	    (forw_packet->own && (forw_packet->if_incoming != primary_if))) {

 		/* FIXME: what about aggregated packets ? */
 		bat_dbg(DBG_BATMAN, bat_priv,
@@ -200,7 +204,7 @@ static void send_packet(struct forw_packet *forw_packet)
 				broadcast_addr);
 		forw_packet->skb = NULL;

-		return;
+		goto out;
 	}

 	/* broadcast on every interface */
@@ -212,28 +216,24 @@
 		send_packet_to_if(forw_packet, hard_iface);
 	}
 	rcu_read_unlock();
+
+out:
+	if (primary_if)
+		hardif_free_ref(primary_if);
 }

-static void rebuild_batman_packet(struct bat_priv *bat_priv,
-				  struct hard_iface *hard_iface)
+static void realloc_packet_buffer(struct hard_iface *hard_iface,
+				  int new_len)
 {
-	int new_len;
 	unsigned char *new_buff;
 	struct batman_packet *batman_packet;

-	new_len = sizeof(struct batman_packet) +
-			(bat_priv->num_local_tt * ETH_ALEN);
 	new_buff = kmalloc(new_len, GFP_ATOMIC);

 	/* keep old buffer if kmalloc should fail */
 	if (new_buff) {
 		memcpy(new_buff, hard_iface->packet_buff,
-		       sizeof(struct batman_packet));
-		batman_packet = (struct batman_packet *)new_buff;
-
-		batman_packet->num_tt = tt_local_fill_buffer(bat_priv,
-				new_buff + sizeof(struct batman_packet),
-				new_len - sizeof(struct batman_packet));
+		       sizeof(*batman_packet));

 		kfree(hard_iface->packet_buff);
 		hard_iface->packet_buff = new_buff;
@@ -241,6 +241,46 @@ static void rebuild_batman_packet(struct bat_priv *bat_priv,
 	}
 }

+/* when calling this function (hard_iface == primary_if) has to be true */
+static void prepare_packet_buffer(struct bat_priv *bat_priv,
+				  struct hard_iface *hard_iface)
+{
+	int new_len;
+	struct batman_packet *batman_packet;
+
+	new_len = BAT_PACKET_LEN +
+		  tt_len((uint8_t)atomic_read(&bat_priv->tt_local_changes));
+
+	/* if we have too many changes for one packet don't send any
+	 * and wait for the tt table request which will be fragmented */
+	if (new_len > hard_iface->soft_iface->mtu)
+		new_len = BAT_PACKET_LEN;
+
+	realloc_packet_buffer(hard_iface, new_len);
+	batman_packet = (struct batman_packet *)hard_iface->packet_buff;
+
+	atomic_set(&bat_priv->tt_crc, tt_local_crc(bat_priv));
+
+	/* reset the sending counter */
+	atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX);
+
+	batman_packet->tt_num_changes = tt_changes_fill_buffer(bat_priv,
+				hard_iface->packet_buff + BAT_PACKET_LEN,
+				hard_iface->packet_len - BAT_PACKET_LEN);
+
+}
+
+static void reset_packet_buffer(struct bat_priv *bat_priv,
+				struct hard_iface *hard_iface)
+{
+	struct batman_packet *batman_packet;
+
+	realloc_packet_buffer(hard_iface, BAT_PACKET_LEN);
+
+	batman_packet = (struct batman_packet *)hard_iface->packet_buff;
+	batman_packet->tt_num_changes = 0;
+}
+
 void schedule_own_packet(struct hard_iface *hard_iface)
 {
 	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
@@ -266,14 +306,21 @@ void schedule_own_packet(struct hard_iface *hard_iface)
 	if (hard_iface->if_status == IF_TO_BE_ACTIVATED)
 		hard_iface->if_status = IF_ACTIVE;

-	/* if local tt has changed and interface is a primary interface */
-	if ((atomic_read(&bat_priv->tt_local_changed)) &&
-	    (hard_iface == primary_if))
-		rebuild_batman_packet(bat_priv, hard_iface);
+	if (hard_iface == primary_if) {
+		/* if at least one change happened */
+		if (atomic_read(&bat_priv->tt_local_changes) > 0) {
+			tt_commit_changes(bat_priv);
+			prepare_packet_buffer(bat_priv, hard_iface);
+		}
+
+		/* if the changes have been sent enough times */
+		if (!atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))
+			reset_packet_buffer(bat_priv, hard_iface);
+	}

 	/**
 	 * NOTE: packet_buff might just have been re-allocated in
-	 * rebuild_batman_packet()
+	 * prepare_packet_buffer() or in reset_packet_buffer()
 	 */
 	batman_packet = (struct batman_packet *)hard_iface->packet_buff;

@@ -281,6 +328,9 @@
 	batman_packet->seqno =
 		htonl((uint32_t)atomic_read(&hard_iface->seqno));

+	batman_packet->ttvn = atomic_read(&bat_priv->ttvn);
+	batman_packet->tt_crc = htons((uint16_t)atomic_read(&bat_priv->tt_crc));
+
 	if (vis_server == VIS_TYPE_SERVER_SYNC)
 		batman_packet->flags |= VIS_SERVER;
 	else
@@ -291,7 +341,7 @@ void schedule_own_packet(struct hard_iface *hard_iface)
 		batman_packet->gw_flags =
 			(uint8_t)atomic_read(&bat_priv->gw_bandwidth);
 	else
-		batman_packet->gw_flags = 0;
+		batman_packet->gw_flags = NO_FLAGS;

 	atomic_inc(&hard_iface->seqno);

@@ -307,15 +357,16 @@ void schedule_own_packet(struct hard_iface *hard_iface)
 }

 void schedule_forward_packet(struct orig_node *orig_node,
-			     struct ethhdr *ethhdr,
+			     const struct ethhdr *ethhdr,
 			     struct batman_packet *batman_packet,
-			     uint8_t directlink, int tt_buff_len,
+			     int directlink,
 			     struct hard_iface *if_incoming)
 {
 	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
 	struct neigh_node *router;
-	unsigned char in_tq, in_ttl, tq_avg = 0;
+	uint8_t in_tq, in_ttl, tq_avg = 0;
 	unsigned long send_time;
+	uint8_t tt_num_changes;

 	if (batman_packet->ttl <= 1) {
 		bat_dbg(DBG_BATMAN, bat_priv, "ttl exceeded\n");
@@ -326,6 +377,7 @@ void schedule_forward_packet(struct orig_node *orig_node,

 	in_tq = batman_packet->tq;
 	in_ttl = batman_packet->ttl;
+	tt_num_changes = batman_packet->tt_num_changes;

 	batman_packet->ttl--;
 	memcpy(batman_packet->prev_sender, ethhdr->h_source, ETH_ALEN);
@@ -358,6 +410,7 @@
 		batman_packet->ttl);

 	batman_packet->seqno = htonl(batman_packet->seqno);
+	batman_packet->tt_crc = htons(batman_packet->tt_crc);

 	/* switch of primaries first hop flag when forwarding */
 	batman_packet->flags &= ~PRIMARIES_FIRST_HOP;
@@ -369,7 +422,7 @@
 	send_time = forward_send_time();
 	add_bat_packet_to_list(bat_priv,
 			       (unsigned char *)batman_packet,
-			       sizeof(struct batman_packet) + tt_buff_len,
+			       sizeof(*batman_packet) + tt_len(tt_num_changes),
 			       if_incoming, 0, send_time);
 }

@@ -408,11 +461,13 @@ static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
  *
  * The skb is not consumed, so the caller should make sure that the
  * skb is freed. */
-int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb)
+int add_bcast_packet_to_list(struct bat_priv *bat_priv,
+			     const struct sk_buff *skb, unsigned long delay)
 {
 	struct hard_iface *primary_if = NULL;
 	struct forw_packet *forw_packet;
 	struct bcast_packet *bcast_packet;
+	struct sk_buff *newskb;

 	if (!atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
 		bat_dbg(DBG_BATMAN, bat_priv, "bcast packet queue full\n");
@@ -423,28 +478,28 @@ int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb)
 	if (!primary_if)
 		goto out_and_inc;

-	forw_packet = kmalloc(sizeof(struct forw_packet), GFP_ATOMIC);
+	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);

 	if (!forw_packet)
 		goto out_and_inc;

-	skb = skb_copy(skb, GFP_ATOMIC);
-	if (!skb)
+	newskb = skb_copy(skb, GFP_ATOMIC);
+	if (!newskb)
 		goto packet_free;

 	/* as we have a copy now, it is safe to decrease the TTL */
-	bcast_packet = (struct bcast_packet *)skb->data;
+	bcast_packet = (struct bcast_packet *)newskb->data;
 	bcast_packet->ttl--;

-	skb_reset_mac_header(skb);
+	skb_reset_mac_header(newskb);

-	forw_packet->skb = skb;
+	forw_packet->skb = newskb;
 	forw_packet->if_incoming = primary_if;

 	/* how often did we send the bcast packet ? */
 	forw_packet->num_packets = 0;

-	_add_bcast_packet_to_list(bat_priv, forw_packet, 1);
+	_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
 	return NETDEV_TX_OK;

 packet_free:
@@ -537,7 +592,7 @@ out:
 }

 void purge_outstanding_packets(struct bat_priv *bat_priv,
-			       struct hard_iface *hard_iface)
+			       const struct hard_iface *hard_iface)
 {
 	struct forw_packet *forw_packet;
 	struct hlist_node *tmp_node, *safe_tmp_node;
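hop_penalty() above is pure integer arithmetic: tq' = tq * (TQ_MAX_VALUE - hop_penalty) / TQ_MAX_VALUE. A quick userspace check of how the transmit-quality metric decays per hop; TQ_MAX_VALUE of 255 matches the formula in the hunk, while the penalty of 10 is only an assumed setting (in the driver it is an atomic_t tunable in bat_priv):

#include <stdint.h>
#include <stdio.h>

#define TQ_MAX_VALUE 255

static uint8_t hop_penalty(uint8_t tq, int penalty)
{
	return (tq * (TQ_MAX_VALUE - penalty)) / TQ_MAX_VALUE;
}

int main(void)
{
	uint8_t tq = TQ_MAX_VALUE;
	int hop;

	/* a perfect link decays to 245, 235, 225, ... over the hops */
	for (hop = 1; hop <= 3; hop++) {
		tq = hop_penalty(tq, 10);
		printf("hop %d: tq = %u\n", hop, tq);
	}
	return 0;
}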
diff --git a/net/batman-adv/send.h b/net/batman-adv/send.h
index 247172d71e4..1f2d1e87766 100644
--- a/net/batman-adv/send.h
+++ b/net/batman-adv/send.h
@@ -22,18 +22,18 @@
 #ifndef _NET_BATMAN_ADV_SEND_H_
 #define _NET_BATMAN_ADV_SEND_H_

-int send_skb_packet(struct sk_buff *skb,
-		    struct hard_iface *hard_iface,
-		    uint8_t *dst_addr);
+int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
+		    const uint8_t *dst_addr);
 void schedule_own_packet(struct hard_iface *hard_iface);
 void schedule_forward_packet(struct orig_node *orig_node,
-			     struct ethhdr *ethhdr,
+			     const struct ethhdr *ethhdr,
 			     struct batman_packet *batman_packet,
-			     uint8_t directlink, int tt_buff_len,
+			     int directlink,
 			     struct hard_iface *if_outgoing);
-int add_bcast_packet_to_list(struct bat_priv *bat_priv, struct sk_buff *skb);
+int add_bcast_packet_to_list(struct bat_priv *bat_priv,
+			     const struct sk_buff *skb, unsigned long delay);
 void send_outstanding_bat_packet(struct work_struct *work);
 void purge_outstanding_packets(struct bat_priv *bat_priv,
-			       struct hard_iface *hard_iface);
+			       const struct hard_iface *hard_iface);

 #endif /* _NET_BATMAN_ADV_SEND_H_ */
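prepare_packet_buffer() in send.c above sizes the OGM buffer as BAT_PACKET_LEN + tt_len(changes) and falls back to a bare OGM when the diff would not fit the MTU, leaving peers to pull the table through a (possibly fragmented) TT request instead. A sketch of just that sizing decision; the two sizes below are picked arbitrarily for the example and are not the real structure sizes:

#include <stdio.h>

#define BAT_PACKET_LEN	26	/* assumed OGM header size */
#define TT_CHANGE_LEN	8	/* assumed per-change record size */

static int tt_len(int changes_num)
{
	return changes_num * TT_CHANGE_LEN;
}

/* returns how large the outgoing packet buffer should be */
static int ogm_buffer_len(int tt_local_changes, int mtu)
{
	int new_len = BAT_PACKET_LEN + tt_len(tt_local_changes);

	/* too many changes for one packet: append no diff at all and
	 * let the tt request mechanism carry the table instead */
	if (new_len > mtu)
		new_len = BAT_PACKET_LEN;

	return new_len;
}

int main(void)
{
	printf("%d\n", ogm_buffer_len(4, 1500));	/* 58: diff appended */
	printf("%d\n", ogm_buffer_len(500, 1500));	/* 26: bare OGM */
	return 0;
}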
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index d5aa60999e8..3e2f91ffa4e 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -30,6 +30,7 @@
 #include "gateway_common.h"
 #include "gateway_client.h"
 #include "bat_sysfs.h"
+#include "originator.h"
 #include <linux/slab.h>
 #include <linux/ethtool.h>
 #include <linux/etherdevice.h>
@@ -123,8 +124,7 @@ static struct softif_neigh_vid *softif_neigh_vid_get(struct bat_priv *bat_priv,
 		goto out;
 	}

-	softif_neigh_vid = kzalloc(sizeof(struct softif_neigh_vid),
-				   GFP_ATOMIC);
+	softif_neigh_vid = kzalloc(sizeof(*softif_neigh_vid), GFP_ATOMIC);
 	if (!softif_neigh_vid)
 		goto out;

@@ -146,7 +146,7 @@ out:
 }

 static struct softif_neigh *softif_neigh_get(struct bat_priv *bat_priv,
-					     uint8_t *addr, short vid)
+					     const uint8_t *addr, short vid)
 {
 	struct softif_neigh_vid *softif_neigh_vid;
 	struct softif_neigh *softif_neigh = NULL;
@@ -170,7 +170,7 @@ static struct softif_neigh *softif_neigh_get(struct bat_priv *bat_priv,
 		goto unlock;
 	}

-	softif_neigh = kzalloc(sizeof(struct softif_neigh), GFP_ATOMIC);
+	softif_neigh = kzalloc(sizeof(*softif_neigh), GFP_ATOMIC);
 	if (!softif_neigh)
 		goto unlock;

@@ -242,7 +242,8 @@ static void softif_neigh_vid_select(struct bat_priv *bat_priv,
 	if (new_neigh && !atomic_inc_not_zero(&new_neigh->refcount))
 		new_neigh = NULL;

-	curr_neigh = softif_neigh_vid->softif_neigh;
+	curr_neigh = rcu_dereference_protected(softif_neigh_vid->softif_neigh,
+					       1);
 	rcu_assign_pointer(softif_neigh_vid->softif_neigh, new_neigh);

 	if ((curr_neigh) && (!new_neigh))
@@ -380,7 +381,7 @@ void softif_neigh_purge(struct bat_priv *bat_priv)
 	struct softif_neigh *softif_neigh, *curr_softif_neigh;
 	struct softif_neigh_vid *softif_neigh_vid;
 	struct hlist_node *node, *node_tmp, *node_tmp2;
-	char do_deselect;
+	int do_deselect;

 	rcu_read_lock();
 	hlist_for_each_entry_rcu(softif_neigh_vid, node,
@@ -534,7 +535,7 @@ static int interface_set_mac_addr(struct net_device *dev, void *p)
 	/* only modify transtable if it has been initialised before */
 	if (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE) {
 		tt_local_remove(bat_priv, dev->dev_addr,
-				"mac address changed");
+				"mac address changed", false);
 		tt_local_add(dev, addr->sa_data);
 	}

@@ -553,7 +554,7 @@ static int interface_change_mtu(struct net_device *dev, int new_mtu)
 	return 0;
 }

-int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
+static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
 {
 	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
 	struct bat_priv *bat_priv = netdev_priv(soft_iface);
@@ -561,6 +562,7 @@ int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
 	struct bcast_packet *bcast_packet;
 	struct vlan_ethhdr *vhdr;
 	struct softif_neigh *curr_softif_neigh = NULL;
+	struct orig_node *orig_node = NULL;
 	int data_len = skb->len, ret;
 	short vid = -1;
 	bool do_bcast = false;
@@ -592,11 +594,13 @@ int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
 	if (curr_softif_neigh)
 		goto dropped;

-	/* TODO: check this for locks */
+	/* Register the client MAC in the transtable */
 	tt_local_add(soft_iface, ethhdr->h_source);

-	if (is_multicast_ether_addr(ethhdr->h_dest)) {
-		ret = gw_is_target(bat_priv, skb);
+	orig_node = transtable_search(bat_priv, ethhdr->h_dest);
+	if (is_multicast_ether_addr(ethhdr->h_dest) ||
+	    (orig_node && orig_node->gw_flags)) {
+		ret = gw_is_target(bat_priv, skb, orig_node);

 		if (ret < 0)
 			goto dropped;
@@ -611,7 +615,7 @@ int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
 		if (!primary_if)
 			goto dropped;

-		if (my_skb_head_push(skb, sizeof(struct bcast_packet)) < 0)
+		if (my_skb_head_push(skb, sizeof(*bcast_packet)) < 0)
 			goto dropped;

 		bcast_packet = (struct bcast_packet *)skb->data;
@@ -630,7 +634,7 @@ int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
 		bcast_packet->seqno =
 			htonl(atomic_inc_return(&bat_priv->bcast_seqno));

-		add_bcast_packet_to_list(bat_priv, skb);
+		add_bcast_packet_to_list(bat_priv, skb, 1);

 		/* a copy is stored in the bcast list, therefore removing
 		 * the original skb. */
@@ -656,6 +660,8 @@ end:
 		softif_neigh_free_ref(curr_softif_neigh);
 	if (primary_if)
 		hardif_free_ref(primary_if);
+	if (orig_node)
+		orig_node_free_ref(orig_node);
 	return NETDEV_TX_OK;
 }

@@ -744,7 +750,6 @@ out:
 	return;
 }

-#ifdef HAVE_NET_DEVICE_OPS
 static const struct net_device_ops bat_netdev_ops = {
 	.ndo_open = interface_open,
 	.ndo_stop = interface_release,
@@ -754,7 +759,6 @@ static const struct net_device_ops bat_netdev_ops = {
 	.ndo_start_xmit = interface_tx,
 	.ndo_validate_addr = eth_validate_addr
 };
-#endif

 static void interface_setup(struct net_device *dev)
 {
@@ -763,16 +767,7 @@ static void interface_setup(struct net_device *dev)

 	ether_setup(dev);

-#ifdef HAVE_NET_DEVICE_OPS
 	dev->netdev_ops = &bat_netdev_ops;
-#else
-	dev->open = interface_open;
-	dev->stop = interface_release;
-	dev->get_stats = interface_stats;
-	dev->set_mac_address = interface_set_mac_addr;
-	dev->change_mtu = interface_change_mtu;
-	dev->hard_start_xmit = interface_tx;
-#endif
 	dev->destructor = free_netdev;
 	dev->tx_queue_len = 0;

@@ -790,17 +785,16 @@ static void interface_setup(struct net_device *dev)

 	SET_ETHTOOL_OPS(dev, &bat_ethtool_ops);

-	memset(priv, 0, sizeof(struct bat_priv));
+	memset(priv, 0, sizeof(*priv));
 }

-struct net_device *softif_create(char *name)
+struct net_device *softif_create(const char *name)
 {
 	struct net_device *soft_iface;
 	struct bat_priv *bat_priv;
 	int ret;

-	soft_iface = alloc_netdev(sizeof(struct bat_priv) , name,
-				  interface_setup);
+	soft_iface = alloc_netdev(sizeof(*bat_priv), name, interface_setup);

 	if (!soft_iface) {
 		pr_err("Unable to allocate the batman interface: %s\n", name);
@@ -831,7 +825,13 @@ struct net_device *softif_create(char *name)

 	atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
 	atomic_set(&bat_priv->bcast_seqno, 1);
-	atomic_set(&bat_priv->tt_local_changed, 0);
+	atomic_set(&bat_priv->ttvn, 0);
+	atomic_set(&bat_priv->tt_local_changes, 0);
+	atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
+
+	bat_priv->tt_buff = NULL;
+	bat_priv->tt_buff_len = 0;
+	bat_priv->tt_poss_change = false;

 	bat_priv->primary_if = NULL;
 	bat_priv->num_ifaces = 0;
@@ -872,15 +872,10 @@ void softif_destroy(struct net_device *soft_iface)
 	unregister_netdevice(soft_iface);
 }

-int softif_is_valid(struct net_device *net_dev)
+int softif_is_valid(const struct net_device *net_dev)
 {
-#ifdef HAVE_NET_DEVICE_OPS
 	if (net_dev->netdev_ops->ndo_start_xmit == interface_tx)
 		return 1;
-#else
-	if (net_dev->hard_start_xmit == interface_tx)
-		return 1;
-#endif

 	return 0;
 }
@@ -924,4 +919,3 @@ static u32 bat_get_link(struct net_device *dev)
 {
 	return 1;
 }
-
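interface_tx() above now holds a reference on the orig_node returned by transtable_search() and drops it on every exit path, the same acquire/release discipline the hash lookups use throughout this series. Below is a userspace approximation of that pattern with C11 atomics; the kernel versions rely on atomic_inc_not_zero()/kfree_rcu() rather than the plain free() used here, so this is a sketch of the idea, not the driver's implementation:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct orig_node {
	atomic_int refcount;
};

/* a lookup only hands out the object if the count was not yet zero */
static struct orig_node *orig_node_get(struct orig_node *node)
{
	int cur = atomic_load(&node->refcount);

	do {
		if (cur == 0)	/* object is already being torn down */
			return NULL;
	} while (!atomic_compare_exchange_weak(&node->refcount,
					       &cur, cur + 1));
	return node;
}

static void orig_node_free_ref(struct orig_node *node)
{
	if (atomic_fetch_sub(&node->refcount, 1) == 1)
		free(node);	/* last reference gone */
}

int main(void)
{
	struct orig_node *node = malloc(sizeof(*node));

	atomic_init(&node->refcount, 1);	/* creator's reference */
	if (orig_node_get(node))
		printf("lookup took a reference\n");
	orig_node_free_ref(node);	/* lookup path done with it */
	orig_node_free_ref(node);	/* creator drops it: freed */
	return 0;
}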
diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h
index 4789b6f2a0b..001546fc96f 100644
--- a/net/batman-adv/soft-interface.h
+++ b/net/batman-adv/soft-interface.h
@@ -25,12 +25,11 @@
 int my_skb_head_push(struct sk_buff *skb, unsigned int len);
 int softif_neigh_seq_print_text(struct seq_file *seq, void *offset);
 void softif_neigh_purge(struct bat_priv *bat_priv);
-int interface_tx(struct sk_buff *skb, struct net_device *soft_iface);
 void interface_rx(struct net_device *soft_iface,
 		  struct sk_buff *skb, struct hard_iface *recv_if,
 		  int hdr_size);
-struct net_device *softif_create(char *name);
+struct net_device *softif_create(const char *name);
 void softif_destroy(struct net_device *soft_iface);
-int softif_is_valid(struct net_device *net_dev);
+int softif_is_valid(const struct net_device *net_dev);

 #endif /* _NET_BATMAN_ADV_SOFT_INTERFACE_H_ */
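The translation-table.c changes below add is_out_of_time(), which expires TT state by comparing jiffies against a deadline with the kernel's wrap-safe time_after(). A minimal rendition of both helpers, assuming a jiffies-like monotonically incrementing unsigned counter (the msecs_to_jiffies() conversion is left out to keep the sketch small):

#include <limits.h>
#include <stdio.h>

typedef unsigned long jiffies_t;

/* true if a is after b, even once the counter has wrapped */
static int time_after(jiffies_t a, jiffies_t b)
{
	return (long)(b - a) < 0;
}

static int is_out_of_time(jiffies_t now, jiffies_t start, jiffies_t timeout)
{
	jiffies_t deadline = start + timeout;

	return time_after(now, deadline);
}

int main(void)
{
	printf("%d\n", is_out_of_time(1000, 0, 500));		/* 1 */
	printf("%d\n", is_out_of_time(100, 0, 500));		/* 0 */
	/* deadline sits just below ULONG_MAX, "now" has wrapped to 10 */
	printf("%d\n", is_out_of_time(10, ULONG_MAX - 20, 5));	/* 1 */
	return 0;
}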
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 7b729660cbf..fb6931d00cd 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -23,38 +23,45 @@
23#include "translation-table.h" 23#include "translation-table.h"
24#include "soft-interface.h" 24#include "soft-interface.h"
25#include "hard-interface.h" 25#include "hard-interface.h"
26#include "send.h"
26#include "hash.h" 27#include "hash.h"
27#include "originator.h" 28#include "originator.h"
29#include "routing.h"
28 30
29static void tt_local_purge(struct work_struct *work); 31#include <linux/crc16.h>
30static void _tt_global_del_orig(struct bat_priv *bat_priv, 32
31 struct tt_global_entry *tt_global_entry, 33static void _tt_global_del(struct bat_priv *bat_priv,
32 char *message); 34 struct tt_global_entry *tt_global_entry,
35 const char *message);
36static void tt_purge(struct work_struct *work);
33 37
34/* returns 1 if they are the same mac addr */ 38/* returns 1 if they are the same mac addr */
35static int compare_ltt(struct hlist_node *node, void *data2) 39static int compare_ltt(const struct hlist_node *node, const void *data2)
36{ 40{
37 void *data1 = container_of(node, struct tt_local_entry, hash_entry); 41 const void *data1 = container_of(node, struct tt_local_entry,
42 hash_entry);
38 43
39 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0); 44 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
40} 45}
41 46
42/* returns 1 if they are the same mac addr */ 47/* returns 1 if they are the same mac addr */
43static int compare_gtt(struct hlist_node *node, void *data2) 48static int compare_gtt(const struct hlist_node *node, const void *data2)
44{ 49{
45 void *data1 = container_of(node, struct tt_global_entry, hash_entry); 50 const void *data1 = container_of(node, struct tt_global_entry,
51 hash_entry);
46 52
47 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0); 53 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
48} 54}
49 55
50static void tt_local_start_timer(struct bat_priv *bat_priv) 56static void tt_start_timer(struct bat_priv *bat_priv)
51{ 57{
52 INIT_DELAYED_WORK(&bat_priv->tt_work, tt_local_purge); 58 INIT_DELAYED_WORK(&bat_priv->tt_work, tt_purge);
53 queue_delayed_work(bat_event_workqueue, &bat_priv->tt_work, 10 * HZ); 59 queue_delayed_work(bat_event_workqueue, &bat_priv->tt_work,
60 msecs_to_jiffies(5000));
54} 61}
55 62
56static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv, 63static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv,
57 void *data) 64 const void *data)
58{ 65{
59 struct hashtable_t *hash = bat_priv->tt_local_hash; 66 struct hashtable_t *hash = bat_priv->tt_local_hash;
60 struct hlist_head *head; 67 struct hlist_head *head;
@@ -73,6 +80,9 @@ static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv,
73 if (!compare_eth(tt_local_entry, data)) 80 if (!compare_eth(tt_local_entry, data))
74 continue; 81 continue;
75 82
83 if (!atomic_inc_not_zero(&tt_local_entry->refcount))
84 continue;
85
76 tt_local_entry_tmp = tt_local_entry; 86 tt_local_entry_tmp = tt_local_entry;
77 break; 87 break;
78 } 88 }
@@ -82,7 +92,7 @@ static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv,
82} 92}
83 93
84static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv, 94static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv,
85 void *data) 95 const void *data)
86{ 96{
87 struct hashtable_t *hash = bat_priv->tt_global_hash; 97 struct hashtable_t *hash = bat_priv->tt_global_hash;
88 struct hlist_head *head; 98 struct hlist_head *head;
@@ -102,6 +112,9 @@ static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv,
102 if (!compare_eth(tt_global_entry, data)) 112 if (!compare_eth(tt_global_entry, data))
103 continue; 113 continue;
104 114
115 if (!atomic_inc_not_zero(&tt_global_entry->refcount))
116 continue;
117
105 tt_global_entry_tmp = tt_global_entry; 118 tt_global_entry_tmp = tt_global_entry;
106 break; 119 break;
107 } 120 }
@@ -110,7 +123,54 @@ static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv,
110 return tt_global_entry_tmp; 123 return tt_global_entry_tmp;
111} 124}
112 125
113int tt_local_init(struct bat_priv *bat_priv) 126static bool is_out_of_time(unsigned long starting_time, unsigned long timeout)
127{
128 unsigned long deadline;
129 deadline = starting_time + msecs_to_jiffies(timeout);
130
131 return time_after(jiffies, deadline);
132}
133
134static void tt_local_entry_free_ref(struct tt_local_entry *tt_local_entry)
135{
136 if (atomic_dec_and_test(&tt_local_entry->refcount))
137 kfree_rcu(tt_local_entry, rcu);
138}
139
140static void tt_global_entry_free_ref(struct tt_global_entry *tt_global_entry)
141{
142 if (atomic_dec_and_test(&tt_global_entry->refcount))
143 kfree_rcu(tt_global_entry, rcu);
144}
145
146static void tt_local_event(struct bat_priv *bat_priv, const uint8_t *addr,
147 uint8_t flags)
148{
149 struct tt_change_node *tt_change_node;
150
151 tt_change_node = kmalloc(sizeof(*tt_change_node), GFP_ATOMIC);
152
153 if (!tt_change_node)
154 return;
155
156 tt_change_node->change.flags = flags;
157 memcpy(tt_change_node->change.addr, addr, ETH_ALEN);
158
159 spin_lock_bh(&bat_priv->tt_changes_list_lock);
160 /* track the change in the OGMinterval list */
161 list_add_tail(&tt_change_node->list, &bat_priv->tt_changes_list);
162 atomic_inc(&bat_priv->tt_local_changes);
163 spin_unlock_bh(&bat_priv->tt_changes_list_lock);
164
165 atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
166}
167
168int tt_len(int changes_num)
169{
170 return changes_num * sizeof(struct tt_change);
171}
172
173static int tt_local_init(struct bat_priv *bat_priv)
114{ 174{
115 if (bat_priv->tt_local_hash) 175 if (bat_priv->tt_local_hash)
116 return 1; 176 return 1;
@@ -120,116 +180,114 @@ int tt_local_init(struct bat_priv *bat_priv)
120 if (!bat_priv->tt_local_hash) 180 if (!bat_priv->tt_local_hash)
121 return 0; 181 return 0;
122 182
123 atomic_set(&bat_priv->tt_local_changed, 0);
124 tt_local_start_timer(bat_priv);
125
126 return 1; 183 return 1;
127} 184}
128 185
129void tt_local_add(struct net_device *soft_iface, uint8_t *addr) 186void tt_local_add(struct net_device *soft_iface, const uint8_t *addr)
130{ 187{
131 struct bat_priv *bat_priv = netdev_priv(soft_iface); 188 struct bat_priv *bat_priv = netdev_priv(soft_iface);
132 struct tt_local_entry *tt_local_entry; 189 struct tt_local_entry *tt_local_entry = NULL;
133 struct tt_global_entry *tt_global_entry; 190 struct tt_global_entry *tt_global_entry = NULL;
134 int required_bytes;
135 191
136 spin_lock_bh(&bat_priv->tt_lhash_lock);
137 tt_local_entry = tt_local_hash_find(bat_priv, addr); 192 tt_local_entry = tt_local_hash_find(bat_priv, addr);
138 spin_unlock_bh(&bat_priv->tt_lhash_lock);
139 193
140 if (tt_local_entry) { 194 if (tt_local_entry) {
141 tt_local_entry->last_seen = jiffies; 195 tt_local_entry->last_seen = jiffies;
142 return; 196 goto out;
143 }
144
145 /* only announce as many hosts as possible in the batman-packet and
146 space in batman_packet->num_tt That also should give a limit to
147 MAC-flooding. */
148 required_bytes = (bat_priv->num_local_tt + 1) * ETH_ALEN;
149 required_bytes += BAT_PACKET_LEN;
150
151 if ((required_bytes > ETH_DATA_LEN) ||
152 (atomic_read(&bat_priv->aggregated_ogms) &&
153 required_bytes > MAX_AGGREGATION_BYTES) ||
154 (bat_priv->num_local_tt + 1 > 255)) {
155 bat_dbg(DBG_ROUTES, bat_priv,
156 "Can't add new local tt entry (%pM): "
157 "number of local tt entries exceeds packet size\n",
158 addr);
159 return;
160 } 197 }
161 198
162 bat_dbg(DBG_ROUTES, bat_priv, 199 tt_local_entry = kmalloc(sizeof(*tt_local_entry), GFP_ATOMIC);
163 "Creating new local tt entry: %pM\n", addr);
164
165 tt_local_entry = kmalloc(sizeof(struct tt_local_entry), GFP_ATOMIC);
166 if (!tt_local_entry) 200 if (!tt_local_entry)
167 return; 201 goto out;
202
203 bat_dbg(DBG_TT, bat_priv,
204 "Creating new local tt entry: %pM (ttvn: %d)\n", addr,
205 (uint8_t)atomic_read(&bat_priv->ttvn));
168 206
169 memcpy(tt_local_entry->addr, addr, ETH_ALEN); 207 memcpy(tt_local_entry->addr, addr, ETH_ALEN);
170 tt_local_entry->last_seen = jiffies; 208 tt_local_entry->last_seen = jiffies;
209 tt_local_entry->flags = NO_FLAGS;
210 atomic_set(&tt_local_entry->refcount, 2);
171 211
172 /* the batman interface mac address should never be purged */ 212 /* the batman interface mac address should never be purged */
173 if (compare_eth(addr, soft_iface->dev_addr)) 213 if (compare_eth(addr, soft_iface->dev_addr))
174 tt_local_entry->never_purge = 1; 214 tt_local_entry->flags |= TT_CLIENT_NOPURGE;
175 else
176 tt_local_entry->never_purge = 0;
177 215
178 spin_lock_bh(&bat_priv->tt_lhash_lock); 216 tt_local_event(bat_priv, addr, tt_local_entry->flags);
217
 218 /* The local entry has to be marked as NEW to avoid sending it in
219 * a full table response going out before the next ttvn increment
220 * (consistency check) */
221 tt_local_entry->flags |= TT_CLIENT_NEW;
179 222
180 hash_add(bat_priv->tt_local_hash, compare_ltt, choose_orig, 223 hash_add(bat_priv->tt_local_hash, compare_ltt, choose_orig,
181 tt_local_entry, &tt_local_entry->hash_entry); 224 tt_local_entry, &tt_local_entry->hash_entry);
182 bat_priv->num_local_tt++;
183 atomic_set(&bat_priv->tt_local_changed, 1);
184
185 spin_unlock_bh(&bat_priv->tt_lhash_lock);
186 225
187 /* remove address from global hash if present */ 226 /* remove address from global hash if present */
188 spin_lock_bh(&bat_priv->tt_ghash_lock);
189
190 tt_global_entry = tt_global_hash_find(bat_priv, addr); 227 tt_global_entry = tt_global_hash_find(bat_priv, addr);
191 228
 229 /* Check whether the client is roaming to us */
230 if (tt_global_entry) {
231 /* This node is probably going to update its tt table */
232 tt_global_entry->orig_node->tt_poss_change = true;
233 /* The global entry has to be marked as PENDING and has to be
 234 * kept for consistency purposes */
235 tt_global_entry->flags |= TT_CLIENT_PENDING;
236 send_roam_adv(bat_priv, tt_global_entry->addr,
237 tt_global_entry->orig_node);
238 }
239out:
240 if (tt_local_entry)
241 tt_local_entry_free_ref(tt_local_entry);
192 if (tt_global_entry) 242 if (tt_global_entry)
193 _tt_global_del_orig(bat_priv, tt_global_entry, 243 tt_global_entry_free_ref(tt_global_entry);
194 "local tt received");
195
196 spin_unlock_bh(&bat_priv->tt_ghash_lock);
197} 244}
198 245
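tt_local_add() initialises the entry's refcount to 2: one reference for the hash table, one for the caller, which is dropped through tt_local_entry_free_ref() on the out: path. That helper is not part of this hunk; a plausible sketch, assuming a kref-style release and an rcu_head member in the struct:

	static void tt_local_entry_free_ref(struct tt_local_entry *tt_local_entry)
	{
		/* free only once both the hash table and the caller let go */
		if (atomic_dec_and_test(&tt_local_entry->refcount))
			kfree_rcu(tt_local_entry, rcu);
	}
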
199int tt_local_fill_buffer(struct bat_priv *bat_priv, 246int tt_changes_fill_buffer(struct bat_priv *bat_priv,
200 unsigned char *buff, int buff_len) 247 unsigned char *buff, int buff_len)
201{ 248{
202 struct hashtable_t *hash = bat_priv->tt_local_hash; 249 int count = 0, tot_changes = 0;
203 struct tt_local_entry *tt_local_entry; 250 struct tt_change_node *entry, *safe;
204 struct hlist_node *node;
205 struct hlist_head *head;
206 int i, count = 0;
207 251
208 spin_lock_bh(&bat_priv->tt_lhash_lock); 252 if (buff_len > 0)
253 tot_changes = buff_len / tt_len(1);
209 254
210 for (i = 0; i < hash->size; i++) { 255 spin_lock_bh(&bat_priv->tt_changes_list_lock);
211 head = &hash->table[i]; 256 atomic_set(&bat_priv->tt_local_changes, 0);
212
213 rcu_read_lock();
214 hlist_for_each_entry_rcu(tt_local_entry, node,
215 head, hash_entry) {
216 if (buff_len < (count + 1) * ETH_ALEN)
217 break;
218
219 memcpy(buff + (count * ETH_ALEN), tt_local_entry->addr,
220 ETH_ALEN);
221 257
258 list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
259 list) {
260 if (count < tot_changes) {
261 memcpy(buff + tt_len(count),
262 &entry->change, sizeof(struct tt_change));
222 count++; 263 count++;
223 } 264 }
224 rcu_read_unlock(); 265 list_del(&entry->list);
266 kfree(entry);
225 } 267 }
268 spin_unlock_bh(&bat_priv->tt_changes_list_lock);
269
270 /* Keep the buffer for possible tt_request */
271 spin_lock_bh(&bat_priv->tt_buff_lock);
272 kfree(bat_priv->tt_buff);
273 bat_priv->tt_buff_len = 0;
274 bat_priv->tt_buff = NULL;
 275 /* Check whether this new OGM carries any changes at all (it may
 276 * carry none because of size limits) */
277 if (buff_len > 0) {
 278 /*
279 * if kmalloc() fails we will reply with the full table
280 * instead of providing the diff
281 */
282 bat_priv->tt_buff = kmalloc(buff_len, GFP_ATOMIC);
283 if (bat_priv->tt_buff) {
284 memcpy(bat_priv->tt_buff, buff, buff_len);
285 bat_priv->tt_buff_len = buff_len;
286 }
287 }
288 spin_unlock_bh(&bat_priv->tt_buff_lock);
226 289
227 /* if we did not get all new local tts see you next time ;-) */ 290 return tot_changes;
228 if (count == bat_priv->num_local_tt)
229 atomic_set(&bat_priv->tt_local_changed, 0);
230
231 spin_unlock_bh(&bat_priv->tt_lhash_lock);
232 return count;
233} 291}
234 292
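The expected consumer of tt_changes_fill_buffer() is the OGM scheduler in send.c, which is not part of this hunk. A sketch of the call (buffer names and offsets assumed):

	/* pack as many pending tt_change records as fit behind the OGM
	 * header, then advertise how many actually made it */
	tt_num_changes = tt_changes_fill_buffer(bat_priv,
						packet_buff + BAT_PACKET_LEN,
						packet_buff_len - BAT_PACKET_LEN);
	batman_packet->tt_num_changes = tt_num_changes;
	batman_packet->ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
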
235int tt_local_seq_print_text(struct seq_file *seq, void *offset) 293int tt_local_seq_print_text(struct seq_file *seq, void *offset)
@@ -261,10 +319,8 @@ int tt_local_seq_print_text(struct seq_file *seq, void *offset)
261 } 319 }
262 320
263 seq_printf(seq, "Locally retrieved addresses (from %s) " 321 seq_printf(seq, "Locally retrieved addresses (from %s) "
264 "announced via TT:\n", 322 "announced via TT (TTVN: %u):\n",
265 net_dev->name); 323 net_dev->name, (uint8_t)atomic_read(&bat_priv->ttvn));
266
267 spin_lock_bh(&bat_priv->tt_lhash_lock);
268 324
269 buf_size = 1; 325 buf_size = 1;
270 /* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */ 326 /* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */
@@ -279,7 +335,6 @@ int tt_local_seq_print_text(struct seq_file *seq, void *offset)
279 335
280 buff = kmalloc(buf_size, GFP_ATOMIC); 336 buff = kmalloc(buf_size, GFP_ATOMIC);
281 if (!buff) { 337 if (!buff) {
282 spin_unlock_bh(&bat_priv->tt_lhash_lock);
283 ret = -ENOMEM; 338 ret = -ENOMEM;
284 goto out; 339 goto out;
285 } 340 }
@@ -299,8 +354,6 @@ int tt_local_seq_print_text(struct seq_file *seq, void *offset)
299 rcu_read_unlock(); 354 rcu_read_unlock();
300 } 355 }
301 356
302 spin_unlock_bh(&bat_priv->tt_lhash_lock);
303
304 seq_printf(seq, "%s", buff); 357 seq_printf(seq, "%s", buff);
305 kfree(buff); 358 kfree(buff);
306out: 359out:
@@ -309,92 +362,109 @@ out:
309 return ret; 362 return ret;
310} 363}
311 364
312static void _tt_local_del(struct hlist_node *node, void *arg) 365static void tt_local_set_pending(struct bat_priv *bat_priv,
366 struct tt_local_entry *tt_local_entry,
367 uint16_t flags)
313{ 368{
314 struct bat_priv *bat_priv = (struct bat_priv *)arg; 369 tt_local_event(bat_priv, tt_local_entry->addr,
315 void *data = container_of(node, struct tt_local_entry, hash_entry); 370 tt_local_entry->flags | flags);
316 371
 317 kfree(data); 372 /* The local client has to be marked as "pending to be removed" but has
 318 bat_priv->num_local_tt--; 373 * to be kept in the table in order to send it in a full table
 319 atomic_set(&bat_priv->tt_local_changed, 1); 374 * response issued before the next ttvn increment (consistency check) */
375 tt_local_entry->flags |= TT_CLIENT_PENDING;
320} 376}
321 377
322static void tt_local_del(struct bat_priv *bat_priv, 378void tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr,
323 struct tt_local_entry *tt_local_entry, 379 const char *message, bool roaming)
324 char *message)
325{ 380{
326 bat_dbg(DBG_ROUTES, bat_priv, "Deleting local tt entry (%pM): %s\n", 381 struct tt_local_entry *tt_local_entry = NULL;
327 tt_local_entry->addr, message);
328
329 hash_remove(bat_priv->tt_local_hash, compare_ltt, choose_orig,
330 tt_local_entry->addr);
331 _tt_local_del(&tt_local_entry->hash_entry, bat_priv);
332}
333
334void tt_local_remove(struct bat_priv *bat_priv,
335 uint8_t *addr, char *message)
336{
337 struct tt_local_entry *tt_local_entry;
338
339 spin_lock_bh(&bat_priv->tt_lhash_lock);
340 382
341 tt_local_entry = tt_local_hash_find(bat_priv, addr); 383 tt_local_entry = tt_local_hash_find(bat_priv, addr);
384 if (!tt_local_entry)
385 goto out;
342 386
343 if (tt_local_entry) 387 tt_local_set_pending(bat_priv, tt_local_entry, TT_CLIENT_DEL |
344 tt_local_del(bat_priv, tt_local_entry, message); 388 (roaming ? TT_CLIENT_ROAM : NO_FLAGS));
345 389
346 spin_unlock_bh(&bat_priv->tt_lhash_lock); 390 bat_dbg(DBG_TT, bat_priv, "Local tt entry (%pM) pending to be removed: "
391 "%s\n", tt_local_entry->addr, message);
392out:
393 if (tt_local_entry)
394 tt_local_entry_free_ref(tt_local_entry);
347} 395}
348 396
349static void tt_local_purge(struct work_struct *work) 397static void tt_local_purge(struct bat_priv *bat_priv)
350{ 398{
351 struct delayed_work *delayed_work =
352 container_of(work, struct delayed_work, work);
353 struct bat_priv *bat_priv =
354 container_of(delayed_work, struct bat_priv, tt_work);
355 struct hashtable_t *hash = bat_priv->tt_local_hash; 399 struct hashtable_t *hash = bat_priv->tt_local_hash;
356 struct tt_local_entry *tt_local_entry; 400 struct tt_local_entry *tt_local_entry;
357 struct hlist_node *node, *node_tmp; 401 struct hlist_node *node, *node_tmp;
358 struct hlist_head *head; 402 struct hlist_head *head;
359 unsigned long timeout; 403 spinlock_t *list_lock; /* protects write access to the hash lists */
360 int i; 404 int i;
361 405
362 spin_lock_bh(&bat_priv->tt_lhash_lock);
363
364 for (i = 0; i < hash->size; i++) { 406 for (i = 0; i < hash->size; i++) {
365 head = &hash->table[i]; 407 head = &hash->table[i];
408 list_lock = &hash->list_locks[i];
366 409
410 spin_lock_bh(list_lock);
367 hlist_for_each_entry_safe(tt_local_entry, node, node_tmp, 411 hlist_for_each_entry_safe(tt_local_entry, node, node_tmp,
368 head, hash_entry) { 412 head, hash_entry) {
369 if (tt_local_entry->never_purge) 413 if (tt_local_entry->flags & TT_CLIENT_NOPURGE)
370 continue; 414 continue;
371 415
372 timeout = tt_local_entry->last_seen; 416 /* entry already marked for deletion */
373 timeout += TT_LOCAL_TIMEOUT * HZ; 417 if (tt_local_entry->flags & TT_CLIENT_PENDING)
418 continue;
374 419
375 if (time_before(jiffies, timeout)) 420 if (!is_out_of_time(tt_local_entry->last_seen,
421 TT_LOCAL_TIMEOUT * 1000))
376 continue; 422 continue;
377 423
378 tt_local_del(bat_priv, tt_local_entry, 424 tt_local_set_pending(bat_priv, tt_local_entry,
379 "address timed out"); 425 TT_CLIENT_DEL);
426 bat_dbg(DBG_TT, bat_priv, "Local tt entry (%pM) "
427 "pending to be removed: timed out\n",
428 tt_local_entry->addr);
380 } 429 }
430 spin_unlock_bh(list_lock);
381 } 431 }
382 432
383 spin_unlock_bh(&bat_priv->tt_lhash_lock);
384 tt_local_start_timer(bat_priv);
385} 433}
386 434
387void tt_local_free(struct bat_priv *bat_priv) 435static void tt_local_table_free(struct bat_priv *bat_priv)
388{ 436{
437 struct hashtable_t *hash;
438 spinlock_t *list_lock; /* protects write access to the hash lists */
439 struct tt_local_entry *tt_local_entry;
440 struct hlist_node *node, *node_tmp;
441 struct hlist_head *head;
442 int i;
443
389 if (!bat_priv->tt_local_hash) 444 if (!bat_priv->tt_local_hash)
390 return; 445 return;
391 446
392 cancel_delayed_work_sync(&bat_priv->tt_work); 447 hash = bat_priv->tt_local_hash;
393 hash_delete(bat_priv->tt_local_hash, _tt_local_del, bat_priv); 448
449 for (i = 0; i < hash->size; i++) {
450 head = &hash->table[i];
451 list_lock = &hash->list_locks[i];
452
453 spin_lock_bh(list_lock);
454 hlist_for_each_entry_safe(tt_local_entry, node, node_tmp,
455 head, hash_entry) {
456 hlist_del_rcu(node);
457 tt_local_entry_free_ref(tt_local_entry);
458 }
459 spin_unlock_bh(list_lock);
460 }
461
462 hash_destroy(hash);
463
394 bat_priv->tt_local_hash = NULL; 464 bat_priv->tt_local_hash = NULL;
395} 465}
396 466
397int tt_global_init(struct bat_priv *bat_priv) 467static int tt_global_init(struct bat_priv *bat_priv)
398{ 468{
399 if (bat_priv->tt_global_hash) 469 if (bat_priv->tt_global_hash)
400 return 1; 470 return 1;
@@ -407,74 +477,78 @@ int tt_global_init(struct bat_priv *bat_priv)
407 return 1; 477 return 1;
408} 478}
409 479
410void tt_global_add_orig(struct bat_priv *bat_priv, 480static void tt_changes_list_free(struct bat_priv *bat_priv)
411 struct orig_node *orig_node,
412 unsigned char *tt_buff, int tt_buff_len)
413{ 481{
414 struct tt_global_entry *tt_global_entry; 482 struct tt_change_node *entry, *safe;
415 struct tt_local_entry *tt_local_entry;
416 int tt_buff_count = 0;
417 unsigned char *tt_ptr;
418 483
419 while ((tt_buff_count + 1) * ETH_ALEN <= tt_buff_len) { 484 spin_lock_bh(&bat_priv->tt_changes_list_lock);
420 spin_lock_bh(&bat_priv->tt_ghash_lock);
421 485
422 tt_ptr = tt_buff + (tt_buff_count * ETH_ALEN); 486 list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
423 tt_global_entry = tt_global_hash_find(bat_priv, tt_ptr); 487 list) {
424 488 list_del(&entry->list);
425 if (!tt_global_entry) { 489 kfree(entry);
426 spin_unlock_bh(&bat_priv->tt_ghash_lock); 490 }
427
428 tt_global_entry =
429 kmalloc(sizeof(struct tt_global_entry),
430 GFP_ATOMIC);
431
432 if (!tt_global_entry)
433 break;
434 491
435 memcpy(tt_global_entry->addr, tt_ptr, ETH_ALEN); 492 atomic_set(&bat_priv->tt_local_changes, 0);
493 spin_unlock_bh(&bat_priv->tt_changes_list_lock);
494}
436 495
437 bat_dbg(DBG_ROUTES, bat_priv, 496/* caller must hold orig_node refcount */
438 "Creating new global tt entry: " 497int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
439 "%pM (via %pM)\n", 498 const unsigned char *tt_addr, uint8_t ttvn, bool roaming)
440 tt_global_entry->addr, orig_node->orig); 499{
500 struct tt_global_entry *tt_global_entry;
501 struct orig_node *orig_node_tmp;
502 int ret = 0;
441 503
442 spin_lock_bh(&bat_priv->tt_ghash_lock); 504 tt_global_entry = tt_global_hash_find(bat_priv, tt_addr);
443 hash_add(bat_priv->tt_global_hash, compare_gtt,
444 choose_orig, tt_global_entry,
445 &tt_global_entry->hash_entry);
446 505
447 } 506 if (!tt_global_entry) {
507 tt_global_entry =
508 kmalloc(sizeof(*tt_global_entry),
509 GFP_ATOMIC);
510 if (!tt_global_entry)
511 goto out;
448 512
513 memcpy(tt_global_entry->addr, tt_addr, ETH_ALEN);
514 /* Assign the new orig_node */
515 atomic_inc(&orig_node->refcount);
449 tt_global_entry->orig_node = orig_node; 516 tt_global_entry->orig_node = orig_node;
450 spin_unlock_bh(&bat_priv->tt_ghash_lock); 517 tt_global_entry->ttvn = ttvn;
451 518 tt_global_entry->flags = NO_FLAGS;
452 /* remove address from local hash if present */ 519 tt_global_entry->roam_at = 0;
453 spin_lock_bh(&bat_priv->tt_lhash_lock); 520 atomic_set(&tt_global_entry->refcount, 2);
454 521
455 tt_ptr = tt_buff + (tt_buff_count * ETH_ALEN); 522 hash_add(bat_priv->tt_global_hash, compare_gtt,
456 tt_local_entry = tt_local_hash_find(bat_priv, tt_ptr); 523 choose_orig, tt_global_entry,
457 524 &tt_global_entry->hash_entry);
458 if (tt_local_entry) 525 atomic_inc(&orig_node->tt_size);
459 tt_local_del(bat_priv, tt_local_entry, 526 } else {
460 "global tt received"); 527 if (tt_global_entry->orig_node != orig_node) {
461 528 atomic_dec(&tt_global_entry->orig_node->tt_size);
462 spin_unlock_bh(&bat_priv->tt_lhash_lock); 529 orig_node_tmp = tt_global_entry->orig_node;
463 530 atomic_inc(&orig_node->refcount);
464 tt_buff_count++; 531 tt_global_entry->orig_node = orig_node;
532 orig_node_free_ref(orig_node_tmp);
533 atomic_inc(&orig_node->tt_size);
534 }
535 tt_global_entry->ttvn = ttvn;
536 tt_global_entry->flags = NO_FLAGS;
537 tt_global_entry->roam_at = 0;
465 } 538 }
466 539
467 /* initialize, and overwrite if malloc succeeds */ 540 bat_dbg(DBG_TT, bat_priv,
468 orig_node->tt_buff = NULL; 541 "Creating new global tt entry: %pM (via %pM)\n",
469 orig_node->tt_buff_len = 0; 542 tt_global_entry->addr, orig_node->orig);
470 543
471 if (tt_buff_len > 0) { 544 /* remove address from local hash if present */
472 orig_node->tt_buff = kmalloc(tt_buff_len, GFP_ATOMIC); 545 tt_local_remove(bat_priv, tt_global_entry->addr,
473 if (orig_node->tt_buff) { 546 "global tt received", roaming);
474 memcpy(orig_node->tt_buff, tt_buff, tt_buff_len); 547 ret = 1;
475 orig_node->tt_buff_len = tt_buff_len; 548out:
476 } 549 if (tt_global_entry)
477 } 550 tt_global_entry_free_ref(tt_global_entry);
551 return ret;
478} 552}
479 553
480int tt_global_seq_print_text(struct seq_file *seq, void *offset) 554int tt_global_seq_print_text(struct seq_file *seq, void *offset)
@@ -508,26 +582,27 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset)
508 seq_printf(seq, 582 seq_printf(seq,
509 "Globally announced TT entries received via the mesh %s\n", 583 "Globally announced TT entries received via the mesh %s\n",
510 net_dev->name); 584 net_dev->name);
511 585 seq_printf(seq, " %-13s %s %-15s %s\n",
512 spin_lock_bh(&bat_priv->tt_ghash_lock); 586 "Client", "(TTVN)", "Originator", "(Curr TTVN)");
513 587
514 buf_size = 1; 588 buf_size = 1;
515 /* Estimate length for: " * xx:xx:xx:xx:xx:xx via xx:xx:xx:xx:xx:xx\n"*/ 589 /* Estimate length for: " * xx:xx:xx:xx:xx:xx (ttvn) via
590 * xx:xx:xx:xx:xx:xx (cur_ttvn)\n"*/
516 for (i = 0; i < hash->size; i++) { 591 for (i = 0; i < hash->size; i++) {
517 head = &hash->table[i]; 592 head = &hash->table[i];
518 593
519 rcu_read_lock(); 594 rcu_read_lock();
520 __hlist_for_each_rcu(node, head) 595 __hlist_for_each_rcu(node, head)
521 buf_size += 43; 596 buf_size += 59;
522 rcu_read_unlock(); 597 rcu_read_unlock();
523 } 598 }
524 599
525 buff = kmalloc(buf_size, GFP_ATOMIC); 600 buff = kmalloc(buf_size, GFP_ATOMIC);
526 if (!buff) { 601 if (!buff) {
527 spin_unlock_bh(&bat_priv->tt_ghash_lock);
528 ret = -ENOMEM; 602 ret = -ENOMEM;
529 goto out; 603 goto out;
530 } 604 }
605
531 buff[0] = '\0'; 606 buff[0] = '\0';
532 pos = 0; 607 pos = 0;
533 608
@@ -537,16 +612,18 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset)
537 rcu_read_lock(); 612 rcu_read_lock();
538 hlist_for_each_entry_rcu(tt_global_entry, node, 613 hlist_for_each_entry_rcu(tt_global_entry, node,
539 head, hash_entry) { 614 head, hash_entry) {
540 pos += snprintf(buff + pos, 44, 615 pos += snprintf(buff + pos, 61,
541 " * %pM via %pM\n", 616 " * %pM (%3u) via %pM (%3u)\n",
542 tt_global_entry->addr, 617 tt_global_entry->addr,
543 tt_global_entry->orig_node->orig); 618 tt_global_entry->ttvn,
619 tt_global_entry->orig_node->orig,
620 (uint8_t) atomic_read(
621 &tt_global_entry->orig_node->
622 last_ttvn));
544 } 623 }
545 rcu_read_unlock(); 624 rcu_read_unlock();
546 } 625 }
547 626
548 spin_unlock_bh(&bat_priv->tt_ghash_lock);
549
550 seq_printf(seq, "%s", buff); 627 seq_printf(seq, "%s", buff);
551 kfree(buff); 628 kfree(buff);
552out: 629out:
@@ -555,84 +632,1091 @@ out:
555 return ret; 632 return ret;
556} 633}
557 634
558static void _tt_global_del_orig(struct bat_priv *bat_priv, 635static void _tt_global_del(struct bat_priv *bat_priv,
559 struct tt_global_entry *tt_global_entry, 636 struct tt_global_entry *tt_global_entry,
560 char *message) 637 const char *message)
561{ 638{
562 bat_dbg(DBG_ROUTES, bat_priv, 639 if (!tt_global_entry)
640 goto out;
641
642 bat_dbg(DBG_TT, bat_priv,
563 "Deleting global tt entry %pM (via %pM): %s\n", 643 "Deleting global tt entry %pM (via %pM): %s\n",
564 tt_global_entry->addr, tt_global_entry->orig_node->orig, 644 tt_global_entry->addr, tt_global_entry->orig_node->orig,
565 message); 645 message);
566 646
647 atomic_dec(&tt_global_entry->orig_node->tt_size);
648
567 hash_remove(bat_priv->tt_global_hash, compare_gtt, choose_orig, 649 hash_remove(bat_priv->tt_global_hash, compare_gtt, choose_orig,
568 tt_global_entry->addr); 650 tt_global_entry->addr);
569 kfree(tt_global_entry); 651out:
652 if (tt_global_entry)
653 tt_global_entry_free_ref(tt_global_entry);
570} 654}
571 655
572void tt_global_del_orig(struct bat_priv *bat_priv, 656void tt_global_del(struct bat_priv *bat_priv,
573 struct orig_node *orig_node, char *message) 657 struct orig_node *orig_node, const unsigned char *addr,
658 const char *message, bool roaming)
574{ 659{
575 struct tt_global_entry *tt_global_entry; 660 struct tt_global_entry *tt_global_entry = NULL;
576 int tt_buff_count = 0;
577 unsigned char *tt_ptr;
578 661
579 if (orig_node->tt_buff_len == 0) 662 tt_global_entry = tt_global_hash_find(bat_priv, addr);
580 return; 663 if (!tt_global_entry)
664 goto out;
581 665
582 spin_lock_bh(&bat_priv->tt_ghash_lock); 666 if (tt_global_entry->orig_node == orig_node) {
667 if (roaming) {
668 tt_global_entry->flags |= TT_CLIENT_ROAM;
669 tt_global_entry->roam_at = jiffies;
670 goto out;
671 }
672 _tt_global_del(bat_priv, tt_global_entry, message);
673 }
674out:
675 if (tt_global_entry)
676 tt_global_entry_free_ref(tt_global_entry);
677}
583 678
584 while ((tt_buff_count + 1) * ETH_ALEN <= orig_node->tt_buff_len) { 679void tt_global_del_orig(struct bat_priv *bat_priv,
585 tt_ptr = orig_node->tt_buff + (tt_buff_count * ETH_ALEN); 680 struct orig_node *orig_node, const char *message)
586 tt_global_entry = tt_global_hash_find(bat_priv, tt_ptr); 681{
682 struct tt_global_entry *tt_global_entry;
683 int i;
684 struct hashtable_t *hash = bat_priv->tt_global_hash;
685 struct hlist_node *node, *safe;
686 struct hlist_head *head;
687 spinlock_t *list_lock; /* protects write access to the hash lists */
587 688
588 if ((tt_global_entry) && 689 for (i = 0; i < hash->size; i++) {
589 (tt_global_entry->orig_node == orig_node)) 690 head = &hash->table[i];
590 _tt_global_del_orig(bat_priv, tt_global_entry, 691 list_lock = &hash->list_locks[i];
591 message);
592 692
593 tt_buff_count++; 693 spin_lock_bh(list_lock);
694 hlist_for_each_entry_safe(tt_global_entry, node, safe,
695 head, hash_entry) {
696 if (tt_global_entry->orig_node == orig_node) {
697 bat_dbg(DBG_TT, bat_priv,
698 "Deleting global tt entry %pM "
699 "(via %pM): originator time out\n",
700 tt_global_entry->addr,
701 tt_global_entry->orig_node->orig);
702 hlist_del_rcu(node);
703 tt_global_entry_free_ref(tt_global_entry);
704 }
705 }
706 spin_unlock_bh(list_lock);
594 } 707 }
595 708 atomic_set(&orig_node->tt_size, 0);
596 spin_unlock_bh(&bat_priv->tt_ghash_lock);
597
598 orig_node->tt_buff_len = 0;
599 kfree(orig_node->tt_buff);
600 orig_node->tt_buff = NULL;
601} 709}
602 710
603static void tt_global_del(struct hlist_node *node, void *arg) 711static void tt_global_roam_purge(struct bat_priv *bat_priv)
604{ 712{
605 void *data = container_of(node, struct tt_global_entry, hash_entry); 713 struct hashtable_t *hash = bat_priv->tt_global_hash;
714 struct tt_global_entry *tt_global_entry;
715 struct hlist_node *node, *node_tmp;
716 struct hlist_head *head;
717 spinlock_t *list_lock; /* protects write access to the hash lists */
718 int i;
719
720 for (i = 0; i < hash->size; i++) {
721 head = &hash->table[i];
722 list_lock = &hash->list_locks[i];
723
724 spin_lock_bh(list_lock);
725 hlist_for_each_entry_safe(tt_global_entry, node, node_tmp,
726 head, hash_entry) {
727 if (!(tt_global_entry->flags & TT_CLIENT_ROAM))
728 continue;
729 if (!is_out_of_time(tt_global_entry->roam_at,
730 TT_CLIENT_ROAM_TIMEOUT * 1000))
731 continue;
732
733 bat_dbg(DBG_TT, bat_priv, "Deleting global "
734 "tt entry (%pM): Roaming timeout\n",
735 tt_global_entry->addr);
736 atomic_dec(&tt_global_entry->orig_node->tt_size);
737 hlist_del_rcu(node);
738 tt_global_entry_free_ref(tt_global_entry);
739 }
740 spin_unlock_bh(list_lock);
741 }
606 742
607 kfree(data);
608} 743}
609 744
610void tt_global_free(struct bat_priv *bat_priv) 745static void tt_global_table_free(struct bat_priv *bat_priv)
611{ 746{
747 struct hashtable_t *hash;
748 spinlock_t *list_lock; /* protects write access to the hash lists */
749 struct tt_global_entry *tt_global_entry;
750 struct hlist_node *node, *node_tmp;
751 struct hlist_head *head;
752 int i;
753
612 if (!bat_priv->tt_global_hash) 754 if (!bat_priv->tt_global_hash)
613 return; 755 return;
614 756
615 hash_delete(bat_priv->tt_global_hash, tt_global_del, NULL); 757 hash = bat_priv->tt_global_hash;
758
759 for (i = 0; i < hash->size; i++) {
760 head = &hash->table[i];
761 list_lock = &hash->list_locks[i];
762
763 spin_lock_bh(list_lock);
764 hlist_for_each_entry_safe(tt_global_entry, node, node_tmp,
765 head, hash_entry) {
766 hlist_del_rcu(node);
767 tt_global_entry_free_ref(tt_global_entry);
768 }
769 spin_unlock_bh(list_lock);
770 }
771
772 hash_destroy(hash);
773
616 bat_priv->tt_global_hash = NULL; 774 bat_priv->tt_global_hash = NULL;
617} 775}
618 776
619struct orig_node *transtable_search(struct bat_priv *bat_priv, uint8_t *addr) 777struct orig_node *transtable_search(struct bat_priv *bat_priv,
778 const uint8_t *addr)
620{ 779{
621 struct tt_global_entry *tt_global_entry; 780 struct tt_global_entry *tt_global_entry;
622 struct orig_node *orig_node = NULL; 781 struct orig_node *orig_node = NULL;
623 782
624 spin_lock_bh(&bat_priv->tt_ghash_lock);
625 tt_global_entry = tt_global_hash_find(bat_priv, addr); 783 tt_global_entry = tt_global_hash_find(bat_priv, addr);
626 784
627 if (!tt_global_entry) 785 if (!tt_global_entry)
628 goto out; 786 goto out;
629 787
630 if (!atomic_inc_not_zero(&tt_global_entry->orig_node->refcount)) 788 if (!atomic_inc_not_zero(&tt_global_entry->orig_node->refcount))
631 goto out; 789 goto free_tt;
790
791 /* A global client marked as PENDING has already moved from that
792 * originator */
793 if (tt_global_entry->flags & TT_CLIENT_PENDING)
794 goto free_tt;
632 795
633 orig_node = tt_global_entry->orig_node; 796 orig_node = tt_global_entry->orig_node;
634 797
798free_tt:
799 tt_global_entry_free_ref(tt_global_entry);
635out: 800out:
636 spin_unlock_bh(&bat_priv->tt_ghash_lock);
637 return orig_node; 801 return orig_node;
638} 802}
803
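Note that transtable_search() returns with a reference held on the originator (the atomic_inc_not_zero() above), so every caller must release it. A sketch of the intended use in the unicast transmit path (surrounding names assumed):

	struct orig_node *orig_node;

	orig_node = transtable_search(bat_priv, ethhdr->h_dest);
	if (orig_node) {
		/* ... address the unicast packet to orig_node->orig ... */
		orig_node_free_ref(orig_node); /* drop the search reference */
	}
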
804/* Calculates the checksum of the local table of a given orig_node */
805uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node)
806{
807 uint16_t total = 0, total_one;
808 struct hashtable_t *hash = bat_priv->tt_global_hash;
809 struct tt_global_entry *tt_global_entry;
810 struct hlist_node *node;
811 struct hlist_head *head;
812 int i, j;
813
814 for (i = 0; i < hash->size; i++) {
815 head = &hash->table[i];
816
817 rcu_read_lock();
818 hlist_for_each_entry_rcu(tt_global_entry, node,
819 head, hash_entry) {
820 if (compare_eth(tt_global_entry->orig_node,
821 orig_node)) {
822 /* Roaming clients are in the global table for
 823 * consistency only. They must not be
824 * taken into account while computing the
825 * global crc */
826 if (tt_global_entry->flags & TT_CLIENT_ROAM)
827 continue;
828 total_one = 0;
829 for (j = 0; j < ETH_ALEN; j++)
830 total_one = crc16_byte(total_one,
831 tt_global_entry->addr[j]);
832 total ^= total_one;
833 }
834 }
835 rcu_read_unlock();
836 }
837
838 return total;
839}
840
841/* Calculates the checksum of the local table */
842uint16_t tt_local_crc(struct bat_priv *bat_priv)
843{
844 uint16_t total = 0, total_one;
845 struct hashtable_t *hash = bat_priv->tt_local_hash;
846 struct tt_local_entry *tt_local_entry;
847 struct hlist_node *node;
848 struct hlist_head *head;
849 int i, j;
850
851 for (i = 0; i < hash->size; i++) {
852 head = &hash->table[i];
853
854 rcu_read_lock();
855 hlist_for_each_entry_rcu(tt_local_entry, node,
856 head, hash_entry) {
 857 /* not yet committed clients must not be taken into
 858 * account while computing the CRC */
859 if (tt_local_entry->flags & TT_CLIENT_NEW)
860 continue;
861 total_one = 0;
862 for (j = 0; j < ETH_ALEN; j++)
863 total_one = crc16_byte(total_one,
864 tt_local_entry->addr[j]);
865 total ^= total_one;
866 }
867 rcu_read_unlock();
868 }
869
870 return total;
871}
872
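Both checksum routines above compute one CRC16 per client MAC and XOR the results, so the total does not depend on hash-iteration order and two nodes walking differently laid-out tables still agree. A self-contained userspace model of the scheme; crc16_byte() is reimplemented here with the reflected 0xA001 polynomial, which is assumed to match the kernel's lib/crc16:

	#include <stdint.h>
	#include <stdio.h>

	#define ETH_ALEN 6

	static uint16_t crc16_byte(uint16_t crc, uint8_t data)
	{
		int i;

		crc ^= data;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xA001 : 0);
		return crc;
	}

	int main(void)
	{
		static const uint8_t clients[][ETH_ALEN] = {
			{ 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 },
			{ 0x02, 0xaa, 0xbb, 0xcc, 0xdd, 0xee },
		};
		uint16_t total = 0, total_one;
		unsigned int i, j;

		for (i = 0; i < sizeof(clients) / sizeof(clients[0]); i++) {
			total_one = 0;
			for (j = 0; j < ETH_ALEN; j++)
				total_one = crc16_byte(total_one, clients[i][j]);
			total ^= total_one; /* XOR keeps the sum order independent */
		}
		printf("table crc: 0x%04x\n", total);
		return 0;
	}
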
873static void tt_req_list_free(struct bat_priv *bat_priv)
874{
875 struct tt_req_node *node, *safe;
876
877 spin_lock_bh(&bat_priv->tt_req_list_lock);
878
879 list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
880 list_del(&node->list);
881 kfree(node);
882 }
883
884 spin_unlock_bh(&bat_priv->tt_req_list_lock);
885}
886
887void tt_save_orig_buffer(struct bat_priv *bat_priv, struct orig_node *orig_node,
888 const unsigned char *tt_buff, uint8_t tt_num_changes)
889{
890 uint16_t tt_buff_len = tt_len(tt_num_changes);
891
892 /* Replace the old buffer only if I received something in the
893 * last OGM (the OGM could carry no changes) */
894 spin_lock_bh(&orig_node->tt_buff_lock);
895 if (tt_buff_len > 0) {
896 kfree(orig_node->tt_buff);
897 orig_node->tt_buff_len = 0;
898 orig_node->tt_buff = kmalloc(tt_buff_len, GFP_ATOMIC);
899 if (orig_node->tt_buff) {
900 memcpy(orig_node->tt_buff, tt_buff, tt_buff_len);
901 orig_node->tt_buff_len = tt_buff_len;
902 }
903 }
904 spin_unlock_bh(&orig_node->tt_buff_lock);
905}
906
907static void tt_req_purge(struct bat_priv *bat_priv)
908{
909 struct tt_req_node *node, *safe;
910
911 spin_lock_bh(&bat_priv->tt_req_list_lock);
912 list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
913 if (is_out_of_time(node->issued_at,
914 TT_REQUEST_TIMEOUT * 1000)) {
915 list_del(&node->list);
916 kfree(node);
917 }
918 }
919 spin_unlock_bh(&bat_priv->tt_req_list_lock);
920}
921
922/* returns the pointer to the new tt_req_node struct if no request
923 * has already been issued for this orig_node, NULL otherwise */
924static struct tt_req_node *new_tt_req_node(struct bat_priv *bat_priv,
925 struct orig_node *orig_node)
926{
927 struct tt_req_node *tt_req_node_tmp, *tt_req_node = NULL;
928
929 spin_lock_bh(&bat_priv->tt_req_list_lock);
930 list_for_each_entry(tt_req_node_tmp, &bat_priv->tt_req_list, list) {
931 if (compare_eth(tt_req_node_tmp, orig_node) &&
932 !is_out_of_time(tt_req_node_tmp->issued_at,
933 TT_REQUEST_TIMEOUT * 1000))
934 goto unlock;
935 }
936
937 tt_req_node = kmalloc(sizeof(*tt_req_node), GFP_ATOMIC);
938 if (!tt_req_node)
939 goto unlock;
940
941 memcpy(tt_req_node->addr, orig_node->orig, ETH_ALEN);
942 tt_req_node->issued_at = jiffies;
943
944 list_add(&tt_req_node->list, &bat_priv->tt_req_list);
945unlock:
946 spin_unlock_bh(&bat_priv->tt_req_list_lock);
947 return tt_req_node;
948}
949
 950/* data_ptr is unused here, but has to be kept to respect the prototype */
951static int tt_local_valid_entry(const void *entry_ptr, const void *data_ptr)
952{
953 const struct tt_local_entry *tt_local_entry = entry_ptr;
954
955 if (tt_local_entry->flags & TT_CLIENT_NEW)
956 return 0;
957 return 1;
958}
959
960static int tt_global_valid_entry(const void *entry_ptr, const void *data_ptr)
961{
962 const struct tt_global_entry *tt_global_entry = entry_ptr;
963 const struct orig_node *orig_node = data_ptr;
964
965 if (tt_global_entry->flags & TT_CLIENT_ROAM)
966 return 0;
967
968 return (tt_global_entry->orig_node == orig_node);
969}
970
971static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
972 struct hashtable_t *hash,
973 struct hard_iface *primary_if,
974 int (*valid_cb)(const void *,
975 const void *),
976 void *cb_data)
977{
978 struct tt_local_entry *tt_local_entry;
979 struct tt_query_packet *tt_response;
980 struct tt_change *tt_change;
981 struct hlist_node *node;
982 struct hlist_head *head;
983 struct sk_buff *skb = NULL;
984 uint16_t tt_tot, tt_count;
985 ssize_t tt_query_size = sizeof(struct tt_query_packet);
986 int i;
987
988 if (tt_query_size + tt_len > primary_if->soft_iface->mtu) {
989 tt_len = primary_if->soft_iface->mtu - tt_query_size;
990 tt_len -= tt_len % sizeof(struct tt_change);
991 }
992 tt_tot = tt_len / sizeof(struct tt_change);
993
994 skb = dev_alloc_skb(tt_query_size + tt_len + ETH_HLEN);
995 if (!skb)
996 goto out;
997
998 skb_reserve(skb, ETH_HLEN);
999 tt_response = (struct tt_query_packet *)skb_put(skb,
1000 tt_query_size + tt_len);
1001 tt_response->ttvn = ttvn;
1002 tt_response->tt_data = htons(tt_tot);
1003
1004 tt_change = (struct tt_change *)(skb->data + tt_query_size);
1005 tt_count = 0;
1006
1007 rcu_read_lock();
1008 for (i = 0; i < hash->size; i++) {
1009 head = &hash->table[i];
1010
1011 hlist_for_each_entry_rcu(tt_local_entry, node,
1012 head, hash_entry) {
1013 if (tt_count == tt_tot)
1014 break;
1015
1016 if ((valid_cb) && (!valid_cb(tt_local_entry, cb_data)))
1017 continue;
1018
1019 memcpy(tt_change->addr, tt_local_entry->addr, ETH_ALEN);
1020 tt_change->flags = NO_FLAGS;
1021
1022 tt_count++;
1023 tt_change++;
1024 }
1025 }
1026 rcu_read_unlock();
1027
1028out:
1029 return skb;
1030}
1031
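The clamp at the top of tt_response_fill_table() rounds the payload down to a whole number of tt_change records, so a response never carries a truncated entry. A runnable model of just that arithmetic (the 20-byte header and the 7-byte packed record are illustrative stand-ins, not the real packet format):

	#include <stdio.h>

	struct tt_change_model {
		unsigned char flags;
		unsigned char addr[6];
	} __attribute__((packed));

	int main(void)
	{
		int mtu = 1500, tt_query_size = 20; /* assumed header size */
		int tt_len = 4096;                  /* requested, exceeds the MTU */

		if (tt_query_size + tt_len > mtu) {
			tt_len = mtu - tt_query_size;
			tt_len -= tt_len % (int)sizeof(struct tt_change_model);
		}
		printf("%d whole changes fit\n",
		       tt_len / (int)sizeof(struct tt_change_model));
		return 0;
	}
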
1032int send_tt_request(struct bat_priv *bat_priv, struct orig_node *dst_orig_node,
1033 uint8_t ttvn, uint16_t tt_crc, bool full_table)
1034{
1035 struct sk_buff *skb = NULL;
1036 struct tt_query_packet *tt_request;
1037 struct neigh_node *neigh_node = NULL;
1038 struct hard_iface *primary_if;
1039 struct tt_req_node *tt_req_node = NULL;
1040 int ret = 1;
1041
1042 primary_if = primary_if_get_selected(bat_priv);
1043 if (!primary_if)
1044 goto out;
1045
1046 /* The new tt_req will be issued only if I'm not waiting for a
1047 * reply from the same orig_node yet */
1048 tt_req_node = new_tt_req_node(bat_priv, dst_orig_node);
1049 if (!tt_req_node)
1050 goto out;
1051
1052 skb = dev_alloc_skb(sizeof(struct tt_query_packet) + ETH_HLEN);
1053 if (!skb)
1054 goto out;
1055
1056 skb_reserve(skb, ETH_HLEN);
1057
1058 tt_request = (struct tt_query_packet *)skb_put(skb,
1059 sizeof(struct tt_query_packet));
1060
1061 tt_request->packet_type = BAT_TT_QUERY;
1062 tt_request->version = COMPAT_VERSION;
1063 memcpy(tt_request->src, primary_if->net_dev->dev_addr, ETH_ALEN);
1064 memcpy(tt_request->dst, dst_orig_node->orig, ETH_ALEN);
1065 tt_request->ttl = TTL;
1066 tt_request->ttvn = ttvn;
1067 tt_request->tt_data = tt_crc;
1068 tt_request->flags = TT_REQUEST;
1069
1070 if (full_table)
1071 tt_request->flags |= TT_FULL_TABLE;
1072
1073 neigh_node = orig_node_get_router(dst_orig_node);
1074 if (!neigh_node)
1075 goto out;
1076
1077 bat_dbg(DBG_TT, bat_priv, "Sending TT_REQUEST to %pM via %pM "
1078 "[%c]\n", dst_orig_node->orig, neigh_node->addr,
1079 (full_table ? 'F' : '.'));
1080
1081 send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
1082 ret = 0;
1083
1084out:
1085 if (neigh_node)
1086 neigh_node_free_ref(neigh_node);
1087 if (primary_if)
1088 hardif_free_ref(primary_if);
1089 if (ret)
1090 kfree_skb(skb);
1091 if (ret && tt_req_node) {
1092 spin_lock_bh(&bat_priv->tt_req_list_lock);
1093 list_del(&tt_req_node->list);
1094 spin_unlock_bh(&bat_priv->tt_req_list_lock);
1095 kfree(tt_req_node);
1096 }
1097 return ret;
1098}
1099
1100static bool send_other_tt_response(struct bat_priv *bat_priv,
1101 struct tt_query_packet *tt_request)
1102{
1103 struct orig_node *req_dst_orig_node = NULL, *res_dst_orig_node = NULL;
1104 struct neigh_node *neigh_node = NULL;
1105 struct hard_iface *primary_if = NULL;
1106 uint8_t orig_ttvn, req_ttvn, ttvn;
1107 int ret = false;
1108 unsigned char *tt_buff;
1109 bool full_table;
1110 uint16_t tt_len, tt_tot;
1111 struct sk_buff *skb = NULL;
1112 struct tt_query_packet *tt_response;
1113
1114 bat_dbg(DBG_TT, bat_priv,
1115 "Received TT_REQUEST from %pM for "
1116 "ttvn: %u (%pM) [%c]\n", tt_request->src,
1117 tt_request->ttvn, tt_request->dst,
1118 (tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));
1119
1120 /* Let's get the orig node of the REAL destination */
1121 req_dst_orig_node = get_orig_node(bat_priv, tt_request->dst);
1122 if (!req_dst_orig_node)
1123 goto out;
1124
1125 res_dst_orig_node = get_orig_node(bat_priv, tt_request->src);
1126 if (!res_dst_orig_node)
1127 goto out;
1128
1129 neigh_node = orig_node_get_router(res_dst_orig_node);
1130 if (!neigh_node)
1131 goto out;
1132
1133 primary_if = primary_if_get_selected(bat_priv);
1134 if (!primary_if)
1135 goto out;
1136
1137 orig_ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
1138 req_ttvn = tt_request->ttvn;
1139
 1140 /* I don't have the requested data */
1141 if (orig_ttvn != req_ttvn ||
1142 tt_request->tt_data != req_dst_orig_node->tt_crc)
1143 goto out;
1144
 1145 /* If the full table has explicitly been requested */
1146 if (tt_request->flags & TT_FULL_TABLE ||
1147 !req_dst_orig_node->tt_buff)
1148 full_table = true;
1149 else
1150 full_table = false;
1151
 1152 /* In this version, fragmentation is not implemented, so
 1153 * I'll send only one packet with as many TT entries as fit */
1154 if (!full_table) {
1155 spin_lock_bh(&req_dst_orig_node->tt_buff_lock);
1156 tt_len = req_dst_orig_node->tt_buff_len;
1157 tt_tot = tt_len / sizeof(struct tt_change);
1158
1159 skb = dev_alloc_skb(sizeof(struct tt_query_packet) +
1160 tt_len + ETH_HLEN);
1161 if (!skb)
1162 goto unlock;
1163
1164 skb_reserve(skb, ETH_HLEN);
1165 tt_response = (struct tt_query_packet *)skb_put(skb,
1166 sizeof(struct tt_query_packet) + tt_len);
1167 tt_response->ttvn = req_ttvn;
1168 tt_response->tt_data = htons(tt_tot);
1169
1170 tt_buff = skb->data + sizeof(struct tt_query_packet);
1171 /* Copy the last orig_node's OGM buffer */
1172 memcpy(tt_buff, req_dst_orig_node->tt_buff,
1173 req_dst_orig_node->tt_buff_len);
1174
1175 spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);
1176 } else {
1177 tt_len = (uint16_t)atomic_read(&req_dst_orig_node->tt_size) *
1178 sizeof(struct tt_change);
1179 ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
1180
1181 skb = tt_response_fill_table(tt_len, ttvn,
1182 bat_priv->tt_global_hash,
1183 primary_if, tt_global_valid_entry,
1184 req_dst_orig_node);
1185 if (!skb)
1186 goto out;
1187
1188 tt_response = (struct tt_query_packet *)skb->data;
1189 }
1190
1191 tt_response->packet_type = BAT_TT_QUERY;
1192 tt_response->version = COMPAT_VERSION;
1193 tt_response->ttl = TTL;
1194 memcpy(tt_response->src, req_dst_orig_node->orig, ETH_ALEN);
1195 memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
1196 tt_response->flags = TT_RESPONSE;
1197
1198 if (full_table)
1199 tt_response->flags |= TT_FULL_TABLE;
1200
1201 bat_dbg(DBG_TT, bat_priv,
1202 "Sending TT_RESPONSE %pM via %pM for %pM (ttvn: %u)\n",
1203 res_dst_orig_node->orig, neigh_node->addr,
1204 req_dst_orig_node->orig, req_ttvn);
1205
1206 send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
1207 ret = true;
1208 goto out;
1209
1210unlock:
1211 spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);
1212
1213out:
1214 if (res_dst_orig_node)
1215 orig_node_free_ref(res_dst_orig_node);
1216 if (req_dst_orig_node)
1217 orig_node_free_ref(req_dst_orig_node);
1218 if (neigh_node)
1219 neigh_node_free_ref(neigh_node);
1220 if (primary_if)
1221 hardif_free_ref(primary_if);
1222 if (!ret)
1223 kfree_skb(skb);
1224 return ret;
 1225}
 1226
 1227static bool send_my_tt_response(struct bat_priv *bat_priv,
1228 struct tt_query_packet *tt_request)
1229{
1230 struct orig_node *orig_node = NULL;
1231 struct neigh_node *neigh_node = NULL;
1232 struct hard_iface *primary_if = NULL;
1233 uint8_t my_ttvn, req_ttvn, ttvn;
1234 int ret = false;
1235 unsigned char *tt_buff;
1236 bool full_table;
1237 uint16_t tt_len, tt_tot;
1238 struct sk_buff *skb = NULL;
1239 struct tt_query_packet *tt_response;
1240
1241 bat_dbg(DBG_TT, bat_priv,
1242 "Received TT_REQUEST from %pM for "
1243 "ttvn: %u (me) [%c]\n", tt_request->src,
1244 tt_request->ttvn,
1245 (tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));
1246
1247
1248 my_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
1249 req_ttvn = tt_request->ttvn;
1250
1251 orig_node = get_orig_node(bat_priv, tt_request->src);
1252 if (!orig_node)
1253 goto out;
1254
1255 neigh_node = orig_node_get_router(orig_node);
1256 if (!neigh_node)
1257 goto out;
1258
1259 primary_if = primary_if_get_selected(bat_priv);
1260 if (!primary_if)
1261 goto out;
1262
1263 /* If the full table has been explicitly requested or the gap
 1264 * is too big, send the whole local translation table */
1265 if (tt_request->flags & TT_FULL_TABLE || my_ttvn != req_ttvn ||
1266 !bat_priv->tt_buff)
1267 full_table = true;
1268 else
1269 full_table = false;
1270
 1271 /* In this version, fragmentation is not implemented, so
 1272 * I'll send only one packet with as many TT entries as fit */
1273 if (!full_table) {
1274 spin_lock_bh(&bat_priv->tt_buff_lock);
1275 tt_len = bat_priv->tt_buff_len;
1276 tt_tot = tt_len / sizeof(struct tt_change);
1277
1278 skb = dev_alloc_skb(sizeof(struct tt_query_packet) +
1279 tt_len + ETH_HLEN);
1280 if (!skb)
1281 goto unlock;
1282
1283 skb_reserve(skb, ETH_HLEN);
1284 tt_response = (struct tt_query_packet *)skb_put(skb,
1285 sizeof(struct tt_query_packet) + tt_len);
1286 tt_response->ttvn = req_ttvn;
1287 tt_response->tt_data = htons(tt_tot);
1288
1289 tt_buff = skb->data + sizeof(struct tt_query_packet);
1290 memcpy(tt_buff, bat_priv->tt_buff,
1291 bat_priv->tt_buff_len);
1292 spin_unlock_bh(&bat_priv->tt_buff_lock);
1293 } else {
1294 tt_len = (uint16_t)atomic_read(&bat_priv->num_local_tt) *
1295 sizeof(struct tt_change);
1296 ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
1297
1298 skb = tt_response_fill_table(tt_len, ttvn,
1299 bat_priv->tt_local_hash,
1300 primary_if, tt_local_valid_entry,
1301 NULL);
1302 if (!skb)
1303 goto out;
1304
1305 tt_response = (struct tt_query_packet *)skb->data;
1306 }
1307
1308 tt_response->packet_type = BAT_TT_QUERY;
1309 tt_response->version = COMPAT_VERSION;
1310 tt_response->ttl = TTL;
1311 memcpy(tt_response->src, primary_if->net_dev->dev_addr, ETH_ALEN);
1312 memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
1313 tt_response->flags = TT_RESPONSE;
1314
1315 if (full_table)
1316 tt_response->flags |= TT_FULL_TABLE;
1317
1318 bat_dbg(DBG_TT, bat_priv,
1319 "Sending TT_RESPONSE to %pM via %pM [%c]\n",
1320 orig_node->orig, neigh_node->addr,
1321 (tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));
1322
1323 send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
1324 ret = true;
1325 goto out;
1326
1327unlock:
1328 spin_unlock_bh(&bat_priv->tt_buff_lock);
1329out:
1330 if (orig_node)
1331 orig_node_free_ref(orig_node);
1332 if (neigh_node)
1333 neigh_node_free_ref(neigh_node);
1334 if (primary_if)
1335 hardif_free_ref(primary_if);
1336 if (!ret)
1337 kfree_skb(skb);
1338 /* This packet was for me, so it doesn't need to be re-routed */
1339 return true;
1340}
1341
1342bool send_tt_response(struct bat_priv *bat_priv,
1343 struct tt_query_packet *tt_request)
1344{
1345 if (is_my_mac(tt_request->dst))
1346 return send_my_tt_response(bat_priv, tt_request);
1347 else
1348 return send_other_tt_response(bat_priv, tt_request);
1349}
1350
1351static void _tt_update_changes(struct bat_priv *bat_priv,
1352 struct orig_node *orig_node,
1353 struct tt_change *tt_change,
1354 uint16_t tt_num_changes, uint8_t ttvn)
1355{
1356 int i;
1357
1358 for (i = 0; i < tt_num_changes; i++) {
1359 if ((tt_change + i)->flags & TT_CLIENT_DEL)
1360 tt_global_del(bat_priv, orig_node,
1361 (tt_change + i)->addr,
1362 "tt removed by changes",
1363 (tt_change + i)->flags & TT_CLIENT_ROAM);
1364 else
1365 if (!tt_global_add(bat_priv, orig_node,
1366 (tt_change + i)->addr, ttvn, false))
 1367 /* In case of a problem while storing a
 1368 * global_entry, we stop the updating
 1369 * procedure without committing the
 1370 * ttvn change. This avoids sending
 1371 * corrupted data in reply to a tt_request
1372 */
1373 return;
1374 }
1375}
1376
1377static void tt_fill_gtable(struct bat_priv *bat_priv,
1378 struct tt_query_packet *tt_response)
1379{
1380 struct orig_node *orig_node = NULL;
1381
1382 orig_node = orig_hash_find(bat_priv, tt_response->src);
1383 if (!orig_node)
1384 goto out;
1385
1386 /* Purge the old table first.. */
1387 tt_global_del_orig(bat_priv, orig_node, "Received full table");
1388
1389 _tt_update_changes(bat_priv, orig_node,
1390 (struct tt_change *)(tt_response + 1),
1391 tt_response->tt_data, tt_response->ttvn);
1392
1393 spin_lock_bh(&orig_node->tt_buff_lock);
1394 kfree(orig_node->tt_buff);
1395 orig_node->tt_buff_len = 0;
1396 orig_node->tt_buff = NULL;
1397 spin_unlock_bh(&orig_node->tt_buff_lock);
1398
1399 atomic_set(&orig_node->last_ttvn, tt_response->ttvn);
1400
1401out:
1402 if (orig_node)
1403 orig_node_free_ref(orig_node);
1404}
1405
1406void tt_update_changes(struct bat_priv *bat_priv, struct orig_node *orig_node,
1407 uint16_t tt_num_changes, uint8_t ttvn,
1408 struct tt_change *tt_change)
1409{
1410 _tt_update_changes(bat_priv, orig_node, tt_change, tt_num_changes,
1411 ttvn);
1412
1413 tt_save_orig_buffer(bat_priv, orig_node, (unsigned char *)tt_change,
1414 tt_num_changes);
1415 atomic_set(&orig_node->last_ttvn, ttvn);
1416}
1417
1418bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr)
1419{
1420 struct tt_local_entry *tt_local_entry = NULL;
1421 bool ret = false;
1422
1423 tt_local_entry = tt_local_hash_find(bat_priv, addr);
1424 if (!tt_local_entry)
1425 goto out;
1426 /* Check if the client has been logically deleted (but is kept for
 1427 * consistency purposes) */
1428 if (tt_local_entry->flags & TT_CLIENT_PENDING)
1429 goto out;
1430 ret = true;
1431out:
1432 if (tt_local_entry)
1433 tt_local_entry_free_ref(tt_local_entry);
1434 return ret;
1435}
1436
1437void handle_tt_response(struct bat_priv *bat_priv,
1438 struct tt_query_packet *tt_response)
1439{
1440 struct tt_req_node *node, *safe;
1441 struct orig_node *orig_node = NULL;
1442
1443 bat_dbg(DBG_TT, bat_priv, "Received TT_RESPONSE from %pM for "
1444 "ttvn %d t_size: %d [%c]\n",
1445 tt_response->src, tt_response->ttvn,
1446 tt_response->tt_data,
1447 (tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));
1448
1449 orig_node = orig_hash_find(bat_priv, tt_response->src);
1450 if (!orig_node)
1451 goto out;
1452
1453 if (tt_response->flags & TT_FULL_TABLE)
1454 tt_fill_gtable(bat_priv, tt_response);
1455 else
1456 tt_update_changes(bat_priv, orig_node, tt_response->tt_data,
1457 tt_response->ttvn,
1458 (struct tt_change *)(tt_response + 1));
1459
1460 /* Delete the tt_req_node from pending tt_requests list */
1461 spin_lock_bh(&bat_priv->tt_req_list_lock);
1462 list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
1463 if (!compare_eth(node->addr, tt_response->src))
1464 continue;
1465 list_del(&node->list);
1466 kfree(node);
1467 }
1468 spin_unlock_bh(&bat_priv->tt_req_list_lock);
1469
1470 /* Recalculate the CRC for this orig_node and store it */
1471 orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);
1472 /* Roaming phase is over: tables are in sync again. I can
1473 * unset the flag */
1474 orig_node->tt_poss_change = false;
1475out:
1476 if (orig_node)
1477 orig_node_free_ref(orig_node);
1478}
1479
1480int tt_init(struct bat_priv *bat_priv)
1481{
1482 if (!tt_local_init(bat_priv))
1483 return 0;
1484
1485 if (!tt_global_init(bat_priv))
1486 return 0;
1487
1488 tt_start_timer(bat_priv);
1489
1490 return 1;
1491}
1492
1493static void tt_roam_list_free(struct bat_priv *bat_priv)
1494{
1495 struct tt_roam_node *node, *safe;
1496
1497 spin_lock_bh(&bat_priv->tt_roam_list_lock);
1498
1499 list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
1500 list_del(&node->list);
1501 kfree(node);
1502 }
1503
1504 spin_unlock_bh(&bat_priv->tt_roam_list_lock);
1505}
1506
1507static void tt_roam_purge(struct bat_priv *bat_priv)
1508{
1509 struct tt_roam_node *node, *safe;
1510
1511 spin_lock_bh(&bat_priv->tt_roam_list_lock);
1512 list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
1513 if (!is_out_of_time(node->first_time,
1514 ROAMING_MAX_TIME * 1000))
1515 continue;
1516
1517 list_del(&node->list);
1518 kfree(node);
1519 }
1520 spin_unlock_bh(&bat_priv->tt_roam_list_lock);
1521}
1522
1523/* This function checks whether the client already reached the
 1524 * maximum number of possible roaming phases. If so, the ROAMING_ADV
1525 * will not be sent.
1526 *
1527 * returns true if the ROAMING_ADV can be sent, false otherwise */
1528static bool tt_check_roam_count(struct bat_priv *bat_priv,
1529 uint8_t *client)
1530{
1531 struct tt_roam_node *tt_roam_node;
1532 bool ret = false;
1533
1534 spin_lock_bh(&bat_priv->tt_roam_list_lock);
 1535 /* Check how many times this client has already roamed within the
 1536 * current time window before allowing another advertisement */
1537 list_for_each_entry(tt_roam_node, &bat_priv->tt_roam_list, list) {
1538 if (!compare_eth(tt_roam_node->addr, client))
1539 continue;
1540
1541 if (is_out_of_time(tt_roam_node->first_time,
1542 ROAMING_MAX_TIME * 1000))
1543 continue;
1544
1545 if (!atomic_dec_not_zero(&tt_roam_node->counter))
1546 /* Sorry, you roamed too many times! */
1547 goto unlock;
1548 ret = true;
1549 break;
1550 }
1551
1552 if (!ret) {
1553 tt_roam_node = kmalloc(sizeof(*tt_roam_node), GFP_ATOMIC);
1554 if (!tt_roam_node)
1555 goto unlock;
1556
1557 tt_roam_node->first_time = jiffies;
1558 atomic_set(&tt_roam_node->counter, ROAMING_MAX_COUNT - 1);
1559 memcpy(tt_roam_node->addr, client, ETH_ALEN);
1560
1561 list_add(&tt_roam_node->list, &bat_priv->tt_roam_list);
1562 ret = true;
1563 }
1564
1565unlock:
1566 spin_unlock_bh(&bat_priv->tt_roam_list_lock);
1567 return ret;
1568}
1569
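tt_check_roam_count() is effectively a per-client token bucket: the first roaming event creates the node with ROAMING_MAX_COUNT - 1 tokens left, and each further event inside the ROAMING_MAX_TIME window consumes one via atomic_dec_not_zero(). A toy userspace model of the accounting (the constant's value is an assumption; see main.h for the real one):

	#include <stdbool.h>
	#include <stdio.h>

	#define ROAMING_MAX_COUNT 5 /* assumed value */

	static bool seen;
	static int tokens;

	static bool roam_allowed(void)
	{
		if (!seen) { /* first event creates the tt_roam_node */
			seen = true;
			tokens = ROAMING_MAX_COUNT - 1;
			return true;
		}
		if (tokens == 0) /* atomic_dec_not_zero() would fail */
			return false;
		tokens--;
		return true;
	}

	int main(void)
	{
		int i;

		for (i = 0; i < ROAMING_MAX_COUNT + 2; i++)
			printf("roam %d: %s\n", i,
			       roam_allowed() ? "advertised" : "suppressed");
		return 0;
	}
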
1570void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
1571 struct orig_node *orig_node)
1572{
1573 struct neigh_node *neigh_node = NULL;
1574 struct sk_buff *skb = NULL;
1575 struct roam_adv_packet *roam_adv_packet;
1576 int ret = 1;
1577 struct hard_iface *primary_if;
1578
1579 /* before going on we have to check whether the client has
1580 * already roamed to us too many times */
1581 if (!tt_check_roam_count(bat_priv, client))
1582 goto out;
1583
1584 skb = dev_alloc_skb(sizeof(struct roam_adv_packet) + ETH_HLEN);
1585 if (!skb)
1586 goto out;
1587
1588 skb_reserve(skb, ETH_HLEN);
1589
1590 roam_adv_packet = (struct roam_adv_packet *)skb_put(skb,
1591 sizeof(struct roam_adv_packet));
1592
1593 roam_adv_packet->packet_type = BAT_ROAM_ADV;
1594 roam_adv_packet->version = COMPAT_VERSION;
1595 roam_adv_packet->ttl = TTL;
1596 primary_if = primary_if_get_selected(bat_priv);
1597 if (!primary_if)
1598 goto out;
1599 memcpy(roam_adv_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
1600 hardif_free_ref(primary_if);
1601 memcpy(roam_adv_packet->dst, orig_node->orig, ETH_ALEN);
1602 memcpy(roam_adv_packet->client, client, ETH_ALEN);
1603
1604 neigh_node = orig_node_get_router(orig_node);
1605 if (!neigh_node)
1606 goto out;
1607
1608 bat_dbg(DBG_TT, bat_priv,
1609 "Sending ROAMING_ADV to %pM (client %pM) via %pM\n",
1610 orig_node->orig, client, neigh_node->addr);
1611
1612 send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
1613 ret = 0;
1614
1615out:
1616 if (neigh_node)
1617 neigh_node_free_ref(neigh_node);
1618 if (ret)
1619 kfree_skb(skb);
1620 return;
1621}
1622
1623static void tt_purge(struct work_struct *work)
1624{
1625 struct delayed_work *delayed_work =
1626 container_of(work, struct delayed_work, work);
1627 struct bat_priv *bat_priv =
1628 container_of(delayed_work, struct bat_priv, tt_work);
1629
1630 tt_local_purge(bat_priv);
1631 tt_global_roam_purge(bat_priv);
1632 tt_req_purge(bat_priv);
1633 tt_roam_purge(bat_priv);
1634
1635 tt_start_timer(bat_priv);
1636}
1637
1638void tt_free(struct bat_priv *bat_priv)
1639{
1640 cancel_delayed_work_sync(&bat_priv->tt_work);
1641
1642 tt_local_table_free(bat_priv);
1643 tt_global_table_free(bat_priv);
1644 tt_req_list_free(bat_priv);
1645 tt_changes_list_free(bat_priv);
1646 tt_roam_list_free(bat_priv);
1647
1648 kfree(bat_priv->tt_buff);
1649}
1650
1651/* This function clears the specified flags on all the entries in
1652 * the given hash table and increments num_local_tt for each involved
1653 * entry */
1654static void tt_local_reset_flags(struct bat_priv *bat_priv, uint16_t flags)
1655{
1656 int i;
1657 struct hashtable_t *hash = bat_priv->tt_local_hash;
1658 struct hlist_head *head;
1659 struct hlist_node *node;
1660 struct tt_local_entry *tt_local_entry;
1661
1662 if (!hash)
1663 return;
1664
1665 for (i = 0; i < hash->size; i++) {
1666 head = &hash->table[i];
1667
1668 rcu_read_lock();
1669 hlist_for_each_entry_rcu(tt_local_entry, node,
1670 head, hash_entry) {
1671 tt_local_entry->flags &= ~flags;
1672 atomic_inc(&bat_priv->num_local_tt);
1673 }
1674 rcu_read_unlock();
1675 }
 1676}
 1677
1678
1679/* Purge out all the tt local entries marked with TT_CLIENT_PENDING */
1680static void tt_local_purge_pending_clients(struct bat_priv *bat_priv)
1681{
1682 struct hashtable_t *hash = bat_priv->tt_local_hash;
1683 struct tt_local_entry *tt_local_entry;
1684 struct hlist_node *node, *node_tmp;
1685 struct hlist_head *head;
1686 spinlock_t *list_lock; /* protects write access to the hash lists */
1687 int i;
1688
1689 if (!hash)
1690 return;
1691
1692 for (i = 0; i < hash->size; i++) {
1693 head = &hash->table[i];
1694 list_lock = &hash->list_locks[i];
1695
1696 spin_lock_bh(list_lock);
1697 hlist_for_each_entry_safe(tt_local_entry, node, node_tmp,
1698 head, hash_entry) {
1699 if (!(tt_local_entry->flags & TT_CLIENT_PENDING))
1700 continue;
1701
1702 bat_dbg(DBG_TT, bat_priv, "Deleting local tt entry "
1703 "(%pM): pending\n", tt_local_entry->addr);
1704
1705 atomic_dec(&bat_priv->num_local_tt);
1706 hlist_del_rcu(node);
1707 tt_local_entry_free_ref(tt_local_entry);
1708 }
1709 spin_unlock_bh(list_lock);
1710 }
1711
1712}
1713
1714void tt_commit_changes(struct bat_priv *bat_priv)
1715{
1716 tt_local_reset_flags(bat_priv, TT_CLIENT_NEW);
1717 tt_local_purge_pending_clients(bat_priv);
1718
1719 /* Increment the TTVN only once per OGM interval */
1720 atomic_inc(&bat_priv->ttvn);
1721 bat_priv->tt_poss_change = false;
1722}
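Putting the pieces of translation-table.c together, the sender-side sequence once per OGM interval comes down to roughly the following (a sketch stitched from the functions above; the real caller and its exact ordering live in send.c, outside this file):

	/* hypothetical per-OGM-interval flow on the announcing node */
	if (atomic_read(&bat_priv->tt_local_changes) > 0) {
		tt_commit_changes(bat_priv);  /* purge PENDING clients, ttvn++ */
		tt_num_changes = tt_changes_fill_buffer(bat_priv, buff, buff_len);
		tt_crc = tt_local_crc(bat_priv);
	}
	/* receivers compare the advertised ttvn/crc against their view and
	 * fall back to send_tt_request() when the two diverge */
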
diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h
index 46152c38cc9..d4122cba53b 100644
--- a/net/batman-adv/translation-table.h
+++ b/net/batman-adv/translation-table.h
@@ -22,22 +22,45 @@
22#ifndef _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ 22#ifndef _NET_BATMAN_ADV_TRANSLATION_TABLE_H_
23#define _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ 23#define _NET_BATMAN_ADV_TRANSLATION_TABLE_H_
24 24
25int tt_local_init(struct bat_priv *bat_priv); 25int tt_len(int changes_num);
26void tt_local_add(struct net_device *soft_iface, uint8_t *addr); 26int tt_changes_fill_buffer(struct bat_priv *bat_priv,
27 unsigned char *buff, int buff_len);
28int tt_init(struct bat_priv *bat_priv);
29void tt_local_add(struct net_device *soft_iface, const uint8_t *addr);
27void tt_local_remove(struct bat_priv *bat_priv, 30void tt_local_remove(struct bat_priv *bat_priv,
28 uint8_t *addr, char *message); 31 const uint8_t *addr, const char *message, bool roaming);
29int tt_local_fill_buffer(struct bat_priv *bat_priv,
30 unsigned char *buff, int buff_len);
31int tt_local_seq_print_text(struct seq_file *seq, void *offset); 32int tt_local_seq_print_text(struct seq_file *seq, void *offset);
32void tt_local_free(struct bat_priv *bat_priv); 33void tt_global_add_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
33int tt_global_init(struct bat_priv *bat_priv); 34 const unsigned char *tt_buff, int tt_buff_len);
34void tt_global_add_orig(struct bat_priv *bat_priv, 35int tt_global_add(struct bat_priv *bat_priv,
35 struct orig_node *orig_node, 36 struct orig_node *orig_node, const unsigned char *addr,
36 unsigned char *tt_buff, int tt_buff_len); 37 uint8_t ttvn, bool roaming);
37int tt_global_seq_print_text(struct seq_file *seq, void *offset); 38int tt_global_seq_print_text(struct seq_file *seq, void *offset);
38void tt_global_del_orig(struct bat_priv *bat_priv, 39void tt_global_del_orig(struct bat_priv *bat_priv,
39 struct orig_node *orig_node, char *message); 40 struct orig_node *orig_node, const char *message);
40void tt_global_free(struct bat_priv *bat_priv); 41void tt_global_del(struct bat_priv *bat_priv,
41struct orig_node *transtable_search(struct bat_priv *bat_priv, uint8_t *addr); 42 struct orig_node *orig_node, const unsigned char *addr,
43 const char *message, bool roaming);
44struct orig_node *transtable_search(struct bat_priv *bat_priv,
45 const uint8_t *addr);
46void tt_save_orig_buffer(struct bat_priv *bat_priv, struct orig_node *orig_node,
47 const unsigned char *tt_buff, uint8_t tt_num_changes);
48uint16_t tt_local_crc(struct bat_priv *bat_priv);
49uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node);
50void tt_free(struct bat_priv *bat_priv);
51int send_tt_request(struct bat_priv *bat_priv,
 52 struct orig_node *dst_orig_node, uint8_t ttvn,
53 uint16_t tt_crc, bool full_table);
54bool send_tt_response(struct bat_priv *bat_priv,
55 struct tt_query_packet *tt_request);
56void tt_update_changes(struct bat_priv *bat_priv, struct orig_node *orig_node,
57 uint16_t tt_num_changes, uint8_t ttvn,
58 struct tt_change *tt_change);
59bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr);
60void handle_tt_response(struct bat_priv *bat_priv,
61 struct tt_query_packet *tt_response);
62void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
63 struct orig_node *orig_node);
64void tt_commit_changes(struct bat_priv *bat_priv);
42 65
43#endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */ 66#endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index fab70e8b16e..25bd1db3537 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -75,8 +75,18 @@ struct orig_node {
75 unsigned long batman_seqno_reset; 75 unsigned long batman_seqno_reset;
76 uint8_t gw_flags; 76 uint8_t gw_flags;
77 uint8_t flags; 77 uint8_t flags;
78 atomic_t last_ttvn; /* last seen translation table version number */
79 uint16_t tt_crc;
78 unsigned char *tt_buff; 80 unsigned char *tt_buff;
79 int16_t tt_buff_len; 81 int16_t tt_buff_len;
82 spinlock_t tt_buff_lock; /* protects tt_buff */
83 atomic_t tt_size;
84 /* The tt_poss_change flag is used to detect an ongoing roaming phase.
85 * If true, then I sent a Roaming_adv to this orig_node and I have to
86 * inspect every packet directed to it to check whether it is still
87 * the true destination or not. This flag will be reset to false as
88 * soon as I receive a new TTVN from this orig_node */
89 bool tt_poss_change;
80 uint32_t last_real_seqno; 90 uint32_t last_real_seqno;
81 uint8_t last_ttl; 91 uint8_t last_ttl;
82 unsigned long bcast_bits[NUM_WORDS]; 92 unsigned long bcast_bits[NUM_WORDS];
@@ -94,6 +104,7 @@ struct orig_node {
94 spinlock_t ogm_cnt_lock; 104 spinlock_t ogm_cnt_lock;
95 /* bcast_seqno_lock protects bcast_bits, last_bcast_seqno */ 105 /* bcast_seqno_lock protects bcast_bits, last_bcast_seqno */
96 spinlock_t bcast_seqno_lock; 106 spinlock_t bcast_seqno_lock;
107 spinlock_t tt_list_lock; /* protects tt_list */
97 atomic_t bond_candidates; 108 atomic_t bond_candidates;
98 struct list_head bond_list; 109 struct list_head bond_list;
99}; 110};
@@ -145,6 +156,15 @@ struct bat_priv {
145 atomic_t bcast_seqno; 156 atomic_t bcast_seqno;
146 atomic_t bcast_queue_left; 157 atomic_t bcast_queue_left;
147 atomic_t batman_queue_left; 158 atomic_t batman_queue_left;
159 atomic_t ttvn; /* translation table version number */
160 atomic_t tt_ogm_append_cnt;
161 atomic_t tt_local_changes; /* changes registered in an OGM interval */
162 /* The tt_poss_change flag is used to detect an ongoing roaming phase.
163 * If true, then I received a Roaming_adv and I have to inspect every
164 * packet directed to me to check whether I am still the true
165 * destination or not. This flag will be reset to false as soon as I
166 * increase my TTVN */
167 bool tt_poss_change;
148 char num_ifaces; 168 char num_ifaces;
149 struct debug_log *debug_log; 169 struct debug_log *debug_log;
150 struct kobject *mesh_obj; 170 struct kobject *mesh_obj;
@@ -153,26 +173,35 @@ struct bat_priv {
153 struct hlist_head forw_bcast_list; 173 struct hlist_head forw_bcast_list;
154 struct hlist_head gw_list; 174 struct hlist_head gw_list;
155 struct hlist_head softif_neigh_vids; 175 struct hlist_head softif_neigh_vids;
176 struct list_head tt_changes_list; /* tracks changes in an OGM interval */
156 struct list_head vis_send_list; 177 struct list_head vis_send_list;
157 struct hashtable_t *orig_hash; 178 struct hashtable_t *orig_hash;
158 struct hashtable_t *tt_local_hash; 179 struct hashtable_t *tt_local_hash;
159 struct hashtable_t *tt_global_hash; 180 struct hashtable_t *tt_global_hash;
181 struct list_head tt_req_list; /* list of pending tt_requests */
182 struct list_head tt_roam_list;
160 struct hashtable_t *vis_hash; 183 struct hashtable_t *vis_hash;
161 spinlock_t forw_bat_list_lock; /* protects forw_bat_list */ 184 spinlock_t forw_bat_list_lock; /* protects forw_bat_list */
162 spinlock_t forw_bcast_list_lock; /* protects forw_bcast_list */ 185 spinlock_t forw_bcast_list_lock; /* protects forw_bcast_list */
163 spinlock_t tt_lhash_lock; /* protects tt_local_hash */ 186 spinlock_t tt_changes_list_lock; /* protects tt_changes */
164 spinlock_t tt_ghash_lock; /* protects tt_global_hash */ 187 spinlock_t tt_req_list_lock; /* protects tt_req_list */
188 spinlock_t tt_roam_list_lock; /* protects tt_roam_list */
165 spinlock_t gw_list_lock; /* protects gw_list and curr_gw */ 189 spinlock_t gw_list_lock; /* protects gw_list and curr_gw */
166 spinlock_t vis_hash_lock; /* protects vis_hash */ 190 spinlock_t vis_hash_lock; /* protects vis_hash */
167 spinlock_t vis_list_lock; /* protects vis_info::recv_list */ 191 spinlock_t vis_list_lock; /* protects vis_info::recv_list */
168 spinlock_t softif_neigh_lock; /* protects soft-interface neigh list */ 192 spinlock_t softif_neigh_lock; /* protects soft-interface neigh list */
169 spinlock_t softif_neigh_vid_lock; /* protects soft-interface vid list */ 193 spinlock_t softif_neigh_vid_lock; /* protects soft-interface vid list */
170 int16_t num_local_tt; 194 atomic_t num_local_tt;
171 atomic_t tt_local_changed; 195 /* Checksum of the local table, recomputed before sending a new OGM */
196 atomic_t tt_crc;
197 unsigned char *tt_buff;
198 int16_t tt_buff_len;
199 spinlock_t tt_buff_lock; /* protects tt_buff */
172 struct delayed_work tt_work; 200 struct delayed_work tt_work;
173 struct delayed_work orig_work; 201 struct delayed_work orig_work;
174 struct delayed_work vis_work; 202 struct delayed_work vis_work;
175 struct gw_node __rcu *curr_gw; /* rcu protected pointer */ 203 struct gw_node __rcu *curr_gw; /* rcu protected pointer */
204 atomic_t gw_reselect;
176 struct hard_iface __rcu *primary_if; /* rcu protected pointer */ 205 struct hard_iface __rcu *primary_if; /* rcu protected pointer */
177 struct vis_info *my_vis_info; 206 struct vis_info *my_vis_info;
178}; 207};
@@ -195,14 +224,39 @@ struct socket_packet {
195struct tt_local_entry { 224struct tt_local_entry {
196 uint8_t addr[ETH_ALEN]; 225 uint8_t addr[ETH_ALEN];
197 unsigned long last_seen; 226 unsigned long last_seen;
198 char never_purge; 227 uint16_t flags;
228 atomic_t refcount;
229 struct rcu_head rcu;
199 struct hlist_node hash_entry; 230 struct hlist_node hash_entry;
200}; 231};
201 232
202struct tt_global_entry { 233struct tt_global_entry {
203 uint8_t addr[ETH_ALEN]; 234 uint8_t addr[ETH_ALEN];
204 struct orig_node *orig_node; 235 struct orig_node *orig_node;
205 struct hlist_node hash_entry; 236 uint8_t ttvn;
237 uint16_t flags; /* only TT_GLOBAL_ROAM is used */
238 unsigned long roam_at; /* time at which TT_GLOBAL_ROAM was set */
239 atomic_t refcount;
240 struct rcu_head rcu;
241 struct hlist_node hash_entry; /* entry in the global table */
242};
243
244struct tt_change_node {
245 struct list_head list;
246 struct tt_change change;
247};
248
249struct tt_req_node {
250 uint8_t addr[ETH_ALEN];
251 unsigned long issued_at;
252 struct list_head list;
253};
254
255struct tt_roam_node {
256 uint8_t addr[ETH_ALEN];
257 atomic_t counter;
258 unsigned long first_time;
259 struct list_head list;
206}; 260};
207 261
208/** 262/**
@@ -246,10 +300,10 @@ struct frag_packet_list_entry {
246}; 300};
247 301
248struct vis_info { 302struct vis_info {
249 unsigned long first_seen; 303 unsigned long first_seen;
250 struct list_head recv_list; 304 /* list of server-neighbors we received a vis-packet
251 /* list of server-neighbors we received a vis-packet 305 * from. we should not reply to them. */
252 * from. we should not reply to them. */ 306 struct list_head recv_list;
253 struct list_head send_list; 307 struct list_head send_list;
254 struct kref refcount; 308 struct kref refcount;
255 struct hlist_node hash_entry; 309 struct hlist_node hash_entry;
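The tt_poss_change comments above describe a roaming window during which the usual "matching TTVN means the route is current" shortcut is unsafe. A minimal hedged illustration of that rule (helper name assumed, not part of the patch):

static bool tt_route_needs_recheck(struct orig_node *orig_node,
				   uint8_t packet_ttvn)
{
	uint8_t cur_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);

	/* a stale TTVN always forces a table lookup; while a roaming
	 * phase is suspected, even a matching TTVN cannot be trusted
	 * until a new table version arrives and clears the flag */
	return packet_ttvn != cur_ttvn || orig_node->tt_poss_change;
}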
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index 19c3daf34ac..32b125fb3d3 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -39,8 +39,8 @@ static struct sk_buff *frag_merge_packet(struct list_head *head,
39 (struct unicast_frag_packet *)skb->data; 39 (struct unicast_frag_packet *)skb->data;
40 struct sk_buff *tmp_skb; 40 struct sk_buff *tmp_skb;
41 struct unicast_packet *unicast_packet; 41 struct unicast_packet *unicast_packet;
42 int hdr_len = sizeof(struct unicast_packet); 42 int hdr_len = sizeof(*unicast_packet);
43 int uni_diff = sizeof(struct unicast_frag_packet) - hdr_len; 43 int uni_diff = sizeof(*up) - hdr_len;
44 44
45 /* set skb to the first part and tmp_skb to the second part */ 45 /* set skb to the first part and tmp_skb to the second part */
46 if (up->flags & UNI_FRAG_HEAD) { 46 if (up->flags & UNI_FRAG_HEAD) {
@@ -53,7 +53,7 @@ static struct sk_buff *frag_merge_packet(struct list_head *head,
53 if (skb_linearize(skb) < 0 || skb_linearize(tmp_skb) < 0) 53 if (skb_linearize(skb) < 0 || skb_linearize(tmp_skb) < 0)
54 goto err; 54 goto err;
55 55
56 skb_pull(tmp_skb, sizeof(struct unicast_frag_packet)); 56 skb_pull(tmp_skb, sizeof(*up));
57 if (pskb_expand_head(skb, 0, tmp_skb->len, GFP_ATOMIC) < 0) 57 if (pskb_expand_head(skb, 0, tmp_skb->len, GFP_ATOMIC) < 0)
58 goto err; 58 goto err;
59 59
@@ -99,8 +99,7 @@ static int frag_create_buffer(struct list_head *head)
99 struct frag_packet_list_entry *tfp; 99 struct frag_packet_list_entry *tfp;
100 100
101 for (i = 0; i < FRAG_BUFFER_SIZE; i++) { 101 for (i = 0; i < FRAG_BUFFER_SIZE; i++) {
102 tfp = kmalloc(sizeof(struct frag_packet_list_entry), 102 tfp = kmalloc(sizeof(*tfp), GFP_ATOMIC);
103 GFP_ATOMIC);
104 if (!tfp) { 103 if (!tfp) {
105 frag_list_free(head); 104 frag_list_free(head);
106 return -ENOMEM; 105 return -ENOMEM;
@@ -115,7 +114,7 @@ static int frag_create_buffer(struct list_head *head)
115} 114}
116 115
117static struct frag_packet_list_entry *frag_search_packet(struct list_head *head, 116static struct frag_packet_list_entry *frag_search_packet(struct list_head *head,
118 struct unicast_frag_packet *up) 117 const struct unicast_frag_packet *up)
119{ 118{
120 struct frag_packet_list_entry *tfp; 119 struct frag_packet_list_entry *tfp;
121 struct unicast_frag_packet *tmp_up = NULL; 120 struct unicast_frag_packet *tmp_up = NULL;
@@ -218,14 +217,14 @@ out:
218} 217}
219 218
220int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv, 219int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
221 struct hard_iface *hard_iface, uint8_t dstaddr[]) 220 struct hard_iface *hard_iface, const uint8_t dstaddr[])
222{ 221{
223 struct unicast_packet tmp_uc, *unicast_packet; 222 struct unicast_packet tmp_uc, *unicast_packet;
224 struct hard_iface *primary_if; 223 struct hard_iface *primary_if;
225 struct sk_buff *frag_skb; 224 struct sk_buff *frag_skb;
226 struct unicast_frag_packet *frag1, *frag2; 225 struct unicast_frag_packet *frag1, *frag2;
227 int uc_hdr_len = sizeof(struct unicast_packet); 226 int uc_hdr_len = sizeof(*unicast_packet);
228 int ucf_hdr_len = sizeof(struct unicast_frag_packet); 227 int ucf_hdr_len = sizeof(*frag1);
229 int data_len = skb->len - uc_hdr_len; 228 int data_len = skb->len - uc_hdr_len;
230 int large_tail = 0, ret = NET_RX_DROP; 229 int large_tail = 0, ret = NET_RX_DROP;
231 uint16_t seqno; 230 uint16_t seqno;
@@ -250,14 +249,14 @@ int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
250 frag1 = (struct unicast_frag_packet *)skb->data; 249 frag1 = (struct unicast_frag_packet *)skb->data;
251 frag2 = (struct unicast_frag_packet *)frag_skb->data; 250 frag2 = (struct unicast_frag_packet *)frag_skb->data;
252 251
253 memcpy(frag1, &tmp_uc, sizeof(struct unicast_packet)); 252 memcpy(frag1, &tmp_uc, sizeof(tmp_uc));
254 253
255 frag1->ttl--; 254 frag1->ttl--;
256 frag1->version = COMPAT_VERSION; 255 frag1->version = COMPAT_VERSION;
257 frag1->packet_type = BAT_UNICAST_FRAG; 256 frag1->packet_type = BAT_UNICAST_FRAG;
258 257
259 memcpy(frag1->orig, primary_if->net_dev->dev_addr, ETH_ALEN); 258 memcpy(frag1->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
260 memcpy(frag2, frag1, sizeof(struct unicast_frag_packet)); 259 memcpy(frag2, frag1, sizeof(*frag2));
261 260
262 if (data_len & 1) 261 if (data_len & 1)
263 large_tail = UNI_FRAG_LARGETAIL; 262 large_tail = UNI_FRAG_LARGETAIL;
@@ -295,7 +294,7 @@ int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv)
295 294
296 /* get routing information */ 295 /* get routing information */
297 if (is_multicast_ether_addr(ethhdr->h_dest)) { 296 if (is_multicast_ether_addr(ethhdr->h_dest)) {
298 orig_node = (struct orig_node *)gw_get_selected_orig(bat_priv); 297 orig_node = gw_get_selected_orig(bat_priv);
299 if (orig_node) 298 if (orig_node)
300 goto find_router; 299 goto find_router;
301 } 300 }
@@ -314,10 +313,7 @@ find_router:
314 if (!neigh_node) 313 if (!neigh_node)
315 goto out; 314 goto out;
316 315
317 if (neigh_node->if_incoming->if_status != IF_ACTIVE) 316 if (my_skb_head_push(skb, sizeof(*unicast_packet)) < 0)
318 goto out;
319
320 if (my_skb_head_push(skb, sizeof(struct unicast_packet)) < 0)
321 goto out; 317 goto out;
322 318
323 unicast_packet = (struct unicast_packet *)skb->data; 319 unicast_packet = (struct unicast_packet *)skb->data;
@@ -329,9 +325,12 @@ find_router:
329 unicast_packet->ttl = TTL; 325 unicast_packet->ttl = TTL;
330 /* copy the destination for faster routing */ 326 /* copy the destination for faster routing */
331 memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN); 327 memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
328 /* set the destination tt version number */
329 unicast_packet->ttvn =
330 (uint8_t)atomic_read(&orig_node->last_ttvn);
332 331
333 if (atomic_read(&bat_priv->fragmentation) && 332 if (atomic_read(&bat_priv->fragmentation) &&
334 data_len + sizeof(struct unicast_packet) > 333 data_len + sizeof(*unicast_packet) >
335 neigh_node->if_incoming->net_dev->mtu) { 334 neigh_node->if_incoming->net_dev->mtu) {
336 /* send frag skb decreases ttl */ 335 /* send frag skb decreases ttl */
337 unicast_packet->ttl++; 336 unicast_packet->ttl++;
diff --git a/net/batman-adv/unicast.h b/net/batman-adv/unicast.h
index 16ad7a9242b..62f54b95462 100644
--- a/net/batman-adv/unicast.h
+++ b/net/batman-adv/unicast.h
@@ -32,11 +32,11 @@ int frag_reassemble_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
32void frag_list_free(struct list_head *head); 32void frag_list_free(struct list_head *head);
33int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv); 33int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv);
34int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv, 34int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv,
35 struct hard_iface *hard_iface, uint8_t dstaddr[]); 35 struct hard_iface *hard_iface, const uint8_t dstaddr[]);
36 36
37static inline int frag_can_reassemble(struct sk_buff *skb, int mtu) 37static inline int frag_can_reassemble(const struct sk_buff *skb, int mtu)
38{ 38{
39 struct unicast_frag_packet *unicast_packet; 39 const struct unicast_frag_packet *unicast_packet;
40 int uneven_correction = 0; 40 int uneven_correction = 0;
41 unsigned int merged_size; 41 unsigned int merged_size;
42 42
@@ -49,7 +49,7 @@ static inline int frag_can_reassemble(struct sk_buff *skb, int mtu)
49 uneven_correction = -1; 49 uneven_correction = -1;
50 } 50 }
51 51
52 merged_size = (skb->len - sizeof(struct unicast_frag_packet)) * 2; 52 merged_size = (skb->len - sizeof(*unicast_packet)) * 2;
53 merged_size += sizeof(struct unicast_packet) + uneven_correction; 53 merged_size += sizeof(struct unicast_packet) + uneven_correction;
54 54
55 return merged_size <= mtu; 55 return merged_size <= mtu;
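A worked example of the frag_can_reassemble() check above, assuming 3.0-era header sizes of roughly 20 bytes for struct unicast_frag_packet and 10 bytes for struct unicast_packet (illustrative values only): a received 770-byte fragment carries 770 - 20 = 750 payload bytes, so merged_size = 750 * 2 + 10 = 1510, which exceeds an MTU of 1500, and reassembly is refused. With UNI_FRAG_LARGETAIL set, uneven_correction = -1 gives 1509, still over the limit; only fragments of 765 bytes or less would merge within that MTU.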
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
index c39f20cc1ba..8a1b98589d7 100644
--- a/net/batman-adv/vis.c
+++ b/net/batman-adv/vis.c
@@ -30,22 +30,6 @@
30 30
31#define MAX_VIS_PACKET_SIZE 1000 31#define MAX_VIS_PACKET_SIZE 1000
32 32
33/* Returns the smallest signed integer in two's complement with the sizeof x */
34#define smallest_signed_int(x) (1u << (7u + 8u * (sizeof(x) - 1u)))
35
36/* Checks if a sequence number x is a predecessor/successor of y.
37 * they handle overflows/underflows and can correctly check for a
38 * predecessor/successor unless the variable sequence number has grown by
39 * more than 2**(bitwidth(x)-1)-1.
40 * This means that for a uint8_t with the maximum value 255, it would think:
41 * - when adding nothing - it is neither a predecessor nor a successor
42 * - before adding more than 127 to the starting value - it is a predecessor,
43 * - when adding 128 - it is neither a predecessor nor a successor,
44 * - after adding more than 127 to the starting value - it is a successor */
45#define seq_before(x, y) ({typeof(x) _dummy = (x - y); \
46 _dummy > smallest_signed_int(_dummy); })
47#define seq_after(x, y) seq_before(y, x)
48
49static void start_vis_timer(struct bat_priv *bat_priv); 33static void start_vis_timer(struct bat_priv *bat_priv);
50 34
51/* free the info */ 35/* free the info */
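The wrap-around sequence-number macros removed above are subtle; a standalone userspace sketch of their behaviour (assuming only GCC's typeof and statement expressions, and that the macros live on in a shared header rather than being dropped outright):

#include <stdint.h>
#include <stdio.h>

#define smallest_signed_int(x) (1u << (7u + 8u * (sizeof(x) - 1u)))
#define seq_before(x, y) ({typeof(x) _d = (x - y); \
			   _d > smallest_signed_int(_d); })
#define seq_after(x, y) seq_before(y, x)

int main(void)
{
	uint8_t a = 250, b = 5;	/* b lies 11 steps after a, across the wrap */

	printf("%d\n", seq_before(a, b));	/* 1: 250 precedes 5 */
	printf("%d\n", seq_before(b, a));	/* 0: 5 does not precede 250 */
	return 0;
}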
@@ -68,10 +52,10 @@ static void free_info(struct kref *ref)
68} 52}
69 53
70/* Compare two vis packets, used by the hashing algorithm */ 54/* Compare two vis packets, used by the hashing algorithm */
71static int vis_info_cmp(struct hlist_node *node, void *data2) 55static int vis_info_cmp(const struct hlist_node *node, const void *data2)
72{ 56{
73 struct vis_info *d1, *d2; 57 const struct vis_info *d1, *d2;
74 struct vis_packet *p1, *p2; 58 const struct vis_packet *p1, *p2;
75 59
76 d1 = container_of(node, struct vis_info, hash_entry); 60 d1 = container_of(node, struct vis_info, hash_entry);
77 d2 = data2; 61 d2 = data2;
@@ -82,11 +66,11 @@ static int vis_info_cmp(struct hlist_node *node, void *data2)
82 66
83/* hash function to choose an entry in a hash table of given size */ 67/* hash function to choose an entry in a hash table of given size */
84/* hash algorithm from http://en.wikipedia.org/wiki/Hash_table */ 68/* hash algorithm from http://en.wikipedia.org/wiki/Hash_table */
85static int vis_info_choose(void *data, int size) 69static int vis_info_choose(const void *data, int size)
86{ 70{
87 struct vis_info *vis_info = data; 71 const struct vis_info *vis_info = data;
88 struct vis_packet *packet; 72 const struct vis_packet *packet;
89 unsigned char *key; 73 const unsigned char *key;
90 uint32_t hash = 0; 74 uint32_t hash = 0;
91 size_t i; 75 size_t i;
92 76
@@ -106,7 +90,7 @@ static int vis_info_choose(void *data, int size)
106} 90}
107 91
108static struct vis_info *vis_hash_find(struct bat_priv *bat_priv, 92static struct vis_info *vis_hash_find(struct bat_priv *bat_priv,
109 void *data) 93 const void *data)
110{ 94{
111 struct hashtable_t *hash = bat_priv->vis_hash; 95 struct hashtable_t *hash = bat_priv->vis_hash;
112 struct hlist_head *head; 96 struct hlist_head *head;
@@ -143,7 +127,7 @@ static void vis_data_insert_interface(const uint8_t *interface,
143 struct hlist_node *pos; 127 struct hlist_node *pos;
144 128
145 hlist_for_each_entry(entry, pos, if_list, list) { 129 hlist_for_each_entry(entry, pos, if_list, list) {
146 if (compare_eth(entry->addr, (void *)interface)) 130 if (compare_eth(entry->addr, interface))
147 return; 131 return;
148 } 132 }
149 133
@@ -156,7 +140,8 @@ static void vis_data_insert_interface(const uint8_t *interface,
156 hlist_add_head(&entry->list, if_list); 140 hlist_add_head(&entry->list, if_list);
157} 141}
158 142
159static ssize_t vis_data_read_prim_sec(char *buff, struct hlist_head *if_list) 143static ssize_t vis_data_read_prim_sec(char *buff,
144 const struct hlist_head *if_list)
160{ 145{
161 struct if_list_entry *entry; 146 struct if_list_entry *entry;
162 struct hlist_node *pos; 147 struct hlist_node *pos;
@@ -189,8 +174,9 @@ static size_t vis_data_count_prim_sec(struct hlist_head *if_list)
189} 174}
190 175
191/* read an entry */ 176/* read an entry */
192static ssize_t vis_data_read_entry(char *buff, struct vis_info_entry *entry, 177static ssize_t vis_data_read_entry(char *buff,
193 uint8_t *src, bool primary) 178 const struct vis_info_entry *entry,
179 const uint8_t *src, bool primary)
194{ 180{
195 /* maximal length: max(4+17+2, 3+17+1+3+2) == 26 */ 181 /* maximal length: max(4+17+2, 3+17+1+3+2) == 26 */
196 if (primary && entry->quality == 0) 182 if (primary && entry->quality == 0)
@@ -239,7 +225,7 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
239 hlist_for_each_entry_rcu(info, node, head, hash_entry) { 225 hlist_for_each_entry_rcu(info, node, head, hash_entry) {
240 packet = (struct vis_packet *)info->skb_packet->data; 226 packet = (struct vis_packet *)info->skb_packet->data;
241 entries = (struct vis_info_entry *) 227 entries = (struct vis_info_entry *)
242 ((char *)packet + sizeof(struct vis_packet)); 228 ((char *)packet + sizeof(*packet));
243 229
244 for (j = 0; j < packet->entries; j++) { 230 for (j = 0; j < packet->entries; j++) {
245 if (entries[j].quality == 0) 231 if (entries[j].quality == 0)
@@ -287,7 +273,7 @@ int vis_seq_print_text(struct seq_file *seq, void *offset)
287 hlist_for_each_entry_rcu(info, node, head, hash_entry) { 273 hlist_for_each_entry_rcu(info, node, head, hash_entry) {
288 packet = (struct vis_packet *)info->skb_packet->data; 274 packet = (struct vis_packet *)info->skb_packet->data;
289 entries = (struct vis_info_entry *) 275 entries = (struct vis_info_entry *)
290 ((char *)packet + sizeof(struct vis_packet)); 276 ((char *)packet + sizeof(*packet));
291 277
292 for (j = 0; j < packet->entries; j++) { 278 for (j = 0; j < packet->entries; j++) {
293 if (entries[j].quality == 0) 279 if (entries[j].quality == 0)
@@ -361,11 +347,11 @@ static void send_list_del(struct vis_info *info)
361 347
362/* tries to add one entry to the receive list. */ 348/* tries to add one entry to the receive list. */
363static void recv_list_add(struct bat_priv *bat_priv, 349static void recv_list_add(struct bat_priv *bat_priv,
364 struct list_head *recv_list, char *mac) 350 struct list_head *recv_list, const char *mac)
365{ 351{
366 struct recvlist_node *entry; 352 struct recvlist_node *entry;
367 353
368 entry = kmalloc(sizeof(struct recvlist_node), GFP_ATOMIC); 354 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
369 if (!entry) 355 if (!entry)
370 return; 356 return;
371 357
@@ -377,9 +363,9 @@ static void recv_list_add(struct bat_priv *bat_priv,
377 363
378/* returns 1 if this mac is in the recv_list */ 364/* returns 1 if this mac is in the recv_list */
379static int recv_list_is_in(struct bat_priv *bat_priv, 365static int recv_list_is_in(struct bat_priv *bat_priv,
380 struct list_head *recv_list, char *mac) 366 const struct list_head *recv_list, const char *mac)
381{ 367{
382 struct recvlist_node *entry; 368 const struct recvlist_node *entry;
383 369
384 spin_lock_bh(&bat_priv->vis_list_lock); 370 spin_lock_bh(&bat_priv->vis_list_lock);
385 list_for_each_entry(entry, recv_list, list) { 371 list_for_each_entry(entry, recv_list, list) {
@@ -412,11 +398,11 @@ static struct vis_info *add_packet(struct bat_priv *bat_priv,
412 return NULL; 398 return NULL;
413 399
414 /* see if the packet is already in vis_hash */ 400 /* see if the packet is already in vis_hash */
415 search_elem.skb_packet = dev_alloc_skb(sizeof(struct vis_packet)); 401 search_elem.skb_packet = dev_alloc_skb(sizeof(*search_packet));
416 if (!search_elem.skb_packet) 402 if (!search_elem.skb_packet)
417 return NULL; 403 return NULL;
418 search_packet = (struct vis_packet *)skb_put(search_elem.skb_packet, 404 search_packet = (struct vis_packet *)skb_put(search_elem.skb_packet,
419 sizeof(struct vis_packet)); 405 sizeof(*search_packet));
420 406
421 memcpy(search_packet->vis_orig, vis_packet->vis_orig, ETH_ALEN); 407 memcpy(search_packet->vis_orig, vis_packet->vis_orig, ETH_ALEN);
422 old_info = vis_hash_find(bat_priv, &search_elem); 408 old_info = vis_hash_find(bat_priv, &search_elem);
@@ -442,27 +428,26 @@ static struct vis_info *add_packet(struct bat_priv *bat_priv,
442 kref_put(&old_info->refcount, free_info); 428 kref_put(&old_info->refcount, free_info);
443 } 429 }
444 430
445 info = kmalloc(sizeof(struct vis_info), GFP_ATOMIC); 431 info = kmalloc(sizeof(*info), GFP_ATOMIC);
446 if (!info) 432 if (!info)
447 return NULL; 433 return NULL;
448 434
449 info->skb_packet = dev_alloc_skb(sizeof(struct vis_packet) + 435 info->skb_packet = dev_alloc_skb(sizeof(*packet) + vis_info_len +
450 vis_info_len + sizeof(struct ethhdr)); 436 sizeof(struct ethhdr));
451 if (!info->skb_packet) { 437 if (!info->skb_packet) {
452 kfree(info); 438 kfree(info);
453 return NULL; 439 return NULL;
454 } 440 }
455 skb_reserve(info->skb_packet, sizeof(struct ethhdr)); 441 skb_reserve(info->skb_packet, sizeof(struct ethhdr));
456 packet = (struct vis_packet *)skb_put(info->skb_packet, 442 packet = (struct vis_packet *)skb_put(info->skb_packet, sizeof(*packet)
457 sizeof(struct vis_packet) + 443 + vis_info_len);
458 vis_info_len);
459 444
460 kref_init(&info->refcount); 445 kref_init(&info->refcount);
461 INIT_LIST_HEAD(&info->send_list); 446 INIT_LIST_HEAD(&info->send_list);
462 INIT_LIST_HEAD(&info->recv_list); 447 INIT_LIST_HEAD(&info->recv_list);
463 info->first_seen = jiffies; 448 info->first_seen = jiffies;
464 info->bat_priv = bat_priv; 449 info->bat_priv = bat_priv;
465 memcpy(packet, vis_packet, sizeof(struct vis_packet) + vis_info_len); 450 memcpy(packet, vis_packet, sizeof(*packet) + vis_info_len);
466 451
467 /* initialize and add new packet. */ 452 /* initialize and add new packet. */
468 *is_new = 1; 453 *is_new = 1;
@@ -599,9 +584,9 @@ static int find_best_vis_server(struct bat_priv *bat_priv,
599} 584}
600 585
601/* Return true if the vis packet is full. */ 586/* Return true if the vis packet is full. */
602static bool vis_packet_full(struct vis_info *info) 587static bool vis_packet_full(const struct vis_info *info)
603{ 588{
604 struct vis_packet *packet; 589 const struct vis_packet *packet;
605 packet = (struct vis_packet *)info->skb_packet->data; 590 packet = (struct vis_packet *)info->skb_packet->data;
606 591
607 if (MAX_VIS_PACKET_SIZE / sizeof(struct vis_info_entry) 592 if (MAX_VIS_PACKET_SIZE / sizeof(struct vis_info_entry)
@@ -619,7 +604,7 @@ static int generate_vis_packet(struct bat_priv *bat_priv)
619 struct hlist_head *head; 604 struct hlist_head *head;
620 struct orig_node *orig_node; 605 struct orig_node *orig_node;
621 struct neigh_node *router; 606 struct neigh_node *router;
622 struct vis_info *info = (struct vis_info *)bat_priv->my_vis_info; 607 struct vis_info *info = bat_priv->my_vis_info;
623 struct vis_packet *packet = (struct vis_packet *)info->skb_packet->data; 608 struct vis_packet *packet = (struct vis_packet *)info->skb_packet->data;
624 struct vis_info_entry *entry; 609 struct vis_info_entry *entry;
625 struct tt_local_entry *tt_local_entry; 610 struct tt_local_entry *tt_local_entry;
@@ -632,7 +617,7 @@ static int generate_vis_packet(struct bat_priv *bat_priv)
632 packet->ttl = TTL; 617 packet->ttl = TTL;
633 packet->seqno = htonl(ntohl(packet->seqno) + 1); 618 packet->seqno = htonl(ntohl(packet->seqno) + 1);
634 packet->entries = 0; 619 packet->entries = 0;
635 skb_trim(info->skb_packet, sizeof(struct vis_packet)); 620 skb_trim(info->skb_packet, sizeof(*packet));
636 621
637 if (packet->vis_type == VIS_TYPE_CLIENT_UPDATE) { 622 if (packet->vis_type == VIS_TYPE_CLIENT_UPDATE) {
638 best_tq = find_best_vis_server(bat_priv, info); 623 best_tq = find_best_vis_server(bat_priv, info);
@@ -680,11 +665,12 @@ next:
680 665
681 hash = bat_priv->tt_local_hash; 666 hash = bat_priv->tt_local_hash;
682 667
683 spin_lock_bh(&bat_priv->tt_lhash_lock);
684 for (i = 0; i < hash->size; i++) { 668 for (i = 0; i < hash->size; i++) {
685 head = &hash->table[i]; 669 head = &hash->table[i];
686 670
687 hlist_for_each_entry(tt_local_entry, node, head, hash_entry) { 671 rcu_read_lock();
672 hlist_for_each_entry_rcu(tt_local_entry, node, head,
673 hash_entry) {
688 entry = (struct vis_info_entry *) 674 entry = (struct vis_info_entry *)
689 skb_put(info->skb_packet, 675 skb_put(info->skb_packet,
690 sizeof(*entry)); 676 sizeof(*entry));
@@ -693,14 +679,12 @@ next:
693 entry->quality = 0; /* 0 means TT */ 679 entry->quality = 0; /* 0 means TT */
694 packet->entries++; 680 packet->entries++;
695 681
696 if (vis_packet_full(info)) { 682 if (vis_packet_full(info))
697 spin_unlock_bh(&bat_priv->tt_lhash_lock); 683 goto unlock;
698 return 0;
699 }
700 } 684 }
685 rcu_read_unlock();
701 } 686 }
702 687
703 spin_unlock_bh(&bat_priv->tt_lhash_lock);
704 return 0; 688 return 0;
705 689
706unlock: 690unlock:
@@ -908,17 +892,15 @@ int vis_init(struct bat_priv *bat_priv)
908 goto err; 892 goto err;
909 } 893 }
910 894
911 bat_priv->my_vis_info->skb_packet = dev_alloc_skb( 895 bat_priv->my_vis_info->skb_packet = dev_alloc_skb(sizeof(*packet) +
912 sizeof(struct vis_packet) + 896 MAX_VIS_PACKET_SIZE +
913 MAX_VIS_PACKET_SIZE + 897 sizeof(struct ethhdr));
914 sizeof(struct ethhdr));
915 if (!bat_priv->my_vis_info->skb_packet) 898 if (!bat_priv->my_vis_info->skb_packet)
916 goto free_info; 899 goto free_info;
917 900
918 skb_reserve(bat_priv->my_vis_info->skb_packet, sizeof(struct ethhdr)); 901 skb_reserve(bat_priv->my_vis_info->skb_packet, sizeof(struct ethhdr));
919 packet = (struct vis_packet *)skb_put( 902 packet = (struct vis_packet *)skb_put(bat_priv->my_vis_info->skb_packet,
920 bat_priv->my_vis_info->skb_packet, 903 sizeof(*packet));
921 sizeof(struct vis_packet));
922 904
923 /* prefill the vis info */ 905 /* prefill the vis info */
924 bat_priv->my_vis_info->first_seen = jiffies - 906 bat_priv->my_vis_info->first_seen = jiffies -
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
index 6ae5ec50858..bfb3dc03c9d 100644
--- a/net/bluetooth/Kconfig
+++ b/net/bluetooth/Kconfig
@@ -6,6 +6,7 @@ menuconfig BT
6 tristate "Bluetooth subsystem support" 6 tristate "Bluetooth subsystem support"
7 depends on NET && !S390 7 depends on NET && !S390
8 depends on RFKILL || !RFKILL 8 depends on RFKILL || !RFKILL
9 select CRYPTO
9 help 10 help
10 Bluetooth is a low-cost, low-power, short-range wireless technology. 11 Bluetooth is a low-cost, low-power, short-range wireless technology.
11 It was designed as a replacement for cables and other short-range 12 It was designed as a replacement for cables and other short-range
@@ -22,6 +23,7 @@ menuconfig BT
22 BNEP Module (Bluetooth Network Encapsulation Protocol) 23 BNEP Module (Bluetooth Network Encapsulation Protocol)
23 CMTP Module (CAPI Message Transport Protocol) 24 CMTP Module (CAPI Message Transport Protocol)
24 HIDP Module (Human Interface Device Protocol) 25 HIDP Module (Human Interface Device Protocol)
26 SMP Module (Security Manager Protocol)
25 27
26 Say Y here to compile Bluetooth support into the kernel or say M to 28 Say Y here to compile Bluetooth support into the kernel or say M to
27 compile it as module (bluetooth). 29 compile it as module (bluetooth).
@@ -36,11 +38,18 @@ if BT != n
36config BT_L2CAP 38config BT_L2CAP
37 bool "L2CAP protocol support" 39 bool "L2CAP protocol support"
38 select CRC16 40 select CRC16
41 select CRYPTO
42 select CRYPTO_BLKCIPHER
43 select CRYPTO_AES
44 select CRYPTO_ECB
39 help 45 help
40 L2CAP (Logical Link Control and Adaptation Protocol) provides 46 L2CAP (Logical Link Control and Adaptation Protocol) provides
41 connection-oriented and connection-less data transport. L2CAP 47 connection-oriented and connection-less data transport. L2CAP
42 support is required for most Bluetooth applications. 48 support is required for most Bluetooth applications.
43 49
50 Also included is support for SMP (Security Manager Protocol) which
51 is the security layer on top of LE (Low Energy) links.
52
44config BT_SCO 53config BT_SCO
45 bool "SCO links support" 54 bool "SCO links support"
46 help 55 help
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index f04fe9a9d63..9b67f3d08fa 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -9,5 +9,5 @@ obj-$(CONFIG_BT_CMTP) += cmtp/
9obj-$(CONFIG_BT_HIDP) += hidp/ 9obj-$(CONFIG_BT_HIDP) += hidp/
10 10
11bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o hci_sock.o hci_sysfs.o lib.o 11bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o hci_sock.o hci_sysfs.o lib.o
12bluetooth-$(CONFIG_BT_L2CAP) += l2cap_core.o l2cap_sock.o 12bluetooth-$(CONFIG_BT_L2CAP) += l2cap_core.o l2cap_sock.o smp.o
13bluetooth-$(CONFIG_BT_SCO) += sco.o 13bluetooth-$(CONFIG_BT_SCO) += sco.o
diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c
index 744233cba24..040f67b1297 100644
--- a/net/bluetooth/cmtp/capi.c
+++ b/net/bluetooth/cmtp/capi.c
@@ -326,7 +326,7 @@ void cmtp_recv_capimsg(struct cmtp_session *session, struct sk_buff *skb)
326{ 326{
327 struct capi_ctr *ctrl = &session->ctrl; 327 struct capi_ctr *ctrl = &session->ctrl;
328 struct cmtp_application *application; 328 struct cmtp_application *application;
329 __u16 cmd, appl; 329 __u16 appl;
330 __u32 contr; 330 __u32 contr;
331 331
332 BT_DBG("session %p skb %p len %d", session, skb, skb->len); 332 BT_DBG("session %p skb %p len %d", session, skb, skb->len);
@@ -344,7 +344,6 @@ void cmtp_recv_capimsg(struct cmtp_session *session, struct sk_buff *skb)
344 return; 344 return;
345 } 345 }
346 346
347 cmd = CAPICMD(CAPIMSG_COMMAND(skb->data), CAPIMSG_SUBCOMMAND(skb->data));
348 appl = CAPIMSG_APPID(skb->data); 347 appl = CAPIMSG_APPID(skb->data);
349 contr = CAPIMSG_CONTROL(skb->data); 348 contr = CAPIMSG_CONTROL(skb->data);
350 349
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 3163330cd4f..ea7f031f3b0 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -53,11 +53,13 @@ static void hci_le_connect(struct hci_conn *conn)
53 conn->state = BT_CONNECT; 53 conn->state = BT_CONNECT;
54 conn->out = 1; 54 conn->out = 1;
55 conn->link_mode |= HCI_LM_MASTER; 55 conn->link_mode |= HCI_LM_MASTER;
56 conn->sec_level = BT_SECURITY_LOW;
56 57
57 memset(&cp, 0, sizeof(cp)); 58 memset(&cp, 0, sizeof(cp));
58 cp.scan_interval = cpu_to_le16(0x0004); 59 cp.scan_interval = cpu_to_le16(0x0004);
59 cp.scan_window = cpu_to_le16(0x0004); 60 cp.scan_window = cpu_to_le16(0x0004);
60 bacpy(&cp.peer_addr, &conn->dst); 61 bacpy(&cp.peer_addr, &conn->dst);
62 cp.peer_addr_type = conn->dst_type;
61 cp.conn_interval_min = cpu_to_le16(0x0008); 63 cp.conn_interval_min = cpu_to_le16(0x0008);
62 cp.conn_interval_max = cpu_to_le16(0x0100); 64 cp.conn_interval_max = cpu_to_le16(0x0100);
63 cp.supervision_timeout = cpu_to_le16(0x0064); 65 cp.supervision_timeout = cpu_to_le16(0x0064);
@@ -203,6 +205,55 @@ void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
203} 205}
204EXPORT_SYMBOL(hci_le_conn_update); 206EXPORT_SYMBOL(hci_le_conn_update);
205 207
208void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
209 __u8 ltk[16])
210{
211 struct hci_dev *hdev = conn->hdev;
212 struct hci_cp_le_start_enc cp;
213
214 BT_DBG("%p", conn);
215
216 memset(&cp, 0, sizeof(cp));
217
218 cp.handle = cpu_to_le16(conn->handle);
219 memcpy(cp.ltk, ltk, sizeof(cp.ltk));
220 cp.ediv = ediv;
221 memcpy(cp.rand, rand, sizeof(cp.rand));
222
223 hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
224}
225EXPORT_SYMBOL(hci_le_start_enc);
226
227void hci_le_ltk_reply(struct hci_conn *conn, u8 ltk[16])
228{
229 struct hci_dev *hdev = conn->hdev;
230 struct hci_cp_le_ltk_reply cp;
231
232 BT_DBG("%p", conn);
233
234 memset(&cp, 0, sizeof(cp));
235
236 cp.handle = cpu_to_le16(conn->handle);
237 memcpy(cp.ltk, ltk, sizeof(cp.ltk));
238
239 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
240}
241EXPORT_SYMBOL(hci_le_ltk_reply);
242
243void hci_le_ltk_neg_reply(struct hci_conn *conn)
244{
245 struct hci_dev *hdev = conn->hdev;
246 struct hci_cp_le_ltk_neg_reply cp;
247
248 BT_DBG("%p", conn);
249
250 memset(&cp, 0, sizeof(cp));
251
252 cp.handle = cpu_to_le16(conn->handle);
253
254 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(cp), &cp);
255}
256
206/* Device _must_ be locked */ 257/* Device _must_ be locked */
207void hci_sco_setup(struct hci_conn *conn, __u8 status) 258void hci_sco_setup(struct hci_conn *conn, __u8 status)
208{ 259{
@@ -393,6 +444,9 @@ int hci_conn_del(struct hci_conn *conn)
393 444
394 hci_dev_put(hdev); 445 hci_dev_put(hdev);
395 446
447 if (conn->handle == 0)
448 kfree(conn);
449
396 return 0; 450 return 0;
397} 451}
398 452
@@ -447,14 +501,23 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
447 BT_DBG("%s dst %s", hdev->name, batostr(dst)); 501 BT_DBG("%s dst %s", hdev->name, batostr(dst));
448 502
449 if (type == LE_LINK) { 503 if (type == LE_LINK) {
504 struct adv_entry *entry;
505
450 le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst); 506 le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
451 if (le) 507 if (le)
452 return ERR_PTR(-EBUSY); 508 return ERR_PTR(-EBUSY);
509
510 entry = hci_find_adv_entry(hdev, dst);
511 if (!entry)
512 return ERR_PTR(-EHOSTUNREACH);
513
453 le = hci_conn_add(hdev, LE_LINK, dst); 514 le = hci_conn_add(hdev, LE_LINK, dst);
454 if (!le) 515 if (!le)
455 return ERR_PTR(-ENOMEM); 516 return ERR_PTR(-ENOMEM);
456 if (le->state == BT_OPEN) 517
457 hci_le_connect(le); 518 le->dst_type = entry->bdaddr_type;
519
520 hci_le_connect(le);
458 521
459 hci_conn_hold(le); 522 hci_conn_hold(le);
460 523
@@ -497,7 +560,7 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
497 if (acl->state == BT_CONNECTED && 560 if (acl->state == BT_CONNECTED &&
498 (sco->state == BT_OPEN || sco->state == BT_CLOSED)) { 561 (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
499 acl->power_save = 1; 562 acl->power_save = 1;
500 hci_conn_enter_active_mode(acl); 563 hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);
501 564
502 if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->pend)) { 565 if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->pend)) {
503 /* defer SCO setup until mode change completed */ 566 /* defer SCO setup until mode change completed */
@@ -548,6 +611,8 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
548 cp.handle = cpu_to_le16(conn->handle); 611 cp.handle = cpu_to_le16(conn->handle);
549 hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED, 612 hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
550 sizeof(cp), &cp); 613 sizeof(cp), &cp);
614 if (conn->key_type != 0xff)
615 set_bit(HCI_CONN_REAUTH_PEND, &conn->pend);
551 } 616 }
552 617
553 return 0; 618 return 0;
@@ -608,11 +673,11 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
608 goto encrypt; 673 goto encrypt;
609 674
610auth: 675auth:
611 if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) 676 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
612 return 0; 677 return 0;
613 678
614 hci_conn_auth(conn, sec_level, auth_type); 679 if (!hci_conn_auth(conn, sec_level, auth_type))
615 return 0; 680 return 0;
616 681
617encrypt: 682encrypt:
618 if (conn->link_mode & HCI_LM_ENCRYPT) 683 if (conn->link_mode & HCI_LM_ENCRYPT)
@@ -631,9 +696,7 @@ int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
631 if (sec_level != BT_SECURITY_HIGH) 696 if (sec_level != BT_SECURITY_HIGH)
632 return 1; /* Accept if non-secure is required */ 697 return 1; /* Accept if non-secure is required */
633 698
634 if (conn->key_type == HCI_LK_AUTH_COMBINATION || 699 if (conn->sec_level == BT_SECURITY_HIGH)
635 (conn->key_type == HCI_LK_COMBINATION &&
636 conn->pin_length == 16))
637 return 1; 700 return 1;
638 701
639 return 0; /* Reject not secure link */ 702 return 0; /* Reject not secure link */
@@ -676,7 +739,7 @@ int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
676EXPORT_SYMBOL(hci_conn_switch_role); 739EXPORT_SYMBOL(hci_conn_switch_role);
677 740
678/* Enter active mode */ 741/* Enter active mode */
679void hci_conn_enter_active_mode(struct hci_conn *conn) 742void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
680{ 743{
681 struct hci_dev *hdev = conn->hdev; 744 struct hci_dev *hdev = conn->hdev;
682 745
@@ -685,7 +748,10 @@ void hci_conn_enter_active_mode(struct hci_conn *conn)
685 if (test_bit(HCI_RAW, &hdev->flags)) 748 if (test_bit(HCI_RAW, &hdev->flags))
686 return; 749 return;
687 750
688 if (conn->mode != HCI_CM_SNIFF || !conn->power_save) 751 if (conn->mode != HCI_CM_SNIFF)
752 goto timer;
753
754 if (!conn->power_save && !force_active)
689 goto timer; 755 goto timer;
690 756
691 if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) { 757 if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
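Together with hci_find_ltk() (added in the hci_core.c hunk further down), the LTK helpers above form the host side of LE link encryption. A hedged sketch of the intended flow on an LE Long Term Key Request event (handler name and placement assumed; only the declared helpers are taken from the patch):

static void le_ltk_request_sketch(struct hci_conn *conn, __le16 ediv,
				  u8 rand[8])
{
	struct link_key *ltk = hci_find_ltk(conn->hdev, ediv, rand);

	/* supply the stored key if the (ediv, rand) pair matches a
	 * known SMP LTK, otherwise reject the encryption request */
	if (ltk)
		hci_le_ltk_reply(conn, ltk->val);
	else
		hci_le_ltk_neg_reply(conn);
}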
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 815269b07f2..ec0bc3f60f2 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -42,6 +42,7 @@
42#include <linux/notifier.h> 42#include <linux/notifier.h>
43#include <linux/rfkill.h> 43#include <linux/rfkill.h>
44#include <linux/timer.h> 44#include <linux/timer.h>
45#include <linux/crypto.h>
45#include <net/sock.h> 46#include <net/sock.h>
46 47
47#include <asm/system.h> 48#include <asm/system.h>
@@ -145,7 +146,7 @@ static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev,
145 146
146 switch (hdev->req_status) { 147 switch (hdev->req_status) {
147 case HCI_REQ_DONE: 148 case HCI_REQ_DONE:
148 err = -bt_err(hdev->req_result); 149 err = -bt_to_errno(hdev->req_result);
149 break; 150 break;
150 151
151 case HCI_REQ_CANCELED: 152 case HCI_REQ_CANCELED:
@@ -539,7 +540,7 @@ int hci_dev_open(__u16 dev)
539 ret = __hci_request(hdev, hci_init_req, 0, 540 ret = __hci_request(hdev, hci_init_req, 0,
540 msecs_to_jiffies(HCI_INIT_TIMEOUT)); 541 msecs_to_jiffies(HCI_INIT_TIMEOUT));
541 542
542 if (lmp_le_capable(hdev)) 543 if (lmp_host_le_capable(hdev))
543 ret = __hci_request(hdev, hci_le_init_req, 0, 544 ret = __hci_request(hdev, hci_le_init_req, 0,
544 msecs_to_jiffies(HCI_INIT_TIMEOUT)); 545 msecs_to_jiffies(HCI_INIT_TIMEOUT));
545 546
@@ -1056,6 +1057,42 @@ static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1056 return 0; 1057 return 0;
1057} 1058}
1058 1059
1060struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1061{
1062 struct link_key *k;
1063
1064 list_for_each_entry(k, &hdev->link_keys, list) {
1065 struct key_master_id *id;
1066
1067 if (k->type != HCI_LK_SMP_LTK)
1068 continue;
1069
1070 if (k->dlen != sizeof(*id))
1071 continue;
1072
1073 id = (void *) &k->data;
1074 if (id->ediv == ediv &&
1075 (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
1076 return k;
1077 }
1078
1079 return NULL;
1080}
1081EXPORT_SYMBOL(hci_find_ltk);
1082
1083struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1084 bdaddr_t *bdaddr, u8 type)
1085{
1086 struct link_key *k;
1087
1088 list_for_each_entry(k, &hdev->link_keys, list)
1089 if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
1090 return k;
1091
1092 return NULL;
1093}
1094EXPORT_SYMBOL(hci_find_link_key_type);
1095
1059int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key, 1096int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1060 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len) 1097 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1061{ 1098{
@@ -1111,6 +1148,44 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1111 return 0; 1148 return 0;
1112} 1149}
1113 1150
1151int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
1152 u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
1153{
1154 struct link_key *key, *old_key;
1155 struct key_master_id *id;
1156 u8 old_key_type;
1157
1158 BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));
1159
1160 old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
1161 if (old_key) {
1162 key = old_key;
1163 old_key_type = old_key->type;
1164 } else {
1165 key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
1166 if (!key)
1167 return -ENOMEM;
1168 list_add(&key->list, &hdev->link_keys);
1169 old_key_type = 0xff;
1170 }
1171
1172 key->dlen = sizeof(*id);
1173
1174 bacpy(&key->bdaddr, bdaddr);
1175 memcpy(key->val, ltk, sizeof(key->val));
1176 key->type = HCI_LK_SMP_LTK;
1177 key->pin_len = key_size;
1178
1179 id = (void *) &key->data;
1180 id->ediv = ediv;
1181 memcpy(id->rand, rand, sizeof(id->rand));
1182
1183 if (new_key)
1184 mgmt_new_key(hdev->id, key, old_key_type);
1185
1186 return 0;
1187}
1188
1114int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr) 1189int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1115{ 1190{
1116 struct link_key *key; 1191 struct link_key *key;
@@ -1202,6 +1277,169 @@ int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1202 return 0; 1277 return 0;
1203} 1278}
1204 1279
1280struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1281 bdaddr_t *bdaddr)
1282{
1283 struct list_head *p;
1284
1285 list_for_each(p, &hdev->blacklist) {
1286 struct bdaddr_list *b;
1287
1288 b = list_entry(p, struct bdaddr_list, list);
1289
1290 if (bacmp(bdaddr, &b->bdaddr) == 0)
1291 return b;
1292 }
1293
1294 return NULL;
1295}
1296
1297int hci_blacklist_clear(struct hci_dev *hdev)
1298{
1299 struct list_head *p, *n;
1300
1301 list_for_each_safe(p, n, &hdev->blacklist) {
1302 struct bdaddr_list *b;
1303
1304 b = list_entry(p, struct bdaddr_list, list);
1305
1306 list_del(p);
1307 kfree(b);
1308 }
1309
1310 return 0;
1311}
1312
1313int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
1314{
1315 struct bdaddr_list *entry;
1316 int err;
1317
1318 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1319 return -EBADF;
1320
1321 hci_dev_lock_bh(hdev);
1322
1323 if (hci_blacklist_lookup(hdev, bdaddr)) {
1324 err = -EEXIST;
1325 goto err;
1326 }
1327
1328 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1329 if (!entry) {
1330 return -ENOMEM;
1331 goto err;
1332 }
1333
1334 bacpy(&entry->bdaddr, bdaddr);
1335
1336 list_add(&entry->list, &hdev->blacklist);
1337
1338 err = 0;
1339
1340err:
1341 hci_dev_unlock_bh(hdev);
1342 return err;
1343}
1344
1345int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
1346{
1347 struct bdaddr_list *entry;
1348 int err = 0;
1349
1350 hci_dev_lock_bh(hdev);
1351
1352 if (bacmp(bdaddr, BDADDR_ANY) == 0) {
1353 hci_blacklist_clear(hdev);
1354 goto done;
1355 }
1356
1357 entry = hci_blacklist_lookup(hdev, bdaddr);
1358 if (!entry) {
1359 err = -ENOENT;
1360 goto done;
1361 }
1362
1363 list_del(&entry->list);
1364 kfree(entry);
1365
1366done:
1367 hci_dev_unlock_bh(hdev);
1368 return err;
1369}
1370
1371static void hci_clear_adv_cache(unsigned long arg)
1372{
1373 struct hci_dev *hdev = (void *) arg;
1374
1375 hci_dev_lock(hdev);
1376
1377 hci_adv_entries_clear(hdev);
1378
1379 hci_dev_unlock(hdev);
1380}
1381
1382int hci_adv_entries_clear(struct hci_dev *hdev)
1383{
1384 struct adv_entry *entry, *tmp;
1385
1386 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1387 list_del(&entry->list);
1388 kfree(entry);
1389 }
1390
1391 BT_DBG("%s adv cache cleared", hdev->name);
1392
1393 return 0;
1394}
1395
1396struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1397{
1398 struct adv_entry *entry;
1399
1400 list_for_each_entry(entry, &hdev->adv_entries, list)
1401 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1402 return entry;
1403
1404 return NULL;
1405}
1406
1407static inline int is_connectable_adv(u8 evt_type)
1408{
1409 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1410 return 1;
1411
1412 return 0;
1413}
1414
1415int hci_add_adv_entry(struct hci_dev *hdev,
1416 struct hci_ev_le_advertising_info *ev)
1417{
1418 struct adv_entry *entry;
1419
1420 if (!is_connectable_adv(ev->evt_type))
1421 return -EINVAL;
1422
1423 /* Only new entries should be added to adv_entries. So, if
1424 * bdaddr was found, don't add it. */
1425 if (hci_find_adv_entry(hdev, &ev->bdaddr))
1426 return 0;
1427
1428 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
1429 if (!entry)
1430 return -ENOMEM;
1431
1432 bacpy(&entry->bdaddr, &ev->bdaddr);
1433 entry->bdaddr_type = ev->bdaddr_type;
1434
1435 list_add(&entry->list, &hdev->adv_entries);
1436
1437 BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1438 batostr(&entry->bdaddr), entry->bdaddr_type);
1439
1440 return 0;
1441}
1442
1205/* Register HCI device */ 1443/* Register HCI device */
1206int hci_register_dev(struct hci_dev *hdev) 1444int hci_register_dev(struct hci_dev *hdev)
1207{ 1445{
@@ -1268,6 +1506,10 @@ int hci_register_dev(struct hci_dev *hdev)
1268 1506
1269 INIT_LIST_HEAD(&hdev->remote_oob_data); 1507 INIT_LIST_HEAD(&hdev->remote_oob_data);
1270 1508
1509 INIT_LIST_HEAD(&hdev->adv_entries);
1510 setup_timer(&hdev->adv_timer, hci_clear_adv_cache,
1511 (unsigned long) hdev);
1512
1271 INIT_WORK(&hdev->power_on, hci_power_on); 1513 INIT_WORK(&hdev->power_on, hci_power_on);
1272 INIT_WORK(&hdev->power_off, hci_power_off); 1514 INIT_WORK(&hdev->power_off, hci_power_off);
1273 setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev); 1515 setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);
@@ -1282,6 +1524,11 @@ int hci_register_dev(struct hci_dev *hdev)
1282 if (!hdev->workqueue) 1524 if (!hdev->workqueue)
1283 goto nomem; 1525 goto nomem;
1284 1526
1527 hdev->tfm = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
1528 if (IS_ERR(hdev->tfm))
1529 BT_INFO("Failed to load transform for ecb(aes): %ld",
1530 PTR_ERR(hdev->tfm));
1531
1285 hci_register_sysfs(hdev); 1532 hci_register_sysfs(hdev);
1286 1533
1287 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev, 1534 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
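The "ecb(aes)" transform allocated above is the cipher SMP builds its primitives on; its real consumer is smp.c, outside this hunk. A hedged sketch of driving such a tfm through the pre-4.x blkcipher API, encrypting one 16-byte block in place (requires <linux/crypto.h> and <linux/scatterlist.h>):

static int smp_e_sketch(struct crypto_blkcipher *tfm, const u8 key[16],
			u8 block[16])
{
	struct blkcipher_desc desc = { .tfm = tfm };
	struct scatterlist sg;
	int err;

	err = crypto_blkcipher_setkey(tfm, key, 16);
	if (err)
		return err;

	sg_init_one(&sg, block, 16);
	return crypto_blkcipher_encrypt(&desc, &sg, &sg, 16);
}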
@@ -1330,6 +1577,9 @@ int hci_unregister_dev(struct hci_dev *hdev)
1330 !test_bit(HCI_SETUP, &hdev->flags)) 1577 !test_bit(HCI_SETUP, &hdev->flags))
1331 mgmt_index_removed(hdev->id); 1578 mgmt_index_removed(hdev->id);
1332 1579
1580 if (!IS_ERR(hdev->tfm))
1581 crypto_free_blkcipher(hdev->tfm);
1582
1333 hci_notify(hdev, HCI_DEV_UNREG); 1583 hci_notify(hdev, HCI_DEV_UNREG);
1334 1584
1335 if (hdev->rfkill) { 1585 if (hdev->rfkill) {
@@ -1340,6 +1590,7 @@ int hci_unregister_dev(struct hci_dev *hdev)
1340 hci_unregister_sysfs(hdev); 1590 hci_unregister_sysfs(hdev);
1341 1591
1342 hci_del_off_timer(hdev); 1592 hci_del_off_timer(hdev);
1593 del_timer(&hdev->adv_timer);
1343 1594
1344 destroy_workqueue(hdev->workqueue); 1595 destroy_workqueue(hdev->workqueue);
1345 1596
@@ -1348,6 +1599,7 @@ int hci_unregister_dev(struct hci_dev *hdev)
1348 hci_uuids_clear(hdev); 1599 hci_uuids_clear(hdev);
1349 hci_link_keys_clear(hdev); 1600 hci_link_keys_clear(hdev);
1350 hci_remote_oob_data_clear(hdev); 1601 hci_remote_oob_data_clear(hdev);
1602 hci_adv_entries_clear(hdev);
1351 hci_dev_unlock_bh(hdev); 1603 hci_dev_unlock_bh(hdev);
1352 1604
1353 __hci_dev_put(hdev); 1605 __hci_dev_put(hdev);
@@ -1519,7 +1771,7 @@ int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1519 1771
1520 data += (count - rem); 1772 data += (count - rem);
1521 count = rem; 1773 count = rem;
1522 }; 1774 }
1523 1775
1524 return rem; 1776 return rem;
1525} 1777}
@@ -1554,7 +1806,7 @@ int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1554 1806
1555 data += (count - rem); 1807 data += (count - rem);
1556 count = rem; 1808 count = rem;
1557 }; 1809 }
1558 1810
1559 return rem; 1811 return rem;
1560} 1812}
@@ -1891,7 +2143,7 @@ static inline void hci_sched_acl(struct hci_dev *hdev)
1891 while (quote-- && (skb = skb_dequeue(&conn->data_q))) { 2143 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1892 BT_DBG("skb %p len %d", skb, skb->len); 2144 BT_DBG("skb %p len %d", skb, skb->len);
1893 2145
1894 hci_conn_enter_active_mode(conn); 2146 hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
1895 2147
1896 hci_send_frame(skb); 2148 hci_send_frame(skb);
1897 hdev->acl_last_tx = jiffies; 2149 hdev->acl_last_tx = jiffies;
@@ -2030,7 +2282,7 @@ static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2030 if (conn) { 2282 if (conn) {
2031 register struct hci_proto *hp; 2283 register struct hci_proto *hp;
2032 2284
2033 hci_conn_enter_active_mode(conn); 2285 hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
2034 2286
2035 /* Send to upper protocol */ 2287 /* Send to upper protocol */
2036 hp = hci_proto[HCI_PROTO_L2CAP]; 2288 hp = hci_proto[HCI_PROTO_L2CAP];
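The advertising cache added above is consumed from two directions: hci_connect() (see the hci_conn.c hunk earlier) refuses an LE connection to an address that never advertised, while the LE advertising report path fills the cache. A minimal sketch of the filling side (handler name assumed, not shown in this excerpt):

static void le_adv_report_sketch(struct hci_dev *hdev,
				 struct hci_ev_le_advertising_info *ev)
{
	hci_dev_lock(hdev);
	/* hci_add_adv_entry() skips non-connectable advertisements
	 * and duplicates, so every report can be fed through */
	hci_add_adv_entry(hdev, ev);
	hci_dev_unlock(hdev);
}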
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 77930aa522e..a40170e022e 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -45,6 +45,8 @@
45#include <net/bluetooth/bluetooth.h> 45#include <net/bluetooth/bluetooth.h>
46#include <net/bluetooth/hci_core.h> 46#include <net/bluetooth/hci_core.h>
47 47
48static int enable_le;
49
48/* Handle HCI Event packets */ 50/* Handle HCI Event packets */
49 51
50static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb) 52static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
@@ -525,6 +527,20 @@ static void hci_setup_event_mask(struct hci_dev *hdev)
525 hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events); 527 hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
526} 528}
527 529
530static void hci_set_le_support(struct hci_dev *hdev)
531{
532 struct hci_cp_write_le_host_supported cp;
533
534 memset(&cp, 0, sizeof(cp));
535
536 if (enable_le) {
537 cp.le = 1;
538 cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
539 }
540
541 hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp), &cp);
542}
543
528static void hci_setup(struct hci_dev *hdev) 544static void hci_setup(struct hci_dev *hdev)
529{ 545{
530 hci_setup_event_mask(hdev); 546 hci_setup_event_mask(hdev);
@@ -542,6 +558,17 @@ static void hci_setup(struct hci_dev *hdev)
542 558
543 if (hdev->features[7] & LMP_INQ_TX_PWR) 559 if (hdev->features[7] & LMP_INQ_TX_PWR)
544 hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL); 560 hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
561
562 if (hdev->features[7] & LMP_EXTFEATURES) {
563 struct hci_cp_read_local_ext_features cp;
564
565 cp.page = 0x01;
566 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES,
567 sizeof(cp), &cp);
568 }
569
570 if (hdev->features[4] & LMP_LE)
571 hci_set_le_support(hdev);
545} 572}
546 573
547static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb) 574static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
@@ -658,6 +685,21 @@ static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb
658 hdev->features[6], hdev->features[7]); 685 hdev->features[6], hdev->features[7]);
659} 686}
660 687
688static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
689 struct sk_buff *skb)
690{
691 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
692
693 BT_DBG("%s status 0x%x", hdev->name, rp->status);
694
695 if (rp->status)
696 return;
697
698 memcpy(hdev->extfeatures, rp->features, 8);
699
700 hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
701}
702
 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_rp_read_buffer_size *rp = (void *) skb->data;
@@ -841,6 +883,72 @@ static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
 						rp->randomizer, rp->status);
 }
 
+static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
+					struct sk_buff *skb)
+{
+	struct hci_cp_le_set_scan_enable *cp;
+	__u8 status = *((__u8 *) skb->data);
+
+	BT_DBG("%s status 0x%x", hdev->name, status);
+
+	if (status)
+		return;
+
+	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
+	if (!cp)
+		return;
+
+	hci_dev_lock(hdev);
+
+	if (cp->enable == 0x01) {
+		del_timer(&hdev->adv_timer);
+		hci_adv_entries_clear(hdev);
+	} else if (cp->enable == 0x00) {
+		mod_timer(&hdev->adv_timer, jiffies + ADV_CLEAR_TIMEOUT);
+	}
+
+	hci_dev_unlock(hdev);
+}
+
+static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
+{
+	struct hci_rp_le_ltk_reply *rp = (void *) skb->data;
+
+	BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+	if (rp->status)
+		return;
+
+	hci_req_complete(hdev, HCI_OP_LE_LTK_REPLY, rp->status);
+}
+
+static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
+{
+	struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;
+
+	BT_DBG("%s status 0x%x", hdev->name, rp->status);
+
+	if (rp->status)
+		return;
+
+	hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
+}
+
+static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev,
+							struct sk_buff *skb)
+{
+	struct hci_cp_read_local_ext_features cp;
+	__u8 status = *((__u8 *) skb->data);
+
+	BT_DBG("%s status 0x%x", hdev->name, status);
+
+	if (status)
+		return;
+
+	cp.page = 0x01;
+	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp), &cp);
+}
+
 static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
 {
 	BT_DBG("%s status 0x%x", hdev->name, status);
@@ -1209,16 +1317,23 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
 	} else {
 		if (!conn) {
 			conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
-			if (conn)
+			if (conn) {
+				conn->dst_type = cp->peer_addr_type;
 				conn->out = 1;
-			else
+			} else {
 				BT_ERR("No memory for new connection");
+			}
 		}
 	}
 
 	hci_dev_unlock(hdev);
 }
 
+static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
+{
+	BT_DBG("%s status 0x%x", hdev->name, status);
+}
+
 static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	__u8 status = *((__u8 *) skb->data);
@@ -1462,51 +1577,58 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
 	hci_dev_lock(hdev);
 
 	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
-	if (conn) {
-		if (!ev->status) {
+	if (!conn)
+		goto unlock;
+
+	if (!ev->status) {
+		if (!(conn->ssp_mode > 0 && hdev->ssp_mode > 0) &&
+				test_bit(HCI_CONN_REAUTH_PEND, &conn->pend)) {
+			BT_INFO("re-auth of legacy device is not possible.");
+		} else {
 			conn->link_mode |= HCI_LM_AUTH;
 			conn->sec_level = conn->pending_sec_level;
-		} else {
-			mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
 		}
+	} else {
+		mgmt_auth_failed(hdev->id, &conn->dst, ev->status);
+	}
 
-		clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
+	clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
+	clear_bit(HCI_CONN_REAUTH_PEND, &conn->pend);
 
-		if (conn->state == BT_CONFIG) {
-			if (!ev->status && hdev->ssp_mode > 0 &&
-							conn->ssp_mode > 0) {
-				struct hci_cp_set_conn_encrypt cp;
-				cp.handle = ev->handle;
-				cp.encrypt = 0x01;
-				hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT,
-							sizeof(cp), &cp);
-			} else {
-				conn->state = BT_CONNECTED;
-				hci_proto_connect_cfm(conn, ev->status);
-				hci_conn_put(conn);
-			}
+	if (conn->state == BT_CONFIG) {
+		if (!ev->status && hdev->ssp_mode > 0 && conn->ssp_mode > 0) {
+			struct hci_cp_set_conn_encrypt cp;
+			cp.handle = ev->handle;
+			cp.encrypt = 0x01;
+			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
+									&cp);
 		} else {
-			hci_auth_cfm(conn, ev->status);
-
-			hci_conn_hold(conn);
-			conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+			conn->state = BT_CONNECTED;
+			hci_proto_connect_cfm(conn, ev->status);
 			hci_conn_put(conn);
 		}
+	} else {
+		hci_auth_cfm(conn, ev->status);
 
-		if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
-			if (!ev->status) {
-				struct hci_cp_set_conn_encrypt cp;
-				cp.handle = ev->handle;
-				cp.encrypt = 0x01;
-				hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT,
-							sizeof(cp), &cp);
-			} else {
-				clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
-				hci_encrypt_cfm(conn, ev->status, 0x00);
-			}
+		hci_conn_hold(conn);
+		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
+		hci_conn_put(conn);
+	}
+
+	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
+		if (!ev->status) {
+			struct hci_cp_set_conn_encrypt cp;
+			cp.handle = ev->handle;
+			cp.encrypt = 0x01;
+			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
+									&cp);
+		} else {
+			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
+			hci_encrypt_cfm(conn, ev->status, 0x00);
 		}
 	}
 
+unlock:
 	hci_dev_unlock(hdev);
 }
 
@@ -1557,6 +1679,7 @@ static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *
 			/* Encryption implies authentication */
 			conn->link_mode |= HCI_LM_AUTH;
 			conn->link_mode |= HCI_LM_ENCRYPT;
+			conn->sec_level = conn->pending_sec_level;
 		} else
 			conn->link_mode &= ~HCI_LM_ENCRYPT;
 	}
@@ -1760,6 +1883,10 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
 		hci_cc_read_local_features(hdev, skb);
 		break;
 
+	case HCI_OP_READ_LOCAL_EXT_FEATURES:
+		hci_cc_read_local_ext_features(hdev, skb);
+		break;
+
 	case HCI_OP_READ_BUFFER_SIZE:
 		hci_cc_read_buffer_size(hdev, skb);
 		break;
@@ -1816,6 +1943,22 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk
 		hci_cc_user_confirm_neg_reply(hdev, skb);
 		break;
 
+	case HCI_OP_LE_SET_SCAN_ENABLE:
+		hci_cc_le_set_scan_enable(hdev, skb);
+		break;
+
+	case HCI_OP_LE_LTK_REPLY:
+		hci_cc_le_ltk_reply(hdev, skb);
+		break;
+
+	case HCI_OP_LE_LTK_NEG_REPLY:
+		hci_cc_le_ltk_neg_reply(hdev, skb);
+		break;
+
+	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
+		hci_cc_write_le_host_supported(hdev, skb);
+		break;
+
 	default:
 		BT_DBG("%s opcode 0x%x", hdev->name, opcode);
 		break;
@@ -1894,6 +2037,10 @@ static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
 		hci_cs_le_create_conn(hdev, ev->status);
 		break;
 
+	case HCI_OP_LE_START_ENC:
+		hci_cs_le_start_enc(hdev, ev->status);
+		break;
+
 	default:
 		BT_DBG("%s opcode 0x%x", hdev->name, opcode);
 		break;
@@ -2658,6 +2805,8 @@ static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff
 			hci_dev_unlock(hdev);
 			return;
 		}
+
+		conn->dst_type = ev->bdaddr_type;
 	}
 
 	if (ev->status) {
@@ -2670,6 +2819,7 @@ static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff
 
 	mgmt_connected(hdev->id, &ev->bdaddr);
 
+	conn->sec_level = BT_SECURITY_LOW;
 	conn->handle = __le16_to_cpu(ev->handle);
 	conn->state = BT_CONNECTED;
 
@@ -2682,6 +2832,64 @@ unlock:
 	hci_dev_unlock(hdev);
 }
 
+static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
+						struct sk_buff *skb)
+{
+	struct hci_ev_le_advertising_info *ev;
+	u8 num_reports;
+
+	num_reports = skb->data[0];
+	ev = (void *) &skb->data[1];
+
+	hci_dev_lock(hdev);
+
+	hci_add_adv_entry(hdev, ev);
+
+	while (--num_reports) {
+		ev = (void *) (ev->data + ev->length + 1);
+		hci_add_adv_entry(hdev, ev);
+	}
+
+	hci_dev_unlock(hdev);
+}
+
+static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
+						struct sk_buff *skb)
+{
+	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
+	struct hci_cp_le_ltk_reply cp;
+	struct hci_cp_le_ltk_neg_reply neg;
+	struct hci_conn *conn;
+	struct link_key *ltk;
+
+	BT_DBG("%s handle %d", hdev->name, cpu_to_le16(ev->handle));
+
+	hci_dev_lock(hdev);
+
+	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
+	if (conn == NULL)
+		goto not_found;
+
+	ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
+	if (ltk == NULL)
+		goto not_found;
+
+	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
+	cp.handle = cpu_to_le16(conn->handle);
+	conn->pin_length = ltk->pin_len;
+
+	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
+
+	hci_dev_unlock(hdev);
+
+	return;
+
+not_found:
+	neg.handle = ev->handle;
+	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
+	hci_dev_unlock(hdev);
+}
+
 static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_ev_le_meta *le_ev = (void *) skb->data;
@@ -2693,6 +2901,14 @@ static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
 		hci_le_conn_complete_evt(hdev, skb);
 		break;
 
+	case HCI_EV_LE_ADVERTISING_REPORT:
+		hci_le_adv_report_evt(hdev, skb);
+		break;
+
+	case HCI_EV_LE_LTK_REQ:
+		hci_le_ltk_request_evt(hdev, skb);
+		break;
+
 	default:
 		break;
 	}
@@ -2886,3 +3102,6 @@ void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
 	hci_send_to_sock(hdev, skb, NULL);
 	kfree_skb(skb);
 }
+
+module_param(enable_le, bool, 0444);
+MODULE_PARM_DESC(enable_le, "Enable LE support");
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 295e4a88fff..ff02cf5e77c 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -180,82 +180,24 @@ static int hci_sock_release(struct socket *sock)
 	return 0;
 }
 
-struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
-{
-	struct list_head *p;
-
-	list_for_each(p, &hdev->blacklist) {
-		struct bdaddr_list *b;
-
-		b = list_entry(p, struct bdaddr_list, list);
-
-		if (bacmp(bdaddr, &b->bdaddr) == 0)
-			return b;
-	}
-
-	return NULL;
-}
-
-static int hci_blacklist_add(struct hci_dev *hdev, void __user *arg)
+static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
 {
 	bdaddr_t bdaddr;
-	struct bdaddr_list *entry;
 
 	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
 		return -EFAULT;
 
-	if (bacmp(&bdaddr, BDADDR_ANY) == 0)
-		return -EBADF;
-
-	if (hci_blacklist_lookup(hdev, &bdaddr))
-		return -EEXIST;
-
-	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
-	if (!entry)
-		return -ENOMEM;
-
-	bacpy(&entry->bdaddr, &bdaddr);
-
-	list_add(&entry->list, &hdev->blacklist);
-
-	return 0;
-}
-
-int hci_blacklist_clear(struct hci_dev *hdev)
-{
-	struct list_head *p, *n;
-
-	list_for_each_safe(p, n, &hdev->blacklist) {
-		struct bdaddr_list *b;
-
-		b = list_entry(p, struct bdaddr_list, list);
-
-		list_del(p);
-		kfree(b);
-	}
-
-	return 0;
+	return hci_blacklist_add(hdev, &bdaddr);
 }
 
-static int hci_blacklist_del(struct hci_dev *hdev, void __user *arg)
+static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
 {
 	bdaddr_t bdaddr;
-	struct bdaddr_list *entry;
 
 	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
 		return -EFAULT;
 
-	if (bacmp(&bdaddr, BDADDR_ANY) == 0)
-		return hci_blacklist_clear(hdev);
-
-	entry = hci_blacklist_lookup(hdev, &bdaddr);
-	if (!entry)
-		return -ENOENT;
-
-	list_del(&entry->list);
-	kfree(entry);
-
-	return 0;
+	return hci_blacklist_del(hdev, &bdaddr);
 }
 
 /* Ioctls that require bound socket */
@@ -290,12 +232,12 @@ static inline int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsign
 	case HCIBLOCKADDR:
 		if (!capable(CAP_NET_ADMIN))
 			return -EACCES;
-		return hci_blacklist_add(hdev, (void __user *) arg);
+		return hci_sock_blacklist_add(hdev, (void __user *) arg);
 
 	case HCIUNBLOCKADDR:
 		if (!capable(CAP_NET_ADMIN))
 			return -EACCES;
-		return hci_blacklist_del(hdev, (void __user *) arg);
+		return hci_sock_blacklist_del(hdev, (void __user *) arg);
 
 	default:
 		if (hdev->ioctl)
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index c405a954a60..43b4c2deb7c 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -464,7 +464,8 @@ static void hidp_idle_timeout(unsigned long arg)
 {
 	struct hidp_session *session = (struct hidp_session *) arg;
 
-	kthread_stop(session->task);
+	atomic_inc(&session->terminate);
+	wake_up_process(session->task);
 }
 
 static void hidp_set_timer(struct hidp_session *session)
@@ -535,7 +536,8 @@ static void hidp_process_hid_control(struct hidp_session *session,
 		skb_queue_purge(&session->ctrl_transmit);
 		skb_queue_purge(&session->intr_transmit);
 
-		kthread_stop(session->task);
+		atomic_inc(&session->terminate);
+		wake_up_process(current);
 	}
 }
 
@@ -706,9 +708,8 @@ static int hidp_session(void *arg)
 	add_wait_queue(sk_sleep(intr_sk), &intr_wait);
 	session->waiting_for_startup = 0;
 	wake_up_interruptible(&session->startup_queue);
-	while (!kthread_should_stop()) {
-		set_current_state(TASK_INTERRUPTIBLE);
-
+	set_current_state(TASK_INTERRUPTIBLE);
+	while (!atomic_read(&session->terminate)) {
 		if (ctrl_sk->sk_state != BT_CONNECTED ||
 				intr_sk->sk_state != BT_CONNECTED)
 			break;
@@ -726,6 +727,7 @@ static int hidp_session(void *arg)
 		hidp_process_transmit(session);
 
 		schedule();
+		set_current_state(TASK_INTERRUPTIBLE);
 	}
 	set_current_state(TASK_RUNNING);
 	remove_wait_queue(sk_sleep(intr_sk), &intr_wait);
@@ -1060,7 +1062,8 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
 err_add_device:
 	hid_destroy_device(session->hid);
 	session->hid = NULL;
-	kthread_stop(session->task);
+	atomic_inc(&session->terminate);
+	wake_up_process(session->task);
 
 unlink:
 	hidp_del_timer(session);
@@ -1111,7 +1114,8 @@ int hidp_del_connection(struct hidp_conndel_req *req)
 			skb_queue_purge(&session->ctrl_transmit);
 			skb_queue_purge(&session->intr_transmit);
 
-			kthread_stop(session->task);
+			atomic_inc(&session->terminate);
+			wake_up_process(session->task);
 		}
 	} else
 		err = -ENOENT;
diff --git a/net/bluetooth/hidp/hidp.h b/net/bluetooth/hidp/hidp.h
index 19e95004b28..af1bcc823f2 100644
--- a/net/bluetooth/hidp/hidp.h
+++ b/net/bluetooth/hidp/hidp.h
@@ -142,6 +142,7 @@ struct hidp_session {
 	uint ctrl_mtu;
 	uint intr_mtu;
 
+	atomic_t terminate;
 	struct task_struct *task;
 
 	unsigned char keys[8];
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index e64a1c2df23..3204ba8a701 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -54,26 +54,39 @@
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
 #include <net/bluetooth/l2cap.h>
+#include <net/bluetooth/smp.h>
 
 int disable_ertm;
 
 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
 static u8 l2cap_fixed_chan[8] = { 0x02, };
 
-static struct workqueue_struct *_busy_wq;
-
-LIST_HEAD(chan_list);
-DEFINE_RWLOCK(chan_list_lock);
-
-static void l2cap_busy_work(struct work_struct *work);
+static LIST_HEAD(chan_list);
+static DEFINE_RWLOCK(chan_list_lock);
 
 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
 				u8 code, u8 ident, u16 dlen, void *data);
+static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
+								void *data);
 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
+static void l2cap_send_disconn_req(struct l2cap_conn *conn,
+				struct l2cap_chan *chan, int err);
 
 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
 
 /* ---- L2CAP channels ---- */
+
+static inline void chan_hold(struct l2cap_chan *c)
+{
+	atomic_inc(&c->refcnt);
+}
+
+static inline void chan_put(struct l2cap_chan *c)
+{
+	if (atomic_dec_and_test(&c->refcnt))
+		kfree(c);
+}
+
 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
 {
 	struct l2cap_chan *c;
@@ -204,6 +217,62 @@ static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
 	return 0;
 }
 
+static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
+{
+	BT_DBG("chan %p state %d timeout %ld", chan->sk, chan->state, timeout);
+
+	if (!mod_timer(timer, jiffies + msecs_to_jiffies(timeout)))
+		chan_hold(chan);
+}
+
+static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer)
+{
+	BT_DBG("chan %p state %d", chan, chan->state);
+
+	if (timer_pending(timer) && del_timer(timer))
+		chan_put(chan);
+}
+
+static void l2cap_state_change(struct l2cap_chan *chan, int state)
+{
+	chan->state = state;
+	chan->ops->state_change(chan->data, state);
+}
+
+static void l2cap_chan_timeout(unsigned long arg)
+{
+	struct l2cap_chan *chan = (struct l2cap_chan *) arg;
+	struct sock *sk = chan->sk;
+	int reason;
+
+	BT_DBG("chan %p state %d", chan, chan->state);
+
+	bh_lock_sock(sk);
+
+	if (sock_owned_by_user(sk)) {
+		/* sk is owned by user. Try again later */
+		__set_chan_timer(chan, HZ / 5);
+		bh_unlock_sock(sk);
+		chan_put(chan);
+		return;
+	}
+
+	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
+		reason = ECONNREFUSED;
+	else if (chan->state == BT_CONNECT &&
+				chan->sec_level != BT_SECURITY_SDP)
+		reason = ECONNREFUSED;
+	else
+		reason = ETIMEDOUT;
+
+	l2cap_chan_close(chan, reason);
+
+	bh_unlock_sock(sk);
+
+	chan->ops->close(chan->data);
+	chan_put(chan);
+}
+
 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
 {
 	struct l2cap_chan *chan;
@@ -218,6 +287,12 @@ struct l2cap_chan *l2cap_chan_create(struct sock *sk)
 	list_add(&chan->global_l, &chan_list);
 	write_unlock_bh(&chan_list_lock);
 
+	setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan);
+
+	chan->state = BT_OPEN;
+
+	atomic_set(&chan->refcnt, 1);
+
 	return chan;
 }
 
@@ -227,13 +302,11 @@ void l2cap_chan_destroy(struct l2cap_chan *chan)
 	list_del(&chan->global_l);
 	write_unlock_bh(&chan_list_lock);
 
-	kfree(chan);
+	chan_put(chan);
 }
 
 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
 {
-	struct sock *sk = chan->sk;
-
 	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
 			chan->psm, chan->dcid);
 
@@ -241,7 +314,7 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
 
 	chan->conn = conn;
 
-	if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
+	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
 		if (conn->hcon->type == LE_LINK) {
 			/* LE connection */
 			chan->omtu = L2CAP_LE_DEFAULT_MTU;
@@ -252,7 +325,7 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
 			chan->scid = l2cap_alloc_cid(conn);
 			chan->omtu = L2CAP_DEFAULT_MTU;
 		}
-	} else if (sk->sk_type == SOCK_DGRAM) {
+	} else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
 		/* Connectionless socket */
 		chan->scid = L2CAP_CID_CONN_LESS;
 		chan->dcid = L2CAP_CID_CONN_LESS;
@@ -264,20 +337,20 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
 		chan->omtu = L2CAP_DEFAULT_MTU;
 	}
 
-	sock_hold(sk);
+	chan_hold(chan);
 
 	list_add(&chan->list, &conn->chan_l);
 }
 
 /* Delete channel.
  * Must be called on the locked socket. */
-void l2cap_chan_del(struct l2cap_chan *chan, int err)
+static void l2cap_chan_del(struct l2cap_chan *chan, int err)
 {
 	struct sock *sk = chan->sk;
 	struct l2cap_conn *conn = chan->conn;
 	struct sock *parent = bt_sk(sk)->parent;
 
-	l2cap_sock_clear_timer(sk);
+	__clear_chan_timer(chan);
 
 	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
 
@@ -286,13 +359,13 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)
 		write_lock_bh(&conn->chan_lock);
 		list_del(&chan->list);
 		write_unlock_bh(&conn->chan_lock);
-		__sock_put(sk);
+		chan_put(chan);
 
 		chan->conn = NULL;
 		hci_conn_put(conn->hcon);
 	}
 
-	sk->sk_state = BT_CLOSED;
+	l2cap_state_change(chan, BT_CLOSED);
 	sock_set_flag(sk, SOCK_ZAPPED);
 
 	if (err)
@@ -304,8 +377,8 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)
 	} else
 		sk->sk_state_change(sk);
 
-	if (!(chan->conf_state & L2CAP_CONF_OUTPUT_DONE &&
-			chan->conf_state & L2CAP_CONF_INPUT_DONE))
+	if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
+			test_bit(CONF_INPUT_DONE, &chan->conf_state)))
 		return;
 
 	skb_queue_purge(&chan->tx_q);
@@ -313,12 +386,11 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)
 	if (chan->mode == L2CAP_MODE_ERTM) {
 		struct srej_list *l, *tmp;
 
-		del_timer(&chan->retrans_timer);
-		del_timer(&chan->monitor_timer);
-		del_timer(&chan->ack_timer);
+		__clear_retrans_timer(chan);
+		__clear_monitor_timer(chan);
+		__clear_ack_timer(chan);
 
 		skb_queue_purge(&chan->srej_q);
-		skb_queue_purge(&chan->busy_q);
 
 		list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
 			list_del(&l->list);
@@ -327,11 +399,86 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)
 	}
 }
 
-static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
+static void l2cap_chan_cleanup_listen(struct sock *parent)
+{
+	struct sock *sk;
+
+	BT_DBG("parent %p", parent);
+
+	/* Close not yet accepted channels */
+	while ((sk = bt_accept_dequeue(parent, NULL))) {
+		struct l2cap_chan *chan = l2cap_pi(sk)->chan;
+		__clear_chan_timer(chan);
+		lock_sock(sk);
+		l2cap_chan_close(chan, ECONNRESET);
+		release_sock(sk);
+		chan->ops->close(chan->data);
+	}
+}
+
+void l2cap_chan_close(struct l2cap_chan *chan, int reason)
 {
+	struct l2cap_conn *conn = chan->conn;
 	struct sock *sk = chan->sk;
 
-	if (sk->sk_type == SOCK_RAW) {
+	BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
+
+	switch (chan->state) {
+	case BT_LISTEN:
+		l2cap_chan_cleanup_listen(sk);
+
+		l2cap_state_change(chan, BT_CLOSED);
+		sock_set_flag(sk, SOCK_ZAPPED);
+		break;
+
+	case BT_CONNECTED:
+	case BT_CONFIG:
+		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
+					conn->hcon->type == ACL_LINK) {
+			__clear_chan_timer(chan);
+			__set_chan_timer(chan, sk->sk_sndtimeo);
+			l2cap_send_disconn_req(conn, chan, reason);
+		} else
+			l2cap_chan_del(chan, reason);
+		break;
+
+	case BT_CONNECT2:
+		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
+					conn->hcon->type == ACL_LINK) {
+			struct l2cap_conn_rsp rsp;
+			__u16 result;
+
+			if (bt_sk(sk)->defer_setup)
+				result = L2CAP_CR_SEC_BLOCK;
+			else
+				result = L2CAP_CR_BAD_PSM;
+			l2cap_state_change(chan, BT_DISCONN);
+
+			rsp.scid = cpu_to_le16(chan->dcid);
+			rsp.dcid = cpu_to_le16(chan->scid);
+			rsp.result = cpu_to_le16(result);
+			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
+			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
+							sizeof(rsp), &rsp);
+		}
+
+		l2cap_chan_del(chan, reason);
+		break;
+
+	case BT_CONNECT:
+	case BT_DISCONN:
+		l2cap_chan_del(chan, reason);
+		break;
+
+	default:
+		sock_set_flag(sk, SOCK_ZAPPED);
+		break;
+	}
+}
+
+static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
+{
+	if (chan->chan_type == L2CAP_CHAN_RAW) {
 		switch (chan->sec_level) {
 		case BT_SECURITY_HIGH:
 			return HCI_AT_DEDICATED_BONDING_MITM;
@@ -371,7 +518,7 @@ static inline int l2cap_check_security(struct l2cap_chan *chan)
 	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
 }
 
-u8 l2cap_get_ident(struct l2cap_conn *conn)
+static u8 l2cap_get_ident(struct l2cap_conn *conn)
 {
 	u8 id;
 
@@ -393,7 +540,7 @@ u8 l2cap_get_ident(struct l2cap_conn *conn)
 	return id;
 }
 
-void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
+static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
 {
 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
 	u8 flags;
@@ -408,6 +555,8 @@ void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *d
 	else
 		flags = ACL_START;
 
+	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
+
 	hci_send_acl(conn->hcon, skb, flags);
 }
 
@@ -415,13 +564,11 @@ static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
 {
 	struct sk_buff *skb;
 	struct l2cap_hdr *lh;
-	struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
 	struct l2cap_conn *conn = chan->conn;
-	struct sock *sk = (struct sock *)pi;
 	int count, hlen = L2CAP_HDR_SIZE + 2;
 	u8 flags;
 
-	if (sk->sk_state != BT_CONNECTED)
+	if (chan->state != BT_CONNECTED)
 		return;
 
 	if (chan->fcs == L2CAP_FCS_CRC16)
@@ -432,15 +579,11 @@ static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
 	count = min_t(unsigned int, conn->mtu, hlen);
 	control |= L2CAP_CTRL_FRAME_TYPE;
 
-	if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
+	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
 		control |= L2CAP_CTRL_FINAL;
-		chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
-	}
 
-	if (chan->conn_state & L2CAP_CONN_SEND_PBIT) {
+	if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
 		control |= L2CAP_CTRL_POLL;
-		chan->conn_state &= ~L2CAP_CONN_SEND_PBIT;
-	}
 
 	skb = bt_skb_alloc(count, GFP_ATOMIC);
 	if (!skb)
@@ -461,14 +604,16 @@ static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
 	else
 		flags = ACL_START;
 
+	bt_cb(skb)->force_active = chan->force_active;
+
 	hci_send_acl(chan->conn->hcon, skb, flags);
 }
 
 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
 {
-	if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
+	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
 		control |= L2CAP_SUPER_RCV_NOT_READY;
-		chan->conn_state |= L2CAP_CONN_RNR_SENT;
+		set_bit(CONN_RNR_SENT, &chan->conn_state);
 	} else
 		control |= L2CAP_SUPER_RCV_READY;
 
@@ -479,7 +624,7 @@ static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
 
 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
 {
-	return !(chan->conf_state & L2CAP_CONF_CONNECT_PEND);
+	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
 }
 
 static void l2cap_do_start(struct l2cap_chan *chan)
@@ -497,7 +642,7 @@ static void l2cap_do_start(struct l2cap_chan *chan)
 		req.psm = chan->psm;
 
 		chan->ident = l2cap_get_ident(conn);
-		chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
+		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
 
 		l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
 							sizeof(req), &req);
@@ -533,7 +678,7 @@ static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
 	}
 }
 
-void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
+static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
 {
 	struct sock *sk;
 	struct l2cap_disconn_req req;
@@ -544,9 +689,9 @@ void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, in
 	sk = chan->sk;
 
 	if (chan->mode == L2CAP_MODE_ERTM) {
-		del_timer(&chan->retrans_timer);
-		del_timer(&chan->monitor_timer);
-		del_timer(&chan->ack_timer);
+		__clear_retrans_timer(chan);
+		__clear_monitor_timer(chan);
+		__clear_ack_timer(chan);
 	}
 
 	req.dcid = cpu_to_le16(chan->dcid);
@@ -554,7 +699,7 @@ void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, in
 	l2cap_send_cmd(conn, l2cap_get_ident(conn),
 			L2CAP_DISCONN_REQ, sizeof(req), &req);
 
-	sk->sk_state = BT_DISCONN;
+	l2cap_state_change(chan, BT_DISCONN);
 	sk->sk_err = err;
 }
 
@@ -572,13 +717,12 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
 
 		bh_lock_sock(sk);
 
-		if (sk->sk_type != SOCK_SEQPACKET &&
-				sk->sk_type != SOCK_STREAM) {
+		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
 			bh_unlock_sock(sk);
 			continue;
 		}
 
-		if (sk->sk_state == BT_CONNECT) {
+		if (chan->state == BT_CONNECT) {
 			struct l2cap_conn_req req;
 
 			if (!l2cap_check_security(chan) ||
@@ -587,15 +731,14 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
 				continue;
 			}
 
-			if (!l2cap_mode_supported(chan->mode,
-					conn->feat_mask)
-					&& chan->conf_state &
-					L2CAP_CONF_STATE2_DEVICE) {
-				/* __l2cap_sock_close() calls list_del(chan)
+			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
+					&& test_bit(CONF_STATE2_DEVICE,
+					&chan->conf_state)) {
+				/* l2cap_chan_close() calls list_del(chan)
 				 * so release the lock */
-				read_unlock_bh(&conn->chan_lock);
-				__l2cap_sock_close(sk, ECONNRESET);
-				read_lock_bh(&conn->chan_lock);
+				read_unlock(&conn->chan_lock);
+				l2cap_chan_close(chan, ECONNRESET);
+				read_lock(&conn->chan_lock);
 				bh_unlock_sock(sk);
 				continue;
 			}
@@ -604,12 +747,12 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
 			req.psm = chan->psm;
 
 			chan->ident = l2cap_get_ident(conn);
-			chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
+			set_bit(CONF_CONNECT_PEND, &chan->conf_state);
 
 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
 							sizeof(req), &req);
 
-		} else if (sk->sk_state == BT_CONNECT2) {
+		} else if (chan->state == BT_CONNECT2) {
 			struct l2cap_conn_rsp rsp;
 			char buf[128];
 			rsp.scid = cpu_to_le16(chan->dcid);
@@ -620,10 +763,11 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
 				struct sock *parent = bt_sk(sk)->parent;
 				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
 				rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
-				parent->sk_data_ready(parent, 0);
+				if (parent)
+					parent->sk_data_ready(parent, 0);
 
 			} else {
-				sk->sk_state = BT_CONFIG;
+				l2cap_state_change(chan, BT_CONFIG);
 				rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
 				rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
 			}
@@ -635,13 +779,13 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
 							sizeof(rsp), &rsp);
 
-			if (chan->conf_state & L2CAP_CONF_REQ_SENT ||
+			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
 					rsp.result != L2CAP_CR_SUCCESS) {
 				bh_unlock_sock(sk);
 				continue;
 			}
 
-			chan->conf_state |= L2CAP_CONF_REQ_SENT;
+			set_bit(CONF_REQ_SENT, &chan->conf_state);
 			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
 						l2cap_build_conf_req(chan, buf), buf);
 			chan->num_conf_req++;
@@ -665,7 +809,7 @@ static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdadd
 	list_for_each_entry(c, &chan_list, global_l) {
 		struct sock *sk = c->sk;
 
-		if (state && sk->sk_state != state)
+		if (state && c->state != state)
 			continue;
 
 		if (c->scid == cid) {
@@ -709,24 +853,16 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
 		goto clean;
 	}
 
-	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
-	if (!sk)
-		goto clean;
-
-	chan = l2cap_chan_create(sk);
-	if (!chan) {
-		l2cap_sock_kill(sk);
+	chan = pchan->ops->new_connection(pchan->data);
+	if (!chan)
 		goto clean;
-	}
 
-	l2cap_pi(sk)->chan = chan;
+	sk = chan->sk;
 
 	write_lock_bh(&conn->chan_lock);
 
 	hci_conn_hold(conn->hcon);
 
-	l2cap_sock_init(sk, parent);
-
 	bacpy(&bt_sk(sk)->src, conn->src);
 	bacpy(&bt_sk(sk)->dst, conn->dst);
 
@@ -734,9 +870,9 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn)
 
 	__l2cap_chan_add(conn, chan);
 
-	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
+	__set_chan_timer(chan, sk->sk_sndtimeo);
 
-	sk->sk_state = BT_CONNECTED;
+	l2cap_state_change(chan, BT_CONNECTED);
 	parent->sk_data_ready(parent, 0);
 
 	write_unlock_bh(&conn->chan_lock);
@@ -745,6 +881,23 @@ clean:
 	bh_unlock_sock(parent);
 }
 
+static void l2cap_chan_ready(struct sock *sk)
+{
+	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
+	struct sock *parent = bt_sk(sk)->parent;
+
+	BT_DBG("sk %p, parent %p", sk, parent);
+
+	chan->conf_state = 0;
+	__clear_chan_timer(chan);
+
+	l2cap_state_change(chan, BT_CONNECTED);
+	sk->sk_state_change(sk);
+
+	if (parent)
+		parent->sk_data_ready(parent, 0);
+}
+
 static void l2cap_conn_ready(struct l2cap_conn *conn)
 {
 	struct l2cap_chan *chan;
@@ -762,17 +915,15 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
 		bh_lock_sock(sk);
 
 		if (conn->hcon->type == LE_LINK) {
-			l2cap_sock_clear_timer(sk);
-			sk->sk_state = BT_CONNECTED;
-			sk->sk_state_change(sk);
-		}
+			if (smp_conn_security(conn, chan->sec_level))
+				l2cap_chan_ready(sk);
 
-		if (sk->sk_type != SOCK_SEQPACKET &&
-				sk->sk_type != SOCK_STREAM) {
-			l2cap_sock_clear_timer(sk);
-			sk->sk_state = BT_CONNECTED;
+		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
+			__clear_chan_timer(chan);
+			l2cap_state_change(chan, BT_CONNECTED);
 			sk->sk_state_change(sk);
-		} else if (sk->sk_state == BT_CONNECT)
+
+		} else if (chan->state == BT_CONNECT)
 			l2cap_do_start(chan);
 
 		bh_unlock_sock(sk);
@@ -810,6 +961,45 @@ static void l2cap_info_timeout(unsigned long arg)
 	l2cap_conn_start(conn);
 }
 
+static void l2cap_conn_del(struct hci_conn *hcon, int err)
+{
+	struct l2cap_conn *conn = hcon->l2cap_data;
+	struct l2cap_chan *chan, *l;
+	struct sock *sk;
+
+	if (!conn)
+		return;
+
+	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
+
+	kfree_skb(conn->rx_skb);
+
+	/* Kill channels */
+	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
+		sk = chan->sk;
+		bh_lock_sock(sk);
+		l2cap_chan_del(chan, err);
+		bh_unlock_sock(sk);
+		chan->ops->close(chan->data);
+	}
+
+	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
+		del_timer_sync(&conn->info_timer);
+
+	if (test_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend))
+		del_timer(&conn->security_timer);
+
+	hcon->l2cap_data = NULL;
+	kfree(conn);
+}
+
+static void security_timeout(unsigned long arg)
+{
+	struct l2cap_conn *conn = (void *) arg;
+
+	l2cap_conn_del(conn->hcon, ETIMEDOUT);
+}
+
 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
 {
 	struct l2cap_conn *conn = hcon->l2cap_data;
@@ -841,7 +1031,10 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
 
 	INIT_LIST_HEAD(&conn->chan_l);
 
-	if (hcon->type != LE_LINK)
+	if (hcon->type == LE_LINK)
+		setup_timer(&conn->security_timer, security_timeout,
+						(unsigned long) conn);
+	else
 		setup_timer(&conn->info_timer, l2cap_info_timeout,
 						(unsigned long) conn);
 
@@ -850,35 +1043,6 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
 	return conn;
 }
 
-static void l2cap_conn_del(struct hci_conn *hcon, int err)
-{
-	struct l2cap_conn *conn = hcon->l2cap_data;
-	struct l2cap_chan *chan, *l;
-	struct sock *sk;
-
-	if (!conn)
-		return;
-
-	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
-
-	kfree_skb(conn->rx_skb);
-
-	/* Kill channels */
-	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
-		sk = chan->sk;
-		bh_lock_sock(sk);
-		l2cap_chan_del(chan, err);
-		bh_unlock_sock(sk);
-		l2cap_sock_kill(sk);
-	}
-
-	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
-		del_timer_sync(&conn->info_timer);
-
-	hcon->l2cap_data = NULL;
-	kfree(conn);
-}
-
 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
 {
 	write_lock_bh(&conn->chan_lock);
@@ -900,7 +1064,7 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr
 	list_for_each_entry(c, &chan_list, global_l) {
 		struct sock *sk = c->sk;
 
-		if (state && sk->sk_state != state)
+		if (state && c->state != state)
 			continue;
 
 		if (c->psm == psm) {
@@ -967,15 +1131,14 @@ int l2cap_chan_connect(struct l2cap_chan *chan)
 
 	l2cap_chan_add(conn, chan);
 
-	sk->sk_state = BT_CONNECT;
-	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
+	l2cap_state_change(chan, BT_CONNECT);
+	__set_chan_timer(chan, sk->sk_sndtimeo);
 
 	if (hcon->state == BT_CONNECTED) {
-		if (sk->sk_type != SOCK_SEQPACKET &&
-				sk->sk_type != SOCK_STREAM) {
-			l2cap_sock_clear_timer(sk);
+		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
+			__clear_chan_timer(chan);
 			if (l2cap_check_security(chan))
-				sk->sk_state = BT_CONNECTED;
+				l2cap_state_change(chan, BT_CONNECTED);
 		} else
 			l2cap_do_start(chan);
 	}
@@ -1035,7 +1198,7 @@ static void l2cap_monitor_timeout(unsigned long arg)
 	}
 
 	chan->retry_count++;
-	__mod_monitor_timer();
+	__set_monitor_timer(chan);
 
 	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
 	bh_unlock_sock(sk);
@@ -1050,9 +1213,9 @@ static void l2cap_retrans_timeout(unsigned long arg)
 
 	bh_lock_sock(sk);
 	chan->retry_count = 1;
-	__mod_monitor_timer();
+	__set_monitor_timer(chan);
 
-	chan->conn_state |= L2CAP_CONN_WAIT_F;
+	set_bit(CONN_WAIT_F, &chan->conn_state);
 
 	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
 	bh_unlock_sock(sk);
@@ -1074,7 +1237,7 @@ static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
 	}
 
 	if (!chan->unacked_frames)
-		del_timer(&chan->retrans_timer);
+		__clear_retrans_timer(chan);
 }
 
 void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
@@ -1089,6 +1252,7 @@ void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
 	else
 		flags = ACL_START;
 
+	bt_cb(skb)->force_active = chan->force_active;
 	hci_send_acl(hcon, skb, flags);
 }
 
@@ -1142,10 +1306,8 @@ static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
 	control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
 	control &= L2CAP_CTRL_SAR;
 
-	if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
+	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
 		control |= L2CAP_CTRL_FINAL;
-		chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
-	}
 
 	control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
 			| (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
@@ -1163,11 +1325,10 @@ static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
 int l2cap_ertm_send(struct l2cap_chan *chan)
 {
 	struct sk_buff *skb, *tx_skb;
-	struct sock *sk = chan->sk;
 	u16 control, fcs;
 	int nsent = 0;
 
-	if (sk->sk_state != BT_CONNECTED)
+	if (chan->state != BT_CONNECTED)
 		return -ENOTCONN;
 
 	while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
@@ -1185,10 +1346,9 @@ int l2cap_ertm_send(struct l2cap_chan *chan)
 		control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
 		control &= L2CAP_CTRL_SAR;
 
-		if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
+		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
 			control |= L2CAP_CTRL_FINAL;
-			chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
-		}
+
 		control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
 				| (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
 		put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
@@ -1201,7 +1361,7 @@ int l2cap_ertm_send(struct l2cap_chan *chan)
 
 		l2cap_do_send(chan, tx_skb);
 
-		__mod_retrans_timer();
+		__set_retrans_timer(chan);
 
 		bt_cb(skb)->tx_seq = chan->next_tx_seq;
 		chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
@@ -1240,9 +1400,9 @@ static void l2cap_send_ack(struct l2cap_chan *chan)
 
 	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
 
-	if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
+	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
 		control |= L2CAP_SUPER_RCV_NOT_READY;
-		chan->conn_state |= L2CAP_CONN_RNR_SENT;
+		set_bit(CONN_RNR_SENT, &chan->conn_state);
 		l2cap_send_sframe(chan, control);
 		return;
 	}
@@ -1450,28 +1610,83 @@ int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t le
1450 return size; 1610 return size;
1451} 1611}
1452 1612
1453static void l2cap_chan_ready(struct sock *sk) 1613int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1454{ 1614{
1455 struct sock *parent = bt_sk(sk)->parent; 1615 struct sk_buff *skb;
1456 struct l2cap_chan *chan = l2cap_pi(sk)->chan; 1616 u16 control;
1617 int err;
1457 1618
1458 BT_DBG("sk %p, parent %p", sk, parent); 1619 /* Connectionless channel */
1620 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1621 skb = l2cap_create_connless_pdu(chan, msg, len);
1622 if (IS_ERR(skb))
1623 return PTR_ERR(skb);
1459 1624
1460 chan->conf_state = 0; 1625 l2cap_do_send(chan, skb);
1461 l2cap_sock_clear_timer(sk); 1626 return len;
1627 }
1462 1628
1463 if (!parent) { 1629 switch (chan->mode) {
1464 /* Outgoing channel. 1630 case L2CAP_MODE_BASIC:
1465 * Wake up socket sleeping on connect. 1631 /* Check outgoing MTU */
1466 */ 1632 if (len > chan->omtu)
1467 sk->sk_state = BT_CONNECTED; 1633 return -EMSGSIZE;
1468 sk->sk_state_change(sk); 1634
1469 } else { 1635 /* Create a basic PDU */
1470 /* Incoming channel. 1636 skb = l2cap_create_basic_pdu(chan, msg, len);
1471 * Wake up socket sleeping on accept. 1637 if (IS_ERR(skb))
1472 */ 1638 return PTR_ERR(skb);
1473 parent->sk_data_ready(parent, 0); 1639
1640 l2cap_do_send(chan, skb);
1641 err = len;
1642 break;
1643
1644 case L2CAP_MODE_ERTM:
1645 case L2CAP_MODE_STREAMING:
1646 /* Entire SDU fits into one PDU */
1647 if (len <= chan->remote_mps) {
1648 control = L2CAP_SDU_UNSEGMENTED;
1649 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1650 0);
1651 if (IS_ERR(skb))
1652 return PTR_ERR(skb);
1653
1654 __skb_queue_tail(&chan->tx_q, skb);
1655
1656 if (chan->tx_send_head == NULL)
1657 chan->tx_send_head = skb;
1658
1659 } else {
1660 /* Segment SDU into multiples PDUs */
1661 err = l2cap_sar_segment_sdu(chan, msg, len);
1662 if (err < 0)
1663 return err;
1664 }
1665
1666 if (chan->mode == L2CAP_MODE_STREAMING) {
1667 l2cap_streaming_send(chan);
1668 err = len;
1669 break;
1670 }
1671
1672 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1673 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1674 err = len;
1675 break;
1676 }
1677
1678 err = l2cap_ertm_send(chan);
1679 if (err >= 0)
1680 err = len;
1681
1682 break;
1683
1684 default:
1685 BT_DBG("bad state %1.1x", chan->mode);
1686 err = -EBADFD;
1474 } 1687 }
1688
1689 return err;
1475} 1690}
1476 1691
1477/* Copy frame to all raw sockets on that connection */ 1692/* Copy frame to all raw sockets on that connection */
@@ -1485,7 +1700,7 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1485 read_lock(&conn->chan_lock); 1700 read_lock(&conn->chan_lock);
1486 list_for_each_entry(chan, &conn->chan_l, list) { 1701 list_for_each_entry(chan, &conn->chan_l, list) {
1487 struct sock *sk = chan->sk; 1702 struct sock *sk = chan->sk;
1488 if (sk->sk_type != SOCK_RAW) 1703 if (chan->chan_type != L2CAP_CHAN_RAW)
1489 continue; 1704 continue;
1490 1705
1491 /* Don't send frame to the socket it came from */ 1706 /* Don't send frame to the socket it came from */
@@ -1495,7 +1710,7 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1495 if (!nskb) 1710 if (!nskb)
1496 continue; 1711 continue;
1497 1712
1498 if (sock_queue_rcv_skb(sk, nskb)) 1713 if (chan->ops->recv(chan->data, nskb))
1499 kfree_skb(nskb); 1714 kfree_skb(nskb);
1500 } 1715 }
1501 read_unlock(&conn->chan_lock); 1716 read_unlock(&conn->chan_lock);
@@ -1654,11 +1869,9 @@ static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1654 setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan); 1869 setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
1655 1870
1656 skb_queue_head_init(&chan->srej_q); 1871 skb_queue_head_init(&chan->srej_q);
1657 skb_queue_head_init(&chan->busy_q);
1658 1872
1659 INIT_LIST_HEAD(&chan->srej_l); 1873 INIT_LIST_HEAD(&chan->srej_l);
1660 1874
1661 INIT_WORK(&chan->busy_work, l2cap_busy_work);
1662 1875
1663 sk->sk_backlog_rcv = l2cap_ertm_data_rcv; 1876 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
1664} 1877}
@@ -1690,7 +1903,7 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
1690 switch (chan->mode) { 1903 switch (chan->mode) {
1691 case L2CAP_MODE_STREAMING: 1904 case L2CAP_MODE_STREAMING:
1692 case L2CAP_MODE_ERTM: 1905 case L2CAP_MODE_ERTM:
1693 if (chan->conf_state & L2CAP_CONF_STATE2_DEVICE) 1906 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
1694 break; 1907 break;
1695 1908
1696 /* fall through */ 1909 /* fall through */
@@ -1737,7 +1950,7 @@ done:
1737 break; 1950 break;
1738 1951
1739 if (chan->fcs == L2CAP_FCS_NONE || 1952 if (chan->fcs == L2CAP_FCS_NONE ||
1740 chan->conf_state & L2CAP_CONF_NO_FCS_RECV) { 1953 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
1741 chan->fcs = L2CAP_FCS_NONE; 1954 chan->fcs = L2CAP_FCS_NONE;
1742 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs); 1955 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
1743 } 1956 }
@@ -1760,7 +1973,7 @@ done:
1760 break; 1973 break;
1761 1974
1762 if (chan->fcs == L2CAP_FCS_NONE || 1975 if (chan->fcs == L2CAP_FCS_NONE ||
1763 chan->conf_state & L2CAP_CONF_NO_FCS_RECV) { 1976 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
1764 chan->fcs = L2CAP_FCS_NONE; 1977 chan->fcs = L2CAP_FCS_NONE;
1765 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs); 1978 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
1766 } 1979 }
@@ -1812,7 +2025,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
1812 2025
1813 case L2CAP_CONF_FCS: 2026 case L2CAP_CONF_FCS:
1814 if (val == L2CAP_FCS_NONE) 2027 if (val == L2CAP_FCS_NONE)
1815 chan->conf_state |= L2CAP_CONF_NO_FCS_RECV; 2028 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
1816 2029
1817 break; 2030 break;
1818 2031
@@ -1832,7 +2045,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
1832 switch (chan->mode) { 2045 switch (chan->mode) {
1833 case L2CAP_MODE_STREAMING: 2046 case L2CAP_MODE_STREAMING:
1834 case L2CAP_MODE_ERTM: 2047 case L2CAP_MODE_ERTM:
1835 if (!(chan->conf_state & L2CAP_CONF_STATE2_DEVICE)) { 2048 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
1836 chan->mode = l2cap_select_mode(rfc.mode, 2049 chan->mode = l2cap_select_mode(rfc.mode,
1837 chan->conn->feat_mask); 2050 chan->conn->feat_mask);
1838 break; 2051 break;
@@ -1865,14 +2078,14 @@ done:
1865 result = L2CAP_CONF_UNACCEPT; 2078 result = L2CAP_CONF_UNACCEPT;
1866 else { 2079 else {
1867 chan->omtu = mtu; 2080 chan->omtu = mtu;
1868 chan->conf_state |= L2CAP_CONF_MTU_DONE; 2081 set_bit(CONF_MTU_DONE, &chan->conf_state);
1869 } 2082 }
1870 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu); 2083 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
1871 2084
1872 switch (rfc.mode) { 2085 switch (rfc.mode) {
1873 case L2CAP_MODE_BASIC: 2086 case L2CAP_MODE_BASIC:
1874 chan->fcs = L2CAP_FCS_NONE; 2087 chan->fcs = L2CAP_FCS_NONE;
1875 chan->conf_state |= L2CAP_CONF_MODE_DONE; 2088 set_bit(CONF_MODE_DONE, &chan->conf_state);
1876 break; 2089 break;
1877 2090
1878 case L2CAP_MODE_ERTM: 2091 case L2CAP_MODE_ERTM:
@@ -1889,7 +2102,7 @@ done:
1889 rfc.monitor_timeout = 2102 rfc.monitor_timeout =
1890 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO); 2103 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
1891 2104
1892 chan->conf_state |= L2CAP_CONF_MODE_DONE; 2105 set_bit(CONF_MODE_DONE, &chan->conf_state);
1893 2106
1894 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, 2107 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1895 sizeof(rfc), (unsigned long) &rfc); 2108 sizeof(rfc), (unsigned long) &rfc);
@@ -1902,7 +2115,7 @@ done:
1902 2115
1903 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size); 2116 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1904 2117
1905 chan->conf_state |= L2CAP_CONF_MODE_DONE; 2118 set_bit(CONF_MODE_DONE, &chan->conf_state);
1906 2119
1907 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, 2120 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1908 sizeof(rfc), (unsigned long) &rfc); 2121 sizeof(rfc), (unsigned long) &rfc);
@@ -1917,7 +2130,7 @@ done:
1917 } 2130 }
1918 2131
1919 if (result == L2CAP_CONF_SUCCESS) 2132 if (result == L2CAP_CONF_SUCCESS)
1920 chan->conf_state |= L2CAP_CONF_OUTPUT_DONE; 2133 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
1921 } 2134 }
1922 rsp->scid = cpu_to_le16(chan->dcid); 2135 rsp->scid = cpu_to_le16(chan->dcid);
1923 rsp->result = cpu_to_le16(result); 2136 rsp->result = cpu_to_le16(result);
@@ -1959,7 +2172,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
1959 if (olen == sizeof(rfc)) 2172 if (olen == sizeof(rfc))
1960 memcpy(&rfc, (void *)val, olen); 2173 memcpy(&rfc, (void *)val, olen);
1961 2174
1962 if ((chan->conf_state & L2CAP_CONF_STATE2_DEVICE) && 2175 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
1963 rfc.mode != chan->mode) 2176 rfc.mode != chan->mode)
1964 return -ECONNREFUSED; 2177 return -ECONNREFUSED;
1965 2178
@@ -2021,10 +2234,9 @@ void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2021 l2cap_send_cmd(conn, chan->ident, 2234 l2cap_send_cmd(conn, chan->ident,
2022 L2CAP_CONN_RSP, sizeof(rsp), &rsp); 2235 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2023 2236
2024 if (chan->conf_state & L2CAP_CONF_REQ_SENT) 2237 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2025 return; 2238 return;
2026 2239
2027 chan->conf_state |= L2CAP_CONF_REQ_SENT;
2028 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, 2240 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2029 l2cap_build_conf_req(chan, buf), buf); 2241 l2cap_build_conf_req(chan, buf), buf);
2030 chan->num_conf_req++; 2242 chan->num_conf_req++;
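
Worth noting: the rewrite above is not a straight substitution. The old code tested the flag, returned early, and set it a few lines later; test_and_set_bit() folds both touches into one atomic step and returns the previous bit value, which is why the assignment on old line 2027 disappears. Sketch of the equivalence:

        /* old: separate, non-atomic check and set */
        if (chan->conf_state & L2CAP_CONF_REQ_SENT)
                return;
        chan->conf_state |= L2CAP_CONF_REQ_SENT;

        /* new: one atomic op; nonzero return means the bit was already set */
        if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
                return;
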
@@ -2066,9 +2278,9 @@ done:
2066 2278
2067static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data) 2279static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2068{ 2280{
2069 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data; 2281 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2070 2282
2071 if (rej->reason != 0x0000) 2283 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2072 return 0; 2284 return 0;
2073 2285
2074 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) && 2286 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
@@ -2124,17 +2336,11 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
2124 goto response; 2336 goto response;
2125 } 2337 }
2126 2338
2127 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC); 2339 chan = pchan->ops->new_connection(pchan->data);
2128 if (!sk) 2340 if (!chan)
2129 goto response;
2130
2131 chan = l2cap_chan_create(sk);
2132 if (!chan) {
2133 l2cap_sock_kill(sk);
2134 goto response; 2341 goto response;
2135 }
2136 2342
2137 l2cap_pi(sk)->chan = chan; 2343 sk = chan->sk;
2138 2344
2139 write_lock_bh(&conn->chan_lock); 2345 write_lock_bh(&conn->chan_lock);
2140 2346
@@ -2142,13 +2348,12 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
2142 if (__l2cap_get_chan_by_dcid(conn, scid)) { 2348 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2143 write_unlock_bh(&conn->chan_lock); 2349 write_unlock_bh(&conn->chan_lock);
2144 sock_set_flag(sk, SOCK_ZAPPED); 2350 sock_set_flag(sk, SOCK_ZAPPED);
2145 l2cap_sock_kill(sk); 2351 chan->ops->close(chan->data);
2146 goto response; 2352 goto response;
2147 } 2353 }
2148 2354
2149 hci_conn_hold(conn->hcon); 2355 hci_conn_hold(conn->hcon);
2150 2356
2151 l2cap_sock_init(sk, parent);
2152 bacpy(&bt_sk(sk)->src, conn->src); 2357 bacpy(&bt_sk(sk)->src, conn->src);
2153 bacpy(&bt_sk(sk)->dst, conn->dst); 2358 bacpy(&bt_sk(sk)->dst, conn->dst);
2154 chan->psm = psm; 2359 chan->psm = psm;
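
From this hunk on, the core stops allocating and killing sockets directly and calls through a per-channel operations table instead: pchan->ops->new_connection() replaces the l2cap_sock_alloc()/l2cap_chan_create() pair, and chan->ops->close() replaces l2cap_sock_kill(). A sketch of the table's shape; the three callbacks are the ones invoked in this diff, while the name field and the member order are assumptions:

        struct l2cap_ops {
                char *name;
                struct l2cap_chan *(*new_connection)(void *data);
                int (*recv)(void *data, struct sk_buff *skb);
                void (*close)(void *data);
        };

chan->data is the backend's opaque cookie, the struct sock in the l2cap_sock.c case, which is what lets the core serve non-socket users as well.
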
@@ -2160,29 +2365,29 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
2160 2365
2161 dcid = chan->scid; 2366 dcid = chan->scid;
2162 2367
2163 l2cap_sock_set_timer(sk, sk->sk_sndtimeo); 2368 __set_chan_timer(chan, sk->sk_sndtimeo);
2164 2369
2165 chan->ident = cmd->ident; 2370 chan->ident = cmd->ident;
2166 2371
2167 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) { 2372 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2168 if (l2cap_check_security(chan)) { 2373 if (l2cap_check_security(chan)) {
2169 if (bt_sk(sk)->defer_setup) { 2374 if (bt_sk(sk)->defer_setup) {
2170 sk->sk_state = BT_CONNECT2; 2375 l2cap_state_change(chan, BT_CONNECT2);
2171 result = L2CAP_CR_PEND; 2376 result = L2CAP_CR_PEND;
2172 status = L2CAP_CS_AUTHOR_PEND; 2377 status = L2CAP_CS_AUTHOR_PEND;
2173 parent->sk_data_ready(parent, 0); 2378 parent->sk_data_ready(parent, 0);
2174 } else { 2379 } else {
2175 sk->sk_state = BT_CONFIG; 2380 l2cap_state_change(chan, BT_CONFIG);
2176 result = L2CAP_CR_SUCCESS; 2381 result = L2CAP_CR_SUCCESS;
2177 status = L2CAP_CS_NO_INFO; 2382 status = L2CAP_CS_NO_INFO;
2178 } 2383 }
2179 } else { 2384 } else {
2180 sk->sk_state = BT_CONNECT2; 2385 l2cap_state_change(chan, BT_CONNECT2);
2181 result = L2CAP_CR_PEND; 2386 result = L2CAP_CR_PEND;
2182 status = L2CAP_CS_AUTHEN_PEND; 2387 status = L2CAP_CS_AUTHEN_PEND;
2183 } 2388 }
2184 } else { 2389 } else {
2185 sk->sk_state = BT_CONNECT2; 2390 l2cap_state_change(chan, BT_CONNECT2);
2186 result = L2CAP_CR_PEND; 2391 result = L2CAP_CR_PEND;
2187 status = L2CAP_CS_NO_INFO; 2392 status = L2CAP_CS_NO_INFO;
2188 } 2393 }
@@ -2213,10 +2418,10 @@ sendresp:
2213 L2CAP_INFO_REQ, sizeof(info), &info); 2418 L2CAP_INFO_REQ, sizeof(info), &info);
2214 } 2419 }
2215 2420
2216 if (chan && !(chan->conf_state & L2CAP_CONF_REQ_SENT) && 2421 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2217 result == L2CAP_CR_SUCCESS) { 2422 result == L2CAP_CR_SUCCESS) {
2218 u8 buf[128]; 2423 u8 buf[128];
2219 chan->conf_state |= L2CAP_CONF_REQ_SENT; 2424 set_bit(CONF_REQ_SENT, &chan->conf_state);
2220 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, 2425 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2221 l2cap_build_conf_req(chan, buf), buf); 2426 l2cap_build_conf_req(chan, buf), buf);
2222 chan->num_conf_req++; 2427 chan->num_conf_req++;
@@ -2254,31 +2459,29 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
2254 2459
2255 switch (result) { 2460 switch (result) {
2256 case L2CAP_CR_SUCCESS: 2461 case L2CAP_CR_SUCCESS:
2257 sk->sk_state = BT_CONFIG; 2462 l2cap_state_change(chan, BT_CONFIG);
2258 chan->ident = 0; 2463 chan->ident = 0;
2259 chan->dcid = dcid; 2464 chan->dcid = dcid;
2260 chan->conf_state &= ~L2CAP_CONF_CONNECT_PEND; 2465 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2261 2466
2262 if (chan->conf_state & L2CAP_CONF_REQ_SENT) 2467 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2263 break; 2468 break;
2264 2469
2265 chan->conf_state |= L2CAP_CONF_REQ_SENT;
2266
2267 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, 2470 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2268 l2cap_build_conf_req(chan, req), req); 2471 l2cap_build_conf_req(chan, req), req);
2269 chan->num_conf_req++; 2472 chan->num_conf_req++;
2270 break; 2473 break;
2271 2474
2272 case L2CAP_CR_PEND: 2475 case L2CAP_CR_PEND:
2273 chan->conf_state |= L2CAP_CONF_CONNECT_PEND; 2476 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2274 break; 2477 break;
2275 2478
2276 default: 2479 default:
2277 /* don't delete l2cap channel if sk is owned by user */ 2480 /* don't delete l2cap channel if sk is owned by user */
2278 if (sock_owned_by_user(sk)) { 2481 if (sock_owned_by_user(sk)) {
2279 sk->sk_state = BT_DISCONN; 2482 l2cap_state_change(chan, BT_DISCONN);
2280 l2cap_sock_clear_timer(sk); 2483 __clear_chan_timer(chan);
2281 l2cap_sock_set_timer(sk, HZ / 5); 2484 __set_chan_timer(chan, HZ / 5);
2282 break; 2485 break;
2283 } 2486 }
2284 2487
@@ -2292,14 +2495,12 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
2292 2495
2293static inline void set_default_fcs(struct l2cap_chan *chan) 2496static inline void set_default_fcs(struct l2cap_chan *chan)
2294{ 2497{
2295 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
2296
2297 /* FCS is enabled only in ERTM or streaming mode, if one or both 2498 /* FCS is enabled only in ERTM or streaming mode, if one or both
2298 * sides request it. 2499 * sides request it.
2299 */ 2500 */
2300 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING) 2501 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2301 chan->fcs = L2CAP_FCS_NONE; 2502 chan->fcs = L2CAP_FCS_NONE;
2302 else if (!(pi->chan->conf_state & L2CAP_CONF_NO_FCS_RECV)) 2503 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2303 chan->fcs = L2CAP_FCS_CRC16; 2504 chan->fcs = L2CAP_FCS_CRC16;
2304} 2505}
2305 2506
@@ -2323,10 +2524,13 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2323 2524
2324 sk = chan->sk; 2525 sk = chan->sk;
2325 2526
2326 if (sk->sk_state != BT_CONFIG) { 2527 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2327 struct l2cap_cmd_rej rej; 2528 struct l2cap_cmd_rej_cid rej;
2529
2530 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2531 rej.scid = cpu_to_le16(chan->scid);
2532 rej.dcid = cpu_to_le16(chan->dcid);
2328 2533
2329 rej.reason = cpu_to_le16(0x0002);
2330 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ, 2534 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2331 sizeof(rej), &rej); 2535 sizeof(rej), &rej);
2332 goto unlock; 2536 goto unlock;
@@ -2334,7 +2538,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2334 2538
2335 /* Reject if config buffer is too small. */ 2539 /* Reject if config buffer is too small. */
2336 len = cmd_len - sizeof(*req); 2540 len = cmd_len - sizeof(*req);
2337 if (chan->conf_len + len > sizeof(chan->conf_req)) { 2541 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2338 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, 2542 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2339 l2cap_build_conf_rsp(chan, rsp, 2543 l2cap_build_conf_rsp(chan, rsp,
2340 L2CAP_CONF_REJECT, flags), rsp); 2544 L2CAP_CONF_REJECT, flags), rsp);
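
The added len < 0 guard is a hardening fix rather than tidying: cmd_len comes straight off the wire, so a config request shorter than its own fixed header drives len negative. The old check could still pass in that case, for example conf_len 8 and len -4 gives 4, comfortably under the buffer size, after which the negative count reaches the subsequent memcpy and converts to an enormous size_t. Sketch of the failure mode (the memcpy is the one that follows this check in the full function, not visible in the hunk):

        int len = cmd_len - sizeof(*req);       /* negative on a short packet */

        if (chan->conf_len + len > sizeof(chan->conf_req))      /* 8 + -4 = 4: passes */
                goto reject;

        memcpy(chan->conf_req + chan->conf_len, req->data, len); /* len becomes ~SIZE_MAX */
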
@@ -2366,13 +2570,13 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2366 /* Reset config buffer. */ 2570 /* Reset config buffer. */
2367 chan->conf_len = 0; 2571 chan->conf_len = 0;
2368 2572
2369 if (!(chan->conf_state & L2CAP_CONF_OUTPUT_DONE)) 2573 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
2370 goto unlock; 2574 goto unlock;
2371 2575
2372 if (chan->conf_state & L2CAP_CONF_INPUT_DONE) { 2576 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2373 set_default_fcs(chan); 2577 set_default_fcs(chan);
2374 2578
2375 sk->sk_state = BT_CONNECTED; 2579 l2cap_state_change(chan, BT_CONNECTED);
2376 2580
2377 chan->next_tx_seq = 0; 2581 chan->next_tx_seq = 0;
2378 chan->expected_tx_seq = 0; 2582 chan->expected_tx_seq = 0;
@@ -2384,9 +2588,8 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2384 goto unlock; 2588 goto unlock;
2385 } 2589 }
2386 2590
2387 if (!(chan->conf_state & L2CAP_CONF_REQ_SENT)) { 2591 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2388 u8 buf[64]; 2592 u8 buf[64];
2389 chan->conf_state |= L2CAP_CONF_REQ_SENT;
2390 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, 2593 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2391 l2cap_build_conf_req(chan, buf), buf); 2594 l2cap_build_conf_req(chan, buf), buf);
2392 chan->num_conf_req++; 2595 chan->num_conf_req++;
@@ -2451,7 +2654,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2451 2654
2452 default: 2655 default:
2453 sk->sk_err = ECONNRESET; 2656 sk->sk_err = ECONNRESET;
2454 l2cap_sock_set_timer(sk, HZ * 5); 2657 __set_chan_timer(chan, HZ * 5);
2455 l2cap_send_disconn_req(conn, chan, ECONNRESET); 2658 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2456 goto done; 2659 goto done;
2457 } 2660 }
@@ -2459,12 +2662,12 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2459 if (flags & 0x01) 2662 if (flags & 0x01)
2460 goto done; 2663 goto done;
2461 2664
2462 chan->conf_state |= L2CAP_CONF_INPUT_DONE; 2665 set_bit(CONF_INPUT_DONE, &chan->conf_state);
2463 2666
2464 if (chan->conf_state & L2CAP_CONF_OUTPUT_DONE) { 2667 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
2465 set_default_fcs(chan); 2668 set_default_fcs(chan);
2466 2669
2467 sk->sk_state = BT_CONNECTED; 2670 l2cap_state_change(chan, BT_CONNECTED);
2468 chan->next_tx_seq = 0; 2671 chan->next_tx_seq = 0;
2469 chan->expected_tx_seq = 0; 2672 chan->expected_tx_seq = 0;
2470 skb_queue_head_init(&chan->tx_q); 2673 skb_queue_head_init(&chan->tx_q);
@@ -2506,9 +2709,9 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd
2506 2709
2507 /* don't delete l2cap channel if sk is owned by user */ 2710 /* don't delete l2cap channel if sk is owned by user */
2508 if (sock_owned_by_user(sk)) { 2711 if (sock_owned_by_user(sk)) {
2509 sk->sk_state = BT_DISCONN; 2712 l2cap_state_change(chan, BT_DISCONN);
2510 l2cap_sock_clear_timer(sk); 2713 __clear_chan_timer(chan);
2511 l2cap_sock_set_timer(sk, HZ / 5); 2714 __set_chan_timer(chan, HZ / 5);
2512 bh_unlock_sock(sk); 2715 bh_unlock_sock(sk);
2513 return 0; 2716 return 0;
2514 } 2717 }
@@ -2516,7 +2719,7 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd
2516 l2cap_chan_del(chan, ECONNRESET); 2719 l2cap_chan_del(chan, ECONNRESET);
2517 bh_unlock_sock(sk); 2720 bh_unlock_sock(sk);
2518 2721
2519 l2cap_sock_kill(sk); 2722 chan->ops->close(chan->data);
2520 return 0; 2723 return 0;
2521} 2724}
2522 2725
@@ -2540,9 +2743,9 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd
2540 2743
2541 /* don't delete l2cap channel if sk is owned by user */ 2744 /* don't delete l2cap channel if sk is owned by user */
2542 if (sock_owned_by_user(sk)) { 2745 if (sock_owned_by_user(sk)) {
2543 sk->sk_state = BT_DISCONN; 2746 l2cap_state_change(chan, BT_DISCONN);

2544 l2cap_sock_clear_timer(sk); 2747 __clear_chan_timer(chan);
2545 l2cap_sock_set_timer(sk, HZ / 5); 2748 __set_chan_timer(chan, HZ / 5);
2546 bh_unlock_sock(sk); 2749 bh_unlock_sock(sk);
2547 return 0; 2750 return 0;
2548 } 2751 }
@@ -2550,7 +2753,7 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd
2550 l2cap_chan_del(chan, 0); 2753 l2cap_chan_del(chan, 0);
2551 bh_unlock_sock(sk); 2754 bh_unlock_sock(sk);
2552 2755
2553 l2cap_sock_kill(sk); 2756 chan->ops->close(chan->data);
2554 return 0; 2757 return 0;
2555} 2758}
2556 2759
@@ -2818,12 +3021,12 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn,
2818 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data); 3021 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
2819 3022
2820 if (err) { 3023 if (err) {
2821 struct l2cap_cmd_rej rej; 3024 struct l2cap_cmd_rej_unk rej;
2822 3025
2823 BT_ERR("Wrong link type (%d)", err); 3026 BT_ERR("Wrong link type (%d)", err);
2824 3027
2825 /* FIXME: Map err to a valid reason */ 3028 /* FIXME: Map err to a valid reason */
2826 rej.reason = cpu_to_le16(0); 3029 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
2827 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej); 3030 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
2828 } 3031 }
2829 3032
@@ -2858,18 +3061,18 @@ static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
2858 3061
2859 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT; 3062 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
2860 3063
2861 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) { 3064 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
2862 control |= L2CAP_SUPER_RCV_NOT_READY; 3065 control |= L2CAP_SUPER_RCV_NOT_READY;
2863 l2cap_send_sframe(chan, control); 3066 l2cap_send_sframe(chan, control);
2864 chan->conn_state |= L2CAP_CONN_RNR_SENT; 3067 set_bit(CONN_RNR_SENT, &chan->conn_state);
2865 } 3068 }
2866 3069
2867 if (chan->conn_state & L2CAP_CONN_REMOTE_BUSY) 3070 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2868 l2cap_retransmit_frames(chan); 3071 l2cap_retransmit_frames(chan);
2869 3072
2870 l2cap_ertm_send(chan); 3073 l2cap_ertm_send(chan);
2871 3074
2872 if (!(chan->conn_state & L2CAP_CONN_LOCAL_BUSY) && 3075 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2873 chan->frames_sent == 0) { 3076 chan->frames_sent == 0) {
2874 control |= L2CAP_SUPER_RCV_READY; 3077 control |= L2CAP_SUPER_RCV_READY;
2875 l2cap_send_sframe(chan, control); 3078 l2cap_send_sframe(chan, control);
@@ -2925,17 +3128,13 @@ static int l2cap_ertm_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *sk
2925 3128
2926 switch (control & L2CAP_CTRL_SAR) { 3129 switch (control & L2CAP_CTRL_SAR) {
2927 case L2CAP_SDU_UNSEGMENTED: 3130 case L2CAP_SDU_UNSEGMENTED:
2928 if (chan->conn_state & L2CAP_CONN_SAR_SDU) 3131 if (test_bit(CONN_SAR_SDU, &chan->conn_state))
2929 goto drop; 3132 goto drop;
2930 3133
2931 err = sock_queue_rcv_skb(chan->sk, skb); 3134 return chan->ops->recv(chan->data, skb);
2932 if (!err)
2933 return err;
2934
2935 break;
2936 3135
2937 case L2CAP_SDU_START: 3136 case L2CAP_SDU_START:
2938 if (chan->conn_state & L2CAP_CONN_SAR_SDU) 3137 if (test_bit(CONN_SAR_SDU, &chan->conn_state))
2939 goto drop; 3138 goto drop;
2940 3139
2941 chan->sdu_len = get_unaligned_le16(skb->data); 3140 chan->sdu_len = get_unaligned_le16(skb->data);
@@ -2954,12 +3153,12 @@ static int l2cap_ertm_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *sk
2954 3153
2955 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len); 3154 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
2956 3155
2957 chan->conn_state |= L2CAP_CONN_SAR_SDU; 3156 set_bit(CONN_SAR_SDU, &chan->conn_state);
2958 chan->partial_sdu_len = skb->len; 3157 chan->partial_sdu_len = skb->len;
2959 break; 3158 break;
2960 3159
2961 case L2CAP_SDU_CONTINUE: 3160 case L2CAP_SDU_CONTINUE:
2962 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU)) 3161 if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
2963 goto disconnect; 3162 goto disconnect;
2964 3163
2965 if (!chan->sdu) 3164 if (!chan->sdu)
@@ -2974,39 +3173,34 @@ static int l2cap_ertm_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *sk
2974 break; 3173 break;
2975 3174
2976 case L2CAP_SDU_END: 3175 case L2CAP_SDU_END:
2977 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU)) 3176 if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
2978 goto disconnect; 3177 goto disconnect;
2979 3178
2980 if (!chan->sdu) 3179 if (!chan->sdu)
2981 goto disconnect; 3180 goto disconnect;
2982 3181
2983 if (!(chan->conn_state & L2CAP_CONN_SAR_RETRY)) { 3182 chan->partial_sdu_len += skb->len;
2984 chan->partial_sdu_len += skb->len;
2985 3183
2986 if (chan->partial_sdu_len > chan->imtu) 3184 if (chan->partial_sdu_len > chan->imtu)
2987 goto drop; 3185 goto drop;
2988 3186
2989 if (chan->partial_sdu_len != chan->sdu_len) 3187 if (chan->partial_sdu_len != chan->sdu_len)
2990 goto drop; 3188 goto drop;
2991 3189
2992 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len); 3190 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
2993 }
2994 3191
2995 _skb = skb_clone(chan->sdu, GFP_ATOMIC); 3192 _skb = skb_clone(chan->sdu, GFP_ATOMIC);
2996 if (!_skb) { 3193 if (!_skb) {
2997 chan->conn_state |= L2CAP_CONN_SAR_RETRY;
2998 return -ENOMEM; 3194 return -ENOMEM;
2999 } 3195 }
3000 3196
3001 err = sock_queue_rcv_skb(chan->sk, _skb); 3197 err = chan->ops->recv(chan->data, _skb);
3002 if (err < 0) { 3198 if (err < 0) {
3003 kfree_skb(_skb); 3199 kfree_skb(_skb);
3004 chan->conn_state |= L2CAP_CONN_SAR_RETRY;
3005 return err; 3200 return err;
3006 } 3201 }
3007 3202
3008 chan->conn_state &= ~L2CAP_CONN_SAR_RETRY; 3203 clear_bit(CONN_SAR_SDU, &chan->conn_state);
3009 chan->conn_state &= ~L2CAP_CONN_SAR_SDU;
3010 3204
3011 kfree_skb(chan->sdu); 3205 kfree_skb(chan->sdu);
3012 break; 3206 break;
@@ -3025,128 +3219,55 @@ disconnect:
3025 return 0; 3219 return 0;
3026} 3220}
3027 3221
3028static int l2cap_try_push_rx_skb(struct l2cap_chan *chan) 3222static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3029{ 3223{
3030 struct sk_buff *skb;
3031 u16 control; 3224 u16 control;
3032 int err;
3033
3034 while ((skb = skb_dequeue(&chan->busy_q))) {
3035 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3036 err = l2cap_ertm_reassembly_sdu(chan, skb, control);
3037 if (err < 0) {
3038 skb_queue_head(&chan->busy_q, skb);
3039 return -EBUSY;
3040 }
3041 3225
3042 chan->buffer_seq = (chan->buffer_seq + 1) % 64; 3226 BT_DBG("chan %p, Enter local busy", chan);
3043 }
3044 3227
3045 if (!(chan->conn_state & L2CAP_CONN_RNR_SENT)) 3228 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3046 goto done;
3047 3229
3048 control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT; 3230 control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3049 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL; 3231 control |= L2CAP_SUPER_RCV_NOT_READY;
3050 l2cap_send_sframe(chan, control); 3232 l2cap_send_sframe(chan, control);
3051 chan->retry_count = 1;
3052 3233
3053 del_timer(&chan->retrans_timer); 3234 set_bit(CONN_RNR_SENT, &chan->conn_state);
3054 __mod_monitor_timer();
3055 3235
3056 chan->conn_state |= L2CAP_CONN_WAIT_F; 3236 __clear_ack_timer(chan);
3057
3058done:
3059 chan->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
3060 chan->conn_state &= ~L2CAP_CONN_RNR_SENT;
3061
3062 BT_DBG("chan %p, Exit local busy", chan);
3063
3064 return 0;
3065} 3237}
3066 3238
3067static void l2cap_busy_work(struct work_struct *work) 3239static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
3068{ 3240{
3069 DECLARE_WAITQUEUE(wait, current); 3241 u16 control;
3070 struct l2cap_chan *chan =
3071 container_of(work, struct l2cap_chan, busy_work);
3072 struct sock *sk = chan->sk;
3073 int n_tries = 0, timeo = HZ/5, err;
3074 struct sk_buff *skb;
3075
3076 lock_sock(sk);
3077
3078 add_wait_queue(sk_sleep(sk), &wait);
3079 while ((skb = skb_peek(&chan->busy_q))) {
3080 set_current_state(TASK_INTERRUPTIBLE);
3081
3082 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
3083 err = -EBUSY;
3084 l2cap_send_disconn_req(chan->conn, chan, EBUSY);
3085 break;
3086 }
3087
3088 if (!timeo)
3089 timeo = HZ/5;
3090 3242
3091 if (signal_pending(current)) { 3243 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3092 err = sock_intr_errno(timeo); 3244 goto done;
3093 break;
3094 }
3095 3245
3096 release_sock(sk); 3246 control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3097 timeo = schedule_timeout(timeo); 3247 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
3098 lock_sock(sk); 3248 l2cap_send_sframe(chan, control);
3249 chan->retry_count = 1;
3099 3250
3100 err = sock_error(sk); 3251 __clear_retrans_timer(chan);
3101 if (err) 3252 __set_monitor_timer(chan);
3102 break;
3103 3253
3104 if (l2cap_try_push_rx_skb(chan) == 0) 3254 set_bit(CONN_WAIT_F, &chan->conn_state);
3105 break;
3106 }
3107 3255
3108 set_current_state(TASK_RUNNING); 3256done:
3109 remove_wait_queue(sk_sleep(sk), &wait); 3257 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3258 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3110 3259
3111 release_sock(sk); 3260 BT_DBG("chan %p, Exit local busy", chan);
3112} 3261}
3113 3262
3114static int l2cap_push_rx_skb(struct l2cap_chan *chan, struct sk_buff *skb, u16 control) 3263void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3115{ 3264{
3116 int sctrl, err; 3265 if (chan->mode == L2CAP_MODE_ERTM) {
3117 3266 if (busy)
3118 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) { 3267 l2cap_ertm_enter_local_busy(chan);
3119 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT; 3268 else
3120 __skb_queue_tail(&chan->busy_q, skb); 3269 l2cap_ertm_exit_local_busy(chan);
3121 return l2cap_try_push_rx_skb(chan);
3122
3123
3124 }
3125
3126 err = l2cap_ertm_reassembly_sdu(chan, skb, control);
3127 if (err >= 0) {
3128 chan->buffer_seq = (chan->buffer_seq + 1) % 64;
3129 return err;
3130 } 3270 }
3131
3132 /* Busy Condition */
3133 BT_DBG("chan %p, Enter local busy", chan);
3134
3135 chan->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3136 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3137 __skb_queue_tail(&chan->busy_q, skb);
3138
3139 sctrl = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3140 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3141 l2cap_send_sframe(chan, sctrl);
3142
3143 chan->conn_state |= L2CAP_CONN_RNR_SENT;
3144
3145 del_timer(&chan->ack_timer);
3146
3147 queue_work(_busy_wq, &chan->busy_work);
3148
3149 return err;
3150} 3271}
3151 3272
3152static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control) 3273static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
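
The hunk above deletes the deferred-delivery machinery wholesale: no more busy_q, no more l2cap_busy_work() polling from a dedicated workqueue (whose creation is dropped from l2cap_init() further down). Local-busy handling is now explicit and synchronous, and l2cap_chan_busy() is the new entry point for whoever owns the channel. A sketch of the intended call pattern; the actual trigger sites are in the socket backend, outside this hunk, so treat the placement as an assumption:

        /* receive buffers exhausted: advertise receiver-not-ready */
        l2cap_chan_busy(chan, 1);

        /* consumer drained its queue: send RR with the poll bit set
         * and restart the retransmission timers */
        l2cap_chan_busy(chan, 0);
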
@@ -3161,19 +3282,19 @@ static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buf
3161 3282
3162 switch (control & L2CAP_CTRL_SAR) { 3283 switch (control & L2CAP_CTRL_SAR) {
3163 case L2CAP_SDU_UNSEGMENTED: 3284 case L2CAP_SDU_UNSEGMENTED:
3164 if (chan->conn_state & L2CAP_CONN_SAR_SDU) { 3285 if (test_bit(CONN_SAR_SDU, &chan->conn_state)) {
3165 kfree_skb(chan->sdu); 3286 kfree_skb(chan->sdu);
3166 break; 3287 break;
3167 } 3288 }
3168 3289
3169 err = sock_queue_rcv_skb(chan->sk, skb); 3290 err = chan->ops->recv(chan->data, skb);
3170 if (!err) 3291 if (!err)
3171 return 0; 3292 return 0;
3172 3293
3173 break; 3294 break;
3174 3295
3175 case L2CAP_SDU_START: 3296 case L2CAP_SDU_START:
3176 if (chan->conn_state & L2CAP_CONN_SAR_SDU) { 3297 if (test_bit(CONN_SAR_SDU, &chan->conn_state)) {
3177 kfree_skb(chan->sdu); 3298 kfree_skb(chan->sdu);
3178 break; 3299 break;
3179 } 3300 }
@@ -3194,13 +3315,13 @@ static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buf
3194 3315
3195 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len); 3316 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3196 3317
3197 chan->conn_state |= L2CAP_CONN_SAR_SDU; 3318 set_bit(CONN_SAR_SDU, &chan->conn_state);
3198 chan->partial_sdu_len = skb->len; 3319 chan->partial_sdu_len = skb->len;
3199 err = 0; 3320 err = 0;
3200 break; 3321 break;
3201 3322
3202 case L2CAP_SDU_CONTINUE: 3323 case L2CAP_SDU_CONTINUE:
3203 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU)) 3324 if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
3204 break; 3325 break;
3205 3326
3206 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len); 3327 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
@@ -3214,12 +3335,12 @@ static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buf
3214 break; 3335 break;
3215 3336
3216 case L2CAP_SDU_END: 3337 case L2CAP_SDU_END:
3217 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU)) 3338 if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
3218 break; 3339 break;
3219 3340
3220 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len); 3341 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3221 3342
3222 chan->conn_state &= ~L2CAP_CONN_SAR_SDU; 3343 clear_bit(CONN_SAR_SDU, &chan->conn_state);
3223 chan->partial_sdu_len += skb->len; 3344 chan->partial_sdu_len += skb->len;
3224 3345
3225 if (chan->partial_sdu_len > chan->imtu) 3346 if (chan->partial_sdu_len > chan->imtu)
@@ -3227,7 +3348,7 @@ static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buf
3227 3348
3228 if (chan->partial_sdu_len == chan->sdu_len) { 3349 if (chan->partial_sdu_len == chan->sdu_len) {
3229 _skb = skb_clone(chan->sdu, GFP_ATOMIC); 3350 _skb = skb_clone(chan->sdu, GFP_ATOMIC);
3230 err = sock_queue_rcv_skb(chan->sk, _skb); 3351 err = chan->ops->recv(chan->data, _skb);
3231 if (err < 0) 3352 if (err < 0)
3232 kfree_skb(_skb); 3353 kfree_skb(_skb);
3233 } 3354 }
@@ -3247,13 +3368,22 @@ static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
3247 struct sk_buff *skb; 3368 struct sk_buff *skb;
3248 u16 control; 3369 u16 control;
3249 3370
3250 while ((skb = skb_peek(&chan->srej_q))) { 3371 while ((skb = skb_peek(&chan->srej_q)) &&
3372 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3373 int err;
3374
3251 if (bt_cb(skb)->tx_seq != tx_seq) 3375 if (bt_cb(skb)->tx_seq != tx_seq)
3252 break; 3376 break;
3253 3377
3254 skb = skb_dequeue(&chan->srej_q); 3378 skb = skb_dequeue(&chan->srej_q);
3255 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT; 3379 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3256 l2cap_ertm_reassembly_sdu(chan, skb, control); 3380 err = l2cap_ertm_reassembly_sdu(chan, skb, control);
3381
3382 if (err < 0) {
3383 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3384 break;
3385 }
3386
3257 chan->buffer_seq_srej = 3387 chan->buffer_seq_srej =
3258 (chan->buffer_seq_srej + 1) % 64; 3388 (chan->buffer_seq_srej + 1) % 64;
3259 tx_seq = (tx_seq + 1) % 64; 3389 tx_seq = (tx_seq + 1) % 64;
@@ -3310,19 +3440,16 @@ static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_cont
3310 tx_seq, rx_control); 3440 tx_seq, rx_control);
3311 3441
3312 if (L2CAP_CTRL_FINAL & rx_control && 3442 if (L2CAP_CTRL_FINAL & rx_control &&
3313 chan->conn_state & L2CAP_CONN_WAIT_F) { 3443 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3314 del_timer(&chan->monitor_timer); 3444 __clear_monitor_timer(chan);
3315 if (chan->unacked_frames > 0) 3445 if (chan->unacked_frames > 0)
3316 __mod_retrans_timer(); 3446 __set_retrans_timer(chan);
3317 chan->conn_state &= ~L2CAP_CONN_WAIT_F; 3447 clear_bit(CONN_WAIT_F, &chan->conn_state);
3318 } 3448 }
3319 3449
3320 chan->expected_ack_seq = req_seq; 3450 chan->expected_ack_seq = req_seq;
3321 l2cap_drop_acked_frames(chan); 3451 l2cap_drop_acked_frames(chan);
3322 3452
3323 if (tx_seq == chan->expected_tx_seq)
3324 goto expected;
3325
3326 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64; 3453 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3327 if (tx_seq_offset < 0) 3454 if (tx_seq_offset < 0)
3328 tx_seq_offset += 64; 3455 tx_seq_offset += 64;
@@ -3333,10 +3460,13 @@ static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_cont
3333 goto drop; 3460 goto drop;
3334 } 3461 }
3335 3462
3336 if (chan->conn_state == L2CAP_CONN_LOCAL_BUSY) 3463 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
3337 goto drop; 3464 goto drop;
3338 3465
3339 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) { 3466 if (tx_seq == chan->expected_tx_seq)
3467 goto expected;
3468
3469 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3340 struct srej_list *first; 3470 struct srej_list *first;
3341 3471
3342 first = list_first_entry(&chan->srej_l, 3472 first = list_first_entry(&chan->srej_l,
@@ -3350,7 +3480,7 @@ static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_cont
3350 3480
3351 if (list_empty(&chan->srej_l)) { 3481 if (list_empty(&chan->srej_l)) {
3352 chan->buffer_seq = chan->buffer_seq_srej; 3482 chan->buffer_seq = chan->buffer_seq_srej;
3353 chan->conn_state &= ~L2CAP_CONN_SREJ_SENT; 3483 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3354 l2cap_send_ack(chan); 3484 l2cap_send_ack(chan);
3355 BT_DBG("chan %p, Exit SREJ_SENT", chan); 3485 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3356 } 3486 }
@@ -3379,7 +3509,7 @@ static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_cont
3379 if (tx_seq_offset < expected_tx_seq_offset) 3509 if (tx_seq_offset < expected_tx_seq_offset)
3380 goto drop; 3510 goto drop;
3381 3511
3382 chan->conn_state |= L2CAP_CONN_SREJ_SENT; 3512 set_bit(CONN_SREJ_SENT, &chan->conn_state);
3383 3513
3384 BT_DBG("chan %p, Enter SREJ", chan); 3514 BT_DBG("chan %p, Enter SREJ", chan);
3385 3515
@@ -3387,39 +3517,39 @@ static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_cont
3387 chan->buffer_seq_srej = chan->buffer_seq; 3517 chan->buffer_seq_srej = chan->buffer_seq;
3388 3518
3389 __skb_queue_head_init(&chan->srej_q); 3519 __skb_queue_head_init(&chan->srej_q);
3390 __skb_queue_head_init(&chan->busy_q);
3391 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar); 3520 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3392 3521
3393 chan->conn_state |= L2CAP_CONN_SEND_PBIT; 3522 set_bit(CONN_SEND_PBIT, &chan->conn_state);
3394 3523
3395 l2cap_send_srejframe(chan, tx_seq); 3524 l2cap_send_srejframe(chan, tx_seq);
3396 3525
3397 del_timer(&chan->ack_timer); 3526 __clear_ack_timer(chan);
3398 } 3527 }
3399 return 0; 3528 return 0;
3400 3529
3401expected: 3530expected:
3402 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64; 3531 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3403 3532
3404 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) { 3533 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3405 bt_cb(skb)->tx_seq = tx_seq; 3534 bt_cb(skb)->tx_seq = tx_seq;
3406 bt_cb(skb)->sar = sar; 3535 bt_cb(skb)->sar = sar;
3407 __skb_queue_tail(&chan->srej_q, skb); 3536 __skb_queue_tail(&chan->srej_q, skb);
3408 return 0; 3537 return 0;
3409 } 3538 }
3410 3539
3411 err = l2cap_push_rx_skb(chan, skb, rx_control); 3540 err = l2cap_ertm_reassembly_sdu(chan, skb, rx_control);
3412 if (err < 0) 3541 chan->buffer_seq = (chan->buffer_seq + 1) % 64;
3413 return 0; 3542 if (err < 0) {
3543 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3544 return err;
3545 }
3414 3546
3415 if (rx_control & L2CAP_CTRL_FINAL) { 3547 if (rx_control & L2CAP_CTRL_FINAL) {
3416 if (chan->conn_state & L2CAP_CONN_REJ_ACT) 3548 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3417 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3418 else
3419 l2cap_retransmit_frames(chan); 3549 l2cap_retransmit_frames(chan);
3420 } 3550 }
3421 3551
3422 __mod_ack_timer(); 3552 __set_ack_timer(chan);
3423 3553
3424 chan->num_acked = (chan->num_acked + 1) % num_to_ack; 3554 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3425 if (chan->num_acked == num_to_ack - 1) 3555 if (chan->num_acked == num_to_ack - 1)
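
Two smaller things hide in this hunk. First, the expected-sequence check moves below the window and local-busy checks, so even in-order frames are dropped while the receiver is busy, and a reassembly error now triggers a disconnect instead of being swallowed. Second, the REJ_ACT rewrite inverts the logic: test_and_clear_bit() returns the previous value, so the old four-line construct collapses into a single negated call:

        /* old */
        if (chan->conn_state & L2CAP_CONN_REJ_ACT)
                chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
        else
                l2cap_retransmit_frames(chan);

        /* new: same behaviour, one atomic operation */
        if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
                l2cap_retransmit_frames(chan);
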
@@ -3441,33 +3571,31 @@ static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_co
3441 l2cap_drop_acked_frames(chan); 3571 l2cap_drop_acked_frames(chan);
3442 3572
3443 if (rx_control & L2CAP_CTRL_POLL) { 3573 if (rx_control & L2CAP_CTRL_POLL) {
3444 chan->conn_state |= L2CAP_CONN_SEND_FBIT; 3574 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3445 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) { 3575 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3446 if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) && 3576 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3447 (chan->unacked_frames > 0)) 3577 (chan->unacked_frames > 0))
3448 __mod_retrans_timer(); 3578 __set_retrans_timer(chan);
3449 3579
3450 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY; 3580 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3451 l2cap_send_srejtail(chan); 3581 l2cap_send_srejtail(chan);
3452 } else { 3582 } else {
3453 l2cap_send_i_or_rr_or_rnr(chan); 3583 l2cap_send_i_or_rr_or_rnr(chan);
3454 } 3584 }
3455 3585
3456 } else if (rx_control & L2CAP_CTRL_FINAL) { 3586 } else if (rx_control & L2CAP_CTRL_FINAL) {
3457 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY; 3587 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3458 3588
3459 if (chan->conn_state & L2CAP_CONN_REJ_ACT) 3589 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3460 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3461 else
3462 l2cap_retransmit_frames(chan); 3590 l2cap_retransmit_frames(chan);
3463 3591
3464 } else { 3592 } else {
3465 if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) && 3593 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3466 (chan->unacked_frames > 0)) 3594 (chan->unacked_frames > 0))
3467 __mod_retrans_timer(); 3595 __set_retrans_timer(chan);
3468 3596
3469 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY; 3597 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3470 if (chan->conn_state & L2CAP_CONN_SREJ_SENT) 3598 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
3471 l2cap_send_ack(chan); 3599 l2cap_send_ack(chan);
3472 else 3600 else
3473 l2cap_ertm_send(chan); 3601 l2cap_ertm_send(chan);
@@ -3480,21 +3608,19 @@ static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_c
3480 3608
3481 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control); 3609 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3482 3610
3483 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY; 3611 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3484 3612
3485 chan->expected_ack_seq = tx_seq; 3613 chan->expected_ack_seq = tx_seq;
3486 l2cap_drop_acked_frames(chan); 3614 l2cap_drop_acked_frames(chan);
3487 3615
3488 if (rx_control & L2CAP_CTRL_FINAL) { 3616 if (rx_control & L2CAP_CTRL_FINAL) {
3489 if (chan->conn_state & L2CAP_CONN_REJ_ACT) 3617 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3490 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3491 else
3492 l2cap_retransmit_frames(chan); 3618 l2cap_retransmit_frames(chan);
3493 } else { 3619 } else {
3494 l2cap_retransmit_frames(chan); 3620 l2cap_retransmit_frames(chan);
3495 3621
3496 if (chan->conn_state & L2CAP_CONN_WAIT_F) 3622 if (test_bit(CONN_WAIT_F, &chan->conn_state))
3497 chan->conn_state |= L2CAP_CONN_REJ_ACT; 3623 set_bit(CONN_REJ_ACT, &chan->conn_state);
3498 } 3624 }
3499} 3625}
3500static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control) 3626static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
@@ -3503,32 +3629,32 @@ static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_
3503 3629
3504 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control); 3630 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3505 3631
3506 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY; 3632 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3507 3633
3508 if (rx_control & L2CAP_CTRL_POLL) { 3634 if (rx_control & L2CAP_CTRL_POLL) {
3509 chan->expected_ack_seq = tx_seq; 3635 chan->expected_ack_seq = tx_seq;
3510 l2cap_drop_acked_frames(chan); 3636 l2cap_drop_acked_frames(chan);
3511 3637
3512 chan->conn_state |= L2CAP_CONN_SEND_FBIT; 3638 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3513 l2cap_retransmit_one_frame(chan, tx_seq); 3639 l2cap_retransmit_one_frame(chan, tx_seq);
3514 3640
3515 l2cap_ertm_send(chan); 3641 l2cap_ertm_send(chan);
3516 3642
3517 if (chan->conn_state & L2CAP_CONN_WAIT_F) { 3643 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
3518 chan->srej_save_reqseq = tx_seq; 3644 chan->srej_save_reqseq = tx_seq;
3519 chan->conn_state |= L2CAP_CONN_SREJ_ACT; 3645 set_bit(CONN_SREJ_ACT, &chan->conn_state);
3520 } 3646 }
3521 } else if (rx_control & L2CAP_CTRL_FINAL) { 3647 } else if (rx_control & L2CAP_CTRL_FINAL) {
3522 if ((chan->conn_state & L2CAP_CONN_SREJ_ACT) && 3648 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
3523 chan->srej_save_reqseq == tx_seq) 3649 chan->srej_save_reqseq == tx_seq)
3524 chan->conn_state &= ~L2CAP_CONN_SREJ_ACT; 3650 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
3525 else 3651 else
3526 l2cap_retransmit_one_frame(chan, tx_seq); 3652 l2cap_retransmit_one_frame(chan, tx_seq);
3527 } else { 3653 } else {
3528 l2cap_retransmit_one_frame(chan, tx_seq); 3654 l2cap_retransmit_one_frame(chan, tx_seq);
3529 if (chan->conn_state & L2CAP_CONN_WAIT_F) { 3655 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
3530 chan->srej_save_reqseq = tx_seq; 3656 chan->srej_save_reqseq = tx_seq;
3531 chan->conn_state |= L2CAP_CONN_SREJ_ACT; 3657 set_bit(CONN_SREJ_ACT, &chan->conn_state);
3532 } 3658 }
3533 } 3659 }
3534} 3660}
@@ -3539,15 +3665,15 @@ static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_c
3539 3665
3540 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control); 3666 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3541 3667
3542 chan->conn_state |= L2CAP_CONN_REMOTE_BUSY; 3668 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3543 chan->expected_ack_seq = tx_seq; 3669 chan->expected_ack_seq = tx_seq;
3544 l2cap_drop_acked_frames(chan); 3670 l2cap_drop_acked_frames(chan);
3545 3671
3546 if (rx_control & L2CAP_CTRL_POLL) 3672 if (rx_control & L2CAP_CTRL_POLL)
3547 chan->conn_state |= L2CAP_CONN_SEND_FBIT; 3673 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3548 3674
3549 if (!(chan->conn_state & L2CAP_CONN_SREJ_SENT)) { 3675 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3550 del_timer(&chan->retrans_timer); 3676 __clear_retrans_timer(chan);
3551 if (rx_control & L2CAP_CTRL_POLL) 3677 if (rx_control & L2CAP_CTRL_POLL)
3552 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL); 3678 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
3553 return; 3679 return;
@@ -3564,11 +3690,11 @@ static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_cont
3564 BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len); 3690 BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);
3565 3691
3566 if (L2CAP_CTRL_FINAL & rx_control && 3692 if (L2CAP_CTRL_FINAL & rx_control &&
3567 chan->conn_state & L2CAP_CONN_WAIT_F) { 3693 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3568 del_timer(&chan->monitor_timer); 3694 __clear_monitor_timer(chan);
3569 if (chan->unacked_frames > 0) 3695 if (chan->unacked_frames > 0)
3570 __mod_retrans_timer(); 3696 __set_retrans_timer(chan);
3571 chan->conn_state &= ~L2CAP_CONN_WAIT_F; 3697 clear_bit(CONN_WAIT_F, &chan->conn_state);
3572 } 3698 }
3573 3699
3574 switch (rx_control & L2CAP_CTRL_SUPERVISE) { 3700 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
@@ -3667,7 +3793,6 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
3667{ 3793{
3668 struct l2cap_chan *chan; 3794 struct l2cap_chan *chan;
3669 struct sock *sk = NULL; 3795 struct sock *sk = NULL;
3670 struct l2cap_pinfo *pi;
3671 u16 control; 3796 u16 control;
3672 u8 tx_seq; 3797 u8 tx_seq;
3673 int len; 3798 int len;
@@ -3679,11 +3804,10 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
3679 } 3804 }
3680 3805
3681 sk = chan->sk; 3806 sk = chan->sk;
3682 pi = l2cap_pi(sk);
3683 3807
3684 BT_DBG("chan %p, len %d", chan, skb->len); 3808 BT_DBG("chan %p, len %d", chan, skb->len);
3685 3809
3686 if (sk->sk_state != BT_CONNECTED) 3810 if (chan->state != BT_CONNECTED)
3687 goto drop; 3811 goto drop;
3688 3812
3689 switch (chan->mode) { 3813 switch (chan->mode) {
@@ -3696,7 +3820,7 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
3696 if (chan->imtu < skb->len) 3820 if (chan->imtu < skb->len)
3697 goto drop; 3821 goto drop;
3698 3822
3699 if (!sock_queue_rcv_skb(sk, skb)) 3823 if (!chan->ops->recv(chan->data, skb))
3700 goto done; 3824 goto done;
3701 break; 3825 break;
3702 3826
@@ -3768,13 +3892,13 @@ static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, str
3768 3892
3769 BT_DBG("sk %p, len %d", sk, skb->len); 3893 BT_DBG("sk %p, len %d", sk, skb->len);
3770 3894
3771 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED) 3895 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
3772 goto drop; 3896 goto drop;
3773 3897
3774 if (l2cap_pi(sk)->chan->imtu < skb->len) 3898 if (chan->imtu < skb->len)
3775 goto drop; 3899 goto drop;
3776 3900
3777 if (!sock_queue_rcv_skb(sk, skb)) 3901 if (!chan->ops->recv(chan->data, skb))
3778 goto done; 3902 goto done;
3779 3903
3780drop: 3904drop:
@@ -3801,13 +3925,13 @@ static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct
3801 3925
3802 BT_DBG("sk %p, len %d", sk, skb->len); 3926 BT_DBG("sk %p, len %d", sk, skb->len);
3803 3927
3804 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED) 3928 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
3805 goto drop; 3929 goto drop;
3806 3930
3807 if (l2cap_pi(sk)->chan->imtu < skb->len) 3931 if (chan->imtu < skb->len)
3808 goto drop; 3932 goto drop;
3809 3933
3810 if (!sock_queue_rcv_skb(sk, skb)) 3934 if (!chan->ops->recv(chan->data, skb))
3811 goto done; 3935 goto done;
3812 3936
3813drop: 3937drop:
@@ -3852,6 +3976,11 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3852 l2cap_att_channel(conn, cid, skb); 3976 l2cap_att_channel(conn, cid, skb);
3853 break; 3977 break;
3854 3978
3979 case L2CAP_CID_SMP:
3980 if (smp_sig_channel(conn, skb))
3981 l2cap_conn_del(conn->hcon, EACCES);
3982 break;
3983
3855 default: 3984 default:
3856 l2cap_data_channel(conn, cid, skb); 3985 l2cap_data_channel(conn, cid, skb);
3857 break; 3986 break;
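
The new case hooks the LE Security Manager Protocol into fixed-channel dispatch: traffic on the SMP CID (fixed channel 0x0006 in the core specification) is handed to smp_sig_channel(), and a nonzero return kills the whole connection with EACCES, since an SMP peer that violates the protocol cannot be trusted on any channel. The contract this dispatch assumes, as a sketch (the function lives in the SMP code added elsewhere in this merge):

        /* returns 0 when the PDU was consumed, nonzero on protocol error */
        int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb);
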
@@ -3875,7 +4004,7 @@ static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3875 list_for_each_entry(c, &chan_list, global_l) { 4004 list_for_each_entry(c, &chan_list, global_l) {
3876 struct sock *sk = c->sk; 4005 struct sock *sk = c->sk;
3877 4006
3878 if (sk->sk_state != BT_LISTEN) 4007 if (c->state != BT_LISTEN)
3879 continue; 4008 continue;
3880 4009
3881 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) { 4010 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
@@ -3908,7 +4037,7 @@ static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3908 if (conn) 4037 if (conn)
3909 l2cap_conn_ready(conn); 4038 l2cap_conn_ready(conn);
3910 } else 4039 } else
3911 l2cap_conn_del(hcon, bt_err(status)); 4040 l2cap_conn_del(hcon, bt_to_errno(status));
3912 4041
3913 return 0; 4042 return 0;
3914} 4043}
@@ -3919,7 +4048,7 @@ static int l2cap_disconn_ind(struct hci_conn *hcon)
3919 4048
3920 BT_DBG("hcon %p", hcon); 4049 BT_DBG("hcon %p", hcon);
3921 4050
3922 if (hcon->type != ACL_LINK || !conn) 4051 if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
3923 return 0x13; 4052 return 0x13;
3924 4053
3925 return conn->disc_reason; 4054 return conn->disc_reason;
@@ -3932,27 +4061,25 @@ static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3932 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK)) 4061 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3933 return -EINVAL; 4062 return -EINVAL;
3934 4063
3935 l2cap_conn_del(hcon, bt_err(reason)); 4064 l2cap_conn_del(hcon, bt_to_errno(reason));
3936 4065
3937 return 0; 4066 return 0;
3938} 4067}
3939 4068
3940static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt) 4069static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
3941{ 4070{
3942 struct sock *sk = chan->sk; 4071 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
3943
3944 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
3945 return; 4072 return;
3946 4073
3947 if (encrypt == 0x00) { 4074 if (encrypt == 0x00) {
3948 if (chan->sec_level == BT_SECURITY_MEDIUM) { 4075 if (chan->sec_level == BT_SECURITY_MEDIUM) {
3949 l2cap_sock_clear_timer(sk); 4076 __clear_chan_timer(chan);
3950 l2cap_sock_set_timer(sk, HZ * 5); 4077 __set_chan_timer(chan, HZ * 5);
3951 } else if (chan->sec_level == BT_SECURITY_HIGH) 4078 } else if (chan->sec_level == BT_SECURITY_HIGH)
3952 __l2cap_sock_close(sk, ECONNREFUSED); 4079 l2cap_chan_close(chan, ECONNREFUSED);
3953 } else { 4080 } else {
3954 if (chan->sec_level == BT_SECURITY_MEDIUM) 4081 if (chan->sec_level == BT_SECURITY_MEDIUM)
3955 l2cap_sock_clear_timer(sk); 4082 __clear_chan_timer(chan);
3956 } 4083 }
3957} 4084}
3958 4085
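
l2cap_check_encryption() now keys off chan->chan_type rather than the socket type, another step in divorcing the core from struct sock. The substitutions visible in this diff imply roughly the following mapping; the SOCK_DGRAM row is inferred from the rest of the series, not from these hunks:

        /* SOCK_SEQPACKET, SOCK_STREAM  ->  L2CAP_CHAN_CONN_ORIENTED */
        /* SOCK_DGRAM                   ->  L2CAP_CHAN_CONN_LESS     */
        /* SOCK_RAW                     ->  L2CAP_CHAN_RAW           */
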
@@ -3973,50 +4100,74 @@ static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
3973 4100
3974 bh_lock_sock(sk); 4101 bh_lock_sock(sk);
3975 4102
3976 if (chan->conf_state & L2CAP_CONF_CONNECT_PEND) { 4103 BT_DBG("chan->scid %d", chan->scid);
4104
4105 if (chan->scid == L2CAP_CID_LE_DATA) {
4106 if (!status && encrypt) {
4107 chan->sec_level = hcon->sec_level;
4108 del_timer(&conn->security_timer);
4109 l2cap_chan_ready(sk);
4110 smp_distribute_keys(conn, 0);
4111 }
4112
4113 bh_unlock_sock(sk);
4114 continue;
4115 }
4116
4117 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
3977 bh_unlock_sock(sk); 4118 bh_unlock_sock(sk);
3978 continue; 4119 continue;
3979 } 4120 }
3980 4121
3981 if (!status && (sk->sk_state == BT_CONNECTED || 4122 if (!status && (chan->state == BT_CONNECTED ||
3982 sk->sk_state == BT_CONFIG)) { 4123 chan->state == BT_CONFIG)) {
3983 l2cap_check_encryption(chan, encrypt); 4124 l2cap_check_encryption(chan, encrypt);
3984 bh_unlock_sock(sk); 4125 bh_unlock_sock(sk);
3985 continue; 4126 continue;
3986 } 4127 }
3987 4128
3988 if (sk->sk_state == BT_CONNECT) { 4129 if (chan->state == BT_CONNECT) {
3989 if (!status) { 4130 if (!status) {
3990 struct l2cap_conn_req req; 4131 struct l2cap_conn_req req;
3991 req.scid = cpu_to_le16(chan->scid); 4132 req.scid = cpu_to_le16(chan->scid);
3992 req.psm = chan->psm; 4133 req.psm = chan->psm;
3993 4134
3994 chan->ident = l2cap_get_ident(conn); 4135 chan->ident = l2cap_get_ident(conn);
3995 chan->conf_state |= L2CAP_CONF_CONNECT_PEND; 4136 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
3996 4137
3997 l2cap_send_cmd(conn, chan->ident, 4138 l2cap_send_cmd(conn, chan->ident,
3998 L2CAP_CONN_REQ, sizeof(req), &req); 4139 L2CAP_CONN_REQ, sizeof(req), &req);
3999 } else { 4140 } else {
4000 l2cap_sock_clear_timer(sk); 4141 __clear_chan_timer(chan);
4001 l2cap_sock_set_timer(sk, HZ / 10); 4142 __set_chan_timer(chan, HZ / 10);
4002 } 4143 }
4003 } else if (sk->sk_state == BT_CONNECT2) { 4144 } else if (chan->state == BT_CONNECT2) {
4004 struct l2cap_conn_rsp rsp; 4145 struct l2cap_conn_rsp rsp;
4005 __u16 result; 4146 __u16 res, stat;
4006 4147
4007 if (!status) { 4148 if (!status) {
4008 sk->sk_state = BT_CONFIG; 4149 if (bt_sk(sk)->defer_setup) {
4009 result = L2CAP_CR_SUCCESS; 4150 struct sock *parent = bt_sk(sk)->parent;
4151 res = L2CAP_CR_PEND;
4152 stat = L2CAP_CS_AUTHOR_PEND;
4153 if (parent)
4154 parent->sk_data_ready(parent, 0);
4155 } else {
4156 l2cap_state_change(chan, BT_CONFIG);
4157 res = L2CAP_CR_SUCCESS;
4158 stat = L2CAP_CS_NO_INFO;
4159 }
4010 } else { 4160 } else {
4011 sk->sk_state = BT_DISCONN; 4161 l2cap_state_change(chan, BT_DISCONN);
4012 l2cap_sock_set_timer(sk, HZ / 10); 4162 __set_chan_timer(chan, HZ / 10);
4013 result = L2CAP_CR_SEC_BLOCK; 4163 res = L2CAP_CR_SEC_BLOCK;
4164 stat = L2CAP_CS_NO_INFO;
4014 } 4165 }
4015 4166
4016 rsp.scid = cpu_to_le16(chan->dcid); 4167 rsp.scid = cpu_to_le16(chan->dcid);
4017 rsp.dcid = cpu_to_le16(chan->scid); 4168 rsp.dcid = cpu_to_le16(chan->scid);
4018 rsp.result = cpu_to_le16(result); 4169 rsp.result = cpu_to_le16(res);
4019 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); 4170 rsp.status = cpu_to_le16(stat);
4020 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, 4171 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4021 sizeof(rsp), &rsp); 4172 sizeof(rsp), &rsp);
4022 } 4173 }
@@ -4152,10 +4303,10 @@ static int l2cap_debugfs_show(struct seq_file *f, void *p)
4152 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n", 4303 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4153 batostr(&bt_sk(sk)->src), 4304 batostr(&bt_sk(sk)->src),
4154 batostr(&bt_sk(sk)->dst), 4305 batostr(&bt_sk(sk)->dst),
4155 sk->sk_state, __le16_to_cpu(c->psm), 4306 c->state, __le16_to_cpu(c->psm),
4156 c->scid, c->dcid, c->imtu, c->omtu, 4307 c->scid, c->dcid, c->imtu, c->omtu,
4157 c->sec_level, c->mode); 4308 c->sec_level, c->mode);
4158 } 4309 }
4159 4310
4160 read_unlock_bh(&chan_list_lock); 4311 read_unlock_bh(&chan_list_lock);
4161 4312
@@ -4195,12 +4346,6 @@ int __init l2cap_init(void)
4195 if (err < 0) 4346 if (err < 0)
4196 return err; 4347 return err;
4197 4348
4198 _busy_wq = create_singlethread_workqueue("l2cap");
4199 if (!_busy_wq) {
4200 err = -ENOMEM;
4201 goto error;
4202 }
4203
4204 err = hci_register_proto(&l2cap_hci_proto); 4349 err = hci_register_proto(&l2cap_hci_proto);
4205 if (err < 0) { 4350 if (err < 0) {
4206 BT_ERR("L2CAP protocol registration failed"); 4351 BT_ERR("L2CAP protocol registration failed");
@@ -4218,7 +4363,6 @@ int __init l2cap_init(void)
4218 return 0; 4363 return 0;
4219 4364
4220error: 4365error:
4221 destroy_workqueue(_busy_wq);
4222 l2cap_cleanup_sockets(); 4366 l2cap_cleanup_sockets();
4223 return err; 4367 return err;
4224} 4368}
@@ -4227,9 +4371,6 @@ void l2cap_exit(void)
4227{ 4371{
4228 debugfs_remove(l2cap_debugfs); 4372 debugfs_remove(l2cap_debugfs);
4229 4373
4230 flush_workqueue(_busy_wq);
4231 destroy_workqueue(_busy_wq);
4232
4233 if (hci_unregister_proto(&l2cap_hci_proto) < 0) 4374 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4234 BT_ERR("L2CAP protocol unregistration failed"); 4375 BT_ERR("L2CAP protocol unregistration failed");
4235 4376
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 8248303f44e..5c36b3e8739 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -29,54 +29,11 @@
29#include <net/bluetooth/bluetooth.h> 29#include <net/bluetooth/bluetooth.h>
30#include <net/bluetooth/hci_core.h> 30#include <net/bluetooth/hci_core.h>
31#include <net/bluetooth/l2cap.h> 31#include <net/bluetooth/l2cap.h>
32#include <net/bluetooth/smp.h>
32 33
33static const struct proto_ops l2cap_sock_ops; 34static const struct proto_ops l2cap_sock_ops;
34 35static void l2cap_sock_init(struct sock *sk, struct sock *parent);
35/* ---- L2CAP timers ---- */ 36static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio);
36static void l2cap_sock_timeout(unsigned long arg)
37{
38 struct sock *sk = (struct sock *) arg;
39 int reason;
40
41 BT_DBG("sock %p state %d", sk, sk->sk_state);
42
43 bh_lock_sock(sk);
44
45 if (sock_owned_by_user(sk)) {
46 /* sk is owned by user. Try again later */
47 l2cap_sock_set_timer(sk, HZ / 5);
48 bh_unlock_sock(sk);
49 sock_put(sk);
50 return;
51 }
52
53 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
54 reason = ECONNREFUSED;
55 else if (sk->sk_state == BT_CONNECT &&
56 l2cap_pi(sk)->chan->sec_level != BT_SECURITY_SDP)
57 reason = ECONNREFUSED;
58 else
59 reason = ETIMEDOUT;
60
61 __l2cap_sock_close(sk, reason);
62
63 bh_unlock_sock(sk);
64
65 l2cap_sock_kill(sk);
66 sock_put(sk);
67}
68
69void l2cap_sock_set_timer(struct sock *sk, long timeout)
70{
71 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
72 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
73}
74
75void l2cap_sock_clear_timer(struct sock *sk)
76{
77 BT_DBG("sock %p state %d", sk, sk->sk_state);
78 sk_stop_timer(sk, &sk->sk_timer);
79}
80 37
81static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen) 38static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
82{ 39{
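
The timer helpers deleted above are not replaced within this file: timeout handling moves into the core, which arms and clears a per-channel timer itself. The pattern, using calls that appear verbatim in the l2cap_core.c half of this diff:

        __set_chan_timer(chan, sk->sk_sndtimeo);  /* arm while a connection is pending */
        __clear_chan_timer(chan);                 /* disarm on a state transition */
        __set_chan_timer(chan, HZ / 5);           /* short retry when the sock is user-locked */
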
@@ -133,6 +90,8 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
133 chan->sec_level = BT_SECURITY_SDP; 90 chan->sec_level = BT_SECURITY_SDP;
134 91
135 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr); 92 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
93
94 chan->state = BT_BOUND;
136 sk->sk_state = BT_BOUND; 95 sk->sk_state = BT_BOUND;
137 96
138done: 97done:
@@ -162,7 +121,7 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al
162 121
163 lock_sock(sk); 122 lock_sock(sk);
164 123
165 if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) 124 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED
166 && !(la.l2_psm || la.l2_cid)) { 125 && !(la.l2_psm || la.l2_cid)) {
167 err = -EINVAL; 126 err = -EINVAL;
168 goto done; 127 goto done;
@@ -204,8 +163,8 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al
204 } 163 }
205 164
206 /* PSM must be odd and lsb of upper byte must be 0 */ 165 /* PSM must be odd and lsb of upper byte must be 0 */
207 if ((__le16_to_cpu(la.l2_psm) & 0x0101) != 0x0001 && 166 if ((__le16_to_cpu(la.l2_psm) & 0x0101) != 0x0001 && !la.l2_cid &&
208 sk->sk_type != SOCK_RAW && !la.l2_cid) { 167 chan->chan_type != L2CAP_CHAN_RAW) {
209 err = -EINVAL; 168 err = -EINVAL;
210 goto done; 169 goto done;
211 } 170 }
@@ -258,6 +217,8 @@ static int l2cap_sock_listen(struct socket *sock, int backlog)
258 217
259 sk->sk_max_ack_backlog = backlog; 218 sk->sk_max_ack_backlog = backlog;
260 sk->sk_ack_backlog = 0; 219 sk->sk_ack_backlog = 0;
220
221 chan->state = BT_LISTEN;
261 sk->sk_state = BT_LISTEN; 222 sk->sk_state = BT_LISTEN;
262 223
263done: 224done:
@@ -437,6 +398,7 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
437 struct sock *sk = sock->sk; 398 struct sock *sk = sock->sk;
438 struct l2cap_chan *chan = l2cap_pi(sk)->chan; 399 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
439 struct bt_security sec; 400 struct bt_security sec;
401 struct bt_power pwr;
440 int len, err = 0; 402 int len, err = 0;
441 403
442 BT_DBG("sk %p", sk); 404 BT_DBG("sk %p", sk);
@@ -454,14 +416,18 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
454 416
455 switch (optname) { 417 switch (optname) {
456 case BT_SECURITY: 418 case BT_SECURITY:
457 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM 419 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED &&
458 && sk->sk_type != SOCK_RAW) { 420 chan->chan_type != L2CAP_CHAN_RAW) {
459 err = -EINVAL; 421 err = -EINVAL;
460 break; 422 break;
461 } 423 }
462 424
425 memset(&sec, 0, sizeof(sec));
463 sec.level = chan->sec_level; 426 sec.level = chan->sec_level;
464 427
428 if (sk->sk_state == BT_CONNECTED)
429 sec.key_size = chan->conn->hcon->enc_key_size;
430
465 len = min_t(unsigned int, len, sizeof(sec)); 431 len = min_t(unsigned int, len, sizeof(sec));
466 if (copy_to_user(optval, (char *) &sec, len)) 432 if (copy_to_user(optval, (char *) &sec, len))
467 err = -EFAULT; 433 err = -EFAULT;
@@ -485,6 +451,21 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch
485 451
486 break; 452 break;
487 453
454 case BT_POWER:
455 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
456 && sk->sk_type != SOCK_RAW) {
457 err = -EINVAL;
458 break;
459 }
460
461 pwr.force_active = chan->force_active;
462
463 len = min_t(unsigned int, len, sizeof(pwr));
464 if (copy_to_user(optval, (char *) &pwr, len))
465 err = -EFAULT;
466
467 break;
468
488 default: 469 default:
489 err = -ENOPROTOOPT; 470 err = -ENOPROTOOPT;
490 break; 471 break;
@@ -535,7 +516,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
535 chan->mode = opts.mode; 516 chan->mode = opts.mode;
536 switch (chan->mode) { 517 switch (chan->mode) {
537 case L2CAP_MODE_BASIC: 518 case L2CAP_MODE_BASIC:
538 chan->conf_state &= ~L2CAP_CONF_STATE2_DEVICE; 519 clear_bit(CONF_STATE2_DEVICE, &chan->conf_state);
539 break; 520 break;
540 case L2CAP_MODE_ERTM: 521 case L2CAP_MODE_ERTM:
541 case L2CAP_MODE_STREAMING: 522 case L2CAP_MODE_STREAMING:
@@ -585,6 +566,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
585 struct sock *sk = sock->sk; 566 struct sock *sk = sock->sk;
586 struct l2cap_chan *chan = l2cap_pi(sk)->chan; 567 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
587 struct bt_security sec; 568 struct bt_security sec;
569 struct bt_power pwr;
570 struct l2cap_conn *conn;
588 int len, err = 0; 571 int len, err = 0;
589 u32 opt; 572 u32 opt;
590 573
@@ -600,8 +583,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
600 583
601 switch (optname) { 584 switch (optname) {
602 case BT_SECURITY: 585 case BT_SECURITY:
603 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM 586 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED &&
604 && sk->sk_type != SOCK_RAW) { 587 chan->chan_type != L2CAP_CHAN_RAW) {
605 err = -EINVAL; 588 err = -EINVAL;
606 break; 589 break;
607 } 590 }
@@ -621,6 +604,20 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
621 } 604 }
622 605
623 chan->sec_level = sec.level; 606 chan->sec_level = sec.level;
607
608 conn = chan->conn;
609 if (conn && chan->scid == L2CAP_CID_LE_DATA) {
610 if (!conn->hcon->out) {
611 err = -EINVAL;
612 break;
613 }
614
615 if (smp_conn_security(conn, sec.level))
616 break;
617
618 err = 0;
619 sk->sk_state = BT_CONFIG;
620 }
624 break; 621 break;
625 622
626 case BT_DEFER_SETUP: 623 case BT_DEFER_SETUP:
@@ -661,6 +658,23 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
661 chan->flushable = opt; 658 chan->flushable = opt;
662 break; 659 break;
663 660
661 case BT_POWER:
662 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED &&
663 chan->chan_type != L2CAP_CHAN_RAW) {
664 err = -EINVAL;
665 break;
666 }
667
668 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
669
670 len = min_t(unsigned int, sizeof(pwr), optlen);
671 if (copy_from_user((char *) &pwr, optval, len)) {
672 err = -EFAULT;
673 break;
674 }
675 chan->force_active = pwr.force_active;
676 break;
677
664 default: 678 default:
665 err = -ENOPROTOOPT; 679 err = -ENOPROTOOPT;
666 break; 680 break;
@@ -674,8 +688,6 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
674{ 688{
675 struct sock *sk = sock->sk; 689 struct sock *sk = sock->sk;
676 struct l2cap_chan *chan = l2cap_pi(sk)->chan; 690 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
677 struct sk_buff *skb;
678 u16 control;
679 int err; 691 int err;
680 692
681 BT_DBG("sock %p, sk %p", sock, sk); 693 BT_DBG("sock %p, sk %p", sock, sk);
@@ -690,87 +702,12 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
690 lock_sock(sk); 702 lock_sock(sk);
691 703
692 if (sk->sk_state != BT_CONNECTED) { 704 if (sk->sk_state != BT_CONNECTED) {
693 err = -ENOTCONN; 705 release_sock(sk);
694 goto done; 706 return -ENOTCONN;
695 }
696
697 /* Connectionless channel */
698 if (sk->sk_type == SOCK_DGRAM) {
699 skb = l2cap_create_connless_pdu(chan, msg, len);
700 if (IS_ERR(skb)) {
701 err = PTR_ERR(skb);
702 } else {
703 l2cap_do_send(chan, skb);
704 err = len;
705 }
706 goto done;
707 } 707 }
708 708
709 switch (chan->mode) { 709 err = l2cap_chan_send(chan, msg, len);
710 case L2CAP_MODE_BASIC:
711 /* Check outgoing MTU */
712 if (len > chan->omtu) {
713 err = -EMSGSIZE;
714 goto done;
715 }
716
717 /* Create a basic PDU */
718 skb = l2cap_create_basic_pdu(chan, msg, len);
719 if (IS_ERR(skb)) {
720 err = PTR_ERR(skb);
721 goto done;
722 }
723
724 l2cap_do_send(chan, skb);
725 err = len;
726 break;
727
728 case L2CAP_MODE_ERTM:
729 case L2CAP_MODE_STREAMING:
730 /* Entire SDU fits into one PDU */
731 if (len <= chan->remote_mps) {
732 control = L2CAP_SDU_UNSEGMENTED;
733 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
734 0);
735 if (IS_ERR(skb)) {
736 err = PTR_ERR(skb);
737 goto done;
738 }
739 __skb_queue_tail(&chan->tx_q, skb);
740
741 if (chan->tx_send_head == NULL)
742 chan->tx_send_head = skb;
743 710
744 } else {
745				/* Segment SDU into multiple PDUs */
746 err = l2cap_sar_segment_sdu(chan, msg, len);
747 if (err < 0)
748 goto done;
749 }
750
751 if (chan->mode == L2CAP_MODE_STREAMING) {
752 l2cap_streaming_send(chan);
753 err = len;
754 break;
755 }
756
757 if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
758 (chan->conn_state & L2CAP_CONN_WAIT_F)) {
759 err = len;
760 break;
761 }
762 err = l2cap_ertm_send(chan);
763
764 if (err >= 0)
765 err = len;
766 break;
767
768 default:
769 BT_DBG("bad state %1.1x", chan->mode);
770 err = -EBADFD;
771 }
772
773done:
774 release_sock(sk); 711 release_sock(sk);
775 return err; 712 return err;
776} 713}
@@ -778,13 +715,15 @@ done:
778static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags) 715static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
779{ 716{
780 struct sock *sk = sock->sk; 717 struct sock *sk = sock->sk;
718 struct l2cap_pinfo *pi = l2cap_pi(sk);
719 int err;
781 720
782 lock_sock(sk); 721 lock_sock(sk);
783 722
784 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) { 723 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
785 sk->sk_state = BT_CONFIG; 724 sk->sk_state = BT_CONFIG;
786 725
787 __l2cap_connect_rsp_defer(l2cap_pi(sk)->chan); 726 __l2cap_connect_rsp_defer(pi->chan);
788 release_sock(sk); 727 release_sock(sk);
789 return 0; 728 return 0;
790 } 729 }
@@ -792,15 +731,43 @@ static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct ms
792 release_sock(sk); 731 release_sock(sk);
793 732
794 if (sock->type == SOCK_STREAM) 733 if (sock->type == SOCK_STREAM)
795 return bt_sock_stream_recvmsg(iocb, sock, msg, len, flags); 734 err = bt_sock_stream_recvmsg(iocb, sock, msg, len, flags);
735 else
736 err = bt_sock_recvmsg(iocb, sock, msg, len, flags);
796 737
797 return bt_sock_recvmsg(iocb, sock, msg, len, flags); 738 if (pi->chan->mode != L2CAP_MODE_ERTM)
739 return err;
740
741 /* Attempt to put pending rx data in the socket buffer */
742
743 lock_sock(sk);
744
745 if (!test_bit(CONN_LOCAL_BUSY, &pi->chan->conn_state))
746 goto done;
747
748 if (pi->rx_busy_skb) {
749 if (!sock_queue_rcv_skb(sk, pi->rx_busy_skb))
750 pi->rx_busy_skb = NULL;
751 else
752 goto done;
753 }
754
755 /* Restore data flow when half of the receive buffer is
756 * available. This avoids resending large numbers of
757 * frames.
758 */
759 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf >> 1)
760 l2cap_chan_busy(pi->chan, 0);
761
762done:
763 release_sock(sk);
764 return err;
798} 765}
799 766
800/* Kill socket (only if zapped and orphan) 767/* Kill socket (only if zapped and orphan)
801 * Must be called on unlocked socket. 768 * Must be called on unlocked socket.
802 */ 769 */
803void l2cap_sock_kill(struct sock *sk) 770static void l2cap_sock_kill(struct sock *sk)
804{ 771{
805 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket) 772 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
806 return; 773 return;
@@ -814,87 +781,6 @@ void l2cap_sock_kill(struct sock *sk)
814 sock_put(sk); 781 sock_put(sk);
815} 782}
816 783
817/* Must be called on unlocked socket. */
818static void l2cap_sock_close(struct sock *sk)
819{
820 l2cap_sock_clear_timer(sk);
821 lock_sock(sk);
822 __l2cap_sock_close(sk, ECONNRESET);
823 release_sock(sk);
824 l2cap_sock_kill(sk);
825}
826
827static void l2cap_sock_cleanup_listen(struct sock *parent)
828{
829 struct sock *sk;
830
831 BT_DBG("parent %p", parent);
832
833 /* Close not yet accepted channels */
834 while ((sk = bt_accept_dequeue(parent, NULL)))
835 l2cap_sock_close(sk);
836
837 parent->sk_state = BT_CLOSED;
838 sock_set_flag(parent, SOCK_ZAPPED);
839}
840
841void __l2cap_sock_close(struct sock *sk, int reason)
842{
843 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
844 struct l2cap_conn *conn = chan->conn;
845
846 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
847
848 switch (sk->sk_state) {
849 case BT_LISTEN:
850 l2cap_sock_cleanup_listen(sk);
851 break;
852
853 case BT_CONNECTED:
854 case BT_CONFIG:
855 if ((sk->sk_type == SOCK_SEQPACKET ||
856 sk->sk_type == SOCK_STREAM) &&
857 conn->hcon->type == ACL_LINK) {
858 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
859 l2cap_send_disconn_req(conn, chan, reason);
860 } else
861 l2cap_chan_del(chan, reason);
862 break;
863
864 case BT_CONNECT2:
865 if ((sk->sk_type == SOCK_SEQPACKET ||
866 sk->sk_type == SOCK_STREAM) &&
867 conn->hcon->type == ACL_LINK) {
868 struct l2cap_conn_rsp rsp;
869 __u16 result;
870
871 if (bt_sk(sk)->defer_setup)
872 result = L2CAP_CR_SEC_BLOCK;
873 else
874 result = L2CAP_CR_BAD_PSM;
875
876 rsp.scid = cpu_to_le16(chan->dcid);
877 rsp.dcid = cpu_to_le16(chan->scid);
878 rsp.result = cpu_to_le16(result);
879 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
880 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
881 sizeof(rsp), &rsp);
882 }
883
884 l2cap_chan_del(chan, reason);
885 break;
886
887 case BT_CONNECT:
888 case BT_DISCONN:
889 l2cap_chan_del(chan, reason);
890 break;
891
892 default:
893 sock_set_flag(sk, SOCK_ZAPPED);
894 break;
895 }
896}
897
898static int l2cap_sock_shutdown(struct socket *sock, int how) 784static int l2cap_sock_shutdown(struct socket *sock, int how)
899{ 785{
900 struct sock *sk = sock->sk; 786 struct sock *sk = sock->sk;
@@ -912,8 +798,7 @@ static int l2cap_sock_shutdown(struct socket *sock, int how)
912 err = __l2cap_wait_ack(sk); 798 err = __l2cap_wait_ack(sk);
913 799
914 sk->sk_shutdown = SHUTDOWN_MASK; 800 sk->sk_shutdown = SHUTDOWN_MASK;
915 l2cap_sock_clear_timer(sk); 801 l2cap_chan_close(chan, 0);
916 __l2cap_sock_close(sk, 0);
917 802
918 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) 803 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
919 err = bt_sock_wait_state(sk, BT_CLOSED, 804 err = bt_sock_wait_state(sk, BT_CLOSED,
@@ -944,15 +829,85 @@ static int l2cap_sock_release(struct socket *sock)
944 return err; 829 return err;
945} 830}
946 831
832static struct l2cap_chan *l2cap_sock_new_connection_cb(void *data)
833{
834 struct sock *sk, *parent = data;
835
836 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP,
837 GFP_ATOMIC);
838 if (!sk)
839 return NULL;
840
841 l2cap_sock_init(sk, parent);
842
843 return l2cap_pi(sk)->chan;
844}
845
846static int l2cap_sock_recv_cb(void *data, struct sk_buff *skb)
847{
848 int err;
849 struct sock *sk = data;
850 struct l2cap_pinfo *pi = l2cap_pi(sk);
851
852 if (pi->rx_busy_skb)
853 return -ENOMEM;
854
855 err = sock_queue_rcv_skb(sk, skb);
856
857 /* For ERTM, handle one skb that doesn't fit into the recv
858 * buffer. This is important to do because the data frames
859 * have already been acked, so the skb cannot be discarded.
860 *
861 * Notify the l2cap core that the buffer is full, so the
862 * LOCAL_BUSY state is entered and no more frames are
863 * acked and reassembled until there is buffer space
864 * available.
865 */
866 if (err < 0 && pi->chan->mode == L2CAP_MODE_ERTM) {
867 pi->rx_busy_skb = skb;
868 l2cap_chan_busy(pi->chan, 1);
869 err = 0;
870 }
871
872 return err;
873}
874
875static void l2cap_sock_close_cb(void *data)
876{
877 struct sock *sk = data;
878
879 l2cap_sock_kill(sk);
880}
881
882static void l2cap_sock_state_change_cb(void *data, int state)
883{
884 struct sock *sk = data;
885
886 sk->sk_state = state;
887}
888
889static struct l2cap_ops l2cap_chan_ops = {
890 .name = "L2CAP Socket Interface",
891 .new_connection = l2cap_sock_new_connection_cb,
892 .recv = l2cap_sock_recv_cb,
893 .close = l2cap_sock_close_cb,
894 .state_change = l2cap_sock_state_change_cb,
895};
896
947static void l2cap_sock_destruct(struct sock *sk) 897static void l2cap_sock_destruct(struct sock *sk)
948{ 898{
949 BT_DBG("sk %p", sk); 899 BT_DBG("sk %p", sk);
950 900
901 if (l2cap_pi(sk)->rx_busy_skb) {
902 kfree_skb(l2cap_pi(sk)->rx_busy_skb);
903 l2cap_pi(sk)->rx_busy_skb = NULL;
904 }
905
951 skb_queue_purge(&sk->sk_receive_queue); 906 skb_queue_purge(&sk->sk_receive_queue);
952 skb_queue_purge(&sk->sk_write_queue); 907 skb_queue_purge(&sk->sk_write_queue);
953} 908}
954 909
955void l2cap_sock_init(struct sock *sk, struct sock *parent) 910static void l2cap_sock_init(struct sock *sk, struct sock *parent)
956{ 911{
957 struct l2cap_pinfo *pi = l2cap_pi(sk); 912 struct l2cap_pinfo *pi = l2cap_pi(sk);
958 struct l2cap_chan *chan = pi->chan; 913 struct l2cap_chan *chan = pi->chan;
@@ -965,6 +920,7 @@ void l2cap_sock_init(struct sock *sk, struct sock *parent)
965 sk->sk_type = parent->sk_type; 920 sk->sk_type = parent->sk_type;
966 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup; 921 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
967 922
923 chan->chan_type = pchan->chan_type;
968 chan->imtu = pchan->imtu; 924 chan->imtu = pchan->imtu;
969 chan->omtu = pchan->omtu; 925 chan->omtu = pchan->omtu;
970 chan->conf_state = pchan->conf_state; 926 chan->conf_state = pchan->conf_state;
@@ -976,12 +932,27 @@ void l2cap_sock_init(struct sock *sk, struct sock *parent)
976 chan->role_switch = pchan->role_switch; 932 chan->role_switch = pchan->role_switch;
977 chan->force_reliable = pchan->force_reliable; 933 chan->force_reliable = pchan->force_reliable;
978 chan->flushable = pchan->flushable; 934 chan->flushable = pchan->flushable;
935 chan->force_active = pchan->force_active;
979 } else { 936 } else {
937
938 switch (sk->sk_type) {
939 case SOCK_RAW:
940 chan->chan_type = L2CAP_CHAN_RAW;
941 break;
942 case SOCK_DGRAM:
943 chan->chan_type = L2CAP_CHAN_CONN_LESS;
944 break;
945 case SOCK_SEQPACKET:
946 case SOCK_STREAM:
947 chan->chan_type = L2CAP_CHAN_CONN_ORIENTED;
948 break;
949 }
950
980 chan->imtu = L2CAP_DEFAULT_MTU; 951 chan->imtu = L2CAP_DEFAULT_MTU;
981 chan->omtu = 0; 952 chan->omtu = 0;
982 if (!disable_ertm && sk->sk_type == SOCK_STREAM) { 953 if (!disable_ertm && sk->sk_type == SOCK_STREAM) {
983 chan->mode = L2CAP_MODE_ERTM; 954 chan->mode = L2CAP_MODE_ERTM;
984 chan->conf_state |= L2CAP_CONF_STATE2_DEVICE; 955 set_bit(CONF_STATE2_DEVICE, &chan->conf_state);
985 } else { 956 } else {
986 chan->mode = L2CAP_MODE_BASIC; 957 chan->mode = L2CAP_MODE_BASIC;
987 } 958 }
@@ -992,10 +963,15 @@ void l2cap_sock_init(struct sock *sk, struct sock *parent)
992 chan->role_switch = 0; 963 chan->role_switch = 0;
993 chan->force_reliable = 0; 964 chan->force_reliable = 0;
994 chan->flushable = BT_FLUSHABLE_OFF; 965 chan->flushable = BT_FLUSHABLE_OFF;
966 chan->force_active = BT_POWER_FORCE_ACTIVE_ON;
967
995 } 968 }
996 969
997 /* Default config options */ 970 /* Default config options */
998 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO; 971 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
972
973 chan->data = sk;
974 chan->ops = &l2cap_chan_ops;
999} 975}
1000 976
1001static struct proto l2cap_proto = { 977static struct proto l2cap_proto = {
@@ -1004,9 +980,10 @@ static struct proto l2cap_proto = {
1004 .obj_size = sizeof(struct l2cap_pinfo) 980 .obj_size = sizeof(struct l2cap_pinfo)
1005}; 981};
1006 982
1007struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio) 983static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
1008{ 984{
1009 struct sock *sk; 985 struct sock *sk;
986 struct l2cap_chan *chan;
1010 987
1011 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto); 988 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
1012 if (!sk) 989 if (!sk)
@@ -1023,7 +1000,13 @@ struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, g
1023 sk->sk_protocol = proto; 1000 sk->sk_protocol = proto;
1024 sk->sk_state = BT_OPEN; 1001 sk->sk_state = BT_OPEN;
1025 1002
1026 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk); 1003 chan = l2cap_chan_create(sk);
1004 if (!chan) {
1005 l2cap_sock_kill(sk);
1006 return NULL;
1007 }
1008
1009 l2cap_pi(sk)->chan = chan;
1027 1010
1028 return sk; 1011 return sk;
1029} 1012}
@@ -1032,7 +1015,6 @@ static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
1032 int kern) 1015 int kern)
1033{ 1016{
1034 struct sock *sk; 1017 struct sock *sk;
1035 struct l2cap_chan *chan;
1036 1018
1037 BT_DBG("sock %p", sock); 1019 BT_DBG("sock %p", sock);
1038 1020
@@ -1051,14 +1033,6 @@ static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
1051 if (!sk) 1033 if (!sk)
1052 return -ENOMEM; 1034 return -ENOMEM;
1053 1035
1054 chan = l2cap_chan_create(sk);
1055 if (!chan) {
1056 l2cap_sock_kill(sk);
1057 return -ENOMEM;
1058 }
1059
1060 l2cap_pi(sk)->chan = chan;
1061
1062 l2cap_sock_init(sk, NULL); 1036 l2cap_sock_init(sk, NULL);
1063 return 0; 1037 return 0;
1064} 1038}
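
The l2cap_sock.c changes above move the transmit path into l2cap_chan_send() and add receive-side flow control for ERTM: when an already-acked frame cannot be queued, the single overflow skb is parked in rx_busy_skb and the channel enters LOCAL_BUSY; recvmsg() later requeues it and clears the busy state only once half of the receive buffer has drained, so the peer does not resume and immediately stall again. A minimal user-space sketch of that watermark logic follows; every name in it is an illustrative stand-in (the real code uses sock_queue_rcv_skb(), l2cap_chan_busy() and sk->sk_rmem_alloc), not the kernel API.

	/*
	 * Sketch of the ERTM local-busy watermark introduced above.
	 * All identifiers here are hypothetical stand-ins.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	struct chan {
		unsigned int rmem_alloc;	/* bytes currently queued */
		unsigned int rcvbuf;		/* receive buffer limit */
		bool local_busy;		/* LOCAL_BUSY flag */
	};

	/* An acked frame could not be queued: enter LOCAL_BUSY. */
	static void chan_set_busy(struct chan *c)
	{
		c->local_busy = true;
	}

	/*
	 * Called after the reader drained data: leave LOCAL_BUSY only
	 * once at most half of the receive buffer is in use, matching
	 * the "sk_rmem_alloc <= sk_rcvbuf >> 1" test in recvmsg().
	 */
	static void chan_check_unbusy(struct chan *c)
	{
		if (c->local_busy && c->rmem_alloc <= c->rcvbuf >> 1) {
			c->local_busy = false;
			printf("resuming: %u of %u bytes in use\n",
			       c->rmem_alloc, c->rcvbuf);
		}
	}

	int main(void)
	{
		struct chan c = { .rmem_alloc = 900, .rcvbuf = 1000 };

		chan_set_busy(&c);
		c.rmem_alloc = 600;
		chan_check_unbusy(&c);	/* still busy: over half in use */
		c.rmem_alloc = 400;
		chan_check_unbusy(&c);	/* resumes: at most half in use */
		return 0;
	}
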
diff --git a/net/bluetooth/lib.c b/net/bluetooth/lib.c
index b826d1bf10d..86a6bed229d 100644
--- a/net/bluetooth/lib.c
+++ b/net/bluetooth/lib.c
@@ -59,7 +59,7 @@ char *batostr(bdaddr_t *ba)
59EXPORT_SYMBOL(batostr); 59EXPORT_SYMBOL(batostr);
60 60
61/* Bluetooth error codes to Unix errno mapping */ 61/* Bluetooth error codes to Unix errno mapping */
62int bt_err(__u16 code) 62int bt_to_errno(__u16 code)
63{ 63{
64 switch (code) { 64 switch (code) {
65 case 0: 65 case 0:
@@ -149,4 +149,23 @@ int bt_err(__u16 code)
149 return ENOSYS; 149 return ENOSYS;
150 } 150 }
151} 151}
152EXPORT_SYMBOL(bt_err); 152EXPORT_SYMBOL(bt_to_errno);
153
154int bt_printk(const char *level, const char *format, ...)
155{
156 struct va_format vaf;
157 va_list args;
158 int r;
159
160 va_start(args, format);
161
162 vaf.fmt = format;
163 vaf.va = &args;
164
165 r = printk("%sBluetooth: %pV\n", level, &vaf);
166
167 va_end(args);
168
169 return r;
170}
171EXPORT_SYMBOL(bt_printk);
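
Besides renaming bt_err() to bt_to_errno(), the lib.c hunk adds bt_printk(), which formats its arguments exactly once by handing a struct va_format to printk's %pV extension. %pV is kernel-only; a plain C analogue of the same single-pass wrapper, for illustration:

	/*
	 * User-space analogue of bt_printk() above. The kernel version
	 * relies on the printk %pV extension with struct va_format;
	 * plain C achieves one formatting pass with vprintf().
	 */
	#include <stdarg.h>
	#include <stdio.h>

	static int bt_log(const char *level, const char *format, ...)
	{
		va_list args;
		int r;

		va_start(args, format);
		fputs(level, stdout);
		fputs("Bluetooth: ", stdout);
		r = vprintf(format, args);	/* single formatting pass */
		putchar('\n');
		va_end(args);

		return r;
	}

	int main(void)
	{
		bt_log("<err> ", "hci%u command failed (status %d)", 0, -5);
		return 0;
	}
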
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index dae382ce702..53e109eb043 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -41,7 +41,7 @@ struct pending_cmd {
41 void *user_data; 41 void *user_data;
42}; 42};
43 43
44LIST_HEAD(cmd_list); 44static LIST_HEAD(cmd_list);
45 45
46static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status) 46static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
47{ 47{
@@ -179,7 +179,7 @@ static int read_controller_info(struct sock *sk, u16 index)
179 179
180 hci_del_off_timer(hdev); 180 hci_del_off_timer(hdev);
181 181
182 hci_dev_lock(hdev); 182 hci_dev_lock_bh(hdev);
183 183
184 set_bit(HCI_MGMT, &hdev->flags); 184 set_bit(HCI_MGMT, &hdev->flags);
185 185
@@ -208,7 +208,7 @@ static int read_controller_info(struct sock *sk, u16 index)
208 208
209 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name)); 209 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
210 210
211 hci_dev_unlock(hdev); 211 hci_dev_unlock_bh(hdev);
212 hci_dev_put(hdev); 212 hci_dev_put(hdev);
213 213
214 return cmd_complete(sk, index, MGMT_OP_READ_INFO, &rp, sizeof(rp)); 214 return cmd_complete(sk, index, MGMT_OP_READ_INFO, &rp, sizeof(rp));
@@ -316,7 +316,7 @@ static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len)
316 if (!hdev) 316 if (!hdev)
317 return cmd_status(sk, index, MGMT_OP_SET_POWERED, ENODEV); 317 return cmd_status(sk, index, MGMT_OP_SET_POWERED, ENODEV);
318 318
319 hci_dev_lock(hdev); 319 hci_dev_lock_bh(hdev);
320 320
321 up = test_bit(HCI_UP, &hdev->flags); 321 up = test_bit(HCI_UP, &hdev->flags);
322 if ((cp->val && up) || (!cp->val && !up)) { 322 if ((cp->val && up) || (!cp->val && !up)) {
@@ -343,7 +343,7 @@ static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len)
343 err = 0; 343 err = 0;
344 344
345failed: 345failed:
346 hci_dev_unlock(hdev); 346 hci_dev_unlock_bh(hdev);
347 hci_dev_put(hdev); 347 hci_dev_put(hdev);
348 return err; 348 return err;
349} 349}
@@ -368,7 +368,7 @@ static int set_discoverable(struct sock *sk, u16 index, unsigned char *data,
368 if (!hdev) 368 if (!hdev)
369 return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENODEV); 369 return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENODEV);
370 370
371 hci_dev_lock(hdev); 371 hci_dev_lock_bh(hdev);
372 372
373 if (!test_bit(HCI_UP, &hdev->flags)) { 373 if (!test_bit(HCI_UP, &hdev->flags)) {
374 err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENETDOWN); 374 err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENETDOWN);
@@ -403,7 +403,7 @@ static int set_discoverable(struct sock *sk, u16 index, unsigned char *data,
403 mgmt_pending_remove(cmd); 403 mgmt_pending_remove(cmd);
404 404
405failed: 405failed:
406 hci_dev_unlock(hdev); 406 hci_dev_unlock_bh(hdev);
407 hci_dev_put(hdev); 407 hci_dev_put(hdev);
408 408
409 return err; 409 return err;
@@ -429,7 +429,7 @@ static int set_connectable(struct sock *sk, u16 index, unsigned char *data,
429 if (!hdev) 429 if (!hdev)
430 return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENODEV); 430 return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENODEV);
431 431
432 hci_dev_lock(hdev); 432 hci_dev_lock_bh(hdev);
433 433
434 if (!test_bit(HCI_UP, &hdev->flags)) { 434 if (!test_bit(HCI_UP, &hdev->flags)) {
435 err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENETDOWN); 435 err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENETDOWN);
@@ -463,7 +463,7 @@ static int set_connectable(struct sock *sk, u16 index, unsigned char *data,
463 mgmt_pending_remove(cmd); 463 mgmt_pending_remove(cmd);
464 464
465failed: 465failed:
466 hci_dev_unlock(hdev); 466 hci_dev_unlock_bh(hdev);
467 hci_dev_put(hdev); 467 hci_dev_put(hdev);
468 468
469 return err; 469 return err;
@@ -522,7 +522,7 @@ static int set_pairable(struct sock *sk, u16 index, unsigned char *data,
522 if (!hdev) 522 if (!hdev)
523 return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, ENODEV); 523 return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, ENODEV);
524 524
525 hci_dev_lock(hdev); 525 hci_dev_lock_bh(hdev);
526 526
527 if (cp->val) 527 if (cp->val)
528 set_bit(HCI_PAIRABLE, &hdev->flags); 528 set_bit(HCI_PAIRABLE, &hdev->flags);
@@ -538,7 +538,7 @@ static int set_pairable(struct sock *sk, u16 index, unsigned char *data,
538 err = mgmt_event(MGMT_EV_PAIRABLE, index, &ev, sizeof(ev), sk); 538 err = mgmt_event(MGMT_EV_PAIRABLE, index, &ev, sizeof(ev), sk);
539 539
540failed: 540failed:
541 hci_dev_unlock(hdev); 541 hci_dev_unlock_bh(hdev);
542 hci_dev_put(hdev); 542 hci_dev_put(hdev);
543 543
544 return err; 544 return err;
@@ -739,7 +739,7 @@ static int add_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
739 if (!hdev) 739 if (!hdev)
740 return cmd_status(sk, index, MGMT_OP_ADD_UUID, ENODEV); 740 return cmd_status(sk, index, MGMT_OP_ADD_UUID, ENODEV);
741 741
742 hci_dev_lock(hdev); 742 hci_dev_lock_bh(hdev);
743 743
744 uuid = kmalloc(sizeof(*uuid), GFP_ATOMIC); 744 uuid = kmalloc(sizeof(*uuid), GFP_ATOMIC);
745 if (!uuid) { 745 if (!uuid) {
@@ -763,7 +763,7 @@ static int add_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
763 err = cmd_complete(sk, index, MGMT_OP_ADD_UUID, NULL, 0); 763 err = cmd_complete(sk, index, MGMT_OP_ADD_UUID, NULL, 0);
764 764
765failed: 765failed:
766 hci_dev_unlock(hdev); 766 hci_dev_unlock_bh(hdev);
767 hci_dev_put(hdev); 767 hci_dev_put(hdev);
768 768
769 return err; 769 return err;
@@ -788,7 +788,7 @@ static int remove_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
788 if (!hdev) 788 if (!hdev)
789 return cmd_status(sk, index, MGMT_OP_REMOVE_UUID, ENODEV); 789 return cmd_status(sk, index, MGMT_OP_REMOVE_UUID, ENODEV);
790 790
791 hci_dev_lock(hdev); 791 hci_dev_lock_bh(hdev);
792 792
793 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) { 793 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
794 err = hci_uuids_clear(hdev); 794 err = hci_uuids_clear(hdev);
@@ -823,7 +823,7 @@ static int remove_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len)
823 err = cmd_complete(sk, index, MGMT_OP_REMOVE_UUID, NULL, 0); 823 err = cmd_complete(sk, index, MGMT_OP_REMOVE_UUID, NULL, 0);
824 824
825unlock: 825unlock:
826 hci_dev_unlock(hdev); 826 hci_dev_unlock_bh(hdev);
827 hci_dev_put(hdev); 827 hci_dev_put(hdev);
828 828
829 return err; 829 return err;
@@ -847,7 +847,7 @@ static int set_dev_class(struct sock *sk, u16 index, unsigned char *data,
847 if (!hdev) 847 if (!hdev)
848 return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS, ENODEV); 848 return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS, ENODEV);
849 849
850 hci_dev_lock(hdev); 850 hci_dev_lock_bh(hdev);
851 851
852 hdev->major_class = cp->major; 852 hdev->major_class = cp->major;
853 hdev->minor_class = cp->minor; 853 hdev->minor_class = cp->minor;
@@ -857,7 +857,7 @@ static int set_dev_class(struct sock *sk, u16 index, unsigned char *data,
857 if (err == 0) 857 if (err == 0)
858 err = cmd_complete(sk, index, MGMT_OP_SET_DEV_CLASS, NULL, 0); 858 err = cmd_complete(sk, index, MGMT_OP_SET_DEV_CLASS, NULL, 0);
859 859
860 hci_dev_unlock(hdev); 860 hci_dev_unlock_bh(hdev);
861 hci_dev_put(hdev); 861 hci_dev_put(hdev);
862 862
863 return err; 863 return err;
@@ -879,7 +879,7 @@ static int set_service_cache(struct sock *sk, u16 index, unsigned char *data,
879 if (!hdev) 879 if (!hdev)
880 return cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE, ENODEV); 880 return cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE, ENODEV);
881 881
882 hci_dev_lock(hdev); 882 hci_dev_lock_bh(hdev);
883 883
884 BT_DBG("hci%u enable %d", index, cp->enable); 884 BT_DBG("hci%u enable %d", index, cp->enable);
885 885
@@ -897,7 +897,7 @@ static int set_service_cache(struct sock *sk, u16 index, unsigned char *data,
897 err = cmd_complete(sk, index, MGMT_OP_SET_SERVICE_CACHE, NULL, 897 err = cmd_complete(sk, index, MGMT_OP_SET_SERVICE_CACHE, NULL,
898 0); 898 0);
899 899
900 hci_dev_unlock(hdev); 900 hci_dev_unlock_bh(hdev);
901 hci_dev_put(hdev); 901 hci_dev_put(hdev);
902 902
903 return err; 903 return err;
@@ -908,7 +908,7 @@ static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len)
908 struct hci_dev *hdev; 908 struct hci_dev *hdev;
909 struct mgmt_cp_load_keys *cp; 909 struct mgmt_cp_load_keys *cp;
910 u16 key_count, expected_len; 910 u16 key_count, expected_len;
911 int i; 911 int i, err;
912 912
913 cp = (void *) data; 913 cp = (void *) data;
914 914
@@ -918,9 +918,9 @@ static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len)
918 key_count = get_unaligned_le16(&cp->key_count); 918 key_count = get_unaligned_le16(&cp->key_count);
919 919
920 expected_len = sizeof(*cp) + key_count * sizeof(struct mgmt_key_info); 920 expected_len = sizeof(*cp) + key_count * sizeof(struct mgmt_key_info);
921 if (expected_len != len) { 921 if (expected_len > len) {
922 BT_ERR("load_keys: expected %u bytes, got %u bytes", 922 BT_ERR("load_keys: expected at least %u bytes, got %u bytes",
923 len, expected_len); 923 expected_len, len);
924 return -EINVAL; 924 return -EINVAL;
925 } 925 }
926 926
@@ -931,7 +931,7 @@ static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len)
931 BT_DBG("hci%u debug_keys %u key_count %u", index, cp->debug_keys, 931 BT_DBG("hci%u debug_keys %u key_count %u", index, cp->debug_keys,
932 key_count); 932 key_count);
933 933
934 hci_dev_lock(hdev); 934 hci_dev_lock_bh(hdev);
935 935
936 hci_link_keys_clear(hdev); 936 hci_link_keys_clear(hdev);
937 937
@@ -942,17 +942,36 @@ static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len)
942 else 942 else
943 clear_bit(HCI_DEBUG_KEYS, &hdev->flags); 943 clear_bit(HCI_DEBUG_KEYS, &hdev->flags);
944 944
945 for (i = 0; i < key_count; i++) { 945 len -= sizeof(*cp);
946 struct mgmt_key_info *key = &cp->keys[i]; 946 i = 0;
947
948 while (i < len) {
949 struct mgmt_key_info *key = (void *) cp->keys + i;
950
951 i += sizeof(*key) + key->dlen;
952
953 if (key->type == HCI_LK_SMP_LTK) {
954 struct key_master_id *id = (void *) key->data;
955
956 if (key->dlen != sizeof(struct key_master_id))
957 continue;
958
959 hci_add_ltk(hdev, 0, &key->bdaddr, key->pin_len,
960 id->ediv, id->rand, key->val);
961
962 continue;
963 }
947 964
948 hci_add_link_key(hdev, NULL, 0, &key->bdaddr, key->val, key->type, 965 hci_add_link_key(hdev, NULL, 0, &key->bdaddr, key->val, key->type,
949 key->pin_len); 966 key->pin_len);
950 } 967 }
951 968
952 hci_dev_unlock(hdev); 969 err = cmd_complete(sk, index, MGMT_OP_LOAD_KEYS, NULL, 0);
970
971 hci_dev_unlock_bh(hdev);
953 hci_dev_put(hdev); 972 hci_dev_put(hdev);
954 973
955 return 0; 974 return err;
956} 975}
957 976
958static int remove_key(struct sock *sk, u16 index, unsigned char *data, u16 len) 977static int remove_key(struct sock *sk, u16 index, unsigned char *data, u16 len)
@@ -971,7 +990,7 @@ static int remove_key(struct sock *sk, u16 index, unsigned char *data, u16 len)
971 if (!hdev) 990 if (!hdev)
972 return cmd_status(sk, index, MGMT_OP_REMOVE_KEY, ENODEV); 991 return cmd_status(sk, index, MGMT_OP_REMOVE_KEY, ENODEV);
973 992
974 hci_dev_lock(hdev); 993 hci_dev_lock_bh(hdev);
975 994
976 err = hci_remove_link_key(hdev, &cp->bdaddr); 995 err = hci_remove_link_key(hdev, &cp->bdaddr);
977 if (err < 0) { 996 if (err < 0) {
@@ -990,11 +1009,11 @@ static int remove_key(struct sock *sk, u16 index, unsigned char *data, u16 len)
990 1009
991 put_unaligned_le16(conn->handle, &dc.handle); 1010 put_unaligned_le16(conn->handle, &dc.handle);
992 dc.reason = 0x13; /* Remote User Terminated Connection */ 1011 dc.reason = 0x13; /* Remote User Terminated Connection */
993 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, 0, NULL); 1012 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
994 } 1013 }
995 1014
996unlock: 1015unlock:
997 hci_dev_unlock(hdev); 1016 hci_dev_unlock_bh(hdev);
998 hci_dev_put(hdev); 1017 hci_dev_put(hdev);
999 1018
1000 return err; 1019 return err;
@@ -1020,7 +1039,7 @@ static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len)
1020 if (!hdev) 1039 if (!hdev)
1021 return cmd_status(sk, index, MGMT_OP_DISCONNECT, ENODEV); 1040 return cmd_status(sk, index, MGMT_OP_DISCONNECT, ENODEV);
1022 1041
1023 hci_dev_lock(hdev); 1042 hci_dev_lock_bh(hdev);
1024 1043
1025 if (!test_bit(HCI_UP, &hdev->flags)) { 1044 if (!test_bit(HCI_UP, &hdev->flags)) {
1026 err = cmd_status(sk, index, MGMT_OP_DISCONNECT, ENETDOWN); 1045 err = cmd_status(sk, index, MGMT_OP_DISCONNECT, ENETDOWN);
@@ -1055,7 +1074,7 @@ static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len)
1055 mgmt_pending_remove(cmd); 1074 mgmt_pending_remove(cmd);
1056 1075
1057failed: 1076failed:
1058 hci_dev_unlock(hdev); 1077 hci_dev_unlock_bh(hdev);
1059 hci_dev_put(hdev); 1078 hci_dev_put(hdev);
1060 1079
1061 return err; 1080 return err;
@@ -1076,7 +1095,7 @@ static int get_connections(struct sock *sk, u16 index)
1076 if (!hdev) 1095 if (!hdev)
1077 return cmd_status(sk, index, MGMT_OP_GET_CONNECTIONS, ENODEV); 1096 return cmd_status(sk, index, MGMT_OP_GET_CONNECTIONS, ENODEV);
1078 1097
1079 hci_dev_lock(hdev); 1098 hci_dev_lock_bh(hdev);
1080 1099
1081 count = 0; 1100 count = 0;
1082 list_for_each(p, &hdev->conn_hash.list) { 1101 list_for_each(p, &hdev->conn_hash.list) {
@@ -1092,8 +1111,6 @@ static int get_connections(struct sock *sk, u16 index)
1092 1111
1093 put_unaligned_le16(count, &rp->conn_count); 1112 put_unaligned_le16(count, &rp->conn_count);
1094 1113
1095 read_lock(&hci_dev_list_lock);
1096
1097 i = 0; 1114 i = 0;
1098 list_for_each(p, &hdev->conn_hash.list) { 1115 list_for_each(p, &hdev->conn_hash.list) {
1099 struct hci_conn *c = list_entry(p, struct hci_conn, list); 1116 struct hci_conn *c = list_entry(p, struct hci_conn, list);
@@ -1101,22 +1118,41 @@ static int get_connections(struct sock *sk, u16 index)
1101 bacpy(&rp->conn[i++], &c->dst); 1118 bacpy(&rp->conn[i++], &c->dst);
1102 } 1119 }
1103 1120
1104 read_unlock(&hci_dev_list_lock);
1105
1106 err = cmd_complete(sk, index, MGMT_OP_GET_CONNECTIONS, rp, rp_len); 1121 err = cmd_complete(sk, index, MGMT_OP_GET_CONNECTIONS, rp, rp_len);
1107 1122
1108unlock: 1123unlock:
1109 kfree(rp); 1124 kfree(rp);
1110 hci_dev_unlock(hdev); 1125 hci_dev_unlock_bh(hdev);
1111 hci_dev_put(hdev); 1126 hci_dev_put(hdev);
1112 return err; 1127 return err;
1113} 1128}
1114 1129
1130static int send_pin_code_neg_reply(struct sock *sk, u16 index,
1131 struct hci_dev *hdev, struct mgmt_cp_pin_code_neg_reply *cp)
1132{
1133 struct pending_cmd *cmd;
1134 int err;
1135
1136 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, index, cp,
1137 sizeof(*cp));
1138 if (!cmd)
1139 return -ENOMEM;
1140
1141 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, sizeof(cp->bdaddr),
1142 &cp->bdaddr);
1143 if (err < 0)
1144 mgmt_pending_remove(cmd);
1145
1146 return err;
1147}
1148
1115static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data, 1149static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data,
1116 u16 len) 1150 u16 len)
1117{ 1151{
1118 struct hci_dev *hdev; 1152 struct hci_dev *hdev;
1153 struct hci_conn *conn;
1119 struct mgmt_cp_pin_code_reply *cp; 1154 struct mgmt_cp_pin_code_reply *cp;
1155 struct mgmt_cp_pin_code_neg_reply ncp;
1120 struct hci_cp_pin_code_reply reply; 1156 struct hci_cp_pin_code_reply reply;
1121 struct pending_cmd *cmd; 1157 struct pending_cmd *cmd;
1122 int err; 1158 int err;
@@ -1132,13 +1168,32 @@ static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data,
1132 if (!hdev) 1168 if (!hdev)
1133 return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENODEV); 1169 return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENODEV);
1134 1170
1135 hci_dev_lock(hdev); 1171 hci_dev_lock_bh(hdev);
1136 1172
1137 if (!test_bit(HCI_UP, &hdev->flags)) { 1173 if (!test_bit(HCI_UP, &hdev->flags)) {
1138 err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENETDOWN); 1174 err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENETDOWN);
1139 goto failed; 1175 goto failed;
1140 } 1176 }
1141 1177
1178 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1179 if (!conn) {
1180 err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENOTCONN);
1181 goto failed;
1182 }
1183
1184 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
1185 bacpy(&ncp.bdaddr, &cp->bdaddr);
1186
1187 BT_ERR("PIN code is not 16 bytes long");
1188
1189 err = send_pin_code_neg_reply(sk, index, hdev, &ncp);
1190 if (err >= 0)
1191 err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY,
1192 EINVAL);
1193
1194 goto failed;
1195 }
1196
1142 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, index, data, len); 1197 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, index, data, len);
1143 if (!cmd) { 1198 if (!cmd) {
1144 err = -ENOMEM; 1199 err = -ENOMEM;
@@ -1147,14 +1202,14 @@ static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data,
1147 1202
1148 bacpy(&reply.bdaddr, &cp->bdaddr); 1203 bacpy(&reply.bdaddr, &cp->bdaddr);
1149 reply.pin_len = cp->pin_len; 1204 reply.pin_len = cp->pin_len;
1150 memcpy(reply.pin_code, cp->pin_code, 16); 1205 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
1151 1206
1152 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply); 1207 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
1153 if (err < 0) 1208 if (err < 0)
1154 mgmt_pending_remove(cmd); 1209 mgmt_pending_remove(cmd);
1155 1210
1156failed: 1211failed:
1157 hci_dev_unlock(hdev); 1212 hci_dev_unlock_bh(hdev);
1158 hci_dev_put(hdev); 1213 hci_dev_put(hdev);
1159 1214
1160 return err; 1215 return err;
@@ -1165,7 +1220,6 @@ static int pin_code_neg_reply(struct sock *sk, u16 index, unsigned char *data,
1165{ 1220{
1166 struct hci_dev *hdev; 1221 struct hci_dev *hdev;
1167 struct mgmt_cp_pin_code_neg_reply *cp; 1222 struct mgmt_cp_pin_code_neg_reply *cp;
1168 struct pending_cmd *cmd;
1169 int err; 1223 int err;
1170 1224
1171 BT_DBG(""); 1225 BT_DBG("");
@@ -1181,7 +1235,7 @@ static int pin_code_neg_reply(struct sock *sk, u16 index, unsigned char *data,
1181 return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY, 1235 return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
1182 ENODEV); 1236 ENODEV);
1183 1237
1184 hci_dev_lock(hdev); 1238 hci_dev_lock_bh(hdev);
1185 1239
1186 if (!test_bit(HCI_UP, &hdev->flags)) { 1240 if (!test_bit(HCI_UP, &hdev->flags)) {
1187 err = cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY, 1241 err = cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY,
@@ -1189,20 +1243,10 @@ static int pin_code_neg_reply(struct sock *sk, u16 index, unsigned char *data,
1189 goto failed; 1243 goto failed;
1190 } 1244 }
1191 1245
1192 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, index, 1246 err = send_pin_code_neg_reply(sk, index, hdev, cp);
1193 data, len);
1194 if (!cmd) {
1195 err = -ENOMEM;
1196 goto failed;
1197 }
1198
1199 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, sizeof(cp->bdaddr),
1200 &cp->bdaddr);
1201 if (err < 0)
1202 mgmt_pending_remove(cmd);
1203 1247
1204failed: 1248failed:
1205 hci_dev_unlock(hdev); 1249 hci_dev_unlock_bh(hdev);
1206 hci_dev_put(hdev); 1250 hci_dev_put(hdev);
1207 1251
1208 return err; 1252 return err;
@@ -1225,14 +1269,14 @@ static int set_io_capability(struct sock *sk, u16 index, unsigned char *data,
1225 if (!hdev) 1269 if (!hdev)
1226 return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY, ENODEV); 1270 return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY, ENODEV);
1227 1271
1228 hci_dev_lock(hdev); 1272 hci_dev_lock_bh(hdev);
1229 1273
1230 hdev->io_capability = cp->io_capability; 1274 hdev->io_capability = cp->io_capability;
1231 1275
1232 BT_DBG("%s IO capability set to 0x%02x", hdev->name, 1276 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
1233 hdev->io_capability); 1277 hdev->io_capability);
1234 1278
1235 hci_dev_unlock(hdev); 1279 hci_dev_unlock_bh(hdev);
1236 hci_dev_put(hdev); 1280 hci_dev_put(hdev);
1237 1281
1238 return cmd_complete(sk, index, MGMT_OP_SET_IO_CAPABILITY, NULL, 0); 1282 return cmd_complete(sk, index, MGMT_OP_SET_IO_CAPABILITY, NULL, 0);
@@ -1318,7 +1362,7 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
1318 if (!hdev) 1362 if (!hdev)
1319 return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, ENODEV); 1363 return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, ENODEV);
1320 1364
1321 hci_dev_lock(hdev); 1365 hci_dev_lock_bh(hdev);
1322 1366
1323 if (cp->io_cap == 0x03) { 1367 if (cp->io_cap == 0x03) {
1324 sec_level = BT_SECURITY_MEDIUM; 1368 sec_level = BT_SECURITY_MEDIUM;
@@ -1360,7 +1404,7 @@ static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len)
1360 err = 0; 1404 err = 0;
1361 1405
1362unlock: 1406unlock:
1363 hci_dev_unlock(hdev); 1407 hci_dev_unlock_bh(hdev);
1364 hci_dev_put(hdev); 1408 hci_dev_put(hdev);
1365 1409
1366 return err; 1410 return err;
@@ -1392,7 +1436,7 @@ static int user_confirm_reply(struct sock *sk, u16 index, unsigned char *data,
1392 if (!hdev) 1436 if (!hdev)
1393 return cmd_status(sk, index, mgmt_op, ENODEV); 1437 return cmd_status(sk, index, mgmt_op, ENODEV);
1394 1438
1395 hci_dev_lock(hdev); 1439 hci_dev_lock_bh(hdev);
1396 1440
1397 if (!test_bit(HCI_UP, &hdev->flags)) { 1441 if (!test_bit(HCI_UP, &hdev->flags)) {
1398 err = cmd_status(sk, index, mgmt_op, ENETDOWN); 1442 err = cmd_status(sk, index, mgmt_op, ENETDOWN);
@@ -1410,7 +1454,7 @@ static int user_confirm_reply(struct sock *sk, u16 index, unsigned char *data,
1410 mgmt_pending_remove(cmd); 1454 mgmt_pending_remove(cmd);
1411 1455
1412failed: 1456failed:
1413 hci_dev_unlock(hdev); 1457 hci_dev_unlock_bh(hdev);
1414 hci_dev_put(hdev); 1458 hci_dev_put(hdev);
1415 1459
1416 return err; 1460 return err;
@@ -1434,7 +1478,7 @@ static int set_local_name(struct sock *sk, u16 index, unsigned char *data,
1434 if (!hdev) 1478 if (!hdev)
1435 return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME, ENODEV); 1479 return cmd_status(sk, index, MGMT_OP_SET_LOCAL_NAME, ENODEV);
1436 1480
1437 hci_dev_lock(hdev); 1481 hci_dev_lock_bh(hdev);
1438 1482
1439 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, index, data, len); 1483 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, index, data, len);
1440 if (!cmd) { 1484 if (!cmd) {
@@ -1449,7 +1493,7 @@ static int set_local_name(struct sock *sk, u16 index, unsigned char *data,
1449 mgmt_pending_remove(cmd); 1493 mgmt_pending_remove(cmd);
1450 1494
1451failed: 1495failed:
1452 hci_dev_unlock(hdev); 1496 hci_dev_unlock_bh(hdev);
1453 hci_dev_put(hdev); 1497 hci_dev_put(hdev);
1454 1498
1455 return err; 1499 return err;
@@ -1468,7 +1512,7 @@ static int read_local_oob_data(struct sock *sk, u16 index)
1468 return cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, 1512 return cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
1469 ENODEV); 1513 ENODEV);
1470 1514
1471 hci_dev_lock(hdev); 1515 hci_dev_lock_bh(hdev);
1472 1516
1473 if (!test_bit(HCI_UP, &hdev->flags)) { 1517 if (!test_bit(HCI_UP, &hdev->flags)) {
1474 err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA, 1518 err = cmd_status(sk, index, MGMT_OP_READ_LOCAL_OOB_DATA,
@@ -1498,7 +1542,7 @@ static int read_local_oob_data(struct sock *sk, u16 index)
1498 mgmt_pending_remove(cmd); 1542 mgmt_pending_remove(cmd);
1499 1543
1500unlock: 1544unlock:
1501 hci_dev_unlock(hdev); 1545 hci_dev_unlock_bh(hdev);
1502 hci_dev_put(hdev); 1546 hci_dev_put(hdev);
1503 1547
1504 return err; 1548 return err;
@@ -1522,7 +1566,7 @@ static int add_remote_oob_data(struct sock *sk, u16 index, unsigned char *data,
1522 return cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, 1566 return cmd_status(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA,
1523 ENODEV); 1567 ENODEV);
1524 1568
1525 hci_dev_lock(hdev); 1569 hci_dev_lock_bh(hdev);
1526 1570
1527 err = hci_add_remote_oob_data(hdev, &cp->bdaddr, cp->hash, 1571 err = hci_add_remote_oob_data(hdev, &cp->bdaddr, cp->hash,
1528 cp->randomizer); 1572 cp->randomizer);
@@ -1532,7 +1576,7 @@ static int add_remote_oob_data(struct sock *sk, u16 index, unsigned char *data,
1532 err = cmd_complete(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, NULL, 1576 err = cmd_complete(sk, index, MGMT_OP_ADD_REMOTE_OOB_DATA, NULL,
1533 0); 1577 0);
1534 1578
1535 hci_dev_unlock(hdev); 1579 hci_dev_unlock_bh(hdev);
1536 hci_dev_put(hdev); 1580 hci_dev_put(hdev);
1537 1581
1538 return err; 1582 return err;
@@ -1556,7 +1600,7 @@ static int remove_remote_oob_data(struct sock *sk, u16 index,
1556 return cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA, 1600 return cmd_status(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
1557 ENODEV); 1601 ENODEV);
1558 1602
1559 hci_dev_lock(hdev); 1603 hci_dev_lock_bh(hdev);
1560 1604
1561 err = hci_remove_remote_oob_data(hdev, &cp->bdaddr); 1605 err = hci_remove_remote_oob_data(hdev, &cp->bdaddr);
1562 if (err < 0) 1606 if (err < 0)
@@ -1566,7 +1610,7 @@ static int remove_remote_oob_data(struct sock *sk, u16 index,
1566 err = cmd_complete(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA, 1610 err = cmd_complete(sk, index, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
1567 NULL, 0); 1611 NULL, 0);
1568 1612
1569 hci_dev_unlock(hdev); 1613 hci_dev_unlock_bh(hdev);
1570 hci_dev_put(hdev); 1614 hci_dev_put(hdev);
1571 1615
1572 return err; 1616 return err;
@@ -1641,6 +1685,70 @@ failed:
1641 return err; 1685 return err;
1642} 1686}
1643 1687
1688static int block_device(struct sock *sk, u16 index, unsigned char *data,
1689 u16 len)
1690{
1691 struct hci_dev *hdev;
1692 struct mgmt_cp_block_device *cp;
1693 int err;
1694
1695 BT_DBG("hci%u", index);
1696
1697 cp = (void *) data;
1698
1699 if (len != sizeof(*cp))
1700 return cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE,
1701 EINVAL);
1702
1703 hdev = hci_dev_get(index);
1704 if (!hdev)
1705 return cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE,
1706 ENODEV);
1707
1708 err = hci_blacklist_add(hdev, &cp->bdaddr);
1709
1710 if (err < 0)
1711 err = cmd_status(sk, index, MGMT_OP_BLOCK_DEVICE, -err);
1712 else
1713 err = cmd_complete(sk, index, MGMT_OP_BLOCK_DEVICE,
1714 NULL, 0);
1715 hci_dev_put(hdev);
1716
1717 return err;
1718}
1719
1720static int unblock_device(struct sock *sk, u16 index, unsigned char *data,
1721 u16 len)
1722{
1723 struct hci_dev *hdev;
1724 struct mgmt_cp_unblock_device *cp;
1725 int err;
1726
1727 BT_DBG("hci%u", index);
1728
1729 cp = (void *) data;
1730
1731 if (len != sizeof(*cp))
1732 return cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE,
1733 EINVAL);
1734
1735 hdev = hci_dev_get(index);
1736 if (!hdev)
1737 return cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE,
1738 ENODEV);
1739
1740 err = hci_blacklist_del(hdev, &cp->bdaddr);
1741
1742 if (err < 0)
1743 err = cmd_status(sk, index, MGMT_OP_UNBLOCK_DEVICE, -err);
1744 else
1745 err = cmd_complete(sk, index, MGMT_OP_UNBLOCK_DEVICE,
1746 NULL, 0);
1747 hci_dev_put(hdev);
1748
1749 return err;
1750}
1751
1644int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen) 1752int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
1645{ 1753{
1646 unsigned char *buf; 1754 unsigned char *buf;
@@ -1755,6 +1863,12 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
1755 case MGMT_OP_STOP_DISCOVERY: 1863 case MGMT_OP_STOP_DISCOVERY:
1756 err = stop_discovery(sk, index); 1864 err = stop_discovery(sk, index);
1757 break; 1865 break;
1866 case MGMT_OP_BLOCK_DEVICE:
1867 err = block_device(sk, index, buf + sizeof(*hdr), len);
1868 break;
1869 case MGMT_OP_UNBLOCK_DEVICE:
1870 err = unblock_device(sk, index, buf + sizeof(*hdr), len);
1871 break;
1758 default: 1872 default:
1759 BT_DBG("Unknown op %u", opcode); 1873 BT_DBG("Unknown op %u", opcode);
1760 err = cmd_status(sk, index, opcode, 0x01); 1874 err = cmd_status(sk, index, opcode, 0x01);
@@ -1863,17 +1977,28 @@ int mgmt_connectable(u16 index, u8 connectable)
1863 1977
1864int mgmt_new_key(u16 index, struct link_key *key, u8 persistent) 1978int mgmt_new_key(u16 index, struct link_key *key, u8 persistent)
1865{ 1979{
1866 struct mgmt_ev_new_key ev; 1980 struct mgmt_ev_new_key *ev;
1981 int err, total;
1867 1982
1868 memset(&ev, 0, sizeof(ev)); 1983 total = sizeof(struct mgmt_ev_new_key) + key->dlen;
1984 ev = kzalloc(total, GFP_ATOMIC);
1985 if (!ev)
1986 return -ENOMEM;
1869 1987
1870 ev.store_hint = persistent; 1988 bacpy(&ev->key.bdaddr, &key->bdaddr);
1871 bacpy(&ev.key.bdaddr, &key->bdaddr); 1989 ev->key.type = key->type;
1872 ev.key.type = key->type; 1990 memcpy(ev->key.val, key->val, 16);
1873 memcpy(ev.key.val, key->val, 16); 1991 ev->key.pin_len = key->pin_len;
1874 ev.key.pin_len = key->pin_len; 1992 ev->key.dlen = key->dlen;
1993 ev->store_hint = persistent;
1875 1994
1876 return mgmt_event(MGMT_EV_NEW_KEY, index, &ev, sizeof(ev), NULL); 1995 memcpy(ev->key.data, key->data, key->dlen);
1996
1997 err = mgmt_event(MGMT_EV_NEW_KEY, index, ev, total, NULL);
1998
1999 kfree(ev);
2000
2001 return err;
1877} 2002}
1878 2003
1879int mgmt_connected(u16 index, bdaddr_t *bdaddr) 2004int mgmt_connected(u16 index, bdaddr_t *bdaddr)
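
Two fixes in the mgmt.c hunks above are worth noting: HCI_OP_DISCONNECT is now sent with its parameter block (sizeof(dc), &dc) instead of an empty payload, and load_keys() accepts variable-length records so SMP long term keys can carry extra data after the fixed header. The key list is therefore walked by byte offset, each record being a header plus dlen bytes. A self-contained sketch of that walk follows, with illustrative struct names and an added truncation check that the kernel loop leaves to its earlier length validation:

	/*
	 * Sketch of the variable-length key walk used by load_keys()
	 * above. Struct names here are illustrative.
	 */
	#include <stdint.h>
	#include <stdio.h>

	struct key_info {
		uint8_t type;
		uint8_t dlen;		/* length of data[] that follows */
		uint8_t data[];		/* flexible array member */
	};

	static void load_keys(const uint8_t *buf, size_t len)
	{
		size_t i = 0;

		while (i + sizeof(struct key_info) <= len) {
			const struct key_info *key =
					(const void *)(buf + i);

			/* Advance the offset first, as the kernel loop
			 * does, so each record is consumed exactly once. */
			i += sizeof(*key) + key->dlen;
			if (i > len)
				break;	/* truncated record */

			printf("key type %u, %u data bytes\n",
			       key->type, key->dlen);
		}
	}

	int main(void)
	{
		/* two records: type 1 with 2 bytes, type 4 with 1 byte */
		uint8_t buf[] = { 1, 2, 0xaa, 0xbb, 4, 1, 0xcc };

		load_keys(buf, sizeof(buf));
		return 0;
	}
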
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 1b10727ce52..8f01e6b11a7 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -679,7 +679,8 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
679{ 679{
680 struct sock *sk = sock->sk; 680 struct sock *sk = sock->sk;
681 struct bt_security sec; 681 struct bt_security sec;
682 int len, err = 0; 682 int err = 0;
683 size_t len;
683 u32 opt; 684 u32 opt;
684 685
685 BT_DBG("sk %p", sk); 686 BT_DBG("sk %p", sk);
@@ -741,7 +742,6 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
741static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen) 742static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
742{ 743{
743 struct sock *sk = sock->sk; 744 struct sock *sk = sock->sk;
744 struct sock *l2cap_sk;
745 struct rfcomm_conninfo cinfo; 745 struct rfcomm_conninfo cinfo;
746 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn; 746 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
747 int len, err = 0; 747 int len, err = 0;
@@ -786,8 +786,6 @@ static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __u
786 break; 786 break;
787 } 787 }
788 788
789 l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;
790
791 memset(&cinfo, 0, sizeof(cinfo)); 789 memset(&cinfo, 0, sizeof(cinfo));
792 cinfo.hci_handle = conn->hcon->handle; 790 cinfo.hci_handle = conn->hcon->handle;
793 memcpy(cinfo.dev_class, conn->hcon->dev_class, 3); 791 memcpy(cinfo.dev_class, conn->hcon->dev_class, 3);
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index cb4fb7837e5..4c3621b5e0a 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -932,7 +932,7 @@ static int sco_connect_cfm(struct hci_conn *hcon, __u8 status)
932 if (conn) 932 if (conn)
933 sco_conn_ready(conn); 933 sco_conn_ready(conn);
934 } else 934 } else
935 sco_conn_del(hcon, bt_err(status)); 935 sco_conn_del(hcon, bt_to_errno(status));
936 936
937 return 0; 937 return 0;
938} 938}
@@ -944,7 +944,7 @@ static int sco_disconn_cfm(struct hci_conn *hcon, __u8 reason)
944 if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK) 944 if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
945 return -EINVAL; 945 return -EINVAL;
946 946
947 sco_conn_del(hcon, bt_err(reason)); 947 sco_conn_del(hcon, bt_to_errno(reason));
948 948
949 return 0; 949 return 0;
950} 950}
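
The new net/bluetooth/smp.c below implements the Security Manager Protocol pairing primitives, c1 (confirm value) and s1 (key generation), on top of AES-128. SMP transmits multi-octet values least-significant-octet first, while the crypto routines consume big-endian arrays; that is what the swap128()/swap56() helpers are for. A standalone demonstration of the byte reversal:

	/*
	 * Standalone demonstration of the swap128() helper defined in
	 * smp.c below: wire order (LSO first) to big-endian and back.
	 */
	#include <stdint.h>
	#include <stdio.h>

	static void swap128(const uint8_t src[16], uint8_t dst[16])
	{
		int i;

		for (i = 0; i < 16; i++)
			dst[15 - i] = src[i];
	}

	int main(void)
	{
		uint8_t le[16], be[16];
		int i;

		for (i = 0; i < 16; i++)
			le[i] = i;	/* 00 01 ... 0f, wire order */

		swap128(le, be);

		for (i = 0; i < 16; i++)
			printf("%02x", be[i]);	/* 0f 0e ... 00 */
		printf("\n");
		return 0;
	}
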
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
new file mode 100644
index 00000000000..391888b88a9
--- /dev/null
+++ b/net/bluetooth/smp.c
@@ -0,0 +1,702 @@
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies).
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License version 2 as
7 published by the Free Software Foundation;
8
9 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
10 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
11 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
12 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
13 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
14 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17
18 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
19 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
20 SOFTWARE IS DISCLAIMED.
21*/
22
23#include <net/bluetooth/bluetooth.h>
24#include <net/bluetooth/hci_core.h>
25#include <net/bluetooth/l2cap.h>
26#include <net/bluetooth/smp.h>
27#include <linux/crypto.h>
28#include <linux/scatterlist.h>
29#include <crypto/b128ops.h>
30
31#define SMP_TIMEOUT 30000 /* 30 seconds */
32
33static inline void swap128(u8 src[16], u8 dst[16])
34{
35 int i;
36 for (i = 0; i < 16; i++)
37 dst[15 - i] = src[i];
38}
39
40static inline void swap56(u8 src[7], u8 dst[7])
41{
42 int i;
43 for (i = 0; i < 7; i++)
44 dst[6 - i] = src[i];
45}
46
47static int smp_e(struct crypto_blkcipher *tfm, const u8 *k, u8 *r)
48{
49 struct blkcipher_desc desc;
50 struct scatterlist sg;
51 int err, iv_len;
52 unsigned char iv[128];
53
54 if (tfm == NULL) {
55 BT_ERR("tfm %p", tfm);
56 return -EINVAL;
57 }
58
59 desc.tfm = tfm;
60 desc.flags = 0;
61
62 err = crypto_blkcipher_setkey(tfm, k, 16);
63 if (err) {
64 BT_ERR("cipher setkey failed: %d", err);
65 return err;
66 }
67
68 sg_init_one(&sg, r, 16);
69
70 iv_len = crypto_blkcipher_ivsize(tfm);
71 if (iv_len) {
72 memset(&iv, 0xff, iv_len);
73 crypto_blkcipher_set_iv(tfm, iv, iv_len);
74 }
75
76 err = crypto_blkcipher_encrypt(&desc, &sg, &sg, 16);
77 if (err)
78 BT_ERR("Encrypt data error %d", err);
79
80 return err;
81}
82
83static int smp_c1(struct crypto_blkcipher *tfm, u8 k[16], u8 r[16],
84 u8 preq[7], u8 pres[7], u8 _iat, bdaddr_t *ia,
85 u8 _rat, bdaddr_t *ra, u8 res[16])
86{
87 u8 p1[16], p2[16];
88 int err;
89
90 memset(p1, 0, 16);
91
92 /* p1 = pres || preq || _rat || _iat */
93 swap56(pres, p1);
94 swap56(preq, p1 + 7);
95 p1[14] = _rat;
96 p1[15] = _iat;
97
98 memset(p2, 0, 16);
99
100 /* p2 = padding || ia || ra */
101 baswap((bdaddr_t *) (p2 + 4), ia);
102 baswap((bdaddr_t *) (p2 + 10), ra);
103
104 /* res = r XOR p1 */
105 u128_xor((u128 *) res, (u128 *) r, (u128 *) p1);
106
107 /* res = e(k, res) */
108 err = smp_e(tfm, k, res);
109 if (err) {
110 BT_ERR("Encrypt data error");
111 return err;
112 }
113
114 /* res = res XOR p2 */
115 u128_xor((u128 *) res, (u128 *) res, (u128 *) p2);
116
117 /* res = e(k, res) */
118 err = smp_e(tfm, k, res);
119 if (err)
120 BT_ERR("Encrypt data error");
121
122 return err;
123}
124
125static int smp_s1(struct crypto_blkcipher *tfm, u8 k[16],
126 u8 r1[16], u8 r2[16], u8 _r[16])
127{
128 int err;
129
130	/* Only the least significant octets of r1 and r2 are used */
131 memcpy(_r, r1 + 8, 8);
132 memcpy(_r + 8, r2 + 8, 8);
133
134 err = smp_e(tfm, k, _r);
135 if (err)
136 BT_ERR("Encrypt data error");
137
138 return err;
139}
140
141static int smp_rand(u8 *buf)
142{
143 get_random_bytes(buf, 16);
144
145 return 0;
146}
147
148static struct sk_buff *smp_build_cmd(struct l2cap_conn *conn, u8 code,
149 u16 dlen, void *data)
150{
151 struct sk_buff *skb;
152 struct l2cap_hdr *lh;
153 int len;
154
155 len = L2CAP_HDR_SIZE + sizeof(code) + dlen;
156
157 if (len > conn->mtu)
158 return NULL;
159
160 skb = bt_skb_alloc(len, GFP_ATOMIC);
161 if (!skb)
162 return NULL;
163
164 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
165 lh->len = cpu_to_le16(sizeof(code) + dlen);
166 lh->cid = cpu_to_le16(L2CAP_CID_SMP);
167
168 memcpy(skb_put(skb, sizeof(code)), &code, sizeof(code));
169
170 memcpy(skb_put(skb, dlen), data, dlen);
171
172 return skb;
173}
174
175static void smp_send_cmd(struct l2cap_conn *conn, u8 code, u16 len, void *data)
176{
177 struct sk_buff *skb = smp_build_cmd(conn, code, len, data);
178
179 BT_DBG("code 0x%2.2x", code);
180
181 if (!skb)
182 return;
183
184 hci_send_acl(conn->hcon, skb, 0);
185}
186
187static __u8 seclevel_to_authreq(__u8 level)
188{
189 switch (level) {
190 case BT_SECURITY_HIGH:
191 /* Right now we don't support bonding */
192 return SMP_AUTH_MITM;
193
194 default:
195 return SMP_AUTH_NONE;
196 }
197}
198
199static void build_pairing_cmd(struct l2cap_conn *conn,
200 struct smp_cmd_pairing *req,
201 struct smp_cmd_pairing *rsp,
202 __u8 authreq)
203{
204 u8 dist_keys;
205
206 dist_keys = 0;
207 if (test_bit(HCI_PAIRABLE, &conn->hcon->hdev->flags)) {
208 dist_keys = SMP_DIST_ENC_KEY | SMP_DIST_ID_KEY | SMP_DIST_SIGN;
209 authreq |= SMP_AUTH_BONDING;
210 }
211
212 if (rsp == NULL) {
213 req->io_capability = conn->hcon->io_capability;
214 req->oob_flag = SMP_OOB_NOT_PRESENT;
215 req->max_key_size = SMP_MAX_ENC_KEY_SIZE;
216 req->init_key_dist = dist_keys;
217 req->resp_key_dist = dist_keys;
218 req->auth_req = authreq;
219 return;
220 }
221
222 rsp->io_capability = conn->hcon->io_capability;
223 rsp->oob_flag = SMP_OOB_NOT_PRESENT;
224 rsp->max_key_size = SMP_MAX_ENC_KEY_SIZE;
225 rsp->init_key_dist = req->init_key_dist & dist_keys;
226 rsp->resp_key_dist = req->resp_key_dist & dist_keys;
227 rsp->auth_req = authreq;
228}
229
230static u8 check_enc_key_size(struct l2cap_conn *conn, __u8 max_key_size)
231{
232 if ((max_key_size > SMP_MAX_ENC_KEY_SIZE) ||
233 (max_key_size < SMP_MIN_ENC_KEY_SIZE))
234 return SMP_ENC_KEY_SIZE;
235
236 conn->smp_key_size = max_key_size;
237
238 return 0;
239}
240
241static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
242{
243 struct smp_cmd_pairing rsp, *req = (void *) skb->data;
244 u8 key_size;
245
246 BT_DBG("conn %p", conn);
247
248 conn->preq[0] = SMP_CMD_PAIRING_REQ;
249 memcpy(&conn->preq[1], req, sizeof(*req));
250 skb_pull(skb, sizeof(*req));
251
252 if (req->oob_flag)
253 return SMP_OOB_NOT_AVAIL;
254
255 /* We didn't start the pairing, so no requirements */
256 build_pairing_cmd(conn, req, &rsp, SMP_AUTH_NONE);
257
258 key_size = min(req->max_key_size, rsp.max_key_size);
259 if (check_enc_key_size(conn, key_size))
260 return SMP_ENC_KEY_SIZE;
261
262 /* Just works */
263 memset(conn->tk, 0, sizeof(conn->tk));
264
265 conn->prsp[0] = SMP_CMD_PAIRING_RSP;
266 memcpy(&conn->prsp[1], &rsp, sizeof(rsp));
267
268 smp_send_cmd(conn, SMP_CMD_PAIRING_RSP, sizeof(rsp), &rsp);
269
270 mod_timer(&conn->security_timer, jiffies +
271 msecs_to_jiffies(SMP_TIMEOUT));
272
273 return 0;
274}
275
276static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
277{
278 struct smp_cmd_pairing *req, *rsp = (void *) skb->data;
279 struct smp_cmd_pairing_confirm cp;
280 struct crypto_blkcipher *tfm = conn->hcon->hdev->tfm;
281 int ret;
282 u8 res[16], key_size;
283
284 BT_DBG("conn %p", conn);
285
286 skb_pull(skb, sizeof(*rsp));
287
288 req = (void *) &conn->preq[1];
289
290 key_size = min(req->max_key_size, rsp->max_key_size);
291 if (check_enc_key_size(conn, key_size))
292 return SMP_ENC_KEY_SIZE;
293
294 if (rsp->oob_flag)
295 return SMP_OOB_NOT_AVAIL;
296
297 /* Just works */
298 memset(conn->tk, 0, sizeof(conn->tk));
299
300 conn->prsp[0] = SMP_CMD_PAIRING_RSP;
301 memcpy(&conn->prsp[1], rsp, sizeof(*rsp));
302
303 ret = smp_rand(conn->prnd);
304 if (ret)
305 return SMP_UNSPECIFIED;
306
307 ret = smp_c1(tfm, conn->tk, conn->prnd, conn->preq, conn->prsp, 0,
308 conn->src, conn->hcon->dst_type, conn->dst, res);
309 if (ret)
310 return SMP_UNSPECIFIED;
311
312 swap128(res, cp.confirm_val);
313
314 smp_send_cmd(conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cp), &cp);
315
316 return 0;
317}
318
319static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb)
320{
321 struct crypto_blkcipher *tfm = conn->hcon->hdev->tfm;
322
323 BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave");
324
325 memcpy(conn->pcnf, skb->data, sizeof(conn->pcnf));
326 skb_pull(skb, sizeof(conn->pcnf));
327
328 if (conn->hcon->out) {
329 u8 random[16];
330
331 swap128(conn->prnd, random);
332 smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(random),
333 random);
334 } else {
335 struct smp_cmd_pairing_confirm cp;
336 int ret;
337 u8 res[16];
338
339 ret = smp_rand(conn->prnd);
340 if (ret)
341 return SMP_UNSPECIFIED;
342
343 ret = smp_c1(tfm, conn->tk, conn->prnd, conn->preq, conn->prsp,
344 conn->hcon->dst_type, conn->dst,
345 0, conn->src, res);
346 if (ret)
347 return SMP_CONFIRM_FAILED;
348
349 swap128(res, cp.confirm_val);
350
351 smp_send_cmd(conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cp), &cp);
352 }
353
354 mod_timer(&conn->security_timer, jiffies +
355 msecs_to_jiffies(SMP_TIMEOUT));
356
357 return 0;
358}
359
360static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
361{
362 struct hci_conn *hcon = conn->hcon;
363 struct crypto_blkcipher *tfm = hcon->hdev->tfm;
364 int ret;
365 u8 key[16], res[16], random[16], confirm[16];
366
367 swap128(skb->data, random);
368 skb_pull(skb, sizeof(random));
369
370 if (conn->hcon->out)
371 ret = smp_c1(tfm, conn->tk, random, conn->preq, conn->prsp, 0,
372 conn->src, conn->hcon->dst_type, conn->dst,
373 res);
374 else
375 ret = smp_c1(tfm, conn->tk, random, conn->preq, conn->prsp,
376 conn->hcon->dst_type, conn->dst, 0, conn->src,
377 res);
378 if (ret)
379 return SMP_UNSPECIFIED;
380
381 BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave");
382
383 swap128(res, confirm);
384
385 if (memcmp(conn->pcnf, confirm, sizeof(conn->pcnf)) != 0) {
386 BT_ERR("Pairing failed (confirmation values mismatch)");
387 return SMP_CONFIRM_FAILED;
388 }
389
390 if (conn->hcon->out) {
391 u8 stk[16], rand[8];
392 __le16 ediv;
393
394 memset(rand, 0, sizeof(rand));
395 ediv = 0;
396
397 smp_s1(tfm, conn->tk, random, conn->prnd, key);
398 swap128(key, stk);
399
400 memset(stk + conn->smp_key_size, 0,
401 SMP_MAX_ENC_KEY_SIZE - conn->smp_key_size);
402
403 hci_le_start_enc(hcon, ediv, rand, stk);
404 hcon->enc_key_size = conn->smp_key_size;
405 } else {
406 u8 stk[16], r[16], rand[8];
407 __le16 ediv;
408
409 memset(rand, 0, sizeof(rand));
410 ediv = 0;
411
412 swap128(conn->prnd, r);
413 smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(r), r);
414
415 smp_s1(tfm, conn->tk, conn->prnd, random, key);
416 swap128(key, stk);
417
418 memset(stk + conn->smp_key_size, 0,
419 SMP_MAX_ENC_KEY_SIZE - conn->smp_key_size);
420
421 hci_add_ltk(conn->hcon->hdev, 0, conn->dst, conn->smp_key_size,
422 ediv, rand, stk);
423 }
424
425 return 0;
426}
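
This check is the heart of LE legacy pairing: each side recomputes the peer's confirm value from the random it just received and compares it with the value stored during the confirm exchange; only on a match is the STK derived via s1. Condensed, reusing c1_sketch() from the earlier sketch (p1/p2 assembly omitted):

#include <stdint.h>
#include <string.h>

int verify_confirm(const uint8_t tk[16], const uint8_t peer_rand[16],
		   const uint8_t p1[16], const uint8_t p2[16],
		   const uint8_t stored_cnf[16])
{
	uint8_t res[16];

	c1_sketch(tk, peer_rand, p1, p2, res);
	return memcmp(stored_cnf, res, 16) ? -1 /* confirm failed */ : 0;
}
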
427
428static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
429{
430 struct smp_cmd_security_req *rp = (void *) skb->data;
431 struct smp_cmd_pairing cp;
432 struct hci_conn *hcon = conn->hcon;
433
434 BT_DBG("conn %p", conn);
435
436 if (test_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend))
437 return 0;
438
439 skb_pull(skb, sizeof(*rp));
440
441 memset(&cp, 0, sizeof(cp));
442 build_pairing_cmd(conn, &cp, NULL, rp->auth_req);
443
444 conn->preq[0] = SMP_CMD_PAIRING_REQ;
445 memcpy(&conn->preq[1], &cp, sizeof(cp));
446
447 smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp);
448
449 mod_timer(&conn->security_timer, jiffies +
450 msecs_to_jiffies(SMP_TIMEOUT));
451
452 set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend);
453
454 return 0;
455}
456
457int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level)
458{
459 struct hci_conn *hcon = conn->hcon;
460 __u8 authreq;
461
462 BT_DBG("conn %p hcon %p level 0x%2.2x", conn, hcon, sec_level);
463
464 if (!lmp_host_le_capable(hcon->hdev))
465 return 1;
466
467 if (IS_ERR(hcon->hdev->tfm))
468 return 1;
469
470 if (test_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend))
471 return 0;
472
473 if (sec_level == BT_SECURITY_LOW)
474 return 1;
475
476 if (hcon->sec_level >= sec_level)
477 return 1;
478
479 authreq = seclevel_to_authreq(sec_level);
480
481 if (hcon->link_mode & HCI_LM_MASTER) {
482 struct smp_cmd_pairing cp;
483 struct link_key *key;
484
485 key = hci_find_link_key_type(hcon->hdev, conn->dst,
486 HCI_LK_SMP_LTK);
487 if (key) {
488 struct key_master_id *master = (void *) key->data;
489
490 hci_le_start_enc(hcon, master->ediv, master->rand,
491 key->val);
492 hcon->enc_key_size = key->pin_len;
493
494 goto done;
495 }
496
497 build_pairing_cmd(conn, &cp, NULL, authreq);
498 conn->preq[0] = SMP_CMD_PAIRING_REQ;
499 memcpy(&conn->preq[1], &cp, sizeof(cp));
500
501 mod_timer(&conn->security_timer, jiffies +
502 msecs_to_jiffies(SMP_TIMEOUT));
503
504 smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp);
505 } else {
506 struct smp_cmd_security_req cp;
507 cp.auth_req = authreq;
508 smp_send_cmd(conn, SMP_CMD_SECURITY_REQ, sizeof(cp), &cp);
509 }
510
511done:
512 hcon->pending_sec_level = sec_level;
513 set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend);
514
515 return 0;
516}
517
518static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb)
519{
520 struct smp_cmd_encrypt_info *rp = (void *) skb->data;
521
522 skb_pull(skb, sizeof(*rp));
523
524 memcpy(conn->tk, rp->ltk, sizeof(conn->tk));
525
526 return 0;
527}
528
529static int smp_cmd_master_ident(struct l2cap_conn *conn, struct sk_buff *skb)
530{
531 struct smp_cmd_master_ident *rp = (void *) skb->data;
532
533 skb_pull(skb, sizeof(*rp));
534
535 hci_add_ltk(conn->hcon->hdev, 1, conn->src, conn->smp_key_size,
536 rp->ediv, rp->rand, conn->tk);
537
538 smp_distribute_keys(conn, 1);
539
540 return 0;
541}
542
543int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
544{
545 __u8 code = skb->data[0];
546 __u8 reason;
547 int err = 0;
548
549 if (!lmp_host_le_capable(conn->hcon->hdev)) {
550 err = -ENOTSUPP;
551 reason = SMP_PAIRING_NOTSUPP;
552 goto done;
553 }
554
555 if (IS_ERR(conn->hcon->hdev->tfm)) {
556 err = PTR_ERR(conn->hcon->hdev->tfm);
557 reason = SMP_PAIRING_NOTSUPP;
558 goto done;
559 }
560
561 skb_pull(skb, sizeof(code));
562
563 switch (code) {
564 case SMP_CMD_PAIRING_REQ:
565 reason = smp_cmd_pairing_req(conn, skb);
566 break;
567
568 case SMP_CMD_PAIRING_FAIL:
569 reason = 0;
570 err = -EPERM;
571 break;
572
573 case SMP_CMD_PAIRING_RSP:
574 reason = smp_cmd_pairing_rsp(conn, skb);
575 break;
576
577 case SMP_CMD_SECURITY_REQ:
578 reason = smp_cmd_security_req(conn, skb);
579 break;
580
581 case SMP_CMD_PAIRING_CONFIRM:
582 reason = smp_cmd_pairing_confirm(conn, skb);
583 break;
584
585 case SMP_CMD_PAIRING_RANDOM:
586 reason = smp_cmd_pairing_random(conn, skb);
587 break;
588
589 case SMP_CMD_ENCRYPT_INFO:
590 reason = smp_cmd_encrypt_info(conn, skb);
591 break;
592
593 case SMP_CMD_MASTER_IDENT:
594 reason = smp_cmd_master_ident(conn, skb);
595 break;
596
597 case SMP_CMD_IDENT_INFO:
598 case SMP_CMD_IDENT_ADDR_INFO:
599 case SMP_CMD_SIGN_INFO:
600 /* Just ignored */
601 reason = 0;
602 break;
603
604 default:
605 BT_DBG("Unknown command code 0x%2.2x", code);
606
607 reason = SMP_CMD_NOTSUPP;
608 err = -EOPNOTSUPP;
609 goto done;
610 }
611
612done:
613 if (reason)
614 smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason),
615 &reason);
616
617 kfree_skb(skb);
618 return err;
619}
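
For reference while reading the dispatch above, these are the SMP opcode values as the Bluetooth Core specification assigns them (they should mirror the SMP_CMD_* constants in net/bluetooth/smp.h):

#define SMP_CMD_PAIRING_REQ	0x01
#define SMP_CMD_PAIRING_RSP	0x02
#define SMP_CMD_PAIRING_CONFIRM	0x03
#define SMP_CMD_PAIRING_RANDOM	0x04
#define SMP_CMD_PAIRING_FAIL	0x05
#define SMP_CMD_ENCRYPT_INFO	0x06
#define SMP_CMD_MASTER_IDENT	0x07
#define SMP_CMD_IDENT_INFO	0x08
#define SMP_CMD_IDENT_ADDR_INFO	0x09
#define SMP_CMD_SIGN_INFO	0x0a
#define SMP_CMD_SECURITY_REQ	0x0b
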
620
621int smp_distribute_keys(struct l2cap_conn *conn, __u8 force)
622{
623 struct smp_cmd_pairing *req, *rsp;
624 __u8 *keydist;
625
626 BT_DBG("conn %p force %d", conn, force);
627
628 if (IS_ERR(conn->hcon->hdev->tfm))
629 return PTR_ERR(conn->hcon->hdev->tfm);
630
631 rsp = (void *) &conn->prsp[1];
632
633 /* The responder sends its keys first */
634 if (!force && conn->hcon->out && (rsp->resp_key_dist & 0x07))
635 return 0;
636
637 req = (void *) &conn->preq[1];
638
639 if (conn->hcon->out) {
640 keydist = &rsp->init_key_dist;
641 *keydist &= req->init_key_dist;
642 } else {
643 keydist = &rsp->resp_key_dist;
644 *keydist &= req->resp_key_dist;
645 }
646
647
648 BT_DBG("keydist 0x%x", *keydist);
649
650 if (*keydist & SMP_DIST_ENC_KEY) {
651 struct smp_cmd_encrypt_info enc;
652 struct smp_cmd_master_ident ident;
653 __le16 ediv;
654
655 get_random_bytes(enc.ltk, sizeof(enc.ltk));
656 get_random_bytes(&ediv, sizeof(ediv));
657 get_random_bytes(ident.rand, sizeof(ident.rand));
658
659 smp_send_cmd(conn, SMP_CMD_ENCRYPT_INFO, sizeof(enc), &enc);
660
661 hci_add_ltk(conn->hcon->hdev, 1, conn->dst, conn->smp_key_size,
662 ediv, ident.rand, enc.ltk);
663
664 ident.ediv = cpu_to_le16(ediv);
665
666 smp_send_cmd(conn, SMP_CMD_MASTER_IDENT, sizeof(ident), &ident);
667
668 *keydist &= ~SMP_DIST_ENC_KEY;
669 }
670
671 if (*keydist & SMP_DIST_ID_KEY) {
672 struct smp_cmd_ident_addr_info addrinfo;
673 struct smp_cmd_ident_info idinfo;
674
675 /* Send a dummy key */
676 get_random_bytes(idinfo.irk, sizeof(idinfo.irk));
677
678 smp_send_cmd(conn, SMP_CMD_IDENT_INFO, sizeof(idinfo), &idinfo);
679
680 /* Just public address */
681 memset(&addrinfo, 0, sizeof(addrinfo));
682 bacpy(&addrinfo.bdaddr, conn->src);
683
684 smp_send_cmd(conn, SMP_CMD_IDENT_ADDR_INFO, sizeof(addrinfo),
685 &addrinfo);
686
687 *keydist &= ~SMP_DIST_ID_KEY;
688 }
689
690 if (*keydist & SMP_DIST_SIGN) {
691 struct smp_cmd_sign_info sign;
692
693 /* Send a dummy key */
694 get_random_bytes(sign.csrk, sizeof(sign.csrk));
695
696 smp_send_cmd(conn, SMP_CMD_SIGN_INFO, sizeof(sign), &sign);
697
698 *keydist &= ~SMP_DIST_SIGN;
699 }
700
701 return 0;
702}
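
smp_distribute_keys() walks the negotiated mask one key family at a time, sending each and clearing its bit, so *keydist reaching zero means this side has nothing left to distribute. The bookkeeping in miniature:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t keydist = 0x07;	/* ENC_KEY | ID_KEY | SIGN, as negotiated */
	static const char *family[] = { "LTK + master ident", "IRK + address", "CSRK" };
	int bit;

	for (bit = 0; bit < 3; bit++) {
		if (!(keydist & (1 << bit)))
			continue;
		printf("send %s\n", family[bit]);
		keydist &= ~(1 << bit);
	}
	printf("keydist = 0x%02x, done\n", keydist);
	return 0;
}
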
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index c188c803c09..32b8f9f7f79 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -49,7 +49,9 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
49 skb_pull(skb, ETH_HLEN); 49 skb_pull(skb, ETH_HLEN);
50 50
51 rcu_read_lock(); 51 rcu_read_lock();
52 if (is_multicast_ether_addr(dest)) { 52 if (is_broadcast_ether_addr(dest))
53 br_flood_deliver(br, skb);
54 else if (is_multicast_ether_addr(dest)) {
53 if (unlikely(netpoll_tx_running(dev))) { 55 if (unlikely(netpoll_tx_running(dev))) {
54 br_flood_deliver(br, skb); 56 br_flood_deliver(br, skb);
55 goto out; 57 goto out;
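
This hunk works because every broadcast frame is also a multicast frame (the group bit is set in ff:ff:ff:ff:ff:ff), so the broadcast test must run first or broadcasts would be routed through the IGMP snooping path. The two predicates, sketched in plain C:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

static bool is_multicast_ether(const uint8_t addr[6])
{
	return addr[0] & 0x01;			/* group bit */
}

static bool is_broadcast_ether(const uint8_t addr[6])
{
	static const uint8_t bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	return memcmp(addr, bcast, 6) == 0;	/* all ones */
}
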
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index f3ac1e858ee..f06ee39c73f 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -60,7 +60,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
60 br = p->br; 60 br = p->br;
61 br_fdb_update(br, p, eth_hdr(skb)->h_source); 61 br_fdb_update(br, p, eth_hdr(skb)->h_source);
62 62
63 if (is_multicast_ether_addr(dest) && 63 if (!is_broadcast_ether_addr(dest) && is_multicast_ether_addr(dest) &&
64 br_multicast_rcv(br, p, skb)) 64 br_multicast_rcv(br, p, skb))
65 goto drop; 65 goto drop;
66 66
@@ -77,7 +77,9 @@ int br_handle_frame_finish(struct sk_buff *skb)
77 77
78 dst = NULL; 78 dst = NULL;
79 79
80 if (is_multicast_ether_addr(dest)) { 80 if (is_broadcast_ether_addr(dest))
81 skb2 = skb;
82 else if (is_multicast_ether_addr(dest)) {
81 mdst = br_mdb_get(br, skb); 83 mdst = br_mdb_get(br, skb);
82 if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) { 84 if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) {
83 if ((mdst && mdst->mglist) || 85 if ((mdst && mdst->mglist) ||
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 29b9812c8da..2d85ca7111d 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1379,8 +1379,11 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
1379 if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl))) 1379 if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
1380 return -EINVAL; 1380 return -EINVAL;
1381 1381
1382 if (iph->protocol != IPPROTO_IGMP) 1382 if (iph->protocol != IPPROTO_IGMP) {
1383 if ((iph->daddr & IGMP_LOCAL_GROUP_MASK) != IGMP_LOCAL_GROUP)
1384 BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
1383 return 0; 1385 return 0;
1386 }
1384 1387
1385 len = ntohs(iph->tot_len); 1388 len = ntohs(iph->tot_len);
1386 if (skb->len < len || len < ip_hdrlen(skb)) 1389 if (skb->len < len || len < ip_hdrlen(skb))
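
The added test marks non-IGMP packets addressed to the 224.0.0.0/24 link-local control block as "routers only" so snooping does not absorb them. Assuming IGMP_LOCAL_GROUP and IGMP_LOCAL_GROUP_MASK encode that /24 in network byte order (their definitions sit outside this diff), the check amounts to:

#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>

static bool is_local_control_group(uint32_t daddr)	/* network byte order */
{
	return (daddr & htonl(0xffffff00U)) == htonl(0xe0000000U);	/* 224.0.0.x */
}
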
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 56149ec36d7..d6ec3720c77 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -109,11 +109,17 @@ static u32 *fake_cow_metrics(struct dst_entry *dst, unsigned long old)
109 return NULL; 109 return NULL;
110} 110}
111 111
112static struct neighbour *fake_neigh_lookup(const struct dst_entry *dst, const void *daddr)
113{
114 return NULL;
115}
116
112static struct dst_ops fake_dst_ops = { 117static struct dst_ops fake_dst_ops = {
113 .family = AF_INET, 118 .family = AF_INET,
114 .protocol = cpu_to_be16(ETH_P_IP), 119 .protocol = cpu_to_be16(ETH_P_IP),
115 .update_pmtu = fake_update_pmtu, 120 .update_pmtu = fake_update_pmtu,
116 .cow_metrics = fake_cow_metrics, 121 .cow_metrics = fake_cow_metrics,
122 .neigh_lookup = fake_neigh_lookup,
117}; 123};
118 124
119/* 125/*
@@ -343,24 +349,26 @@ static int br_nf_pre_routing_finish_ipv6(struct sk_buff *skb)
343static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb) 349static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
344{ 350{
345 struct nf_bridge_info *nf_bridge = skb->nf_bridge; 351 struct nf_bridge_info *nf_bridge = skb->nf_bridge;
352 struct neighbour *neigh;
346 struct dst_entry *dst; 353 struct dst_entry *dst;
347 354
348 skb->dev = bridge_parent(skb->dev); 355 skb->dev = bridge_parent(skb->dev);
349 if (!skb->dev) 356 if (!skb->dev)
350 goto free_skb; 357 goto free_skb;
351 dst = skb_dst(skb); 358 dst = skb_dst(skb);
352 if (dst->hh) { 359 neigh = dst_get_neighbour(dst);
353 neigh_hh_bridge(dst->hh, skb); 360 if (neigh->hh.hh_len) {
361 neigh_hh_bridge(&neigh->hh, skb);
354 skb->dev = nf_bridge->physindev; 362 skb->dev = nf_bridge->physindev;
355 return br_handle_frame_finish(skb); 363 return br_handle_frame_finish(skb);
356 } else if (dst->neighbour) { 364 } else {
357 /* the neighbour function below overwrites the complete 365 /* the neighbour function below overwrites the complete
358 * MAC header, so we save the Ethernet source address and 366 * MAC header, so we save the Ethernet source address and
359 * protocol number. */ 367 * protocol number. */
360 skb_copy_from_linear_data_offset(skb, -(ETH_HLEN-ETH_ALEN), skb->nf_bridge->data, ETH_HLEN-ETH_ALEN); 368 skb_copy_from_linear_data_offset(skb, -(ETH_HLEN-ETH_ALEN), skb->nf_bridge->data, ETH_HLEN-ETH_ALEN);
361 /* tell br_dev_xmit to continue with forwarding */ 369 /* tell br_dev_xmit to continue with forwarding */
362 nf_bridge->mask |= BRNF_BRIDGED_DNAT; 370 nf_bridge->mask |= BRNF_BRIDGED_DNAT;
363 return dst->neighbour->output(skb); 371 return neigh->output(neigh, skb);
364 } 372 }
365free_skb: 373free_skb:
366 kfree_skb(skb); 374 kfree_skb(skb);
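
Throughout this series, direct reads of dst->neighbour become dst_get_neighbour() calls against the renamed dst->_neighbour field. Judging from the call sites above, the accessor is roughly the following (a sketch; the real helper lives in include/net/dst.h, not in this diff):

static inline struct neighbour *dst_get_neighbour(struct dst_entry *dst)
{
	return dst->_neighbour;
}
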
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index ffb0dc4cc0e..6814083a92f 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -218,19 +218,24 @@ int __init br_netlink_init(void)
218 if (err < 0) 218 if (err < 0)
219 goto err1; 219 goto err1;
220 220
221 err = __rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, br_dump_ifinfo); 221 err = __rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL,
222 br_dump_ifinfo, NULL);
222 if (err) 223 if (err)
223 goto err2; 224 goto err2;
224 err = __rtnl_register(PF_BRIDGE, RTM_SETLINK, br_rtm_setlink, NULL); 225 err = __rtnl_register(PF_BRIDGE, RTM_SETLINK,
226 br_rtm_setlink, NULL, NULL);
225 if (err) 227 if (err)
226 goto err3; 228 goto err3;
227 err = __rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, br_fdb_add, NULL); 229 err = __rtnl_register(PF_BRIDGE, RTM_NEWNEIGH,
230 br_fdb_add, NULL, NULL);
228 if (err) 231 if (err)
229 goto err3; 232 goto err3;
230 err = __rtnl_register(PF_BRIDGE, RTM_DELNEIGH, br_fdb_delete, NULL); 233 err = __rtnl_register(PF_BRIDGE, RTM_DELNEIGH,
234 br_fdb_delete, NULL, NULL);
231 if (err) 235 if (err)
232 goto err3; 236 goto err3;
233 err = __rtnl_register(PF_BRIDGE, RTM_GETNEIGH, NULL, br_fdb_dump); 237 err = __rtnl_register(PF_BRIDGE, RTM_GETNEIGH,
238 NULL, br_fdb_dump, NULL);
234 if (err) 239 if (err)
235 goto err3; 240 goto err3;
236 241
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index 682c0fedf36..7c2fa0a0814 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -11,7 +11,6 @@
11 11
12#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__ 12#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
13 13
14#include <linux/version.h>
15#include <linux/kernel.h> 14#include <linux/kernel.h>
16#include <linux/if_arp.h> 15#include <linux/if_arp.h>
17#include <linux/net.h> 16#include <linux/net.h>
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index adbb424403d..865690948bb 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -7,8 +7,8 @@
7 7
8#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__ 8#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__
9 9
10#include <linux/version.h>
11#include <linux/fs.h> 10#include <linux/fs.h>
11#include <linux/hardirq.h>
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/netdevice.h> 14#include <linux/netdevice.h>
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 094fc5332d4..8ce926d3b2c 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -58,6 +58,7 @@
58#include <linux/skbuff.h> 58#include <linux/skbuff.h>
59#include <linux/can.h> 59#include <linux/can.h>
60#include <linux/can/core.h> 60#include <linux/can/core.h>
61#include <linux/ratelimit.h>
61#include <net/net_namespace.h> 62#include <net/net_namespace.h>
62#include <net/sock.h> 63#include <net/sock.h>
63 64
@@ -161,8 +162,8 @@ static int can_create(struct net *net, struct socket *sock, int protocol,
161 * return the error code immediately. Below we will 162 * return the error code immediately. Below we will
162 * return -EPROTONOSUPPORT 163 * return -EPROTONOSUPPORT
163 */ 164 */
164 if (err && printk_ratelimit()) 165 if (err)
165 printk(KERN_ERR "can: request_module " 166 printk_ratelimited(KERN_ERR "can: request_module "
166 "(can-proto-%d) failed.\n", protocol); 167 "(can-proto-%d) failed.\n", protocol);
167 168
168 cp = can_get_proto(protocol); 169 cp = can_get_proto(protocol);
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 184a6572b67..d6c8ae5b2e6 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -43,6 +43,7 @@
43 43
44#include <linux/module.h> 44#include <linux/module.h>
45#include <linux/init.h> 45#include <linux/init.h>
46#include <linux/interrupt.h>
46#include <linux/hrtimer.h> 47#include <linux/hrtimer.h>
47#include <linux/list.h> 48#include <linux/list.h>
48#include <linux/proc_fs.h> 49#include <linux/proc_fs.h>
diff --git a/net/ceph/ceph_fs.c b/net/ceph/ceph_fs.c
index a3a3a31d3c3..41466ccb972 100644
--- a/net/ceph/ceph_fs.c
+++ b/net/ceph/ceph_fs.c
@@ -36,16 +36,19 @@ int ceph_flags_to_mode(int flags)
36 if ((flags & O_DIRECTORY) == O_DIRECTORY) 36 if ((flags & O_DIRECTORY) == O_DIRECTORY)
37 return CEPH_FILE_MODE_PIN; 37 return CEPH_FILE_MODE_PIN;
38#endif 38#endif
39 if ((flags & O_APPEND) == O_APPEND)
40 flags |= O_WRONLY;
41 39
42 if ((flags & O_ACCMODE) == O_RDWR) 40 switch (flags & O_ACCMODE) {
43 mode = CEPH_FILE_MODE_RDWR; 41 case O_WRONLY:
44 else if ((flags & O_ACCMODE) == O_WRONLY)
45 mode = CEPH_FILE_MODE_WR; 42 mode = CEPH_FILE_MODE_WR;
46 else 43 break;
44 case O_RDONLY:
47 mode = CEPH_FILE_MODE_RD; 45 mode = CEPH_FILE_MODE_RD;
48 46 break;
47 case O_RDWR:
48 case O_ACCMODE: /* this is what the VFS does */
49 mode = CEPH_FILE_MODE_RDWR;
50 break;
51 }
49#ifdef O_LAZY 52#ifdef O_LAZY
50 if (flags & O_LAZY) 53 if (flags & O_LAZY)
51 mode |= CEPH_FILE_MODE_LAZY; 54 mode |= CEPH_FILE_MODE_LAZY;
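
The rewritten ceph_flags_to_mode() switches directly on flags & O_ACCMODE, whose four possible values include O_ACCMODE itself (3 on Linux), which the VFS can pass internally and which the new code maps to read-write. A quick check of the constants:

#include <fcntl.h>
#include <stdio.h>

int main(void)
{
	printf("O_RDONLY=%d O_WRONLY=%d O_RDWR=%d O_ACCMODE=%#o\n",
	       O_RDONLY, O_WRONLY, O_RDWR, O_ACCMODE);
	/* Linux: 0, 1, 2 and mask 03, so (flags & O_ACCMODE) can equal 3 */
	return 0;
}
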
diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
index 5a8009c9e0c..85f3bc0a706 100644
--- a/net/ceph/crypto.c
+++ b/net/ceph/crypto.c
@@ -444,7 +444,7 @@ int ceph_key_instantiate(struct key *key, const void *data, size_t datalen)
444 goto err; 444 goto err;
445 445
446 /* TODO ceph_crypto_key_decode should really take const input */ 446 /* TODO ceph_crypto_key_decode should really take const input */
447 p = (void*)data; 447 p = (void *)data;
448 ret = ceph_crypto_key_decode(ckey, &p, (char*)data+datalen); 448 ret = ceph_crypto_key_decode(ckey, &p, (char*)data+datalen);
449 if (ret < 0) 449 if (ret < 0)
450 goto err_ckey; 450 goto err_ckey;
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 9cb627a4073..7330c2757c0 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -477,8 +477,9 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
477 calc_layout(osdc, vino, layout, off, plen, req, ops); 477 calc_layout(osdc, vino, layout, off, plen, req, ops);
478 req->r_file_layout = *layout; /* keep a copy */ 478 req->r_file_layout = *layout; /* keep a copy */
479 479
480 /* in case it differs from natural alignment that calc_layout 480 /* in case it differs from natural (file) alignment that
481 filled in for us */ 481 calc_layout filled in for us */
482 req->r_num_pages = calc_pages_for(page_align, *plen);
482 req->r_page_alignment = page_align; 483 req->r_page_alignment = page_align;
483 484
484 ceph_osdc_build_request(req, off, plen, ops, 485 ceph_osdc_build_request(req, off, plen, ops,
@@ -2027,8 +2028,9 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
2027 int want = calc_pages_for(req->r_page_alignment, data_len); 2028 int want = calc_pages_for(req->r_page_alignment, data_len);
2028 2029
2029 if (unlikely(req->r_num_pages < want)) { 2030 if (unlikely(req->r_num_pages < want)) {
2030 pr_warning("tid %lld reply %d > expected %d pages\n", 2031 pr_warning("tid %lld reply has %d bytes %d pages, we"
2031 tid, want, m->nr_pages); 2032 " had only %d pages ready\n", tid, data_len,
2033 want, req->r_num_pages);
2032 *skip = 1; 2034 *skip = 1;
2033 ceph_msg_put(m); 2035 ceph_msg_put(m);
2034 m = NULL; 2036 m = NULL;
diff --git a/net/core/dev.c b/net/core/dev.c
index 9c58c1ec41a..9444c5cb413 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -199,6 +199,11 @@ static struct list_head ptype_all __read_mostly; /* Taps */
199DEFINE_RWLOCK(dev_base_lock); 199DEFINE_RWLOCK(dev_base_lock);
200EXPORT_SYMBOL(dev_base_lock); 200EXPORT_SYMBOL(dev_base_lock);
201 201
202static inline void dev_base_seq_inc(struct net *net)
203{
204 while (++net->dev_base_seq == 0);
205}
206
202static inline struct hlist_head *dev_name_hash(struct net *net, const char *name) 207static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
203{ 208{
204 unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ)); 209 unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
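
dev_base_seq_inc() bumps a per-namespace generation counter; netlink dumpers can sample it before and after a walk to detect that the device list changed underneath them. The loop form simply skips zero on wraparound, keeping 0 free as a "never sampled" value:

static unsigned int seq;	/* stand-in for net->dev_base_seq */

static void seq_inc(void)
{
	while (++seq == 0)
		;	/* skip 0 when the counter wraps */
}
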
@@ -237,6 +242,9 @@ static int list_netdevice(struct net_device *dev)
237 hlist_add_head_rcu(&dev->index_hlist, 242 hlist_add_head_rcu(&dev->index_hlist,
238 dev_index_hash(net, dev->ifindex)); 243 dev_index_hash(net, dev->ifindex));
239 write_unlock_bh(&dev_base_lock); 244 write_unlock_bh(&dev_base_lock);
245
246 dev_base_seq_inc(net);
247
240 return 0; 248 return 0;
241} 249}
242 250
@@ -253,6 +261,8 @@ static void unlist_netdevice(struct net_device *dev)
253 hlist_del_rcu(&dev->name_hlist); 261 hlist_del_rcu(&dev->name_hlist);
254 hlist_del_rcu(&dev->index_hlist); 262 hlist_del_rcu(&dev->index_hlist);
255 write_unlock_bh(&dev_base_lock); 263 write_unlock_bh(&dev_base_lock);
264
265 dev_base_seq_inc(dev_net(dev));
256} 266}
257 267
258/* 268/*
@@ -2532,7 +2542,7 @@ __u32 __skb_get_rxhash(struct sk_buff *skb)
2532 goto done; 2542 goto done;
2533 2543
2534 ip = (const struct iphdr *) (skb->data + nhoff); 2544 ip = (const struct iphdr *) (skb->data + nhoff);
2535 if (ip->frag_off & htons(IP_MF | IP_OFFSET)) 2545 if (ip_is_fragment(ip))
2536 ip_proto = 0; 2546 ip_proto = 0;
2537 else 2547 else
2538 ip_proto = ip->protocol; 2548 ip_proto = ip->protocol;
@@ -5199,7 +5209,7 @@ static void rollback_registered(struct net_device *dev)
5199 list_del(&single); 5209 list_del(&single);
5200} 5210}
5201 5211
5202u32 netdev_fix_features(struct net_device *dev, u32 features) 5212static u32 netdev_fix_features(struct net_device *dev, u32 features)
5203{ 5213{
5204 /* Fix illegal checksum combinations */ 5214 /* Fix illegal checksum combinations */
5205 if ((features & NETIF_F_HW_CSUM) && 5215 if ((features & NETIF_F_HW_CSUM) &&
@@ -5258,7 +5268,6 @@ u32 netdev_fix_features(struct net_device *dev, u32 features)
5258 5268
5259 return features; 5269 return features;
5260} 5270}
5261EXPORT_SYMBOL(netdev_fix_features);
5262 5271
5263int __netdev_update_features(struct net_device *dev) 5272int __netdev_update_features(struct net_device *dev)
5264{ 5273{
@@ -5478,11 +5487,9 @@ int register_netdevice(struct net_device *dev)
5478 dev->features |= NETIF_F_NOCACHE_COPY; 5487 dev->features |= NETIF_F_NOCACHE_COPY;
5479 } 5488 }
5480 5489
5481 /* Enable GRO and NETIF_F_HIGHDMA for vlans by default, 5490 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
5482 * vlan_dev_init() will do the dev->features check, so these features
5483 * are enabled only if supported by underlying device.
5484 */ 5491 */
5485 dev->vlan_features |= (NETIF_F_GRO | NETIF_F_HIGHDMA); 5492 dev->vlan_features |= NETIF_F_HIGHDMA;
5486 5493
5487 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev); 5494 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
5488 ret = notifier_to_errno(ret); 5495 ret = notifier_to_errno(ret);
@@ -5867,8 +5874,6 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
5867 5874
5868 dev->gso_max_size = GSO_MAX_SIZE; 5875 dev->gso_max_size = GSO_MAX_SIZE;
5869 5876
5870 INIT_LIST_HEAD(&dev->ethtool_ntuple_list.list);
5871 dev->ethtool_ntuple_list.count = 0;
5872 INIT_LIST_HEAD(&dev->napi_list); 5877 INIT_LIST_HEAD(&dev->napi_list);
5873 INIT_LIST_HEAD(&dev->unreg_list); 5878 INIT_LIST_HEAD(&dev->unreg_list);
5874 INIT_LIST_HEAD(&dev->link_watch_list); 5879 INIT_LIST_HEAD(&dev->link_watch_list);
@@ -5932,9 +5937,6 @@ void free_netdev(struct net_device *dev)
5932 /* Flush device addresses */ 5937 /* Flush device addresses */
5933 dev_addr_flush(dev); 5938 dev_addr_flush(dev);
5934 5939
5935 /* Clear ethtool n-tuple list */
5936 ethtool_ntuple_flush(dev);
5937
5938 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list) 5940 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
5939 netif_napi_del(p); 5941 netif_napi_del(p);
5940 5942
diff --git a/net/core/dst.c b/net/core/dst.c
index 9ccca038444..14b33baf073 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -171,8 +171,7 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
171 dst_init_metrics(dst, dst_default_metrics, true); 171 dst_init_metrics(dst, dst_default_metrics, true);
172 dst->expires = 0UL; 172 dst->expires = 0UL;
173 dst->path = dst; 173 dst->path = dst;
174 dst->neighbour = NULL; 174 dst->_neighbour = NULL;
175 dst->hh = NULL;
176#ifdef CONFIG_XFRM 175#ifdef CONFIG_XFRM
177 dst->xfrm = NULL; 176 dst->xfrm = NULL;
178#endif 177#endif
@@ -190,7 +189,8 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
190 dst->lastuse = jiffies; 189 dst->lastuse = jiffies;
191 dst->flags = flags; 190 dst->flags = flags;
192 dst->next = NULL; 191 dst->next = NULL;
193 dst_entries_add(ops, 1); 192 if (!(flags & DST_NOCOUNT))
193 dst_entries_add(ops, 1);
194 return dst; 194 return dst;
195} 195}
196EXPORT_SYMBOL(dst_alloc); 196EXPORT_SYMBOL(dst_alloc);
@@ -225,25 +225,20 @@ struct dst_entry *dst_destroy(struct dst_entry * dst)
225{ 225{
226 struct dst_entry *child; 226 struct dst_entry *child;
227 struct neighbour *neigh; 227 struct neighbour *neigh;
228 struct hh_cache *hh;
229 228
230 smp_rmb(); 229 smp_rmb();
231 230
232again: 231again:
233 neigh = dst->neighbour; 232 neigh = dst->_neighbour;
234 hh = dst->hh;
235 child = dst->child; 233 child = dst->child;
236 234
237 dst->hh = NULL;
238 if (hh)
239 hh_cache_put(hh);
240
241 if (neigh) { 235 if (neigh) {
242 dst->neighbour = NULL; 236 dst->_neighbour = NULL;
243 neigh_release(neigh); 237 neigh_release(neigh);
244 } 238 }
245 239
246 dst_entries_add(dst->ops, -1); 240 if (!(dst->flags & DST_NOCOUNT))
241 dst_entries_add(dst->ops, -1);
247 242
248 if (dst->ops->destroy) 243 if (dst->ops->destroy)
249 dst->ops->destroy(dst); 244 dst->ops->destroy(dst);
@@ -368,8 +363,8 @@ static void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
368 dst->dev = dev_net(dst->dev)->loopback_dev; 363 dst->dev = dev_net(dst->dev)->loopback_dev;
369 dev_hold(dst->dev); 364 dev_hold(dst->dev);
370 dev_put(dev); 365 dev_put(dev);
371 if (dst->neighbour && dst->neighbour->dev == dev) { 366 if (dst->_neighbour && dst->_neighbour->dev == dev) {
372 dst->neighbour->dev = dst->dev; 367 dst->_neighbour->dev = dst->dev;
373 dev_hold(dst->dev); 368 dev_hold(dst->dev);
374 dev_put(dev); 369 dev_put(dev);
375 } 370 }
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index fd14116ad7f..6cdba5fc2be 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -169,18 +169,6 @@ int ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported)
169} 169}
170EXPORT_SYMBOL(ethtool_op_set_flags); 170EXPORT_SYMBOL(ethtool_op_set_flags);
171 171
172void ethtool_ntuple_flush(struct net_device *dev)
173{
174 struct ethtool_rx_ntuple_flow_spec_container *fsc, *f;
175
176 list_for_each_entry_safe(fsc, f, &dev->ethtool_ntuple_list.list, list) {
177 list_del(&fsc->list);
178 kfree(fsc);
179 }
180 dev->ethtool_ntuple_list.count = 0;
181}
182EXPORT_SYMBOL(ethtool_ntuple_flush);
183
184/* Handlers for each ethtool command */ 172/* Handlers for each ethtool command */
185 173
186#define ETHTOOL_DEV_FEATURE_WORDS 1 174#define ETHTOOL_DEV_FEATURE_WORDS 1
@@ -865,34 +853,6 @@ out:
865 return ret; 853 return ret;
866} 854}
867 855
868static void __rx_ntuple_filter_add(struct ethtool_rx_ntuple_list *list,
869 struct ethtool_rx_ntuple_flow_spec *spec,
870 struct ethtool_rx_ntuple_flow_spec_container *fsc)
871{
872
873 /* don't add filters forever */
874 if (list->count >= ETHTOOL_MAX_NTUPLE_LIST_ENTRY) {
875 /* free the container */
876 kfree(fsc);
877 return;
878 }
879
880 /* Copy the whole filter over */
881 fsc->fs.flow_type = spec->flow_type;
882 memcpy(&fsc->fs.h_u, &spec->h_u, sizeof(spec->h_u));
883 memcpy(&fsc->fs.m_u, &spec->m_u, sizeof(spec->m_u));
884
885 fsc->fs.vlan_tag = spec->vlan_tag;
886 fsc->fs.vlan_tag_mask = spec->vlan_tag_mask;
887 fsc->fs.data = spec->data;
888 fsc->fs.data_mask = spec->data_mask;
889 fsc->fs.action = spec->action;
890
891 /* add to the list */
892 list_add_tail_rcu(&fsc->list, &list->list);
893 list->count++;
894}
895
896/* 856/*
897 * ethtool does not (or did not) set masks for flow parameters that are 857 * ethtool does not (or did not) set masks for flow parameters that are
898 * not specified, so if both value and mask are 0 then this must be 858 * not specified, so if both value and mask are 0 then this must be
@@ -930,8 +890,6 @@ static noinline_for_stack int ethtool_set_rx_ntuple(struct net_device *dev,
930{ 890{
931 struct ethtool_rx_ntuple cmd; 891 struct ethtool_rx_ntuple cmd;
932 const struct ethtool_ops *ops = dev->ethtool_ops; 892 const struct ethtool_ops *ops = dev->ethtool_ops;
933 struct ethtool_rx_ntuple_flow_spec_container *fsc = NULL;
934 int ret;
935 893
936 if (!ops->set_rx_ntuple) 894 if (!ops->set_rx_ntuple)
937 return -EOPNOTSUPP; 895 return -EOPNOTSUPP;
@@ -944,269 +902,7 @@ static noinline_for_stack int ethtool_set_rx_ntuple(struct net_device *dev,
944 902
945 rx_ntuple_fix_masks(&cmd.fs); 903 rx_ntuple_fix_masks(&cmd.fs);
946 904
947 /* 905 return ops->set_rx_ntuple(dev, &cmd);
948 * Cache filter in dev struct for GET operation only if
949 * the underlying driver doesn't have its own GET operation, and
950 * only if the filter was added successfully. First make sure we
951 * can allocate the filter, then continue if successful.
952 */
953 if (!ops->get_rx_ntuple) {
954 fsc = kmalloc(sizeof(*fsc), GFP_ATOMIC);
955 if (!fsc)
956 return -ENOMEM;
957 }
958
959 ret = ops->set_rx_ntuple(dev, &cmd);
960 if (ret) {
961 kfree(fsc);
962 return ret;
963 }
964
965 if (!ops->get_rx_ntuple)
966 __rx_ntuple_filter_add(&dev->ethtool_ntuple_list, &cmd.fs, fsc);
967
968 return ret;
969}
970
971static int ethtool_get_rx_ntuple(struct net_device *dev, void __user *useraddr)
972{
973 struct ethtool_gstrings gstrings;
974 const struct ethtool_ops *ops = dev->ethtool_ops;
975 struct ethtool_rx_ntuple_flow_spec_container *fsc;
976 u8 *data;
977 char *p;
978 int ret, i, num_strings = 0;
979
980 if (!ops->get_sset_count)
981 return -EOPNOTSUPP;
982
983 if (copy_from_user(&gstrings, useraddr, sizeof(gstrings)))
984 return -EFAULT;
985
986 ret = ops->get_sset_count(dev, gstrings.string_set);
987 if (ret < 0)
988 return ret;
989
990 gstrings.len = ret;
991
992 data = kzalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);
993 if (!data)
994 return -ENOMEM;
995
996 if (ops->get_rx_ntuple) {
997 /* driver-specific filter grab */
998 ret = ops->get_rx_ntuple(dev, gstrings.string_set, data);
999 goto copy;
1000 }
1001
1002 /* default ethtool filter grab */
1003 i = 0;
1004 p = (char *)data;
1005 list_for_each_entry(fsc, &dev->ethtool_ntuple_list.list, list) {
1006 sprintf(p, "Filter %d:\n", i);
1007 p += ETH_GSTRING_LEN;
1008 num_strings++;
1009
1010 switch (fsc->fs.flow_type) {
1011 case TCP_V4_FLOW:
1012 sprintf(p, "\tFlow Type: TCP\n");
1013 p += ETH_GSTRING_LEN;
1014 num_strings++;
1015 break;
1016 case UDP_V4_FLOW:
1017 sprintf(p, "\tFlow Type: UDP\n");
1018 p += ETH_GSTRING_LEN;
1019 num_strings++;
1020 break;
1021 case SCTP_V4_FLOW:
1022 sprintf(p, "\tFlow Type: SCTP\n");
1023 p += ETH_GSTRING_LEN;
1024 num_strings++;
1025 break;
1026 case AH_ESP_V4_FLOW:
1027 sprintf(p, "\tFlow Type: AH ESP\n");
1028 p += ETH_GSTRING_LEN;
1029 num_strings++;
1030 break;
1031 case ESP_V4_FLOW:
1032 sprintf(p, "\tFlow Type: ESP\n");
1033 p += ETH_GSTRING_LEN;
1034 num_strings++;
1035 break;
1036 case IP_USER_FLOW:
1037 sprintf(p, "\tFlow Type: Raw IP\n");
1038 p += ETH_GSTRING_LEN;
1039 num_strings++;
1040 break;
1041 case IPV4_FLOW:
1042 sprintf(p, "\tFlow Type: IPv4\n");
1043 p += ETH_GSTRING_LEN;
1044 num_strings++;
1045 break;
1046 default:
1047 sprintf(p, "\tFlow Type: Unknown\n");
1048 p += ETH_GSTRING_LEN;
1049 num_strings++;
1050 goto unknown_filter;
1051 }
1052
1053 /* now the rest of the filters */
1054 switch (fsc->fs.flow_type) {
1055 case TCP_V4_FLOW:
1056 case UDP_V4_FLOW:
1057 case SCTP_V4_FLOW:
1058 sprintf(p, "\tSrc IP addr: 0x%x\n",
1059 fsc->fs.h_u.tcp_ip4_spec.ip4src);
1060 p += ETH_GSTRING_LEN;
1061 num_strings++;
1062 sprintf(p, "\tSrc IP mask: 0x%x\n",
1063 fsc->fs.m_u.tcp_ip4_spec.ip4src);
1064 p += ETH_GSTRING_LEN;
1065 num_strings++;
1066 sprintf(p, "\tDest IP addr: 0x%x\n",
1067 fsc->fs.h_u.tcp_ip4_spec.ip4dst);
1068 p += ETH_GSTRING_LEN;
1069 num_strings++;
1070 sprintf(p, "\tDest IP mask: 0x%x\n",
1071 fsc->fs.m_u.tcp_ip4_spec.ip4dst);
1072 p += ETH_GSTRING_LEN;
1073 num_strings++;
1074 sprintf(p, "\tSrc Port: %d, mask: 0x%x\n",
1075 fsc->fs.h_u.tcp_ip4_spec.psrc,
1076 fsc->fs.m_u.tcp_ip4_spec.psrc);
1077 p += ETH_GSTRING_LEN;
1078 num_strings++;
1079 sprintf(p, "\tDest Port: %d, mask: 0x%x\n",
1080 fsc->fs.h_u.tcp_ip4_spec.pdst,
1081 fsc->fs.m_u.tcp_ip4_spec.pdst);
1082 p += ETH_GSTRING_LEN;
1083 num_strings++;
1084 sprintf(p, "\tTOS: %d, mask: 0x%x\n",
1085 fsc->fs.h_u.tcp_ip4_spec.tos,
1086 fsc->fs.m_u.tcp_ip4_spec.tos);
1087 p += ETH_GSTRING_LEN;
1088 num_strings++;
1089 break;
1090 case AH_ESP_V4_FLOW:
1091 case ESP_V4_FLOW:
1092 sprintf(p, "\tSrc IP addr: 0x%x\n",
1093 fsc->fs.h_u.ah_ip4_spec.ip4src);
1094 p += ETH_GSTRING_LEN;
1095 num_strings++;
1096 sprintf(p, "\tSrc IP mask: 0x%x\n",
1097 fsc->fs.m_u.ah_ip4_spec.ip4src);
1098 p += ETH_GSTRING_LEN;
1099 num_strings++;
1100 sprintf(p, "\tDest IP addr: 0x%x\n",
1101 fsc->fs.h_u.ah_ip4_spec.ip4dst);
1102 p += ETH_GSTRING_LEN;
1103 num_strings++;
1104 sprintf(p, "\tDest IP mask: 0x%x\n",
1105 fsc->fs.m_u.ah_ip4_spec.ip4dst);
1106 p += ETH_GSTRING_LEN;
1107 num_strings++;
1108 sprintf(p, "\tSPI: %d, mask: 0x%x\n",
1109 fsc->fs.h_u.ah_ip4_spec.spi,
1110 fsc->fs.m_u.ah_ip4_spec.spi);
1111 p += ETH_GSTRING_LEN;
1112 num_strings++;
1113 sprintf(p, "\tTOS: %d, mask: 0x%x\n",
1114 fsc->fs.h_u.ah_ip4_spec.tos,
1115 fsc->fs.m_u.ah_ip4_spec.tos);
1116 p += ETH_GSTRING_LEN;
1117 num_strings++;
1118 break;
1119 case IP_USER_FLOW:
1120 sprintf(p, "\tSrc IP addr: 0x%x\n",
1121 fsc->fs.h_u.usr_ip4_spec.ip4src);
1122 p += ETH_GSTRING_LEN;
1123 num_strings++;
1124 sprintf(p, "\tSrc IP mask: 0x%x\n",
1125 fsc->fs.m_u.usr_ip4_spec.ip4src);
1126 p += ETH_GSTRING_LEN;
1127 num_strings++;
1128 sprintf(p, "\tDest IP addr: 0x%x\n",
1129 fsc->fs.h_u.usr_ip4_spec.ip4dst);
1130 p += ETH_GSTRING_LEN;
1131 num_strings++;
1132 sprintf(p, "\tDest IP mask: 0x%x\n",
1133 fsc->fs.m_u.usr_ip4_spec.ip4dst);
1134 p += ETH_GSTRING_LEN;
1135 num_strings++;
1136 break;
1137 case IPV4_FLOW:
1138 sprintf(p, "\tSrc IP addr: 0x%x\n",
1139 fsc->fs.h_u.usr_ip4_spec.ip4src);
1140 p += ETH_GSTRING_LEN;
1141 num_strings++;
1142 sprintf(p, "\tSrc IP mask: 0x%x\n",
1143 fsc->fs.m_u.usr_ip4_spec.ip4src);
1144 p += ETH_GSTRING_LEN;
1145 num_strings++;
1146 sprintf(p, "\tDest IP addr: 0x%x\n",
1147 fsc->fs.h_u.usr_ip4_spec.ip4dst);
1148 p += ETH_GSTRING_LEN;
1149 num_strings++;
1150 sprintf(p, "\tDest IP mask: 0x%x\n",
1151 fsc->fs.m_u.usr_ip4_spec.ip4dst);
1152 p += ETH_GSTRING_LEN;
1153 num_strings++;
1154 sprintf(p, "\tL4 bytes: 0x%x, mask: 0x%x\n",
1155 fsc->fs.h_u.usr_ip4_spec.l4_4_bytes,
1156 fsc->fs.m_u.usr_ip4_spec.l4_4_bytes);
1157 p += ETH_GSTRING_LEN;
1158 num_strings++;
1159 sprintf(p, "\tTOS: %d, mask: 0x%x\n",
1160 fsc->fs.h_u.usr_ip4_spec.tos,
1161 fsc->fs.m_u.usr_ip4_spec.tos);
1162 p += ETH_GSTRING_LEN;
1163 num_strings++;
1164 sprintf(p, "\tIP Version: %d, mask: 0x%x\n",
1165 fsc->fs.h_u.usr_ip4_spec.ip_ver,
1166 fsc->fs.m_u.usr_ip4_spec.ip_ver);
1167 p += ETH_GSTRING_LEN;
1168 num_strings++;
1169 sprintf(p, "\tProtocol: %d, mask: 0x%x\n",
1170 fsc->fs.h_u.usr_ip4_spec.proto,
1171 fsc->fs.m_u.usr_ip4_spec.proto);
1172 p += ETH_GSTRING_LEN;
1173 num_strings++;
1174 break;
1175 }
1176 sprintf(p, "\tVLAN: %d, mask: 0x%x\n",
1177 fsc->fs.vlan_tag, fsc->fs.vlan_tag_mask);
1178 p += ETH_GSTRING_LEN;
1179 num_strings++;
1180 sprintf(p, "\tUser-defined: 0x%Lx\n", fsc->fs.data);
1181 p += ETH_GSTRING_LEN;
1182 num_strings++;
1183 sprintf(p, "\tUser-defined mask: 0x%Lx\n", fsc->fs.data_mask);
1184 p += ETH_GSTRING_LEN;
1185 num_strings++;
1186 if (fsc->fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP)
1187 sprintf(p, "\tAction: Drop\n");
1188 else
1189 sprintf(p, "\tAction: Direct to queue %d\n",
1190 fsc->fs.action);
1191 p += ETH_GSTRING_LEN;
1192 num_strings++;
1193unknown_filter:
1194 i++;
1195 }
1196copy:
1197 /* indicate to userspace how many strings we actually have */
1198 gstrings.len = num_strings;
1199 ret = -EFAULT;
1200 if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
1201 goto out;
1202 useraddr += sizeof(gstrings);
1203 if (copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN))
1204 goto out;
1205 ret = 0;
1206
1207out:
1208 kfree(data);
1209 return ret;
1210} 906}
1211 907
1212static int ethtool_get_regs(struct net_device *dev, char __user *useraddr) 908static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
@@ -1227,7 +923,7 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
1227 regs.len = reglen; 923 regs.len = reglen;
1228 924
1229 regbuf = vzalloc(reglen); 925 regbuf = vzalloc(reglen);
1230 if (!regbuf) 926 if (reglen && !regbuf)
1231 return -ENOMEM; 927 return -ENOMEM;
1232 928
1233 ops->get_regs(dev, &regs, regbuf); 929 ops->get_regs(dev, &regs, regbuf);
@@ -1236,7 +932,7 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
1236 if (copy_to_user(useraddr, &regs, sizeof(regs))) 932 if (copy_to_user(useraddr, &regs, sizeof(regs)))
1237 goto out; 933 goto out;
1238 useraddr += offsetof(struct ethtool_regs, data); 934 useraddr += offsetof(struct ethtool_regs, data);
1239 if (copy_to_user(useraddr, regbuf, regs.len)) 935 if (regbuf && copy_to_user(useraddr, regbuf, regs.len))
1240 goto out; 936 goto out;
1241 ret = 0; 937 ret = 0;
1242 938
@@ -2101,9 +1797,6 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
2101 case ETHTOOL_SRXNTUPLE: 1797 case ETHTOOL_SRXNTUPLE:
2102 rc = ethtool_set_rx_ntuple(dev, useraddr); 1798 rc = ethtool_set_rx_ntuple(dev, useraddr);
2103 break; 1799 break;
2104 case ETHTOOL_GRXNTUPLE:
2105 rc = ethtool_get_rx_ntuple(dev, useraddr);
2106 break;
2107 case ETHTOOL_GSSET_INFO: 1800 case ETHTOOL_GSSET_INFO:
2108 rc = ethtool_get_sset_info(dev, useraddr); 1801 rc = ethtool_get_sset_info(dev, useraddr);
2109 break; 1802 break;
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 008dc70b064..e7ab0c0285b 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -740,9 +740,9 @@ static struct pernet_operations fib_rules_net_ops = {
740static int __init fib_rules_init(void) 740static int __init fib_rules_init(void)
741{ 741{
742 int err; 742 int err;
743 rtnl_register(PF_UNSPEC, RTM_NEWRULE, fib_nl_newrule, NULL); 743 rtnl_register(PF_UNSPEC, RTM_NEWRULE, fib_nl_newrule, NULL, NULL);
744 rtnl_register(PF_UNSPEC, RTM_DELRULE, fib_nl_delrule, NULL); 744 rtnl_register(PF_UNSPEC, RTM_DELRULE, fib_nl_delrule, NULL, NULL);
745 rtnl_register(PF_UNSPEC, RTM_GETRULE, NULL, fib_nl_dumprule); 745 rtnl_register(PF_UNSPEC, RTM_GETRULE, NULL, fib_nl_dumprule, NULL);
746 746
747 err = register_pernet_subsys(&fib_rules_net_ops); 747 err = register_pernet_subsys(&fib_rules_net_ops);
748 if (err < 0) 748 if (err < 0)
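
The extra NULL arguments here (and in the br_netlink.c hunk earlier) track a change to the rtnetlink registration API, which gained a third callback used to size netlink dump allocations. On this reading the updated signature is:

void rtnl_register(int protocol, int msgtype,
		   rtnl_doit_func doit,
		   rtnl_dumpit_func dumpit,
		   rtnl_calcit_func calcit);	/* new: may be NULL */
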
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 799f06e03a2..8fab9b0bb20 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -98,7 +98,7 @@ static const struct file_operations neigh_stat_seq_fops;
98 98
99static DEFINE_RWLOCK(neigh_tbl_lock); 99static DEFINE_RWLOCK(neigh_tbl_lock);
100 100
101static int neigh_blackhole(struct sk_buff *skb) 101static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
102{ 102{
103 kfree_skb(skb); 103 kfree_skb(skb);
104 return -ENETDOWN; 104 return -ENETDOWN;
@@ -137,7 +137,7 @@ static int neigh_forced_gc(struct neigh_table *tbl)
137 write_lock_bh(&tbl->lock); 137 write_lock_bh(&tbl->lock);
138 nht = rcu_dereference_protected(tbl->nht, 138 nht = rcu_dereference_protected(tbl->nht,
139 lockdep_is_held(&tbl->lock)); 139 lockdep_is_held(&tbl->lock));
140 for (i = 0; i <= nht->hash_mask; i++) { 140 for (i = 0; i < (1 << nht->hash_shift); i++) {
141 struct neighbour *n; 141 struct neighbour *n;
142 struct neighbour __rcu **np; 142 struct neighbour __rcu **np;
143 143
@@ -210,7 +210,7 @@ static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
210 nht = rcu_dereference_protected(tbl->nht, 210 nht = rcu_dereference_protected(tbl->nht,
211 lockdep_is_held(&tbl->lock)); 211 lockdep_is_held(&tbl->lock));
212 212
213 for (i = 0; i <= nht->hash_mask; i++) { 213 for (i = 0; i < (1 << nht->hash_shift); i++) {
214 struct neighbour *n; 214 struct neighbour *n;
215 struct neighbour __rcu **np = &nht->hash_buckets[i]; 215 struct neighbour __rcu **np = &nht->hash_buckets[i];
216 216
@@ -297,6 +297,7 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl)
297 n->updated = n->used = now; 297 n->updated = n->used = now;
298 n->nud_state = NUD_NONE; 298 n->nud_state = NUD_NONE;
299 n->output = neigh_blackhole; 299 n->output = neigh_blackhole;
300 seqlock_init(&n->hh.hh_lock);
300 n->parms = neigh_parms_clone(&tbl->parms); 301 n->parms = neigh_parms_clone(&tbl->parms);
301 setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n); 302 setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);
302 303
@@ -312,9 +313,9 @@ out_entries:
312 goto out; 313 goto out;
313} 314}
314 315
315static struct neigh_hash_table *neigh_hash_alloc(unsigned int entries) 316static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
316{ 317{
317 size_t size = entries * sizeof(struct neighbour *); 318 size_t size = (1 << shift) * sizeof(struct neighbour *);
318 struct neigh_hash_table *ret; 319 struct neigh_hash_table *ret;
319 struct neighbour __rcu **buckets; 320 struct neighbour __rcu **buckets;
320 321
@@ -332,8 +333,9 @@ static struct neigh_hash_table *neigh_hash_alloc(unsigned int entries)
332 return NULL; 333 return NULL;
333 } 334 }
334 ret->hash_buckets = buckets; 335 ret->hash_buckets = buckets;
335 ret->hash_mask = entries - 1; 336 ret->hash_shift = shift;
336 get_random_bytes(&ret->hash_rnd, sizeof(ret->hash_rnd)); 337 get_random_bytes(&ret->hash_rnd, sizeof(ret->hash_rnd));
338 ret->hash_rnd |= 1;
337 return ret; 339 return ret;
338} 340}
339 341
@@ -342,7 +344,7 @@ static void neigh_hash_free_rcu(struct rcu_head *head)
342 struct neigh_hash_table *nht = container_of(head, 344 struct neigh_hash_table *nht = container_of(head,
343 struct neigh_hash_table, 345 struct neigh_hash_table,
344 rcu); 346 rcu);
345 size_t size = (nht->hash_mask + 1) * sizeof(struct neighbour *); 347 size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
346 struct neighbour __rcu **buckets = nht->hash_buckets; 348 struct neighbour __rcu **buckets = nht->hash_buckets;
347 349
348 if (size <= PAGE_SIZE) 350 if (size <= PAGE_SIZE)
@@ -353,21 +355,20 @@ static void neigh_hash_free_rcu(struct rcu_head *head)
353} 355}
354 356
355static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl, 357static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
356 unsigned long new_entries) 358 unsigned long new_shift)
357{ 359{
358 unsigned int i, hash; 360 unsigned int i, hash;
359 struct neigh_hash_table *new_nht, *old_nht; 361 struct neigh_hash_table *new_nht, *old_nht;
360 362
361 NEIGH_CACHE_STAT_INC(tbl, hash_grows); 363 NEIGH_CACHE_STAT_INC(tbl, hash_grows);
362 364
363 BUG_ON(!is_power_of_2(new_entries));
364 old_nht = rcu_dereference_protected(tbl->nht, 365 old_nht = rcu_dereference_protected(tbl->nht,
365 lockdep_is_held(&tbl->lock)); 366 lockdep_is_held(&tbl->lock));
366 new_nht = neigh_hash_alloc(new_entries); 367 new_nht = neigh_hash_alloc(new_shift);
367 if (!new_nht) 368 if (!new_nht)
368 return old_nht; 369 return old_nht;
369 370
370 for (i = 0; i <= old_nht->hash_mask; i++) { 371 for (i = 0; i < (1 << old_nht->hash_shift); i++) {
371 struct neighbour *n, *next; 372 struct neighbour *n, *next;
372 373
373 for (n = rcu_dereference_protected(old_nht->hash_buckets[i], 374 for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
@@ -377,7 +378,7 @@ static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
377 hash = tbl->hash(n->primary_key, n->dev, 378 hash = tbl->hash(n->primary_key, n->dev,
378 new_nht->hash_rnd); 379 new_nht->hash_rnd);
379 380
380 hash &= new_nht->hash_mask; 381 hash >>= (32 - new_nht->hash_shift);
381 next = rcu_dereference_protected(n->next, 382 next = rcu_dereference_protected(n->next,
382 lockdep_is_held(&tbl->lock)); 383 lockdep_is_held(&tbl->lock));
383 384
@@ -406,7 +407,7 @@ struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
406 407
407 rcu_read_lock_bh(); 408 rcu_read_lock_bh();
408 nht = rcu_dereference_bh(tbl->nht); 409 nht = rcu_dereference_bh(tbl->nht);
409 hash_val = tbl->hash(pkey, dev, nht->hash_rnd) & nht->hash_mask; 410 hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
410 411
411 for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]); 412 for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
412 n != NULL; 413 n != NULL;
@@ -436,7 +437,7 @@ struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
436 437
437 rcu_read_lock_bh(); 438 rcu_read_lock_bh();
438 nht = rcu_dereference_bh(tbl->nht); 439 nht = rcu_dereference_bh(tbl->nht);
439 hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) & nht->hash_mask; 440 hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);
440 441
441 for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]); 442 for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
442 n != NULL; 443 n != NULL;
@@ -492,10 +493,10 @@ struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
492 nht = rcu_dereference_protected(tbl->nht, 493 nht = rcu_dereference_protected(tbl->nht,
493 lockdep_is_held(&tbl->lock)); 494 lockdep_is_held(&tbl->lock));
494 495
495 if (atomic_read(&tbl->entries) > (nht->hash_mask + 1)) 496 if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
496 nht = neigh_hash_grow(tbl, (nht->hash_mask + 1) << 1); 497 nht = neigh_hash_grow(tbl, nht->hash_shift + 1);
497 498
498 hash_val = tbl->hash(pkey, dev, nht->hash_rnd) & nht->hash_mask; 499 hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
499 500
500 if (n->parms->dead) { 501 if (n->parms->dead) {
501 rc = ERR_PTR(-EINVAL); 502 rc = ERR_PTR(-EINVAL);
@@ -688,8 +689,6 @@ static void neigh_destroy_rcu(struct rcu_head *head)
688 */ 689 */
689void neigh_destroy(struct neighbour *neigh) 690void neigh_destroy(struct neighbour *neigh)
690{ 691{
691 struct hh_cache *hh;
692
693 NEIGH_CACHE_STAT_INC(neigh->tbl, destroys); 692 NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
694 693
695 if (!neigh->dead) { 694 if (!neigh->dead) {
@@ -702,16 +701,6 @@ void neigh_destroy(struct neighbour *neigh)
702 if (neigh_del_timer(neigh)) 701 if (neigh_del_timer(neigh))
703 printk(KERN_WARNING "Impossible event.\n"); 702 printk(KERN_WARNING "Impossible event.\n");
704 703
705 while ((hh = neigh->hh) != NULL) {
706 neigh->hh = hh->hh_next;
707 hh->hh_next = NULL;
708
709 write_seqlock_bh(&hh->hh_lock);
710 hh->hh_output = neigh_blackhole;
711 write_sequnlock_bh(&hh->hh_lock);
712 hh_cache_put(hh);
713 }
714
715 skb_queue_purge(&neigh->arp_queue); 704 skb_queue_purge(&neigh->arp_queue);
716 705
717 dev_put(neigh->dev); 706 dev_put(neigh->dev);
@@ -731,14 +720,9 @@ EXPORT_SYMBOL(neigh_destroy);
731 */ 720 */
732static void neigh_suspect(struct neighbour *neigh) 721static void neigh_suspect(struct neighbour *neigh)
733{ 722{
734 struct hh_cache *hh;
735
736 NEIGH_PRINTK2("neigh %p is suspected.\n", neigh); 723 NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
737 724
738 neigh->output = neigh->ops->output; 725 neigh->output = neigh->ops->output;
739
740 for (hh = neigh->hh; hh; hh = hh->hh_next)
741 hh->hh_output = neigh->ops->output;
742} 726}
743 727
744/* Neighbour state is OK; 728/* Neighbour state is OK;
@@ -748,14 +732,9 @@ static void neigh_suspect(struct neighbour *neigh)
748 */ 732 */
749static void neigh_connect(struct neighbour *neigh) 733static void neigh_connect(struct neighbour *neigh)
750{ 734{
751 struct hh_cache *hh;
752
753 NEIGH_PRINTK2("neigh %p is connected.\n", neigh); 735 NEIGH_PRINTK2("neigh %p is connected.\n", neigh);
754 736
755 neigh->output = neigh->ops->connected_output; 737 neigh->output = neigh->ops->connected_output;
756
757 for (hh = neigh->hh; hh; hh = hh->hh_next)
758 hh->hh_output = neigh->ops->hh_output;
759} 738}
760 739
761static void neigh_periodic_work(struct work_struct *work) 740static void neigh_periodic_work(struct work_struct *work)
@@ -784,7 +763,7 @@ static void neigh_periodic_work(struct work_struct *work)
784 neigh_rand_reach_time(p->base_reachable_time); 763 neigh_rand_reach_time(p->base_reachable_time);
785 } 764 }
786 765
787 for (i = 0 ; i <= nht->hash_mask; i++) { 766 for (i = 0 ; i < (1 << nht->hash_shift); i++) {
788 np = &nht->hash_buckets[i]; 767 np = &nht->hash_buckets[i];
789 768
790 while ((n = rcu_dereference_protected(*np, 769 while ((n = rcu_dereference_protected(*np,
@@ -1015,7 +994,7 @@ out_unlock_bh:
 }
 EXPORT_SYMBOL(__neigh_event_send);
 
-static void neigh_update_hhs(const struct neighbour *neigh)
+static void neigh_update_hhs(struct neighbour *neigh)
 {
 	struct hh_cache *hh;
 	void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
@@ -1025,7 +1004,8 @@ static void neigh_update_hhs(const struct neighbour *neigh)
 		update = neigh->dev->header_ops->cache_update;
 
 	if (update) {
-		for (hh = neigh->hh; hh; hh = hh->hh_next) {
+		hh = &neigh->hh;
+		if (hh->hh_len) {
 			write_seqlock_bh(&hh->hh_lock);
 			update(hh, neigh->dev, neigh->ha);
 			write_sequnlock_bh(&hh->hh_lock);
@@ -1173,12 +1153,13 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
 
 	while (neigh->nud_state & NUD_VALID &&
 	       (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
-		struct neighbour *n1 = neigh;
+		struct dst_entry *dst = skb_dst(skb);
+		struct neighbour *n2, *n1 = neigh;
 		write_unlock_bh(&neigh->lock);
 		/* On shaper/eql skb->dst->neighbour != neigh :( */
-		if (skb_dst(skb) && skb_dst(skb)->neighbour)
-			n1 = skb_dst(skb)->neighbour;
-		n1->output(skb);
+		if (dst && (n2 = dst_get_neighbour(dst)) != NULL)
+			n1 = n2;
+		n1->output(n1, skb);
 		write_lock_bh(&neigh->lock);
 	}
 	skb_queue_purge(&neigh->arp_queue);
@@ -1211,67 +1192,21 @@ struct neighbour *neigh_event_ns(struct neigh_table *tbl,
 }
 EXPORT_SYMBOL(neigh_event_ns);
 
-static inline bool neigh_hh_lookup(struct neighbour *n, struct dst_entry *dst,
-				   __be16 protocol)
-{
-	struct hh_cache *hh;
-
-	smp_rmb(); /* paired with smp_wmb() in neigh_hh_init() */
-	for (hh = n->hh; hh; hh = hh->hh_next) {
-		if (hh->hh_type == protocol) {
-			atomic_inc(&hh->hh_refcnt);
-			if (unlikely(cmpxchg(&dst->hh, NULL, hh) != NULL))
-				hh_cache_put(hh);
-			return true;
-		}
-	}
-	return false;
-}
-
 /* called with read_lock_bh(&n->lock); */
-static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
-			  __be16 protocol)
+static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst)
 {
-	struct hh_cache *hh;
 	struct net_device *dev = dst->dev;
-
-	if (likely(neigh_hh_lookup(n, dst, protocol)))
-		return;
-
-	/* slow path */
-	hh = kzalloc(sizeof(*hh), GFP_ATOMIC);
-	if (!hh)
-		return;
-
-	seqlock_init(&hh->hh_lock);
-	hh->hh_type = protocol;
-	atomic_set(&hh->hh_refcnt, 2);
-
-	if (dev->header_ops->cache(n, hh)) {
-		kfree(hh);
-		return;
-	}
+	__be16 prot = dst->ops->protocol;
+	struct hh_cache *hh = &n->hh;
 
 	write_lock_bh(&n->lock);
 
-	/* must check if another thread already did the insert */
-	if (neigh_hh_lookup(n, dst, protocol)) {
-		kfree(hh);
-		goto end;
-	}
-
-	if (n->nud_state & NUD_CONNECTED)
-		hh->hh_output = n->ops->hh_output;
-	else
-		hh->hh_output = n->ops->output;
-
-	hh->hh_next = n->hh;
-	smp_wmb(); /* paired with smp_rmb() in neigh_hh_lookup() */
-	n->hh = hh;
+	/* Only one thread can come in here and initialize the
+	 * hh_cache entry.
+	 */
+	if (!hh->hh_len)
+		dev->header_ops->cache(n, hh, prot);
 
-	if (unlikely(cmpxchg(&dst->hh, NULL, hh) != NULL))
-		hh_cache_put(hh);
-end:
 	write_unlock_bh(&n->lock);
 }
 
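The hunk above replaces the refcounted, per-protocol hh_cache list with a single entry embedded in the neighbour, where a non-zero hh_len doubles as the "already initialized" flag. A user-space sketch of that idea (field names simplified, not the kernel layout):

#include <stdio.h>
#include <string.h>

struct hh_cache_sketch {
	unsigned int hh_len;           /* 0 means "not filled in yet" */
	unsigned char hh_data[16];     /* cached hardware header */
};

struct neighbour_sketch {
	struct hh_cache_sketch hh;     /* embedded: no kzalloc, no list */
};

static void hh_init_once(struct neighbour_sketch *n, const void *hdr,
			 unsigned int len)
{
	if (!n->hh.hh_len) {           /* only the first caller fills it */
		memcpy(n->hh.hh_data, hdr, len);
		n->hh.hh_len = len;
	}
}

int main(void)
{
	struct neighbour_sketch n = { { 0, { 0 } } };

	hh_init_once(&n, "\xff\xff", 2);
	hh_init_once(&n, "\x00\x00", 2);     /* ignored: already cached */
	printf("len=%u first=%#x\n", n.hh.hh_len, n.hh.hh_data[0]);
	return 0;
}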
@@ -1280,7 +1215,7 @@ end:
  * but resolution is not made yet.
  */
 
-int neigh_compat_output(struct sk_buff *skb)
+int neigh_compat_output(struct neighbour *neigh, struct sk_buff *skb)
 {
 	struct net_device *dev = skb->dev;
 
@@ -1297,13 +1232,12 @@ EXPORT_SYMBOL(neigh_compat_output);
 
 /* Slow and careful. */
 
-int neigh_resolve_output(struct sk_buff *skb)
+int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
 {
 	struct dst_entry *dst = skb_dst(skb);
-	struct neighbour *neigh;
 	int rc = 0;
 
-	if (!dst || !(neigh = dst->neighbour))
+	if (!dst)
 		goto discard;
 
 	__skb_pull(skb, skb_network_offset(skb));
@@ -1313,10 +1247,8 @@ int neigh_resolve_output(struct sk_buff *skb)
 		struct net_device *dev = neigh->dev;
 		unsigned int seq;
 
-		if (dev->header_ops->cache &&
-		    !dst->hh &&
-		    !(dst->flags & DST_NOCACHE))
-			neigh_hh_init(neigh, dst, dst->ops->protocol);
+		if (dev->header_ops->cache && !neigh->hh.hh_len)
+			neigh_hh_init(neigh, dst);
 
 		do {
 			seq = read_seqbegin(&neigh->ha_lock);
@@ -1325,7 +1257,7 @@ int neigh_resolve_output(struct sk_buff *skb)
 		} while (read_seqretry(&neigh->ha_lock, seq));
 
 		if (err >= 0)
-			rc = neigh->ops->queue_xmit(skb);
+			rc = dev_queue_xmit(skb);
 		else
 			goto out_kfree_skb;
 	}
@@ -1333,7 +1265,7 @@ out:
 	return rc;
 discard:
 	NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
-		      dst, dst ? dst->neighbour : NULL);
+		      dst, neigh);
 out_kfree_skb:
 	rc = -EINVAL;
 	kfree_skb(skb);
@@ -1343,13 +1275,11 @@ EXPORT_SYMBOL(neigh_resolve_output);
 
 /* As fast as possible without hh cache */
 
-int neigh_connected_output(struct sk_buff *skb)
+int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
 {
-	int err;
-	struct dst_entry *dst = skb_dst(skb);
-	struct neighbour *neigh = dst->neighbour;
 	struct net_device *dev = neigh->dev;
 	unsigned int seq;
+	int err;
 
 	__skb_pull(skb, skb_network_offset(skb));
 
@@ -1360,7 +1290,7 @@ int neigh_connected_output(struct sk_buff *skb)
 	} while (read_seqretry(&neigh->ha_lock, seq));
 
 	if (err >= 0)
-		err = neigh->ops->queue_xmit(skb);
+		err = dev_queue_xmit(skb);
 	else {
 		err = -EINVAL;
 		kfree_skb(skb);
@@ -1369,6 +1299,12 @@ int neigh_connected_output(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(neigh_connected_output);
 
+int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
+{
+	return dev_queue_xmit(skb);
+}
+EXPORT_SYMBOL(neigh_direct_output);
+
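All three output methods now take the neighbour as an explicit first argument instead of re-deriving it from skb_dst(skb). A user-space mock (not the kernel API) of the new calling convention used by n1->output(n1, skb) above:

#include <stdio.h>

struct sk_buff_mock { const char *payload; };
struct neighbour_mock {
	const char *name;
	int (*output)(struct neighbour_mock *n, struct sk_buff_mock *skb);
};

/* Mirrors neigh_direct_output(): the neighbour is handed in, not looked up */
static int direct_output(struct neighbour_mock *n, struct sk_buff_mock *skb)
{
	printf("xmit via %s: %s\n", n->name, skb->payload);
	return 0;
}

int main(void)
{
	struct neighbour_mock n = { "eth0-neigh", direct_output };
	struct sk_buff_mock skb = { "hello" };

	return n.output(&n, &skb);   /* new style: n1->output(n1, skb) */
}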
 static void neigh_proxy_process(unsigned long arg)
 {
 	struct neigh_table *tbl = (struct neigh_table *)arg;
@@ -1540,7 +1476,7 @@ void neigh_table_init_no_netlink(struct neigh_table *tbl)
 		panic("cannot create neighbour proc dir entry");
 #endif
 
-	RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(8));
+	RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));
 
 	phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
 	tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
@@ -1857,7 +1793,7 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
 	rcu_read_lock_bh();
 	nht = rcu_dereference_bh(tbl->nht);
 	ndc.ndtc_hash_rnd = nht->hash_rnd;
-	ndc.ndtc_hash_mask = nht->hash_mask;
+	ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
 	rcu_read_unlock_bh();
 
 	NLA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
@@ -2200,7 +2136,7 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
 	rcu_read_lock_bh();
 	nht = rcu_dereference_bh(tbl->nht);
 
-	for (h = 0; h <= nht->hash_mask; h++) {
+	for (h = 0; h < (1 << nht->hash_shift); h++) {
 		if (h < s_h)
 			continue;
 		if (h > s_h)
@@ -2264,7 +2200,7 @@ void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void
 	nht = rcu_dereference_bh(tbl->nht);
 
 	read_lock(&tbl->lock); /* avoid resizes */
-	for (chain = 0; chain <= nht->hash_mask; chain++) {
+	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
 		struct neighbour *n;
 
 		for (n = rcu_dereference_bh(nht->hash_buckets[chain]);
@@ -2286,7 +2222,7 @@ void __neigh_for_each_release(struct neigh_table *tbl,
 
 	nht = rcu_dereference_protected(tbl->nht,
 					lockdep_is_held(&tbl->lock));
-	for (chain = 0; chain <= nht->hash_mask; chain++) {
+	for (chain = 0; chain < (1 << nht->hash_shift); chain++) {
 		struct neighbour *n;
 		struct neighbour __rcu **np;
 
@@ -2323,7 +2259,7 @@ static struct neighbour *neigh_get_first(struct seq_file *seq)
 	int bucket = state->bucket;
 
 	state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
-	for (bucket = 0; bucket <= nht->hash_mask; bucket++) {
+	for (bucket = 0; bucket < (1 << nht->hash_shift); bucket++) {
 		n = rcu_dereference_bh(nht->hash_buckets[bucket]);
 
 		while (n) {
@@ -2390,7 +2326,7 @@ next:
 	if (n)
 		break;
 
-	if (++state->bucket > nht->hash_mask)
+	if (++state->bucket >= (1 << nht->hash_shift))
 		break;
 
 	n = rcu_dereference_bh(nht->hash_buckets[state->bucket]);
@@ -2909,12 +2845,13 @@ EXPORT_SYMBOL(neigh_sysctl_unregister);
 
 static int __init neigh_init(void)
 {
-	rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL);
-	rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL);
-	rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info);
+	rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL, NULL);
+	rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL, NULL);
+	rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info, NULL);
 
-	rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info);
-	rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL);
+	rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info,
+		      NULL);
+	rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL, NULL);
 
 	return 0;
 }
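neigh_hash_alloc() now takes a shift, so the initial table is still 8 buckets (2^3) even though the argument changed from 8 to 3. A minimal sketch of a shift-sized allocator (illustrative only, not the kernel allocator):

#include <stdlib.h>

struct bucket_table {
	unsigned int shift;
	void **buckets;
};

static struct bucket_table *hash_alloc(unsigned int shift)
{
	struct bucket_table *t = malloc(sizeof(*t));

	if (!t)
		return NULL;
	t->shift = shift;
	/* 1UL << shift buckets: a power of two by construction */
	t->buckets = calloc(1UL << shift, sizeof(void *));
	if (!t->buckets) {
		free(t);
		return NULL;
	}
	return t;
}

int main(void)
{
	struct bucket_table *t = hash_alloc(3);   /* 2^3 == 8 buckets */

	if (!t)
		return 1;
	free(t->buckets);
	free(t);
	return 0;
}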
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 33d2a1fba13..1683e5db2f2 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -100,7 +100,6 @@ NETDEVICE_SHOW(addr_assign_type, fmt_dec);
 NETDEVICE_SHOW(addr_len, fmt_dec);
 NETDEVICE_SHOW(iflink, fmt_dec);
 NETDEVICE_SHOW(ifindex, fmt_dec);
-NETDEVICE_SHOW(features, fmt_hex);
 NETDEVICE_SHOW(type, fmt_dec);
 NETDEVICE_SHOW(link_mode, fmt_dec);
 
@@ -312,7 +311,6 @@ static struct device_attribute net_class_attributes[] = {
 	__ATTR(ifalias, S_IRUGO | S_IWUSR, show_ifalias, store_ifalias),
 	__ATTR(iflink, S_IRUGO, show_iflink, NULL),
 	__ATTR(ifindex, S_IRUGO, show_ifindex, NULL),
-	__ATTR(features, S_IRUGO, show_features, NULL),
 	__ATTR(type, S_IRUGO, show_type, NULL),
 	__ATTR(link_mode, S_IRUGO, show_link_mode, NULL),
 	__ATTR(address, S_IRUGO, show_address, NULL),
diff --git a/net/core/net-traces.c b/net/core/net-traces.c
index 7f1bb2aba03..52380b1d552 100644
--- a/net/core/net-traces.c
+++ b/net/core/net-traces.c
@@ -28,6 +28,8 @@
 #include <trace/events/skb.h>
 #include <trace/events/net.h>
 #include <trace/events/napi.h>
+#include <trace/events/sock.h>
+#include <trace/events/udp.h>
 
 EXPORT_TRACEPOINT_SYMBOL_GPL(kfree_skb);
 
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index ea489db1bc2..5bbdbf0d366 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -129,6 +129,7 @@ static __net_init int setup_net(struct net *net)
 
 	atomic_set(&net->count, 1);
 	atomic_set(&net->passive, 1);
+	net->dev_base_seq = 1;
 
 #ifdef NETNS_REFCNT_DEBUG
 	atomic_set(&net->use_count, 0);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 18d9cbda3a3..adf84dd8c7b 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -177,7 +177,7 @@ static void service_arp_queue(struct netpoll_info *npi)
 	}
 }
 
-void netpoll_poll_dev(struct net_device *dev)
+static void netpoll_poll_dev(struct net_device *dev)
 {
 	const struct net_device_ops *ops;
 
@@ -208,13 +208,6 @@ void netpoll_poll_dev(struct net_device *dev)
 
 	zap_completion_queue();
 }
-EXPORT_SYMBOL(netpoll_poll_dev);
-
-void netpoll_poll(struct netpoll *np)
-{
-	netpoll_poll_dev(np->dev);
-}
-EXPORT_SYMBOL(netpoll_poll);
 
 static void refill_skbs(void)
 {
@@ -275,7 +268,7 @@ repeat:
 
 	if (!skb) {
 		if (++count < 10) {
-			netpoll_poll(np);
+			netpoll_poll_dev(np->dev);
 			goto repeat;
 		}
 		return NULL;
@@ -336,7 +329,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 	}
 
 	/* tickle device maybe there is some cleanup */
-	netpoll_poll(np);
+	netpoll_poll_dev(np->dev);
 
 	udelay(USEC_PER_POLL);
 }
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index abd936d8a71..99d9e953fe3 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -56,9 +56,11 @@
 struct rtnl_link {
 	rtnl_doit_func		doit;
 	rtnl_dumpit_func	dumpit;
+	rtnl_calcit_func	calcit;
 };
 
 static DEFINE_MUTEX(rtnl_mutex);
+static u16 min_ifinfo_dump_size;
 
 void rtnl_lock(void)
 {
@@ -144,12 +146,28 @@ static rtnl_dumpit_func rtnl_get_dumpit(int protocol, int msgindex)
 	return tab ? tab[msgindex].dumpit : NULL;
 }
 
+static rtnl_calcit_func rtnl_get_calcit(int protocol, int msgindex)
+{
+	struct rtnl_link *tab;
+
+	if (protocol <= RTNL_FAMILY_MAX)
+		tab = rtnl_msg_handlers[protocol];
+	else
+		tab = NULL;
+
+	if (tab == NULL || tab[msgindex].calcit == NULL)
+		tab = rtnl_msg_handlers[PF_UNSPEC];
+
+	return tab ? tab[msgindex].calcit : NULL;
+}
+
 /**
  * __rtnl_register - Register a rtnetlink message type
  * @protocol: Protocol family or PF_UNSPEC
  * @msgtype: rtnetlink message type
  * @doit: Function pointer called for each request message
  * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
+ * @calcit: Function pointer to calc size of dump message
  *
  * Registers the specified function pointers (at least one of them has
  * to be non-NULL) to be called whenever a request message for the
@@ -162,7 +180,8 @@ static rtnl_dumpit_func rtnl_get_dumpit(int protocol, int msgindex)
  * Returns 0 on success or a negative error code.
  */
 int __rtnl_register(int protocol, int msgtype,
-		    rtnl_doit_func doit, rtnl_dumpit_func dumpit)
+		    rtnl_doit_func doit, rtnl_dumpit_func dumpit,
+		    rtnl_calcit_func calcit)
 {
 	struct rtnl_link *tab;
 	int msgindex;
@@ -185,6 +204,9 @@ int __rtnl_register(int protocol, int msgtype,
 	if (dumpit)
 		tab[msgindex].dumpit = dumpit;
 
+	if (calcit)
+		tab[msgindex].calcit = calcit;
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(__rtnl_register);
@@ -199,9 +221,10 @@ EXPORT_SYMBOL_GPL(__rtnl_register);
  * of memory implies no sense in continuing.
  */
 void rtnl_register(int protocol, int msgtype,
-		   rtnl_doit_func doit, rtnl_dumpit_func dumpit)
+		   rtnl_doit_func doit, rtnl_dumpit_func dumpit,
+		   rtnl_calcit_func calcit)
{
-	if (__rtnl_register(protocol, msgtype, doit, dumpit) < 0)
+	if (__rtnl_register(protocol, msgtype, doit, dumpit, calcit) < 0)
 		panic("Unable to register rtnetlink message handler, "
 		      "protocol = %d, message type = %d\n",
 		      protocol, msgtype);
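Every caller of rtnl_register()/__rtnl_register() in this patch gains a fifth argument. A hedged registration sketch (my_doit/my_dumpit/my_calcit are hypothetical module functions; only the shape of the call matches the new API):

static int my_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
static int my_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
static u16 my_calcit(struct sk_buff *skb);

static int __init my_init(void)
{
	/* handlers that can estimate dump size pass a calcit callback ... */
	rtnl_register(PF_UNSPEC, RTM_GETLINK, my_doit, my_dumpit, my_calcit);
	/* ... everyone else simply passes NULL, as the converted callers do */
	rtnl_register(PF_UNSPEC, RTM_SETLINK, my_doit, NULL, NULL);
	return 0;
}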
@@ -1009,6 +1032,8 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
 	s_idx = cb->args[1];
 
 	rcu_read_lock();
+	cb->seq = net->dev_base_seq;
+
 	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
 		idx = 0;
 		head = &net->dev_index_head[h];
@@ -1020,6 +1045,8 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
 					     cb->nlh->nlmsg_seq, 0,
 					     NLM_F_MULTI) <= 0)
 				goto out;
+
+			nl_dump_check_consistent(cb, nlmsg_hdr(skb));
 cont:
 			idx++;
 		}
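The pairing of net->dev_base_seq (set in setup_net() above) with nl_dump_check_consistent() implements generation-counter dump consistency: the dump records the sequence it started from, writers bump it, and each emitted message is checked so an interrupted dump can be flagged. A user-space sketch of the mechanism (the kernel marks such messages NLM_F_DUMP_INTR):

#include <stdio.h>

static unsigned int dev_base_seq = 1;      /* bumped on every list change */

struct dump_state { unsigned int seq; int inconsistent; };

static void dump_start(struct dump_state *st)
{
	st->seq = dev_base_seq;            /* like cb->seq = net->dev_base_seq */
	st->inconsistent = 0;
}

static void dump_check_consistent(struct dump_state *st)
{
	if (st->seq != dev_base_seq)
		st->inconsistent = 1;      /* would set NLM_F_DUMP_INTR */
}

int main(void)
{
	struct dump_state st;

	dump_start(&st);
	dev_base_seq++;                    /* concurrent device add/remove */
	dump_check_consistent(&st);
	printf("interrupted=%d\n", st.inconsistent);
	return 0;
}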
@@ -1818,6 +1845,11 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
 	return err;
 }
 
+static u16 rtnl_calcit(struct sk_buff *skb)
+{
+	return min_ifinfo_dump_size;
+}
+
 static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
 {
 	int idx;
@@ -1847,11 +1879,14 @@ void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change)
 	struct net *net = dev_net(dev);
 	struct sk_buff *skb;
 	int err = -ENOBUFS;
+	size_t if_info_size;
 
-	skb = nlmsg_new(if_nlmsg_size(dev), GFP_KERNEL);
+	skb = nlmsg_new((if_info_size = if_nlmsg_size(dev)), GFP_KERNEL);
 	if (skb == NULL)
 		goto errout;
 
+	min_ifinfo_dump_size = max_t(u16, if_info_size, min_ifinfo_dump_size);
+
 	err = rtnl_fill_ifinfo(skb, dev, type, 0, 0, change, 0);
 	if (err < 0) {
 		/* -EMSGSIZE implies BUG in if_nlmsg_size() */
@@ -1902,14 +1937,20 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 	if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) {
 		struct sock *rtnl;
 		rtnl_dumpit_func dumpit;
+		rtnl_calcit_func calcit;
+		u16 min_dump_alloc = 0;
 
 		dumpit = rtnl_get_dumpit(family, type);
 		if (dumpit == NULL)
 			return -EOPNOTSUPP;
+		calcit = rtnl_get_calcit(family, type);
+		if (calcit)
+			min_dump_alloc = calcit(skb);
 
 		__rtnl_unlock();
 		rtnl = net->rtnl;
-		err = netlink_dump_start(rtnl, skb, nlh, dumpit, NULL);
+		err = netlink_dump_start(rtnl, skb, nlh, dumpit,
+					 NULL, min_dump_alloc);
 		rtnl_lock();
 		return err;
 	}
@@ -2019,12 +2060,13 @@ void __init rtnetlink_init(void)
 	netlink_set_nonroot(NETLINK_ROUTE, NL_NONROOT_RECV);
 	register_netdevice_notifier(&rtnetlink_dev_notifier);
 
-	rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink, rtnl_dump_ifinfo);
-	rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL);
-	rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL);
-	rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL);
+	rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink,
+		      rtnl_dump_ifinfo, rtnl_calcit);
+	rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, NULL);
+	rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, NULL);
+	rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, NULL);
 
-	rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all);
-	rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all);
+	rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, NULL);
+	rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, NULL);
 }
 
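min_ifinfo_dump_size is a running high-water mark: every link notification folds its message size into it, and rtnl_calcit() hands that maximum back so dump buffers are sized for the biggest interface seen so far. A user-space sketch of the trick:

#include <stdio.h>

static unsigned short min_dump_size;      /* mirrors min_ifinfo_dump_size */

static unsigned short record_msg_size(unsigned long sz)
{
	if (sz > min_dump_size)
		min_dump_size = (unsigned short)sz;
	return min_dump_size;             /* what calcit() would return */
}

int main(void)
{
	record_msg_size(480);              /* plain ethernet device */
	record_msg_size(1100);             /* device with many attributes */
	printf("alloc hint: %u\n", record_msg_size(600));  /* stays 1100 */
	return 0;
}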
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 46cbd28f40f..2beda824636 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -329,6 +329,18 @@ static void skb_release_data(struct sk_buff *skb)
 			put_page(skb_shinfo(skb)->frags[i].page);
 	}
 
+	/*
+	 * If skb buf is from userspace, we need to notify the caller
+	 * the lower device DMA has done;
+	 */
+	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
+		struct ubuf_info *uarg;
+
+		uarg = skb_shinfo(skb)->destructor_arg;
+		if (uarg->callback)
+			uarg->callback(uarg);
+	}
+
 	if (skb_has_frag_list(skb))
 		skb_drop_fraglist(skb);
 
@@ -481,6 +493,9 @@ bool skb_recycle_check(struct sk_buff *skb, int skb_size)
 	if (irqs_disabled())
 		return false;
 
+	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
+		return false;
+
 	if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
 		return false;
 
@@ -596,6 +611,51 @@ struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
 }
 EXPORT_SYMBOL_GPL(skb_morph);
 
+/* skb frags copy userspace buffers to kernel */
+static int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
+{
+	int i;
+	int num_frags = skb_shinfo(skb)->nr_frags;
+	struct page *page, *head = NULL;
+	struct ubuf_info *uarg = skb_shinfo(skb)->destructor_arg;
+
+	for (i = 0; i < num_frags; i++) {
+		u8 *vaddr;
+		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
+
+		page = alloc_page(GFP_ATOMIC);
+		if (!page) {
+			while (head) {
+				struct page *next = (struct page *)head->private;
+				put_page(head);
+				head = next;
+			}
+			return -ENOMEM;
+		}
+		vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
+		memcpy(page_address(page),
+		       vaddr + f->page_offset, f->size);
+		kunmap_skb_frag(vaddr);
+		page->private = (unsigned long)head;
+		head = page;
+	}
+
+	/* skb frags release userspace buffers */
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+		put_page(skb_shinfo(skb)->frags[i].page);
+
+	uarg->callback(uarg);
+
+	/* skb frags point to kernel buffers */
+	for (i = skb_shinfo(skb)->nr_frags; i > 0; i--) {
+		skb_shinfo(skb)->frags[i - 1].page_offset = 0;
+		skb_shinfo(skb)->frags[i - 1].page = head;
+		head = (struct page *)head->private;
+	}
+	return 0;
+}
+
+
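skb_copy_ubufs() is the safety valve for zerocopy skbs: before anything keeps a long-lived reference to frags that point into user memory, those pages are replaced with kernel copies and the ubuf callback tells the owner its buffer is free. A user-space sketch of the copy-then-release pattern (all names here are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ubuf {
	char *data;                          /* caller-owned memory */
	size_t len;
	void (*callback)(struct ubuf *);     /* completion notification */
};

static void done(struct ubuf *u)
{
	printf("owner may reuse %zu bytes\n", u->len);
}

static char *copy_ubuf(struct ubuf *u)
{
	char *kcopy = malloc(u->len);        /* the "kernel" copy */

	if (!kcopy)
		return NULL;
	memcpy(kcopy, u->data, u->len);
	u->callback(u);                      /* release user buffer early */
	return kcopy;
}

int main(void)
{
	char user_mem[] = "payload";
	struct ubuf u = { user_mem, sizeof(user_mem), done };
	char *k = copy_ubuf(&u);

	if (!k)
		return 1;
	puts(k);
	free(k);
	return 0;
}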
 /**
  * skb_clone - duplicate an sk_buff
  * @skb: buffer to clone
@@ -614,6 +674,12 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
 {
 	struct sk_buff *n;
 
+	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
+		if (skb_copy_ubufs(skb, gfp_mask))
+			return NULL;
+		skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
+	}
+
 	n = skb + 1;
 	if (skb->fclone == SKB_FCLONE_ORIG &&
 	    n->fclone == SKB_FCLONE_UNAVAILABLE) {
@@ -731,6 +797,14 @@ struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
 	if (skb_shinfo(skb)->nr_frags) {
 		int i;
 
+		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
+			if (skb_copy_ubufs(skb, gfp_mask)) {
+				kfree_skb(n);
+				n = NULL;
+				goto out;
+			}
+			skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
+		}
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
 			get_page(skb_shinfo(n)->frags[i].page);
@@ -788,7 +862,6 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 		fastpath = true;
 	else {
 		int delta = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1;
-
 		fastpath = atomic_read(&skb_shinfo(skb)->dataref) == delta;
 	}
 
@@ -819,6 +892,12 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 	if (fastpath) {
 		kfree(skb->head);
 	} else {
+		/* copy this zero copy skb frags */
+		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
+			if (skb_copy_ubufs(skb, gfp_mask))
+				goto nofrags;
+			skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
+		}
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
 			get_page(skb_shinfo(skb)->frags[i].page);
 
@@ -853,6 +932,8 @@ adjust_others:
 	atomic_set(&skb_shinfo(skb)->dataref, 1);
 	return 0;
 
+nofrags:
+	kfree(data);
 nodata:
 	return -ENOMEM;
 }
@@ -1354,6 +1435,7 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 		}
 		start = end;
 	}
+
 	if (!len)
 		return 0;
 
diff --git a/net/core/sock.c b/net/core/sock.c
index 6e819780c23..bc745d00ea4 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -128,6 +128,8 @@
 
 #include <linux/filter.h>
 
+#include <trace/events/sock.h>
+
 #ifdef CONFIG_INET
 #include <net/tcp.h>
 #endif
@@ -158,7 +160,7 @@ static const char *const af_family_key_strings[AF_MAX+1] = {
   "sk_lock-AF_TIPC"  , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV"        ,
   "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN"     , "sk_lock-AF_PHONET"   ,
   "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG"      ,
-  "sk_lock-AF_MAX"
+  "sk_lock-AF_NFC"   , "sk_lock-AF_MAX"
 };
 static const char *const af_family_slock_key_strings[AF_MAX+1] = {
 	"slock-AF_UNSPEC", "slock-AF_UNIX"     , "slock-AF_INET"     ,
@@ -174,7 +176,7 @@ static const char *const af_family_slock_key_strings[AF_MAX+1] = {
   "slock-AF_TIPC"  , "slock-AF_BLUETOOTH", "slock-AF_IUCV"     ,
   "slock-AF_RXRPC" , "slock-AF_ISDN"     , "slock-AF_PHONET"   ,
   "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG"      ,
-  "slock-AF_MAX"
+  "slock-AF_NFC"   , "slock-AF_MAX"
 };
 static const char *const af_family_clock_key_strings[AF_MAX+1] = {
 	"clock-AF_UNSPEC", "clock-AF_UNIX"     , "clock-AF_INET"     ,
@@ -190,7 +192,7 @@ static const char *const af_family_clock_key_strings[AF_MAX+1] = {
   "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"     ,
   "clock-AF_RXRPC" , "clock-AF_ISDN"     , "clock-AF_PHONET"   ,
   "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG"      ,
-  "clock-AF_MAX"
+  "clock-AF_NFC"   , "clock-AF_MAX"
 };
 
 /*
@@ -292,6 +294,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
 	    (unsigned)sk->sk_rcvbuf) {
 		atomic_inc(&sk->sk_drops);
+		trace_sock_rcvqueue_full(sk, skb);
 		return -ENOMEM;
 	}
 
@@ -1736,6 +1739,8 @@ suppress_allocation:
 		return 1;
 	}
 
+	trace_sock_exceed_buf_limit(sk, prot, allocated);
+
 	/* Alas. Undo changes. */
 	sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
 	atomic_long_sub(amt, prot->memory_allocated);
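Both new tracepoints follow the same rule: instrument the failure path, not the fast path. A user-space sketch of that pattern, with a mock receive-queue check firing its hook exactly where the real code calls trace_sock_rcvqueue_full():

#include <stdio.h>

struct sock_mock { long rmem_alloc, rcvbuf, drops; };

static void trace_rcvqueue_full(const struct sock_mock *sk, long truesize)
{
	fprintf(stderr, "rcvqueue full: alloc=%ld size=%ld limit=%ld\n",
		sk->rmem_alloc, truesize, sk->rcvbuf);
}

static int queue_rcv(struct sock_mock *sk, long truesize)
{
	if (sk->rmem_alloc + truesize >= sk->rcvbuf) {
		sk->drops++;
		trace_rcvqueue_full(sk, truesize);   /* only on the drop path */
		return -1;
	}
	sk->rmem_alloc += truesize;                  /* fast path: no tracing */
	return 0;
}

int main(void)
{
	struct sock_mock sk = { 0, 4096, 0 };

	queue_rcv(&sk, 2048);
	queue_rcv(&sk, 4096);                        /* triggers the trace */
	printf("drops=%ld\n", sk.drops);
	return 0;
}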
diff --git a/net/core/timestamping.c b/net/core/timestamping.c
index 7e7ca375d43..98a52640e7c 100644
--- a/net/core/timestamping.c
+++ b/net/core/timestamping.c
@@ -68,6 +68,7 @@ void skb_clone_tx_timestamp(struct sk_buff *skb)
 		break;
 	}
 }
+EXPORT_SYMBOL_GPL(skb_clone_tx_timestamp);
 
 void skb_complete_tx_timestamp(struct sk_buff *skb,
 			       struct skb_shared_hwtstamps *hwtstamps)
@@ -121,6 +122,7 @@ bool skb_defer_rx_timestamp(struct sk_buff *skb)
 
 	return false;
 }
+EXPORT_SYMBOL_GPL(skb_defer_rx_timestamp);
 
 void __init skb_timestamping_init(void)
 {
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index 3609eacaf4c..3cb56af4e13 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -1166,64 +1166,6 @@ err:
 	return ret;
 }
 
-/* Handle IEEE 802.1Qaz SET commands. If any requested operation can not
- * be completed the entire msg is aborted and error value is returned.
- * No attempt is made to reconcile the case where only part of the
- * cmd can be completed.
- */
-static int dcbnl_ieee_set(struct net_device *netdev, struct nlattr **tb,
-			  u32 pid, u32 seq, u16 flags)
-{
-	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
-	struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
-	int err = -EOPNOTSUPP;
-
-	if (!ops)
-		goto err;
-
-	err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX,
-			       tb[DCB_ATTR_IEEE], dcbnl_ieee_policy);
-	if (err)
-		goto err;
-
-	if (ieee[DCB_ATTR_IEEE_ETS] && ops->ieee_setets) {
-		struct ieee_ets *ets = nla_data(ieee[DCB_ATTR_IEEE_ETS]);
-		err = ops->ieee_setets(netdev, ets);
-		if (err)
-			goto err;
-	}
-
-	if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) {
-		struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]);
-		err = ops->ieee_setpfc(netdev, pfc);
-		if (err)
-			goto err;
-	}
-
-	if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
-		struct nlattr *attr;
-		int rem;
-
-		nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
-			struct dcb_app *app_data;
-			if (nla_type(attr) != DCB_ATTR_IEEE_APP)
-				continue;
-			app_data = nla_data(attr);
-			if (ops->ieee_setapp)
-				err = ops->ieee_setapp(netdev, app_data);
-			else
-				err = dcb_setapp(netdev, app_data);
-			if (err)
-				goto err;
-		}
-	}
-
-err:
-	dcbnl_reply(err, RTM_SETDCB, DCB_CMD_IEEE_SET, DCB_ATTR_IEEE,
-		    pid, seq, flags);
-	return err;
-}
-
 static int dcbnl_build_peer_app(struct net_device *netdev, struct sk_buff* skb,
 				int app_nested_type, int app_info_type,
 				int app_entry_type)
@@ -1279,29 +1221,13 @@ nla_put_failure:
 }
 
 /* Handle IEEE 802.1Qaz GET commands. */
-static int dcbnl_ieee_get(struct net_device *netdev, struct nlattr **tb,
-			  u32 pid, u32 seq, u16 flags)
+static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
 {
-	struct sk_buff *skb;
-	struct nlmsghdr *nlh;
-	struct dcbmsg *dcb;
 	struct nlattr *ieee, *app;
 	struct dcb_app_type *itr;
 	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
-	int err;
-
-	if (!ops)
-		return -EOPNOTSUPP;
-
-	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
-	if (!skb)
-		return -ENOBUFS;
-
-	nlh = NLMSG_NEW(skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
-
-	dcb = NLMSG_DATA(nlh);
-	dcb->dcb_family = AF_UNSPEC;
-	dcb->cmd = DCB_CMD_IEEE_GET;
+	int dcbx;
+	int err = -EMSGSIZE;
 
 	NLA_PUT_STRING(skb, DCB_ATTR_IFNAME, netdev->name);
 
@@ -1338,6 +1264,12 @@ static int dcbnl_ieee_get(struct net_device *netdev, struct nlattr **tb,
 			}
 		}
 	}
+
+	if (netdev->dcbnl_ops->getdcbx)
+		dcbx = netdev->dcbnl_ops->getdcbx(netdev);
+	else
+		dcbx = -EOPNOTSUPP;
+
 	spin_unlock(&dcb_lock);
 	nla_nest_end(skb, app);
 
@@ -1366,16 +1298,413 @@ static int dcbnl_ieee_get(struct net_device *netdev, struct nlattr **tb,
 	}
 
 	nla_nest_end(skb, ieee);
-	nlmsg_end(skb, nlh);
+	if (dcbx >= 0) {
+		err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
+		if (err)
+			goto nla_put_failure;
+	}
+
+	return 0;
 
-	return rtnl_unicast(skb, &init_net, pid);
 nla_put_failure:
-	nlmsg_cancel(skb, nlh);
-nlmsg_failure:
-	kfree_skb(skb);
-	return -1;
+	return err;
 }
 
+static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
+			     int dir)
+{
+	u8 pgid, up_map, prio, tc_pct;
+	const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
+	int i = dir ? DCB_ATTR_CEE_TX_PG : DCB_ATTR_CEE_RX_PG;
+	struct nlattr *pg = nla_nest_start(skb, i);
+
+	if (!pg)
+		goto nla_put_failure;
+
+	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
+		struct nlattr *tc_nest = nla_nest_start(skb, i);
+
+		if (!tc_nest)
+			goto nla_put_failure;
+
+		pgid = DCB_ATTR_VALUE_UNDEFINED;
+		prio = DCB_ATTR_VALUE_UNDEFINED;
+		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
+		up_map = DCB_ATTR_VALUE_UNDEFINED;
+
+		if (!dir)
+			ops->getpgtccfgrx(dev, i - DCB_PG_ATTR_TC_0,
+					  &prio, &pgid, &tc_pct, &up_map);
+		else
+			ops->getpgtccfgtx(dev, i - DCB_PG_ATTR_TC_0,
+					  &prio, &pgid, &tc_pct, &up_map);
+
+		NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_PGID, pgid);
+		NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map);
+		NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio);
+		NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct);
+		nla_nest_end(skb, tc_nest);
+	}
+
+	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
+		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
+
+		if (!dir)
+			ops->getpgbwgcfgrx(dev, i - DCB_PG_ATTR_BW_ID_0,
+					   &tc_pct);
+		else
+			ops->getpgbwgcfgtx(dev, i - DCB_PG_ATTR_BW_ID_0,
+					   &tc_pct);
+		NLA_PUT_U8(skb, i, tc_pct);
+	}
+	nla_nest_end(skb, pg);
+	return 0;
+
+nla_put_failure:
+	return -EMSGSIZE;
+}
+
+static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct nlattr *cee, *app;
+	struct dcb_app_type *itr;
+	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
+	int dcbx, i, err = -EMSGSIZE;
+	u8 value;
+
+	NLA_PUT_STRING(skb, DCB_ATTR_IFNAME, netdev->name);
+
+	cee = nla_nest_start(skb, DCB_ATTR_CEE);
+	if (!cee)
+		goto nla_put_failure;
+
+	/* local pg */
+	if (ops->getpgtccfgtx && ops->getpgbwgcfgtx) {
+		err = dcbnl_cee_pg_fill(skb, netdev, 1);
+		if (err)
+			goto nla_put_failure;
+	}
+
+	if (ops->getpgtccfgrx && ops->getpgbwgcfgrx) {
+		err = dcbnl_cee_pg_fill(skb, netdev, 0);
+		if (err)
+			goto nla_put_failure;
+	}
+
+	/* local pfc */
+	if (ops->getpfccfg) {
+		struct nlattr *pfc_nest = nla_nest_start(skb, DCB_ATTR_CEE_PFC);
+
+		if (!pfc_nest)
+			goto nla_put_failure;
+
+		for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
+			ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, &value);
+			NLA_PUT_U8(skb, i, value);
+		}
+		nla_nest_end(skb, pfc_nest);
+	}
+
+	/* local app */
+	spin_lock(&dcb_lock);
+	app = nla_nest_start(skb, DCB_ATTR_CEE_APP_TABLE);
+	if (!app)
+		goto dcb_unlock;
+
+	list_for_each_entry(itr, &dcb_app_list, list) {
+		if (strncmp(itr->name, netdev->name, IFNAMSIZ) == 0) {
+			struct nlattr *app_nest = nla_nest_start(skb,
+								 DCB_ATTR_APP);
+			if (!app_nest)
+				goto dcb_unlock;
+
+			err = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE,
+					 itr->app.selector);
+			if (err)
+				goto dcb_unlock;
+
+			err = nla_put_u16(skb, DCB_APP_ATTR_ID,
+					  itr->app.protocol);
+			if (err)
+				goto dcb_unlock;
+
+			err = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY,
+					 itr->app.priority);
+			if (err)
+				goto dcb_unlock;
+
+			nla_nest_end(skb, app_nest);
+		}
+	}
+	nla_nest_end(skb, app);
+
+	if (netdev->dcbnl_ops->getdcbx)
+		dcbx = netdev->dcbnl_ops->getdcbx(netdev);
+	else
+		dcbx = -EOPNOTSUPP;
+
+	spin_unlock(&dcb_lock);
+
+	/* features flags */
+	if (ops->getfeatcfg) {
+		struct nlattr *feat = nla_nest_start(skb, DCB_ATTR_CEE_FEAT);
+		if (!feat)
+			goto nla_put_failure;
+
+		for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX;
+		     i++)
+			if (!ops->getfeatcfg(netdev, i, &value))
+				NLA_PUT_U8(skb, i, value);
+
+		nla_nest_end(skb, feat);
+	}
+
+	/* peer info if available */
+	if (ops->cee_peer_getpg) {
+		struct cee_pg pg;
+		err = ops->cee_peer_getpg(netdev, &pg);
+		if (!err)
+			NLA_PUT(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg);
+	}
+
+	if (ops->cee_peer_getpfc) {
+		struct cee_pfc pfc;
+		err = ops->cee_peer_getpfc(netdev, &pfc);
+		if (!err)
+			NLA_PUT(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc);
+	}
+
+	if (ops->peer_getappinfo && ops->peer_getapptable) {
+		err = dcbnl_build_peer_app(netdev, skb,
+					   DCB_ATTR_CEE_PEER_APP_TABLE,
+					   DCB_ATTR_CEE_PEER_APP_INFO,
+					   DCB_ATTR_CEE_PEER_APP);
+		if (err)
+			goto nla_put_failure;
+	}
+	nla_nest_end(skb, cee);
+
+	/* DCBX state */
+	if (dcbx >= 0) {
+		err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
+		if (err)
+			goto nla_put_failure;
+	}
+	return 0;
+
+dcb_unlock:
+	spin_unlock(&dcb_lock);
+nla_put_failure:
+	return err;
+}
+
+static int dcbnl_notify(struct net_device *dev, int event, int cmd,
+			u32 seq, u32 pid, int dcbx_ver)
+{
+	struct net *net = dev_net(dev);
+	struct sk_buff *skb;
+	struct nlmsghdr *nlh;
+	struct dcbmsg *dcb;
+	const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
+	int err;
+
+	if (!ops)
+		return -EOPNOTSUPP;
+
+	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	if (!skb)
+		return -ENOBUFS;
+
+	nlh = nlmsg_put(skb, pid, 0, event, sizeof(*dcb), 0);
+	if (nlh == NULL) {
+		nlmsg_free(skb);
+		return -EMSGSIZE;
+	}
+
+	dcb = NLMSG_DATA(nlh);
+	dcb->dcb_family = AF_UNSPEC;
+	dcb->cmd = cmd;
+
+	if (dcbx_ver == DCB_CAP_DCBX_VER_IEEE)
+		err = dcbnl_ieee_fill(skb, dev);
+	else
+		err = dcbnl_cee_fill(skb, dev);
+
+	if (err < 0) {
+		/* Report error to broadcast listeners */
+		nlmsg_cancel(skb, nlh);
+		kfree_skb(skb);
+		rtnl_set_sk_err(net, RTNLGRP_DCB, err);
+	} else {
+		/* End nlmsg and notify broadcast listeners */
+		nlmsg_end(skb, nlh);
+		rtnl_notify(skb, net, 0, RTNLGRP_DCB, NULL, GFP_KERNEL);
+	}
+
+	return err;
+}
+
+int dcbnl_ieee_notify(struct net_device *dev, int event, int cmd,
+		      u32 seq, u32 pid)
+{
+	return dcbnl_notify(dev, event, cmd, seq, pid, DCB_CAP_DCBX_VER_IEEE);
+}
+EXPORT_SYMBOL(dcbnl_ieee_notify);
+
+int dcbnl_cee_notify(struct net_device *dev, int event, int cmd,
+		     u32 seq, u32 pid)
+{
+	return dcbnl_notify(dev, event, cmd, seq, pid, DCB_CAP_DCBX_VER_CEE);
+}
+EXPORT_SYMBOL(dcbnl_cee_notify);
+
+/* Handle IEEE 802.1Qaz SET commands. If any requested operation can not
+ * be completed the entire msg is aborted and error value is returned.
+ * No attempt is made to reconcile the case where only part of the
+ * cmd can be completed.
+ */
+static int dcbnl_ieee_set(struct net_device *netdev, struct nlattr **tb,
+			  u32 pid, u32 seq, u16 flags)
+{
+	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
+	struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
+	int err = -EOPNOTSUPP;
+
+	if (!ops)
+		return err;
+
+	if (!tb[DCB_ATTR_IEEE])
+		return -EINVAL;
+
+	err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX,
+			       tb[DCB_ATTR_IEEE], dcbnl_ieee_policy);
+	if (err)
+		return err;
+
+	if (ieee[DCB_ATTR_IEEE_ETS] && ops->ieee_setets) {
+		struct ieee_ets *ets = nla_data(ieee[DCB_ATTR_IEEE_ETS]);
+		err = ops->ieee_setets(netdev, ets);
+		if (err)
+			goto err;
+	}
+
+	if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) {
+		struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]);
+		err = ops->ieee_setpfc(netdev, pfc);
+		if (err)
+			goto err;
+	}
+
+	if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
+		struct nlattr *attr;
+		int rem;
+
+		nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
+			struct dcb_app *app_data;
+			if (nla_type(attr) != DCB_ATTR_IEEE_APP)
+				continue;
+			app_data = nla_data(attr);
+			if (ops->ieee_setapp)
+				err = ops->ieee_setapp(netdev, app_data);
+			else
+				err = dcb_ieee_setapp(netdev, app_data);
+			if (err)
+				goto err;
+		}
+	}
+
+err:
+	dcbnl_reply(err, RTM_SETDCB, DCB_CMD_IEEE_SET, DCB_ATTR_IEEE,
+		    pid, seq, flags);
+	dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_SET, seq, 0);
+	return err;
+}
+
+static int dcbnl_ieee_get(struct net_device *netdev, struct nlattr **tb,
+			  u32 pid, u32 seq, u16 flags)
+{
+	struct net *net = dev_net(netdev);
+	struct sk_buff *skb;
+	struct nlmsghdr *nlh;
+	struct dcbmsg *dcb;
+	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
+	int err;
+
+	if (!ops)
+		return -EOPNOTSUPP;
+
+	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	if (!skb)
+		return -ENOBUFS;
+
+	nlh = nlmsg_put(skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
+	if (nlh == NULL) {
+		nlmsg_free(skb);
+		return -EMSGSIZE;
+	}
+
+	dcb = NLMSG_DATA(nlh);
+	dcb->dcb_family = AF_UNSPEC;
+	dcb->cmd = DCB_CMD_IEEE_GET;
+
+	err = dcbnl_ieee_fill(skb, netdev);
+
+	if (err < 0) {
+		nlmsg_cancel(skb, nlh);
+		kfree_skb(skb);
+	} else {
+		nlmsg_end(skb, nlh);
+		err = rtnl_unicast(skb, net, pid);
+	}
+
+	return err;
+}
+
+static int dcbnl_ieee_del(struct net_device *netdev, struct nlattr **tb,
+			  u32 pid, u32 seq, u16 flags)
+{
+	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
+	struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
+	int err = -EOPNOTSUPP;
+
+	if (!ops)
+		return -EOPNOTSUPP;
+
+	if (!tb[DCB_ATTR_IEEE])
+		return -EINVAL;
+
+	err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX,
+			       tb[DCB_ATTR_IEEE], dcbnl_ieee_policy);
+	if (err)
+		return err;
+
+	if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
+		struct nlattr *attr;
+		int rem;
+
+		nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
+			struct dcb_app *app_data;
+
+			if (nla_type(attr) != DCB_ATTR_IEEE_APP)
+				continue;
+			app_data = nla_data(attr);
+			if (ops->ieee_delapp)
+				err = ops->ieee_delapp(netdev, app_data);
+			else
+				err = dcb_ieee_delapp(netdev, app_data);
+			if (err)
+				goto err;
+		}
+	}
+
+err:
+	dcbnl_reply(err, RTM_SETDCB, DCB_CMD_IEEE_DEL, DCB_ATTR_IEEE,
+		    pid, seq, flags);
+	dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_DEL, seq, 0);
+	return err;
+}
+
+
 /* DCBX configuration */
 static int dcbnl_getdcbx(struct net_device *netdev, struct nlattr **tb,
 			 u32 pid, u32 seq, u16 flags)
@@ -1522,10 +1851,10 @@ err:
 static int dcbnl_cee_get(struct net_device *netdev, struct nlattr **tb,
 			 u32 pid, u32 seq, u16 flags)
 {
+	struct net *net = dev_net(netdev);
 	struct sk_buff *skb;
 	struct nlmsghdr *nlh;
 	struct dcbmsg *dcb;
-	struct nlattr *cee;
 	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
 	int err;
 
@@ -1536,51 +1865,26 @@ static int dcbnl_cee_get(struct net_device *netdev, struct nlattr **tb,
 	if (!skb)
 		return -ENOBUFS;
 
-	nlh = NLMSG_NEW(skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
+	nlh = nlmsg_put(skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
+	if (nlh == NULL) {
+		nlmsg_free(skb);
+		return -EMSGSIZE;
+	}
 
 	dcb = NLMSG_DATA(nlh);
 	dcb->dcb_family = AF_UNSPEC;
 	dcb->cmd = DCB_CMD_CEE_GET;
 
-	NLA_PUT_STRING(skb, DCB_ATTR_IFNAME, netdev->name);
-
-	cee = nla_nest_start(skb, DCB_ATTR_CEE);
-	if (!cee)
-		goto nla_put_failure;
-
-	/* get peer info if available */
-	if (ops->cee_peer_getpg) {
-		struct cee_pg pg;
-		err = ops->cee_peer_getpg(netdev, &pg);
-		if (!err)
-			NLA_PUT(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg);
-	}
-
-	if (ops->cee_peer_getpfc) {
-		struct cee_pfc pfc;
-		err = ops->cee_peer_getpfc(netdev, &pfc);
-		if (!err)
-			NLA_PUT(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc);
-	}
-
-	if (ops->peer_getappinfo && ops->peer_getapptable) {
-		err = dcbnl_build_peer_app(netdev, skb,
-					   DCB_ATTR_CEE_PEER_APP_TABLE,
-					   DCB_ATTR_CEE_PEER_APP_INFO,
-					   DCB_ATTR_CEE_PEER_APP);
-		if (err)
-			goto nla_put_failure;
-	}
-
-	nla_nest_end(skb, cee);
-	nlmsg_end(skb, nlh);
-
-	return rtnl_unicast(skb, &init_net, pid);
-nla_put_failure:
-	nlmsg_cancel(skb, nlh);
-nlmsg_failure:
-	kfree_skb(skb);
-	return -1;
+	err = dcbnl_cee_fill(skb, netdev);
+
+	if (err < 0) {
+		nlmsg_cancel(skb, nlh);
+		nlmsg_free(skb);
+	} else {
+		nlmsg_end(skb, nlh);
+		err = rtnl_unicast(skb, net, pid);
+	}
+	return err;
 }
 
 static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
@@ -1690,11 +1994,15 @@ static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 		goto out;
 	case DCB_CMD_IEEE_SET:
 		ret = dcbnl_ieee_set(netdev, tb, pid, nlh->nlmsg_seq,
 				     nlh->nlmsg_flags);
 		goto out;
 	case DCB_CMD_IEEE_GET:
 		ret = dcbnl_ieee_get(netdev, tb, pid, nlh->nlmsg_seq,
 				     nlh->nlmsg_flags);
+		goto out;
+	case DCB_CMD_IEEE_DEL:
+		ret = dcbnl_ieee_del(netdev, tb, pid, nlh->nlmsg_seq,
+				     nlh->nlmsg_flags);
 		goto out;
 	case DCB_CMD_GDCBX:
 		ret = dcbnl_getdcbx(netdev, tb, pid, nlh->nlmsg_seq,
@@ -1754,12 +2062,13 @@ u8 dcb_getapp(struct net_device *dev, struct dcb_app *app)
 EXPORT_SYMBOL(dcb_getapp);
 
 /**
- * ixgbe_dcbnl_setapp - add dcb application data to app list
+ * dcb_setapp - add CEE dcb application data to app list
  *
- * Priority 0 is the default priority this removes applications
- * from the app list if the priority is set to zero.
+ * Priority 0 is an invalid priority in CEE spec. This routine
+ * removes applications from the app list if the priority is
+ * set to zero.
  */
-u8 dcb_setapp(struct net_device *dev, struct dcb_app *new)
+int dcb_setapp(struct net_device *dev, struct dcb_app *new)
 {
 	struct dcb_app_type *itr;
 	struct dcb_app_type event;
@@ -1802,6 +2111,114 @@ out:
 }
 EXPORT_SYMBOL(dcb_setapp);
 
+/**
+ * dcb_ieee_getapp_mask - retrieve the IEEE DCB application priority
+ *
+ * Helper routine which on success returns a non-zero 802.1Qaz user
+ * priority bitmap otherwise returns 0 to indicate the dcb_app was
+ * not found in APP list.
+ */
+u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app)
+{
+	struct dcb_app_type *itr;
+	u8 prio = 0;
+
+	spin_lock(&dcb_lock);
+	list_for_each_entry(itr, &dcb_app_list, list) {
+		if (itr->app.selector == app->selector &&
+		    itr->app.protocol == app->protocol &&
+		    (strncmp(itr->name, dev->name, IFNAMSIZ) == 0)) {
+			prio |= 1 << itr->app.priority;
+		}
+	}
+	spin_unlock(&dcb_lock);
+
+	return prio;
+}
+EXPORT_SYMBOL(dcb_ieee_getapp_mask);
+
+/**
+ * dcb_ieee_setapp - add IEEE dcb application data to app list
+ *
+ * This adds Application data to the list. Multiple application
+ * entries may exists for the same selector and protocol as long
+ * as the priorities are different.
+ */
+int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
+{
+	struct dcb_app_type *itr, *entry;
+	struct dcb_app_type event;
+	int err = 0;
+
+	memcpy(&event.name, dev->name, sizeof(event.name));
+	memcpy(&event.app, new, sizeof(event.app));
+
+	spin_lock(&dcb_lock);
+	/* Search for existing match and abort if found */
+	list_for_each_entry(itr, &dcb_app_list, list) {
+		if (itr->app.selector == new->selector &&
+		    itr->app.protocol == new->protocol &&
+		    itr->app.priority == new->priority &&
+		    (strncmp(itr->name, dev->name, IFNAMSIZ) == 0)) {
+			err = -EEXIST;
+			goto out;
+		}
+	}
+
+	/* App entry does not exist add new entry */
+	entry = kmalloc(sizeof(struct dcb_app_type), GFP_ATOMIC);
+	if (!entry) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	memcpy(&entry->app, new, sizeof(*new));
+	strncpy(entry->name, dev->name, IFNAMSIZ);
+	list_add(&entry->list, &dcb_app_list);
+out:
+	spin_unlock(&dcb_lock);
+	if (!err)
+		call_dcbevent_notifiers(DCB_APP_EVENT, &event);
+	return err;
+}
+EXPORT_SYMBOL(dcb_ieee_setapp);
+
+/**
+ * dcb_ieee_delapp - delete IEEE dcb application data from list
+ *
+ * This removes a matching APP data from the APP list
+ */
+int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del)
+{
+	struct dcb_app_type *itr;
+	struct dcb_app_type event;
+	int err = -ENOENT;
+
+	memcpy(&event.name, dev->name, sizeof(event.name));
+	memcpy(&event.app, del, sizeof(event.app));
+
+	spin_lock(&dcb_lock);
+	/* Search for existing match and remove it. */
+	list_for_each_entry(itr, &dcb_app_list, list) {
+		if (itr->app.selector == del->selector &&
+		    itr->app.protocol == del->protocol &&
+		    itr->app.priority == del->priority &&
+		    (strncmp(itr->name, dev->name, IFNAMSIZ) == 0)) {
+			list_del(&itr->list);
+			kfree(itr);
+			err = 0;
+			goto out;
+		}
+	}
+
+out:
+	spin_unlock(&dcb_lock);
+	if (!err)
+		call_dcbevent_notifiers(DCB_APP_EVENT, &event);
+	return err;
+}
+EXPORT_SYMBOL(dcb_ieee_delapp);
+
 static void dcb_flushapp(void)
 {
 	struct dcb_app_type *app;
@@ -1819,8 +2236,8 @@ static int __init dcbnl_init(void)
 {
 	INIT_LIST_HEAD(&dcb_app_list);
 
-	rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL);
-	rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL);
+	rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL, NULL);
+	rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL, NULL);
 
 	return 0;
 }
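The new dcb_ieee_setapp()/dcb_ieee_delapp()/dcb_ieee_getapp_mask() helpers key APP entries by the full (selector, protocol, priority) triple. A hedged driver-side sketch (the function name and the priority value are illustrative; the calls match the helpers added above):

static int example_register_fcoe_prio(struct net_device *dev)
{
	struct dcb_app app = {
		.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
		.protocol = ETH_P_FCOE,
		.priority = 3,
	};
	int err = dcb_ieee_setapp(dev, &app);

	if (err)
		return err;                  /* -EEXIST if already present */

	/* bitmap of priorities mapped to this (selector, protocol) pair */
	return dcb_ieee_getapp_mask(dev, &app) ? 0 : -ENOENT;
}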
diff --git a/net/dccp/ccid.c b/net/dccp/ccid.c
index 36479ca61e0..48b585a5cba 100644
--- a/net/dccp/ccid.c
+++ b/net/dccp/ccid.c
@@ -118,7 +118,7 @@ static int ccid_activate(struct ccid_operations *ccid_ops)
 	if (ccid_ops->ccid_hc_tx_slab == NULL)
 		goto out_free_rx_slab;
 
-	pr_info("CCID: Activated CCID %d (%s)\n",
+	pr_info("DCCP: Activated CCID %d (%s)\n",
 		ccid_ops->ccid_id, ccid_ops->ccid_name);
 	err = 0;
 out:
@@ -136,7 +136,7 @@ static void ccid_deactivate(struct ccid_operations *ccid_ops)
 	ccid_kmem_cache_destroy(ccid_ops->ccid_hc_rx_slab);
 	ccid_ops->ccid_hc_rx_slab = NULL;
 
-	pr_info("CCID: Deactivated CCID %d (%s)\n",
+	pr_info("DCCP: Deactivated CCID %d (%s)\n",
 		ccid_ops->ccid_id, ccid_ops->ccid_name);
 }
 
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index fadecd20d75..0462040fc81 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -153,17 +153,93 @@ out:
 	sock_put(sk);
 }

+/*
+ * Congestion window validation (RFC 2861).
+ */
+static int ccid2_do_cwv = 1;
+module_param(ccid2_do_cwv, bool, 0644);
+MODULE_PARM_DESC(ccid2_do_cwv, "Perform RFC2861 Congestion Window Validation");
+
+/**
+ * ccid2_update_used_window  -  Track how much of cwnd is actually used
+ * This is done in addition to CWV. The sender needs to have an idea of how many
+ * packets may be in flight, to set the local Sequence Window value accordingly
+ * (RFC 4340, 7.5.2). The CWV mechanism is exploited to keep track of the
+ * maximum-used window. We use an EWMA low-pass filter to filter out noise.
+ */
+static void ccid2_update_used_window(struct ccid2_hc_tx_sock *hc, u32 new_wnd)
+{
+	hc->tx_expected_wnd = (3 * hc->tx_expected_wnd + new_wnd) / 4;
+}
+
+/* This borrows the code of tcp_cwnd_application_limited() */
+static void ccid2_cwnd_application_limited(struct sock *sk, const u32 now)
+{
+	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
+	/* don't reduce cwnd below the initial window (IW) */
+	u32 init_win = rfc3390_bytes_to_packets(dccp_sk(sk)->dccps_mss_cache),
+	    win_used = max(hc->tx_cwnd_used, init_win);
+
+	if (win_used < hc->tx_cwnd) {
+		hc->tx_ssthresh = max(hc->tx_ssthresh,
+				      (hc->tx_cwnd >> 1) + (hc->tx_cwnd >> 2));
+		hc->tx_cwnd = (hc->tx_cwnd + win_used) >> 1;
+	}
+	hc->tx_cwnd_used  = 0;
+	hc->tx_cwnd_stamp = now;
+}
+
+/* This borrows the code of tcp_cwnd_restart() */
+static void ccid2_cwnd_restart(struct sock *sk, const u32 now)
+{
+	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
+	u32 cwnd = hc->tx_cwnd, restart_cwnd,
+	    iwnd = rfc3390_bytes_to_packets(dccp_sk(sk)->dccps_mss_cache);
+
+	hc->tx_ssthresh = max(hc->tx_ssthresh, (cwnd >> 1) + (cwnd >> 2));
+
+	/* don't reduce cwnd below the initial window (IW) */
+	restart_cwnd = min(cwnd, iwnd);
+	cwnd >>= (now - hc->tx_lsndtime) / hc->tx_rto;
+	hc->tx_cwnd = max(cwnd, restart_cwnd);
+
+	hc->tx_cwnd_stamp = now;
+	hc->tx_cwnd_used  = 0;
+}
+
 static void ccid2_hc_tx_packet_sent(struct sock *sk, unsigned int len)
 {
 	struct dccp_sock *dp = dccp_sk(sk);
 	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
+	const u32 now = ccid2_time_stamp;
 	struct ccid2_seq *next;

-	hc->tx_pipe++;
+	/* slow-start after idle periods (RFC 2581, RFC 2861) */
+	if (ccid2_do_cwv && !hc->tx_pipe &&
+	    (s32)(now - hc->tx_lsndtime) >= hc->tx_rto)
+		ccid2_cwnd_restart(sk, now);
+
+	hc->tx_lsndtime = now;
+	hc->tx_pipe    += 1;
+
+	/* see whether cwnd was fully used (RFC 2861), update expected window */
+	if (ccid2_cwnd_network_limited(hc)) {
+		ccid2_update_used_window(hc, hc->tx_cwnd);
+		hc->tx_cwnd_used  = 0;
+		hc->tx_cwnd_stamp = now;
+	} else {
+		if (hc->tx_pipe > hc->tx_cwnd_used)
+			hc->tx_cwnd_used = hc->tx_pipe;
+
+		ccid2_update_used_window(hc, hc->tx_cwnd_used);
+
+		if (ccid2_do_cwv && (s32)(now - hc->tx_cwnd_stamp) >= hc->tx_rto)
+			ccid2_cwnd_application_limited(sk, now);
+	}

 	hc->tx_seqh->ccid2s_seq   = dp->dccps_gss;
 	hc->tx_seqh->ccid2s_acked = 0;
-	hc->tx_seqh->ccid2s_sent  = ccid2_time_stamp;
+	hc->tx_seqh->ccid2s_sent  = now;

 	next = hc->tx_seqh->ccid2s_next;
 	/* check if we need to alloc more space */
@@ -583,15 +659,6 @@ done:
 	dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);
 }

-/*
- * Convert RFC 3390 larger initial window into an equivalent number of packets.
- * This is based on the numbers specified in RFC 5681, 3.1.
- */
-static inline u32 rfc3390_bytes_to_packets(const u32 smss)
-{
-	return smss <= 1095 ? 4 : (smss > 2190 ? 2 : 3);
-}
-
 static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
 {
 	struct ccid2_hc_tx_sock *hc = ccid_priv(ccid);
@@ -603,6 +670,7 @@ static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)

 	/* Use larger initial windows (RFC 4341, section 5). */
 	hc->tx_cwnd = rfc3390_bytes_to_packets(dp->dccps_mss_cache);
+	hc->tx_expected_wnd = hc->tx_cwnd;

 	/* Make sure that Ack Ratio is enabled and within bounds. */
 	max_ratio = DIV_ROUND_UP(hc->tx_cwnd, 2);
@@ -615,7 +683,8 @@ static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)

 	hc->tx_rto	 = DCCP_TIMEOUT_INIT;
 	hc->tx_rpdupack  = -1;
-	hc->tx_last_cong = ccid2_time_stamp;
+	hc->tx_last_cong = hc->tx_lsndtime = hc->tx_cwnd_stamp = ccid2_time_stamp;
+	hc->tx_cwnd_used = 0;
 	setup_timer(&hc->tx_rtotimer, ccid2_hc_tx_rto_expire,
 			(unsigned long)sk);
 	INIT_LIST_HEAD(&hc->tx_av_chunks);
@@ -636,18 +705,14 @@ static void ccid2_hc_tx_exit(struct sock *sk)

 static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
 {
-	const struct dccp_sock *dp = dccp_sk(sk);
 	struct ccid2_hc_rx_sock *hc = ccid2_hc_rx_sk(sk);

-	switch (DCCP_SKB_CB(skb)->dccpd_type) {
-	case DCCP_PKT_DATA:
-	case DCCP_PKT_DATAACK:
-		hc->rx_data++;
-		if (hc->rx_data >= dp->dccps_r_ack_ratio) {
-			dccp_send_ack(sk);
-			hc->rx_data = 0;
-		}
-		break;
+	if (!dccp_data_packet(skb))
+		return;
+
+	if (++hc->rx_num_data_pkts >= dccp_sk(sk)->dccps_r_ack_ratio) {
+		dccp_send_ack(sk);
+		hc->rx_num_data_pkts = 0;
 	}
 }

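The ccid2_update_used_window() helper added above is a one-line EWMA with alpha = 1/4 in integer arithmetic: new_avg = (3 * old_avg + sample) / 4. A standalone user-space sketch (names illustrative, not kernel code) showing how the filter smooths a noisy used-window sample:

	#include <stdint.h>
	#include <stdio.h>

	/* same filter as ccid2_update_used_window(), alpha = 1/4 */
	static uint32_t ewma_update(uint32_t avg, uint32_t sample)
	{
		return (3 * avg + sample) / 4;
	}

	int main(void)
	{
		uint32_t expected_wnd = 4;	/* seeded from the initial cwnd */
		uint32_t samples[] = { 10, 10, 10, 2 };

		for (unsigned int i = 0; i < 4; i++) {
			expected_wnd = ewma_update(expected_wnd, samples[i]);
			printf("sample=%u avg=%u\n", samples[i], expected_wnd);
		}
		return 0;	/* prints avg 5, 6, 7, 5: rises and decays smoothly */
	}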
diff --git a/net/dccp/ccids/ccid2.h b/net/dccp/ccids/ccid2.h
index e9985dafc2c..f585d330e1e 100644
--- a/net/dccp/ccids/ccid2.h
+++ b/net/dccp/ccids/ccid2.h
@@ -53,6 +53,10 @@ struct ccid2_seq {
  * @tx_rttvar: moving average/maximum of @mdev_max
  * @tx_rto: RTO value deriving from SRTT and RTTVAR (RFC 2988)
  * @tx_rtt_seq: to decay RTTVAR at most once per flight
+ * @tx_cwnd_used: actually used cwnd, W_used of RFC 2861
+ * @tx_expected_wnd: moving average of @tx_cwnd_used
+ * @tx_cwnd_stamp: to track idle periods in CWV
+ * @tx_lsndtime: last time (in jiffies) a data packet was sent
  * @tx_rpseq: last consecutive seqno
  * @tx_rpdupack: dupacks since rpseq
  * @tx_av_chunks: list of Ack Vectors received on current skb
@@ -76,6 +80,12 @@ struct ccid2_hc_tx_sock {
 	u64			tx_rtt_seq:48;
 	struct timer_list	tx_rtotimer;

+	/* Congestion Window validation (optional, RFC 2861) */
+	u32			tx_cwnd_used,
+				tx_expected_wnd,
+				tx_cwnd_stamp,
+				tx_lsndtime;
+
 	u64			tx_rpseq;
 	int			tx_rpdupack;
 	u32			tx_last_cong;
@@ -88,8 +98,21 @@ static inline bool ccid2_cwnd_network_limited(struct ccid2_hc_tx_sock *hc)
 	return hc->tx_pipe >= hc->tx_cwnd;
 }

+/*
+ * Convert RFC 3390 larger initial window into an equivalent number of packets.
+ * This is based on the numbers specified in RFC 5681, 3.1.
+ */
+static inline u32 rfc3390_bytes_to_packets(const u32 smss)
+{
+	return smss <= 1095 ? 4 : (smss > 2190 ? 2 : 3);
+}
+
+/**
+ * struct ccid2_hc_rx_sock  -  Receiving end of CCID-2 half-connection
+ * @rx_num_data_pkts: number of data packets received since last feedback
+ */
 struct ccid2_hc_rx_sock {
-	int	rx_data;
+	u32	rx_num_data_pkts;
 };

 static inline struct ccid2_hc_tx_sock *ccid2_hc_tx_sk(const struct sock *sk)
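rfc3390_bytes_to_packets(), moved into this header so ccid2.c can reuse it for CWV, encodes the initial-window table of RFC 5681, 3.1: four segments when SMSS <= 1095 bytes, three up to 2190 bytes, two beyond that. A quick user-space check of the mapping (a sketch with u32 swapped for uint32_t):

	#include <assert.h>
	#include <stdint.h>

	static inline uint32_t rfc3390_bytes_to_packets(const uint32_t smss)
	{
		return smss <= 1095 ? 4 : (smss > 2190 ? 2 : 3);
	}

	int main(void)
	{
		assert(rfc3390_bytes_to_packets(536)  == 4);	/* classic MSS */
		assert(rfc3390_bytes_to_packets(1460) == 3);	/* Ethernet MSS */
		assert(rfc3390_bytes_to_packets(8960) == 2);	/* jumbo frames */
		return 0;
	}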
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 4222e7a654b..51d5fe5fffb 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -619,20 +619,31 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 		return 1;
 	}

-	if (sk->sk_state != DCCP_REQUESTING && sk->sk_state != DCCP_RESPOND) {
-		if (dccp_check_seqno(sk, skb))
-			goto discard;
-
-		/*
-		 * Step 8: Process options and mark acknowledgeable
-		 */
-		if (dccp_parse_options(sk, NULL, skb))
-			return 1;
-
-		dccp_handle_ackvec_processing(sk, skb);
-		dccp_deliver_input_to_ccids(sk, skb);
+	/* Step 6: Check sequence numbers (omitted in LISTEN/REQUEST state) */
+	if (sk->sk_state != DCCP_REQUESTING && dccp_check_seqno(sk, skb))
+		goto discard;
+
+	/*
+	 *   Step 7: Check for unexpected packet types
+	 *	If (S.is_server and P.type == Response)
+	 *	    or (S.is_client and P.type == Request)
+	 *	    or (S.state == RESPOND and P.type == Data),
+	 *	  Send Sync packet acknowledging P.seqno
+	 *	  Drop packet and return
+	 */
+	if ((dp->dccps_role != DCCP_ROLE_CLIENT &&
+	     dh->dccph_type == DCCP_PKT_RESPONSE) ||
+	    (dp->dccps_role == DCCP_ROLE_CLIENT &&
+	     dh->dccph_type == DCCP_PKT_REQUEST) ||
+	    (sk->sk_state == DCCP_RESPOND && dh->dccph_type == DCCP_PKT_DATA)) {
+		dccp_send_sync(sk, dcb->dccpd_seq, DCCP_PKT_SYNC);
+		goto discard;
 	}

+	/* Step 8: Process options */
+	if (dccp_parse_options(sk, NULL, skb))
+		return 1;
+
 	/*
 	 * Step 9: Process Reset
 	 *	If P.type == Reset,
@@ -640,31 +651,15 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 	 *		S.state := TIMEWAIT
 	 *		Set TIMEWAIT timer
 	 *		Drop packet and return
 	 */
 	if (dh->dccph_type == DCCP_PKT_RESET) {
 		dccp_rcv_reset(sk, skb);
 		return 0;
-		/*
-		 *   Step 7: Check for unexpected packet types
-		 *      If (S.is_server and P.type == Response)
-		 *	    or (S.is_client and P.type == Request)
-		 *	    or (S.state == RESPOND and P.type == Data),
-		 *	  Send Sync packet acknowledging P.seqno
-		 *	  Drop packet and return
-		 */
-	} else if ((dp->dccps_role != DCCP_ROLE_CLIENT &&
-		    dh->dccph_type == DCCP_PKT_RESPONSE) ||
-		   (dp->dccps_role == DCCP_ROLE_CLIENT &&
-		    dh->dccph_type == DCCP_PKT_REQUEST) ||
-		   (sk->sk_state == DCCP_RESPOND &&
-		    dh->dccph_type == DCCP_PKT_DATA)) {
-		dccp_send_sync(sk, dcb->dccpd_seq, DCCP_PKT_SYNC);
-		goto discard;
-	} else if (dh->dccph_type == DCCP_PKT_CLOSEREQ) {
+	} else if (dh->dccph_type == DCCP_PKT_CLOSEREQ) {	/* Step 13 */
 		if (dccp_rcv_closereq(sk, skb))
 			return 0;
 		goto discard;
-	} else if (dh->dccph_type == DCCP_PKT_CLOSE) {
+	} else if (dh->dccph_type == DCCP_PKT_CLOSE) {		/* Step 14 */
 		if (dccp_rcv_close(sk, skb))
 			return 0;
 		goto discard;
@@ -679,8 +674,12 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 		__kfree_skb(skb);
 		return 0;

-	case DCCP_RESPOND:
 	case DCCP_PARTOPEN:
+		/* Step 8: if using Ack Vectors, mark packet acknowledgeable */
+		dccp_handle_ackvec_processing(sk, skb);
+		dccp_deliver_input_to_ccids(sk, skb);
+		/* fall through */
+	case DCCP_RESPOND:
 		queued = dccp_rcv_respond_partopen_state_process(sk, skb,
 								 dh, len);
 		break;
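The reordered slow path above now runs the RFC 4340 Step 7 test before option parsing. Restated as a standalone predicate for readability (this helper is not in the kernel source; it assumes the dccp_sock/dccp_hdr definitions from net/dccp):

	static bool dccp_unexpected_packet_type(const struct dccp_sock *dp,
						const struct dccp_hdr *dh,
						int sk_state)
	{
		if (dp->dccps_role != DCCP_ROLE_CLIENT &&
		    dh->dccph_type == DCCP_PKT_RESPONSE)
			return true;	/* a server never receives a Response */
		if (dp->dccps_role == DCCP_ROLE_CLIENT &&
		    dh->dccph_type == DCCP_PKT_REQUEST)
			return true;	/* a client never receives a Request */
		/* in RESPOND state, Data arrives before the handshake is done */
		return sk_state == DCCP_RESPOND && dh->dccph_type == DCCP_PKT_DATA;
	}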
diff --git a/net/dccp/output.c b/net/dccp/output.c
index fab108e51e5..dede3edb884 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -27,11 +27,13 @@ static inline void dccp_event_ack_sent(struct sock *sk)
 	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
 }

-static void dccp_skb_entail(struct sock *sk, struct sk_buff *skb)
+/* enqueue @skb on sk_send_head for retransmission, return clone to send now */
+static struct sk_buff *dccp_skb_entail(struct sock *sk, struct sk_buff *skb)
 {
 	skb_set_owner_w(skb, sk);
 	WARN_ON(sk->sk_send_head);
 	sk->sk_send_head = skb;
+	return skb_clone(sk->sk_send_head, gfp_any());
 }

 /*
@@ -552,8 +554,7 @@ int dccp_connect(struct sock *sk)

 	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_REQUEST;

-	dccp_skb_entail(sk, skb);
-	dccp_transmit_skb(sk, skb_clone(skb, GFP_KERNEL));
+	dccp_transmit_skb(sk, dccp_skb_entail(sk, skb));
 	DCCP_INC_STATS(DCCP_MIB_ACTIVEOPENS);

 	/* Timer for repeating the REQUEST until an answer. */
@@ -678,8 +679,7 @@ void dccp_send_close(struct sock *sk, const int active)
 	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSE;

 	if (active) {
-		dccp_skb_entail(sk, skb);
-		dccp_transmit_skb(sk, skb_clone(skb, prio));
+		skb = dccp_skb_entail(sk, skb);
 		/*
 		 * Retransmission timer for active-close: RFC 4340, 8.3 requires
 		 * to retransmit the Close/CloseReq until the CLOSING/CLOSEREQ
@@ -692,6 +692,6 @@ void dccp_send_close(struct sock *sk, const int active)
 		 */
 		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
 					  DCCP_TIMEOUT_INIT, DCCP_RTO_MAX);
-	} else
+	}
 	dccp_transmit_skb(sk, skb);
 }
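dccp_skb_entail() now queues the original skb on sk_send_head and hands back a clone, so both call sites collapse into a single expression. The calling pattern after this change (a fragment, not new code; dccp_transmit_skb() appears to tolerate a NULL skb, so a failed clone simply leaves the first transmission to the retransmit timer):

	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_REQUEST;
	/* original stays queued for retransmission; the clone goes out now */
	dccp_transmit_skb(sk, dccp_skb_entail(sk, skb));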
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index ea3b6ee21fc..19acd00a638 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -291,23 +291,23 @@ int dn_sockaddr2username(struct sockaddr_dn *sdn, unsigned char *buf, unsigned c

 	*buf++ = type;

-	switch(type) {
+	switch (type) {
 	case 0:
 		*buf++ = sdn->sdn_objnum;
 		break;
 	case 1:
 		*buf++ = 0;
 		*buf++ = le16_to_cpu(sdn->sdn_objnamel);
 		memcpy(buf, sdn->sdn_objname, le16_to_cpu(sdn->sdn_objnamel));
 		len = 3 + le16_to_cpu(sdn->sdn_objnamel);
 		break;
 	case 2:
 		memset(buf, 0, 5);
 		buf += 5;
 		*buf++ = le16_to_cpu(sdn->sdn_objnamel);
 		memcpy(buf, sdn->sdn_objname, le16_to_cpu(sdn->sdn_objnamel));
 		len = 7 + le16_to_cpu(sdn->sdn_objnamel);
 		break;
 	}

 	return len;
@@ -337,23 +337,23 @@ int dn_username2sockaddr(unsigned char *data, int len, struct sockaddr_dn *sdn,
 	*fmt = *data++;
 	type = *data++;

-	switch(*fmt) {
+	switch (*fmt) {
 	case 0:
 		sdn->sdn_objnum = type;
 		return 2;
 	case 1:
 		namel = 16;
 		break;
 	case 2:
 		len -= 4;
 		data += 4;
 		break;
 	case 4:
 		len -= 8;
 		data += 8;
 		break;
 	default:
 		return -1;
 	}

 	len -= 1;
@@ -575,25 +575,26 @@ int dn_destroy_timer(struct sock *sk)

 	scp->persist = dn_nsp_persist(sk);

-	switch(scp->state) {
+	switch (scp->state) {
 	case DN_DI:
 		dn_nsp_send_disc(sk, NSP_DISCINIT, 0, GFP_ATOMIC);
 		if (scp->nsp_rxtshift >= decnet_di_count)
 			scp->state = DN_CN;
 		return 0;

 	case DN_DR:
 		dn_nsp_send_disc(sk, NSP_DISCINIT, 0, GFP_ATOMIC);
 		if (scp->nsp_rxtshift >= decnet_dr_count)
 			scp->state = DN_DRC;
 		return 0;

 	case DN_DN:
 		if (scp->nsp_rxtshift < decnet_dn_count) {
 			/* printk(KERN_DEBUG "dn_destroy_timer: DN\n"); */
-			dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC, GFP_ATOMIC);
+			dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC,
+					 GFP_ATOMIC);
 			return 0;
 		}
 	}

 	scp->persist = (HZ * decnet_time_wait);
@@ -623,42 +624,42 @@ static void dn_destroy_sock(struct sock *sk)

 	sk->sk_state = TCP_CLOSE;

-	switch(scp->state) {
+	switch (scp->state) {
 	case DN_DN:
 		dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC,
 				 sk->sk_allocation);
 		scp->persist_fxn = dn_destroy_timer;
 		scp->persist = dn_nsp_persist(sk);
 		break;
 	case DN_CR:
 		scp->state = DN_DR;
 		goto disc_reject;
 	case DN_RUN:
 		scp->state = DN_DI;
 	case DN_DI:
 	case DN_DR:
 disc_reject:
 		dn_nsp_send_disc(sk, NSP_DISCINIT, 0, sk->sk_allocation);
 	case DN_NC:
 	case DN_NR:
 	case DN_RJ:
 	case DN_DIC:
 	case DN_CN:
 	case DN_DRC:
 	case DN_CI:
 	case DN_CD:
 		scp->persist_fxn = dn_destroy_timer;
 		scp->persist = dn_nsp_persist(sk);
 		break;
 	default:
 		printk(KERN_DEBUG "DECnet: dn_destroy_sock passed socket in invalid state\n");
 	case DN_O:
 		dn_stop_slow_timer(sk);

 		dn_unhash_sock_bh(sk);
 		sock_put(sk);

 		break;
 	}
 }

@@ -683,15 +684,15 @@ static int dn_create(struct net *net, struct socket *sock, int protocol,
 	if (!net_eq(net, &init_net))
 		return -EAFNOSUPPORT;

-	switch(sock->type) {
+	switch (sock->type) {
 	case SOCK_SEQPACKET:
 		if (protocol != DNPROTO_NSP)
 			return -EPROTONOSUPPORT;
 		break;
 	case SOCK_STREAM:
 		break;
 	default:
 		return -ESOCKTNOSUPPORT;
 	}


@@ -987,16 +988,16 @@ static inline int dn_check_state(struct sock *sk, struct sockaddr_dn *addr, int
 {
 	struct dn_scp *scp = DN_SK(sk);

-	switch(scp->state) {
+	switch (scp->state) {
 	case DN_RUN:
 		return 0;
 	case DN_CR:
 		return dn_confirm_accept(sk, timeo, sk->sk_allocation);
 	case DN_CI:
 	case DN_CC:
 		return dn_wait_run(sk, timeo);
 	case DN_O:
 		return __dn_connect(sk, addr, addrlen, timeo, flags);
 	}

 	return -EINVAL;
@@ -1363,141 +1364,140 @@ static int __dn_setsockopt(struct socket *sock, int level,int optname, char __us
 	if (copy_from_user(&u, optval, optlen))
 		return -EFAULT;

-	switch(optname) {
+	switch (optname) {
 	case DSO_CONDATA:
 		if (sock->state == SS_CONNECTED)
 			return -EISCONN;
 		if ((scp->state != DN_O) && (scp->state != DN_CR))
 			return -EINVAL;

 		if (optlen != sizeof(struct optdata_dn))
 			return -EINVAL;

 		if (le16_to_cpu(u.opt.opt_optl) > 16)
 			return -EINVAL;

 		memcpy(&scp->conndata_out, &u.opt, optlen);
 		break;

 	case DSO_DISDATA:
-		if (sock->state != SS_CONNECTED && scp->accept_mode == ACC_IMMED)
+		if (sock->state != SS_CONNECTED &&
+		    scp->accept_mode == ACC_IMMED)
 			return -ENOTCONN;

 		if (optlen != sizeof(struct optdata_dn))
 			return -EINVAL;

 		if (le16_to_cpu(u.opt.opt_optl) > 16)
 			return -EINVAL;

 		memcpy(&scp->discdata_out, &u.opt, optlen);
 		break;

 	case DSO_CONACCESS:
 		if (sock->state == SS_CONNECTED)
 			return -EISCONN;
 		if (scp->state != DN_O)
 			return -EINVAL;

 		if (optlen != sizeof(struct accessdata_dn))
 			return -EINVAL;

 		if ((u.acc.acc_accl > DN_MAXACCL) ||
 		    (u.acc.acc_passl > DN_MAXACCL) ||
 		    (u.acc.acc_userl > DN_MAXACCL))
 			return -EINVAL;

 		memcpy(&scp->accessdata, &u.acc, optlen);
 		break;

 	case DSO_ACCEPTMODE:
 		if (sock->state == SS_CONNECTED)
 			return -EISCONN;
 		if (scp->state != DN_O)
 			return -EINVAL;

 		if (optlen != sizeof(int))
 			return -EINVAL;

 		if ((u.mode != ACC_IMMED) && (u.mode != ACC_DEFER))
 			return -EINVAL;

 		scp->accept_mode = (unsigned char)u.mode;
 		break;

 	case DSO_CONACCEPT:
-
 		if (scp->state != DN_CR)
 			return -EINVAL;
 		timeo = sock_rcvtimeo(sk, 0);
 		err = dn_confirm_accept(sk, &timeo, sk->sk_allocation);
 		return err;

 	case DSO_CONREJECT:
-
 		if (scp->state != DN_CR)
 			return -EINVAL;

 		scp->state = DN_DR;
 		sk->sk_shutdown = SHUTDOWN_MASK;
 		dn_nsp_send_disc(sk, 0x38, 0, sk->sk_allocation);
 		break;

 	default:
 #ifdef CONFIG_NETFILTER
 		return nf_setsockopt(sk, PF_DECnet, optname, optval, optlen);
 #endif
 	case DSO_LINKINFO:
 	case DSO_STREAM:
 	case DSO_SEQPACKET:
 		return -ENOPROTOOPT;

 	case DSO_MAXWINDOW:
 		if (optlen != sizeof(unsigned long))
 			return -EINVAL;
 		if (u.win > NSP_MAX_WINDOW)
 			u.win = NSP_MAX_WINDOW;
 		if (u.win == 0)
 			return -EINVAL;
 		scp->max_window = u.win;
 		if (scp->snd_window > u.win)
 			scp->snd_window = u.win;
 		break;

 	case DSO_NODELAY:
 		if (optlen != sizeof(int))
 			return -EINVAL;
 		if (scp->nonagle == 2)
 			return -EINVAL;
 		scp->nonagle = (u.val == 0) ? 0 : 1;
 		/* if (scp->nonagle == 1) { Push pending frames } */
 		break;

 	case DSO_CORK:
 		if (optlen != sizeof(int))
 			return -EINVAL;
 		if (scp->nonagle == 1)
 			return -EINVAL;
 		scp->nonagle = (u.val == 0) ? 0 : 2;
 		/* if (scp->nonagle == 0) { Push pending frames } */
 		break;

 	case DSO_SERVICES:
 		if (optlen != sizeof(unsigned char))
 			return -EINVAL;
 		if ((u.services & ~NSP_FC_MASK) != 0x01)
 			return -EINVAL;
 		if ((u.services & NSP_FC_MASK) == NSP_FC_MASK)
 			return -EINVAL;
 		scp->services_loc = u.services;
 		break;

 	case DSO_INFO:
 		if (optlen != sizeof(unsigned char))
 			return -EINVAL;
 		if (u.info & 0xfc)
 			return -EINVAL;
 		scp->info_loc = u.info;
 		break;
 	}

 	return 0;
@@ -1527,107 +1527,106 @@ static int __dn_getsockopt(struct socket *sock, int level,int optname, char __us
 	if(get_user(r_len , optlen))
 		return -EFAULT;

-	switch(optname) {
+	switch (optname) {
 	case DSO_CONDATA:
 		if (r_len > sizeof(struct optdata_dn))
 			r_len = sizeof(struct optdata_dn);
 		r_data = &scp->conndata_in;
 		break;

 	case DSO_DISDATA:
 		if (r_len > sizeof(struct optdata_dn))
 			r_len = sizeof(struct optdata_dn);
 		r_data = &scp->discdata_in;
 		break;

 	case DSO_CONACCESS:
 		if (r_len > sizeof(struct accessdata_dn))
 			r_len = sizeof(struct accessdata_dn);
 		r_data = &scp->accessdata;
 		break;

 	case DSO_ACCEPTMODE:
 		if (r_len > sizeof(unsigned char))
 			r_len = sizeof(unsigned char);
 		r_data = &scp->accept_mode;
 		break;

 	case DSO_LINKINFO:
 		if (r_len > sizeof(struct linkinfo_dn))
 			r_len = sizeof(struct linkinfo_dn);

 		memset(&link, 0, sizeof(link));

-		switch(sock->state) {
+		switch (sock->state) {
 		case SS_CONNECTING:
 			link.idn_linkstate = LL_CONNECTING;
 			break;
 		case SS_DISCONNECTING:
 			link.idn_linkstate = LL_DISCONNECTING;
 			break;
 		case SS_CONNECTED:
 			link.idn_linkstate = LL_RUNNING;
 			break;
 		default:
 			link.idn_linkstate = LL_INACTIVE;
 		}

 		link.idn_segsize = scp->segsize_rem;
 		r_data = &link;
 		break;

 	default:
 #ifdef CONFIG_NETFILTER
 	{
 		int ret, len;

-		if(get_user(len, optlen))
+		if (get_user(len, optlen))
 			return -EFAULT;

-		ret = nf_getsockopt(sk, PF_DECnet, optname,
-				    optval, &len);
+		ret = nf_getsockopt(sk, PF_DECnet, optname, optval, &len);
 		if (ret >= 0)
 			ret = put_user(len, optlen);
 		return ret;
 	}
 #endif
 	case DSO_STREAM:
 	case DSO_SEQPACKET:
 	case DSO_CONACCEPT:
 	case DSO_CONREJECT:
 		return -ENOPROTOOPT;

 	case DSO_MAXWINDOW:
 		if (r_len > sizeof(unsigned long))
 			r_len = sizeof(unsigned long);
 		r_data = &scp->max_window;
 		break;

 	case DSO_NODELAY:
 		if (r_len > sizeof(int))
 			r_len = sizeof(int);
 		val = (scp->nonagle == 1);
 		r_data = &val;
 		break;

 	case DSO_CORK:
 		if (r_len > sizeof(int))
 			r_len = sizeof(int);
 		val = (scp->nonagle == 2);
 		r_data = &val;
 		break;

 	case DSO_SERVICES:
 		if (r_len > sizeof(unsigned char))
 			r_len = sizeof(unsigned char);
 		r_data = &scp->services_rem;
 		break;

 	case DSO_INFO:
 		if (r_len > sizeof(unsigned char))
 			r_len = sizeof(unsigned char);
 		r_data = &scp->info_rem;
 		break;
 	}

 	if (r_data) {
@@ -2088,15 +2087,15 @@ static int dn_device_event(struct notifier_block *this, unsigned long event,
 	if (!net_eq(dev_net(dev), &init_net))
 		return NOTIFY_DONE;

-	switch(event) {
+	switch (event) {
 	case NETDEV_UP:
 		dn_dev_up(dev);
 		break;
 	case NETDEV_DOWN:
 		dn_dev_down(dev);
 		break;
 	default:
 		break;
 	}

 	return NOTIFY_DONE;
@@ -2209,54 +2208,54 @@ static void dn_printable_object(struct sockaddr_dn *dn, unsigned char *buf)
 	int i;

 	switch (le16_to_cpu(dn->sdn_objnamel)) {
 	case 0:
 		sprintf(buf, "%d", dn->sdn_objnum);
 		break;
 	default:
 		for (i = 0; i < le16_to_cpu(dn->sdn_objnamel); i++) {
 			buf[i] = dn->sdn_objname[i];
 			if (IS_NOT_PRINTABLE(buf[i]))
 				buf[i] = '.';
 		}
 		buf[i] = 0;
 	}
 }

 static char *dn_state2asc(unsigned char state)
 {
-	switch(state) {
+	switch (state) {
 	case DN_O:
 		return "OPEN";
 	case DN_CR:
 		return " CR";
 	case DN_DR:
 		return " DR";
 	case DN_DRC:
 		return " DRC";
 	case DN_CC:
 		return " CC";
 	case DN_CI:
 		return " CI";
 	case DN_NR:
 		return " NR";
 	case DN_NC:
 		return " NC";
 	case DN_CD:
 		return " CD";
 	case DN_RJ:
 		return " RJ";
 	case DN_RUN:
 		return " RUN";
 	case DN_DI:
 		return " DI";
 	case DN_DIC:
 		return " DIC";
 	case DN_DN:
 		return " DN";
 	case DN_CL:
 		return " CL";
 	case DN_CN:
 		return " CN";
 	}

 	return "????";
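Almost all of the af_decnet.c churn above is mechanical restyling: a space after the switch keyword and case labels out-dented to the same column as switch, per the kernel's preferred form. For reference, the target shape is:

	switch (scp->state) {
	case DN_RUN:
		return 0;
	default:
		return -EINVAL;
	}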
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index cf26ac74a18..ba4faceec40 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -437,17 +437,17 @@ int dn_dev_ioctl(unsigned int cmd, void __user *arg)

 	dev_load(&init_net, ifr->ifr_name);

-	switch(cmd) {
+	switch (cmd) {
 	case SIOCGIFADDR:
 		break;
 	case SIOCSIFADDR:
 		if (!capable(CAP_NET_ADMIN))
 			return -EACCES;
 		if (sdn->sdn_family != AF_DECnet)
 			return -EINVAL;
 		break;
 	default:
 		return -EINVAL;
 	}

 	rtnl_lock();
@@ -470,27 +470,27 @@ int dn_dev_ioctl(unsigned int cmd, void __user *arg)
 		goto done;
 	}

-	switch(cmd) {
+	switch (cmd) {
 	case SIOCGIFADDR:
 		*((__le16 *)sdn->sdn_nodeaddr) = ifa->ifa_local;
 		goto rarok;

 	case SIOCSIFADDR:
 		if (!ifa) {
 			if ((ifa = dn_dev_alloc_ifa()) == NULL) {
 				ret = -ENOBUFS;
 				break;
 			}
 			memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
 		} else {
 			if (ifa->ifa_local == dn_saddr2dn(sdn))
 				break;
 			dn_dev_del_ifa(dn_db, ifap, 0);
 		}

 		ifa->ifa_local = ifa->ifa_address = dn_saddr2dn(sdn);

 		ret = dn_dev_set_ifa(dev, ifa);
 	}
 done:
 	rtnl_unlock();
@@ -1313,7 +1313,7 @@ static void *dn_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)

 	++*pos;

-	dev = (struct net_device *)v;
+	dev = v;
 	if (v == SEQ_START_TOKEN)
 		dev = net_device_entry(&init_net.dev_base_head);

@@ -1335,13 +1335,13 @@ static void dn_dev_seq_stop(struct seq_file *seq, void *v)

 static char *dn_type2asc(char type)
 {
-	switch(type) {
+	switch (type) {
 	case DN_DEV_BCAST:
 		return "B";
 	case DN_DEV_UCAST:
 		return "U";
 	case DN_DEV_MPOINT:
 		return "M";
 	}

 	return "?";
@@ -1414,9 +1414,9 @@ void __init dn_dev_init(void)

 	dn_dev_devices_on();

-	rtnl_register(PF_DECnet, RTM_NEWADDR, dn_nl_newaddr, NULL);
-	rtnl_register(PF_DECnet, RTM_DELADDR, dn_nl_deladdr, NULL);
-	rtnl_register(PF_DECnet, RTM_GETADDR, NULL, dn_nl_dump_ifaddr);
+	rtnl_register(PF_DECnet, RTM_NEWADDR, dn_nl_newaddr, NULL, NULL);
+	rtnl_register(PF_DECnet, RTM_DELADDR, dn_nl_deladdr, NULL, NULL);
+	rtnl_register(PF_DECnet, RTM_GETADDR, NULL, dn_nl_dump_ifaddr, NULL);

 	proc_net_fops_create(&init_net, "decnet_dev", S_IRUGO, &dn_dev_seq_fops);

diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c
index 1c74ed36ce8..2bd8e53d777 100644
--- a/net/decnet/dn_fib.c
+++ b/net/decnet/dn_fib.c
@@ -414,33 +414,34 @@ int dn_fib_semantic_match(int type, struct dn_fib_info *fi, const struct flowidn

 		res->fi = fi;

-		switch(type) {
+		switch (type) {
 		case RTN_NAT:
 			DN_FIB_RES_RESET(*res);
 			atomic_inc(&fi->fib_clntref);
 			return 0;
 		case RTN_UNICAST:
 		case RTN_LOCAL:
 			for_nexthops(fi) {
 				if (nh->nh_flags & RTNH_F_DEAD)
 					continue;
 				if (!fld->flowidn_oif ||
 				    fld->flowidn_oif == nh->nh_oif)
 					break;
 			}
 			if (nhsel < fi->fib_nhs) {
 				res->nh_sel = nhsel;
 				atomic_inc(&fi->fib_clntref);
 				return 0;
 			}
 			endfor_nexthops(fi);
 			res->fi = NULL;
 			return 1;
 		default:
 			if (net_ratelimit())
-				printk("DECnet: impossible routing event : dn_fib_semantic_match type=%d\n", type);
+				printk("DECnet: impossible routing event : dn_fib_semantic_match type=%d\n",
+				       type);
 			res->fi = NULL;
 			return -EINVAL;
 		}
 	}
 	return err;
@@ -647,20 +648,20 @@ static int dn_fib_dnaddr_event(struct notifier_block *this, unsigned long event,
 {
 	struct dn_ifaddr *ifa = (struct dn_ifaddr *)ptr;

-	switch(event) {
+	switch (event) {
 	case NETDEV_UP:
 		dn_fib_add_ifaddr(ifa);
 		dn_fib_sync_up(ifa->ifa_dev->dev);
 		dn_rt_cache_flush(-1);
 		break;
 	case NETDEV_DOWN:
 		dn_fib_del_ifaddr(ifa);
 		if (ifa->ifa_dev && ifa->ifa_dev->ifa_list == NULL) {
 			dn_fib_disable_addr(ifa->ifa_dev->dev, 1);
 		} else {
 			dn_rt_cache_flush(-1);
 		}
 		break;
 	}
 	return NOTIFY_DONE;
 }
@@ -763,8 +764,8 @@ void __init dn_fib_init(void)

 	register_dnaddr_notifier(&dn_fib_dnaddr_notifier);

-	rtnl_register(PF_DECnet, RTM_NEWROUTE, dn_fib_rtm_newroute, NULL);
-	rtnl_register(PF_DECnet, RTM_DELROUTE, dn_fib_rtm_delroute, NULL);
+	rtnl_register(PF_DECnet, RTM_NEWROUTE, dn_fib_rtm_newroute, NULL, NULL);
+	rtnl_register(PF_DECnet, RTM_DELROUTE, dn_fib_rtm_delroute, NULL, NULL);
 }


diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index 602dade7e9a..0dc3fe61085 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -51,9 +51,9 @@
 static int dn_neigh_construct(struct neighbour *);
 static void dn_long_error_report(struct neighbour *, struct sk_buff *);
 static void dn_short_error_report(struct neighbour *, struct sk_buff *);
-static int dn_long_output(struct sk_buff *);
-static int dn_short_output(struct sk_buff *);
-static int dn_phase3_output(struct sk_buff *);
+static int dn_long_output(struct neighbour *, struct sk_buff *);
+static int dn_short_output(struct neighbour *, struct sk_buff *);
+static int dn_phase3_output(struct neighbour *, struct sk_buff *);


 /*
@@ -64,8 +64,6 @@ static const struct neigh_ops dn_long_ops = {
 	.error_report =		dn_long_error_report,
 	.output =		dn_long_output,
 	.connected_output =	dn_long_output,
-	.hh_output =		dev_queue_xmit,
-	.queue_xmit =		dev_queue_xmit,
 };

 /*
@@ -76,8 +74,6 @@ static const struct neigh_ops dn_short_ops = {
 	.error_report =		dn_short_error_report,
 	.output =		dn_short_output,
 	.connected_output =	dn_short_output,
-	.hh_output =		dev_queue_xmit,
-	.queue_xmit =		dev_queue_xmit,
 };

 /*
@@ -88,8 +84,6 @@ static const struct neigh_ops dn_phase3_ops = {
 	.error_report =		dn_short_error_report, /* Can use short version here */
 	.output =		dn_phase3_output,
 	.connected_output =	dn_phase3_output,
-	.hh_output =		dev_queue_xmit,
-	.queue_xmit =		dev_queue_xmit
 };

 static u32 dn_neigh_hash(const void *pkey,
@@ -208,14 +202,14 @@ static int dn_neigh_output_packet(struct sk_buff *skb)
 {
 	struct dst_entry *dst = skb_dst(skb);
 	struct dn_route *rt = (struct dn_route *)dst;
-	struct neighbour *neigh = dst->neighbour;
+	struct neighbour *neigh = dst_get_neighbour(dst);
 	struct net_device *dev = neigh->dev;
 	char mac_addr[ETH_ALEN];

 	dn_dn2eth(mac_addr, rt->rt_local_src);
 	if (dev_hard_header(skb, dev, ntohs(skb->protocol), neigh->ha,
 			    mac_addr, skb->len) >= 0)
-		return neigh->ops->queue_xmit(skb);
+		return dev_queue_xmit(skb);

 	if (net_ratelimit())
 		printk(KERN_DEBUG "dn_neigh_output_packet: oops, can't send packet\n");
@@ -224,10 +218,8 @@ static int dn_neigh_output_packet(struct sk_buff *skb)
 	return -EINVAL;
 }

-static int dn_long_output(struct sk_buff *skb)
+static int dn_long_output(struct neighbour *neigh, struct sk_buff *skb)
 {
-	struct dst_entry *dst = skb_dst(skb);
-	struct neighbour *neigh = dst->neighbour;
 	struct net_device *dev = neigh->dev;
 	int headroom = dev->hard_header_len + sizeof(struct dn_long_packet) + 3;
 	unsigned char *data;
@@ -271,10 +263,8 @@ static int dn_long_output(struct sk_buff *skb)
 		       neigh->dev, dn_neigh_output_packet);
 }

-static int dn_short_output(struct sk_buff *skb)
+static int dn_short_output(struct neighbour *neigh, struct sk_buff *skb)
 {
-	struct dst_entry *dst = skb_dst(skb);
-	struct neighbour *neigh = dst->neighbour;
 	struct net_device *dev = neigh->dev;
 	int headroom = dev->hard_header_len + sizeof(struct dn_short_packet) + 2;
 	struct dn_short_packet *sp;
@@ -315,10 +305,8 @@ static int dn_short_output(struct sk_buff *skb)
  * Phase 3 output is the same is short output, execpt that
  * it clears the area bits before transmission.
  */
-static int dn_phase3_output(struct sk_buff *skb)
+static int dn_phase3_output(struct neighbour *neigh, struct sk_buff *skb)
 {
-	struct dst_entry *dst = skb_dst(skb);
-	struct neighbour *neigh = dst->neighbour;
 	struct net_device *dev = neigh->dev;
 	int headroom = dev->hard_header_len + sizeof(struct dn_short_packet) + 2;
 	struct dn_short_packet *sp;
@@ -404,13 +392,13 @@ int dn_neigh_router_hello(struct sk_buff *skb)

 		dn->flags &= ~DN_NDFLAG_P3;

-		switch(msg->iinfo & DN_RT_INFO_TYPE) {
+		switch (msg->iinfo & DN_RT_INFO_TYPE) {
 		case DN_RT_INFO_L1RT:
 			dn->flags &=~DN_NDFLAG_R2;
 			dn->flags |= DN_NDFLAG_R1;
 			break;
 		case DN_RT_INFO_L2RT:
 			dn->flags |= DN_NDFLAG_R2;
 		}
 	}

diff --git a/net/decnet/dn_nsp_in.c b/net/decnet/dn_nsp_in.c
index b430549e2b9..73fa268fe2e 100644
--- a/net/decnet/dn_nsp_in.c
+++ b/net/decnet/dn_nsp_in.c
@@ -101,23 +101,27 @@ static void dn_ack(struct sock *sk, struct sk_buff *skb, unsigned short ack)
 	unsigned short type = ((ack >> 12) & 0x0003);
 	int wakeup = 0;

-	switch(type) {
+	switch (type) {
 	case 0: /* ACK - Data */
 		if (dn_after(ack, scp->ackrcv_dat)) {
 			scp->ackrcv_dat = ack & 0x0fff;
-			wakeup |= dn_nsp_check_xmit_queue(sk, skb, &scp->data_xmit_queue, ack);
+			wakeup |= dn_nsp_check_xmit_queue(sk, skb,
+							  &scp->data_xmit_queue,
+							  ack);
 		}
 		break;
 	case 1: /* NAK - Data */
 		break;
 	case 2: /* ACK - OtherData */
 		if (dn_after(ack, scp->ackrcv_oth)) {
 			scp->ackrcv_oth = ack & 0x0fff;
-			wakeup |= dn_nsp_check_xmit_queue(sk, skb, &scp->other_xmit_queue, ack);
+			wakeup |= dn_nsp_check_xmit_queue(sk, skb,
+							  &scp->other_xmit_queue,
+							  ack);
 		}
 		break;
 	case 3: /* NAK - OtherData */
 		break;
 	}

 	if (wakeup && !sock_flag(sk, SOCK_DEAD))
@@ -417,19 +421,19 @@ static void dn_nsp_disc_init(struct sock *sk, struct sk_buff *skb)
 	scp->addrrem = cb->src_port;
 	sk->sk_state = TCP_CLOSE;

-	switch(scp->state) {
+	switch (scp->state) {
 	case DN_CI:
 	case DN_CD:
 		scp->state = DN_RJ;
 		sk->sk_err = ECONNREFUSED;
 		break;
 	case DN_RUN:
 		sk->sk_shutdown |= SHUTDOWN_MASK;
 		scp->state = DN_DN;
 		break;
 	case DN_DI:
 		scp->state = DN_DIC;
 		break;
 	}

 	if (!sock_flag(sk, SOCK_DEAD)) {
@@ -470,23 +474,23 @@ static void dn_nsp_disc_conf(struct sock *sk, struct sk_buff *skb)

 	sk->sk_state = TCP_CLOSE;

-	switch(scp->state) {
+	switch (scp->state) {
 	case DN_CI:
 		scp->state = DN_NR;
 		break;
 	case DN_DR:
 		if (reason == NSP_REASON_DC)
 			scp->state = DN_DRC;
 		if (reason == NSP_REASON_NL)
 			scp->state = DN_CN;
 		break;
 	case DN_DI:
 		scp->state = DN_DIC;
 		break;
 	case DN_RUN:
 		sk->sk_shutdown |= SHUTDOWN_MASK;
 	case DN_CC:
 		scp->state = DN_CN;
 	}

 	if (!sock_flag(sk, SOCK_DEAD)) {
@@ -692,16 +696,16 @@ static int dn_nsp_no_socket(struct sk_buff *skb, unsigned short reason)
 		goto out;

 	if ((reason != NSP_REASON_OK) && ((cb->nsp_flags & 0x0c) == 0x08)) {
-		switch(cb->nsp_flags & 0x70) {
+		switch (cb->nsp_flags & 0x70) {
 		case 0x10:
 		case 0x60: /* (Retransmitted) Connect Init */
 			dn_nsp_return_disc(skb, NSP_DISCINIT, reason);
 			ret = NET_RX_SUCCESS;
 			break;
 		case 0x20: /* Connect Confirm */
 			dn_nsp_return_disc(skb, NSP_DISCCONF, reason);
 			ret = NET_RX_SUCCESS;
 			break;
 		}
 	}

@@ -733,17 +737,17 @@ static int dn_nsp_rx_packet(struct sk_buff *skb)
 	 * Filter out conninits and useless packet types
 	 */
 	if ((cb->nsp_flags & 0x0c) == 0x08) {
-		switch(cb->nsp_flags & 0x70) {
+		switch (cb->nsp_flags & 0x70) {
 		case 0x00: /* NOP */
 		case 0x70: /* Reserved */
 		case 0x50: /* Reserved, Phase II node init */
 			goto free_out;
 		case 0x10:
 		case 0x60:
 			if (unlikely(cb->rt_flags & DN_RT_F_RTS))
 				goto free_out;
 			sk = dn_find_listener(skb, &reason);
 			goto got_it;
 		}
 	}

@@ -836,20 +840,20 @@ int dn_nsp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 	 * Control packet.
 	 */
 	if ((cb->nsp_flags & 0x0c) == 0x08) {
-		switch(cb->nsp_flags & 0x70) {
+		switch (cb->nsp_flags & 0x70) {
 		case 0x10:
 		case 0x60:
 			dn_nsp_conn_init(sk, skb);
 			break;
 		case 0x20:
 			dn_nsp_conn_conf(sk, skb);
 			break;
 		case 0x30:
 			dn_nsp_disc_init(sk, skb);
 			break;
 		case 0x40:
 			dn_nsp_disc_conf(sk, skb);
 			break;
 		}

 	} else if (cb->nsp_flags == 0x24) {
@@ -890,15 +894,15 @@ int dn_nsp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 		if (scp->state != DN_RUN)
 			goto free_out;

-		switch(cb->nsp_flags) {
+		switch (cb->nsp_flags) {
 		case 0x10: /* LS */
 			dn_nsp_linkservice(sk, skb);
 			break;
 		case 0x30: /* OD */
 			dn_nsp_otherdata(sk, skb);
 			break;
 		default:
 			dn_nsp_data(sk, skb);
 		}

 	} else { /* Ack, chuck it out here */
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 74544bc6fde..43450c10022 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -116,6 +116,7 @@ static void dn_dst_destroy(struct dst_entry *);
 static struct dst_entry *dn_dst_negative_advice(struct dst_entry *);
 static void dn_dst_link_failure(struct sk_buff *);
 static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu);
+static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst, const void *daddr);
 static int dn_route_input(struct sk_buff *);
 static void dn_run_flush(unsigned long dummy);
 
@@ -139,6 +140,7 @@ static struct dst_ops dn_dst_ops = {
 	.negative_advice =	dn_dst_negative_advice,
 	.link_failure =		dn_dst_link_failure,
 	.update_pmtu =		dn_dst_update_pmtu,
+	.neigh_lookup =		dn_dst_neigh_lookup,
 };
 
 static void dn_dst_destroy(struct dst_entry *dst)
@@ -241,9 +243,11 @@ static int dn_dst_gc(struct dst_ops *ops)
  */
 static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu)
 {
+	struct neighbour *n = dst_get_neighbour(dst);
 	u32 min_mtu = 230;
-	struct dn_dev *dn = dst->neighbour ?
-			    rcu_dereference_raw(dst->neighbour->dev->dn_ptr) : NULL;
+	struct dn_dev *dn;
+
+	dn = n ? rcu_dereference_raw(n->dev->dn_ptr) : NULL;
 
 	if (dn && dn->use_long == 0)
 		min_mtu -= 6;
@@ -495,11 +499,11 @@ static int dn_route_rx_packet(struct sk_buff *skb)
 	}
 
 	if ((skb->pkt_type == PACKET_HOST) && (cb->rt_flags & DN_RT_F_RQR)) {
-		switch(cb->rt_flags & DN_RT_PKT_MSK) {
-			case DN_RT_PKT_SHORT:
-				return dn_return_short(skb);
-			case DN_RT_PKT_LONG:
-				return dn_return_long(skb);
+		switch (cb->rt_flags & DN_RT_PKT_MSK) {
+		case DN_RT_PKT_SHORT:
+			return dn_return_short(skb);
+		case DN_RT_PKT_LONG:
+			return dn_return_long(skb);
 		}
 	}
 
@@ -652,38 +656,38 @@ int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type
 		if (unlikely(skb_linearize(skb)))
 			goto dump_it;
 
-		switch(flags & DN_RT_CNTL_MSK) {
-			case DN_RT_PKT_INIT:
-				dn_dev_init_pkt(skb);
-				break;
-			case DN_RT_PKT_VERI:
-				dn_dev_veri_pkt(skb);
-				break;
+		switch (flags & DN_RT_CNTL_MSK) {
+		case DN_RT_PKT_INIT:
+			dn_dev_init_pkt(skb);
+			break;
+		case DN_RT_PKT_VERI:
+			dn_dev_veri_pkt(skb);
+			break;
 		}
 
 		if (dn->parms.state != DN_DEV_S_RU)
 			goto dump_it;
 
-		switch(flags & DN_RT_CNTL_MSK) {
-			case DN_RT_PKT_HELO:
-				return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
-					       skb, skb->dev, NULL,
-					       dn_route_ptp_hello);
+		switch (flags & DN_RT_CNTL_MSK) {
+		case DN_RT_PKT_HELO:
+			return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
+				       skb, skb->dev, NULL,
+				       dn_route_ptp_hello);
 
-			case DN_RT_PKT_L1RT:
-			case DN_RT_PKT_L2RT:
-				return NF_HOOK(NFPROTO_DECNET, NF_DN_ROUTE,
-					       skb, skb->dev, NULL,
-					       dn_route_discard);
-			case DN_RT_PKT_ERTH:
-				return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
-					       skb, skb->dev, NULL,
-					       dn_neigh_router_hello);
+		case DN_RT_PKT_L1RT:
+		case DN_RT_PKT_L2RT:
+			return NF_HOOK(NFPROTO_DECNET, NF_DN_ROUTE,
+				       skb, skb->dev, NULL,
+				       dn_route_discard);
+		case DN_RT_PKT_ERTH:
+			return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
+				       skb, skb->dev, NULL,
+				       dn_neigh_router_hello);
 
-			case DN_RT_PKT_EEDH:
-				return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
-					       skb, skb->dev, NULL,
-					       dn_neigh_endnode_hello);
+		case DN_RT_PKT_EEDH:
+			return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
+				       skb, skb->dev, NULL,
+				       dn_neigh_endnode_hello);
 		}
 	} else {
 		if (dn->parms.state != DN_DEV_S_RU)
@@ -691,11 +695,11 @@ int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type
 
 		skb_pull(skb, 1); /* Pull flags */
 
-		switch(flags & DN_RT_PKT_MSK) {
-			case DN_RT_PKT_LONG:
-				return dn_route_rx_long(skb);
-			case DN_RT_PKT_SHORT:
-				return dn_route_rx_short(skb);
+		switch (flags & DN_RT_PKT_MSK) {
+		case DN_RT_PKT_LONG:
+			return dn_route_rx_long(skb);
+		case DN_RT_PKT_SHORT:
+			return dn_route_rx_short(skb);
 		}
 	}
 
@@ -705,6 +709,14 @@ out:
 	return NET_RX_DROP;
 }
 
+static int dn_to_neigh_output(struct sk_buff *skb)
+{
+	struct dst_entry *dst = skb_dst(skb);
+	struct neighbour *n = dst_get_neighbour(dst);
+
+	return n->output(n, skb);
+}
+
 static int dn_output(struct sk_buff *skb)
 {
 	struct dst_entry *dst = skb_dst(skb);
@@ -715,7 +727,7 @@ static int dn_output(struct sk_buff *skb)
 
 	int err = -EINVAL;
 
-	if ((neigh = dst->neighbour) == NULL)
+	if ((neigh = dst_get_neighbour(dst)) == NULL)
 		goto error;
 
 	skb->dev = dev;
@@ -733,7 +745,7 @@ static int dn_output(struct sk_buff *skb)
 	cb->hops = 0;
 
 	return NF_HOOK(NFPROTO_DECNET, NF_DN_LOCAL_OUT, skb, NULL, dev,
-		       neigh->output);
+		       dn_to_neigh_output);
 
 error:
 	if (net_ratelimit())
@@ -750,7 +762,6 @@ static int dn_forward(struct sk_buff *skb)
 	struct dst_entry *dst = skb_dst(skb);
 	struct dn_dev *dn_db = rcu_dereference(dst->dev->dn_ptr);
 	struct dn_route *rt;
-	struct neighbour *neigh = dst->neighbour;
 	int header_len;
 #ifdef CONFIG_NETFILTER
 	struct net_device *dev = skb->dev;
@@ -783,7 +794,7 @@ static int dn_forward(struct sk_buff *skb)
 		cb->rt_flags |= DN_RT_F_IE;
 
 	return NF_HOOK(NFPROTO_DECNET, NF_DN_FORWARD, skb, dev, skb->dev,
-		       neigh->output);
+		       dn_to_neigh_output);
 
 drop:
 	kfree_skb(skb);
@@ -818,6 +829,11 @@ static unsigned int dn_dst_default_mtu(const struct dst_entry *dst)
 	return dst->dev->mtu;
 }
 
+static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst, const void *daddr)
+{
+	return __neigh_lookup_errno(&dn_neigh_table, daddr, dst->dev);
+}
+
 static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
 {
 	struct dn_fib_info *fi = res->fi;
@@ -833,11 +849,11 @@ static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
 	}
 	rt->rt_type = res->type;
 
-	if (dev != NULL && rt->dst.neighbour == NULL) {
+	if (dev != NULL && dst_get_neighbour(&rt->dst) == NULL) {
 		n = __neigh_lookup_errno(&dn_neigh_table, &rt->rt_gateway, dev);
 		if (IS_ERR(n))
 			return PTR_ERR(n);
-		rt->dst.neighbour = n;
+		dst_set_neighbour(&rt->dst, n);
 	}
 
 	if (dst_metric(&rt->dst, RTAX_MTU) > rt->dst.dev->mtu)
@@ -1144,7 +1160,7 @@ make_route:
 	rt->rt_dst_map = fld.daddr;
 	rt->rt_src_map = fld.saddr;
 
-	rt->dst.neighbour = neigh;
+	dst_set_neighbour(&rt->dst, neigh);
 	neigh = NULL;
 
 	rt->dst.lastuse = jiffies;
@@ -1416,23 +1432,23 @@ make_route:
 	rt->fld.flowidn_iif = in_dev->ifindex;
 	rt->fld.flowidn_mark = fld.flowidn_mark;
 
-	rt->dst.neighbour = neigh;
+	dst_set_neighbour(&rt->dst, neigh);
 	rt->dst.lastuse = jiffies;
 	rt->dst.output = dn_rt_bug;
-	switch(res.type) {
-		case RTN_UNICAST:
-			rt->dst.input = dn_forward;
-			break;
-		case RTN_LOCAL:
-			rt->dst.output = dn_output;
-			rt->dst.input = dn_nsp_rx;
-			rt->dst.dev = in_dev;
-			flags |= RTCF_LOCAL;
-			break;
-		default:
-		case RTN_UNREACHABLE:
-		case RTN_BLACKHOLE:
-			rt->dst.input = dst_discard;
+	switch (res.type) {
+	case RTN_UNICAST:
+		rt->dst.input = dn_forward;
+		break;
+	case RTN_LOCAL:
+		rt->dst.output = dn_output;
+		rt->dst.input = dn_nsp_rx;
+		rt->dst.dev = in_dev;
+		flags |= RTCF_LOCAL;
+		break;
+	default:
+	case RTN_UNREACHABLE:
+	case RTN_BLACKHOLE:
+		rt->dst.input = dst_discard;
 	}
 	rt->rt_flags = flags;
 
@@ -1841,10 +1857,11 @@ void __init dn_route_init(void)
 	proc_net_fops_create(&init_net, "decnet_cache", S_IRUGO, &dn_rt_cache_seq_fops);
 
 #ifdef CONFIG_DECNET_ROUTER
-	rtnl_register(PF_DECnet, RTM_GETROUTE, dn_cache_getroute, dn_fib_dump);
+	rtnl_register(PF_DECnet, RTM_GETROUTE, dn_cache_getroute,
+		      dn_fib_dump, NULL);
#else
 	rtnl_register(PF_DECnet, RTM_GETROUTE, dn_cache_getroute,
-		      dn_cache_dump);
+		      dn_cache_dump, NULL);
 #endif
 }
 
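The dn_route.c hunks replace every direct access to dst->neighbour with the dst_get_neighbour()/dst_set_neighbour() helpers and funnel transmission through dn_to_neigh_output(), so the neighbour pointer has a single point of access. A standalone userspace sketch of that accessor pattern, with stand-in types rather than the kernel's real struct layouts:

/* Sketch only: hide a field behind get/set helpers so its representation
 * can change later (for example to an RCU-protected pointer) without
 * another tree-wide edit. Names mirror the kernel helpers; everything
 * else here is illustrative. */
#include <stddef.h>
#include <stdio.h>

struct neighbour {
	const char *name;
};

struct dst_entry {
	struct neighbour *_neighbour;	/* private: use the accessors */
};

static struct neighbour *dst_get_neighbour(struct dst_entry *dst)
{
	return dst->_neighbour;
}

static void dst_set_neighbour(struct dst_entry *dst, struct neighbour *n)
{
	dst->_neighbour = n;
}

int main(void)
{
	struct neighbour n = { "gateway" };
	struct dst_entry dst = { NULL };

	dst_set_neighbour(&dst, &n);
	printf("neighbour: %s\n", dst_get_neighbour(&dst)->name);
	return 0;
}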
diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c
index bd0a52dd1d4..cd0354e9bdb 100644
--- a/net/decnet/dn_table.c
+++ b/net/decnet/dn_table.c
@@ -147,17 +147,18 @@ static void dn_rehash_zone(struct dn_zone *dz)
 
 	old_divisor = dz->dz_divisor;
 
-	switch(old_divisor) {
-		case 16:
-			new_divisor = 256;
-			new_hashmask = 0xFF;
-			break;
-		default:
-			printk(KERN_DEBUG "DECnet: dn_rehash_zone: BUG! %d\n", old_divisor);
-		case 256:
-			new_divisor = 1024;
-			new_hashmask = 0x3FF;
-			break;
+	switch (old_divisor) {
+	case 16:
+		new_divisor = 256;
+		new_hashmask = 0xFF;
+		break;
+	default:
+		printk(KERN_DEBUG "DECnet: dn_rehash_zone: BUG! %d\n",
+		       old_divisor);
+	case 256:
+		new_divisor = 1024;
+		new_hashmask = 0x3FF;
+		break;
 	}
 
 	ht = kcalloc(new_divisor, sizeof(struct dn_fib_node*), GFP_KERNEL);
diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c
index 64a7f39e069..69975e0bcde 100644
--- a/net/decnet/netfilter/dn_rtmsg.c
+++ b/net/decnet/netfilter/dn_rtmsg.c
@@ -69,15 +69,15 @@ static void dnrmg_send_peer(struct sk_buff *skb)
 	int group = 0;
 	unsigned char flags = *skb->data;
 
-	switch(flags & DN_RT_CNTL_MSK) {
-		case DN_RT_PKT_L1RT:
-			group = DNRNG_NLGRP_L1;
-			break;
-		case DN_RT_PKT_L2RT:
-			group = DNRNG_NLGRP_L2;
-			break;
-		default:
-			return;
+	switch (flags & DN_RT_CNTL_MSK) {
+	case DN_RT_PKT_L1RT:
+		group = DNRNG_NLGRP_L1;
+		break;
+	case DN_RT_PKT_L2RT:
+		group = DNRNG_NLGRP_L2;
+		break;
+	default:
+		return;
 	}
 
 	skb2 = dnrmg_build_message(skb, &status);
diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
index 28f8b5e5f73..02e75d11cfb 100644
--- a/net/decnet/sysctl_net_decnet.c
+++ b/net/decnet/sysctl_net_decnet.c
@@ -68,14 +68,15 @@ static struct ctl_table_header *dn_table_header = NULL;
 static void strip_it(char *str)
 {
 	for(;;) {
-		switch(*str) {
-			case ' ':
-			case '\n':
-			case '\r':
-			case ':':
-				*str = 0;
-			case 0:
-				return;
+		switch (*str) {
+		case ' ':
+		case '\n':
+		case '\r':
+		case ':':
+			*str = 0;
+			/* Fallthrough */
+		case 0:
+			return;
 		}
 		str++;
 	}
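Besides the reindent, the strip_it() hunk marks the intentionally missing break with a /* Fallthrough */ comment. A small userspace example of the same convention:

/* Sketch only: annotate a deliberate fall through so readers and static
 * checkers know the missing break is intentional. */
#include <stdio.h>

static void classify(char c)
{
	switch (c) {
	case ' ':
	case '\n':
		printf("whitespace\n");
		/* Fallthrough */
	case 0:
		printf("terminates scan\n");
		break;
	default:
		printf("ordinary\n");
	}
}

int main(void)
{
	classify(' ');
	classify('x');
	return 0;
}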
diff --git a/net/dsa/mv88e6131.c b/net/dsa/mv88e6131.c
index 45f7411e90b..9bd1061fa4e 100644
--- a/net/dsa/mv88e6131.c
+++ b/net/dsa/mv88e6131.c
@@ -118,10 +118,14 @@ static int mv88e6131_setup_global(struct dsa_switch *ds)
 	REG_WRITE(REG_GLOBAL, 0x1a, (dsa_upstream_port(ds) * 0x1100) | 0x00f0);
 
 	/*
-	 * Disable cascade port functionality, and set the switch's
+	 * Disable cascade port functionality unless this device
+	 * is used in a cascade configuration, and set the switch's
 	 * DSA device number.
 	 */
-	REG_WRITE(REG_GLOBAL, 0x1c, 0xe000 | (ds->index & 0x1f));
+	if (ds->dst->pd->nr_chips > 1)
+		REG_WRITE(REG_GLOBAL, 0x1c, 0xf000 | (ds->index & 0x1f));
+	else
+		REG_WRITE(REG_GLOBAL, 0x1c, 0xe000 | (ds->index & 0x1f));
 
 	/*
 	 * Send all frames with destination addresses matching
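The mv88e6131 hunk makes the cascade-port field of the global control word conditional on more than one switch chip being present. A userspace sketch of the value selection, with the register constants taken from the diff and the helper invented for the demo:

/* Sketch only: pick the control word by topology, as the hunk does. */
#include <stdio.h>

static unsigned int global_control_word(int nr_chips, unsigned int index)
{
	unsigned int base = (nr_chips > 1) ? 0xf000u : 0xe000u;

	return base | (index & 0x1f);
}

int main(void)
{
	printf("single chip: 0x%04x\n", global_control_word(1, 3));
	printf("cascade:     0x%04x\n", global_control_word(2, 3));
	return 0;
}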
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index a1d9f3787dd..1c1f26c5d67 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -9,6 +9,8 @@
  *
  */
 
+#define pr_fmt(fmt) fmt
+
 #include <linux/module.h>
 
 #include <linux/types.h>
@@ -44,7 +46,7 @@
 #include <linux/bitops.h>
 #include <linux/mutex.h>
 
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 #include <asm/system.h>
 
 static const struct proto_ops econet_ops;
@@ -63,9 +65,7 @@ static DEFINE_SPINLOCK(aun_queue_lock);
 static struct socket *udpsock;
 #define AUN_PORT 0x8000
 
-
-struct aunhdr
-{
+struct aunhdr {
 	unsigned char code;		/* AUN magic protocol byte */
 	unsigned char port;
 	unsigned char cb;
@@ -82,8 +82,7 @@ static struct timer_list ab_cleanup_timer;
 #endif /* CONFIG_ECONET_AUNUDP */
 
 /* Per-packet information */
-struct ec_cb
-{
+struct ec_cb {
 	struct sockaddr_ec sec;
 	unsigned long cookie;		/* Supplied by user. */
 #ifdef CONFIG_ECONET_AUNUDP
@@ -137,7 +136,7 @@ static int econet_recvmsg(struct kiocb *iocb, struct socket *sock,
 	 * but then it will block.
 	 */
 
-	skb=skb_recv_datagram(sk,flags,flags&MSG_DONTWAIT,&err);
+	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
 
 	/*
 	 * An error occurred so return it. Because skb_recv_datagram()
@@ -145,7 +144,7 @@ static int econet_recvmsg(struct kiocb *iocb, struct socket *sock,
 	 * retries.
 	 */
 
-	if(skb==NULL)
+	if (skb == NULL)
 		goto out;
 
 	/*
@@ -154,10 +153,9 @@ static int econet_recvmsg(struct kiocb *iocb, struct socket *sock,
 	 */
 
 	copied = skb->len;
-	if (copied > len)
-	{
-		copied=len;
-		msg->msg_flags|=MSG_TRUNC;
+	if (copied > len) {
+		copied = len;
+		msg->msg_flags |= MSG_TRUNC;
 	}
 
 	/* We can't use skb_copy_datagram here */
@@ -186,7 +184,8 @@ out:
  *	Bind an Econet socket.
  */
 
-static int econet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+static int econet_bind(struct socket *sock, struct sockaddr *uaddr,
+		       int addr_len)
 {
 	struct sockaddr_ec *sec = (struct sockaddr_ec *)uaddr;
 	struct sock *sk;
@@ -226,9 +225,8 @@ static void tx_result(struct sock *sk, unsigned long cookie, int result)
 	struct ec_cb *eb;
 	struct sockaddr_ec *sec;
 
-	if (skb == NULL)
-	{
-		printk(KERN_DEBUG "ec: memory squeeze, transmit result dropped.\n");
+	if (skb == NULL) {
+		pr_debug("econet: memory squeeze, transmit result dropped\n");
 		return;
 	}
 
@@ -265,7 +263,7 @@ static void ec_tx_done(struct sk_buff *skb, int result)
 static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
 			  struct msghdr *msg, size_t len)
 {
-	struct sockaddr_ec *saddr=(struct sockaddr_ec *)msg->msg_name;
+	struct sockaddr_ec *saddr = (struct sockaddr_ec *)msg->msg_name;
 	struct net_device *dev;
 	struct ec_addr addr;
 	int err;
@@ -298,14 +296,14 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
 
 	mutex_lock(&econet_mutex);
 
-        if (saddr == NULL || msg->msg_namelen < sizeof(struct sockaddr_ec)) {
-                mutex_unlock(&econet_mutex);
-                return -EINVAL;
-        }
-        addr.station = saddr->addr.station;
-        addr.net = saddr->addr.net;
-        port = saddr->port;
-        cb = saddr->cb;
+	if (saddr == NULL || msg->msg_namelen < sizeof(struct sockaddr_ec)) {
+		mutex_unlock(&econet_mutex);
+		return -EINVAL;
+	}
+	addr.station = saddr->addr.station;
+	addr.net = saddr->addr.net;
+	port = saddr->port;
+	cb = saddr->cb;
 
 	/* Look for a device with the right network number. */
 	dev = net2dev_map[addr.net];
@@ -333,9 +331,9 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
 
 	dev_hold(dev);
 
-	skb = sock_alloc_send_skb(sk, len+LL_ALLOCATED_SPACE(dev),
+	skb = sock_alloc_send_skb(sk, len + LL_ALLOCATED_SPACE(dev),
 				  msg->msg_flags & MSG_DONTWAIT, &err);
-	if (skb==NULL)
+	if (skb == NULL)
 		goto out_unlock;
 
 	skb_reserve(skb, LL_RESERVED_SPACE(dev));
@@ -355,7 +353,7 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
 		struct ec_framehdr *fh;
 		/* Poke in our control byte and
 		   port number. Hack, hack. */
-		fh = (struct ec_framehdr *)(skb->data);
+		fh = (struct ec_framehdr *)skb->data;
 		fh->cb = cb;
 		fh->port = port;
 		if (sock->type != SOCK_DGRAM) {
@@ -365,7 +363,7 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
 	}
 
 	/* Copy the data. Returns -EFAULT on error */
-	err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
+	err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
 	skb->protocol = proto;
 	skb->dev = dev;
 	skb->priority = sk->sk_priority;
@@ -385,9 +383,9 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
 	mutex_unlock(&econet_mutex);
 	return len;
 
- out_free:
+out_free:
 	kfree_skb(skb);
- out_unlock:
+out_unlock:
 	if (dev)
 		dev_put(dev);
 #else
@@ -458,15 +456,14 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
 		goto error_free_buf;
 
 	/* Get a skbuff (no data, just holds our cb information) */
-	if ((skb = sock_alloc_send_skb(sk, 0,
-				       msg->msg_flags & MSG_DONTWAIT,
-				       &err)) == NULL)
+	skb = sock_alloc_send_skb(sk, 0, msg->msg_flags & MSG_DONTWAIT, &err);
+	if (skb == NULL)
 		goto error_free_buf;
 
 	eb = (struct ec_cb *)&skb->cb;
 
 	eb->cookie = saddr->cookie;
-	eb->timeout = (5*HZ);
+	eb->timeout = 5 * HZ;
 	eb->start = jiffies;
 	ah.handle = aun_seq;
 	eb->seq = (aun_seq++);
@@ -480,9 +477,10 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
 	udpmsg.msg_iovlen = 2;
 	udpmsg.msg_control = NULL;
 	udpmsg.msg_controllen = 0;
-	udpmsg.msg_flags=0;
+	udpmsg.msg_flags = 0;
 
-	oldfs = get_fs(); set_fs(KERNEL_DS);	/* More privs :-) */
+	oldfs = get_fs();
+	set_fs(KERNEL_DS);		/* More privs :-) */
 	err = sock_sendmsg(udpsock, &udpmsg, size);
 	set_fs(oldfs);
 
@@ -530,7 +528,7 @@ static int econet_getname(struct socket *sock, struct sockaddr *uaddr,
 
 static void econet_destroy_timer(unsigned long data)
 {
-	struct sock *sk=(struct sock *)data;
+	struct sock *sk = (struct sock *)data;
 
 	if (!sk_has_allocations(sk)) {
 		sk_free(sk);
@@ -539,7 +537,7 @@ static void econet_destroy_timer(unsigned long data)
 
 	sk->sk_timer.expires = jiffies + 10 * HZ;
 	add_timer(&sk->sk_timer);
-	printk(KERN_DEBUG "econet socket destroy delayed\n");
+	pr_debug("econet: socket destroy delayed\n");
 }
 
 /*
@@ -651,7 +649,8 @@ static int ec_dev_ioctl(struct socket *sock, unsigned int cmd, void __user *arg)
 	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
 		return -EFAULT;
 
-	if ((dev = dev_get_by_name(&init_net, ifr.ifr_name)) == NULL)
+	dev = dev_get_by_name(&init_net, ifr.ifr_name);
+	if (dev == NULL)
 		return -ENODEV;
 
 	sec = (struct sockaddr_ec *)&ifr.ifr_addr;
@@ -715,28 +714,26 @@ static int ec_dev_ioctl(struct socket *sock, unsigned int cmd, void __user *arg)
  * Handle generic ioctls
  */
 
-static int econet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+static int econet_ioctl(struct socket *sock, unsigned int cmd,
+			unsigned long arg)
 {
 	struct sock *sk = sock->sk;
 	void __user *argp = (void __user *)arg;
 
-	switch(cmd) {
-		case SIOCGSTAMP:
-			return sock_get_timestamp(sk, argp);
+	switch (cmd) {
+	case SIOCGSTAMP:
+		return sock_get_timestamp(sk, argp);
 
-		case SIOCGSTAMPNS:
-			return sock_get_timestampns(sk, argp);
+	case SIOCGSTAMPNS:
+		return sock_get_timestampns(sk, argp);
 
-		case SIOCSIFADDR:
-		case SIOCGIFADDR:
-			return ec_dev_ioctl(sock, cmd, argp);
-			break;
+	case SIOCSIFADDR:
+	case SIOCGIFADDR:
+		return ec_dev_ioctl(sock, cmd, argp);
 
-		default:
-			return -ENOIOCTLCMD;
 	}
-	/*NOTREACHED*/
-	return 0;
+
+	return -ENOIOCTLCMD;
 }
 
 static const struct net_proto_family econet_family_ops = {
@@ -836,7 +833,7 @@ static void aun_send_response(__u32 addr, unsigned long seq, int code, int cb)
 	udpmsg.msg_namelen = sizeof(sin);
 	udpmsg.msg_control = NULL;
 	udpmsg.msg_controllen = 0;
-	udpmsg.msg_flags=0;
+	udpmsg.msg_flags = 0;
 
 	kernel_sendmsg(udpsock, &udpmsg, &iov, 1, sizeof(ah));
 }
@@ -859,26 +856,25 @@ static void aun_incoming(struct sk_buff *skb, struct aunhdr *ah, size_t len)
 	if (dst)
 		edev = dst->dev->ec_ptr;
 
-	if (! edev)
+	if (!edev)
 		goto bad;
 
-	if ((sk = ec_listening_socket(ah->port, stn, edev->net)) == NULL)
+	sk = ec_listening_socket(ah->port, stn, edev->net);
+	if (sk == NULL)
 		goto bad;		/* Nobody wants it */
 
 	newskb = alloc_skb((len - sizeof(struct aunhdr) + 15) & ~15,
 			   GFP_ATOMIC);
-	if (newskb == NULL)
-	{
-		printk(KERN_DEBUG "AUN: memory squeeze, dropping packet.\n");
+	if (newskb == NULL) {
+		pr_debug("AUN: memory squeeze, dropping packet\n");
 		/* Send nack and hope sender tries again */
 		goto bad;
 	}
 
-	memcpy(skb_put(newskb, len - sizeof(struct aunhdr)), (void *)(ah+1),
+	memcpy(skb_put(newskb, len - sizeof(struct aunhdr)), (void *)(ah + 1),
 	       len - sizeof(struct aunhdr));
 
-	if (ec_queue_packet(sk, newskb, stn, edev->net, ah->cb, ah->port))
-	{
+	if (ec_queue_packet(sk, newskb, stn, edev->net, ah->cb, ah->port)) {
 		/* Socket is bankrupt. */
 		kfree_skb(newskb);
 		goto bad;
@@ -914,7 +910,7 @@ static void aun_tx_ack(unsigned long seq, int result)
 			goto foundit;
 	}
 	spin_unlock_irqrestore(&aun_queue_lock, flags);
-	printk(KERN_DEBUG "AUN: unknown sequence %ld\n", seq);
+	pr_debug("AUN: unknown sequence %ld\n", seq);
 	return;
 
 foundit:
@@ -939,18 +935,17 @@ static void aun_data_available(struct sock *sk, int slen)
 
 	while ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL) {
 		if (err == -EAGAIN) {
-			printk(KERN_ERR "AUN: no data available?!");
+			pr_err("AUN: no data available?!\n");
 			return;
 		}
-		printk(KERN_DEBUG "AUN: recvfrom() error %d\n", -err);
+		pr_debug("AUN: recvfrom() error %d\n", -err);
 	}
 
 	data = skb_transport_header(skb) + sizeof(struct udphdr);
 	ah = (struct aunhdr *)data;
 	len = skb->len - sizeof(struct udphdr);
 
-	switch (ah->code)
-	{
+	switch (ah->code) {
 	case 2:
 		aun_incoming(skb, ah, len);
 		break;
@@ -961,7 +956,7 @@ static void aun_data_available(struct sock *sk, int slen)
 		aun_tx_ack(ah->handle, ECTYPE_TRANSMIT_NOT_LISTENING);
 		break;
 	default:
-		printk(KERN_DEBUG "unknown AUN packet (type %d)\n", data[0]);
+		pr_debug("AUN: unknown packet type: %d\n", data[0]);
 	}
 
 	skb_free_datagram(sk, skb);
@@ -991,7 +986,7 @@ static void ab_cleanup(unsigned long h)
 	}
 	spin_unlock_irqrestore(&aun_queue_lock, flags);
 
-	mod_timer(&ab_cleanup_timer, jiffies + (HZ*2));
+	mod_timer(&ab_cleanup_timer, jiffies + (HZ * 2));
 }
 
 static int __init aun_udp_initialise(void)
@@ -1001,7 +996,7 @@ static int __init aun_udp_initialise(void)
 
 	skb_queue_head_init(&aun_queue);
 	setup_timer(&ab_cleanup_timer, ab_cleanup, 0);
-	ab_cleanup_timer.expires = jiffies + (HZ*2);
+	ab_cleanup_timer.expires = jiffies + (HZ * 2);
 	add_timer(&ab_cleanup_timer);
 
 	memset(&sin, 0, sizeof(sin));
@@ -1009,9 +1004,9 @@ static int __init aun_udp_initialise(void)
 
 	/* We can count ourselves lucky Acorn machines are too dim to
 	   speak IPv6. :-) */
-	if ((error = sock_create_kern(PF_INET, SOCK_DGRAM, 0, &udpsock)) < 0)
-	{
-		printk("AUN: socket error %d\n", -error);
+	error = sock_create_kern(PF_INET, SOCK_DGRAM, 0, &udpsock);
+	if (error < 0) {
+		pr_err("AUN: socket error %d\n", -error);
 		return error;
 	}
 
@@ -1020,10 +1015,9 @@ static int __init aun_udp_initialise(void)
 	   from interrupts */
 
 	error = udpsock->ops->bind(udpsock, (struct sockaddr *)&sin,
-			sizeof(sin));
-	if (error < 0)
-	{
-		printk("AUN: bind error %d\n", -error);
+				   sizeof(sin));
+	if (error < 0) {
+		pr_err("AUN: bind error %d\n", -error);
 		goto release;
 	}
 
@@ -1044,7 +1038,8 @@ release:
  * Receive an Econet frame from a device.
 */
 
-static int econet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
+static int econet_rcv(struct sk_buff *skb, struct net_device *dev,
+		      struct packet_type *pt, struct net_device *orig_dev)
 {
 	struct ec_framehdr *hdr;
 	struct sock *sk = NULL;
@@ -1059,13 +1054,14 @@ static int econet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet
 	if (!edev)
 		goto drop;
 
-	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
+	skb = skb_share_check(skb, GFP_ATOMIC);
+	if (skb == NULL)
 		return NET_RX_DROP;
 
 	if (!pskb_may_pull(skb, sizeof(struct ec_framehdr)))
 		goto drop;
 
-	hdr = (struct ec_framehdr *) skb->data;
+	hdr = (struct ec_framehdr *)skb->data;
 
 	/* First check for encapsulated IP */
 	if (hdr->port == EC_PORT_IP) {
@@ -1093,8 +1089,8 @@ drop:
 }
 
 static struct packet_type econet_packet_type __read_mostly = {
-	.type =		cpu_to_be16(ETH_P_ECONET),
-	.func =		econet_rcv,
+	.type =	cpu_to_be16(ETH_P_ECONET),
+	.func =	econet_rcv,
 };
 
 static void econet_hw_initialise(void)
@@ -1104,9 +1100,10 @@ static void econet_hw_initialise(void)
 
 #endif
 
-static int econet_notifier(struct notifier_block *this, unsigned long msg, void *data)
+static int econet_notifier(struct notifier_block *this, unsigned long msg,
+			   void *data)
 {
-	struct net_device *dev = (struct net_device *)data;
+	struct net_device *dev = data;
 	struct ec_device *edev;
 
 	if (!net_eq(dev_net(dev), &init_net))
@@ -1116,8 +1113,7 @@ static int econet_notifier(struct notifier_block *this, unsigned long msg, void
 	case NETDEV_UNREGISTER:
 		/* A device has gone down - kill any data we hold for it. */
 		edev = dev->ec_ptr;
-		if (edev)
-		{
+		if (edev) {
 			if (net2dev_map[0] == dev)
 				net2dev_map[0] = NULL;
 			net2dev_map[edev->net] = NULL;
@@ -1131,7 +1127,7 @@ static int econet_notifier(struct notifier_block *this, unsigned long msg, void
 }
 
 static struct notifier_block econet_netdev_notifier = {
-	.notifier_call =econet_notifier,
+	.notifier_call = econet_notifier,
 };
 
 static void __exit econet_proto_exit(void)
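Most of the af_econet.c hunks are checkpatch-style cleanups; the logging ones switch printk(KERN_DEBUG ...)/printk(KERN_ERR ...) to pr_debug()/pr_err() under a pr_fmt() definition. In this file pr_fmt() is the identity and the "econet: "/"AUN: " prefixes stay in the format strings; the userspace approximation below shows the general prefixing mechanism instead, so treat the macro bodies as illustrative, not as the kernel's definitions (the real pr_debug also hooks into dynamic debug):

/* Sketch only: one pr_fmt() definition prefixes every message, so the
 * prefix is not repeated by hand at each call site. */
#include <stdio.h>

#define pr_fmt(fmt) "econet: " fmt
#define pr_debug(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	pr_debug("socket destroy delayed\n");
	pr_debug("unknown sequence %ld\n", 42L);
	return 0;
}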
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 44d2b42fda5..5cffb63f481 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -233,9 +233,8 @@ EXPORT_SYMBOL(eth_header_parse);
  * @hh: destination cache entry
  * Create an Ethernet header template from the neighbour.
  */
-int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh)
+int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh, __be16 type)
 {
-	__be16 type = hh->hh_type;
 	struct ethhdr *eth;
 	const struct net_device *dev = neigh->dev;
 
diff --git a/net/ieee802154/af_ieee802154.c b/net/ieee802154/af_ieee802154.c
index 6df6ecf4970..40e606f3788 100644
--- a/net/ieee802154/af_ieee802154.c
+++ b/net/ieee802154/af_ieee802154.c
@@ -302,7 +302,7 @@ static int ieee802154_rcv(struct sk_buff *skb, struct net_device *dev,
 			  struct packet_type *pt, struct net_device *orig_dev)
 {
 	if (!netif_running(dev))
-		return -ENODEV;
+		goto drop;
 	pr_debug("got frame, type %d, dev %p\n", dev->type, dev);
 #ifdef DEBUG
 	print_hex_dump_bytes("ieee802154_rcv ", DUMP_PREFIX_NONE, skb->data, skb->len);
diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
index 1a3334c2609..faecf648123 100644
--- a/net/ieee802154/dgram.c
+++ b/net/ieee802154/dgram.c
@@ -1,5 +1,5 @@
 /*
- * ZigBee socket interface
+ * IEEE 802.15.4 dgram socket interface
  *
  * Copyright 2007, 2008 Siemens AG
 *
diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c
index 02548b292b5..c64a38d57aa 100644
--- a/net/ieee802154/nl-phy.c
+++ b/net/ieee802154/nl-phy.c
@@ -24,6 +24,7 @@
 
 #include <linux/kernel.h>
 #include <linux/slab.h>
+#include <linux/if_arp.h>
 #include <net/netlink.h>
 #include <net/genetlink.h>
 #include <net/wpan-phy.h>
@@ -213,12 +214,37 @@ static int ieee802154_add_iface(struct sk_buff *skb,
 		goto nla_put_failure;
 	}
 
+	if (info->attrs[IEEE802154_ATTR_HW_ADDR] &&
+	    nla_len(info->attrs[IEEE802154_ATTR_HW_ADDR]) !=
+			IEEE802154_ADDR_LEN) {
+		rc = -EINVAL;
+		goto nla_put_failure;
+	}
+
 	dev = phy->add_iface(phy, devname);
 	if (IS_ERR(dev)) {
 		rc = PTR_ERR(dev);
 		goto nla_put_failure;
 	}
 
+	if (info->attrs[IEEE802154_ATTR_HW_ADDR]) {
+		struct sockaddr addr;
+
+		addr.sa_family = ARPHRD_IEEE802154;
+		nla_memcpy(&addr.sa_data, info->attrs[IEEE802154_ATTR_HW_ADDR],
+			   IEEE802154_ADDR_LEN);
+
+		/*
+		 * strangely enough, some callbacks (inetdev_event) from
+		 * dev_set_mac_address require RTNL_LOCK
+		 */
+		rtnl_lock();
+		rc = dev_set_mac_address(dev, &addr);
+		rtnl_unlock();
+		if (rc)
+			goto dev_unregister;
+	}
+
 	NLA_PUT_STRING(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy));
 	NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
 
@@ -228,6 +254,11 @@ static int ieee802154_add_iface(struct sk_buff *skb,
 
 	return ieee802154_nl_reply(msg, info);
 
+dev_unregister:
+	rtnl_lock(); /* del_iface must be called with RTNL lock */
+	phy->del_iface(phy, dev);
+	dev_put(dev);
+	rtnl_unlock();
 nla_put_failure:
 	nlmsg_free(msg);
 out_dev:
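The nl-phy.c hunk follows a validate/commit/rollback shape: the optional hardware-address attribute is length-checked before any side effect, the interface is created, and a failing dev_set_mac_address() (which must run under the RTNL lock) unwinds via the new dev_unregister label. A userspace sketch of that control flow, with all names invented:

/* Sketch only: validate first, create, then undo creation if a later
 * step fails, mirroring the added error path above. */
#include <stdio.h>

static int create_iface(void)   { puts("iface created");   return 0; }
static void destroy_iface(void) { puts("iface destroyed"); }
static int set_hw_addr(int ok)  { return ok ? 0 : -1; }

static int add_iface(int have_addr, int addr_valid)
{
	int rc;

	if (have_addr && !addr_valid)	/* validate before side effects */
		return -1;

	rc = create_iface();
	if (rc)
		return rc;

	if (have_addr) {
		rc = set_hw_addr(addr_valid);
		if (rc) {
			destroy_iface();	/* roll back on failure */
			return rc;
		}
	}
	return 0;
}

int main(void)
{
	return add_iface(1, 1);
}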
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index eae1f676f87..1b745d412cf 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -465,8 +465,10 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 	if (addr_len < sizeof(struct sockaddr_in))
 		goto out;
 
-	if (addr->sin_family != AF_INET)
+	if (addr->sin_family != AF_INET) {
+		err = -EAFNOSUPPORT;
 		goto out;
+	}
 
 	chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
 
@@ -1438,11 +1440,11 @@ EXPORT_SYMBOL_GPL(inet_ctl_sock_create);
 unsigned long snmp_fold_field(void __percpu *mib[], int offt)
 {
 	unsigned long res = 0;
-	int i;
+	int i, j;
 
 	for_each_possible_cpu(i) {
-		res += *(((unsigned long *) per_cpu_ptr(mib[0], i)) + offt);
-		res += *(((unsigned long *) per_cpu_ptr(mib[1], i)) + offt);
+		for (j = 0; j < SNMP_ARRAY_SZ; j++)
+			res += *(((unsigned long *) per_cpu_ptr(mib[j], i)) + offt);
 	}
 	return res;
 }
@@ -1456,28 +1458,19 @@ u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_offset)
 	int cpu;
 
 	for_each_possible_cpu(cpu) {
-		void *bhptr, *userptr;
+		void *bhptr;
 		struct u64_stats_sync *syncp;
-		u64 v_bh, v_user;
+		u64 v;
 		unsigned int start;
 
-		/* first mib used by softirq context, we must use _bh() accessors */
-		bhptr = per_cpu_ptr(SNMP_STAT_BHPTR(mib), cpu);
+		bhptr = per_cpu_ptr(mib[0], cpu);
 		syncp = (struct u64_stats_sync *)(bhptr + syncp_offset);
 		do {
 			start = u64_stats_fetch_begin_bh(syncp);
-			v_bh = *(((u64 *) bhptr) + offt);
+			v = *(((u64 *) bhptr) + offt);
 		} while (u64_stats_fetch_retry_bh(syncp, start));
 
-		/* second mib used in USER context */
-		userptr = per_cpu_ptr(SNMP_STAT_USRPTR(mib), cpu);
-		syncp = (struct u64_stats_sync *)(userptr + syncp_offset);
-		do {
-			start = u64_stats_fetch_begin(syncp);
-			v_user = *(((u64 *) userptr) + offt);
-		} while (u64_stats_fetch_retry(syncp, start));
-
-		res += v_bh + v_user;
+		res += v;
 	}
 	return res;
 }
@@ -1489,25 +1482,28 @@ int snmp_mib_init(void __percpu *ptr[2], size_t mibsize, size_t align)
 	BUG_ON(ptr == NULL);
 	ptr[0] = __alloc_percpu(mibsize, align);
 	if (!ptr[0])
-		goto err0;
+		return -ENOMEM;
+#if SNMP_ARRAY_SZ == 2
 	ptr[1] = __alloc_percpu(mibsize, align);
-	if (!ptr[1])
-		goto err1;
+	if (!ptr[1]) {
+		free_percpu(ptr[0]);
+		ptr[0] = NULL;
+		return -ENOMEM;
+	}
+#endif
 	return 0;
-err1:
-	free_percpu(ptr[0]);
-	ptr[0] = NULL;
-err0:
-	return -ENOMEM;
 }
 EXPORT_SYMBOL_GPL(snmp_mib_init);
 
-void snmp_mib_free(void __percpu *ptr[2])
+void snmp_mib_free(void __percpu *ptr[SNMP_ARRAY_SZ])
 {
+	int i;
+
 	BUG_ON(ptr == NULL);
-	free_percpu(ptr[0]);
-	free_percpu(ptr[1]);
-	ptr[0] = ptr[1] = NULL;
+	for (i = 0; i < SNMP_ARRAY_SZ; i++) {
+		free_percpu(ptr[i]);
+		ptr[i] = NULL;
+	}
 }
 EXPORT_SYMBOL_GPL(snmp_mib_free);
 
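The af_inet.c SNMP hunks replace hard-coded mib[0]/mib[1] pairs with loops bounded by SNMP_ARRAY_SZ, so the same code works whether the per-protocol statistics use one or two per-cpu arrays. A userspace model of the folding loop, with sizes and layout invented for the demonstration:

/* Sketch only: fold a counter field across all CPUs and all mib arrays,
 * as the reworked snmp_fold_field() does. */
#include <stdio.h>

#define SNMP_ARRAY_SZ 2
#define NR_CPUS 4
#define NR_FIELDS 3

static unsigned long mib[SNMP_ARRAY_SZ][NR_CPUS][NR_FIELDS];

static unsigned long snmp_fold_field(int offt)
{
	unsigned long res = 0;
	int i, j;

	for (i = 0; i < NR_CPUS; i++)
		for (j = 0; j < SNMP_ARRAY_SZ; j++)
			res += mib[j][i][offt];
	return res;
}

int main(void)
{
	mib[0][1][2] = 5;
	mib[1][3][2] = 7;
	printf("field 2 total: %lu\n", snmp_fold_field(2));
	return 0;
}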
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 1b74d3b6437..96a164aa136 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -97,7 +97,6 @@
 #include <linux/init.h>
 #include <linux/net.h>
 #include <linux/rcupdate.h>
-#include <linux/jhash.h>
 #include <linux/slab.h>
 #ifdef CONFIG_SYSCTL
 #include <linux/sysctl.h>
@@ -139,8 +138,6 @@ static const struct neigh_ops arp_generic_ops = {
 	.error_report =		arp_error_report,
 	.output =		neigh_resolve_output,
 	.connected_output =	neigh_connected_output,
-	.hh_output =		dev_queue_xmit,
-	.queue_xmit =		dev_queue_xmit,
 };
 
 static const struct neigh_ops arp_hh_ops = {
@@ -149,16 +146,12 @@ static const struct neigh_ops arp_hh_ops = {
 	.error_report =		arp_error_report,
 	.output =		neigh_resolve_output,
 	.connected_output =	neigh_resolve_output,
-	.hh_output =		dev_queue_xmit,
-	.queue_xmit =		dev_queue_xmit,
 };
 
 static const struct neigh_ops arp_direct_ops = {
 	.family =		AF_INET,
-	.output =		dev_queue_xmit,
-	.connected_output =	dev_queue_xmit,
-	.hh_output =		dev_queue_xmit,
-	.queue_xmit =		dev_queue_xmit,
+	.output =		neigh_direct_output,
+	.connected_output =	neigh_direct_output,
 };
 
 static const struct neigh_ops arp_broken_ops = {
@@ -167,8 +160,6 @@ static const struct neigh_ops arp_broken_ops = {
 	.error_report =		arp_error_report,
 	.output =		neigh_compat_output,
 	.connected_output =	neigh_compat_output,
-	.hh_output =		dev_queue_xmit,
-	.queue_xmit =		dev_queue_xmit,
 };
 
 struct neigh_table arp_tbl = {
@@ -232,7 +223,7 @@ static u32 arp_hash(const void *pkey,
 		    const struct net_device *dev,
 		    __u32 hash_rnd)
 {
-	return jhash_2words(*(u32 *)pkey, dev->ifindex, hash_rnd);
+	return arp_hashfn(*(u32 *)pkey, dev, hash_rnd);
 }
 
 static int arp_constructor(struct neighbour *neigh)
@@ -259,7 +250,7 @@ static int arp_constructor(struct neighbour *neigh)
 	if (!dev->header_ops) {
 		neigh->nud_state = NUD_NOARP;
 		neigh->ops = &arp_direct_ops;
-		neigh->output = neigh->ops->queue_xmit;
+		neigh->output = neigh_direct_output;
 	} else {
 		/* Good devices (checked by reading texts, but only Ethernet is
 		   tested)
@@ -518,30 +509,6 @@ EXPORT_SYMBOL(arp_find);
 
 /* END OF OBSOLETE FUNCTIONS */
 
-int arp_bind_neighbour(struct dst_entry *dst)
-{
-	struct net_device *dev = dst->dev;
-	struct neighbour *n = dst->neighbour;
-
-	if (dev == NULL)
-		return -EINVAL;
-	if (n == NULL) {
-		__be32 nexthop = ((struct rtable *)dst)->rt_gateway;
-		if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
-			nexthop = 0;
-		n = __neigh_lookup_errno(
-#if defined(CONFIG_ATM_CLIP) || defined(CONFIG_ATM_CLIP_MODULE)
-					 dev->type == ARPHRD_ATM ?
-					 clip_tbl_hook :
-#endif
-					 &arp_tbl, &nexthop, dev);
-		if (IS_ERR(n))
-			return PTR_ERR(n);
-		dst->neighbour = n;
-	}
-	return 0;
-}
-
 /*
  *	Check if we can use proxy ARP for this path
 */
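The arp.c hunks shrink every neigh_ops table: the hh_output and queue_xmit slots are deleted, direct transmission goes through neigh_direct_output(), and arp_bind_neighbour() disappears along with the last direct dst->neighbour write in this file. A userspace sketch of the slimmed ops shape, where the output callback receives the neighbour explicitly (types are stand-ins, not the kernel's):

/* Sketch only: one output callback per ops table, taking the neighbour
 * as an argument instead of relying on per-slot transmit hooks. */
#include <stdio.h>

struct neighbour;

struct neigh_ops {
	int (*output)(struct neighbour *n, const char *skb);
};

struct neighbour {
	const struct neigh_ops *ops;
	const char *name;
};

static int neigh_direct_output(struct neighbour *n, const char *skb)
{
	printf("%s: xmit %s directly\n", n->name, skb);
	return 0;
}

static const struct neigh_ops arp_direct_ops = {
	.output = neigh_direct_output,
};

int main(void)
{
	struct neighbour n = { &arp_direct_ops, "eth0" };

	return n.ops->output(&n, "pkt");
}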
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 0d4a184af16..37b3c188d8b 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1833,8 +1833,8 @@ void __init devinet_init(void)
 
 	rtnl_af_register(&inet_af_ops);
 
-	rtnl_register(PF_INET, RTM_NEWADDR, inet_rtm_newaddr, NULL);
-	rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL);
-	rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr);
+	rtnl_register(PF_INET, RTM_NEWADDR, inet_rtm_newaddr, NULL, NULL);
+	rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL, NULL);
+	rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr, NULL);
 }
 
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 22524716fe7..92fc5f69f5d 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -1124,9 +1124,9 @@ static struct pernet_operations fib_net_ops = {
 
 void __init ip_fib_init(void)
 {
-	rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL);
-	rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL);
-	rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib);
+	rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL);
+	rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL);
+	rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL);
 
 	register_pernet_subsys(&fib_net_ops);
 	register_netdevice_notifier(&fib_netdev_notifier);
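The devinet.c and fib_frontend.c hunks, like the dn_route.c one earlier, only append an extra NULL argument to each rtnl_register() call: the function gained a third callback slot in this merge window. From memory that slot is a dump-size calculator ("calcit"); treat the name and the signatures below as assumptions. The sketch only shows why existing callers can simply pass NULL:

/* Sketch only: an appended optional callback slot; callers that do not
 * provide it pass NULL, exactly as in the hunks above. */
#include <stddef.h>
#include <stdio.h>

typedef int (*doit_t)(void);
typedef int (*dumpit_t)(void);
typedef size_t (*calcit_t)(void);	/* assumed name; see lead-in */

static void rtnl_register(int protocol, int msgtype,
			  doit_t doit, dumpit_t dumpit, calcit_t calcit)
{
	(void)doit;
	(void)dumpit;
	printf("proto %d msg %d registered%s\n", protocol, msgtype,
	       calcit ? " (with calcit)" : "");
}

static int get_route(void) { return 0; }

int main(void)
{
	rtnl_register(2 /* PF_INET */, 26 /* RTM_GETROUTE */,
		      get_route, NULL, NULL);
	return 0;
}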
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 58c25ea5a5c..de9e2978476 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -110,9 +110,10 @@ struct leaf {
 
 struct leaf_info {
 	struct hlist_node hlist;
-	struct rcu_head rcu;
 	int plen;
+	u32 mask_plen; /* ntohl(inet_make_mask(plen)) */
 	struct list_head falh;
+	struct rcu_head rcu;
 };
 
 struct tnode {
@@ -451,6 +452,7 @@ static struct leaf_info *leaf_info_new(int plen)
 	struct leaf_info *li = kmalloc(sizeof(struct leaf_info), GFP_KERNEL);
 	if (li) {
 		li->plen = plen;
+		li->mask_plen = ntohl(inet_make_mask(plen));
 		INIT_LIST_HEAD(&li->falh);
 	}
 	return li;
@@ -1359,10 +1361,8 @@ static int check_leaf(struct fib_table *tb, struct trie *t, struct leaf *l,
 
 	hlist_for_each_entry_rcu(li, node, hhead, hlist) {
 		struct fib_alias *fa;
-		int plen = li->plen;
-		__be32 mask = inet_make_mask(plen);
 
-		if (l->key != (key & ntohl(mask)))
+		if (l->key != (key & li->mask_plen))
 			continue;
 
 		list_for_each_entry_rcu(fa, &li->falh, fa_list) {
@@ -1394,7 +1394,7 @@ static int check_leaf(struct fib_table *tb, struct trie *t, struct leaf *l,
 #ifdef CONFIG_IP_FIB_TRIE_STATS
 			t->stats.semantic_match_passed++;
 #endif
-			res->prefixlen = plen;
+			res->prefixlen = li->plen;
 			res->nh_sel = nhsel;
 			res->type = fa->fa_type;
 			res->scope = fa->fa_info->fib_scope;
@@ -1402,7 +1402,7 @@ static int check_leaf(struct fib_table *tb, struct trie *t, struct leaf *l,
 			res->table = tb;
 			res->fa_head = &li->falh;
 			if (!(fib_flags & FIB_LOOKUP_NOREF))
-				atomic_inc(&res->fi->fib_clntref);
+				atomic_inc(&fi->fib_clntref);
 			return 0;
 		}
 	}
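The fib_trie.c hunks cache ntohl(inet_make_mask(plen)) in the leaf_info at allocation time, so the hot check_leaf() path compares against a precomputed mask instead of rebuilding and byte-swapping it on every iteration. A userspace model of the precompute-once idea:

/* Sketch only: build the host-order prefix mask once per leaf_info,
 * then reuse it in the lookup loop. */
#include <stdint.h>
#include <stdio.h>

struct leaf_info {
	int plen;
	uint32_t mask_plen;	/* host-order mask for plen */
};

static uint32_t make_mask(int plen)
{
	return plen ? ~0u << (32 - plen) : 0;
}

static void leaf_info_init(struct leaf_info *li, int plen)
{
	li->plen = plen;
	li->mask_plen = make_mask(plen);	/* cached once */
}

static int leaf_matches(const struct leaf_info *li, uint32_t lkey, uint32_t key)
{
	return lkey == (key & li->mask_plen);	/* no per-lookup mask build */
}

int main(void)
{
	struct leaf_info li;

	leaf_info_init(&li, 24);
	printf("match: %d\n", leaf_matches(&li, 0x0a000100, 0x0a000101));
	return 0;
}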
diff --git a/net/ipv4/gre.c b/net/ipv4/gre.c
index c6933f2ea31..9dbe10875fb 100644
--- a/net/ipv4/gre.c
+++ b/net/ipv4/gre.c
@@ -16,7 +16,6 @@
 #include <linux/skbuff.h>
 #include <linux/in.h>
 #include <linux/netdevice.h>
-#include <linux/version.h>
 #include <linux/spinlock.h>
 #include <net/protocol.h>
 #include <net/gre.h>
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 5395e45dcce..23ef31baa1a 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -380,6 +380,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
 					struct icmp_bxm *param)
 {
 	struct rtable *rt, *rt2;
+	struct flowi4 fl4_dec;
 	int err;
 
 	memset(fl4, 0, sizeof(*fl4));
@@ -408,19 +409,19 @@ static struct rtable *icmp_route_lookup(struct net *net,
 	} else
 		return rt;
 
-	err = xfrm_decode_session_reverse(skb_in, flowi4_to_flowi(fl4), AF_INET);
+	err = xfrm_decode_session_reverse(skb_in, flowi4_to_flowi(&fl4_dec), AF_INET);
 	if (err)
 		goto relookup_failed;
 
-	if (inet_addr_type(net, fl4->saddr) == RTN_LOCAL) {
-		rt2 = __ip_route_output_key(net, fl4);
+	if (inet_addr_type(net, fl4_dec.saddr) == RTN_LOCAL) {
+		rt2 = __ip_route_output_key(net, &fl4_dec);
 		if (IS_ERR(rt2))
 			err = PTR_ERR(rt2);
 	} else {
 		struct flowi4 fl4_2 = {};
 		unsigned long orefdst;
 
-		fl4_2.daddr = fl4->saddr;
+		fl4_2.daddr = fl4_dec.saddr;
 		rt2 = ip_route_output_key(net, &fl4_2);
 		if (IS_ERR(rt2)) {
 			err = PTR_ERR(rt2);
@@ -428,7 +429,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
 		}
 		/* Ugh! */
 		orefdst = skb_in->_skb_refdst; /* save old refdst */
-		err = ip_route_input(skb_in, fl4->daddr, fl4->saddr,
+		err = ip_route_input(skb_in, fl4_dec.daddr, fl4_dec.saddr,
 				     RT_TOS(tos), rt2->dst.dev);
 
 		dst_release(&rt2->dst);
@@ -440,10 +441,11 @@ static struct rtable *icmp_route_lookup(struct net *net,
 		goto relookup_failed;
 
 	rt2 = (struct rtable *) xfrm_lookup(net, &rt2->dst,
-					    flowi4_to_flowi(fl4), NULL,
+					    flowi4_to_flowi(&fl4_dec), NULL,
 					    XFRM_LOOKUP_ICMP);
 	if (!IS_ERR(rt2)) {
 		dst_release(&rt->dst);
+		memcpy(fl4, &fl4_dec, sizeof(*fl4));
 		rt = rt2;
 	} else if (PTR_ERR(rt2) == -EPERM) {
 		if (rt)
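The icmp.c hunk makes the relookup work on a local copy of the flow key (fl4_dec) and copies it back into the caller's fl4 only once the xfrm lookup succeeds, so a failed relookup can no longer clobber the caller's key. A userspace sketch of that work-on-copy, commit-on-success pattern:

/* Sketch only: mutate a local copy during a speculative lookup and
 * memcpy it over the caller's structure only on success. */
#include <stdio.h>
#include <string.h>

struct flow_key {
	unsigned int saddr, daddr;
};

static int speculative_lookup(struct flow_key *fk)
{
	fk->saddr ^= 0xffff;	/* mutate the copy while probing */
	return 0;		/* pretend the lookup succeeded */
}

static int route_lookup(struct flow_key *fl4)
{
	struct flow_key fl4_dec = *fl4;	/* work on a copy */

	if (speculative_lookup(&fl4_dec) == 0) {
		memcpy(fl4, &fl4_dec, sizeof(*fl4));	/* commit */
		return 0;
	}
	return -1;	/* fl4 unchanged on failure */
}

int main(void)
{
	struct flow_key k = { 1, 2 };

	route_lookup(&k);
	printf("saddr now %u\n", k.saddr);
	return 0;
}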
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 3267d389843..389a2e6a17f 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -869,7 +869,7 @@ static int inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 	}
 
 		return netlink_dump_start(idiagnl, skb, nlh,
-					  inet_diag_dump, NULL);
+					  inet_diag_dump, NULL, 0);
 	}
 
 	return inet_diag_get_exact(skb, nlh);
diff --git a/net/ipv4/inet_lro.c b/net/ipv4/inet_lro.c
index 85a0f75dae6..ef7ae6049a5 100644
--- a/net/ipv4/inet_lro.c
+++ b/net/ipv4/inet_lro.c
@@ -146,8 +146,7 @@ static __wsum lro_tcp_data_csum(struct iphdr *iph, struct tcphdr *tcph, int len)
 }
 
 static void lro_init_desc(struct net_lro_desc *lro_desc, struct sk_buff *skb,
-			  struct iphdr *iph, struct tcphdr *tcph,
-			  u16 vlan_tag, struct vlan_group *vgrp)
+			  struct iphdr *iph, struct tcphdr *tcph)
 {
 	int nr_frags;
 	__be32 *ptr;
@@ -173,8 +172,6 @@ static void lro_init_desc(struct net_lro_desc *lro_desc, struct sk_buff *skb,
 	}
 
 	lro_desc->mss = tcp_data_len;
-	lro_desc->vgrp = vgrp;
-	lro_desc->vlan_tag = vlan_tag;
 	lro_desc->active = 1;
 
 	lro_desc->data_csum = lro_tcp_data_csum(iph, tcph,
@@ -309,29 +306,17 @@ static void lro_flush(struct net_lro_mgr *lro_mgr,
 
 	skb_shinfo(lro_desc->parent)->gso_size = lro_desc->mss;
 
-	if (lro_desc->vgrp) {
-		if (lro_mgr->features & LRO_F_NAPI)
-			vlan_hwaccel_receive_skb(lro_desc->parent,
-						 lro_desc->vgrp,
-						 lro_desc->vlan_tag);
-		else
-			vlan_hwaccel_rx(lro_desc->parent,
-					lro_desc->vgrp,
-					lro_desc->vlan_tag);
-
-	} else {
-		if (lro_mgr->features & LRO_F_NAPI)
-			netif_receive_skb(lro_desc->parent);
-		else
-			netif_rx(lro_desc->parent);
-	}
+	if (lro_mgr->features & LRO_F_NAPI)
+		netif_receive_skb(lro_desc->parent);
+	else
+		netif_rx(lro_desc->parent);
 
 	LRO_INC_STATS(lro_mgr, flushed);
 	lro_clear_desc(lro_desc);
 }
 
 static int __lro_proc_skb(struct net_lro_mgr *lro_mgr, struct sk_buff *skb,
-			  struct vlan_group *vgrp, u16 vlan_tag, void *priv)
+			  void *priv)
 {
 	struct net_lro_desc *lro_desc;
 	struct iphdr *iph;
@@ -360,7 +345,7 @@ static int __lro_proc_skb(struct net_lro_mgr *lro_mgr, struct sk_buff *skb,
 			goto out;
 
 		skb->ip_summed = lro_mgr->ip_summed_aggr;
-		lro_init_desc(lro_desc, skb, iph, tcph, vlan_tag, vgrp);
+		lro_init_desc(lro_desc, skb, iph, tcph);
 		LRO_INC_STATS(lro_mgr, aggregated);
 		return 0;
 	}
@@ -433,8 +418,7 @@ static struct sk_buff *lro_gen_skb(struct net_lro_mgr *lro_mgr,
433static struct sk_buff *__lro_proc_segment(struct net_lro_mgr *lro_mgr, 418static struct sk_buff *__lro_proc_segment(struct net_lro_mgr *lro_mgr,
434 struct skb_frag_struct *frags, 419 struct skb_frag_struct *frags,
435 int len, int true_size, 420 int len, int true_size,
436 struct vlan_group *vgrp, 421 void *priv, __wsum sum)
437 u16 vlan_tag, void *priv, __wsum sum)
438{ 422{
439 struct net_lro_desc *lro_desc; 423 struct net_lro_desc *lro_desc;
440 struct iphdr *iph; 424 struct iphdr *iph;
@@ -480,7 +464,7 @@ static struct sk_buff *__lro_proc_segment(struct net_lro_mgr *lro_mgr,
480 tcph = (void *)((u8 *)skb->data + vlan_hdr_len 464 tcph = (void *)((u8 *)skb->data + vlan_hdr_len
481 + IP_HDR_LEN(iph)); 465 + IP_HDR_LEN(iph));
482 466
483 lro_init_desc(lro_desc, skb, iph, tcph, 0, NULL); 467 lro_init_desc(lro_desc, skb, iph, tcph);
484 LRO_INC_STATS(lro_mgr, aggregated); 468 LRO_INC_STATS(lro_mgr, aggregated);
485 return NULL; 469 return NULL;
486 } 470 }
@@ -514,7 +498,7 @@ void lro_receive_skb(struct net_lro_mgr *lro_mgr,
514 struct sk_buff *skb, 498 struct sk_buff *skb,
515 void *priv) 499 void *priv)
516{ 500{
517 if (__lro_proc_skb(lro_mgr, skb, NULL, 0, priv)) { 501 if (__lro_proc_skb(lro_mgr, skb, priv)) {
518 if (lro_mgr->features & LRO_F_NAPI) 502 if (lro_mgr->features & LRO_F_NAPI)
519 netif_receive_skb(skb); 503 netif_receive_skb(skb);
520 else 504 else
@@ -523,29 +507,13 @@ void lro_receive_skb(struct net_lro_mgr *lro_mgr,
523} 507}
524EXPORT_SYMBOL(lro_receive_skb); 508EXPORT_SYMBOL(lro_receive_skb);
525 509
526void lro_vlan_hwaccel_receive_skb(struct net_lro_mgr *lro_mgr,
527 struct sk_buff *skb,
528 struct vlan_group *vgrp,
529 u16 vlan_tag,
530 void *priv)
531{
532 if (__lro_proc_skb(lro_mgr, skb, vgrp, vlan_tag, priv)) {
533 if (lro_mgr->features & LRO_F_NAPI)
534 vlan_hwaccel_receive_skb(skb, vgrp, vlan_tag);
535 else
536 vlan_hwaccel_rx(skb, vgrp, vlan_tag);
537 }
538}
539EXPORT_SYMBOL(lro_vlan_hwaccel_receive_skb);
540
541void lro_receive_frags(struct net_lro_mgr *lro_mgr, 510void lro_receive_frags(struct net_lro_mgr *lro_mgr,
542 struct skb_frag_struct *frags, 511 struct skb_frag_struct *frags,
543 int len, int true_size, void *priv, __wsum sum) 512 int len, int true_size, void *priv, __wsum sum)
544{ 513{
545 struct sk_buff *skb; 514 struct sk_buff *skb;
546 515
547 skb = __lro_proc_segment(lro_mgr, frags, len, true_size, NULL, 0, 516 skb = __lro_proc_segment(lro_mgr, frags, len, true_size, priv, sum);
548 priv, sum);
549 if (!skb) 517 if (!skb)
550 return; 518 return;
551 519
@@ -556,26 +524,6 @@ void lro_receive_frags(struct net_lro_mgr *lro_mgr,
556} 524}
557EXPORT_SYMBOL(lro_receive_frags); 525EXPORT_SYMBOL(lro_receive_frags);
558 526
559void lro_vlan_hwaccel_receive_frags(struct net_lro_mgr *lro_mgr,
560 struct skb_frag_struct *frags,
561 int len, int true_size,
562 struct vlan_group *vgrp,
563 u16 vlan_tag, void *priv, __wsum sum)
564{
565 struct sk_buff *skb;
566
567 skb = __lro_proc_segment(lro_mgr, frags, len, true_size, vgrp,
568 vlan_tag, priv, sum);
569 if (!skb)
570 return;
571
572 if (lro_mgr->features & LRO_F_NAPI)
573 vlan_hwaccel_receive_skb(skb, vgrp, vlan_tag);
574 else
575 vlan_hwaccel_rx(skb, vgrp, vlan_tag);
576}
577EXPORT_SYMBOL(lro_vlan_hwaccel_receive_frags);
578
579void lro_flush_all(struct net_lro_mgr *lro_mgr) 527void lro_flush_all(struct net_lro_mgr *lro_mgr)
580{ 528{
581 int i; 529 int i;
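
With VLAN receive acceleration folded into the core receive path, the lro_vlan_hwaccel_* entry points above are gone and drivers hand every aggregated packet to the plain receive helpers. A minimal driver-side sketch, assuming a hypothetical mydrv_ring that embeds the net_lro_mgr used above:

	static void mydrv_rx_skb(struct mydrv_ring *ring, struct sk_buff *skb)
	{
		/* any VLAN tag now travels in the skb itself; no vgrp/tag args */
		lro_receive_skb(&ring->lro_mgr, skb, ring);
	}
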
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index ce616d92cc5..e38213817d0 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -54,15 +54,11 @@
54 * 1. Nodes may appear in the tree only with the pool lock held. 54 * 1. Nodes may appear in the tree only with the pool lock held.
55 * 2. Nodes may disappear from the tree only with the pool lock held 55 * 2. Nodes may disappear from the tree only with the pool lock held
56 * AND reference count being 0. 56 * AND reference count being 0.
57 * 3. Nodes appears and disappears from unused node list only under 57 * 3. Global variable peer_total is modified under the pool lock.
58 * "inet_peer_unused_lock". 58 * 4. struct inet_peer fields modification:
59 * 4. Global variable peer_total is modified under the pool lock.
60 * 5. struct inet_peer fields modification:
61 * avl_left, avl_right, avl_parent, avl_height: pool lock 59 * avl_left, avl_right, avl_parent, avl_height: pool lock
62 * unused: unused node list lock
63 * refcnt: atomically against modifications on other CPU; 60 * refcnt: atomically against modifications on other CPU;
64 * usually under some other lock to prevent node disappearing 61 * usually under some other lock to prevent node disappearing
65 * dtime: unused node list lock
66 * daddr: unchangeable 62 * daddr: unchangeable
67 * ip_id_count: atomic value (no lock needed) 63 * ip_id_count: atomic value (no lock needed)
68 */ 64 */
@@ -104,19 +100,6 @@ int inet_peer_threshold __read_mostly = 65536 + 128; /* start to throw entries m
104 * aggressively at this stage */ 100 * aggressively at this stage */
105int inet_peer_minttl __read_mostly = 120 * HZ; /* TTL under high load: 120 sec */ 101int inet_peer_minttl __read_mostly = 120 * HZ; /* TTL under high load: 120 sec */
106int inet_peer_maxttl __read_mostly = 10 * 60 * HZ; /* usual time to live: 10 min */ 102int inet_peer_maxttl __read_mostly = 10 * 60 * HZ; /* usual time to live: 10 min */
107int inet_peer_gc_mintime __read_mostly = 10 * HZ;
108int inet_peer_gc_maxtime __read_mostly = 120 * HZ;
109
110static struct {
111 struct list_head list;
112 spinlock_t lock;
113} unused_peers = {
114 .list = LIST_HEAD_INIT(unused_peers.list),
115 .lock = __SPIN_LOCK_UNLOCKED(unused_peers.lock),
116};
117
118static void peer_check_expire(unsigned long dummy);
119static DEFINE_TIMER(peer_periodic_timer, peer_check_expire, 0, 0);
120 103
121 104
122/* Called from ip_output.c:ip_init */ 105/* Called from ip_output.c:ip_init */
@@ -142,21 +125,6 @@ void __init inet_initpeers(void)
142 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, 125 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
143 NULL); 126 NULL);
144 127
145 /* All the timers, started at system startup tend
146 to synchronize. Perturb it a bit.
147 */
148 peer_periodic_timer.expires = jiffies
149 + net_random() % inet_peer_gc_maxtime
150 + inet_peer_gc_maxtime;
151 add_timer(&peer_periodic_timer);
152}
153
154/* Called with or without local BH being disabled. */
155static void unlink_from_unused(struct inet_peer *p)
156{
157 spin_lock_bh(&unused_peers.lock);
158 list_del_init(&p->unused);
159 spin_unlock_bh(&unused_peers.lock);
160} 128}
161 129
162static int addr_compare(const struct inetpeer_addr *a, 130static int addr_compare(const struct inetpeer_addr *a,
@@ -203,20 +171,6 @@ static int addr_compare(const struct inetpeer_addr *a,
203 u; \ 171 u; \
204}) 172})
205 173
206static bool atomic_add_unless_return(atomic_t *ptr, int a, int u, int *newv)
207{
208 int cur, old = atomic_read(ptr);
209
210 while (old != u) {
211 *newv = old + a;
212 cur = atomic_cmpxchg(ptr, old, *newv);
213 if (cur == old)
214 return true;
215 old = cur;
216 }
217 return false;
218}
219
220/* 174/*
221 * Called with rcu_read_lock() 175 * Called with rcu_read_lock()
222 * Because we hold no lock against a writer, it's quite possible we fall 176
@@ -225,8 +179,7 @@ static bool atomic_add_unless_return(atomic_t *ptr, int a, int u, int *newv)
225 * We exit from this function if number of links exceeds PEER_MAXDEPTH 179 * We exit from this function if number of links exceeds PEER_MAXDEPTH
226 */ 180 */
227static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr, 181static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr,
228 struct inet_peer_base *base, 182 struct inet_peer_base *base)
229 int *newrefcnt)
230{ 183{
231 struct inet_peer *u = rcu_dereference(base->root); 184 struct inet_peer *u = rcu_dereference(base->root);
232 int count = 0; 185 int count = 0;
@@ -235,11 +188,9 @@ static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr,
235 int cmp = addr_compare(daddr, &u->daddr); 188 int cmp = addr_compare(daddr, &u->daddr);
236 if (cmp == 0) { 189 if (cmp == 0) {
237 /* Before taking a reference, check if this entry was 190 /* Before taking a reference, check if this entry was
238 * deleted, unlink_from_pool() sets refcnt=-1 to make 191 * deleted (refcnt=-1)
239 * distinction between an unused entry (refcnt=0) and
240 * a freed one.
241 */ 192 */
242 if (!atomic_add_unless_return(&u->refcnt, 1, -1, newrefcnt)) 193 if (!atomic_add_unless(&u->refcnt, 1, -1))
243 u = NULL; 194 u = NULL;
244 return u; 195 return u;
245 } 196 }
@@ -366,137 +317,99 @@ static void inetpeer_free_rcu(struct rcu_head *head)
366 kmem_cache_free(peer_cachep, container_of(head, struct inet_peer, rcu)); 317 kmem_cache_free(peer_cachep, container_of(head, struct inet_peer, rcu));
367} 318}
368 319
369/* May be called with local BH enabled. */
370static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base, 320static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base,
371 struct inet_peer __rcu **stack[PEER_MAXDEPTH]) 321 struct inet_peer __rcu **stack[PEER_MAXDEPTH])
372{ 322{
373 int do_free; 323 struct inet_peer __rcu ***stackptr, ***delp;
374 324
375 do_free = 0; 325 if (lookup(&p->daddr, stack, base) != p)
376 326 BUG();
377 write_seqlock_bh(&base->lock); 327 delp = stackptr - 1; /* *delp[0] == p */
378 /* Check the reference counter. It was artificially incremented by 1 328 if (p->avl_left == peer_avl_empty_rcu) {
379 * in cleanup() function to prevent sudden disappearing. If we can 329 *delp[0] = p->avl_right;
380 * atomically (because of lockless readers) take this last reference, 330 --stackptr;
381 * it's safe to remove the node and free it later. 331 } else {
382 * We use refcnt=-1 to alert lockless readers this entry is deleted. 332 /* look for a node to insert instead of p */
383 */ 333 struct inet_peer *t;
384 if (atomic_cmpxchg(&p->refcnt, 1, -1) == 1) { 334 t = lookup_rightempty(p, base);
385 struct inet_peer __rcu ***stackptr, ***delp; 335 BUG_ON(rcu_deref_locked(*stackptr[-1], base) != t);
386 if (lookup(&p->daddr, stack, base) != p) 336 **--stackptr = t->avl_left;
387 BUG(); 337 /* t is removed, t->daddr > x->daddr for any
388 delp = stackptr - 1; /* *delp[0] == p */ 338 * x in p->avl_left subtree.
389 if (p->avl_left == peer_avl_empty_rcu) { 339 * Put t in the old place of p. */
390 *delp[0] = p->avl_right; 340 RCU_INIT_POINTER(*delp[0], t);
391 --stackptr; 341 t->avl_left = p->avl_left;
392 } else { 342 t->avl_right = p->avl_right;
393 /* look for a node to insert instead of p */ 343 t->avl_height = p->avl_height;
394 struct inet_peer *t; 344 BUG_ON(delp[1] != &p->avl_left);
395 t = lookup_rightempty(p, base); 345 delp[1] = &t->avl_left; /* was &p->avl_left */
396 BUG_ON(rcu_deref_locked(*stackptr[-1], base) != t);
397 **--stackptr = t->avl_left;
398 /* t is removed, t->daddr > x->daddr for any
399 * x in p->avl_left subtree.
400 * Put t in the old place of p. */
401 RCU_INIT_POINTER(*delp[0], t);
402 t->avl_left = p->avl_left;
403 t->avl_right = p->avl_right;
404 t->avl_height = p->avl_height;
405 BUG_ON(delp[1] != &p->avl_left);
406 delp[1] = &t->avl_left; /* was &p->avl_left */
407 }
408 peer_avl_rebalance(stack, stackptr, base);
409 base->total--;
410 do_free = 1;
411 } 346 }
412 write_sequnlock_bh(&base->lock); 347 peer_avl_rebalance(stack, stackptr, base);
413 348 base->total--;
414 if (do_free) 349 call_rcu(&p->rcu, inetpeer_free_rcu);
415 call_rcu(&p->rcu, inetpeer_free_rcu);
416 else
417 /* The node is used again. Decrease the reference counter
418 * back. The loop "cleanup -> unlink_from_unused
419 * -> unlink_from_pool -> putpeer -> link_to_unused
420 * -> cleanup (for the same node)"
421 * doesn't really exist because the entry will have a
422 * recent deletion time and will not be cleaned again soon.
423 */
424 inet_putpeer(p);
425} 350}
426 351
427static struct inet_peer_base *family_to_base(int family) 352static struct inet_peer_base *family_to_base(int family)
428{ 353{
429 return (family == AF_INET ? &v4_peers : &v6_peers); 354 return family == AF_INET ? &v4_peers : &v6_peers;
430}
431
432static struct inet_peer_base *peer_to_base(struct inet_peer *p)
433{
434 return family_to_base(p->daddr.family);
435} 355}
436 356
437/* May be called with local BH enabled. */ 357/* perform garbage collect on all items stacked during a lookup */
438static int cleanup_once(unsigned long ttl, struct inet_peer __rcu **stack[PEER_MAXDEPTH]) 358static int inet_peer_gc(struct inet_peer_base *base,
359 struct inet_peer __rcu **stack[PEER_MAXDEPTH],
360 struct inet_peer __rcu ***stackptr)
439{ 361{
440 struct inet_peer *p = NULL; 362 struct inet_peer *p, *gchead = NULL;
441 363 __u32 delta, ttl;
442 /* Remove the first entry from the list of unused nodes. */ 364 int cnt = 0;
443 spin_lock_bh(&unused_peers.lock);
444 if (!list_empty(&unused_peers.list)) {
445 __u32 delta;
446
447 p = list_first_entry(&unused_peers.list, struct inet_peer, unused);
448 delta = (__u32)jiffies - p->dtime;
449 365
450 if (delta < ttl) { 366 if (base->total >= inet_peer_threshold)
451 /* Do not prune fresh entries. */ 367 ttl = 0; /* be aggressive */
452 spin_unlock_bh(&unused_peers.lock); 368 else
453 return -1; 369 ttl = inet_peer_maxttl
370 - (inet_peer_maxttl - inet_peer_minttl) / HZ *
371 base->total / inet_peer_threshold * HZ;
372 stackptr--; /* last stack slot is peer_avl_empty */
373 while (stackptr > stack) {
374 stackptr--;
375 p = rcu_deref_locked(**stackptr, base);
376 if (atomic_read(&p->refcnt) == 0) {
377 smp_rmb();
378 delta = (__u32)jiffies - p->dtime;
379 if (delta >= ttl &&
380 atomic_cmpxchg(&p->refcnt, 0, -1) == 0) {
381 p->gc_next = gchead;
382 gchead = p;
383 }
454 } 384 }
455
456 list_del_init(&p->unused);
457
458 /* Grab an extra reference to prevent node disappearing
459 * before unlink_from_pool() call. */
460 atomic_inc(&p->refcnt);
461 } 385 }
462 spin_unlock_bh(&unused_peers.lock); 386 while ((p = gchead) != NULL) {
463 387 gchead = p->gc_next;
464 if (p == NULL) 388 cnt++;
465 /* It means that the total number of USED entries has 389 unlink_from_pool(p, base, stack);
466 * grown over inet_peer_threshold. It shouldn't really 390 }
467 * happen because of entry limits in route cache. */ 391 return cnt;
468 return -1;
469
470 unlink_from_pool(p, peer_to_base(p), stack);
471 return 0;
472} 392}
473 393
474/* Called with or without local BH being disabled. */ 394struct inet_peer *inet_getpeer(const struct inetpeer_addr *daddr, int create)
475struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
476{ 395{
477 struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr; 396 struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr;
478 struct inet_peer_base *base = family_to_base(daddr->family); 397 struct inet_peer_base *base = family_to_base(daddr->family);
479 struct inet_peer *p; 398 struct inet_peer *p;
480 unsigned int sequence; 399 unsigned int sequence;
481 int invalidated, newrefcnt = 0; 400 int invalidated, gccnt = 0;
482 401
483 /* Look up for the address quickly, lockless. 402 /* Attempt a lockless lookup first.
484 * Because of a concurrent writer, we might not find an existing entry. 403 * Because of a concurrent writer, we might not find an existing entry.
485 */ 404 */
486 rcu_read_lock(); 405 rcu_read_lock();
487 sequence = read_seqbegin(&base->lock); 406 sequence = read_seqbegin(&base->lock);
488 p = lookup_rcu(daddr, base, &newrefcnt); 407 p = lookup_rcu(daddr, base);
489 invalidated = read_seqretry(&base->lock, sequence); 408 invalidated = read_seqretry(&base->lock, sequence);
490 rcu_read_unlock(); 409 rcu_read_unlock();
491 410
492 if (p) { 411 if (p)
493found: /* The existing node has been found.
494 * Remove the entry from unused list if it was there.
495 */
496 if (newrefcnt == 1)
497 unlink_from_unused(p);
498 return p; 412 return p;
499 }
500 413
501 /* If no writer did a change during our lookup, we can return early. */ 414 /* If no writer did a change during our lookup, we can return early. */
502 if (!create && !invalidated) 415 if (!create && !invalidated)
@@ -506,18 +419,27 @@ found: /* The existing node has been found.
506 * At least, nodes should be hot in our cache. 419 * At least, nodes should be hot in our cache.
507 */ 420 */
508 write_seqlock_bh(&base->lock); 421 write_seqlock_bh(&base->lock);
422relookup:
509 p = lookup(daddr, stack, base); 423 p = lookup(daddr, stack, base);
510 if (p != peer_avl_empty) { 424 if (p != peer_avl_empty) {
511 newrefcnt = atomic_inc_return(&p->refcnt); 425 atomic_inc(&p->refcnt);
512 write_sequnlock_bh(&base->lock); 426 write_sequnlock_bh(&base->lock);
513 goto found; 427 return p;
428 }
429 if (!gccnt) {
430 gccnt = inet_peer_gc(base, stack, stackptr);
431 if (gccnt && create)
432 goto relookup;
514 } 433 }
515 p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL; 434 p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL;
516 if (p) { 435 if (p) {
517 p->daddr = *daddr; 436 p->daddr = *daddr;
518 atomic_set(&p->refcnt, 1); 437 atomic_set(&p->refcnt, 1);
519 atomic_set(&p->rid, 0); 438 atomic_set(&p->rid, 0);
520 atomic_set(&p->ip_id_count, secure_ip_id(daddr->addr.a4)); 439 atomic_set(&p->ip_id_count,
440 (daddr->family == AF_INET) ?
441 secure_ip_id(daddr->addr.a4) :
442 secure_ipv6_id(daddr->addr.a6));
521 p->tcp_ts_stamp = 0; 443 p->tcp_ts_stamp = 0;
522 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW; 444 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
523 p->rate_tokens = 0; 445 p->rate_tokens = 0;
@@ -525,7 +447,6 @@ found: /* The existing node has been found.
525 p->pmtu_expires = 0; 447 p->pmtu_expires = 0;
526 p->pmtu_orig = 0; 448 p->pmtu_orig = 0;
527 memset(&p->redirect_learned, 0, sizeof(p->redirect_learned)); 449 memset(&p->redirect_learned, 0, sizeof(p->redirect_learned));
528 INIT_LIST_HEAD(&p->unused);
529 450
530 451
531 /* Link the node. */ 452 /* Link the node. */
@@ -534,63 +455,15 @@ found: /* The existing node has been found.
534 } 455 }
535 write_sequnlock_bh(&base->lock); 456 write_sequnlock_bh(&base->lock);
536 457
537 if (base->total >= inet_peer_threshold)
538 /* Remove one less-recently-used entry. */
539 cleanup_once(0, stack);
540
541 return p; 458 return p;
542} 459}
543
544static int compute_total(void)
545{
546 return v4_peers.total + v6_peers.total;
547}
548EXPORT_SYMBOL_GPL(inet_getpeer); 460EXPORT_SYMBOL_GPL(inet_getpeer);
549 461
550/* Called with local BH disabled. */
551static void peer_check_expire(unsigned long dummy)
552{
553 unsigned long now = jiffies;
554 int ttl, total;
555 struct inet_peer __rcu **stack[PEER_MAXDEPTH];
556
557 total = compute_total();
558 if (total >= inet_peer_threshold)
559 ttl = inet_peer_minttl;
560 else
561 ttl = inet_peer_maxttl
562 - (inet_peer_maxttl - inet_peer_minttl) / HZ *
563 total / inet_peer_threshold * HZ;
564 while (!cleanup_once(ttl, stack)) {
565 if (jiffies != now)
566 break;
567 }
568
569 /* Trigger the timer after inet_peer_gc_mintime .. inet_peer_gc_maxtime
570 * interval depending on the total number of entries (more entries,
571 * less interval). */
572 total = compute_total();
573 if (total >= inet_peer_threshold)
574 peer_periodic_timer.expires = jiffies + inet_peer_gc_mintime;
575 else
576 peer_periodic_timer.expires = jiffies
577 + inet_peer_gc_maxtime
578 - (inet_peer_gc_maxtime - inet_peer_gc_mintime) / HZ *
579 total / inet_peer_threshold * HZ;
580 add_timer(&peer_periodic_timer);
581}
582
583void inet_putpeer(struct inet_peer *p) 462void inet_putpeer(struct inet_peer *p)
584{ 463{
585 local_bh_disable(); 464 p->dtime = (__u32)jiffies;
586 465 smp_mb__before_atomic_dec();
587 if (atomic_dec_and_lock(&p->refcnt, &unused_peers.lock)) { 466 atomic_dec(&p->refcnt);
588 list_add_tail(&p->unused, &unused_peers.list);
589 p->dtime = (__u32)jiffies;
590 spin_unlock(&unused_peers.lock);
591 }
592
593 local_bh_enable();
594} 467}
595EXPORT_SYMBOL_GPL(inet_putpeer); 468EXPORT_SYMBOL_GPL(inet_putpeer);
596 469
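
With the unused list and the periodic timer removed, stale entries are reclaimed opportunistically by inet_peer_gc() along the lookup path, and the reclaim ttl is interpolated from pool occupancy. A worked example of that interpolation, using the defaults visible earlier in this file (minttl = 120*HZ, maxttl = 600*HZ, threshold = 65536 + 128 = 65664) with a half-full pool (base->total == 32832):

	ttl = 600*HZ - (600*HZ - 120*HZ)/HZ * 32832 / 65664 * HZ
	    = 600*HZ - 480 * 32832 / 65664 * HZ
	    = 600*HZ - 240*HZ = 360 seconds

so the ttl falls linearly from maxttl for an empty pool toward minttl as the pool approaches the threshold, and drops to 0 (reclaim anything unreferenced) once the threshold is exceeded.
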
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 0ad6035f636..0e0ab98abc6 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -261,8 +261,9 @@ static void ip_expire(unsigned long arg)
261 * Only an end host needs to send an ICMP 261 * Only an end host needs to send an ICMP
262 * "Fragment Reassembly Timeout" message, per RFC792. 262 * "Fragment Reassembly Timeout" message, per RFC792.
263 */ 263 */
264 if (qp->user == IP_DEFRAG_CONNTRACK_IN && 264 if (qp->user == IP_DEFRAG_AF_PACKET ||
265 skb_rtable(head)->rt_type != RTN_LOCAL) 265 (qp->user == IP_DEFRAG_CONNTRACK_IN &&
266 skb_rtable(head)->rt_type != RTN_LOCAL))
266 goto out_rcu_unlock; 267 goto out_rcu_unlock;
267 268
268 269
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 8871067560d..d7bb94c4834 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -731,9 +731,9 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
731 } 731 }
732#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 732#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
733 else if (skb->protocol == htons(ETH_P_IPV6)) { 733 else if (skb->protocol == htons(ETH_P_IPV6)) {
734 struct neighbour *neigh = dst_get_neighbour(skb_dst(skb));
734 const struct in6_addr *addr6; 735 const struct in6_addr *addr6;
735 int addr_type; 736 int addr_type;
736 struct neighbour *neigh = skb_dst(skb)->neighbour;
737 737
738 if (neigh == NULL) 738 if (neigh == NULL)
739 goto tx_error; 739 goto tx_error;
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index c8f48efc5fd..073a9b01c40 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -165,7 +165,7 @@ int ip_call_ra_chain(struct sk_buff *skb)
165 (!sk->sk_bound_dev_if || 165 (!sk->sk_bound_dev_if ||
166 sk->sk_bound_dev_if == dev->ifindex) && 166 sk->sk_bound_dev_if == dev->ifindex) &&
167 net_eq(sock_net(sk), dev_net(dev))) { 167 net_eq(sock_net(sk), dev_net(dev))) {
168 if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) { 168 if (ip_is_fragment(ip_hdr(skb))) {
169 if (ip_defrag(skb, IP_DEFRAG_CALL_RA_CHAIN)) 169 if (ip_defrag(skb, IP_DEFRAG_CALL_RA_CHAIN))
170 return 1; 170 return 1;
171 } 171 }
@@ -256,7 +256,7 @@ int ip_local_deliver(struct sk_buff *skb)
256 * Reassemble IP fragments. 256 * Reassemble IP fragments.
257 */ 257 */
258 258
259 if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) { 259 if (ip_is_fragment(ip_hdr(skb))) {
260 if (ip_defrag(skb, IP_DEFRAG_LOCAL_DELIVER)) 260 if (ip_defrag(skb, IP_DEFRAG_LOCAL_DELIVER))
261 return 0; 261 return 0;
262 } 262 }
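
Several hunks in this series replace the open-coded frag_off test with ip_is_fragment(). A sketch of the helper as assumed here (the series adds it to include/net/ip.h):

	static inline bool ip_is_fragment(const struct iphdr *iph)
	{
		/* true for any fragment: more-fragments bit set or nonzero offset */
		return (iph->frag_off & htons(IP_MF | IP_OFFSET)) != 0;
	}
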
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index a8024eaa0e8..ccaaa851ab4 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -182,6 +182,7 @@ static inline int ip_finish_output2(struct sk_buff *skb)
182 struct rtable *rt = (struct rtable *)dst; 182 struct rtable *rt = (struct rtable *)dst;
183 struct net_device *dev = dst->dev; 183 struct net_device *dev = dst->dev;
184 unsigned int hh_len = LL_RESERVED_SPACE(dev); 184 unsigned int hh_len = LL_RESERVED_SPACE(dev);
185 struct neighbour *neigh;
185 186
186 if (rt->rt_type == RTN_MULTICAST) { 187 if (rt->rt_type == RTN_MULTICAST) {
187 IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTMCAST, skb->len); 188 IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTMCAST, skb->len);
@@ -203,10 +204,9 @@ static inline int ip_finish_output2(struct sk_buff *skb)
203 skb = skb2; 204 skb = skb2;
204 } 205 }
205 206
206 if (dst->hh) 207 neigh = dst_get_neighbour(dst);
207 return neigh_hh_output(dst->hh, skb); 208 if (neigh)
208 else if (dst->neighbour) 209 return neigh_output(neigh, skb);
209 return dst->neighbour->output(skb);
210 210
211 if (net_ratelimit()) 211 if (net_ratelimit())
212 printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n"); 212 printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
@@ -489,7 +489,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
489 489
490 if (first_len - hlen > mtu || 490 if (first_len - hlen > mtu ||
491 ((first_len - hlen) & 7) || 491 ((first_len - hlen) & 7) ||
492 (iph->frag_off & htons(IP_MF|IP_OFFSET)) || 492 ip_is_fragment(iph) ||
493 skb_cloned(skb)) 493 skb_cloned(skb))
494 goto slow_path; 494 goto slow_path;
495 495
@@ -734,7 +734,7 @@ static inline int ip_ufo_append_data(struct sock *sk,
734 int getfrag(void *from, char *to, int offset, int len, 734 int getfrag(void *from, char *to, int offset, int len,
735 int odd, struct sk_buff *skb), 735 int odd, struct sk_buff *skb),
736 void *from, int length, int hh_len, int fragheaderlen, 736 void *from, int length, int hh_len, int fragheaderlen,
737 int transhdrlen, int mtu, unsigned int flags) 737 int transhdrlen, int maxfraglen, unsigned int flags)
738{ 738{
739 struct sk_buff *skb; 739 struct sk_buff *skb;
740 int err; 740 int err;
@@ -767,7 +767,7 @@ static inline int ip_ufo_append_data(struct sock *sk,
767 skb->csum = 0; 767 skb->csum = 0;
768 768
769 /* specify the length of each IP datagram fragment */ 769 /* specify the length of each IP datagram fragment */
770 skb_shinfo(skb)->gso_size = mtu - fragheaderlen; 770 skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen;
771 skb_shinfo(skb)->gso_type = SKB_GSO_UDP; 771 skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
772 __skb_queue_tail(queue, skb); 772 __skb_queue_tail(queue, skb);
773 } 773 }
@@ -802,8 +802,6 @@ static int __ip_append_data(struct sock *sk,
802 skb = skb_peek_tail(queue); 802 skb = skb_peek_tail(queue);
803 803
804 exthdrlen = !skb ? rt->dst.header_len : 0; 804 exthdrlen = !skb ? rt->dst.header_len : 0;
805 length += exthdrlen;
806 transhdrlen += exthdrlen;
807 mtu = cork->fragsize; 805 mtu = cork->fragsize;
808 806
809 hh_len = LL_RESERVED_SPACE(rt->dst.dev); 807 hh_len = LL_RESERVED_SPACE(rt->dst.dev);
@@ -830,10 +828,10 @@ static int __ip_append_data(struct sock *sk,
830 cork->length += length; 828 cork->length += length;
831 if (((length > mtu) || (skb && skb_is_gso(skb))) && 829 if (((length > mtu) || (skb && skb_is_gso(skb))) &&
832 (sk->sk_protocol == IPPROTO_UDP) && 830 (sk->sk_protocol == IPPROTO_UDP) &&
833 (rt->dst.dev->features & NETIF_F_UFO)) { 831 (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len) {
834 err = ip_ufo_append_data(sk, queue, getfrag, from, length, 832 err = ip_ufo_append_data(sk, queue, getfrag, from, length,
835 hh_len, fragheaderlen, transhdrlen, 833 hh_len, fragheaderlen, transhdrlen,
836 mtu, flags); 834 maxfraglen, flags);
837 if (err) 835 if (err)
838 goto error; 836 goto error;
839 return 0; 837 return 0;
@@ -883,17 +881,16 @@ alloc_new_skb:
883 else 881 else
884 alloclen = fraglen; 882 alloclen = fraglen;
885 883
884 alloclen += exthdrlen;
885
886 /* The last fragment gets additional space at tail. 886 /* The last fragment gets additional space at tail.
887 * Note, with MSG_MORE we overallocate on fragments, 887 * Note, with MSG_MORE we overallocate on fragments,
888 * because we have no idea what fragment will be 888 * because we have no idea what fragment will be
889 * the last. 889 * the last.
890 */ 890 */
891 if (datalen == length + fraggap) { 891 if (datalen == length + fraggap)
892 alloclen += rt->dst.trailer_len; 892 alloclen += rt->dst.trailer_len;
893 /* make sure mtu is not reached */ 893
894 if (datalen > mtu - fragheaderlen - rt->dst.trailer_len)
895 datalen -= ALIGN(rt->dst.trailer_len, 8);
896 }
897 if (transhdrlen) { 894 if (transhdrlen) {
898 skb = sock_alloc_send_skb(sk, 895 skb = sock_alloc_send_skb(sk,
899 alloclen + hh_len + 15, 896 alloclen + hh_len + 15,
@@ -926,11 +923,11 @@ alloc_new_skb:
926 /* 923 /*
927 * Find where to start putting bytes. 924 * Find where to start putting bytes.
928 */ 925 */
929 data = skb_put(skb, fraglen); 926 data = skb_put(skb, fraglen + exthdrlen);
930 skb_set_network_header(skb, exthdrlen); 927 skb_set_network_header(skb, exthdrlen);
931 skb->transport_header = (skb->network_header + 928 skb->transport_header = (skb->network_header +
932 fragheaderlen); 929 fragheaderlen);
933 data += fragheaderlen; 930 data += fragheaderlen + exthdrlen;
934 931
935 if (fraggap) { 932 if (fraggap) {
936 skb->csum = skb_copy_and_csum_bits( 933 skb->csum = skb_copy_and_csum_bits(
@@ -1064,7 +1061,7 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
1064 */ 1061 */
1065 *rtp = NULL; 1062 *rtp = NULL;
1066 cork->fragsize = inet->pmtudisc == IP_PMTUDISC_PROBE ? 1063 cork->fragsize = inet->pmtudisc == IP_PMTUDISC_PROBE ?
1067 rt->dst.dev->mtu : dst_mtu(rt->dst.path); 1064 rt->dst.dev->mtu : dst_mtu(&rt->dst);
1068 cork->dst = &rt->dst; 1065 cork->dst = &rt->dst;
1069 cork->length = 0; 1066 cork->length = 0;
1070 cork->tx_flags = ipc->tx_flags; 1067 cork->tx_flags = ipc->tx_flags;
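
ip_finish_output2() no longer branches on dst->hh versus dst->neighbour; the cached-header fast path moves behind a single helper on the neighbour. A sketch of neigh_output() as assumed by the hunk above (the exact readiness check may differ in this tree):

	static inline int neigh_output(struct neighbour *n, struct sk_buff *skb)
	{
		struct hh_cache *hh = &n->hh;

		if ((n->nud_state & NUD_CONNECTED) && hh->hh_len)
			return neigh_hh_output(hh, skb);	/* cached L2 header */
		return n->output(skb);				/* slow path */
	}
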
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index ab7e5542c1c..472a8c4f1dc 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -861,41 +861,44 @@ static void __init ic_do_bootp_ext(u8 *ext)
861#endif 861#endif
862 862
863 switch (*ext++) { 863 switch (*ext++) {
864 case 1: /* Subnet mask */ 864 case 1: /* Subnet mask */
865 if (ic_netmask == NONE) 865 if (ic_netmask == NONE)
866 memcpy(&ic_netmask, ext+1, 4); 866 memcpy(&ic_netmask, ext+1, 4);
867 break; 867 break;
868 case 3: /* Default gateway */ 868 case 3: /* Default gateway */
869 if (ic_gateway == NONE) 869 if (ic_gateway == NONE)
870 memcpy(&ic_gateway, ext+1, 4); 870 memcpy(&ic_gateway, ext+1, 4);
871 break; 871 break;
872 case 6: /* DNS server */ 872 case 6: /* DNS server */
873 servers= *ext/4; 873 servers= *ext/4;
874 if (servers > CONF_NAMESERVERS_MAX) 874 if (servers > CONF_NAMESERVERS_MAX)
875 servers = CONF_NAMESERVERS_MAX; 875 servers = CONF_NAMESERVERS_MAX;
876 for (i = 0; i < servers; i++) { 876 for (i = 0; i < servers; i++) {
877 if (ic_nameservers[i] == NONE) 877 if (ic_nameservers[i] == NONE)
878 memcpy(&ic_nameservers[i], ext+1+4*i, 4); 878 memcpy(&ic_nameservers[i], ext+1+4*i, 4);
879 } 879 }
880 break; 880 break;
881 case 12: /* Host name */ 881 case 12: /* Host name */
882 ic_bootp_string(utsname()->nodename, ext+1, *ext, __NEW_UTS_LEN); 882 ic_bootp_string(utsname()->nodename, ext+1, *ext,
883 ic_host_name_set = 1; 883 __NEW_UTS_LEN);
884 break; 884 ic_host_name_set = 1;
885 case 15: /* Domain name (DNS) */ 885 break;
886 ic_bootp_string(ic_domain, ext+1, *ext, sizeof(ic_domain)); 886 case 15: /* Domain name (DNS) */
887 break; 887 ic_bootp_string(ic_domain, ext+1, *ext, sizeof(ic_domain));
888 case 17: /* Root path */ 888 break;
889 if (!root_server_path[0]) 889 case 17: /* Root path */
890 ic_bootp_string(root_server_path, ext+1, *ext, sizeof(root_server_path)); 890 if (!root_server_path[0])
891 break; 891 ic_bootp_string(root_server_path, ext+1, *ext,
892 case 26: /* Interface MTU */ 892 sizeof(root_server_path));
893 memcpy(&mtu, ext+1, sizeof(mtu)); 893 break;
894 ic_dev_mtu = ntohs(mtu); 894 case 26: /* Interface MTU */
895 break; 895 memcpy(&mtu, ext+1, sizeof(mtu));
896 case 40: /* NIS Domain name (_not_ DNS) */ 896 ic_dev_mtu = ntohs(mtu);
897 ic_bootp_string(utsname()->domainname, ext+1, *ext, __NEW_UTS_LEN); 897 break;
898 break; 898 case 40: /* NIS Domain name (_not_ DNS) */
899 ic_bootp_string(utsname()->domainname, ext+1, *ext,
900 __NEW_UTS_LEN);
901 break;
899 } 902 }
900} 903}
901 904
@@ -932,7 +935,7 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str
932 goto drop; 935 goto drop;
933 936
934 /* Fragments are not supported */ 937 /* Fragments are not supported */
935 if (h->frag_off & htons(IP_OFFSET | IP_MF)) { 938 if (ip_is_fragment(h)) {
936 if (net_ratelimit()) 939 if (net_ratelimit())
937 printk(KERN_ERR "DHCP/BOOTP: Ignoring fragmented " 940 printk(KERN_ERR "DHCP/BOOTP: Ignoring fragmented "
938 "reply.\n"); 941 "reply.\n");
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 30a7763c400..aae2bd8cd92 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -2544,7 +2544,8 @@ int __init ip_mr_init(void)
2544 goto add_proto_fail; 2544 goto add_proto_fail;
2545 } 2545 }
2546#endif 2546#endif
2547 rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE, NULL, ipmr_rtm_dumproute); 2547 rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE,
2548 NULL, ipmr_rtm_dumproute, NULL);
2548 return 0; 2549 return 0;
2549 2550
2550#ifdef CONFIG_IP_PIMSM_V2 2551#ifdef CONFIG_IP_PIMSM_V2
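
rtnl_register() grows a third callback in this series, so every caller gains a trailing argument; families that do not pre-compute dump sizes pass NULL, as both hunks here do. The signature as assumed:

	void rtnl_register(int protocol, int msgtype,
			   rtnl_doit_func doit,
			   rtnl_dumpit_func dumpit,
			   rtnl_calcit_func calcit);	/* new: dump-size hint, may be NULL */
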
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index 4614babdc45..2e97e3ec1eb 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -17,51 +17,35 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
17 const struct iphdr *iph = ip_hdr(skb); 17 const struct iphdr *iph = ip_hdr(skb);
18 struct rtable *rt; 18 struct rtable *rt;
19 struct flowi4 fl4 = {}; 19 struct flowi4 fl4 = {};
20 unsigned long orefdst; 20 __be32 saddr = iph->saddr;
21 __u8 flags = 0;
21 unsigned int hh_len; 22 unsigned int hh_len;
22 unsigned int type;
23 23
24 type = inet_addr_type(net, iph->saddr); 24 if (!skb->sk && addr_type != RTN_LOCAL) {
25 if (skb->sk && inet_sk(skb->sk)->transparent) 25 if (addr_type == RTN_UNSPEC)
26 type = RTN_LOCAL; 26 addr_type = inet_addr_type(net, saddr);
27 if (addr_type == RTN_UNSPEC) 27 if (addr_type == RTN_LOCAL || addr_type == RTN_UNICAST)
28 addr_type = type; 28 flags |= FLOWI_FLAG_ANYSRC;
29 else
30 saddr = 0;
31 }
29 32
30 /* some non-standard hacks like ipt_REJECT.c:send_reset() can cause 33 /* some non-standard hacks like ipt_REJECT.c:send_reset() can cause
31 * packets with foreign saddr to appear on the NF_INET_LOCAL_OUT hook. 34 * packets with foreign saddr to appear on the NF_INET_LOCAL_OUT hook.
32 */ 35 */
33 if (addr_type == RTN_LOCAL) { 36 fl4.daddr = iph->daddr;
34 fl4.daddr = iph->daddr; 37 fl4.saddr = saddr;
35 if (type == RTN_LOCAL) 38 fl4.flowi4_tos = RT_TOS(iph->tos);
36 fl4.saddr = iph->saddr; 39 fl4.flowi4_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0;
37 fl4.flowi4_tos = RT_TOS(iph->tos); 40 fl4.flowi4_mark = skb->mark;
38 fl4.flowi4_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0; 41 fl4.flowi4_flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : flags;
39 fl4.flowi4_mark = skb->mark; 42 rt = ip_route_output_key(net, &fl4);
40 fl4.flowi4_flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : 0; 43 if (IS_ERR(rt))
41 rt = ip_route_output_key(net, &fl4); 44 return -1;
42 if (IS_ERR(rt))
43 return -1;
44
45 /* Drop old route. */
46 skb_dst_drop(skb);
47 skb_dst_set(skb, &rt->dst);
48 } else {
49 /* non-local src, find valid iif to satisfy
50 * rp-filter when calling ip_route_input. */
51 fl4.daddr = iph->saddr;
52 rt = ip_route_output_key(net, &fl4);
53 if (IS_ERR(rt))
54 return -1;
55 45
56 orefdst = skb->_skb_refdst; 46 /* Drop old route. */
57 if (ip_route_input(skb, iph->daddr, iph->saddr, 47 skb_dst_drop(skb);
58 RT_TOS(iph->tos), rt->dst.dev) != 0) { 48 skb_dst_set(skb, &rt->dst);
59 dst_release(&rt->dst);
60 return -1;
61 }
62 dst_release(&rt->dst);
63 refdst_drop(orefdst);
64 }
65 49
66 if (skb_dst(skb)->error) 50 if (skb_dst(skb)->error)
67 return -1; 51 return -1;
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 5c9e97c7901..db8d22db425 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -317,19 +317,19 @@ clusterip_tg(struct sk_buff *skb, const struct xt_action_param *par)
317 hash = clusterip_hashfn(skb, cipinfo->config); 317 hash = clusterip_hashfn(skb, cipinfo->config);
318 318
319 switch (ctinfo) { 319 switch (ctinfo) {
320 case IP_CT_NEW: 320 case IP_CT_NEW:
321 ct->mark = hash; 321 ct->mark = hash;
322 break; 322 break;
323 case IP_CT_RELATED: 323 case IP_CT_RELATED:
324 case IP_CT_RELATED_REPLY: 324 case IP_CT_RELATED_REPLY:
325 /* FIXME: we don't handle expectations at the 325 /* FIXME: we don't handle expectations at the moment.
326 * moment. they can arrive on a different node than 326 * They can arrive on a different node than
327 * the master connection (e.g. FTP passive mode) */ 327 * the master connection (e.g. FTP passive mode) */
328 case IP_CT_ESTABLISHED: 328 case IP_CT_ESTABLISHED:
329 case IP_CT_ESTABLISHED_REPLY: 329 case IP_CT_ESTABLISHED_REPLY:
330 break; 330 break;
331 default: 331 default: /* Prevent gcc warnings */
332 break; 332 break;
333 } 333 }
334 334
335#ifdef DEBUG 335#ifdef DEBUG
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c
index 1ff79e557f9..51f13f8ec72 100644
--- a/net/ipv4/netfilter/ipt_REJECT.c
+++ b/net/ipv4/netfilter/ipt_REJECT.c
@@ -40,7 +40,6 @@ static void send_reset(struct sk_buff *oldskb, int hook)
40 struct iphdr *niph; 40 struct iphdr *niph;
41 const struct tcphdr *oth; 41 const struct tcphdr *oth;
42 struct tcphdr _otcph, *tcph; 42 struct tcphdr _otcph, *tcph;
43 unsigned int addr_type;
44 43
45 /* IP header checks: fragment. */ 44 /* IP header checks: fragment. */
46 if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET)) 45 if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
@@ -55,6 +54,9 @@ static void send_reset(struct sk_buff *oldskb, int hook)
55 if (oth->rst) 54 if (oth->rst)
56 return; 55 return;
57 56
57 if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
58 return;
59
58 /* Check checksum */ 60 /* Check checksum */
59 if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), IPPROTO_TCP)) 61 if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), IPPROTO_TCP))
60 return; 62 return;
@@ -101,19 +103,11 @@ static void send_reset(struct sk_buff *oldskb, int hook)
101 nskb->csum_start = (unsigned char *)tcph - nskb->head; 103 nskb->csum_start = (unsigned char *)tcph - nskb->head;
102 nskb->csum_offset = offsetof(struct tcphdr, check); 104 nskb->csum_offset = offsetof(struct tcphdr, check);
103 105
104 addr_type = RTN_UNSPEC;
105 if (hook != NF_INET_FORWARD
106#ifdef CONFIG_BRIDGE_NETFILTER
107 || (nskb->nf_bridge && nskb->nf_bridge->mask & BRNF_BRIDGED)
108#endif
109 )
110 addr_type = RTN_LOCAL;
111
112 /* ip_route_me_harder expects skb->dst to be set */ 106 /* ip_route_me_harder expects skb->dst to be set */
113 skb_dst_set_noref(nskb, skb_dst(oldskb)); 107 skb_dst_set_noref(nskb, skb_dst(oldskb));
114 108
115 nskb->protocol = htons(ETH_P_IP); 109 nskb->protocol = htons(ETH_P_IP);
116 if (ip_route_me_harder(nskb, addr_type)) 110 if (ip_route_me_harder(nskb, RTN_UNSPEC))
117 goto free_nskb; 111 goto free_nskb;
118 112
119 niph->ttl = ip4_dst_hoplimit(skb_dst(nskb)); 113 niph->ttl = ip4_dst_hoplimit(skb_dst(nskb));
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
index f3a9b42b16c..9bb1b8a37a2 100644
--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
@@ -82,7 +82,7 @@ static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
82#endif 82#endif
83#endif 83#endif
84 /* Gather fragments. */ 84 /* Gather fragments. */
85 if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) { 85 if (ip_is_fragment(ip_hdr(skb))) {
86 enum ip_defrag_users user = nf_ct_defrag_user(hooknum, skb); 86 enum ip_defrag_users user = nf_ct_defrag_user(hooknum, skb);
87 if (nf_ct_ipv4_gather_frags(skb, user)) 87 if (nf_ct_ipv4_gather_frags(skb, user))
88 return NF_STOLEN; 88 return NF_STOLEN;
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
index 8812a02078a..076b7c8c4aa 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -719,117 +719,115 @@ static unsigned char snmp_object_decode(struct asn1_ctx *ctx,
719 719
720 l = 0; 720 l = 0;
721 switch (type) { 721 switch (type) {
722 case SNMP_INTEGER: 722 case SNMP_INTEGER:
723 len = sizeof(long); 723 len = sizeof(long);
724 if (!asn1_long_decode(ctx, end, &l)) { 724 if (!asn1_long_decode(ctx, end, &l)) {
725 kfree(id); 725 kfree(id);
726 return 0; 726 return 0;
727 } 727 }
728 *obj = kmalloc(sizeof(struct snmp_object) + len, 728 *obj = kmalloc(sizeof(struct snmp_object) + len, GFP_ATOMIC);
729 GFP_ATOMIC); 729 if (*obj == NULL) {
730 if (*obj == NULL) { 730 kfree(id);
731 kfree(id); 731 if (net_ratelimit())
732 if (net_ratelimit()) 732 pr_notice("OOM in bsalg (%d)\n", __LINE__);
733 pr_notice("OOM in bsalg (%d)\n", __LINE__); 733 return 0;
734 return 0; 734 }
735 } 735 (*obj)->syntax.l[0] = l;
736 (*obj)->syntax.l[0] = l; 736 break;
737 break; 737 case SNMP_OCTETSTR:
738 case SNMP_OCTETSTR: 738 case SNMP_OPAQUE:
739 case SNMP_OPAQUE: 739 if (!asn1_octets_decode(ctx, end, &p, &len)) {
740 if (!asn1_octets_decode(ctx, end, &p, &len)) { 740 kfree(id);
741 kfree(id); 741 return 0;
742 return 0; 742 }
743 } 743 *obj = kmalloc(sizeof(struct snmp_object) + len, GFP_ATOMIC);
744 *obj = kmalloc(sizeof(struct snmp_object) + len, 744 if (*obj == NULL) {
745 GFP_ATOMIC);
746 if (*obj == NULL) {
747 kfree(p);
748 kfree(id);
749 if (net_ratelimit())
750 pr_notice("OOM in bsalg (%d)\n", __LINE__);
751 return 0;
752 }
753 memcpy((*obj)->syntax.c, p, len);
754 kfree(p); 745 kfree(p);
755 break; 746 kfree(id);
756 case SNMP_NULL: 747 if (net_ratelimit())
757 case SNMP_NOSUCHOBJECT: 748 pr_notice("OOM in bsalg (%d)\n", __LINE__);
758 case SNMP_NOSUCHINSTANCE: 749 return 0;
759 case SNMP_ENDOFMIBVIEW: 750 }
760 len = 0; 751 memcpy((*obj)->syntax.c, p, len);
761 *obj = kmalloc(sizeof(struct snmp_object), GFP_ATOMIC); 752 kfree(p);
762 if (*obj == NULL) { 753 break;
763 kfree(id); 754 case SNMP_NULL:
764 if (net_ratelimit()) 755 case SNMP_NOSUCHOBJECT:
765 pr_notice("OOM in bsalg (%d)\n", __LINE__); 756 case SNMP_NOSUCHINSTANCE:
766 return 0; 757 case SNMP_ENDOFMIBVIEW:
767 } 758 len = 0;
768 if (!asn1_null_decode(ctx, end)) { 759 *obj = kmalloc(sizeof(struct snmp_object), GFP_ATOMIC);
769 kfree(id); 760 if (*obj == NULL) {
770 kfree(*obj); 761 kfree(id);
771 *obj = NULL; 762 if (net_ratelimit())
772 return 0; 763 pr_notice("OOM in bsalg (%d)\n", __LINE__);
773 } 764 return 0;
774 break; 765 }
775 case SNMP_OBJECTID: 766 if (!asn1_null_decode(ctx, end)) {
776 if (!asn1_oid_decode(ctx, end, (unsigned long **)&lp, &len)) { 767 kfree(id);
777 kfree(id); 768 kfree(*obj);
778 return 0; 769 *obj = NULL;
779 } 770 return 0;
780 len *= sizeof(unsigned long); 771 }
781 *obj = kmalloc(sizeof(struct snmp_object) + len, GFP_ATOMIC); 772 break;
782 if (*obj == NULL) { 773 case SNMP_OBJECTID:
783 kfree(lp); 774 if (!asn1_oid_decode(ctx, end, (unsigned long **)&lp, &len)) {
784 kfree(id); 775 kfree(id);
785 if (net_ratelimit()) 776 return 0;
786 pr_notice("OOM in bsalg (%d)\n", __LINE__); 777 }
787 return 0; 778 len *= sizeof(unsigned long);
788 } 779 *obj = kmalloc(sizeof(struct snmp_object) + len, GFP_ATOMIC);
789 memcpy((*obj)->syntax.ul, lp, len); 780 if (*obj == NULL) {
790 kfree(lp); 781 kfree(lp);
791 break; 782 kfree(id);
792 case SNMP_IPADDR: 783 if (net_ratelimit())
793 if (!asn1_octets_decode(ctx, end, &p, &len)) { 784 pr_notice("OOM in bsalg (%d)\n", __LINE__);
794 kfree(id); 785 return 0;
795 return 0; 786 }
796 } 787 memcpy((*obj)->syntax.ul, lp, len);
797 if (len != 4) { 788 kfree(lp);
798 kfree(p); 789 break;
799 kfree(id); 790 case SNMP_IPADDR:
800 return 0; 791 if (!asn1_octets_decode(ctx, end, &p, &len)) {
801 } 792 kfree(id);
802 *obj = kmalloc(sizeof(struct snmp_object) + len, GFP_ATOMIC); 793 return 0;
803 if (*obj == NULL) { 794 }
804 kfree(p); 795 if (len != 4) {
805 kfree(id);
806 if (net_ratelimit())
807 pr_notice("OOM in bsalg (%d)\n", __LINE__);
808 return 0;
809 }
810 memcpy((*obj)->syntax.uc, p, len);
811 kfree(p); 796 kfree(p);
812 break;
813 case SNMP_COUNTER:
814 case SNMP_GAUGE:
815 case SNMP_TIMETICKS:
816 len = sizeof(unsigned long);
817 if (!asn1_ulong_decode(ctx, end, &ul)) {
818 kfree(id);
819 return 0;
820 }
821 *obj = kmalloc(sizeof(struct snmp_object) + len, GFP_ATOMIC);
822 if (*obj == NULL) {
823 kfree(id);
824 if (net_ratelimit())
825 pr_notice("OOM in bsalg (%d)\n", __LINE__);
826 return 0;
827 }
828 (*obj)->syntax.ul[0] = ul;
829 break;
830 default:
831 kfree(id); 797 kfree(id);
832 return 0; 798 return 0;
799 }
800 *obj = kmalloc(sizeof(struct snmp_object) + len, GFP_ATOMIC);
801 if (*obj == NULL) {
802 kfree(p);
803 kfree(id);
804 if (net_ratelimit())
805 pr_notice("OOM in bsalg (%d)\n", __LINE__);
806 return 0;
807 }
808 memcpy((*obj)->syntax.uc, p, len);
809 kfree(p);
810 break;
811 case SNMP_COUNTER:
812 case SNMP_GAUGE:
813 case SNMP_TIMETICKS:
814 len = sizeof(unsigned long);
815 if (!asn1_ulong_decode(ctx, end, &ul)) {
816 kfree(id);
817 return 0;
818 }
819 *obj = kmalloc(sizeof(struct snmp_object) + len, GFP_ATOMIC);
820 if (*obj == NULL) {
821 kfree(id);
822 if (net_ratelimit())
823 pr_notice("OOM in bsalg (%d)\n", __LINE__);
824 return 0;
825 }
826 (*obj)->syntax.ul[0] = ul;
827 break;
828 default:
829 kfree(id);
830 return 0;
833 } 831 }
834 832
835 (*obj)->syntax_len = len; 833 (*obj)->syntax_len = len;
diff --git a/net/ipv4/netfilter/nf_nat_standalone.c b/net/ipv4/netfilter/nf_nat_standalone.c
index 483b76d042d..a6e606e8482 100644
--- a/net/ipv4/netfilter/nf_nat_standalone.c
+++ b/net/ipv4/netfilter/nf_nat_standalone.c
@@ -88,7 +88,7 @@ nf_nat_fn(unsigned int hooknum,
88 88
89 /* We never see fragments: conntrack defrags on pre-routing 89 /* We never see fragments: conntrack defrags on pre-routing
90 and local-out, and nf_nat_out protects post-routing. */ 90 and local-out, and nf_nat_out protects post-routing. */
91 NF_CT_ASSERT(!(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET))); 91 NF_CT_ASSERT(!ip_is_fragment(ip_hdr(skb)));
92 92
93 ct = nf_ct_get(skb, &ctinfo); 93 ct = nf_ct_get(skb, &ctinfo);
94 /* Can't track? It's not due to stress, or conntrack would 94 /* Can't track? It's not due to stress, or conntrack would
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index c9893d43242..08526786dc3 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -825,28 +825,28 @@ static int compat_raw_getsockopt(struct sock *sk, int level, int optname,
825static int raw_ioctl(struct sock *sk, int cmd, unsigned long arg) 825static int raw_ioctl(struct sock *sk, int cmd, unsigned long arg)
826{ 826{
827 switch (cmd) { 827 switch (cmd) {
828 case SIOCOUTQ: { 828 case SIOCOUTQ: {
829 int amount = sk_wmem_alloc_get(sk); 829 int amount = sk_wmem_alloc_get(sk);
830 830
831 return put_user(amount, (int __user *)arg); 831 return put_user(amount, (int __user *)arg);
832 } 832 }
833 case SIOCINQ: { 833 case SIOCINQ: {
834 struct sk_buff *skb; 834 struct sk_buff *skb;
835 int amount = 0; 835 int amount = 0;
836 836
837 spin_lock_bh(&sk->sk_receive_queue.lock); 837 spin_lock_bh(&sk->sk_receive_queue.lock);
838 skb = skb_peek(&sk->sk_receive_queue); 838 skb = skb_peek(&sk->sk_receive_queue);
839 if (skb != NULL) 839 if (skb != NULL)
840 amount = skb->len; 840 amount = skb->len;
841 spin_unlock_bh(&sk->sk_receive_queue.lock); 841 spin_unlock_bh(&sk->sk_receive_queue.lock);
842 return put_user(amount, (int __user *)arg); 842 return put_user(amount, (int __user *)arg);
843 } 843 }
844 844
845 default: 845 default:
846#ifdef CONFIG_IP_MROUTE 846#ifdef CONFIG_IP_MROUTE
847 return ipmr_ioctl(sk, cmd, (void __user *)arg); 847 return ipmr_ioctl(sk, cmd, (void __user *)arg);
848#else 848#else
849 return -ENOIOCTLCMD; 849 return -ENOIOCTLCMD;
850#endif 850#endif
851 } 851 }
852} 852}
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index aa13ef10511..33137307d52 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -108,6 +108,7 @@
108#ifdef CONFIG_SYSCTL 108#ifdef CONFIG_SYSCTL
109#include <linux/sysctl.h> 109#include <linux/sysctl.h>
110#endif 110#endif
111#include <net/atmclip.h>
111 112
112#define RT_FL_TOS(oldflp4) \ 113#define RT_FL_TOS(oldflp4) \
113 ((u32)(oldflp4->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))) 114 ((u32)(oldflp4->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK)))
@@ -184,6 +185,8 @@ static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
184 return p; 185 return p;
185} 186}
186 187
188static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, const void *daddr);
189
187static struct dst_ops ipv4_dst_ops = { 190static struct dst_ops ipv4_dst_ops = {
188 .family = AF_INET, 191 .family = AF_INET,
189 .protocol = cpu_to_be16(ETH_P_IP), 192 .protocol = cpu_to_be16(ETH_P_IP),
@@ -198,6 +201,7 @@ static struct dst_ops ipv4_dst_ops = {
198 .link_failure = ipv4_link_failure, 201 .link_failure = ipv4_link_failure,
199 .update_pmtu = ip_rt_update_pmtu, 202 .update_pmtu = ip_rt_update_pmtu,
200 .local_out = __ip_local_out, 203 .local_out = __ip_local_out,
204 .neigh_lookup = ipv4_neigh_lookup,
201}; 205};
202 206
203#define ECN_OR_COST(class) TC_PRIO_##class 207#define ECN_OR_COST(class) TC_PRIO_##class
@@ -411,8 +415,10 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
411 "HHUptod\tSpecDst"); 415 "HHUptod\tSpecDst");
412 else { 416 else {
413 struct rtable *r = v; 417 struct rtable *r = v;
418 struct neighbour *n;
414 int len; 419 int len;
415 420
421 n = dst_get_neighbour(&r->dst);
416 seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t" 422 seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t"
417 "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n", 423 "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
418 r->dst.dev ? r->dst.dev->name : "*", 424 r->dst.dev ? r->dst.dev->name : "*",
@@ -425,9 +431,8 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
425 (int)((dst_metric(&r->dst, RTAX_RTT) >> 3) + 431 (int)((dst_metric(&r->dst, RTAX_RTT) >> 3) +
426 dst_metric(&r->dst, RTAX_RTTVAR)), 432 dst_metric(&r->dst, RTAX_RTTVAR)),
427 r->rt_key_tos, 433 r->rt_key_tos,
428 r->dst.hh ? atomic_read(&r->dst.hh->hh_refcnt) : -1, 434 -1,
429 r->dst.hh ? (r->dst.hh->hh_output == 435 (n && (n->nud_state & NUD_CONNECTED)) ? 1 : 0,
430 dev_queue_xmit) : 0,
431 r->rt_spec_dst, &len); 436 r->rt_spec_dst, &len);
432 437
433 seq_printf(seq, "%*s\n", 127 - len, ""); 438 seq_printf(seq, "%*s\n", 127 - len, "");
@@ -1006,6 +1011,37 @@ static int slow_chain_length(const struct rtable *head)
1006 return length >> FRACT_BITS; 1011 return length >> FRACT_BITS;
1007} 1012}
1008 1013
1014static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, const void *daddr)
1015{
1016 struct neigh_table *tbl = &arp_tbl;
1017 static const __be32 inaddr_any = 0;
1018 struct net_device *dev = dst->dev;
1019 const __be32 *pkey = daddr;
1020 struct neighbour *n;
1021
1022#if defined(CONFIG_ATM_CLIP) || defined(CONFIG_ATM_CLIP_MODULE)
1023 if (dev->type == ARPHRD_ATM)
1024 tbl = clip_tbl_hook;
1025#endif
1026 if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
1027 pkey = &inaddr_any;
1028
1029 n = __ipv4_neigh_lookup(tbl, dev, *(__force u32 *)pkey);
1030 if (n)
1031 return n;
1032 return neigh_create(tbl, pkey, dev);
1033}
1034
1035static int rt_bind_neighbour(struct rtable *rt)
1036{
1037 struct neighbour *n = ipv4_neigh_lookup(&rt->dst, &rt->rt_gateway);
1038 if (IS_ERR(n))
1039 return PTR_ERR(n);
1040 dst_set_neighbour(&rt->dst, n);
1041
1042 return 0;
1043}
1044
1009static struct rtable *rt_intern_hash(unsigned hash, struct rtable *rt, 1045static struct rtable *rt_intern_hash(unsigned hash, struct rtable *rt,
1010 struct sk_buff *skb, int ifindex) 1046 struct sk_buff *skb, int ifindex)
1011{ 1047{
@@ -1042,7 +1078,7 @@ restart:
1042 1078
1043 rt->dst.flags |= DST_NOCACHE; 1079 rt->dst.flags |= DST_NOCACHE;
1044 if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) { 1080 if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
1045 int err = arp_bind_neighbour(&rt->dst); 1081 int err = rt_bind_neighbour(rt);
1046 if (err) { 1082 if (err) {
1047 if (net_ratelimit()) 1083 if (net_ratelimit())
1048 printk(KERN_WARNING 1084 printk(KERN_WARNING
@@ -1138,7 +1174,7 @@ restart:
1138 route or unicast forwarding path. 1174 route or unicast forwarding path.
1139 */ 1175 */
1140 if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) { 1176 if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
1141 int err = arp_bind_neighbour(&rt->dst); 1177 int err = rt_bind_neighbour(rt);
1142 if (err) { 1178 if (err) {
1143 spin_unlock_bh(rt_hash_lock_addr(hash)); 1179 spin_unlock_bh(rt_hash_lock_addr(hash));
1144 1180
@@ -1439,20 +1475,20 @@ static int ip_error(struct sk_buff *skb)
1439 int code; 1475 int code;
1440 1476
1441 switch (rt->dst.error) { 1477 switch (rt->dst.error) {
1442 case EINVAL: 1478 case EINVAL:
1443 default: 1479 default:
1444 goto out; 1480 goto out;
1445 case EHOSTUNREACH: 1481 case EHOSTUNREACH:
1446 code = ICMP_HOST_UNREACH; 1482 code = ICMP_HOST_UNREACH;
1447 break; 1483 break;
1448 case ENETUNREACH: 1484 case ENETUNREACH:
1449 code = ICMP_NET_UNREACH; 1485 code = ICMP_NET_UNREACH;
1450 IP_INC_STATS_BH(dev_net(rt->dst.dev), 1486 IP_INC_STATS_BH(dev_net(rt->dst.dev),
1451 IPSTATS_MIB_INNOROUTES); 1487 IPSTATS_MIB_INNOROUTES);
1452 break; 1488 break;
1453 case EACCES: 1489 case EACCES:
1454 code = ICMP_PKT_FILTERED; 1490 code = ICMP_PKT_FILTERED;
1455 break; 1491 break;
1456 } 1492 }
1457 1493
1458 if (!rt->peer) 1494 if (!rt->peer)
@@ -1592,23 +1628,24 @@ static int check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
1592{ 1628{
1593 struct rtable *rt = (struct rtable *) dst; 1629 struct rtable *rt = (struct rtable *) dst;
1594 __be32 orig_gw = rt->rt_gateway; 1630 __be32 orig_gw = rt->rt_gateway;
1631 struct neighbour *n;
1595 1632
1596 dst_confirm(&rt->dst); 1633 dst_confirm(&rt->dst);
1597 1634
1598 neigh_release(rt->dst.neighbour); 1635 neigh_release(dst_get_neighbour(&rt->dst));
1599 rt->dst.neighbour = NULL; 1636 dst_set_neighbour(&rt->dst, NULL);
1600 1637
1601 rt->rt_gateway = peer->redirect_learned.a4; 1638 rt->rt_gateway = peer->redirect_learned.a4;
1602 if (arp_bind_neighbour(&rt->dst) || 1639 rt_bind_neighbour(rt);
1603 !(rt->dst.neighbour->nud_state & NUD_VALID)) { 1640 n = dst_get_neighbour(&rt->dst);
1604 if (rt->dst.neighbour) 1641 if (!n || !(n->nud_state & NUD_VALID)) {
1605 neigh_event_send(rt->dst.neighbour, NULL); 1642 if (n)
1643 neigh_event_send(n, NULL);
1606 rt->rt_gateway = orig_gw; 1644 rt->rt_gateway = orig_gw;
1607 return -EAGAIN; 1645 return -EAGAIN;
1608 } else { 1646 } else {
1609 rt->rt_flags |= RTCF_REDIRECTED; 1647 rt->rt_flags |= RTCF_REDIRECTED;
1610 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, 1648 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
1611 rt->dst.neighbour);
1612 } 1649 }
1613 return 0; 1650 return 0;
1614} 1651}
@@ -2708,6 +2745,7 @@ static struct dst_ops ipv4_dst_blackhole_ops = {
2708 .default_advmss = ipv4_default_advmss, 2745 .default_advmss = ipv4_default_advmss,
2709 .update_pmtu = ipv4_rt_blackhole_update_pmtu, 2746 .update_pmtu = ipv4_rt_blackhole_update_pmtu,
2710 .cow_metrics = ipv4_rt_blackhole_cow_metrics, 2747 .cow_metrics = ipv4_rt_blackhole_cow_metrics,
2748 .neigh_lookup = ipv4_neigh_lookup,
2711}; 2749};
2712 2750
2713struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig) 2751struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
@@ -3303,7 +3341,7 @@ int __init ip_rt_init(void)
3303 xfrm_init(); 3341 xfrm_init();
3304 xfrm4_init(ip_rt_max_size); 3342 xfrm4_init(ip_rt_max_size);
3305#endif 3343#endif
3306 rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL); 3344 rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL, NULL);
3307 3345
3308#ifdef CONFIG_SYSCTL 3346#ifdef CONFIG_SYSCTL
3309 register_pernet_subsys(&sysctl_route_ops); 3347 register_pernet_subsys(&sysctl_route_ops);
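
With .neigh_lookup wired into both ipv4 dst_ops, callers can resolve a neighbour without knowing the family's ARP (or clip) table. A minimal caller-side sketch (the wrapper name is hypothetical; only the ops member added above is assumed):

	static inline struct neighbour *
	my_dst_neigh_lookup(const struct dst_entry *dst, const void *daddr)
	{
		return dst->ops->neigh_lookup(dst, daddr);
	}
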
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 26461492a84..92bb9434b33 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -316,6 +316,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
316 ireq->wscale_ok = tcp_opt.wscale_ok; 316 ireq->wscale_ok = tcp_opt.wscale_ok;
317 ireq->tstamp_ok = tcp_opt.saw_tstamp; 317 ireq->tstamp_ok = tcp_opt.saw_tstamp;
318 req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0; 318 req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
319 treq->snt_synack = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsecr : 0;
319 320
320 /* We threw the options of the initial SYN away, so we hope 321
321 * the ACK carries the same options again (see RFC1122 4.2.3.8) 322 * the ACK carries the same options again (see RFC1122 4.2.3.8)
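
Recording snt_synack lets the stack take an RTT sample from the 3WHS even for syncookie-recovered connections. A sketch of the consumer, assuming the tcp_valid_rtt_meas() export at the end of this section and a hypothetical newsk for the accepted socket:

	/* on the final ACK of the handshake */
	if (tcp_rsk(req)->snt_synack)
		tcp_valid_rtt_meas(newsk,
				   tcp_time_stamp - tcp_rsk(req)->snt_synack);
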
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 57d0752e239..69fd7201129 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -398,20 +398,6 @@ static struct ctl_table ipv4_table[] = {
398 .proc_handler = proc_dointvec_jiffies, 398 .proc_handler = proc_dointvec_jiffies,
399 }, 399 },
400 { 400 {
401 .procname = "inet_peer_gc_mintime",
402 .data = &inet_peer_gc_mintime,
403 .maxlen = sizeof(int),
404 .mode = 0644,
405 .proc_handler = proc_dointvec_jiffies,
406 },
407 {
408 .procname = "inet_peer_gc_maxtime",
409 .data = &inet_peer_gc_maxtime,
410 .maxlen = sizeof(int),
411 .mode = 0644,
412 .proc_handler = proc_dointvec_jiffies,
413 },
414 {
415 .procname = "tcp_orphan_retries", 401 .procname = "tcp_orphan_retries",
416 .data = &sysctl_tcp_orphan_retries, 402 .data = &sysctl_tcp_orphan_retries,
417 .maxlen = sizeof(int), 403 .maxlen = sizeof(int),
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 054a59d21eb..46febcacb72 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -3220,7 +3220,7 @@ __setup("thash_entries=", set_thash_entries);
3220void __init tcp_init(void) 3220void __init tcp_init(void)
3221{ 3221{
3222 struct sk_buff *skb = NULL; 3222 struct sk_buff *skb = NULL;
3223 unsigned long nr_pages, limit; 3223 unsigned long limit;
3224 int i, max_share, cnt; 3224 int i, max_share, cnt;
3225 unsigned long jiffy = jiffies; 3225 unsigned long jiffy = jiffies;
3226 3226
@@ -3277,13 +3277,7 @@ void __init tcp_init(void)
3277 sysctl_tcp_max_orphans = cnt / 2; 3277 sysctl_tcp_max_orphans = cnt / 2;
3278 sysctl_max_syn_backlog = max(128, cnt / 256); 3278 sysctl_max_syn_backlog = max(128, cnt / 256);
3279 3279
3280 /* Set the pressure threshold to be a fraction of global memory that 3280 limit = nr_free_buffer_pages() / 8;
3281 * is up to 1/2 at 256 MB, decreasing toward zero with the amount of
3282 * memory, with a floor of 128 pages.
3283 */
3284 nr_pages = totalram_pages - totalhigh_pages;
3285 limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
3286 limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
3287 limit = max(limit, 128UL); 3281 limit = max(limit, 128UL);
3288 sysctl_tcp_mem[0] = limit / 4 * 3; 3282 sysctl_tcp_mem[0] = limit / 4 * 3;
3289 sysctl_tcp_mem[1] = limit; 3283 sysctl_tcp_mem[1] = limit;
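
The hand-rolled pressure formula is replaced by a simple fraction of the free buffer pages. A worked example under an assumed 4 GiB of lowmem with 4 KiB pages (~1M free buffer pages):

	limit             = 1048576 / 8   = 131072 pages
	sysctl_tcp_mem[0] = limit / 4 * 3 =  98304	/* below this: no pressure */
	sysctl_tcp_mem[1] = limit         = 131072	/* pressure threshold */
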
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index bef9f04c22b..ea0d2183df4 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -880,6 +880,11 @@ static void tcp_init_metrics(struct sock *sk)
 		tp->snd_ssthresh = dst_metric(dst, RTAX_SSTHRESH);
 		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
 			tp->snd_ssthresh = tp->snd_cwnd_clamp;
+	} else {
+		/* ssthresh may have been reduced unnecessarily during.
+		 * 3WHS. Restore it back to its initial default.
+		 */
+		tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
 	}
 	if (dst_metric(dst, RTAX_REORDERING) &&
 	    tp->reordering != dst_metric(dst, RTAX_REORDERING)) {
@@ -887,10 +892,7 @@ static void tcp_init_metrics(struct sock *sk)
 		tp->reordering = dst_metric(dst, RTAX_REORDERING);
 	}
 
-	if (dst_metric(dst, RTAX_RTT) == 0)
-		goto reset;
-
-	if (!tp->srtt && dst_metric_rtt(dst, RTAX_RTT) < (TCP_TIMEOUT_INIT << 3))
+	if (dst_metric(dst, RTAX_RTT) == 0 || tp->srtt == 0)
 		goto reset;
 
 	/* Initial rtt is determined from SYN,SYN-ACK.
@@ -916,19 +918,26 @@ static void tcp_init_metrics(struct sock *sk)
 		tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
 	}
 	tcp_set_rto(sk);
-	if (inet_csk(sk)->icsk_rto < TCP_TIMEOUT_INIT && !tp->rx_opt.saw_tstamp) {
 reset:
-		/* Play conservative. If timestamps are not
-		 * supported, TCP will fail to recalculate correct
-		 * rtt, if initial rto is too small. FORGET ALL AND RESET!
+	if (tp->srtt == 0) {
+		/* RFC2988bis: We've failed to get a valid RTT sample from
+		 * 3WHS. This is most likely due to retransmission,
+		 * including spurious one. Reset the RTO back to 3secs
+		 * from the more aggressive 1sec to avoid more spurious
+		 * retransmission.
 		 */
-		if (!tp->rx_opt.saw_tstamp && tp->srtt) {
-			tp->srtt = 0;
-			tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_INIT;
-			inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
-		}
+		tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_FALLBACK;
+		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
 	}
-	tp->snd_cwnd = tcp_init_cwnd(tp, dst);
+	/* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
+	 * retransmitted. In light of RFC2988bis' more aggressive 1sec
+	 * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
+	 * retransmission has occurred.
+	 */
+	if (tp->total_retrans > 1)
+		tp->snd_cwnd = 1;
+	else
+		tp->snd_cwnd = tcp_init_cwnd(tp, dst);
 	tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
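
The reset path above hinges on one question: did a valid RTT sample survive the three-way handshake (tp->srtt != 0)? A condensed standalone model of the two decisions (plain C; HZ=1000 and the cwnd of 10 are assumed stand-ins, not values from the patch):

#include <stdio.h>

#define TCP_TIMEOUT_INIT	1000	/* RFC2988bis initial RTO: 1 s */
#define TCP_TIMEOUT_FALLBACK	3000	/* conservative fallback: 3 s  */

int main(void)
{
	unsigned int srtt = 0;		/* no usable sample from the 3WHS */
	unsigned int total_retrans = 2;	/* SYN/SYN-ACK retransmitted twice */
	unsigned int rto, cwnd;

	/* no sample: fall back from the aggressive 1 s to 3 s */
	rto = srtt ? TCP_TIMEOUT_INIT : TCP_TIMEOUT_FALLBACK;
	/* per RFC5681, shrink cwnd only after more than one retransmission */
	cwnd = (total_retrans > 1) ? 1 : 10;	/* 10 stands in for tcp_init_cwnd() */
	printf("rto=%u ms cwnd=%u\n", rto, cwnd);
	return 0;
}
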
@@ -3112,12 +3121,13 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
 	tcp_xmit_retransmit_queue(sk);
 }
 
-static void tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt)
+void tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt)
 {
 	tcp_rtt_estimator(sk, seq_rtt);
 	tcp_set_rto(sk);
 	inet_csk(sk)->icsk_backoff = 0;
 }
+EXPORT_SYMBOL(tcp_valid_rtt_meas);
 
 /* Read draft-ietf-tcplw-high-performance before mucking
  * with this code. (Supersedes RFC1323)
@@ -5806,12 +5816,6 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 			tp->rx_opt.snd_wscale;
 		tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
 
-		/* tcp_ack considers this ACK as duplicate
-		 * and does not calculate rtt.
-		 * Force it here.
-		 */
-		tcp_ack_update_rtt(sk, 0, 0);
-
 		if (tp->rx_opt.tstamp_ok)
 			tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
 
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 708dc203b03..955b8e65b69 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -429,8 +429,8 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 			break;
 
 		icsk->icsk_backoff--;
-		inet_csk(sk)->icsk_rto = __tcp_set_rto(tp) <<
-					 icsk->icsk_backoff;
+		inet_csk(sk)->icsk_rto = (tp->srtt ? __tcp_set_rto(tp) :
+			TCP_TIMEOUT_INIT) << icsk->icsk_backoff;
 		tcp_bound_rto(sk);
 
 		skb = tcp_write_queue_head(sk);
@@ -1384,6 +1384,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 		isn = tcp_v4_init_sequence(skb);
 	}
 	tcp_rsk(req)->snt_isn = isn;
+	tcp_rsk(req)->snt_synack = tcp_time_stamp;
 
 	if (tcp_v4_send_synack(sk, dst, req,
 			       (struct request_values *)&tmp_ext) ||
@@ -1458,6 +1459,10 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
 
 	tcp_initialize_rcv_mss(newsk);
+	if (tcp_rsk(req)->snt_synack)
+		tcp_valid_rtt_meas(newsk,
+		    tcp_time_stamp - tcp_rsk(req)->snt_synack);
+	newtp->total_retrans = req->retrans;
 
 #ifdef CONFIG_TCP_MD5SIG
 	/* Copy over the MD5 key from the original socket */
@@ -1855,7 +1860,7 @@ static int tcp_v4_init_sock(struct sock *sk)
 	 * algorithms that we must have the following bandaid to talk
 	 * efficiently to them. -DaveM
 	 */
-	tp->snd_cwnd = 2;
+	tp->snd_cwnd = TCP_INIT_CWND;
 
 	/* See draft-stevens-tcpca-spec-01 for discussion of the
 	 * initialization of these values.
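
snt_synack records when the SYN-ACK left, so the ACK that completes the handshake yields one RTT sample for tcp_valid_rtt_meas(). A minimal sketch of that measurement with plain integers (illustrative, not kernel code); unsigned subtraction also behaves sanely across timestamp wrap-around:

#include <stdio.h>

static unsigned int synack_rtt(unsigned int now, unsigned int snt_synack)
{
	/* snt_synack == 0 means "no usable sample" (e.g. a retransmitted
	 * SYN-ACK without timestamps), mirroring the guard above */
	return snt_synack ? now - snt_synack : 0;
}

int main(void)
{
	printf("%u ticks\n", synack_rtt(100350, 100106));	/* 244 */
	return 0;
}
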
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 80b1f80759a..d2fe4e06b47 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -486,7 +486,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
 	 * algorithms that we must have the following bandaid to talk
 	 * efficiently to them. -DaveM
 	 */
-	newtp->snd_cwnd = 2;
+	newtp->snd_cwnd = TCP_INIT_CWND;
 	newtp->snd_cwnd_cnt = 0;
 	newtp->bytes_acked = 0;
 
@@ -720,6 +720,10 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
 		return NULL;
 	}
+	if (tmp_opt.saw_tstamp && tmp_opt.rcv_tsecr)
+		tcp_rsk(req)->snt_synack = tmp_opt.rcv_tsecr;
+	else if (req->retrans) /* don't take RTT sample if retrans && ~TS */
+		tcp_rsk(req)->snt_synack = 0;
 
 	/* OK, ACK is valid, create big socket and
 	 * feed this segment to it. It will repeat all
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index abca870d8ff..1b5a19340a9 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -105,6 +105,7 @@
 #include <net/route.h>
 #include <net/checksum.h>
 #include <net/xfrm.h>
+#include <trace/events/udp.h>
 #include "udp_impl.h"
 
 struct udp_table udp_table __read_mostly;
@@ -1249,6 +1250,9 @@ csum_copy_err:
 
 	if (noblock)
 		return -EAGAIN;
+
+	/* starting over for a new packet */
+	msg->msg_flags &= ~MSG_TRUNC;
 	goto try_again;
 }
 
@@ -1363,6 +1367,7 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 				 is_udplite);
 		UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
 		kfree_skb(skb);
+		trace_udp_fail_queue_rcv_skb(rc, sk);
 		return -1;
 	}
 
@@ -2206,16 +2211,10 @@ void __init udp_table_init(struct udp_table *table, const char *name)
 
 void __init udp_init(void)
 {
-	unsigned long nr_pages, limit;
+	unsigned long limit;
 
 	udp_table_init(&udp_table, "UDP");
-	/* Set the pressure threshold up by the same strategy of TCP. It is a
-	 * fraction of global memory that is up to 1/2 at 256 MB, decreasing
-	 * toward zero with the amount of memory, with a floor of 128 pages.
-	 */
-	nr_pages = totalram_pages - totalhigh_pages;
-	limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
-	limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
+	limit = nr_free_buffer_pages() / 8;
 	limit = max(limit, 128UL);
 	sysctl_udp_mem[0] = limit / 4 * 3;
 	sysctl_udp_mem[1] = limit;
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index 2d51840e53a..327a617d594 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -32,7 +32,12 @@ static int xfrm4_tunnel_check_size(struct sk_buff *skb)
 	dst = skb_dst(skb);
 	mtu = dst_mtu(dst);
 	if (skb->len > mtu) {
-		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
+		if (skb->sk)
+			ip_local_error(skb->sk, EMSGSIZE, ip_hdr(skb)->daddr,
+				       inet_sk(skb->sk)->inet_dport, mtu);
+		else
+			icmp_send(skb, ICMP_DEST_UNREACH,
+				  ICMP_FRAG_NEEDED, htonl(mtu));
 		ret = -EMSGSIZE;
 	}
 out:
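
From userspace, the effect of this hunk is that a local sender with IP_RECVERR enabled now finds EMSGSIZE (with the path MTU in ee_info) on its own error queue instead of depending on a looped-back ICMP. A minimal sketch of reading that queue (assumes the socket already has IP_RECVERR set; error handling elided):

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/errqueue.h>

static void drain_errqueue(int fd)
{
	char buf[64], cbuf[512];
	struct iovec iov = { buf, sizeof(buf) };
	struct msghdr msg;
	struct cmsghdr *cm;

	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);
	if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
		return;
	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
		if (cm->cmsg_level == IPPROTO_IP && cm->cmsg_type == IP_RECVERR) {
			struct sock_extended_err *ee =
				(struct sock_extended_err *)CMSG_DATA(cm);
			/* ee_errno == EMSGSIZE, ee_info carries the MTU */
			printf("errno=%u mtu=%u\n", ee->ee_errno, ee->ee_info);
		}
	}
}
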
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 981e43eaf70..fc5368ad2b0 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -117,7 +117,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
 	memset(fl4, 0, sizeof(struct flowi4));
 	fl4->flowi4_mark = skb->mark;
 
-	if (!(iph->frag_off & htons(IP_MF | IP_OFFSET))) {
+	if (!ip_is_fragment(iph)) {
 		switch (iph->protocol) {
 		case IPPROTO_UDP:
 		case IPPROTO_UDPLITE:
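
ip_is_fragment() simply names the classic frag_off test that the old condition spelled out; its definition is equivalent to:

static inline bool ip_is_fragment(const struct iphdr *iph)
{
	/* MF set, or a non-zero fragment offset: this skb is a fragment */
	return (iph->frag_off & htons(IP_MF | IP_OFFSET)) != 0;
}
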
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 498b927f68b..a06c53c14d8 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -656,7 +656,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
 	 * layer address of our nexhop router
 	 */
 
-	if (rt->rt6i_nexthop == NULL)
+	if (dst_get_neighbour(&rt->dst) == NULL)
 		ifa->flags &= ~IFA_F_OPTIMISTIC;
 
 	ifa->idev = idev;
@@ -1470,6 +1470,8 @@ void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
 static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
 {
 	struct in6_addr addr;
+	if (ifp->prefix_len == 127) /* RFC 6164 */
+		return;
 	ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
 	if (ipv6_addr_any(&addr))
 		return;
@@ -1559,6 +1561,11 @@ static int addrconf_ifid_sit(u8 *eui, struct net_device *dev)
 	return -1;
 }
 
+static int addrconf_ifid_gre(u8 *eui, struct net_device *dev)
+{
+	return __ipv6_isatap_ifid(eui, *(__be32 *)dev->dev_addr);
+}
+
 static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
 {
 	switch (dev->type) {
@@ -1572,6 +1579,8 @@ static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
 		return addrconf_ifid_infiniband(eui, dev);
 	case ARPHRD_SIT:
 		return addrconf_ifid_sit(eui, dev);
+	case ARPHRD_IPGRE:
+		return addrconf_ifid_gre(eui, dev);
 	}
 	return -1;
 }
@@ -2423,6 +2432,29 @@ static void addrconf_sit_config(struct net_device *dev)
 }
 #endif
 
+#if defined(CONFIG_NET_IPGRE) || defined(CONFIG_NET_IPGRE_MODULE)
+static void addrconf_gre_config(struct net_device *dev)
+{
+	struct inet6_dev *idev;
+	struct in6_addr addr;
+
+	pr_info("ipv6: addrconf_gre_config(%s)\n", dev->name);
+
+	ASSERT_RTNL();
+
+	if ((idev = ipv6_find_idev(dev)) == NULL) {
+		printk(KERN_DEBUG "init gre: add_dev failed\n");
+		return;
+	}
+
+	ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0);
+	addrconf_prefix_route(&addr, 64, dev, 0, 0);
+
+	if (!ipv6_generate_eui64(addr.s6_addr + 8, dev))
+		addrconf_add_linklocal(idev, &addr);
+}
+#endif
+
 static inline int
 ipv6_inherit_linklocal(struct inet6_dev *idev, struct net_device *link_dev)
 {
@@ -2539,6 +2571,11 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
 		addrconf_sit_config(dev);
 		break;
 #endif
+#if defined(CONFIG_NET_IPGRE) || defined(CONFIG_NET_IPGRE_MODULE)
+	case ARPHRD_IPGRE:
+		addrconf_gre_config(dev);
+		break;
+#endif
 	case ARPHRD_TUNNEL6:
 		addrconf_ip6_tnl_config(dev);
 		break;
@@ -4692,16 +4729,20 @@ int __init addrconf_init(void)
 	if (err < 0)
 		goto errout_af;
 
-	err = __rtnl_register(PF_INET6, RTM_GETLINK, NULL, inet6_dump_ifinfo);
+	err = __rtnl_register(PF_INET6, RTM_GETLINK, NULL, inet6_dump_ifinfo,
+			      NULL);
 	if (err < 0)
 		goto errout;
 
 	/* Only the first call to __rtnl_register can fail */
-	__rtnl_register(PF_INET6, RTM_NEWADDR, inet6_rtm_newaddr, NULL);
-	__rtnl_register(PF_INET6, RTM_DELADDR, inet6_rtm_deladdr, NULL);
-	__rtnl_register(PF_INET6, RTM_GETADDR, inet6_rtm_getaddr, inet6_dump_ifaddr);
-	__rtnl_register(PF_INET6, RTM_GETMULTICAST, NULL, inet6_dump_ifmcaddr);
-	__rtnl_register(PF_INET6, RTM_GETANYCAST, NULL, inet6_dump_ifacaddr);
+	__rtnl_register(PF_INET6, RTM_NEWADDR, inet6_rtm_newaddr, NULL, NULL);
+	__rtnl_register(PF_INET6, RTM_DELADDR, inet6_rtm_deladdr, NULL, NULL);
+	__rtnl_register(PF_INET6, RTM_GETADDR, inet6_rtm_getaddr,
+			inet6_dump_ifaddr, NULL);
+	__rtnl_register(PF_INET6, RTM_GETMULTICAST, NULL,
+			inet6_dump_ifmcaddr, NULL);
+	__rtnl_register(PF_INET6, RTM_GETANYCAST, NULL,
+			inet6_dump_ifacaddr, NULL);
 
 	ipv6_addr_label_rtnl_register();
 
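
The GRE case above reuses the ISATAP-style interface identifier: 00:00:5e:fe followed by the tunnel endpoint's IPv4 address (RFC 5214, section 6.1). A simplified standalone sketch; the in-tree __ipv6_isatap_ifid() additionally derives the universal/local bit from the address class, which is only assumed here:

#include <stdint.h>
#include <string.h>

static void isatap_style_ifid(uint8_t eui[8], uint32_t ipv4_be, int global)
{
	eui[0] = global ? 0x02 : 0x00;	/* u/g bit handling assumed */
	eui[1] = 0x00;
	eui[2] = 0x5e;
	eui[3] = 0xfe;
	memcpy(eui + 4, &ipv4_be, 4);	/* embed the IPv4 address */
}
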
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
index c8993e5a337..2d8ddba9ee5 100644
--- a/net/ipv6/addrlabel.c
+++ b/net/ipv6/addrlabel.c
@@ -592,8 +592,11 @@ out:
 
 void __init ipv6_addr_label_rtnl_register(void)
 {
-	__rtnl_register(PF_INET6, RTM_NEWADDRLABEL, ip6addrlbl_newdel, NULL);
-	__rtnl_register(PF_INET6, RTM_DELADDRLABEL, ip6addrlbl_newdel, NULL);
-	__rtnl_register(PF_INET6, RTM_GETADDRLABEL, ip6addrlbl_get, ip6addrlbl_dump);
+	__rtnl_register(PF_INET6, RTM_NEWADDRLABEL, ip6addrlbl_newdel,
+			NULL, NULL);
+	__rtnl_register(PF_INET6, RTM_DELADDRLABEL, ip6addrlbl_newdel,
+			NULL, NULL);
+	__rtnl_register(PF_INET6, RTM_GETADDRLABEL, ip6addrlbl_get,
+			ip6addrlbl_dump, NULL);
 }
 
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index d450a2f9fc0..3b5669a2582 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -274,7 +274,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 		return -EINVAL;
 
 	if (addr->sin6_family != AF_INET6)
-		return -EINVAL;
+		return -EAFNOSUPPORT;
 
 	addr_type = ipv6_addr_type(&addr->sin6_addr);
 	if ((addr_type & IPV6_ADDR_MULTICAST) && sock->type == SOCK_STREAM)
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 4076a0b14b2..54a4678955b 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -1455,7 +1455,7 @@ static int fib6_age(struct rt6_info *rt, void *arg)
 		RT6_TRACE("aging clone %p\n", rt);
 		return -1;
 	} else if ((rt->rt6i_flags & RTF_GATEWAY) &&
-		   (!(rt->rt6i_nexthop->flags & NTF_ROUTER))) {
+		   (!(dst_get_neighbour(&rt->dst)->flags & NTF_ROUTER))) {
 		RT6_TRACE("purging route %p via non-router but gateway\n",
 			  rt);
 		return -1;
@@ -1586,7 +1586,8 @@ int __init fib6_init(void)
 	if (ret)
 		goto out_kmem_cache_create;
 
-	ret = __rtnl_register(PF_INET6, RTM_GETROUTE, NULL, inet6_dump_fib);
+	ret = __rtnl_register(PF_INET6, RTM_GETROUTE, NULL, inet6_dump_fib,
+			      NULL);
 	if (ret)
 		goto out_unregister_subsys;
 out:
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 9d4b165837d..32e5339db0c 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -100,6 +100,7 @@ static int ip6_finish_output2(struct sk_buff *skb)
 {
 	struct dst_entry *dst = skb_dst(skb);
 	struct net_device *dev = dst->dev;
+	struct neighbour *neigh;
 
 	skb->protocol = htons(ETH_P_IPV6);
 	skb->dev = dev;
@@ -134,10 +135,9 @@ static int ip6_finish_output2(struct sk_buff *skb)
 				 skb->len);
 	}
 
-	if (dst->hh)
-		return neigh_hh_output(dst->hh, skb);
-	else if (dst->neighbour)
-		return dst->neighbour->output(skb);
+	neigh = dst_get_neighbour(dst);
+	if (neigh)
+		return neigh_output(neigh, skb);
 
 	IP6_INC_STATS_BH(dev_net(dst->dev),
 			 ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
@@ -385,6 +385,7 @@ int ip6_forward(struct sk_buff *skb)
 	struct ipv6hdr *hdr = ipv6_hdr(skb);
 	struct inet6_skb_parm *opt = IP6CB(skb);
 	struct net *net = dev_net(dst->dev);
+	struct neighbour *n;
 	u32 mtu;
 
 	if (net->ipv6.devconf_all->forwarding == 0)
@@ -459,11 +460,10 @@ int ip6_forward(struct sk_buff *skb)
 	   send redirects to source routed frames.
 	   We don't send redirects to frames decapsulated from IPsec.
 	 */
-	if (skb->dev == dst->dev && dst->neighbour && opt->srcrt == 0 &&
-	    !skb_sec_path(skb)) {
+	n = dst_get_neighbour(dst);
+	if (skb->dev == dst->dev && n && opt->srcrt == 0 && !skb_sec_path(skb)) {
 		struct in6_addr *target = NULL;
 		struct rt6_info *rt;
-		struct neighbour *n = dst->neighbour;
 
 		/*
 		 *	incoming and outgoing devices are the same
@@ -596,6 +596,31 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
 	return offset;
 }
 
+void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
+{
+	static atomic_t ipv6_fragmentation_id;
+	int old, new;
+
+	if (rt) {
+		struct inet_peer *peer;
+
+		if (!rt->rt6i_peer)
+			rt6_bind_peer(rt, 1);
+		peer = rt->rt6i_peer;
+		if (peer) {
+			fhdr->identification = htonl(inet_getid(peer, 0));
+			return;
+		}
+	}
+	do {
+		old = atomic_read(&ipv6_fragmentation_id);
+		new = old + 1;
+		if (!new)
+			new = 1;
+	} while (atomic_cmpxchg(&ipv6_fragmentation_id, old, new) != old);
+	fhdr->identification = htonl(new);
+}
+
 int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 {
 	struct sk_buff *frag;
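
The fallback branch of ipv6_select_ident() is a lock-free counter that never hands out 0 (the slow path in ip6_fragment() treats frag_id == 0 as "not yet chosen"). The same loop, written with C11 atomics as a standalone sketch:

#include <stdatomic.h>
#include <stdint.h>

static atomic_uint frag_id_counter;

static uint32_t next_frag_id(void)
{
	unsigned int old, new;

	do {
		old = atomic_load(&frag_id_counter);
		new = old + 1;
		if (!new)
			new = 1;	/* skip 0, as the kernel loop does */
	} while (!atomic_compare_exchange_weak(&frag_id_counter, &old, new));
	return new;
}
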
@@ -680,7 +705,7 @@ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
 	skb_reset_network_header(skb);
 	memcpy(skb_network_header(skb), tmp_hdr, hlen);
 
-	ipv6_select_ident(fh);
+	ipv6_select_ident(fh, rt);
 	fh->nexthdr = nexthdr;
 	fh->reserved = 0;
 	fh->frag_off = htons(IP6_MF);
@@ -826,7 +851,7 @@ slow_path:
 		fh->nexthdr = nexthdr;
 		fh->reserved = 0;
 		if (!frag_id) {
-			ipv6_select_ident(fh);
+			ipv6_select_ident(fh, rt);
 			frag_id = fh->identification;
 		} else
 			fh->identification = frag_id;
@@ -920,8 +945,11 @@ out:
 static int ip6_dst_lookup_tail(struct sock *sk,
 			       struct dst_entry **dst, struct flowi6 *fl6)
 {
-	int err;
 	struct net *net = sock_net(sk);
+#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
+	struct neighbour *n;
+#endif
+	int err;
 
 	if (*dst == NULL)
 		*dst = ip6_route_output(net, sk, fl6);
@@ -947,7 +975,8 @@ static int ip6_dst_lookup_tail(struct sock *sk,
 		 * dst entry and replace it instead with the
 		 * dst entry of the nexthop router
 		 */
-		if ((*dst)->neighbour && !((*dst)->neighbour->nud_state & NUD_VALID)) {
+		n = dst_get_neighbour(*dst);
+		if (n && !(n->nud_state & NUD_VALID)) {
 			struct inet6_ifaddr *ifp;
 			struct flowi6 fl_gw6;
 			int redirect;
@@ -1072,7 +1101,8 @@ static inline int ip6_ufo_append_data(struct sock *sk,
 			int getfrag(void *from, char *to, int offset, int len,
 			int odd, struct sk_buff *skb),
 			void *from, int length, int hh_len, int fragheaderlen,
-			int transhdrlen, int mtu,unsigned int flags)
+			int transhdrlen, int mtu,unsigned int flags,
+			struct rt6_info *rt)
 
 {
 	struct sk_buff *skb;
@@ -1116,7 +1146,7 @@ static inline int ip6_ufo_append_data(struct sock *sk,
 		skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
 					     sizeof(struct frag_hdr)) & ~7;
 		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
-		ipv6_select_ident(&fhdr);
+		ipv6_select_ident(&fhdr, rt);
 		skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
 		__skb_queue_tail(&sk->sk_write_queue, skb);
 
@@ -1282,7 +1312,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
 
 		err = ip6_ufo_append_data(sk, getfrag, from, length,
 					  hh_len, fragheaderlen,
-					  transhdrlen, mtu, flags);
+					  transhdrlen, mtu, flags, rt);
 		if (err)
 			goto error;
 		return 0;
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 82a809901f8..705c8288628 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1354,7 +1354,8 @@ int __init ip6_mr_init(void)
 		goto add_proto_fail;
 	}
 #endif
-	rtnl_register(RTNL_FAMILY_IP6MR, RTM_GETROUTE, NULL, ip6mr_rtm_dumproute);
+	rtnl_register(RTNL_FAMILY_IP6MR, RTM_GETROUTE, NULL,
+		      ip6mr_rtm_dumproute, NULL);
 	return 0;
 #ifdef CONFIG_IPV6_PIMSM_V2
 add_proto_fail:
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 7596f071d30..9da6e02eaae 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -107,8 +107,6 @@ static const struct neigh_ops ndisc_generic_ops = {
 	.error_report = ndisc_error_report,
 	.output = neigh_resolve_output,
 	.connected_output = neigh_connected_output,
-	.hh_output = dev_queue_xmit,
-	.queue_xmit = dev_queue_xmit,
 };
 
 static const struct neigh_ops ndisc_hh_ops = {
@@ -117,17 +115,13 @@ static const struct neigh_ops ndisc_hh_ops = {
 	.error_report = ndisc_error_report,
 	.output = neigh_resolve_output,
 	.connected_output = neigh_resolve_output,
-	.hh_output = dev_queue_xmit,
-	.queue_xmit = dev_queue_xmit,
 };
 
 
 static const struct neigh_ops ndisc_direct_ops = {
 	.family = AF_INET6,
-	.output = dev_queue_xmit,
-	.connected_output = dev_queue_xmit,
-	.hh_output = dev_queue_xmit,
-	.queue_xmit = dev_queue_xmit,
+	.output = neigh_direct_output,
+	.connected_output = neigh_direct_output,
 };
 
 struct neigh_table nd_tbl = {
@@ -392,7 +386,7 @@ static int ndisc_constructor(struct neighbour *neigh)
 	if (!dev->header_ops) {
 		neigh->nud_state = NUD_NOARP;
 		neigh->ops = &ndisc_direct_ops;
-		neigh->output = neigh->ops->queue_xmit;
+		neigh->output = neigh_direct_output;
 	} else {
 		if (is_multicast) {
 			neigh->nud_state = NUD_NOARP;
@@ -1244,7 +1238,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
 	rt = rt6_get_dflt_router(&ipv6_hdr(skb)->saddr, skb->dev);
 
 	if (rt)
-		neigh = rt->rt6i_nexthop;
+		neigh = dst_get_neighbour(&rt->dst);
 
 	if (rt && lifetime == 0) {
 		neigh_clone(neigh);
@@ -1265,7 +1259,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
 		return;
 	}
 
-	neigh = rt->rt6i_nexthop;
+	neigh = dst_get_neighbour(&rt->dst);
 	if (neigh == NULL) {
 		ND_PRINTK0(KERN_ERR
 			   "ICMPv6 RA: %s() got default router without neighbour.\n",
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index cc7313b8f7e..6a79f3081bd 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -959,57 +959,54 @@ static int do_rawv6_setsockopt(struct sock *sk, int level, int optname,
 		return -EFAULT;
 
 	switch (optname) {
 	case IPV6_CHECKSUM:
 		if (inet_sk(sk)->inet_num == IPPROTO_ICMPV6 &&
 		    level == IPPROTO_IPV6) {
 			/*
 			 * RFC3542 tells that IPV6_CHECKSUM socket
 			 * option in the IPPROTO_IPV6 level is not
 			 * allowed on ICMPv6 sockets.
 			 * If you want to set it, use IPPROTO_RAW
 			 * level IPV6_CHECKSUM socket option
 			 * (Linux extension).
 			 */
 			return -EINVAL;
 		}
 
 		/* You may get strange result with a positive odd offset;
 		   RFC2292bis agrees with me. */
 		if (val > 0 && (val&1))
 			return -EINVAL;
 		if (val < 0) {
 			rp->checksum = 0;
 		} else {
 			rp->checksum = 1;
 			rp->offset = val;
 		}
 
 		return 0;
-		break;
 
 	default:
 		return -ENOPROTOOPT;
 	}
 }
 
 static int rawv6_setsockopt(struct sock *sk, int level, int optname,
 			    char __user *optval, unsigned int optlen)
 {
-	switch(level) {
+	switch (level) {
 	case SOL_RAW:
 		break;
 
 	case SOL_ICMPV6:
 		if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
 			return -EOPNOTSUPP;
-		return rawv6_seticmpfilter(sk, level, optname, optval,
-					   optlen);
-	case SOL_IPV6:
-		if (optname == IPV6_CHECKSUM)
-			break;
-	default:
-		return ipv6_setsockopt(sk, level, optname, optval,
-				       optlen);
+		return rawv6_seticmpfilter(sk, level, optname, optval, optlen);
+	case SOL_IPV6:
+		if (optname == IPV6_CHECKSUM)
+			break;
+	default:
+		return ipv6_setsockopt(sk, level, optname, optval, optlen);
 	}
 
 	return do_rawv6_setsockopt(sk, level, optname, optval, optlen);
@@ -1075,21 +1072,19 @@ static int do_rawv6_getsockopt(struct sock *sk, int level, int optname,
 static int rawv6_getsockopt(struct sock *sk, int level, int optname,
 			    char __user *optval, int __user *optlen)
 {
-	switch(level) {
+	switch (level) {
 	case SOL_RAW:
 		break;
 
 	case SOL_ICMPV6:
 		if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
 			return -EOPNOTSUPP;
-		return rawv6_geticmpfilter(sk, level, optname, optval,
-					   optlen);
-	case SOL_IPV6:
-		if (optname == IPV6_CHECKSUM)
-			break;
-	default:
-		return ipv6_getsockopt(sk, level, optname, optval,
-				       optlen);
+		return rawv6_geticmpfilter(sk, level, optname, optval, optlen);
+	case SOL_IPV6:
+		if (optname == IPV6_CHECKSUM)
+			break;
+	default:
+		return ipv6_getsockopt(sk, level, optname, optval, optlen);
 	}
 
 	return do_rawv6_getsockopt(sk, level, optname, optval, optlen);
@@ -1119,31 +1114,29 @@ static int compat_rawv6_getsockopt(struct sock *sk, int level, int optname,
 
 static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg)
 {
-	switch(cmd) {
-	case SIOCOUTQ:
-	{
-		int amount = sk_wmem_alloc_get(sk);
+	switch (cmd) {
+	case SIOCOUTQ: {
+		int amount = sk_wmem_alloc_get(sk);
 
 		return put_user(amount, (int __user *)arg);
 	}
-	case SIOCINQ:
-	{
-		struct sk_buff *skb;
-		int amount = 0;
-
-		spin_lock_bh(&sk->sk_receive_queue.lock);
-		skb = skb_peek(&sk->sk_receive_queue);
-		if (skb != NULL)
-			amount = skb->tail - skb->transport_header;
-		spin_unlock_bh(&sk->sk_receive_queue.lock);
-		return put_user(amount, (int __user *)arg);
-	}
-
+	case SIOCINQ: {
+		struct sk_buff *skb;
+		int amount = 0;
+
+		spin_lock_bh(&sk->sk_receive_queue.lock);
+		skb = skb_peek(&sk->sk_receive_queue);
+		if (skb != NULL)
+			amount = skb->tail - skb->transport_header;
+		spin_unlock_bh(&sk->sk_receive_queue.lock);
+		return put_user(amount, (int __user *)arg);
+	}
 
 	default:
 #ifdef CONFIG_IPV6_MROUTE
 		return ip6mr_ioctl(sk, cmd, (void __user *)arg);
 #else
 		return -ENOIOCTLCMD;
 #endif
 	}
 }
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index de2b1decd78..e8987da0666 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -72,7 +72,8 @@
 #define RT6_TRACE(x...) do { ; } while (0)
 #endif
 
-static struct rt6_info * ip6_rt_copy(struct rt6_info *ort);
+static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort,
+				    const struct in6_addr *dest);
 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
 static unsigned int ip6_default_advmss(const struct dst_entry *dst);
 static unsigned int ip6_default_mtu(const struct dst_entry *dst);
@@ -127,6 +128,11 @@ static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
 	return p;
 }
 
+static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst, const void *daddr)
+{
+	return __neigh_lookup_errno(&nd_tbl, daddr, dst->dev);
+}
+
 static struct dst_ops ip6_dst_ops_template = {
 	.family = AF_INET6,
 	.protocol = cpu_to_be16(ETH_P_IPV6),
@@ -142,6 +148,7 @@ static struct dst_ops ip6_dst_ops_template = {
 	.link_failure = ip6_link_failure,
 	.update_pmtu = ip6_rt_update_pmtu,
 	.local_out = __ip6_local_out,
+	.neigh_lookup = ip6_neigh_lookup,
 };
 
 static unsigned int ip6_blackhole_default_mtu(const struct dst_entry *dst)
@@ -168,6 +175,7 @@ static struct dst_ops ip6_dst_blackhole_ops = {
 	.default_advmss = ip6_default_advmss,
 	.update_pmtu = ip6_rt_blackhole_update_pmtu,
 	.cow_metrics = ip6_rt_blackhole_cow_metrics,
+	.neigh_lookup = ip6_neigh_lookup,
 };
 
 static const u32 ip6_template_metrics[RTAX_MAX] = {
@@ -228,9 +236,10 @@ static struct rt6_info ip6_blk_hole_entry_template = {
 
 /* allocate dst with ip6_dst_ops */
 static inline struct rt6_info *ip6_dst_alloc(struct dst_ops *ops,
-					     struct net_device *dev)
+					     struct net_device *dev,
+					     int flags)
 {
-	struct rt6_info *rt = dst_alloc(ops, dev, 0, 0, 0);
+	struct rt6_info *rt = dst_alloc(ops, dev, 0, 0, flags);
 
 	memset(&rt->rt6i_table, 0, sizeof(*rt) - sizeof(struct dst_entry));
 
@@ -355,7 +364,7 @@ out:
 #ifdef CONFIG_IPV6_ROUTER_PREF
 static void rt6_probe(struct rt6_info *rt)
 {
-	struct neighbour *neigh = rt ? rt->rt6i_nexthop : NULL;
+	struct neighbour *neigh = rt ? dst_get_neighbour(&rt->dst) : NULL;
 	/*
 	 * Okay, this does not seem to be appropriate
 	 * for now, however, we need to check if it
@@ -403,7 +412,7 @@ static inline int rt6_check_dev(struct rt6_info *rt, int oif)
 
 static inline int rt6_check_neigh(struct rt6_info *rt)
 {
-	struct neighbour *neigh = rt->rt6i_nexthop;
+	struct neighbour *neigh = dst_get_neighbour(&rt->dst);
 	int m;
 	if (rt->rt6i_flags & RTF_NONEXTHOP ||
 	    !(rt->rt6i_flags & RTF_GATEWAY))
@@ -682,7 +691,8 @@ int ip6_ins_rt(struct rt6_info *rt)
 	return __ip6_ins_rt(rt, &info);
 }
 
-static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, const struct in6_addr *daddr,
+static struct rt6_info *rt6_alloc_cow(const struct rt6_info *ort,
+				      const struct in6_addr *daddr,
 				      const struct in6_addr *saddr)
 {
 	struct rt6_info *rt;
@@ -691,7 +701,7 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, const struct in6_add
 	 *	Clone the route.
 	 */
 
-	rt = ip6_rt_copy(ort);
+	rt = ip6_rt_copy(ort, daddr);
 
 	if (rt) {
 		struct neighbour *neigh;
@@ -699,12 +709,11 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, const struct in6_add
 
 		if (!(rt->rt6i_flags&RTF_GATEWAY)) {
 			if (rt->rt6i_dst.plen != 128 &&
-			    ipv6_addr_equal(&rt->rt6i_dst.addr, daddr))
+			    ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
 				rt->rt6i_flags |= RTF_ANYCAST;
 			ipv6_addr_copy(&rt->rt6i_gateway, daddr);
 		}
 
-		ipv6_addr_copy(&rt->rt6i_dst.addr, daddr);
 		rt->rt6i_dst.plen = 128;
 		rt->rt6i_flags |= RTF_CACHE;
 		rt->dst.flags |= DST_HOST;
@@ -744,22 +753,23 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, const struct in6_add
 			dst_free(&rt->dst);
 			return NULL;
 		}
-		rt->rt6i_nexthop = neigh;
+		dst_set_neighbour(&rt->dst, neigh);
 
 	}
 
 	return rt;
 }
 
-static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort, const struct in6_addr *daddr)
+static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort,
+					const struct in6_addr *daddr)
 {
-	struct rt6_info *rt = ip6_rt_copy(ort);
+	struct rt6_info *rt = ip6_rt_copy(ort, daddr);
+
 	if (rt) {
-		ipv6_addr_copy(&rt->rt6i_dst.addr, daddr);
 		rt->rt6i_dst.plen = 128;
 		rt->rt6i_flags |= RTF_CACHE;
 		rt->dst.flags |= DST_HOST;
-		rt->rt6i_nexthop = neigh_clone(ort->rt6i_nexthop);
+		dst_set_neighbour(&rt->dst, neigh_clone(dst_get_neighbour(&ort->dst)));
 	}
 	return rt;
 }
@@ -793,7 +803,7 @@ restart:
 	dst_hold(&rt->dst);
 	read_unlock_bh(&table->tb6_lock);
 
-	if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP))
+	if (!dst_get_neighbour(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
 		nrt = rt6_alloc_cow(rt, &fl6->daddr, &fl6->saddr);
 	else if (!(rt->dst.flags & DST_HOST))
 		nrt = rt6_alloc_clone(rt, &fl6->daddr);
@@ -899,7 +909,10 @@ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_ori
 		new->input = dst_discard;
 		new->output = dst_discard;
 
-		dst_copy_metrics(new, &ort->dst);
+		if (dst_metrics_read_only(&ort->dst))
+			new->_metrics = ort->dst._metrics;
+		else
+			dst_copy_metrics(new, &ort->dst);
 		rt->rt6i_idev = ort->rt6i_idev;
 		if (rt->rt6i_idev)
 			in6_dev_hold(rt->rt6i_idev);
@@ -1042,7 +1055,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
 	if (unlikely(idev == NULL))
 		return NULL;
 
-	rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, dev);
+	rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, dev, 0);
 	if (unlikely(rt == NULL)) {
 		in6_dev_put(idev);
 		goto out;
@@ -1057,19 +1070,12 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
 	}
 
 	rt->rt6i_idev = idev;
-	rt->rt6i_nexthop = neigh;
+	dst_set_neighbour(&rt->dst, neigh);
 	atomic_set(&rt->dst.__refcnt, 1);
+	ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
 	dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255);
 	rt->dst.output = ip6_output;
 
-#if 0	/* there's no chance to use these for ndisc */
-	rt->dst.flags = ipv6_addr_type(addr) & IPV6_ADDR_UNICAST
-				? DST_HOST
-				: 0;
-	ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
-	rt->rt6i_dst.plen = 128;
-#endif
-
 	spin_lock_bh(&icmp6_dst_lock);
 	rt->dst.next = icmp6_dst_gc_list;
 	icmp6_dst_gc_list = &rt->dst;
@@ -1214,7 +1220,7 @@ int ip6_route_add(struct fib6_config *cfg)
 		goto out;
 	}
 
-	rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, NULL);
+	rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, NULL, DST_NOCOUNT);
 
 	if (rt == NULL) {
 		err = -ENOMEM;
@@ -1244,7 +1250,7 @@ int ip6_route_add(struct fib6_config *cfg)
 	ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
 	rt->rt6i_dst.plen = cfg->fc_dst_len;
 	if (rt->rt6i_dst.plen == 128)
-		rt->dst.flags = DST_HOST;
+		rt->dst.flags |= DST_HOST;
 
 #ifdef CONFIG_IPV6_SUBTREES
 	ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
@@ -1345,12 +1351,12 @@ int ip6_route_add(struct fib6_config *cfg)
 		rt->rt6i_prefsrc.plen = 0;
 
 	if (cfg->fc_flags & (RTF_GATEWAY | RTF_NONEXTHOP)) {
-		rt->rt6i_nexthop = __neigh_lookup_errno(&nd_tbl, &rt->rt6i_gateway, dev);
-		if (IS_ERR(rt->rt6i_nexthop)) {
-			err = PTR_ERR(rt->rt6i_nexthop);
-			rt->rt6i_nexthop = NULL;
+		struct neighbour *n = __neigh_lookup_errno(&nd_tbl, &rt->rt6i_gateway, dev);
+		if (IS_ERR(n)) {
+			err = PTR_ERR(n);
 			goto out;
 		}
+		dst_set_neighbour(&rt->dst, n);
 	}
 
 	rt->rt6i_flags = cfg->fc_flags;
@@ -1581,10 +1587,10 @@ void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src,
 	dst_confirm(&rt->dst);
 
 	/* Duplicate redirect: silently ignore. */
-	if (neigh == rt->dst.neighbour)
+	if (neigh == dst_get_neighbour(&rt->dst))
 		goto out;
 
-	nrt = ip6_rt_copy(rt);
+	nrt = ip6_rt_copy(rt, dest);
 	if (nrt == NULL)
 		goto out;
 
@@ -1592,12 +1598,11 @@ void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src,
 	if (on_link)
 		nrt->rt6i_flags &= ~RTF_GATEWAY;
 
-	ipv6_addr_copy(&nrt->rt6i_dst.addr, dest);
 	nrt->rt6i_dst.plen = 128;
 	nrt->dst.flags |= DST_HOST;
 
 	ipv6_addr_copy(&nrt->rt6i_gateway, (struct in6_addr*)neigh->primary_key);
-	nrt->rt6i_nexthop = neigh_clone(neigh);
+	dst_set_neighbour(&nrt->dst, neigh_clone(neigh));
 
 	if (ip6_ins_rt(nrt))
 		goto out;
@@ -1677,7 +1682,7 @@ again:
 	   1. It is connected route. Action: COW
 	   2. It is gatewayed route or NONEXTHOP route. Action: clone it.
 	 */
-	if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP))
+	if (!dst_get_neighbour(&rt->dst) && !(rt->rt6i_flags & RTF_NONEXTHOP))
 		nrt = rt6_alloc_cow(rt, daddr, saddr);
 	else
 		nrt = rt6_alloc_clone(rt, daddr);
@@ -1730,16 +1735,19 @@ void rt6_pmtu_discovery(const struct in6_addr *daddr, const struct in6_addr *sad
  *	Misc support functions
  */
 
-static struct rt6_info * ip6_rt_copy(struct rt6_info *ort)
+static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort,
+				    const struct in6_addr *dest)
 {
 	struct net *net = dev_net(ort->rt6i_dev);
 	struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops,
-					    ort->dst.dev);
+					    ort->dst.dev, 0);
 
 	if (rt) {
 		rt->dst.input = ort->dst.input;
 		rt->dst.output = ort->dst.output;
 
+		ipv6_addr_copy(&rt->rt6i_dst.addr, dest);
+		rt->rt6i_dst.plen = ort->rt6i_dst.plen;
 		dst_copy_metrics(&rt->dst, &ort->dst);
 		rt->dst.error = ort->dst.error;
 		rt->rt6i_idev = ort->rt6i_idev;
@@ -1752,7 +1760,6 @@ static struct rt6_info * ip6_rt_copy(struct rt6_info *ort)
 		rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES;
 		rt->rt6i_metric = 0;
 
-		memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
 #ifdef CONFIG_IPV6_SUBTREES
 		memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
 #endif
@@ -2013,7 +2020,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
 {
 	struct net *net = dev_net(idev->dev);
 	struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops,
-					    net->loopback_dev);
+					    net->loopback_dev, 0);
 	struct neighbour *neigh;
 
 	if (rt == NULL) {
@@ -2025,7 +2032,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
 
 	in6_dev_hold(idev);
 
-	rt->dst.flags = DST_HOST;
+	rt->dst.flags |= DST_HOST;
 	rt->dst.input = ip6_input;
 	rt->dst.output = ip6_output;
 	rt->rt6i_idev = idev;
@@ -2042,7 +2049,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
 
 		return ERR_CAST(neigh);
 	}
-	rt->rt6i_nexthop = neigh;
+	dst_set_neighbour(&rt->dst, neigh);
 
 	ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
 	rt->rt6i_dst.plen = 128;
@@ -2407,8 +2414,8 @@ static int rt6_fill_node(struct net *net,
 	if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
 		goto nla_put_failure;
 
-	if (rt->dst.neighbour)
-		NLA_PUT(skb, RTA_GATEWAY, 16, &rt->dst.neighbour->primary_key);
+	if (dst_get_neighbour(&rt->dst))
+		NLA_PUT(skb, RTA_GATEWAY, 16, &dst_get_neighbour(&rt->dst)->primary_key);
 
 	if (rt->dst.dev)
 		NLA_PUT_U32(skb, RTA_OIF, rt->rt6i_dev->ifindex);
@@ -2592,6 +2599,7 @@ struct rt6_proc_arg
 static int rt6_info_route(struct rt6_info *rt, void *p_arg)
 {
 	struct seq_file *m = p_arg;
+	struct neighbour *n;
 
 	seq_printf(m, "%pi6 %02x ", &rt->rt6i_dst.addr, rt->rt6i_dst.plen);
 
@@ -2600,9 +2608,9 @@ static int rt6_info_route(struct rt6_info *rt, void *p_arg)
 #else
 	seq_puts(m, "00000000000000000000000000000000 00 ");
 #endif
-
-	if (rt->rt6i_nexthop) {
-		seq_printf(m, "%pi6", rt->rt6i_nexthop->primary_key);
+	n = dst_get_neighbour(&rt->dst);
+	if (n) {
+		seq_printf(m, "%pi6", n->primary_key);
 	} else {
 		seq_puts(m, "00000000000000000000000000000000");
 	}
@@ -2925,9 +2933,9 @@ int __init ip6_route_init(void)
 		goto xfrm6_init;
 
 	ret = -ENOBUFS;
-	if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL) ||
-	    __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL) ||
-	    __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL))
+	if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL, NULL) ||
+	    __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL, NULL) ||
+	    __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL, NULL))
 		goto fib6_rules_init;
 
 	ret = register_netdevice_notifier(&ip6_route_dev_notifier);
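
Every rt6i_nexthop / dst->neighbour access in this file now funnels through dst_get_neighbour()/dst_set_neighbour(). Their likely shape is a trivial wrapper around a now-private struct member; a sketch under that assumption, with the field name hypothetical:

static inline struct neighbour *dst_get_neighbour(struct dst_entry *dst)
{
	return dst->_neighbour;	/* field name assumed, not confirmed */
}

static inline void dst_set_neighbour(struct dst_entry *dst,
				     struct neighbour *neigh)
{
	dst->_neighbour = neigh;
}
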
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 1cca5761aea..07bf1085458 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -677,7 +677,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
 		struct neighbour *neigh = NULL;
 
 		if (skb_dst(skb))
-			neigh = skb_dst(skb)->neighbour;
+			neigh = dst_get_neighbour(skb_dst(skb));
 
 		if (neigh == NULL) {
 			if (net_ratelimit())
@@ -702,7 +702,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
 		struct neighbour *neigh = NULL;
 
 		if (skb_dst(skb))
-			neigh = skb_dst(skb)->neighbour;
+			neigh = dst_get_neighbour(skb_dst(skb));
 
 		if (neigh == NULL) {
 			if (net_ratelimit())
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 8b9644a8b69..89d5bf80622 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -223,6 +223,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
 	ireq->wscale_ok = tcp_opt.wscale_ok;
 	ireq->tstamp_ok = tcp_opt.saw_tstamp;
 	req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
+	treq->snt_synack = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsecr : 0;
 	treq->rcv_isn = ntohl(th->seq) - 1;
 	treq->snt_isn = cookie;
 
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 87551ca568c..78aa53492b3 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1341,6 +1341,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 	}
 have_isn:
 	tcp_rsk(req)->snt_isn = isn;
+	tcp_rsk(req)->snt_synack = tcp_time_stamp;
 
 	security_inet_conn_request(sk, skb, req);
 
@@ -1509,6 +1510,10 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 	tcp_sync_mss(newsk, dst_mtu(dst));
 	newtp->advmss = dst_metric_advmss(dst);
 	tcp_initialize_rcv_mss(newsk);
+	if (tcp_rsk(req)->snt_synack)
+		tcp_valid_rtt_meas(newsk,
+		    tcp_time_stamp - tcp_rsk(req)->snt_synack);
+	newtp->total_retrans = req->retrans;
 
 	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
 	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 41f8c9c08db..29213b51c49 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -453,8 +453,11 @@ csum_copy_err:
 	}
 	unlock_sock_fast(sk, slow);
 
-	if (flags & MSG_DONTWAIT)
+	if (noblock)
 		return -EAGAIN;
+
+	/* starting over for a new packet */
+	msg->msg_flags &= ~MSG_TRUNC;
 	goto try_again;
 }
 
@@ -1356,7 +1359,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, u32 features)
 	fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
 	fptr->nexthdr = nexthdr;
 	fptr->reserved = 0;
-	ipv6_select_ident(fptr);
+	ipv6_select_ident(fptr, (struct rt6_info *)skb_dst(skb));
 
 	/* Fragment the skb. ipv6 header and the remaining fields of the
 	 * fragment header are updated in ipv6_gso_segment()
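In the first udp.c hunk the retry path now keys off noblock and clears MSG_TRUNC before jumping back to try_again, so a truncation flag set while discarding a corrupt datagram cannot leak into the result for the next one. A small model of that reset-on-retry pattern (the flag and function names here are stand-ins, not the real socket API):

/* Per-datagram flags must be cleared when a corrupt datagram is dropped
 * and the receive loop starts over. */
#include <stdio.h>

#define MSG_TRUNC 0x20

struct msg { int flags; };

/* first datagram: truncated and corrupt; second: fine */
static int recv_one(struct msg *msg, int attempt)
{
	if (attempt == 0) {
		msg->flags |= MSG_TRUNC;
		return -1;              /* checksum error, caller retries */
	}
	return 0;
}

int main(void)
{
	struct msg msg = { 0 };
	int attempt = 0;

	while (recv_one(&msg, attempt++) < 0)
		msg.flags &= ~MSG_TRUNC;  /* starting over for a new packet */

	printf("flags = %#x\n", msg.flags); /* 0: nothing leaked */
	return 0;
}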
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index cc616974a44..c24f25ab67d 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -369,7 +369,7 @@ static void irda_getvalue_confirm(int result, __u16 obj_id,
 {
 	struct irda_sock *self;
 
-	self = (struct irda_sock *) priv;
+	self = priv;
 	if (!self) {
 		IRDA_WARNING("%s: lost myself!\n", __func__);
 		return;
@@ -418,7 +418,7 @@ static void irda_selective_discovery_indication(discinfo_t *discovery,
 
 	IRDA_DEBUG(2, "%s()\n", __func__);
 
-	self = (struct irda_sock *) priv;
+	self = priv;
 	if (!self) {
 		IRDA_WARNING("%s: lost myself!\n", __func__);
 		return;
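These af_irda.c hunks, and the irda hunks that follow, drop explicit casts on assignments from void *; C performs that conversion implicitly, so the casts were pure noise. A compilable illustration:

/* void * converts to any object pointer implicitly in C (unlike C++),
 * which is the whole substance of these cast removals. The struct name
 * is invented for the example. */
#include <stdio.h>

struct ctx { int magic; };

static void callback(void *priv)
{
	struct ctx *self = priv;   /* no cast needed */

	printf("magic = %d\n", self->magic);
}

int main(void)
{
	struct ctx c = { .magic = 7 };

	callback(&c);
	return 0;
}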
diff --git a/net/irda/ircomm/ircomm_tty_attach.c b/net/irda/ircomm/ircomm_tty_attach.c
index 3c175402302..b65d66e0d81 100644
--- a/net/irda/ircomm/ircomm_tty_attach.c
+++ b/net/irda/ircomm/ircomm_tty_attach.c
@@ -382,7 +382,7 @@ static void ircomm_tty_discovery_indication(discinfo_t *discovery,
 	info.daddr = discovery->daddr;
 	info.saddr = discovery->saddr;
 
-	self = (struct ircomm_tty_cb *) priv;
+	self = priv;
 	ircomm_tty_do_event(self, IRCOMM_TTY_DISCOVERY_INDICATION,
 			    NULL, &info);
 }
diff --git a/net/irda/irda_device.c b/net/irda/irda_device.c
index 25cc2e69515..3eca35faf2a 100644
--- a/net/irda/irda_device.c
+++ b/net/irda/irda_device.c
@@ -262,7 +262,7 @@ static void irda_task_timer_expired(void *data)
 
 	IRDA_DEBUG(2, "%s()\n", __func__);
 
-	task = (struct irda_task *) data;
+	task = data;
 
 	irda_task_kick(task);
 }
diff --git a/net/irda/iriap.c b/net/irda/iriap.c
index f876eed7d4a..e71e85ba2bf 100644
--- a/net/irda/iriap.c
+++ b/net/irda/iriap.c
@@ -305,7 +305,7 @@ static void iriap_disconnect_indication(void *instance, void *sap,
 
 	IRDA_DEBUG(4, "%s(), reason=%s\n", __func__, irlmp_reasons[reason]);
 
-	self = (struct iriap_cb *) instance;
+	self = instance;
 
 	IRDA_ASSERT(self != NULL, return;);
 	IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
@@ -759,7 +759,7 @@ static void iriap_connect_confirm(void *instance, void *sap,
 {
 	struct iriap_cb *self;
 
-	self = (struct iriap_cb *) instance;
+	self = instance;
 
 	IRDA_ASSERT(self != NULL, return;);
 	IRDA_ASSERT(self->magic == IAS_MAGIC, return;);
@@ -791,7 +791,7 @@ static void iriap_connect_indication(void *instance, void *sap,
 
 	IRDA_DEBUG(1, "%s()\n", __func__);
 
-	self = (struct iriap_cb *) instance;
+	self = instance;
 
 	IRDA_ASSERT(skb != NULL, return;);
 	IRDA_ASSERT(self != NULL, goto out;);
@@ -839,7 +839,7 @@ static int iriap_data_indication(void *instance, void *sap,
 
 	IRDA_DEBUG(3, "%s()\n", __func__);
 
-	self = (struct iriap_cb *) instance;
+	self = instance;
 
 	IRDA_ASSERT(skb != NULL, return 0;);
 	IRDA_ASSERT(self != NULL, goto out;);
diff --git a/net/irda/irlan/irlan_client.c b/net/irda/irlan/irlan_client.c
index 7ed3af95793..ba1a3fc39b5 100644
--- a/net/irda/irlan/irlan_client.c
+++ b/net/irda/irlan/irlan_client.c
@@ -198,7 +198,7 @@ static int irlan_client_ctrl_data_indication(void *instance, void *sap,
 
 	IRDA_DEBUG(2, "%s()\n", __func__ );
 
-	self = (struct irlan_cb *) instance;
+	self = instance;
 
 	IRDA_ASSERT(self != NULL, return -1;);
 	IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;);
@@ -226,8 +226,8 @@ static void irlan_client_ctrl_disconnect_indication(void *instance, void *sap,
 
 	IRDA_DEBUG(4, "%s(), reason=%d\n", __func__ , reason);
 
-	self = (struct irlan_cb *) instance;
-	tsap = (struct tsap_cb *) sap;
+	self = instance;
+	tsap = sap;
 
 	IRDA_ASSERT(self != NULL, return;);
 	IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
@@ -298,7 +298,7 @@ static void irlan_client_ctrl_connect_confirm(void *instance, void *sap,
 
 	IRDA_DEBUG(4, "%s()\n", __func__ );
 
-	self = (struct irlan_cb *) instance;
+	self = instance;
 
 	IRDA_ASSERT(self != NULL, return;);
 	IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
@@ -542,7 +542,7 @@ void irlan_client_get_value_confirm(int result, __u16 obj_id,
 
 	IRDA_ASSERT(priv != NULL, return;);
 
-	self = (struct irlan_cb *) priv;
+	self = priv;
 	IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
 
 	/* We probably don't need to make any more queries */
diff --git a/net/irda/irlan/irlan_common.c b/net/irda/irlan/irlan_common.c
index 6130f9d9dbe..77911763627 100644
--- a/net/irda/irlan/irlan_common.c
+++ b/net/irda/irlan/irlan_common.c
@@ -317,8 +317,8 @@ static void irlan_connect_indication(void *instance, void *sap,
 
 	IRDA_DEBUG(2, "%s()\n", __func__ );
 
-	self = (struct irlan_cb *) instance;
-	tsap = (struct tsap_cb *) sap;
+	self = instance;
+	tsap = sap;
 
 	IRDA_ASSERT(self != NULL, return;);
 	IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
@@ -361,7 +361,7 @@ static void irlan_connect_confirm(void *instance, void *sap,
 {
 	struct irlan_cb *self;
 
-	self = (struct irlan_cb *) instance;
+	self = instance;
 
 	IRDA_ASSERT(self != NULL, return;);
 	IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
@@ -406,8 +406,8 @@ static void irlan_disconnect_indication(void *instance,
 
 	IRDA_DEBUG(0, "%s(), reason=%d\n", __func__ , reason);
 
-	self = (struct irlan_cb *) instance;
-	tsap = (struct tsap_cb *) sap;
+	self = instance;
+	tsap = sap;
 
 	IRDA_ASSERT(self != NULL, return;);
 	IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c
index 8ee1ff6c742..e8d5f4405d6 100644
--- a/net/irda/irlan/irlan_eth.c
+++ b/net/irda/irlan/irlan_eth.c
@@ -272,7 +272,7 @@ void irlan_eth_flow_indication(void *instance, void *sap, LOCAL_FLOW flow)
 	struct irlan_cb *self;
 	struct net_device *dev;
 
-	self = (struct irlan_cb *) instance;
+	self = instance;
 
 	IRDA_ASSERT(self != NULL, return;);
 	IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
diff --git a/net/irda/irlan/irlan_provider.c b/net/irda/irlan/irlan_provider.c
index b8af74ab8b6..8b61cf0d8a6 100644
--- a/net/irda/irlan/irlan_provider.c
+++ b/net/irda/irlan/irlan_provider.c
@@ -73,7 +73,7 @@ static int irlan_provider_data_indication(void *instance, void *sap,
 
 	IRDA_DEBUG(4, "%s()\n", __func__ );
 
-	self = (struct irlan_cb *) instance;
+	self = instance;
 
 	IRDA_ASSERT(self != NULL, return -1;);
 	IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;);
@@ -131,8 +131,8 @@ static void irlan_provider_connect_indication(void *instance, void *sap,
 
 	IRDA_DEBUG(0, "%s()\n", __func__ );
 
-	self = (struct irlan_cb *) instance;
-	tsap = (struct tsap_cb *) sap;
+	self = instance;
+	tsap = sap;
 
 	IRDA_ASSERT(self != NULL, return;);
 	IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
@@ -182,8 +182,8 @@ static void irlan_provider_disconnect_indication(void *instance, void *sap,
 
 	IRDA_DEBUG(4, "%s(), reason=%d\n", __func__ , reason);
 
-	self = (struct irlan_cb *) instance;
-	tsap = (struct tsap_cb *) sap;
+	self = instance;
+	tsap = sap;
 
 	IRDA_ASSERT(self != NULL, return;);
 	IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
diff --git a/net/irda/irqueue.c b/net/irda/irqueue.c
index 9715e6e5900..f06947c4fa8 100644
--- a/net/irda/irqueue.c
+++ b/net/irda/irqueue.c
@@ -780,7 +780,7 @@ void* hashbin_lock_find( hashbin_t* hashbin, long hashv, const char* name )
 	/*
 	 * Search for entry
 	 */
-	entry = (irda_queue_t* ) hashbin_find( hashbin, hashv, name );
+	entry = hashbin_find(hashbin, hashv, name);
 
 	/* Release lock */
 	spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
@@ -813,7 +813,7 @@ void* hashbin_find_next( hashbin_t* hashbin, long hashv, const char* name,
 	 * This allow to check if the current item is still in the
 	 * hashbin or has been removed.
 	 */
-	entry = (irda_queue_t* ) hashbin_find( hashbin, hashv, name );
+	entry = hashbin_find(hashbin, hashv, name);
 
 	/*
 	 * Trick hashbin_get_next() to return what we want
diff --git a/net/irda/irttp.c b/net/irda/irttp.c
index 9d9af460697..285ccd623ae 100644
--- a/net/irda/irttp.c
+++ b/net/irda/irttp.c
@@ -350,7 +350,7 @@ static int irttp_param_max_sdu_size(void *instance, irda_param_t *param,
 {
 	struct tsap_cb *self;
 
-	self = (struct tsap_cb *) instance;
+	self = instance;
 
 	IRDA_ASSERT(self != NULL, return -1;);
 	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
@@ -879,7 +879,7 @@ static int irttp_udata_indication(void *instance, void *sap,
 
 	IRDA_DEBUG(4, "%s()\n", __func__);
 
-	self = (struct tsap_cb *) instance;
+	self = instance;
 
 	IRDA_ASSERT(self != NULL, return -1;);
 	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
@@ -914,7 +914,7 @@ static int irttp_data_indication(void *instance, void *sap,
 	unsigned long flags;
 	int n;
 
-	self = (struct tsap_cb *) instance;
+	self = instance;
 
 	n = skb->data[0] & 0x7f; /* Extract the credits */
 
@@ -996,7 +996,7 @@ static void irttp_status_indication(void *instance,
 
 	IRDA_DEBUG(4, "%s()\n", __func__);
 
-	self = (struct tsap_cb *) instance;
+	self = instance;
 
 	IRDA_ASSERT(self != NULL, return;);
 	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
@@ -1025,7 +1025,7 @@ static void irttp_flow_indication(void *instance, void *sap, LOCAL_FLOW flow)
 {
 	struct tsap_cb *self;
 
-	self = (struct tsap_cb *) instance;
+	self = instance;
 
 	IRDA_ASSERT(self != NULL, return;);
 	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
@@ -1208,7 +1208,7 @@ static void irttp_connect_confirm(void *instance, void *sap,
 
 	IRDA_DEBUG(4, "%s()\n", __func__);
 
-	self = (struct tsap_cb *) instance;
+	self = instance;
 
 	IRDA_ASSERT(self != NULL, return;);
 	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
@@ -1292,13 +1292,13 @@ static void irttp_connect_indication(void *instance, void *sap,
 	__u8 plen;
 	__u8 n;
 
-	self = (struct tsap_cb *) instance;
+	self = instance;
 
 	IRDA_ASSERT(self != NULL, return;);
 	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
 	IRDA_ASSERT(skb != NULL, return;);
 
-	lsap = (struct lsap_cb *) sap;
+	lsap = sap;
 
 	self->max_seg_size = max_seg_size - TTP_HEADER;
 	self->max_header_size = max_header_size+TTP_HEADER;
@@ -1602,7 +1602,7 @@ static void irttp_disconnect_indication(void *instance, void *sap,
 
 	IRDA_DEBUG(4, "%s()\n", __func__);
 
-	self = (struct tsap_cb *) instance;
+	self = instance;
 
 	IRDA_ASSERT(self != NULL, return;);
 	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return;);
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 7f9124914b1..f2b713847b4 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -1988,12 +1988,13 @@ static int __init iucv_init(void)
 		rc = -EPROTONOSUPPORT;
 		goto out;
 	}
+	ctl_set_bit(0, 1);
 	rc = iucv_query_maxconn();
 	if (rc)
-		goto out;
+		goto out_ctl;
 	rc = register_external_interrupt(0x4000, iucv_external_interrupt);
 	if (rc)
-		goto out;
+		goto out_ctl;
 	iucv_root = root_device_register("iucv");
 	if (IS_ERR(iucv_root)) {
 		rc = PTR_ERR(iucv_root);
@@ -2055,6 +2056,8 @@ out_free:
 	root_device_unregister(iucv_root);
 out_int:
 	unregister_external_interrupt(0x4000, iucv_external_interrupt);
+out_ctl:
+	ctl_clear_bit(0, 1);
 out:
 	return rc;
 }
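The iucv_init() hunks set a control bit before the first step that can fail and add a matching out_ctl label, keeping the unwind labels in exact reverse order of setup. A sketch of that goto-unwind idiom (the step/undo functions are invented for the example):

/* Each failure jumps to the label that undoes everything set up so far,
 * in reverse order. */
#include <stdio.h>

static int step_a(void) { return 0; }
static int step_b(void) { return -1; }   /* pretend this fails */
static void undo_a(void) { printf("undo a\n"); }

static int init(void)
{
	int rc;

	rc = step_a();
	if (rc)
		goto out;
	rc = step_b();
	if (rc)
		goto out_a;   /* step_a must be undone before returning */
	return 0;

out_a:
	undo_a();
out:
	return rc;
}

int main(void)
{
	printf("init = %d\n", init());
	return 0;
}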
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 8f92cf8116e..1e733e9073d 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -621,7 +621,7 @@ static struct xfrm_state *pfkey_xfrm_state_lookup(struct net *net, const struct
 	unsigned short family;
 	xfrm_address_t *xaddr;
 
-	sa = (const struct sadb_sa *) ext_hdrs[SADB_EXT_SA-1];
+	sa = ext_hdrs[SADB_EXT_SA - 1];
 	if (sa == NULL)
 		return NULL;
 
@@ -630,7 +630,7 @@ static struct xfrm_state *pfkey_xfrm_state_lookup(struct net *net, const struct
 		return NULL;
 
 	/* sadb_address_len should be checked by caller */
-	addr = (const struct sadb_address *) ext_hdrs[SADB_EXT_ADDRESS_DST-1];
+	addr = ext_hdrs[SADB_EXT_ADDRESS_DST - 1];
 	if (addr == NULL)
 		return NULL;
 
@@ -1039,7 +1039,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
 	int err;
 
 
-	sa = (const struct sadb_sa *) ext_hdrs[SADB_EXT_SA-1];
+	sa = ext_hdrs[SADB_EXT_SA - 1];
 	if (!sa ||
 	    !present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC-1],
 				     ext_hdrs[SADB_EXT_ADDRESS_DST-1]))
@@ -1078,7 +1078,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
 	     sa->sadb_sa_encrypt > SADB_X_CALG_MAX) ||
 	    sa->sadb_sa_encrypt > SADB_EALG_MAX)
 		return ERR_PTR(-EINVAL);
-	key = (const struct sadb_key*) ext_hdrs[SADB_EXT_KEY_AUTH-1];
+	key = ext_hdrs[SADB_EXT_KEY_AUTH - 1];
 	if (key != NULL &&
 	    sa->sadb_sa_auth != SADB_X_AALG_NULL &&
 	    ((key->sadb_key_bits+7) / 8 == 0 ||
@@ -1105,14 +1105,14 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
 	if (sa->sadb_sa_flags & SADB_SAFLAGS_NOPMTUDISC)
 		x->props.flags |= XFRM_STATE_NOPMTUDISC;
 
-	lifetime = (const struct sadb_lifetime*) ext_hdrs[SADB_EXT_LIFETIME_HARD-1];
+	lifetime = ext_hdrs[SADB_EXT_LIFETIME_HARD - 1];
 	if (lifetime != NULL) {
 		x->lft.hard_packet_limit = _KEY2X(lifetime->sadb_lifetime_allocations);
 		x->lft.hard_byte_limit = _KEY2X(lifetime->sadb_lifetime_bytes);
 		x->lft.hard_add_expires_seconds = lifetime->sadb_lifetime_addtime;
 		x->lft.hard_use_expires_seconds = lifetime->sadb_lifetime_usetime;
 	}
-	lifetime = (const struct sadb_lifetime*) ext_hdrs[SADB_EXT_LIFETIME_SOFT-1];
+	lifetime = ext_hdrs[SADB_EXT_LIFETIME_SOFT - 1];
 	if (lifetime != NULL) {
 		x->lft.soft_packet_limit = _KEY2X(lifetime->sadb_lifetime_allocations);
 		x->lft.soft_byte_limit = _KEY2X(lifetime->sadb_lifetime_bytes);
@@ -1120,7 +1120,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
 		x->lft.soft_use_expires_seconds = lifetime->sadb_lifetime_usetime;
 	}
 
-	sec_ctx = (const struct sadb_x_sec_ctx *) ext_hdrs[SADB_X_EXT_SEC_CTX-1];
+	sec_ctx = ext_hdrs[SADB_X_EXT_SEC_CTX - 1];
 	if (sec_ctx != NULL) {
 		struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx);
 
@@ -1134,7 +1134,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
 		goto out;
 	}
 
-	key = (const struct sadb_key*) ext_hdrs[SADB_EXT_KEY_AUTH-1];
+	key = ext_hdrs[SADB_EXT_KEY_AUTH - 1];
 	if (sa->sadb_sa_auth) {
 		int keysize = 0;
 		struct xfrm_algo_desc *a = xfrm_aalg_get_byid(sa->sadb_sa_auth);
@@ -2219,7 +2219,7 @@ static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, const struct sadb_
 	if (xp->selector.dport)
 		xp->selector.dport_mask = htons(0xffff);
 
-	sec_ctx = (struct sadb_x_sec_ctx *) ext_hdrs[SADB_X_EXT_SEC_CTX-1];
+	sec_ctx = ext_hdrs[SADB_X_EXT_SEC_CTX - 1];
 	if (sec_ctx != NULL) {
 		struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx);
 
@@ -2323,7 +2323,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa
 	if (sel.dport)
 		sel.dport_mask = htons(0xffff);
 
-	sec_ctx = (struct sadb_x_sec_ctx *) ext_hdrs[SADB_X_EXT_SEC_CTX-1];
+	sec_ctx = ext_hdrs[SADB_X_EXT_SEC_CTX - 1];
 	if (sec_ctx != NULL) {
 		struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx);
 
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index b6466e71f5e..d21e7ebd91c 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -480,18 +480,16 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
 	if (connected)
 		rt = (struct rtable *) __sk_dst_check(sk, 0);
 
+	rcu_read_lock();
 	if (rt == NULL) {
-		struct ip_options_rcu *inet_opt;
+		const struct ip_options_rcu *inet_opt;
 
-		rcu_read_lock();
 		inet_opt = rcu_dereference(inet->inet_opt);
 
 		/* Use correct destination address if we have options. */
 		if (inet_opt && inet_opt->opt.srr)
 			daddr = inet_opt->opt.faddr;
 
-		rcu_read_unlock();
-
 		/* If this fails, retransmit mechanism of transport layer will
 		 * keep trying until route appears or the connection times
 		 * itself out.
@@ -503,12 +501,20 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
 					   sk->sk_bound_dev_if);
 		if (IS_ERR(rt))
 			goto no_route;
-		sk_setup_caps(sk, &rt->dst);
+		if (connected)
+			sk_setup_caps(sk, &rt->dst);
+		else
+			dst_release(&rt->dst); /* safe since we hold rcu_read_lock */
 	}
-	skb_dst_set(skb, dst_clone(&rt->dst));
+
+	/* We dont need to clone dst here, it is guaranteed to not disappear.
+	 * __dev_xmit_skb() might force a refcount if needed.
+	 */
+	skb_dst_set_noref(skb, &rt->dst);
 
 	/* Queue the packet to IP for output */
 	rc = ip_queue_xmit(skb, &inet->cork.fl);
+	rcu_read_unlock();
 
 error:
 	/* Update stats */
@@ -525,6 +531,7 @@ out:
 	return rc;
 
 no_route:
+	rcu_read_unlock();
 	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
 	kfree_skb(skb);
 	rc = -EHOSTUNREACH;
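The l2tp_ip_sendmsg() hunks widen the RCU read-side section to cover route lookup and transmit, cache the route in the socket only for connected sockets, and attach it to the skb with skb_dst_set_noref() so no reference is taken on a route that the RCU section already pins. A simplified userspace model of the borrow-versus-hold distinction (every name here is invented for the sketch):

/* Inside a read-side critical section an object cannot go away, so a
 * borrowed pointer needs no refcount; a reference is taken only when
 * the pointer is cached beyond the section. */
#include <stdio.h>

struct route {
	int refcnt;
	int id;
};

static void route_hold(struct route *rt) { rt->refcnt++; }

static void send_one(struct route *rt, int cache_it)
{
	/* reader_lock();  -- pins rt for the duration of this call */
	if (cache_it)
		route_hold(rt);   /* cached reference outlives the lock */
	printf("tx via route %d, refcnt %d\n", rt->id, rt->refcnt);
	/* reader_unlock(); */
}

int main(void)
{
	struct route rt = { .refcnt = 1, .id = 5 };

	send_one(&rt, 0);   /* unconnected: borrow only */
	send_one(&rt, 1);   /* connected: keep a cached reference */
	return 0;
}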
diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
index d5d8d555c41..956b7e47dc5 100644
--- a/net/lapb/lapb_iface.c
+++ b/net/lapb/lapb_iface.c
@@ -300,26 +300,26 @@ int lapb_disconnect_request(struct net_device *dev)
 		goto out;
 
 	switch (lapb->state) {
 	case LAPB_STATE_0:
 		rc = LAPB_NOTCONNECTED;
 		goto out_put;
 
 	case LAPB_STATE_1:
 #if LAPB_DEBUG > 1
 		printk(KERN_DEBUG "lapb: (%p) S1 TX DISC(1)\n", lapb->dev);
 #endif
 #if LAPB_DEBUG > 0
 		printk(KERN_DEBUG "lapb: (%p) S1 -> S0\n", lapb->dev);
 #endif
 		lapb_send_control(lapb, LAPB_DISC, LAPB_POLLON, LAPB_COMMAND);
 		lapb->state = LAPB_STATE_0;
 		lapb_start_t1timer(lapb);
 		rc = LAPB_NOTCONNECTED;
 		goto out_put;
 
 	case LAPB_STATE_2:
 		rc = LAPB_OK;
 		goto out_put;
 	}
 
 	lapb_clear_queues(lapb);
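This lapb_iface.c hunk, like the large lapb_in.c hunks below, is mostly a mechanical re-indent to kernel coding style, where case labels sit at the same depth as the switch itself. For reference, a tiny self-contained example of the target layout:

/* Kernel style: case labels at the indentation of the switch. */
#include <stdio.h>

static const char *state_name(int state)
{
	switch (state) {
	case 0:
		return "disconnected";
	case 3:
		return "connected";
	default:
		return "transient";
	}
}

int main(void)
{
	printf("%s\n", state_name(3));
	return 0;
}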
diff --git a/net/lapb/lapb_in.c b/net/lapb/lapb_in.c
index 21904a00244..2ec1af5c36c 100644
--- a/net/lapb/lapb_in.c
+++ b/net/lapb/lapb_in.c
@@ -44,89 +44,86 @@ static void lapb_state0_machine(struct lapb_cb *lapb, struct sk_buff *skb,
 				struct lapb_frame *frame)
 {
 	switch (frame->type) {
 	case LAPB_SABM:
 #if LAPB_DEBUG > 1
 		printk(KERN_DEBUG "lapb: (%p) S0 RX SABM(%d)\n",
 		       lapb->dev, frame->pf);
 #endif
 		if (lapb->mode & LAPB_EXTENDED) {
 #if LAPB_DEBUG > 1
 			printk(KERN_DEBUG "lapb: (%p) S0 TX DM(%d)\n",
 			       lapb->dev, frame->pf);
 #endif
 			lapb_send_control(lapb, LAPB_DM, frame->pf,
 					  LAPB_RESPONSE);
 		} else {
 #if LAPB_DEBUG > 1
 			printk(KERN_DEBUG "lapb: (%p) S0 TX UA(%d)\n",
 			       lapb->dev, frame->pf);
 #endif
 #if LAPB_DEBUG > 0
-			printk(KERN_DEBUG "lapb: (%p) S0 -> S3\n",
-			       lapb->dev);
+			printk(KERN_DEBUG "lapb: (%p) S0 -> S3\n", lapb->dev);
 #endif
 			lapb_send_control(lapb, LAPB_UA, frame->pf,
 					  LAPB_RESPONSE);
 			lapb_stop_t1timer(lapb);
 			lapb_stop_t2timer(lapb);
 			lapb->state = LAPB_STATE_3;
 			lapb->condition = 0x00;
 			lapb->n2count = 0;
 			lapb->vs = 0;
 			lapb->vr = 0;
 			lapb->va = 0;
 			lapb_connect_indication(lapb, LAPB_OK);
 		}
 		break;
 
 	case LAPB_SABME:
 #if LAPB_DEBUG > 1
 		printk(KERN_DEBUG "lapb: (%p) S0 RX SABME(%d)\n",
 		       lapb->dev, frame->pf);
 #endif
 		if (lapb->mode & LAPB_EXTENDED) {
 #if LAPB_DEBUG > 1
 			printk(KERN_DEBUG "lapb: (%p) S0 TX UA(%d)\n",
 			       lapb->dev, frame->pf);
 #endif
 #if LAPB_DEBUG > 0
-			printk(KERN_DEBUG "lapb: (%p) S0 -> S3\n",
-			       lapb->dev);
+			printk(KERN_DEBUG "lapb: (%p) S0 -> S3\n", lapb->dev);
 #endif
 			lapb_send_control(lapb, LAPB_UA, frame->pf,
 					  LAPB_RESPONSE);
 			lapb_stop_t1timer(lapb);
 			lapb_stop_t2timer(lapb);
 			lapb->state = LAPB_STATE_3;
 			lapb->condition = 0x00;
 			lapb->n2count = 0;
 			lapb->vs = 0;
 			lapb->vr = 0;
 			lapb->va = 0;
 			lapb_connect_indication(lapb, LAPB_OK);
 		} else {
 #if LAPB_DEBUG > 1
 			printk(KERN_DEBUG "lapb: (%p) S0 TX DM(%d)\n",
 			       lapb->dev, frame->pf);
 #endif
 			lapb_send_control(lapb, LAPB_DM, frame->pf,
 					  LAPB_RESPONSE);
 		}
 		break;
 
 	case LAPB_DISC:
 #if LAPB_DEBUG > 1
 		printk(KERN_DEBUG "lapb: (%p) S0 RX DISC(%d)\n",
 		       lapb->dev, frame->pf);
 		printk(KERN_DEBUG "lapb: (%p) S0 TX UA(%d)\n",
 		       lapb->dev, frame->pf);
 #endif
-		lapb_send_control(lapb, LAPB_UA, frame->pf,
-				  LAPB_RESPONSE);
+		lapb_send_control(lapb, LAPB_UA, frame->pf, LAPB_RESPONSE);
 		break;
 
 	default:
 		break;
 	}
 
 	kfree_skb(skb);
@@ -140,100 +137,97 @@ static void lapb_state1_machine(struct lapb_cb *lapb, struct sk_buff *skb,
 				struct lapb_frame *frame)
 {
 	switch (frame->type) {
 	case LAPB_SABM:
 #if LAPB_DEBUG > 1
 		printk(KERN_DEBUG "lapb: (%p) S1 RX SABM(%d)\n",
 		       lapb->dev, frame->pf);
 #endif
 		if (lapb->mode & LAPB_EXTENDED) {
 #if LAPB_DEBUG > 1
 			printk(KERN_DEBUG "lapb: (%p) S1 TX DM(%d)\n",
 			       lapb->dev, frame->pf);
 #endif
 			lapb_send_control(lapb, LAPB_DM, frame->pf,
 					  LAPB_RESPONSE);
 		} else {
 #if LAPB_DEBUG > 1
 			printk(KERN_DEBUG "lapb: (%p) S1 TX UA(%d)\n",
 			       lapb->dev, frame->pf);
 #endif
 			lapb_send_control(lapb, LAPB_UA, frame->pf,
 					  LAPB_RESPONSE);
 		}
 		break;
 
 	case LAPB_SABME:
 #if LAPB_DEBUG > 1
 		printk(KERN_DEBUG "lapb: (%p) S1 RX SABME(%d)\n",
 		       lapb->dev, frame->pf);
 #endif
 		if (lapb->mode & LAPB_EXTENDED) {
 #if LAPB_DEBUG > 1
 			printk(KERN_DEBUG "lapb: (%p) S1 TX UA(%d)\n",
 			       lapb->dev, frame->pf);
 #endif
 			lapb_send_control(lapb, LAPB_UA, frame->pf,
 					  LAPB_RESPONSE);
 		} else {
 #if LAPB_DEBUG > 1
 			printk(KERN_DEBUG "lapb: (%p) S1 TX DM(%d)\n",
 			       lapb->dev, frame->pf);
 #endif
 			lapb_send_control(lapb, LAPB_DM, frame->pf,
 					  LAPB_RESPONSE);
 		}
 		break;
 
 	case LAPB_DISC:
 #if LAPB_DEBUG > 1
 		printk(KERN_DEBUG "lapb: (%p) S1 RX DISC(%d)\n",
 		       lapb->dev, frame->pf);
 		printk(KERN_DEBUG "lapb: (%p) S1 TX DM(%d)\n",
 		       lapb->dev, frame->pf);
 #endif
-		lapb_send_control(lapb, LAPB_DM, frame->pf,
-				  LAPB_RESPONSE);
+		lapb_send_control(lapb, LAPB_DM, frame->pf, LAPB_RESPONSE);
 		break;
 
 	case LAPB_UA:
 #if LAPB_DEBUG > 1
 		printk(KERN_DEBUG "lapb: (%p) S1 RX UA(%d)\n",
 		       lapb->dev, frame->pf);
 #endif
 		if (frame->pf) {
 #if LAPB_DEBUG > 0
-			printk(KERN_DEBUG "lapb: (%p) S1 -> S3\n",
-			       lapb->dev);
+			printk(KERN_DEBUG "lapb: (%p) S1 -> S3\n", lapb->dev);
 #endif
 			lapb_stop_t1timer(lapb);
 			lapb_stop_t2timer(lapb);
 			lapb->state = LAPB_STATE_3;
 			lapb->condition = 0x00;
 			lapb->n2count = 0;
 			lapb->vs = 0;
 			lapb->vr = 0;
 			lapb->va = 0;
 			lapb_connect_confirmation(lapb, LAPB_OK);
 		}
 		break;
 
 	case LAPB_DM:
 #if LAPB_DEBUG > 1
 		printk(KERN_DEBUG "lapb: (%p) S1 RX DM(%d)\n",
 		       lapb->dev, frame->pf);
 #endif
 		if (frame->pf) {
 #if LAPB_DEBUG > 0
-			printk(KERN_DEBUG "lapb: (%p) S1 -> S0\n",
-			       lapb->dev);
+			printk(KERN_DEBUG "lapb: (%p) S1 -> S0\n", lapb->dev);
 #endif
 			lapb_clear_queues(lapb);
 			lapb->state = LAPB_STATE_0;
 			lapb_start_t1timer(lapb);
 			lapb_stop_t2timer(lapb);
 			lapb_disconnect_indication(lapb, LAPB_REFUSED);
 		}
 		break;
 	}
 
 	kfree_skb(skb);
@@ -247,78 +241,73 @@ static void lapb_state2_machine(struct lapb_cb *lapb, struct sk_buff *skb,
 				struct lapb_frame *frame)
 {
 	switch (frame->type) {
 	case LAPB_SABM:
 	case LAPB_SABME:
 #if LAPB_DEBUG > 1
 		printk(KERN_DEBUG "lapb: (%p) S2 RX {SABM,SABME}(%d)\n",
 		       lapb->dev, frame->pf);
 		printk(KERN_DEBUG "lapb: (%p) S2 TX DM(%d)\n",
 		       lapb->dev, frame->pf);
 #endif
-		lapb_send_control(lapb, LAPB_DM, frame->pf,
-				  LAPB_RESPONSE);
+		lapb_send_control(lapb, LAPB_DM, frame->pf, LAPB_RESPONSE);
 		break;
 
 	case LAPB_DISC:
 #if LAPB_DEBUG > 1
 		printk(KERN_DEBUG "lapb: (%p) S2 RX DISC(%d)\n",
 		       lapb->dev, frame->pf);
 		printk(KERN_DEBUG "lapb: (%p) S2 TX UA(%d)\n",
 		       lapb->dev, frame->pf);
 #endif
-		lapb_send_control(lapb, LAPB_UA, frame->pf,
-				  LAPB_RESPONSE);
+		lapb_send_control(lapb, LAPB_UA, frame->pf, LAPB_RESPONSE);
 		break;
 
 	case LAPB_UA:
 #if LAPB_DEBUG > 1
 		printk(KERN_DEBUG "lapb: (%p) S2 RX UA(%d)\n",
 		       lapb->dev, frame->pf);
 #endif
 		if (frame->pf) {
 #if LAPB_DEBUG > 0
-			printk(KERN_DEBUG "lapb: (%p) S2 -> S0\n",
-			       lapb->dev);
+			printk(KERN_DEBUG "lapb: (%p) S2 -> S0\n", lapb->dev);
 #endif
 			lapb->state = LAPB_STATE_0;
 			lapb_start_t1timer(lapb);
 			lapb_stop_t2timer(lapb);
 			lapb_disconnect_confirmation(lapb, LAPB_OK);
 		}
 		break;
 
 	case LAPB_DM:
 #if LAPB_DEBUG > 1
 		printk(KERN_DEBUG "lapb: (%p) S2 RX DM(%d)\n",
 		       lapb->dev, frame->pf);
 #endif
 		if (frame->pf) {
 #if LAPB_DEBUG > 0
-			printk(KERN_DEBUG "lapb: (%p) S2 -> S0\n",
-			       lapb->dev);
+			printk(KERN_DEBUG "lapb: (%p) S2 -> S0\n", lapb->dev);
 #endif
 			lapb->state = LAPB_STATE_0;
 			lapb_start_t1timer(lapb);
 			lapb_stop_t2timer(lapb);
-			lapb_disconnect_confirmation(lapb,
-						     LAPB_NOTCONNECTED);
+			lapb_disconnect_confirmation(lapb, LAPB_NOTCONNECTED);
 		}
 		break;
 
 	case LAPB_I:
 	case LAPB_REJ:
 	case LAPB_RNR:
 	case LAPB_RR:
 #if LAPB_DEBUG > 1
-		printk(KERN_DEBUG "lapb: (%p) S2 RX {I,REJ,RNR,RR}"
-		       "(%d)\n", lapb->dev, frame->pf);
+		printk(KERN_DEBUG "lapb: (%p) S2 RX {I,REJ,RNR,RR}(%d)\n",
+		       lapb->dev, frame->pf);
 		printk(KERN_DEBUG "lapb: (%p) S2 RX DM(%d)\n",
 		       lapb->dev, frame->pf);
 #endif
 		if (frame->pf)
 			lapb_send_control(lapb, LAPB_DM, frame->pf,
 					  LAPB_RESPONSE);
 		break;
 	}
 
 	kfree_skb(skb);
@@ -336,277 +325,267 @@ static void lapb_state3_machine(struct lapb_cb *lapb, struct sk_buff *skb,
 		LAPB_SMODULUS;
 
 	switch (frame->type) {
 	case LAPB_SABM:
 #if LAPB_DEBUG > 1
 		printk(KERN_DEBUG "lapb: (%p) S3 RX SABM(%d)\n",
 		       lapb->dev, frame->pf);
 #endif
 		if (lapb->mode & LAPB_EXTENDED) {
 #if LAPB_DEBUG > 1
 			printk(KERN_DEBUG "lapb: (%p) S3 TX DM(%d)\n",
 			       lapb->dev, frame->pf);
 #endif
 			lapb_send_control(lapb, LAPB_DM, frame->pf,
 					  LAPB_RESPONSE);
 		} else {
 #if LAPB_DEBUG > 1
 			printk(KERN_DEBUG "lapb: (%p) S3 TX UA(%d)\n",
 			       lapb->dev, frame->pf);
 #endif
 			lapb_send_control(lapb, LAPB_UA, frame->pf,
 					  LAPB_RESPONSE);
 			lapb_stop_t1timer(lapb);
 			lapb_stop_t2timer(lapb);
 			lapb->condition = 0x00;
 			lapb->n2count = 0;
 			lapb->vs = 0;
 			lapb->vr = 0;
 			lapb->va = 0;
 			lapb_requeue_frames(lapb);
 		}
 		break;
 
 	case LAPB_SABME:
 #if LAPB_DEBUG > 1
 		printk(KERN_DEBUG "lapb: (%p) S3 RX SABME(%d)\n",
 		       lapb->dev, frame->pf);
 #endif
 		if (lapb->mode & LAPB_EXTENDED) {
 #if LAPB_DEBUG > 1
 			printk(KERN_DEBUG "lapb: (%p) S3 TX UA(%d)\n",
 			       lapb->dev, frame->pf);
 #endif
 			lapb_send_control(lapb, LAPB_UA, frame->pf,
 					  LAPB_RESPONSE);
 			lapb_stop_t1timer(lapb);
 			lapb_stop_t2timer(lapb);
 			lapb->condition = 0x00;
 			lapb->n2count = 0;
 			lapb->vs = 0;
 			lapb->vr = 0;
 			lapb->va = 0;
 			lapb_requeue_frames(lapb);
 		} else {
 #if LAPB_DEBUG > 1
 			printk(KERN_DEBUG "lapb: (%p) S3 TX DM(%d)\n",
 			       lapb->dev, frame->pf);
 #endif
 			lapb_send_control(lapb, LAPB_DM, frame->pf,
 					  LAPB_RESPONSE);
 		}
 		break;
 
 	case LAPB_DISC:
 #if LAPB_DEBUG > 1
 		printk(KERN_DEBUG "lapb: (%p) S3 RX DISC(%d)\n",
 		       lapb->dev, frame->pf);
 #endif
 #if LAPB_DEBUG > 0
-		printk(KERN_DEBUG "lapb: (%p) S3 -> S0\n",
-		       lapb->dev);
+		printk(KERN_DEBUG "lapb: (%p) S3 -> S0\n", lapb->dev);
 #endif
 		lapb_clear_queues(lapb);
-		lapb_send_control(lapb, LAPB_UA, frame->pf,
-				  LAPB_RESPONSE);
+		lapb_send_control(lapb, LAPB_UA, frame->pf, LAPB_RESPONSE);
 		lapb_start_t1timer(lapb);
 		lapb_stop_t2timer(lapb);
 		lapb->state = LAPB_STATE_0;
 		lapb_disconnect_indication(lapb, LAPB_OK);
 		break;
 
 	case LAPB_DM:
 #if LAPB_DEBUG > 1
 		printk(KERN_DEBUG "lapb: (%p) S3 RX DM(%d)\n",
 		       lapb->dev, frame->pf);
 #endif
 #if LAPB_DEBUG > 0
-		printk(KERN_DEBUG "lapb: (%p) S3 -> S0\n",
-		       lapb->dev);
+		printk(KERN_DEBUG "lapb: (%p) S3 -> S0\n", lapb->dev);
 #endif
 		lapb_clear_queues(lapb);
 		lapb->state = LAPB_STATE_0;
 		lapb_start_t1timer(lapb);
 		lapb_stop_t2timer(lapb);
 		lapb_disconnect_indication(lapb, LAPB_NOTCONNECTED);
 		break;
 
 	case LAPB_RNR:
 #if LAPB_DEBUG > 1
 		printk(KERN_DEBUG "lapb: (%p) S3 RX RNR(%d) R%d\n",
 		       lapb->dev, frame->pf, frame->nr);
 #endif
 		lapb->condition |= LAPB_PEER_RX_BUSY_CONDITION;
 		lapb_check_need_response(lapb, frame->cr, frame->pf);
 		if (lapb_validate_nr(lapb, frame->nr)) {
 			lapb_check_iframes_acked(lapb, frame->nr);
 		} else {
 			lapb->frmr_data = *frame;
 			lapb->frmr_type = LAPB_FRMR_Z;
 			lapb_transmit_frmr(lapb);
 #if LAPB_DEBUG > 0
-			printk(KERN_DEBUG "lapb: (%p) S3 -> S4\n",
-			       lapb->dev);
+			printk(KERN_DEBUG "lapb: (%p) S3 -> S4\n", lapb->dev);
 #endif
 			lapb_start_t1timer(lapb);
 			lapb_stop_t2timer(lapb);
 			lapb->state = LAPB_STATE_4;
 			lapb->n2count = 0;
 		}
 		break;
 
 	case LAPB_RR:
 #if LAPB_DEBUG > 1
 		printk(KERN_DEBUG "lapb: (%p) S3 RX RR(%d) R%d\n",
 		       lapb->dev, frame->pf, frame->nr);
 #endif
 		lapb->condition &= ~LAPB_PEER_RX_BUSY_CONDITION;
 		lapb_check_need_response(lapb, frame->cr, frame->pf);
 		if (lapb_validate_nr(lapb, frame->nr)) {
 			lapb_check_iframes_acked(lapb, frame->nr);
 		} else {
 			lapb->frmr_data = *frame;
 			lapb->frmr_type = LAPB_FRMR_Z;
 			lapb_transmit_frmr(lapb);
 #if LAPB_DEBUG > 0
-			printk(KERN_DEBUG "lapb: (%p) S3 -> S4\n",
-			       lapb->dev);
+			printk(KERN_DEBUG "lapb: (%p) S3 -> S4\n", lapb->dev);
 #endif
 			lapb_start_t1timer(lapb);
 			lapb_stop_t2timer(lapb);
 			lapb->state = LAPB_STATE_4;
 			lapb->n2count = 0;
 		}
 		break;
 
 	case LAPB_REJ:
 #if LAPB_DEBUG > 1
 		printk(KERN_DEBUG "lapb: (%p) S3 RX REJ(%d) R%d\n",
 		       lapb->dev, frame->pf, frame->nr);
 #endif
 		lapb->condition &= ~LAPB_PEER_RX_BUSY_CONDITION;
 		lapb_check_need_response(lapb, frame->cr, frame->pf);
 		if (lapb_validate_nr(lapb, frame->nr)) {
 			lapb_frames_acked(lapb, frame->nr);
 			lapb_stop_t1timer(lapb);
 			lapb->n2count = 0;
 			lapb_requeue_frames(lapb);
 		} else {
 			lapb->frmr_data = *frame;
 			lapb->frmr_type = LAPB_FRMR_Z;
 			lapb_transmit_frmr(lapb);
 #if LAPB_DEBUG > 0
-			printk(KERN_DEBUG "lapb: (%p) S3 -> S4\n",
-			       lapb->dev);
+			printk(KERN_DEBUG "lapb: (%p) S3 -> S4\n", lapb->dev);
 #endif
 			lapb_start_t1timer(lapb);
 			lapb_stop_t2timer(lapb);
 			lapb->state = LAPB_STATE_4;
 			lapb->n2count = 0;
 		}
 		break;
 
 	case LAPB_I:
 #if LAPB_DEBUG > 1
 		printk(KERN_DEBUG "lapb: (%p) S3 RX I(%d) S%d R%d\n",
 		       lapb->dev, frame->pf, frame->ns, frame->nr);
 #endif
 		if (!lapb_validate_nr(lapb, frame->nr)) {
 			lapb->frmr_data = *frame;
 			lapb->frmr_type = LAPB_FRMR_Z;
 			lapb_transmit_frmr(lapb);
 #if LAPB_DEBUG > 0
-			printk(KERN_DEBUG "lapb: (%p) S3 -> S4\n",
-			       lapb->dev);
+			printk(KERN_DEBUG "lapb: (%p) S3 -> S4\n", lapb->dev);
 #endif
 			lapb_start_t1timer(lapb);
 			lapb_stop_t2timer(lapb);
 			lapb->state = LAPB_STATE_4;
 			lapb->n2count = 0;
 			break;
 		}
 		if (lapb->condition & LAPB_PEER_RX_BUSY_CONDITION)
 			lapb_frames_acked(lapb, frame->nr);
 		else
 			lapb_check_iframes_acked(lapb, frame->nr);
 
 		if (frame->ns == lapb->vr) {
 			int cn;
 			cn = lapb_data_indication(lapb, skb);
 			queued = 1;
 			/*
 			 * If upper layer has dropped the frame, we
 			 * basically ignore any further protocol
 			 * processing. This will cause the peer
 			 * to re-transmit the frame later like
 			 * a frame lost on the wire.
 			 */
 			if (cn == NET_RX_DROP) {
-				printk(KERN_DEBUG
-				       "LAPB: rx congestion\n");
+				printk(KERN_DEBUG "LAPB: rx congestion\n");
 				break;
 			}
 			lapb->vr = (lapb->vr + 1) % modulus;
 			lapb->condition &= ~LAPB_REJECT_CONDITION;
 			if (frame->pf)
 				lapb_enquiry_response(lapb);
 			else {
 				if (!(lapb->condition &
 				      LAPB_ACK_PENDING_CONDITION)) {
 					lapb->condition |= LAPB_ACK_PENDING_CONDITION;
 					lapb_start_t2timer(lapb);
 				}
 			}
 		} else {
 			if (lapb->condition & LAPB_REJECT_CONDITION) {
 				if (frame->pf)
 					lapb_enquiry_response(lapb);
 			} else {
 #if LAPB_DEBUG > 1
 				printk(KERN_DEBUG
 				       "lapb: (%p) S3 TX REJ(%d) R%d\n",
 				       lapb->dev, frame->pf, lapb->vr);
 #endif
 				lapb->condition |= LAPB_REJECT_CONDITION;
-				lapb_send_control(lapb, LAPB_REJ,
-						  frame->pf,
-						  LAPB_RESPONSE);
+				lapb_send_control(lapb, LAPB_REJ, frame->pf,
+						  LAPB_RESPONSE);
 				lapb->condition &= ~LAPB_ACK_PENDING_CONDITION;
 			}
 		}
 		break;
 
 	case LAPB_FRMR:
 #if LAPB_DEBUG > 1
 		printk(KERN_DEBUG "lapb: (%p) S3 RX FRMR(%d) %02X "
 		       "%02X %02X %02X %02X\n", lapb->dev, frame->pf,
 		       skb->data[0], skb->data[1], skb->data[2],
 		       skb->data[3], skb->data[4]);
 #endif
 		lapb_establish_data_link(lapb);
 #if LAPB_DEBUG > 0
-		printk(KERN_DEBUG "lapb: (%p) S3 -> S1\n",
-		       lapb->dev);
+		printk(KERN_DEBUG "lapb: (%p) S3 -> S1\n", lapb->dev);
 #endif
 		lapb_requeue_frames(lapb);
 		lapb->state = LAPB_STATE_1;
 		break;
 
 	case LAPB_ILLEGAL:
 #if LAPB_DEBUG > 1
 		printk(KERN_DEBUG "lapb: (%p) S3 RX ILLEGAL(%d)\n",
 		       lapb->dev, frame->pf);
 #endif
 		lapb->frmr_data = *frame;
 		lapb->frmr_type = LAPB_FRMR_W;
 		lapb_transmit_frmr(lapb);
 #if LAPB_DEBUG > 0
 		printk(KERN_DEBUG "lapb: (%p) S3 -> S4\n", lapb->dev);
 #endif
 		lapb_start_t1timer(lapb);
 		lapb_stop_t2timer(lapb);
 		lapb->state = LAPB_STATE_4;
 		lapb->n2count = 0;
 		break;
 	}
 
 	if (!queued)
@@ -621,75 +600,73 @@ static void lapb_state4_machine(struct lapb_cb *lapb, struct sk_buff *skb,
 				struct lapb_frame *frame)
 {
 	switch (frame->type) {
 	case LAPB_SABM:
 #if LAPB_DEBUG > 1
 		printk(KERN_DEBUG "lapb: (%p) S4 RX SABM(%d)\n",
 		       lapb->dev, frame->pf);
 #endif
 		if (lapb->mode & LAPB_EXTENDED) {
 #if LAPB_DEBUG > 1
 			printk(KERN_DEBUG "lapb: (%p) S4 TX DM(%d)\n",
 			       lapb->dev, frame->pf);
 #endif
 			lapb_send_control(lapb, LAPB_DM, frame->pf,
 					  LAPB_RESPONSE);
 		} else {
 #if LAPB_DEBUG > 1
 			printk(KERN_DEBUG "lapb: (%p) S4 TX UA(%d)\n",
 			       lapb->dev, frame->pf);
 #endif
 #if LAPB_DEBUG > 0
-			printk(KERN_DEBUG "lapb: (%p) S4 -> S3\n",
-			       lapb->dev);
+			printk(KERN_DEBUG "lapb: (%p) S4 -> S3\n", lapb->dev);
 #endif
 			lapb_send_control(lapb, LAPB_UA, frame->pf,
 					  LAPB_RESPONSE);
 			lapb_stop_t1timer(lapb);
 			lapb_stop_t2timer(lapb);
 			lapb->state = LAPB_STATE_3;
 			lapb->condition = 0x00;
 			lapb->n2count = 0;
 			lapb->vs = 0;
 			lapb->vr = 0;
 			lapb->va = 0;
 			lapb_connect_indication(lapb, LAPB_OK);
 		}
 		break;
 
 	case LAPB_SABME:
 #if LAPB_DEBUG > 1
 		printk(KERN_DEBUG "lapb: (%p) S4 RX SABME(%d)\n",
 		       lapb->dev, frame->pf);
 #endif
 		if (lapb->mode & LAPB_EXTENDED) {
 #if LAPB_DEBUG > 1
 			printk(KERN_DEBUG "lapb: (%p) S4 TX UA(%d)\n",
 			       lapb->dev, frame->pf);
 #endif
 #if LAPB_DEBUG > 0
-			printk(KERN_DEBUG "lapb: (%p) S4 -> S3\n",
-			       lapb->dev);
+			printk(KERN_DEBUG "lapb: (%p) S4 -> S3\n", lapb->dev);
 #endif
 			lapb_send_control(lapb, LAPB_UA, frame->pf,
 					  LAPB_RESPONSE);
 			lapb_stop_t1timer(lapb);
 			lapb_stop_t2timer(lapb);
 			lapb->state = LAPB_STATE_3;
 			lapb->condition = 0x00;
 			lapb->n2count = 0;
 			lapb->vs = 0;
 			lapb->vr = 0;
 			lapb->va = 0;
 			lapb_connect_indication(lapb, LAPB_OK);
 		} else {
 #if LAPB_DEBUG > 1
 			printk(KERN_DEBUG "lapb: (%p) S4 TX DM(%d)\n",
 			       lapb->dev, frame->pf);
 #endif
 			lapb_send_control(lapb, LAPB_DM, frame->pf,
 					  LAPB_RESPONSE);
 		}
 		break;
 	}
 
 	kfree_skb(skb);
diff --git a/net/mac80211/aes_ccm.c b/net/mac80211/aes_ccm.c
index b9b595c0811..0785e95c992 100644
--- a/net/mac80211/aes_ccm.c
+++ b/net/mac80211/aes_ccm.c
@@ -11,6 +11,7 @@
11#include <linux/types.h> 11#include <linux/types.h>
12#include <linux/crypto.h> 12#include <linux/crypto.h>
13#include <linux/err.h> 13#include <linux/err.h>
14#include <crypto/aes.h>
14 15
15#include <net/mac80211.h> 16#include <net/mac80211.h>
16#include "key.h" 17#include "key.h"
@@ -21,21 +22,21 @@ static void aes_ccm_prepare(struct crypto_cipher *tfm, u8 *scratch, u8 *a)
21 int i; 22 int i;
22 u8 *b_0, *aad, *b, *s_0; 23 u8 *b_0, *aad, *b, *s_0;
23 24
24 b_0 = scratch + 3 * AES_BLOCK_LEN; 25 b_0 = scratch + 3 * AES_BLOCK_SIZE;
25 aad = scratch + 4 * AES_BLOCK_LEN; 26 aad = scratch + 4 * AES_BLOCK_SIZE;
26 b = scratch; 27 b = scratch;
27 s_0 = scratch + AES_BLOCK_LEN; 28 s_0 = scratch + AES_BLOCK_SIZE;
28 29
29 crypto_cipher_encrypt_one(tfm, b, b_0); 30 crypto_cipher_encrypt_one(tfm, b, b_0);
30 31
31 /* Extra Authenticate-only data (always two AES blocks) */ 32 /* Extra Authenticate-only data (always two AES blocks) */
32 for (i = 0; i < AES_BLOCK_LEN; i++) 33 for (i = 0; i < AES_BLOCK_SIZE; i++)
33 aad[i] ^= b[i]; 34 aad[i] ^= b[i];
34 crypto_cipher_encrypt_one(tfm, b, aad); 35 crypto_cipher_encrypt_one(tfm, b, aad);
35 36
36 aad += AES_BLOCK_LEN; 37 aad += AES_BLOCK_SIZE;
37 38
38 for (i = 0; i < AES_BLOCK_LEN; i++) 39 for (i = 0; i < AES_BLOCK_SIZE; i++)
39 aad[i] ^= b[i]; 40 aad[i] ^= b[i];
40 crypto_cipher_encrypt_one(tfm, a, aad); 41 crypto_cipher_encrypt_one(tfm, a, aad);
41 42
@@ -57,12 +58,12 @@ void ieee80211_aes_ccm_encrypt(struct crypto_cipher *tfm, u8 *scratch,
57 u8 *pos, *cpos, *b, *s_0, *e, *b_0; 58 u8 *pos, *cpos, *b, *s_0, *e, *b_0;
58 59
59 b = scratch; 60 b = scratch;
60 s_0 = scratch + AES_BLOCK_LEN; 61 s_0 = scratch + AES_BLOCK_SIZE;
61 e = scratch + 2 * AES_BLOCK_LEN; 62 e = scratch + 2 * AES_BLOCK_SIZE;
62 b_0 = scratch + 3 * AES_BLOCK_LEN; 63 b_0 = scratch + 3 * AES_BLOCK_SIZE;
63 64
64 num_blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN); 65 num_blocks = DIV_ROUND_UP(data_len, AES_BLOCK_SIZE);
65 last_len = data_len % AES_BLOCK_LEN; 66 last_len = data_len % AES_BLOCK_SIZE;
66 aes_ccm_prepare(tfm, scratch, b); 67 aes_ccm_prepare(tfm, scratch, b);
67 68
68 /* Process payload blocks */ 69 /* Process payload blocks */
@@ -70,7 +71,7 @@ void ieee80211_aes_ccm_encrypt(struct crypto_cipher *tfm, u8 *scratch,
70 cpos = cdata; 71 cpos = cdata;
71 for (j = 1; j <= num_blocks; j++) { 72 for (j = 1; j <= num_blocks; j++) {
72 int blen = (j == num_blocks && last_len) ? 73 int blen = (j == num_blocks && last_len) ?
73 last_len : AES_BLOCK_LEN; 74 last_len : AES_BLOCK_SIZE;
74 75
75 /* Authentication followed by encryption */ 76 /* Authentication followed by encryption */
76 for (i = 0; i < blen; i++) 77 for (i = 0; i < blen; i++)
@@ -96,12 +97,12 @@ int ieee80211_aes_ccm_decrypt(struct crypto_cipher *tfm, u8 *scratch,
96 u8 *pos, *cpos, *b, *s_0, *a, *b_0; 97 u8 *pos, *cpos, *b, *s_0, *a, *b_0;
97 98
98 b = scratch; 99 b = scratch;
99 s_0 = scratch + AES_BLOCK_LEN; 100 s_0 = scratch + AES_BLOCK_SIZE;
100 a = scratch + 2 * AES_BLOCK_LEN; 101 a = scratch + 2 * AES_BLOCK_SIZE;
101 b_0 = scratch + 3 * AES_BLOCK_LEN; 102 b_0 = scratch + 3 * AES_BLOCK_SIZE;
102 103
103 num_blocks = DIV_ROUND_UP(data_len, AES_BLOCK_LEN); 104 num_blocks = DIV_ROUND_UP(data_len, AES_BLOCK_SIZE);
104 last_len = data_len % AES_BLOCK_LEN; 105 last_len = data_len % AES_BLOCK_SIZE;
105 aes_ccm_prepare(tfm, scratch, a); 106 aes_ccm_prepare(tfm, scratch, a);
106 107
107 /* Process payload blocks */ 108 /* Process payload blocks */
@@ -109,7 +110,7 @@ int ieee80211_aes_ccm_decrypt(struct crypto_cipher *tfm, u8 *scratch,
109 pos = data; 110 pos = data;
110 for (j = 1; j <= num_blocks; j++) { 111 for (j = 1; j <= num_blocks; j++) {
111 int blen = (j == num_blocks && last_len) ? 112 int blen = (j == num_blocks && last_len) ?
112 last_len : AES_BLOCK_LEN; 113 last_len : AES_BLOCK_SIZE;
113 114
114 /* Decryption followed by authentication */ 115 /* Decryption followed by authentication */
115 b_0[14] = (j >> 8) & 0xff; 116 b_0[14] = (j >> 8) & 0xff;
diff --git a/net/mac80211/aes_ccm.h b/net/mac80211/aes_ccm.h
index 6e7820ef344..5b7d744e237 100644
--- a/net/mac80211/aes_ccm.h
+++ b/net/mac80211/aes_ccm.h
@@ -12,8 +12,6 @@
12 12
13#include <linux/crypto.h> 13#include <linux/crypto.h>
14 14
15#define AES_BLOCK_LEN 16
16
17struct crypto_cipher *ieee80211_aes_key_setup_encrypt(const u8 key[]); 15struct crypto_cipher *ieee80211_aes_key_setup_encrypt(const u8 key[]);
18void ieee80211_aes_ccm_encrypt(struct crypto_cipher *tfm, u8 *scratch, 16void ieee80211_aes_ccm_encrypt(struct crypto_cipher *tfm, u8 *scratch,
19 u8 *data, size_t data_len, 17 u8 *data, size_t data_len,
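
Note on the two hunks above: the mac80211-private AES_BLOCK_LEN define is dropped in favour of AES_BLOCK_SIZE from <crypto/aes.h> (both are 16), so the CCM code now shares the kernel-wide constant. A sketch of the scratch-buffer layout the offsets imply (six blocks, matching the old 6 * AES_BLOCK_LEN buffers in key.h; the struct is illustrative only, the code uses a flat u8 array):

	#include <crypto/aes.h>			/* AES_BLOCK_SIZE == 16 */

	struct ccm_scratch {			/* hypothetical view of u8 scratch[6 * AES_BLOCK_SIZE] */
		u8 b[AES_BLOCK_SIZE];		/* scratch + 0*16: CBC-MAC accumulator */
		u8 s_0[AES_BLOCK_SIZE];		/* scratch + 1*16: first keystream block */
		u8 e_or_a[AES_BLOCK_SIZE];	/* scratch + 2*16: per-block encrypt/auth buffer */
		u8 b_0[AES_BLOCK_SIZE];		/* scratch + 3*16: CCM B_0 block */
		u8 aad[2 * AES_BLOCK_SIZE];	/* scratch + 4*16: AAD, "always two AES blocks" */
	};
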
diff --git a/net/mac80211/aes_cmac.c b/net/mac80211/aes_cmac.c
index d502b2684a6..8dfd70d8fcf 100644
--- a/net/mac80211/aes_cmac.c
+++ b/net/mac80211/aes_cmac.c
@@ -11,12 +11,12 @@
11#include <linux/types.h> 11#include <linux/types.h>
12#include <linux/crypto.h> 12#include <linux/crypto.h>
13#include <linux/err.h> 13#include <linux/err.h>
14#include <crypto/aes.h>
14 15
15#include <net/mac80211.h> 16#include <net/mac80211.h>
16#include "key.h" 17#include "key.h"
17#include "aes_cmac.h" 18#include "aes_cmac.h"
18 19
19#define AES_BLOCK_SIZE 16
20#define AES_CMAC_KEY_LEN 16 20#define AES_CMAC_KEY_LEN 16
21#define CMAC_TLEN 8 /* CMAC TLen = 64 bits (8 octets) */ 21#define CMAC_TLEN 8 /* CMAC TLen = 64 bits (8 octets) */
22#define AAD_LEN 20 22#define AAD_LEN 20
@@ -35,10 +35,10 @@ static void gf_mulx(u8 *pad)
35} 35}
36 36
37 37
38static void aes_128_cmac_vector(struct crypto_cipher *tfm, u8 *scratch, 38static void aes_128_cmac_vector(struct crypto_cipher *tfm, size_t num_elem,
39 size_t num_elem,
40 const u8 *addr[], const size_t *len, u8 *mac) 39 const u8 *addr[], const size_t *len, u8 *mac)
41{ 40{
41 u8 scratch[2 * AES_BLOCK_SIZE];
42 u8 *cbc, *pad; 42 u8 *cbc, *pad;
43 const u8 *pos, *end; 43 const u8 *pos, *end;
44 size_t i, e, left, total_len; 44 size_t i, e, left, total_len;
@@ -95,7 +95,7 @@ static void aes_128_cmac_vector(struct crypto_cipher *tfm, u8 *scratch,
95} 95}
96 96
97 97
98void ieee80211_aes_cmac(struct crypto_cipher *tfm, u8 *scratch, const u8 *aad, 98void ieee80211_aes_cmac(struct crypto_cipher *tfm, const u8 *aad,
99 const u8 *data, size_t data_len, u8 *mic) 99 const u8 *data, size_t data_len, u8 *mic)
100{ 100{
101 const u8 *addr[3]; 101 const u8 *addr[3];
@@ -110,7 +110,7 @@ void ieee80211_aes_cmac(struct crypto_cipher *tfm, u8 *scratch, const u8 *aad,
110 addr[2] = zero; 110 addr[2] = zero;
111 len[2] = CMAC_TLEN; 111 len[2] = CMAC_TLEN;
112 112
113 aes_128_cmac_vector(tfm, scratch, 3, addr, len, mic); 113 aes_128_cmac_vector(tfm, 3, addr, len, mic);
114} 114}
115 115
116 116
diff --git a/net/mac80211/aes_cmac.h b/net/mac80211/aes_cmac.h
index 0eb9a483150..20785a64725 100644
--- a/net/mac80211/aes_cmac.h
+++ b/net/mac80211/aes_cmac.h
@@ -12,7 +12,7 @@
12#include <linux/crypto.h> 12#include <linux/crypto.h>
13 13
14struct crypto_cipher * ieee80211_aes_cmac_key_setup(const u8 key[]); 14struct crypto_cipher * ieee80211_aes_cmac_key_setup(const u8 key[]);
15void ieee80211_aes_cmac(struct crypto_cipher *tfm, u8 *scratch, const u8 *aad, 15void ieee80211_aes_cmac(struct crypto_cipher *tfm, const u8 *aad,
16 const u8 *data, size_t data_len, u8 *mic); 16 const u8 *data, size_t data_len, u8 *mic);
17void ieee80211_aes_cmac_key_free(struct crypto_cipher *tfm); 17void ieee80211_aes_cmac_key_free(struct crypto_cipher *tfm);
18 18
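
The aes_cmac.c change is the same cleanup one level deeper: the two-block scratch area now lives on aes_128_cmac_vector()'s stack instead of being passed in from the key's crypto buffers, so the exported helper loses a parameter. Call sites shrink accordingly (before/after taken from the hunks above):

	/* before */
	ieee80211_aes_cmac(tfm, scratch, aad, data, data_len, mic);

	/* after */
	ieee80211_aes_cmac(tfm, aad, data, data_len, mic);
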
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 9c0d76cdca9..ebadb9ac9a7 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -100,6 +100,21 @@ void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
100 mutex_unlock(&sta->ampdu_mlme.mtx); 100 mutex_unlock(&sta->ampdu_mlme.mtx);
101} 101}
102 102
103void ieee80211_stop_rx_ba_session(struct ieee80211_vif *vif, u16 ba_rx_bitmap,
104 const u8 *addr)
105{
106 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
107 struct sta_info *sta = sta_info_get(sdata, addr);
108 int i;
109
110 for (i = 0; i < STA_TID_NUM; i++)
111 if (ba_rx_bitmap & BIT(i))
112 set_bit(i, sta->ampdu_mlme.tid_rx_stop_requested);
113
114 ieee80211_queue_work(&sta->local->hw, &sta->ampdu_mlme.work);
115}
116EXPORT_SYMBOL(ieee80211_stop_rx_ba_session);
117
103/* 118/*
104 * After accepting the AddBA Request we activated a timer, 119 * After accepting the AddBA Request we activated a timer,
105 * resetting it after each frame that arrives from the originator. 120 * resetting it after each frame that arrives from the originator.
@@ -247,7 +262,11 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
247 "%pM on tid %u\n", 262 "%pM on tid %u\n",
248 mgmt->sa, tid); 263 mgmt->sa, tid);
249#endif /* CONFIG_MAC80211_HT_DEBUG */ 264#endif /* CONFIG_MAC80211_HT_DEBUG */
250 goto end; 265
266 /* delete existing Rx BA session on the same tid */
267 ___ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_RECIPIENT,
268 WLAN_STATUS_UNSPECIFIED_QOS,
269 false);
251 } 270 }
252 271
253 /* prepare A-MPDU MLME for Rx aggregation */ 272 /* prepare A-MPDU MLME for Rx aggregation */
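
Two related changes land in agg-rx.c: a driver-facing ieee80211_stop_rx_ba_session() that marks the requested TIDs in tid_rx_stop_requested and defers the actual teardown to ampdu_mlme.work (drained in the ht.c hunk further down), and ieee80211_process_addba_request() now replaces an existing session on the same TID instead of rejecting the new request. A minimal driver-side sketch of the new export (bitmap and address values are illustrative):

	/* e.g. after a firmware restart invalidated the RX reorder state */
	u16 ba_rx_bitmap = BIT(0) | BIT(5);	/* tear down TIDs 0 and 5 */

	ieee80211_stop_rx_ba_session(vif, ba_rx_bitmap, sta_addr);

One review note: sta_info_get() in the new helper may return NULL for an unknown address and the result is used unchecked, so callers must only pass addresses of currently known stations.
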
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index be70c70d3f5..bfc36e90476 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -209,6 +209,7 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
209 u8 seq[6] = {0}; 209 u8 seq[6] = {0};
210 struct key_params params; 210 struct key_params params;
211 struct ieee80211_key *key = NULL; 211 struct ieee80211_key *key = NULL;
212 u64 pn64;
212 u32 iv32; 213 u32 iv32;
213 u16 iv16; 214 u16 iv16;
214 int err = -ENOENT; 215 int err = -ENOENT;
@@ -256,22 +257,24 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
256 params.seq_len = 6; 257 params.seq_len = 6;
257 break; 258 break;
258 case WLAN_CIPHER_SUITE_CCMP: 259 case WLAN_CIPHER_SUITE_CCMP:
259 seq[0] = key->u.ccmp.tx_pn[5]; 260 pn64 = atomic64_read(&key->u.ccmp.tx_pn);
260 seq[1] = key->u.ccmp.tx_pn[4]; 261 seq[0] = pn64;
261 seq[2] = key->u.ccmp.tx_pn[3]; 262 seq[1] = pn64 >> 8;
262 seq[3] = key->u.ccmp.tx_pn[2]; 263 seq[2] = pn64 >> 16;
263 seq[4] = key->u.ccmp.tx_pn[1]; 264 seq[3] = pn64 >> 24;
264 seq[5] = key->u.ccmp.tx_pn[0]; 265 seq[4] = pn64 >> 32;
266 seq[5] = pn64 >> 40;
265 params.seq = seq; 267 params.seq = seq;
266 params.seq_len = 6; 268 params.seq_len = 6;
267 break; 269 break;
268 case WLAN_CIPHER_SUITE_AES_CMAC: 270 case WLAN_CIPHER_SUITE_AES_CMAC:
269 seq[0] = key->u.aes_cmac.tx_pn[5]; 271 pn64 = atomic64_read(&key->u.aes_cmac.tx_pn);
270 seq[1] = key->u.aes_cmac.tx_pn[4]; 272 seq[0] = pn64;
271 seq[2] = key->u.aes_cmac.tx_pn[3]; 273 seq[1] = pn64 >> 8;
272 seq[3] = key->u.aes_cmac.tx_pn[2]; 274 seq[2] = pn64 >> 16;
273 seq[4] = key->u.aes_cmac.tx_pn[1]; 275 seq[3] = pn64 >> 24;
274 seq[5] = key->u.aes_cmac.tx_pn[0]; 276 seq[4] = pn64 >> 32;
277 seq[5] = pn64 >> 40;
275 params.seq = seq; 278 params.seq = seq;
276 params.seq_len = 6; 279 params.seq_len = 6;
277 break; 280 break;
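
The CCMP/CMAC TX packet number is now kept host-ordered in an atomic64_t rather than a 6-byte array, so readers snapshot it once with atomic64_read() and serialize the low 48 bits themselves; the nl80211 seq[] wants the least-significant octet first. A loop equivalent to the unrolled stores above (helper name hypothetical):

	static void pn64_to_seq(u64 pn64, u8 seq[6])
	{
		int i;

		/* seq[0] = PN bits 0..7, ..., seq[5] = PN bits 40..47 */
		for (i = 0; i < 6; i++)
			seq[i] = pn64 >> (8 * i);
	}

Note the opposite byte order in ieee80211_get_key_tx_seq() (key.c below), where pn[0] carries the most-significant octet.
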
@@ -674,8 +677,11 @@ static void sta_apply_parameters(struct ieee80211_local *local,
674 677
675 if (mask & BIT(NL80211_STA_FLAG_WME)) { 678 if (mask & BIT(NL80211_STA_FLAG_WME)) {
676 sta->flags &= ~WLAN_STA_WME; 679 sta->flags &= ~WLAN_STA_WME;
677 if (set & BIT(NL80211_STA_FLAG_WME)) 680 sta->sta.wme = false;
681 if (set & BIT(NL80211_STA_FLAG_WME)) {
678 sta->flags |= WLAN_STA_WME; 682 sta->flags |= WLAN_STA_WME;
683 sta->sta.wme = true;
684 }
679 } 685 }
680 686
681 if (mask & BIT(NL80211_STA_FLAG_MFP)) { 687 if (mask & BIT(NL80211_STA_FLAG_MFP)) {
@@ -1554,6 +1560,19 @@ static int ieee80211_testmode_cmd(struct wiphy *wiphy, void *data, int len)
1554 1560
1555 return local->ops->testmode_cmd(&local->hw, data, len); 1561 return local->ops->testmode_cmd(&local->hw, data, len);
1556} 1562}
1563
1564static int ieee80211_testmode_dump(struct wiphy *wiphy,
1565 struct sk_buff *skb,
1566 struct netlink_callback *cb,
1567 void *data, int len)
1568{
1569 struct ieee80211_local *local = wiphy_priv(wiphy);
1570
1571 if (!local->ops->testmode_dump)
1572 return -EOPNOTSUPP;
1573
1574 return local->ops->testmode_dump(&local->hw, skb, cb, data, len);
1575}
1557#endif 1576#endif
1558 1577
1559int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata, 1578int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata,
@@ -2085,6 +2104,21 @@ static void ieee80211_get_ringparam(struct wiphy *wiphy,
2085 drv_get_ringparam(local, tx, tx_max, rx, rx_max); 2104 drv_get_ringparam(local, tx, tx_max, rx, rx_max);
2086} 2105}
2087 2106
2107static int ieee80211_set_rekey_data(struct wiphy *wiphy,
2108 struct net_device *dev,
2109 struct cfg80211_gtk_rekey_data *data)
2110{
2111 struct ieee80211_local *local = wiphy_priv(wiphy);
2112 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
2113
2114 if (!local->ops->set_rekey_data)
2115 return -EOPNOTSUPP;
2116
2117 drv_set_rekey_data(local, sdata, data);
2118
2119 return 0;
2120}
2121
2088struct cfg80211_ops mac80211_config_ops = { 2122struct cfg80211_ops mac80211_config_ops = {
2089 .add_virtual_intf = ieee80211_add_iface, 2123 .add_virtual_intf = ieee80211_add_iface,
2090 .del_virtual_intf = ieee80211_del_iface, 2124 .del_virtual_intf = ieee80211_del_iface,
@@ -2134,6 +2168,7 @@ struct cfg80211_ops mac80211_config_ops = {
2134 .set_wds_peer = ieee80211_set_wds_peer, 2168 .set_wds_peer = ieee80211_set_wds_peer,
2135 .rfkill_poll = ieee80211_rfkill_poll, 2169 .rfkill_poll = ieee80211_rfkill_poll,
2136 CFG80211_TESTMODE_CMD(ieee80211_testmode_cmd) 2170 CFG80211_TESTMODE_CMD(ieee80211_testmode_cmd)
2171 CFG80211_TESTMODE_DUMP(ieee80211_testmode_dump)
2137 .set_power_mgmt = ieee80211_set_power_mgmt, 2172 .set_power_mgmt = ieee80211_set_power_mgmt,
2138 .set_bitrate_mask = ieee80211_set_bitrate_mask, 2173 .set_bitrate_mask = ieee80211_set_bitrate_mask,
2139 .remain_on_channel = ieee80211_remain_on_channel, 2174 .remain_on_channel = ieee80211_remain_on_channel,
@@ -2146,4 +2181,5 @@ struct cfg80211_ops mac80211_config_ops = {
2146 .get_antenna = ieee80211_get_antenna, 2181 .get_antenna = ieee80211_get_antenna,
2147 .set_ringparam = ieee80211_set_ringparam, 2182 .set_ringparam = ieee80211_set_ringparam,
2148 .get_ringparam = ieee80211_get_ringparam, 2183 .get_ringparam = ieee80211_get_ringparam,
2184 .set_rekey_data = ieee80211_set_rekey_data,
2149}; 2185};
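
cfg.c gains two optional hooks: a testmode dump handler and set_rekey_data for GTK rekey offload, both returning -EOPNOTSUPP when the driver lacks the underlying op. The rekey plumbing added across this patch, end to end (sketch; the nl80211 command name comes from the matching cfg80211 work, not from this diff):

	/*
	 * userspace (NL80211_CMD_SET_REKEY_OFFLOAD)
	 *   -> cfg80211 ->set_rekey_data
	 *     -> ieee80211_set_rekey_data()		[cfg.c, above]
	 *       -> drv_set_rekey_data()		[driver-ops.h, below]
	 *         -> driver ->set_rekey_data(hw, vif, data)
	 *
	 * and when the firmware rekeys while the host sleeps, the driver
	 * reports back through ieee80211_gtk_rekey_notify() [key.c].
	 */
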
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index 33c58b85c91..38e6101190d 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -78,7 +78,7 @@ KEY_OPS(algorithm);
78static ssize_t key_tx_spec_read(struct file *file, char __user *userbuf, 78static ssize_t key_tx_spec_read(struct file *file, char __user *userbuf,
79 size_t count, loff_t *ppos) 79 size_t count, loff_t *ppos)
80{ 80{
81 const u8 *tpn; 81 u64 pn;
82 char buf[20]; 82 char buf[20];
83 int len; 83 int len;
84 struct ieee80211_key *key = file->private_data; 84 struct ieee80211_key *key = file->private_data;
@@ -94,15 +94,16 @@ static ssize_t key_tx_spec_read(struct file *file, char __user *userbuf,
94 key->u.tkip.tx.iv16); 94 key->u.tkip.tx.iv16);
95 break; 95 break;
96 case WLAN_CIPHER_SUITE_CCMP: 96 case WLAN_CIPHER_SUITE_CCMP:
97 tpn = key->u.ccmp.tx_pn; 97 pn = atomic64_read(&key->u.ccmp.tx_pn);
98 len = scnprintf(buf, sizeof(buf), "%02x%02x%02x%02x%02x%02x\n", 98 len = scnprintf(buf, sizeof(buf), "%02x%02x%02x%02x%02x%02x\n",
99 tpn[0], tpn[1], tpn[2], tpn[3], tpn[4], tpn[5]); 99 (u8)(pn >> 40), (u8)(pn >> 32), (u8)(pn >> 24),
100 (u8)(pn >> 16), (u8)(pn >> 8), (u8)pn);
100 break; 101 break;
101 case WLAN_CIPHER_SUITE_AES_CMAC: 102 case WLAN_CIPHER_SUITE_AES_CMAC:
102 tpn = key->u.aes_cmac.tx_pn; 103 pn = atomic64_read(&key->u.aes_cmac.tx_pn);
103 len = scnprintf(buf, sizeof(buf), "%02x%02x%02x%02x%02x%02x\n", 104 len = scnprintf(buf, sizeof(buf), "%02x%02x%02x%02x%02x%02x\n",
104 tpn[0], tpn[1], tpn[2], tpn[3], tpn[4], 105 (u8)(pn >> 40), (u8)(pn >> 32), (u8)(pn >> 24),
105 tpn[5]); 106 (u8)(pn >> 16), (u8)(pn >> 8), (u8)pn);
106 break; 107 break;
107 default: 108 default:
108 return 0; 109 return 0;
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index eebf7a67daf..b2d6bba4405 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -218,6 +218,16 @@ static inline int drv_hw_scan(struct ieee80211_local *local,
218 return ret; 218 return ret;
219} 219}
220 220
221static inline void drv_cancel_hw_scan(struct ieee80211_local *local,
222 struct ieee80211_sub_if_data *sdata)
223{
224 might_sleep();
225
226 trace_drv_cancel_hw_scan(local, sdata);
227 local->ops->cancel_hw_scan(&local->hw, &sdata->vif);
228 trace_drv_return_void(local);
229}
230
221static inline int 231static inline int
222drv_sched_scan_start(struct ieee80211_local *local, 232drv_sched_scan_start(struct ieee80211_local *local,
223 struct ieee80211_sub_if_data *sdata, 233 struct ieee80211_sub_if_data *sdata,
@@ -637,4 +647,22 @@ static inline int drv_set_bitrate_mask(struct ieee80211_local *local,
637 return ret; 647 return ret;
638} 648}
639 649
650static inline void drv_set_rekey_data(struct ieee80211_local *local,
651 struct ieee80211_sub_if_data *sdata,
652 struct cfg80211_gtk_rekey_data *data)
653{
654 trace_drv_set_rekey_data(local, sdata, data);
655 if (local->ops->set_rekey_data)
656 local->ops->set_rekey_data(&local->hw, &sdata->vif, data);
657 trace_drv_return_void(local);
658}
659
660static inline void drv_rssi_callback(struct ieee80211_local *local,
661 const enum ieee80211_rssi_event event)
662{
663 trace_drv_rssi_callback(local, event);
664 if (local->ops->rssi_callback)
665 local->ops->rssi_callback(&local->hw, event);
666 trace_drv_return_void(local);
667}
640#endif /* __MAC80211_DRIVER_OPS */ 668#endif /* __MAC80211_DRIVER_OPS */
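
The new driver-ops wrappers follow the file's fixed pattern: trace the call, invoke the op (guarded when it is optional, as for set_rekey_data and rssi_callback; unguarded when the caller has already checked it, as for cancel_hw_scan), then trace the return. The skeleton, with hypothetical op and trace names:

	static inline void drv_example_op(struct ieee80211_local *local)
	{
		trace_drv_example_op(local);		/* entry tracepoint */
		if (local->ops->example_op)		/* op is optional */
			local->ops->example_op(&local->hw);
		trace_drv_return_void(local);		/* shared return tracepoint */
	}
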
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h
index ed9edcbd9aa..4470f6e8b84 100644
--- a/net/mac80211/driver-trace.h
+++ b/net/mac80211/driver-trace.h
@@ -460,6 +460,12 @@ DEFINE_EVENT(local_sdata_evt, drv_hw_scan,
460 TP_ARGS(local, sdata) 460 TP_ARGS(local, sdata)
461); 461);
462 462
463DEFINE_EVENT(local_sdata_evt, drv_cancel_hw_scan,
464 TP_PROTO(struct ieee80211_local *local,
465 struct ieee80211_sub_if_data *sdata),
466 TP_ARGS(local, sdata)
467);
468
463DEFINE_EVENT(local_sdata_evt, drv_sched_scan_start, 469DEFINE_EVENT(local_sdata_evt, drv_sched_scan_start,
464 TP_PROTO(struct ieee80211_local *local, 470 TP_PROTO(struct ieee80211_local *local,
465 struct ieee80211_sub_if_data *sdata), 471 struct ieee80211_sub_if_data *sdata),
@@ -1018,6 +1024,56 @@ TRACE_EVENT(drv_set_bitrate_mask,
1018 ) 1024 )
1019); 1025);
1020 1026
1027TRACE_EVENT(drv_set_rekey_data,
1028 TP_PROTO(struct ieee80211_local *local,
1029 struct ieee80211_sub_if_data *sdata,
1030 struct cfg80211_gtk_rekey_data *data),
1031
1032 TP_ARGS(local, sdata, data),
1033
1034 TP_STRUCT__entry(
1035 LOCAL_ENTRY
1036 VIF_ENTRY
1037 __array(u8, kek, NL80211_KEK_LEN)
1038 __array(u8, kck, NL80211_KCK_LEN)
1039 __array(u8, replay_ctr, NL80211_REPLAY_CTR_LEN)
1040 ),
1041
1042 TP_fast_assign(
1043 LOCAL_ASSIGN;
1044 VIF_ASSIGN;
1045 memcpy(__entry->kek, data->kek, NL80211_KEK_LEN);
1046 memcpy(__entry->kck, data->kck, NL80211_KCK_LEN);
1047 memcpy(__entry->replay_ctr, data->replay_ctr,
1048 NL80211_REPLAY_CTR_LEN);
1049 ),
1050
1051 TP_printk(LOCAL_PR_FMT VIF_PR_FMT,
1052 LOCAL_PR_ARG, VIF_PR_ARG)
1053);
1054
1055TRACE_EVENT(drv_rssi_callback,
1056 TP_PROTO(struct ieee80211_local *local,
1057 enum ieee80211_rssi_event rssi_event),
1058
1059 TP_ARGS(local, rssi_event),
1060
1061 TP_STRUCT__entry(
1062 LOCAL_ENTRY
1063 __field(u32, rssi_event)
1064 ),
1065
1066 TP_fast_assign(
1067 LOCAL_ASSIGN;
1068 __entry->rssi_event = rssi_event;
1069 ),
1070
1071 TP_printk(
1072 LOCAL_PR_FMT " rssi_event:%d",
1073 LOCAL_PR_ARG, __entry->rssi_event
1074 )
1075);
1076
1021/* 1077/*
1022 * Tracing for API calls that drivers call. 1078 * Tracing for API calls that drivers call.
1023 */ 1079 */
@@ -1287,6 +1343,51 @@ DEFINE_EVENT(local_only_evt, api_remain_on_channel_expired,
1287 TP_ARGS(local) 1343 TP_ARGS(local)
1288); 1344);
1289 1345
1346TRACE_EVENT(api_gtk_rekey_notify,
1347 TP_PROTO(struct ieee80211_sub_if_data *sdata,
1348 const u8 *bssid, const u8 *replay_ctr),
1349
1350 TP_ARGS(sdata, bssid, replay_ctr),
1351
1352 TP_STRUCT__entry(
1353 VIF_ENTRY
1354 __array(u8, bssid, ETH_ALEN)
1355 __array(u8, replay_ctr, NL80211_REPLAY_CTR_LEN)
1356 ),
1357
1358 TP_fast_assign(
1359 VIF_ASSIGN;
1360 memcpy(__entry->bssid, bssid, ETH_ALEN);
1361 memcpy(__entry->replay_ctr, replay_ctr, NL80211_REPLAY_CTR_LEN);
1362 ),
1363
1364 TP_printk(VIF_PR_FMT, VIF_PR_ARG)
1365);
1366
1367TRACE_EVENT(api_enable_rssi_reports,
1368 TP_PROTO(struct ieee80211_sub_if_data *sdata,
1369 int rssi_min_thold, int rssi_max_thold),
1370
1371 TP_ARGS(sdata, rssi_min_thold, rssi_max_thold),
1372
1373 TP_STRUCT__entry(
1374 VIF_ENTRY
1375 __field(int, rssi_min_thold)
1376 __field(int, rssi_max_thold)
1377 ),
1378
1379 TP_fast_assign(
1380 VIF_ASSIGN;
1381 __entry->rssi_min_thold = rssi_min_thold;
1382 __entry->rssi_max_thold = rssi_max_thold;
1383 ),
1384
1385 TP_printk(
1386 VIF_PR_FMT " rssi_min_thold =%d, rssi_max_thold = %d",
1387 VIF_PR_ARG, __entry->rssi_min_thold, __entry->rssi_max_thold
1388 )
1389);
1390
1290/* 1391/*
1291 * Tracing for internal functions 1392 * Tracing for internal functions
1292 * (which may also be called in response to driver calls) 1393 * (which may also be called in response to driver calls)
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 591add22bcc..7cfc286946c 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -140,6 +140,12 @@ void ieee80211_ba_session_work(struct work_struct *work)
140 sta, tid, WLAN_BACK_RECIPIENT, 140 sta, tid, WLAN_BACK_RECIPIENT,
141 WLAN_REASON_QSTA_TIMEOUT, true); 141 WLAN_REASON_QSTA_TIMEOUT, true);
142 142
143 if (test_and_clear_bit(tid,
144 sta->ampdu_mlme.tid_rx_stop_requested))
145 ___ieee80211_stop_rx_ba_session(
146 sta, tid, WLAN_BACK_RECIPIENT,
147 WLAN_REASON_UNSPECIFIED, true);
148
143 tid_tx = sta->ampdu_mlme.tid_start_tx[tid]; 149 tid_tx = sta->ampdu_mlme.tid_start_tx[tid];
144 if (tid_tx) { 150 if (tid_tx) {
145 /* 151 /*
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 090b0ec1e05..dda0d1ab34f 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -202,7 +202,22 @@ struct ieee80211_rx_data {
202 struct ieee80211_key *key; 202 struct ieee80211_key *key;
203 203
204 unsigned int flags; 204 unsigned int flags;
205 int queue; 205
206 /*
207 * Index into sequence numbers array, 0..16
208 * since the last (16) is used for non-QoS,
209 * will be 16 on non-QoS frames.
210 */
211 int seqno_idx;
212
213 /*
214 * Index into the security IV/PN arrays, 0..16
215 * since the last (16) is used for CCMP-encrypted
216 * management frames, will be set to 16 on mgmt
217 * frames and 0 on non-QoS frames.
218 */
219 int security_idx;
220
206 u32 tkip_iv32; 221 u32 tkip_iv32;
207 u16 tkip_iv16; 222 u16 tkip_iv16;
208}; 223};
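
The single rx->queue index is split because sequence-number and PN bookkeeping disagree on where non-QoS and management frames belong. The resulting assignment, as implemented by ieee80211_parse_qos() in the rx.c hunk at the end of this diff:

	/*
	 *   frame type		seqno_idx	security_idx
	 *   QoS data, TID n	n		n
	 *   non-QoS data	16		0
	 *   management		16		16 (CCMP mgmt PN slot)
	 */
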
@@ -417,6 +432,14 @@ struct ieee80211_if_managed {
417 * generated for the current association. 432 * generated for the current association.
418 */ 433 */
419 int last_cqm_event_signal; 434 int last_cqm_event_signal;
435
436 /*
437 * State variables for keeping track of RSSI of the AP currently
438 * connected to and informing driver when RSSI has gone
439 * below/above a certain threshold.
440 */
441 int rssi_min_thold, rssi_max_thold;
442 int last_ave_beacon_signal;
420}; 443};
421 444
422struct ieee80211_if_ibss { 445struct ieee80211_if_ibss {
@@ -515,12 +538,14 @@ struct ieee80211_if_mesh {
515 * @IEEE80211_SDATA_DONT_BRIDGE_PACKETS: bridge packets between 538 * @IEEE80211_SDATA_DONT_BRIDGE_PACKETS: bridge packets between
516 * associated stations and deliver multicast frames both 539 * associated stations and deliver multicast frames both
517 * back to wireless media and to the local net stack. 540 * back to wireless media and to the local net stack.
541 * @IEEE80211_SDATA_DISCONNECT_RESUME: Disconnect after resume.
518 */ 542 */
519enum ieee80211_sub_if_data_flags { 543enum ieee80211_sub_if_data_flags {
520 IEEE80211_SDATA_ALLMULTI = BIT(0), 544 IEEE80211_SDATA_ALLMULTI = BIT(0),
521 IEEE80211_SDATA_PROMISC = BIT(1), 545 IEEE80211_SDATA_PROMISC = BIT(1),
522 IEEE80211_SDATA_OPERATING_GMODE = BIT(2), 546 IEEE80211_SDATA_OPERATING_GMODE = BIT(2),
523 IEEE80211_SDATA_DONT_BRIDGE_PACKETS = BIT(3), 547 IEEE80211_SDATA_DONT_BRIDGE_PACKETS = BIT(3),
548 IEEE80211_SDATA_DISCONNECT_RESUME = BIT(4),
524}; 549};
525 550
526/** 551/**
@@ -544,6 +569,9 @@ struct ieee80211_sub_if_data {
544 /* keys */ 569 /* keys */
545 struct list_head key_list; 570 struct list_head key_list;
546 571
572 /* count for keys needing tailroom space allocation */
573 int crypto_tx_tailroom_needed_cnt;
574
547 struct net_device *dev; 575 struct net_device *dev;
548 struct ieee80211_local *local; 576 struct ieee80211_local *local;
549 577
@@ -1350,10 +1378,12 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
1350struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata, 1378struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
1351 u8 *dst, 1379 u8 *dst,
1352 const u8 *ssid, size_t ssid_len, 1380 const u8 *ssid, size_t ssid_len,
1353 const u8 *ie, size_t ie_len); 1381 const u8 *ie, size_t ie_len,
1382 bool directed);
1354void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst, 1383void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
1355 const u8 *ssid, size_t ssid_len, 1384 const u8 *ssid, size_t ssid_len,
1356 const u8 *ie, size_t ie_len); 1385 const u8 *ie, size_t ie_len,
1386 bool directed);
1357 1387
1358void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata, 1388void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
1359 const size_t supp_rates_len, 1389 const size_t supp_rates_len,
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index dee30aea9ab..cd5fb40d3fd 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -363,8 +363,7 @@ static int ieee80211_open(struct net_device *dev)
363 int err; 363 int err;
364 364
365 /* fail early if user set an invalid address */ 365 /* fail early if user set an invalid address */
366 if (!is_zero_ether_addr(dev->dev_addr) && 366 if (!is_valid_ether_addr(dev->dev_addr))
367 !is_valid_ether_addr(dev->dev_addr))
368 return -EADDRNOTAVAIL; 367 return -EADDRNOTAVAIL;
369 368
370 err = ieee80211_check_concurrent_iface(sdata, sdata->vif.type); 369 err = ieee80211_check_concurrent_iface(sdata, sdata->vif.type);
@@ -1130,8 +1129,8 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
1130 1129
1131 ASSERT_RTNL(); 1130 ASSERT_RTNL();
1132 1131
1133 ndev = alloc_netdev_mq(sizeof(*sdata) + local->hw.vif_data_size, 1132 ndev = alloc_netdev_mqs(sizeof(*sdata) + local->hw.vif_data_size,
1134 name, ieee80211_if_setup, local->hw.queues); 1133 name, ieee80211_if_setup, local->hw.queues, 1);
1135 if (!ndev) 1134 if (!ndev)
1136 return -ENOMEM; 1135 return -ENOMEM;
1137 dev_net_set(ndev, wiphy_net(local->hw.wiphy)); 1136 dev_net_set(ndev, wiphy_net(local->hw.wiphy));
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index f825e2f0a57..739bee13e81 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -61,6 +61,36 @@ static struct ieee80211_sta *get_sta_for_key(struct ieee80211_key *key)
61 return NULL; 61 return NULL;
62} 62}
63 63
64static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata)
65{
66 /*
67 * When this count is zero, SKB resizing for allocating tailroom
68 * for IV or MMIC is skipped. But, this check has created two race
69 * cases in xmit path while transiting from zero count to one:
70 *
71 * 1. SKB resize was skipped because no key was added but just before
72 * the xmit key is added and SW encryption kicks off.
73 *
74 * 2. SKB resize was skipped because all the keys were hw planted but
75 * just before xmit one of the key is deleted and SW encryption kicks
76 * off.
77 *
78 * In both the above case SW encryption will find not enough space for
79 * tailroom and exits with WARN_ON. (See WARN_ONs at wpa.c)
80 *
81 * Solution has been explained at
82 * http://mid.gmane.org/1308590980.4322.19.camel@jlt3.sipsolutions.net
83 */
84
85 if (!sdata->crypto_tx_tailroom_needed_cnt++) {
86 /*
87 * Flush all XMIT packets currently using HW encryption or no
88 * encryption at all if the count transition is from 0 -> 1.
89 */
90 synchronize_net();
91 }
92}
93
64static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key) 94static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
65{ 95{
66 struct ieee80211_sub_if_data *sdata; 96 struct ieee80211_sub_if_data *sdata;
@@ -101,6 +131,11 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
101 131
102 if (!ret) { 132 if (!ret) {
103 key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE; 133 key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE;
134
135 if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
136 (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)))
137 sdata->crypto_tx_tailroom_needed_cnt--;
138
104 return 0; 139 return 0;
105 } 140 }
106 141
@@ -142,6 +177,10 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
142 sta = get_sta_for_key(key); 177 sta = get_sta_for_key(key);
143 sdata = key->sdata; 178 sdata = key->sdata;
144 179
180 if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
181 (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV)))
182 increment_tailroom_need_count(sdata);
183
145 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 184 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
146 sdata = container_of(sdata->bss, 185 sdata = container_of(sdata->bss,
147 struct ieee80211_sub_if_data, 186 struct ieee80211_sub_if_data,
@@ -330,6 +369,7 @@ struct ieee80211_key *ieee80211_key_alloc(u32 cipher, int idx, size_t key_len,
330 get_unaligned_le16(seq); 369 get_unaligned_le16(seq);
331 } 370 }
332 } 371 }
372 spin_lock_init(&key->u.tkip.txlock);
333 break; 373 break;
334 case WLAN_CIPHER_SUITE_CCMP: 374 case WLAN_CIPHER_SUITE_CCMP:
335 key->conf.iv_len = CCMP_HDR_LEN; 375 key->conf.iv_len = CCMP_HDR_LEN;
@@ -394,8 +434,10 @@ static void __ieee80211_key_destroy(struct ieee80211_key *key)
394 ieee80211_aes_key_free(key->u.ccmp.tfm); 434 ieee80211_aes_key_free(key->u.ccmp.tfm);
395 if (key->conf.cipher == WLAN_CIPHER_SUITE_AES_CMAC) 435 if (key->conf.cipher == WLAN_CIPHER_SUITE_AES_CMAC)
396 ieee80211_aes_cmac_key_free(key->u.aes_cmac.tfm); 436 ieee80211_aes_cmac_key_free(key->u.aes_cmac.tfm);
397 if (key->local) 437 if (key->local) {
398 ieee80211_debugfs_key_remove(key); 438 ieee80211_debugfs_key_remove(key);
439 key->sdata->crypto_tx_tailroom_needed_cnt--;
440 }
399 441
400 kfree(key); 442 kfree(key);
401} 443}
@@ -452,6 +494,8 @@ int ieee80211_key_link(struct ieee80211_key *key,
452 else 494 else
453 old_key = key_mtx_dereference(sdata->local, sdata->keys[idx]); 495 old_key = key_mtx_dereference(sdata->local, sdata->keys[idx]);
454 496
497 increment_tailroom_need_count(sdata);
498
455 __ieee80211_key_replace(sdata, sta, pairwise, old_key, key); 499 __ieee80211_key_replace(sdata, sta, pairwise, old_key, key);
456 __ieee80211_key_destroy(old_key); 500 __ieee80211_key_destroy(old_key);
457 501
@@ -498,12 +542,49 @@ void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata)
498 542
499 mutex_lock(&sdata->local->key_mtx); 543 mutex_lock(&sdata->local->key_mtx);
500 544
501 list_for_each_entry(key, &sdata->key_list, list) 545 sdata->crypto_tx_tailroom_needed_cnt = 0;
546
547 list_for_each_entry(key, &sdata->key_list, list) {
548 increment_tailroom_need_count(sdata);
502 ieee80211_key_enable_hw_accel(key); 549 ieee80211_key_enable_hw_accel(key);
550 }
503 551
504 mutex_unlock(&sdata->local->key_mtx); 552 mutex_unlock(&sdata->local->key_mtx);
505} 553}
506 554
555void ieee80211_iter_keys(struct ieee80211_hw *hw,
556 struct ieee80211_vif *vif,
557 void (*iter)(struct ieee80211_hw *hw,
558 struct ieee80211_vif *vif,
559 struct ieee80211_sta *sta,
560 struct ieee80211_key_conf *key,
561 void *data),
562 void *iter_data)
563{
564 struct ieee80211_local *local = hw_to_local(hw);
565 struct ieee80211_key *key;
566 struct ieee80211_sub_if_data *sdata;
567
568 ASSERT_RTNL();
569
570 mutex_lock(&local->key_mtx);
571 if (vif) {
572 sdata = vif_to_sdata(vif);
573 list_for_each_entry(key, &sdata->key_list, list)
574 iter(hw, &sdata->vif,
575 key->sta ? &key->sta->sta : NULL,
576 &key->conf, iter_data);
577 } else {
578 list_for_each_entry(sdata, &local->interfaces, list)
579 list_for_each_entry(key, &sdata->key_list, list)
580 iter(hw, &sdata->vif,
581 key->sta ? &key->sta->sta : NULL,
582 &key->conf, iter_data);
583 }
584 mutex_unlock(&local->key_mtx);
585}
586EXPORT_SYMBOL(ieee80211_iter_keys);
587
507void ieee80211_disable_keys(struct ieee80211_sub_if_data *sdata) 588void ieee80211_disable_keys(struct ieee80211_sub_if_data *sdata)
508{ 589{
509 struct ieee80211_key *key; 590 struct ieee80211_key *key;
@@ -533,3 +614,89 @@ void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata)
533 614
534 mutex_unlock(&sdata->local->key_mtx); 615 mutex_unlock(&sdata->local->key_mtx);
535} 616}
617
618
619void ieee80211_gtk_rekey_notify(struct ieee80211_vif *vif, const u8 *bssid,
620 const u8 *replay_ctr, gfp_t gfp)
621{
622 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
623
624 trace_api_gtk_rekey_notify(sdata, bssid, replay_ctr);
625
626 cfg80211_gtk_rekey_notify(sdata->dev, bssid, replay_ctr, gfp);
627}
628EXPORT_SYMBOL_GPL(ieee80211_gtk_rekey_notify);
629
630void ieee80211_get_key_tx_seq(struct ieee80211_key_conf *keyconf,
631 struct ieee80211_key_seq *seq)
632{
633 struct ieee80211_key *key;
634 u64 pn64;
635
636 if (WARN_ON(!(keyconf->flags & IEEE80211_KEY_FLAG_GENERATE_IV)))
637 return;
638
639 key = container_of(keyconf, struct ieee80211_key, conf);
640
641 switch (key->conf.cipher) {
642 case WLAN_CIPHER_SUITE_TKIP:
643 seq->tkip.iv32 = key->u.tkip.tx.iv32;
644 seq->tkip.iv16 = key->u.tkip.tx.iv16;
645 break;
646 case WLAN_CIPHER_SUITE_CCMP:
647 pn64 = atomic64_read(&key->u.ccmp.tx_pn);
648 seq->ccmp.pn[5] = pn64;
649 seq->ccmp.pn[4] = pn64 >> 8;
650 seq->ccmp.pn[3] = pn64 >> 16;
651 seq->ccmp.pn[2] = pn64 >> 24;
652 seq->ccmp.pn[1] = pn64 >> 32;
653 seq->ccmp.pn[0] = pn64 >> 40;
654 break;
655 case WLAN_CIPHER_SUITE_AES_CMAC:
656 pn64 = atomic64_read(&key->u.aes_cmac.tx_pn);
657 seq->ccmp.pn[5] = pn64;
658 seq->ccmp.pn[4] = pn64 >> 8;
659 seq->ccmp.pn[3] = pn64 >> 16;
660 seq->ccmp.pn[2] = pn64 >> 24;
661 seq->ccmp.pn[1] = pn64 >> 32;
662 seq->ccmp.pn[0] = pn64 >> 40;
663 break;
664 default:
665 WARN_ON(1);
666 }
667}
668EXPORT_SYMBOL(ieee80211_get_key_tx_seq);
669
670void ieee80211_get_key_rx_seq(struct ieee80211_key_conf *keyconf,
671 int tid, struct ieee80211_key_seq *seq)
672{
673 struct ieee80211_key *key;
674 const u8 *pn;
675
676 key = container_of(keyconf, struct ieee80211_key, conf);
677
678 switch (key->conf.cipher) {
679 case WLAN_CIPHER_SUITE_TKIP:
680 if (WARN_ON(tid < 0 || tid >= NUM_RX_DATA_QUEUES))
681 return;
682 seq->tkip.iv32 = key->u.tkip.rx[tid].iv32;
683 seq->tkip.iv16 = key->u.tkip.rx[tid].iv16;
684 break;
685 case WLAN_CIPHER_SUITE_CCMP:
686 if (WARN_ON(tid < -1 || tid >= NUM_RX_DATA_QUEUES))
687 return;
688 if (tid < 0)
689 pn = key->u.ccmp.rx_pn[NUM_RX_DATA_QUEUES];
690 else
691 pn = key->u.ccmp.rx_pn[tid];
692 memcpy(seq->ccmp.pn, pn, CCMP_PN_LEN);
693 break;
694 case WLAN_CIPHER_SUITE_AES_CMAC:
695 if (WARN_ON(tid != 0))
696 return;
697 pn = key->u.aes_cmac.rx_pn;
698 memcpy(seq->aes_cmac.pn, pn, CMAC_PN_LEN);
699 break;
700 }
701}
702EXPORT_SYMBOL(ieee80211_get_key_rx_seq);
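
ieee80211_iter_keys() lets a driver walk every programmed key (for one vif, or all interfaces when vif is NULL), e.g. to re-upload keys after a hardware restart; the ASSERT_RTNL() means it may only be called with the RTNL held. A hedged usage sketch (callback name and body illustrative):

	static void resync_one_key(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   struct ieee80211_sta *sta,
				   struct ieee80211_key_conf *key,
				   void *data)
	{
		/* re-program this key into the device */
	}

	/* caller holds the RTNL */
	ieee80211_iter_keys(hw, vif, resync_one_key, NULL);
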
diff --git a/net/mac80211/key.h b/net/mac80211/key.h
index d801d535133..7d4e31f037d 100644
--- a/net/mac80211/key.h
+++ b/net/mac80211/key.h
@@ -28,8 +28,9 @@
28#define CCMP_PN_LEN 6 28#define CCMP_PN_LEN 6
29#define TKIP_IV_LEN 8 29#define TKIP_IV_LEN 8
30#define TKIP_ICV_LEN 4 30#define TKIP_ICV_LEN 4
31#define CMAC_PN_LEN 6
31 32
32#define NUM_RX_DATA_QUEUES 17 33#define NUM_RX_DATA_QUEUES 16
33 34
34struct ieee80211_local; 35struct ieee80211_local;
35struct ieee80211_sub_if_data; 36struct ieee80211_sub_if_data;
@@ -40,9 +41,11 @@ struct sta_info;
40 * 41 *
41 * @KEY_FLAG_UPLOADED_TO_HARDWARE: Indicates that this key is present 42 * @KEY_FLAG_UPLOADED_TO_HARDWARE: Indicates that this key is present
42 * in the hardware for TX crypto hardware acceleration. 43 * in the hardware for TX crypto hardware acceleration.
44 * @KEY_FLAG_TAINTED: Key is tainted and packets should be dropped.
43 */ 45 */
44enum ieee80211_internal_key_flags { 46enum ieee80211_internal_key_flags {
45 KEY_FLAG_UPLOADED_TO_HARDWARE = BIT(0), 47 KEY_FLAG_UPLOADED_TO_HARDWARE = BIT(0),
48 KEY_FLAG_TAINTED = BIT(1),
46}; 49};
47 50
48enum ieee80211_internal_tkip_state { 51enum ieee80211_internal_tkip_state {
@@ -52,9 +55,10 @@ enum ieee80211_internal_tkip_state {
52}; 55};
53 56
54struct tkip_ctx { 57struct tkip_ctx {
55 u32 iv32; 58 u32 iv32; /* current iv32 */
56 u16 iv16; 59 u16 iv16; /* current iv16 */
57 u16 p1k[5]; 60 u16 p1k[5]; /* p1k cache */
61 u32 p1k_iv32; /* iv32 for which p1k computed */
58 enum ieee80211_internal_tkip_state state; 62 enum ieee80211_internal_tkip_state state;
59}; 63};
60 64
@@ -71,6 +75,9 @@ struct ieee80211_key {
71 75
72 union { 76 union {
73 struct { 77 struct {
78 /* protects tx context */
79 spinlock_t txlock;
80
74 /* last used TSC */ 81 /* last used TSC */
75 struct tkip_ctx tx; 82 struct tkip_ctx tx;
76 83
@@ -78,32 +85,23 @@ struct ieee80211_key {
78 struct tkip_ctx rx[NUM_RX_DATA_QUEUES]; 85 struct tkip_ctx rx[NUM_RX_DATA_QUEUES];
79 } tkip; 86 } tkip;
80 struct { 87 struct {
81 u8 tx_pn[6]; 88 atomic64_t tx_pn;
82 /* 89 /*
83 * Last received packet number. The first 90 * Last received packet number. The first
84 * NUM_RX_DATA_QUEUES counters are used with Data 91 * NUM_RX_DATA_QUEUES counters are used with Data
85 * frames and the last counter is used with Robust 92 * frames and the last counter is used with Robust
86 * Management frames. 93 * Management frames.
87 */ 94 */
88 u8 rx_pn[NUM_RX_DATA_QUEUES + 1][6]; 95 u8 rx_pn[NUM_RX_DATA_QUEUES + 1][CCMP_PN_LEN];
89 struct crypto_cipher *tfm; 96 struct crypto_cipher *tfm;
90 u32 replays; /* dot11RSNAStatsCCMPReplays */ 97 u32 replays; /* dot11RSNAStatsCCMPReplays */
91 /* scratch buffers for virt_to_page() (crypto API) */
92#ifndef AES_BLOCK_LEN
93#define AES_BLOCK_LEN 16
94#endif
95 u8 tx_crypto_buf[6 * AES_BLOCK_LEN];
96 u8 rx_crypto_buf[6 * AES_BLOCK_LEN];
97 } ccmp; 98 } ccmp;
98 struct { 99 struct {
99 u8 tx_pn[6]; 100 atomic64_t tx_pn;
100 u8 rx_pn[6]; 101 u8 rx_pn[CMAC_PN_LEN];
101 struct crypto_cipher *tfm; 102 struct crypto_cipher *tfm;
102 u32 replays; /* dot11RSNAStatsCMACReplays */ 103 u32 replays; /* dot11RSNAStatsCMACReplays */
103 u32 icverrors; /* dot11RSNAStatsCMACICVErrors */ 104 u32 icverrors; /* dot11RSNAStatsCMACICVErrors */
104 /* scratch buffers for virt_to_page() (crypto API) */
105 u8 tx_crypto_buf[2 * AES_BLOCK_LEN];
106 u8 rx_crypto_buf[2 * AES_BLOCK_LEN];
107 } aes_cmac; 105 } aes_cmac;
108 } u; 106 } u;
109 107
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 0d2faacc3e8..068ee651825 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -647,12 +647,12 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
647 mpath = node->mpath; 647 mpath = node->mpath;
648 if (mpath->sdata == sdata && 648 if (mpath->sdata == sdata &&
649 memcmp(addr, mpath->dst, ETH_ALEN) == 0) { 649 memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
650 spin_lock_bh(&mpath->state_lock); 650 spin_lock(&mpath->state_lock);
651 mpath->flags |= MESH_PATH_RESOLVING; 651 mpath->flags |= MESH_PATH_RESOLVING;
652 hlist_del_rcu(&node->list); 652 hlist_del_rcu(&node->list);
653 call_rcu(&node->rcu, mesh_path_node_reclaim); 653 call_rcu(&node->rcu, mesh_path_node_reclaim);
654 atomic_dec(&tbl->entries); 654 atomic_dec(&tbl->entries);
655 spin_unlock_bh(&mpath->state_lock); 655 spin_unlock(&mpath->state_lock);
656 goto enddel; 656 goto enddel;
657 } 657 }
658 } 658 }
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index d595265d6c2..c99237cd4b9 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -749,7 +749,7 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
749 container_of(work, struct ieee80211_local, 749 container_of(work, struct ieee80211_local,
750 dynamic_ps_enable_work); 750 dynamic_ps_enable_work);
751 struct ieee80211_sub_if_data *sdata = local->ps_sdata; 751 struct ieee80211_sub_if_data *sdata = local->ps_sdata;
752 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 752 struct ieee80211_if_managed *ifmgd;
753 unsigned long flags; 753 unsigned long flags;
754 int q; 754 int q;
755 755
@@ -757,26 +757,39 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
757 if (!sdata) 757 if (!sdata)
758 return; 758 return;
759 759
760 ifmgd = &sdata->u.mgd;
761
760 if (local->hw.conf.flags & IEEE80211_CONF_PS) 762 if (local->hw.conf.flags & IEEE80211_CONF_PS)
761 return; 763 return;
762 764
763 /* 765 if (!local->disable_dynamic_ps &&
764 * transmission can be stopped by others which leads to 766 local->hw.conf.dynamic_ps_timeout > 0) {
765 * dynamic_ps_timer expiry. Postpond the ps timer if it 767 /* don't enter PS if TX frames are pending */
766 * is not the actual idle state. 768 if (drv_tx_frames_pending(local)) {
767 */
768 spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
769 for (q = 0; q < local->hw.queues; q++) {
770 if (local->queue_stop_reasons[q]) {
771 spin_unlock_irqrestore(&local->queue_stop_reason_lock,
772 flags);
773 mod_timer(&local->dynamic_ps_timer, jiffies + 769 mod_timer(&local->dynamic_ps_timer, jiffies +
774 msecs_to_jiffies( 770 msecs_to_jiffies(
775 local->hw.conf.dynamic_ps_timeout)); 771 local->hw.conf.dynamic_ps_timeout));
776 return; 772 return;
777 } 773 }
774
775 /*
776 * transmission can be stopped by others which leads to
777 * dynamic_ps_timer expiry. Postpone the ps timer if it
778 * is not the actual idle state.
779 */
780 spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
781 for (q = 0; q < local->hw.queues; q++) {
782 if (local->queue_stop_reasons[q]) {
783 spin_unlock_irqrestore(&local->queue_stop_reason_lock,
784 flags);
785 mod_timer(&local->dynamic_ps_timer, jiffies +
786 msecs_to_jiffies(
787 local->hw.conf.dynamic_ps_timeout));
788 return;
789 }
790 }
791 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
778 } 792 }
779 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
780 793
781 if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) && 794 if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) &&
782 (!(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED))) { 795 (!(ifmgd->flags & IEEE80211_STA_NULLFUNC_ACKED))) {
@@ -801,7 +814,8 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
801 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS); 814 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
802 } 815 }
803 816
804 netif_tx_wake_all_queues(sdata->dev); 817 if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
818 netif_tx_wake_all_queues(sdata->dev);
805} 819}
806 820
807void ieee80211_dynamic_ps_timer(unsigned long data) 821void ieee80211_dynamic_ps_timer(unsigned long data)
@@ -1204,7 +1218,8 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
1204 ieee80211_send_nullfunc(sdata->local, sdata, 0); 1218 ieee80211_send_nullfunc(sdata->local, sdata, 0);
1205 } else { 1219 } else {
1206 ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID); 1220 ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID);
1207 ieee80211_send_probe_req(sdata, dst, ssid + 2, ssid[1], NULL, 0); 1221 ieee80211_send_probe_req(sdata, dst, ssid + 2, ssid[1], NULL, 0,
1222 true);
1208 } 1223 }
1209 1224
1210 ifmgd->probe_send_count++; 1225 ifmgd->probe_send_count++;
@@ -1289,7 +1304,7 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
1289 1304
1290 ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID); 1305 ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID);
1291 skb = ieee80211_build_probe_req(sdata, ifmgd->associated->bssid, 1306 skb = ieee80211_build_probe_req(sdata, ifmgd->associated->bssid,
1292 ssid + 2, ssid[1], NULL, 0); 1307 ssid + 2, ssid[1], NULL, 0, true);
1293 1308
1294 return skb; 1309 return skb;
1295} 1310}
@@ -1748,6 +1763,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
1748 ifmgd->ave_beacon_signal = rx_status->signal * 16; 1763 ifmgd->ave_beacon_signal = rx_status->signal * 16;
1749 ifmgd->last_cqm_event_signal = 0; 1764 ifmgd->last_cqm_event_signal = 0;
1750 ifmgd->count_beacon_signal = 1; 1765 ifmgd->count_beacon_signal = 1;
1766 ifmgd->last_ave_beacon_signal = 0;
1751 } else { 1767 } else {
1752 ifmgd->ave_beacon_signal = 1768 ifmgd->ave_beacon_signal =
1753 (IEEE80211_SIGNAL_AVE_WEIGHT * rx_status->signal * 16 + 1769 (IEEE80211_SIGNAL_AVE_WEIGHT * rx_status->signal * 16 +
@@ -1755,6 +1771,28 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
1755 ifmgd->ave_beacon_signal) / 16; 1771 ifmgd->ave_beacon_signal) / 16;
1756 ifmgd->count_beacon_signal++; 1772 ifmgd->count_beacon_signal++;
1757 } 1773 }
1774
1775 if (ifmgd->rssi_min_thold != ifmgd->rssi_max_thold &&
1776 ifmgd->count_beacon_signal >= IEEE80211_SIGNAL_AVE_MIN_COUNT) {
1777 int sig = ifmgd->ave_beacon_signal;
1778 int last_sig = ifmgd->last_ave_beacon_signal;
1779
1780 /*
1781 * if signal crosses either of the boundaries, invoke callback
1782 * with appropriate parameters
1783 */
1784 if (sig > ifmgd->rssi_max_thold &&
1785 (last_sig <= ifmgd->rssi_min_thold || last_sig == 0)) {
1786 ifmgd->last_ave_beacon_signal = sig;
1787 drv_rssi_callback(local, RSSI_EVENT_HIGH);
1788 } else if (sig < ifmgd->rssi_min_thold &&
1789 (last_sig >= ifmgd->rssi_max_thold ||
1790 last_sig == 0)) {
1791 ifmgd->last_ave_beacon_signal = sig;
1792 drv_rssi_callback(local, RSSI_EVENT_LOW);
1793 }
1794 }
1795
1758 if (bss_conf->cqm_rssi_thold && 1796 if (bss_conf->cqm_rssi_thold &&
1759 ifmgd->count_beacon_signal >= IEEE80211_SIGNAL_AVE_MIN_COUNT && 1797 ifmgd->count_beacon_signal >= IEEE80211_SIGNAL_AVE_MIN_COUNT &&
1760 !(local->hw.flags & IEEE80211_HW_SUPPORTS_CQM_RSSI)) { 1798 !(local->hw.flags & IEEE80211_HW_SUPPORTS_CQM_RSSI)) {
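
The threshold pair implements driver-requested RSSI reporting with hysteresis: an event fires only when the averaged beacon signal crosses the whole [rssi_min_thold, rssi_max_thold] band relative to the last reported value, so oscillation inside the band stays silent. A worked trace with hypothetical thresholds (same scale as ave_beacon_signal):

	/* rssi_min_thold = -75, rssi_max_thold = -65
	 *
	 *   avg at -70		: inside the band, no event
	 *   avg rises to -60	: RSSI_EVENT_HIGH (last_sig still unset)
	 *   avg falls to -70	: no event, band not fully crossed
	 *   avg falls to -80	: RSSI_EVENT_LOW  (last_sig -60 >= max)
	 *   avg rises to -62	: RSSI_EVENT_HIGH (last_sig -80 <= min)
	 */
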
@@ -2014,7 +2052,7 @@ static void ieee80211_sta_timer(unsigned long data)
2014} 2052}
2015 2053
2016static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata, 2054static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata,
2017 u8 *bssid) 2055 u8 *bssid, u8 reason)
2018{ 2056{
2019 struct ieee80211_local *local = sdata->local; 2057 struct ieee80211_local *local = sdata->local;
2020 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 2058 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
@@ -2032,8 +2070,7 @@ static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata,
2032 * but that's not a problem. 2070 * but that's not a problem.
2033 */ 2071 */
2034 ieee80211_send_deauth_disassoc(sdata, bssid, 2072 ieee80211_send_deauth_disassoc(sdata, bssid,
2035 IEEE80211_STYPE_DEAUTH, 2073 IEEE80211_STYPE_DEAUTH, reason,
2036 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
2037 NULL, true); 2074 NULL, true);
2038 mutex_lock(&ifmgd->mtx); 2075 mutex_lock(&ifmgd->mtx);
2039} 2076}
@@ -2079,7 +2116,8 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
2079 " AP %pM, disconnecting.\n", 2116 " AP %pM, disconnecting.\n",
2080 sdata->name, bssid); 2117 sdata->name, bssid);
2081#endif 2118#endif
2082 ieee80211_sta_connection_lost(sdata, bssid); 2119 ieee80211_sta_connection_lost(sdata, bssid,
2120 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY);
2083 } 2121 }
2084 } else if (time_is_after_jiffies(ifmgd->probe_timeout)) 2122 } else if (time_is_after_jiffies(ifmgd->probe_timeout))
2085 run_again(ifmgd, ifmgd->probe_timeout); 2123 run_again(ifmgd, ifmgd->probe_timeout);
@@ -2091,7 +2129,8 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
2091 sdata->name, 2129 sdata->name,
2092 bssid, probe_wait_ms); 2130 bssid, probe_wait_ms);
2093#endif 2131#endif
2094 ieee80211_sta_connection_lost(sdata, bssid); 2132 ieee80211_sta_connection_lost(sdata, bssid,
2133 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY);
2095 } else if (ifmgd->probe_send_count < max_tries) { 2134 } else if (ifmgd->probe_send_count < max_tries) {
2096#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 2135#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
2097 wiphy_debug(local->hw.wiphy, 2136 wiphy_debug(local->hw.wiphy,
@@ -2113,7 +2152,8 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
2113 sdata->name, 2152 sdata->name,
2114 bssid, probe_wait_ms); 2153 bssid, probe_wait_ms);
2115 2154
2116 ieee80211_sta_connection_lost(sdata, bssid); 2155 ieee80211_sta_connection_lost(sdata, bssid,
2156 WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY);
2117 } 2157 }
2118 } 2158 }
2119 2159
@@ -2200,12 +2240,34 @@ void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata)
2200{ 2240{
2201 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 2241 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
2202 2242
2243 if (!ifmgd->associated)
2244 return;
2245
2246 if (sdata->flags & IEEE80211_SDATA_DISCONNECT_RESUME) {
2247 sdata->flags &= ~IEEE80211_SDATA_DISCONNECT_RESUME;
2248 mutex_lock(&ifmgd->mtx);
2249 if (ifmgd->associated) {
2250#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
2251 wiphy_debug(sdata->local->hw.wiphy,
2252 "%s: driver requested disconnect after resume.\n",
2253 sdata->name);
2254#endif
2255 ieee80211_sta_connection_lost(sdata,
2256 ifmgd->associated->bssid,
2257 WLAN_REASON_UNSPECIFIED);
2258 mutex_unlock(&ifmgd->mtx);
2259 return;
2260 }
2261 mutex_unlock(&ifmgd->mtx);
2262 }
2263
2203 if (test_and_clear_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running)) 2264 if (test_and_clear_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running))
2204 add_timer(&ifmgd->timer); 2265 add_timer(&ifmgd->timer);
2205 if (test_and_clear_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running)) 2266 if (test_and_clear_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running))
2206 add_timer(&ifmgd->chswitch_timer); 2267 add_timer(&ifmgd->chswitch_timer);
2207 ieee80211_sta_reset_beacon_monitor(sdata); 2268 ieee80211_sta_reset_beacon_monitor(sdata);
2208 ieee80211_restart_sta_timer(sdata); 2269 ieee80211_restart_sta_timer(sdata);
2270 ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.monitor_work);
2209} 2271}
2210#endif 2272#endif
2211 2273
@@ -2652,3 +2714,10 @@ void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif,
2652 cfg80211_cqm_rssi_notify(sdata->dev, rssi_event, gfp); 2714 cfg80211_cqm_rssi_notify(sdata->dev, rssi_event, gfp);
2653} 2715}
2654EXPORT_SYMBOL(ieee80211_cqm_rssi_notify); 2716EXPORT_SYMBOL(ieee80211_cqm_rssi_notify);
2717
2718unsigned char ieee80211_get_operstate(struct ieee80211_vif *vif)
2719{
2720 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
2721 return sdata->dev->operstate;
2722}
2723EXPORT_SYMBOL(ieee80211_get_operstate);
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 730778a2c90..f87e993e713 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -6,6 +6,28 @@
6#include "driver-ops.h" 6#include "driver-ops.h"
7#include "led.h" 7#include "led.h"
8 8
9/* return value indicates whether the driver should be further notified */
10static bool ieee80211_quiesce(struct ieee80211_sub_if_data *sdata)
11{
12 switch (sdata->vif.type) {
13 case NL80211_IFTYPE_STATION:
14 ieee80211_sta_quiesce(sdata);
15 return true;
16 case NL80211_IFTYPE_ADHOC:
17 ieee80211_ibss_quiesce(sdata);
18 return true;
19 case NL80211_IFTYPE_MESH_POINT:
20 ieee80211_mesh_quiesce(sdata);
21 return true;
22 case NL80211_IFTYPE_AP_VLAN:
23 case NL80211_IFTYPE_MONITOR:
24 /* don't tell driver about this */
25 return false;
26 default:
27 return true;
28 }
29}
30
9int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) 31int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
10{ 32{
11 struct ieee80211_local *local = hw_to_local(hw); 33 struct ieee80211_local *local = hw_to_local(hw);
@@ -50,11 +72,19 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
50 local->wowlan = wowlan && local->open_count; 72 local->wowlan = wowlan && local->open_count;
51 if (local->wowlan) { 73 if (local->wowlan) {
52 int err = drv_suspend(local, wowlan); 74 int err = drv_suspend(local, wowlan);
53 if (err) { 75 if (err < 0) {
54 local->quiescing = false; 76 local->quiescing = false;
55 return err; 77 return err;
78 } else if (err > 0) {
79 WARN_ON(err != 1);
80 local->wowlan = false;
81 } else {
82 list_for_each_entry(sdata, &local->interfaces, list) {
83 cancel_work_sync(&sdata->work);
84 ieee80211_quiesce(sdata);
85 }
86 goto suspend;
56 } 87 }
57 goto suspend;
58 } 88 }
59 89
60 /* disable keys */ 90 /* disable keys */
@@ -82,23 +112,8 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
82 list_for_each_entry(sdata, &local->interfaces, list) { 112 list_for_each_entry(sdata, &local->interfaces, list) {
83 cancel_work_sync(&sdata->work); 113 cancel_work_sync(&sdata->work);
84 114
85 switch(sdata->vif.type) { 115 if (!ieee80211_quiesce(sdata))
86 case NL80211_IFTYPE_STATION:
87 ieee80211_sta_quiesce(sdata);
88 break;
89 case NL80211_IFTYPE_ADHOC:
90 ieee80211_ibss_quiesce(sdata);
91 break;
92 case NL80211_IFTYPE_MESH_POINT:
93 ieee80211_mesh_quiesce(sdata);
94 break;
95 case NL80211_IFTYPE_AP_VLAN:
96 case NL80211_IFTYPE_MONITOR:
97 /* don't tell driver about this */
98 continue; 116 continue;
99 default:
100 break;
101 }
102 117
103 if (!ieee80211_sdata_running(sdata)) 118 if (!ieee80211_sdata_running(sdata))
104 continue; 119 continue;
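
The suspend path now distinguishes three drv_suspend() outcomes instead of two, and the per-interface quiesce switch is factored into ieee80211_quiesce() so both paths share it. The contract, as implemented above:

	/*
	 * err = drv_suspend(local, wowlan);
	 *
	 *   err < 0  : abort the suspend and propagate the error
	 *   err == 1 : driver cannot do WoWLAN this time; clear
	 *              local->wowlan and fall through to the full
	 *              teardown (keys disabled, interfaces removed)
	 *   err == 0 : WoWLAN armed; only cancel pending work and
	 *              quiesce interfaces, keys and hw state stay
	 *              programmed
	 *
	 * any other positive value trips the WARN_ON(err != 1)
	 */
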
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index 8adac67395f..58a89554b78 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -532,12 +532,21 @@ minstrel_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
532 mp->hw = hw; 532 mp->hw = hw;
533 mp->update_interval = 100; 533 mp->update_interval = 100;
534 534
535#ifdef CONFIG_MAC80211_DEBUGFS
536 mp->fixed_rate_idx = (u32) -1;
537 mp->dbg_fixed_rate = debugfs_create_u32("fixed_rate_idx",
538 S_IRUGO | S_IWUGO, debugfsdir, &mp->fixed_rate_idx);
539#endif
540
535 return mp; 541 return mp;
536} 542}
537 543
538static void 544static void
539minstrel_free(void *priv) 545minstrel_free(void *priv)
540{ 546{
547#ifdef CONFIG_MAC80211_DEBUGFS
548 debugfs_remove(((struct minstrel_priv *)priv)->dbg_fixed_rate);
549#endif
541 kfree(priv); 550 kfree(priv);
542} 551}
543 552
diff --git a/net/mac80211/rc80211_minstrel.h b/net/mac80211/rc80211_minstrel.h
index 0f5a83370aa..5d278eccaef 100644
--- a/net/mac80211/rc80211_minstrel.h
+++ b/net/mac80211/rc80211_minstrel.h
@@ -78,6 +78,18 @@ struct minstrel_priv {
78 unsigned int update_interval; 78 unsigned int update_interval;
79 unsigned int lookaround_rate; 79 unsigned int lookaround_rate;
80 unsigned int lookaround_rate_mrr; 80 unsigned int lookaround_rate_mrr;
81
82#ifdef CONFIG_MAC80211_DEBUGFS
83 /*
84 * enable fixed rate processing per RC
85 * - write static index to debugfs:ieee80211/phyX/rc/fixed_rate_idx
86 * - write -1 to enable RC processing again
87 * - setting will be applied on next update
88 */
89 u32 fixed_rate_idx;
90 struct dentry *dbg_fixed_rate;
91#endif
92
81}; 93};
82 94
83struct minstrel_debugfs_info { 95struct minstrel_debugfs_info {
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 333b5118be6..66a1eeb279c 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -609,6 +609,13 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
609 609
610 info->flags |= mi->tx_flags; 610 info->flags |= mi->tx_flags;
611 sample_idx = minstrel_get_sample_rate(mp, mi); 611 sample_idx = minstrel_get_sample_rate(mp, mi);
612
613#ifdef CONFIG_MAC80211_DEBUGFS
614 /* use fixed index if set */
615 if (mp->fixed_rate_idx != -1)
616 sample_idx = mp->fixed_rate_idx;
617#endif
618
612 if (sample_idx >= 0) { 619 if (sample_idx >= 0) {
613 sample = true; 620 sample = true;
614 minstrel_ht_set_rate(mp, mi, &ar[0], sample_idx, 621 minstrel_ht_set_rate(mp, mi, &ar[0], sample_idx,
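
The fixed_rate_idx knob added above is a testing aid: minstrel_ht consults it after picking a sample rate, so writing an index pins every transmission to that rate. Per the new struct comment in rc80211_minstrel.h, userspace usage looks like this (assuming debugfs is mounted at /sys/kernel/debug; phy0 is an example):

	# pin all transmissions on phy0 to rate index 4
	echo 4 > /sys/kernel/debug/ieee80211/phy0/rc/fixed_rate_idx
	# write -1 to re-enable normal rate-control processing
	echo -1 > /sys/kernel/debug/ieee80211/phy0/rc/fixed_rate_idx
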
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 7fa8c6be7bf..fe2c2a71779 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -331,15 +331,18 @@ static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
331{ 331{
332 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; 332 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
333 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); 333 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
334 int tid; 334 int tid, seqno_idx, security_idx;
335 335
336 /* does the frame have a qos control field? */ 336 /* does the frame have a qos control field? */
337 if (ieee80211_is_data_qos(hdr->frame_control)) { 337 if (ieee80211_is_data_qos(hdr->frame_control)) {
338 u8 *qc = ieee80211_get_qos_ctl(hdr); 338 u8 *qc = ieee80211_get_qos_ctl(hdr);
339 /* frame has qos control */ 339 /* frame has qos control */
340 tid = *qc & IEEE80211_QOS_CTL_TID_MASK; 340 tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
341 if (*qc & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT) 341 if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
342 status->rx_flags |= IEEE80211_RX_AMSDU; 342 status->rx_flags |= IEEE80211_RX_AMSDU;
343
344 seqno_idx = tid;
345 security_idx = tid;
343 } else { 346 } else {
344 /* 347 /*
345 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"): 348 * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"):
@@ -352,10 +355,15 @@ static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
352 * 355 *
353 * We also use that counter for non-QoS STAs. 356 * We also use that counter for non-QoS STAs.
354 */ 357 */
355 tid = NUM_RX_DATA_QUEUES - 1; 358 seqno_idx = NUM_RX_DATA_QUEUES;
359 security_idx = 0;
360 if (ieee80211_is_mgmt(hdr->frame_control))
361 security_idx = NUM_RX_DATA_QUEUES;
362 tid = 0;
356 } 363 }
357 364
358 rx->queue = tid; 365 rx->seqno_idx = seqno_idx;
366 rx->security_idx = security_idx;
359 /* Set skb->priority to 1d tag if highest order bit of TID is not set. 367 /* Set skb->priority to 1d tag if highest order bit of TID is not set.
360 * For now, set skb->priority to 0 for other cases. */ 368 * For now, set skb->priority to 0 for other cases. */
361 rx->skb->priority = (tid > 7) ? 0 : tid; 369 rx->skb->priority = (tid > 7) ? 0 : tid;
@@ -810,7 +818,7 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
810 /* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */ 818 /* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */
811 if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) { 819 if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) {
812 if (unlikely(ieee80211_has_retry(hdr->frame_control) && 820 if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
813 rx->sta->last_seq_ctrl[rx->queue] == 821 rx->sta->last_seq_ctrl[rx->seqno_idx] ==
814 hdr->seq_ctrl)) { 822 hdr->seq_ctrl)) {
815 if (status->rx_flags & IEEE80211_RX_RA_MATCH) { 823 if (status->rx_flags & IEEE80211_RX_RA_MATCH) {
816 rx->local->dot11FrameDuplicateCount++; 824 rx->local->dot11FrameDuplicateCount++;
@@ -818,7 +826,7 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
818 } 826 }
819 return RX_DROP_UNUSABLE; 827 return RX_DROP_UNUSABLE;
820 } else 828 } else
821 rx->sta->last_seq_ctrl[rx->queue] = hdr->seq_ctrl; 829 rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl;
822 } 830 }
823 831
824 if (unlikely(rx->skb->len < 16)) { 832 if (unlikely(rx->skb->len < 16)) {
@@ -1011,6 +1019,9 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
1011 } 1019 }
1012 1020
1013 if (rx->key) { 1021 if (rx->key) {
1022 if (unlikely(rx->key->flags & KEY_FLAG_TAINTED))
1023 return RX_DROP_MONITOR;
1024
1014 rx->key->tx_rx_count++; 1025 rx->key->tx_rx_count++;
1015 /* TODO: add threshold stuff again */ 1026 /* TODO: add threshold stuff again */
1016 } else { 1027 } else {
@@ -1374,11 +1385,10 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
1374 if (frag == 0) { 1385 if (frag == 0) {
1375 /* This is the first fragment of a new frame. */ 1386 /* This is the first fragment of a new frame. */
1376 entry = ieee80211_reassemble_add(rx->sdata, frag, seq, 1387 entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
1377 rx->queue, &(rx->skb)); 1388 rx->seqno_idx, &(rx->skb));
1378 if (rx->key && rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP && 1389 if (rx->key && rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP &&
1379 ieee80211_has_protected(fc)) { 1390 ieee80211_has_protected(fc)) {
1380 int queue = ieee80211_is_mgmt(fc) ? 1391 int queue = rx->security_idx;
1381 NUM_RX_DATA_QUEUES : rx->queue;
1382 /* Store CCMP PN so that we can verify that the next 1392 /* Store CCMP PN so that we can verify that the next
1383 * fragment has a sequential PN value. */ 1393 * fragment has a sequential PN value. */
1384 entry->ccmp = 1; 1394 entry->ccmp = 1;
@@ -1392,7 +1402,8 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
1392 /* This is a fragment for a frame that should already be pending in 1402 /* This is a fragment for a frame that should already be pending in
1393 * fragment cache. Add this fragment to the end of the pending entry. 1403 * fragment cache. Add this fragment to the end of the pending entry.
1394 */ 1404 */
1395 entry = ieee80211_reassemble_find(rx->sdata, frag, seq, rx->queue, hdr); 1405 entry = ieee80211_reassemble_find(rx->sdata, frag, seq,
1406 rx->seqno_idx, hdr);
1396 if (!entry) { 1407 if (!entry) {
1397 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); 1408 I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
1398 return RX_DROP_MONITOR; 1409 return RX_DROP_MONITOR;
@@ -1412,8 +1423,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
1412 if (pn[i]) 1423 if (pn[i])
1413 break; 1424 break;
1414 } 1425 }
1415 queue = ieee80211_is_mgmt(fc) ? 1426 queue = rx->security_idx;
1416 NUM_RX_DATA_QUEUES : rx->queue;
1417 rpn = rx->key->u.ccmp.rx_pn[queue]; 1427 rpn = rx->key->u.ccmp.rx_pn[queue];
1418 if (memcmp(pn, rpn, CCMP_PN_LEN)) 1428 if (memcmp(pn, rpn, CCMP_PN_LEN))
1419 return RX_DROP_UNUSABLE; 1429 return RX_DROP_UNUSABLE;
@@ -2590,7 +2600,9 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
2590 .sta = sta, 2600 .sta = sta,
2591 .sdata = sta->sdata, 2601 .sdata = sta->sdata,
2592 .local = sta->local, 2602 .local = sta->local,
2593 .queue = tid, 2603 /* This is OK -- must be QoS data frame */
2604 .security_idx = tid,
2605 .seqno_idx = tid,
2594 .flags = 0, 2606 .flags = 0,
2595 }; 2607 };
2596 struct tid_ampdu_rx *tid_agg_rx; 2608 struct tid_ampdu_rx *tid_agg_rx;
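
The rx.c changes split the old rx->queue into two indices: seqno_idx, used for duplicate detection and defragmentation, and security_idx, used for per-key IV/PN replay state. QoS data frames use the TID for both; non-QoS data frames share one extra sequence-number slot (which is why the sta_info.h hunk below grows last_seq_ctrl by one entry) but use security index 0; robust management frames keep a dedicated security slot. A compact restatement of the selection logic (illustrative, not part of the patch):

	static void rx_pick_indices(__le16 fc, u8 tid,
				    int *seqno_idx, int *security_idx)
	{
		if (ieee80211_is_data_qos(fc)) {
			*seqno_idx = tid;	/* per-TID dup detection */
			*security_idx = tid;	/* per-TID PN/IV state */
		} else {
			/* all non-QoS frames share one seqno counter */
			*seqno_idx = NUM_RX_DATA_QUEUES;
			/* mgmt frames get their own PN slot, data uses 0 */
			*security_idx = ieee80211_is_mgmt(fc) ?
					NUM_RX_DATA_QUEUES : 0;
		}
	}
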
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 58ffa7d069c..08a45ac3d6f 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -228,6 +228,7 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
228static bool ieee80211_prep_hw_scan(struct ieee80211_local *local) 228static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
229{ 229{
230 struct cfg80211_scan_request *req = local->scan_req; 230 struct cfg80211_scan_request *req = local->scan_req;
231 struct ieee80211_sub_if_data *sdata = local->scan_sdata;
231 enum ieee80211_band band; 232 enum ieee80211_band band;
232 int i, ielen, n_chans; 233 int i, ielen, n_chans;
233 234
@@ -251,8 +252,8 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
251 local->hw_scan_req->n_channels = n_chans; 252 local->hw_scan_req->n_channels = n_chans;
252 253
253 ielen = ieee80211_build_preq_ies(local, (u8 *)local->hw_scan_req->ie, 254 ielen = ieee80211_build_preq_ies(local, (u8 *)local->hw_scan_req->ie,
254 req->ie, req->ie_len, band, (u32) -1, 255 req->ie, req->ie_len, band,
255 0); 256 sdata->rc_rateidx_mask[band], 0);
256 local->hw_scan_req->ie_len = ielen; 257 local->hw_scan_req->ie_len = ielen;
257 258
258 return true; 259 return true;
@@ -658,7 +659,8 @@ static void ieee80211_scan_state_send_probe(struct ieee80211_local *local,
658 sdata, NULL, 659 sdata, NULL,
659 local->scan_req->ssids[i].ssid, 660 local->scan_req->ssids[i].ssid,
660 local->scan_req->ssids[i].ssid_len, 661 local->scan_req->ssids[i].ssid_len,
661 local->scan_req->ie, local->scan_req->ie_len); 662 local->scan_req->ie, local->scan_req->ie_len,
663 false);
662 664
663 /* 665 /*
664 * After sending probe requests, wait for probe responses 666 * After sending probe requests, wait for probe responses
@@ -821,10 +823,8 @@ int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata,
821 */ 823 */
822void ieee80211_scan_cancel(struct ieee80211_local *local) 824void ieee80211_scan_cancel(struct ieee80211_local *local)
823{ 825{
824 bool abortscan;
825
826 /* 826 /*
827 * We are only canceling software scan, or deferred scan that was not 827 * We are canceling software scan, or deferred scan that was not
828 * yet really started (see __ieee80211_start_scan ). 828 * yet really started (see __ieee80211_start_scan ).
829 * 829 *
830 * Regarding hardware scan: 830 * Regarding hardware scan:
@@ -836,23 +836,30 @@ void ieee80211_scan_cancel(struct ieee80211_local *local)
836 * - we can not cancel scan_work since driver can schedule it 836 * - we can not cancel scan_work since driver can schedule it
837 * by ieee80211_scan_completed(..., true) to finish scan 837 * by ieee80211_scan_completed(..., true) to finish scan
838 * 838 *
839 * Hence low lever driver is responsible for canceling HW scan. 839 * Hence we only call the cancel_hw_scan() callback, but the low-level
840 * driver is still responsible for calling ieee80211_scan_completed()
841 * after the scan was completed/aborted.
840 */ 842 */
841 843
842 mutex_lock(&local->mtx); 844 mutex_lock(&local->mtx);
843 abortscan = local->scan_req && !test_bit(SCAN_HW_SCANNING, &local->scanning); 845 if (!local->scan_req)
844 if (abortscan) { 846 goto out;
845 /* 847
846 * The scan is canceled, but stop work from being pending. 848 if (test_bit(SCAN_HW_SCANNING, &local->scanning)) {
847 * 849 if (local->ops->cancel_hw_scan)
848 * If the work is currently running, it must be blocked on 850 drv_cancel_hw_scan(local, local->scan_sdata);
849 * the mutex, but we'll set scan_sdata = NULL and it'll 851 goto out;
850 * simply exit once it acquires the mutex.
851 */
852 cancel_delayed_work(&local->scan_work);
853 /* and clean up */
854 __ieee80211_scan_completed(&local->hw, true, false);
855 } 852 }
853
854 /*
855 * If the work is currently running, it must be blocked on
856 * the mutex, but we'll set scan_sdata = NULL and it'll
857 * simply exit once it acquires the mutex.
858 */
859 cancel_delayed_work(&local->scan_work);
860 /* and clean up */
861 __ieee80211_scan_completed(&local->hw, true, false);
862out:
856 mutex_unlock(&local->mtx); 863 mutex_unlock(&local->mtx);
857} 864}
858 865
@@ -877,7 +884,8 @@ int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
877 for (i = 0; i < IEEE80211_NUM_BANDS; i++) { 884 for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
878 local->sched_scan_ies.ie[i] = kzalloc(2 + 885 local->sched_scan_ies.ie[i] = kzalloc(2 +
879 IEEE80211_MAX_SSID_LEN + 886 IEEE80211_MAX_SSID_LEN +
880 local->scan_ies_len, 887 local->scan_ies_len +
888 req->ie_len,
881 GFP_KERNEL); 889 GFP_KERNEL);
882 if (!local->sched_scan_ies.ie[i]) { 890 if (!local->sched_scan_ies.ie[i]) {
883 ret = -ENOMEM; 891 ret = -ENOMEM;
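
Three scan.c fixes ride along here: ieee80211_prep_hw_scan() now passes the interface's configured rate mask instead of all rates, ieee80211_scan_cancel() also kicks a pending hardware scan via the driver's cancel_hw_scan() callback (the driver still owns the final ieee80211_scan_completed() call), and the scheduled-scan IE buffers are sized with req->ie_len included so user-supplied IEs no longer overrun the allocation. A sketch of a driver's side of the new cancel contract (illustrative; my_hw_abort_scan() is hypothetical):

	static void my_cancel_hw_scan(struct ieee80211_hw *hw,
				      struct ieee80211_vif *vif)
	{
		struct my_priv *priv = hw->priv;

		my_hw_abort_scan(priv);		/* tear down the firmware scan */
		ieee80211_scan_completed(hw, true);	/* aborted == true */
	}
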
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index c6ae8718bd5..28beb78e601 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -158,6 +158,8 @@ struct tid_ampdu_rx {
158 * @work: work struct for starting/stopping aggregation 158 * @work: work struct for starting/stopping aggregation
159 * @tid_rx_timer_expired: bitmap indicating on which TIDs the 159 * @tid_rx_timer_expired: bitmap indicating on which TIDs the
160 * RX timer expired until the work for it runs 160 * RX timer expired until the work for it runs
161 * @tid_rx_stop_requested: bitmap indicating which BA sessions per TID the
162 * driver requested to close until the work for it runs
161 * @mtx: mutex to protect all TX data (except non-NULL assignments 163 * @mtx: mutex to protect all TX data (except non-NULL assignments
162 * to tid_tx[idx], which are protected by the sta spinlock) 164 * to tid_tx[idx], which are protected by the sta spinlock)
163 */ 165 */
@@ -166,6 +168,7 @@ struct sta_ampdu_mlme {
166 /* rx */ 168 /* rx */
167 struct tid_ampdu_rx __rcu *tid_rx[STA_TID_NUM]; 169 struct tid_ampdu_rx __rcu *tid_rx[STA_TID_NUM];
168 unsigned long tid_rx_timer_expired[BITS_TO_LONGS(STA_TID_NUM)]; 170 unsigned long tid_rx_timer_expired[BITS_TO_LONGS(STA_TID_NUM)];
171 unsigned long tid_rx_stop_requested[BITS_TO_LONGS(STA_TID_NUM)];
169 /* tx */ 172 /* tx */
170 struct work_struct work; 173 struct work_struct work;
171 struct tid_ampdu_tx __rcu *tid_tx[STA_TID_NUM]; 174 struct tid_ampdu_tx __rcu *tid_tx[STA_TID_NUM];
@@ -284,7 +287,8 @@ struct sta_info {
284 unsigned long rx_dropped; 287 unsigned long rx_dropped;
285 int last_signal; 288 int last_signal;
286 struct ewma avg_signal; 289 struct ewma avg_signal;
287 __le16 last_seq_ctrl[NUM_RX_DATA_QUEUES]; 290 /* Plus 1 for non-QoS frames */
291 __le16 last_seq_ctrl[NUM_RX_DATA_QUEUES + 1];
288 292
289 /* Updated from TX status path only, no locking requirements */ 293 /* Updated from TX status path only, no locking requirements */
290 unsigned long tx_filtered_count; 294 unsigned long tx_filtered_count;
diff --git a/net/mac80211/tkip.c b/net/mac80211/tkip.c
index 757e4eb2baf..cc79e697cdb 100644
--- a/net/mac80211/tkip.c
+++ b/net/mac80211/tkip.c
@@ -101,6 +101,7 @@ static void tkip_mixing_phase1(const u8 *tk, struct tkip_ctx *ctx,
101 p1k[4] += tkipS(p1k[3] ^ get_unaligned_le16(tk + 0 + j)) + i; 101 p1k[4] += tkipS(p1k[3] ^ get_unaligned_le16(tk + 0 + j)) + i;
102 } 102 }
103 ctx->state = TKIP_STATE_PHASE1_DONE; 103 ctx->state = TKIP_STATE_PHASE1_DONE;
104 ctx->p1k_iv32 = tsc_IV32;
104} 105}
105 106
106static void tkip_mixing_phase2(const u8 *tk, struct tkip_ctx *ctx, 107static void tkip_mixing_phase2(const u8 *tk, struct tkip_ctx *ctx,
@@ -140,60 +141,69 @@ static void tkip_mixing_phase2(const u8 *tk, struct tkip_ctx *ctx,
140/* Add TKIP IV and Ext. IV at @pos. @iv0, @iv1, and @iv2 are the first octets 141/* Add TKIP IV and Ext. IV at @pos. @iv0, @iv1, and @iv2 are the first octets
141 * of the IV. Returns pointer to the octet following IVs (i.e., beginning of 142 * of the IV. Returns pointer to the octet following IVs (i.e., beginning of
142 * the packet payload). */ 143 * the packet payload). */
143u8 *ieee80211_tkip_add_iv(u8 *pos, struct ieee80211_key *key, u16 iv16) 144u8 *ieee80211_tkip_add_iv(u8 *pos, struct ieee80211_key *key)
144{ 145{
145 pos = write_tkip_iv(pos, iv16); 146 lockdep_assert_held(&key->u.tkip.txlock);
147
148 pos = write_tkip_iv(pos, key->u.tkip.tx.iv16);
146 *pos++ = (key->conf.keyidx << 6) | (1 << 5) /* Ext IV */; 149 *pos++ = (key->conf.keyidx << 6) | (1 << 5) /* Ext IV */;
147 put_unaligned_le32(key->u.tkip.tx.iv32, pos); 150 put_unaligned_le32(key->u.tkip.tx.iv32, pos);
148 return pos + 4; 151 return pos + 4;
149} 152}
150 153
151void ieee80211_get_tkip_key(struct ieee80211_key_conf *keyconf, 154static void ieee80211_compute_tkip_p1k(struct ieee80211_key *key, u32 iv32)
152 struct sk_buff *skb, enum ieee80211_tkip_key_type type,
153 u8 *outkey)
154{ 155{
155 struct ieee80211_key *key = (struct ieee80211_key *) 156 struct ieee80211_sub_if_data *sdata = key->sdata;
156 container_of(keyconf, struct ieee80211_key, conf); 157 struct tkip_ctx *ctx = &key->u.tkip.tx;
157 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 158 const u8 *tk = &key->conf.key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY];
158 u8 *data;
159 const u8 *tk;
160 struct tkip_ctx *ctx;
161 u16 iv16;
162 u32 iv32;
163
164 data = (u8 *)hdr + ieee80211_hdrlen(hdr->frame_control);
165 iv16 = data[2] | (data[0] << 8);
166 iv32 = get_unaligned_le32(&data[4]);
167
168 tk = &key->conf.key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY];
169 ctx = &key->u.tkip.tx;
170 159
171#ifdef CONFIG_MAC80211_TKIP_DEBUG 160 lockdep_assert_held(&key->u.tkip.txlock);
172 printk(KERN_DEBUG "TKIP encrypt: iv16 = 0x%04x, iv32 = 0x%08x\n", 161
173 iv16, iv32); 162 /*
174 163 * Update the P1K when the IV32 is different from the value it
175 if (iv32 != ctx->iv32) { 164 * had when we last computed it (or when not initialised yet).
176 printk(KERN_DEBUG "skb: iv32 = 0x%08x key: iv32 = 0x%08x\n", 165 * This might flip-flop back and forth if packets are processed
177 iv32, ctx->iv32); 166 * out-of-order due to the different ACs, but then we have to
178 printk(KERN_DEBUG "Wrap around of iv16 in the middle of a " 167 * just compute the P1K more often.
179 "fragmented packet\n"); 168 */
180 } 169 if (ctx->p1k_iv32 != iv32 || ctx->state == TKIP_STATE_NOT_INIT)
181#endif 170 tkip_mixing_phase1(tk, ctx, sdata->vif.addr, iv32);
171}
182 172
183 /* Update the p1k only when the iv16 in the packet wraps around, this 173void ieee80211_get_tkip_p1k_iv(struct ieee80211_key_conf *keyconf,
184 * might occur after the wrap around of iv16 in the key in case of 174 u32 iv32, u16 *p1k)
185 * fragmented packets. */ 175{
186 if (iv16 == 0 || ctx->state == TKIP_STATE_NOT_INIT) 176 struct ieee80211_key *key = (struct ieee80211_key *)
187 tkip_mixing_phase1(tk, ctx, hdr->addr2, iv32); 177 container_of(keyconf, struct ieee80211_key, conf);
178 struct tkip_ctx *ctx = &key->u.tkip.tx;
179 unsigned long flags;
188 180
189 if (type == IEEE80211_TKIP_P1_KEY) { 181 spin_lock_irqsave(&key->u.tkip.txlock, flags);
190 memcpy(outkey, ctx->p1k, sizeof(u16) * 5); 182 ieee80211_compute_tkip_p1k(key, iv32);
191 return; 183 memcpy(p1k, ctx->p1k, sizeof(ctx->p1k));
192 } 184 spin_unlock_irqrestore(&key->u.tkip.txlock, flags);
185}
186EXPORT_SYMBOL(ieee80211_get_tkip_p1k_iv);
193 187
194 tkip_mixing_phase2(tk, ctx, iv16, outkey); 188void ieee80211_get_tkip_p2k(struct ieee80211_key_conf *keyconf,
189 struct sk_buff *skb, u8 *p2k)
190{
191 struct ieee80211_key *key = (struct ieee80211_key *)
192 container_of(keyconf, struct ieee80211_key, conf);
193 const u8 *tk = &key->conf.key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY];
194 struct tkip_ctx *ctx = &key->u.tkip.tx;
195 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
196 const u8 *data = (u8 *)hdr + ieee80211_hdrlen(hdr->frame_control);
197 u32 iv32 = get_unaligned_le32(&data[4]);
198 u16 iv16 = data[2] | (data[0] << 8);
199 unsigned long flags;
200
201 spin_lock_irqsave(&key->u.tkip.txlock, flags);
202 ieee80211_compute_tkip_p1k(key, iv32);
203 tkip_mixing_phase2(tk, ctx, iv16, p2k);
204 spin_unlock_irqrestore(&key->u.tkip.txlock, flags);
195} 205}
196EXPORT_SYMBOL(ieee80211_get_tkip_key); 206EXPORT_SYMBOL(ieee80211_get_tkip_p2k);
197 207
198/* 208/*
199 * Encrypt packet payload with TKIP using @key. @pos is a pointer to the 209 * Encrypt packet payload with TKIP using @key. @pos is a pointer to the
@@ -204,19 +214,15 @@ EXPORT_SYMBOL(ieee80211_get_tkip_key);
204 */ 214 */
205int ieee80211_tkip_encrypt_data(struct crypto_cipher *tfm, 215int ieee80211_tkip_encrypt_data(struct crypto_cipher *tfm,
206 struct ieee80211_key *key, 216 struct ieee80211_key *key,
207 u8 *pos, size_t payload_len, u8 *ta) 217 struct sk_buff *skb,
218 u8 *payload, size_t payload_len)
208{ 219{
209 u8 rc4key[16]; 220 u8 rc4key[16];
210 struct tkip_ctx *ctx = &key->u.tkip.tx;
211 const u8 *tk = &key->conf.key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY];
212
213 /* Calculate per-packet key */
214 if (ctx->iv16 == 0 || ctx->state == TKIP_STATE_NOT_INIT)
215 tkip_mixing_phase1(tk, ctx, ta, ctx->iv32);
216 221
217 tkip_mixing_phase2(tk, ctx, ctx->iv16, rc4key); 222 ieee80211_get_tkip_p2k(&key->conf, skb, rc4key);
218 223
219 return ieee80211_wep_encrypt_data(tfm, rc4key, 16, pos, payload_len); 224 return ieee80211_wep_encrypt_data(tfm, rc4key, 16,
225 payload, payload_len);
220} 226}
221 227
222/* Decrypt packet payload with TKIP using @key. @pos is a pointer to the 228/* Decrypt packet payload with TKIP using @key. @pos is a pointer to the
diff --git a/net/mac80211/tkip.h b/net/mac80211/tkip.h
index 1cab9c86978..e3ecb659b90 100644
--- a/net/mac80211/tkip.h
+++ b/net/mac80211/tkip.h
@@ -13,11 +13,13 @@
13#include <linux/crypto.h> 13#include <linux/crypto.h>
14#include "key.h" 14#include "key.h"
15 15
16u8 *ieee80211_tkip_add_iv(u8 *pos, struct ieee80211_key *key, u16 iv16); 16u8 *ieee80211_tkip_add_iv(u8 *pos, struct ieee80211_key *key);
17 17
18int ieee80211_tkip_encrypt_data(struct crypto_cipher *tfm, 18int ieee80211_tkip_encrypt_data(struct crypto_cipher *tfm,
19 struct ieee80211_key *key, 19 struct ieee80211_key *key,
20 u8 *pos, size_t payload_len, u8 *ta); 20 struct sk_buff *skb,
21 u8 *payload, size_t payload_len);
22
21enum { 23enum {
22 TKIP_DECRYPT_OK = 0, 24 TKIP_DECRYPT_OK = 0,
23 TKIP_DECRYPT_NO_EXT_IV = -1, 25 TKIP_DECRYPT_NO_EXT_IV = -1,
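
The TKIP rework replaces ieee80211_get_tkip_key() with two narrower exports and moves all phase-1 state under key->u.tkip.txlock, caching the P1K per IV32 and recomputing it whenever p1k_iv32 changes (e.g. when frames from different ACs are processed out of order). Driver-side use might look like this (a sketch against the signatures shown above; my_hw_load_tkip() is hypothetical):

	static void my_tx_key_setup(struct ieee80211_key_conf *keyconf,
				    struct sk_buff *skb, u32 iv32)
	{
		u16 p1k[5];	/* phase-1 key: five u16 words */
		u8 p2k[16];	/* full per-packet RC4 key */

		/* hardware that caches the phase-1 key per IV32: */
		ieee80211_get_tkip_p1k_iv(keyconf, iv32, p1k);

		/* or derive the complete phase-2 key from the frame: */
		ieee80211_get_tkip_p2k(keyconf, skb, p2k);

		my_hw_load_tkip(p1k, p2k);	/* hypothetical upload */
	}
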
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 3104c844b54..8cb0d2d0ac6 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -589,6 +589,9 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
589 break; 589 break;
590 } 590 }
591 591
592 if (unlikely(tx->key && tx->key->flags & KEY_FLAG_TAINTED))
593 return TX_DROP;
594
592 if (!skip_hw && tx->key && 595 if (!skip_hw && tx->key &&
593 tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) 596 tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)
594 info->control.hw_key = &tx->key->conf; 597 info->control.hw_key = &tx->key->conf;
@@ -1474,18 +1477,14 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
1474 1477
1475/* device xmit handlers */ 1478/* device xmit handlers */
1476 1479
1477static int ieee80211_skb_resize(struct ieee80211_local *local, 1480static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
1478 struct sk_buff *skb, 1481 struct sk_buff *skb,
1479 int head_need, bool may_encrypt) 1482 int head_need, bool may_encrypt)
1480{ 1483{
1484 struct ieee80211_local *local = sdata->local;
1481 int tail_need = 0; 1485 int tail_need = 0;
1482 1486
1483 /* 1487 if (may_encrypt && sdata->crypto_tx_tailroom_needed_cnt) {
1484 * This could be optimised, devices that do full hardware
1485 * crypto (including TKIP MMIC) need no tailroom... But we
1486 * have no drivers for such devices currently.
1487 */
1488 if (may_encrypt) {
1489 tail_need = IEEE80211_ENCRYPT_TAILROOM; 1488 tail_need = IEEE80211_ENCRYPT_TAILROOM;
1490 tail_need -= skb_tailroom(skb); 1489 tail_need -= skb_tailroom(skb);
1491 tail_need = max_t(int, tail_need, 0); 1490 tail_need = max_t(int, tail_need, 0);
@@ -1578,7 +1577,7 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
1578 headroom -= skb_headroom(skb); 1577 headroom -= skb_headroom(skb);
1579 headroom = max_t(int, 0, headroom); 1578 headroom = max_t(int, 0, headroom);
1580 1579
1581 if (ieee80211_skb_resize(local, skb, headroom, may_encrypt)) { 1580 if (ieee80211_skb_resize(sdata, skb, headroom, may_encrypt)) {
1582 dev_kfree_skb(skb); 1581 dev_kfree_skb(skb);
1583 rcu_read_unlock(); 1582 rcu_read_unlock();
1584 return; 1583 return;
@@ -1945,7 +1944,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1945 head_need += IEEE80211_ENCRYPT_HEADROOM; 1944 head_need += IEEE80211_ENCRYPT_HEADROOM;
1946 head_need += local->tx_headroom; 1945 head_need += local->tx_headroom;
1947 head_need = max_t(int, 0, head_need); 1946 head_need = max_t(int, 0, head_need);
1948 if (ieee80211_skb_resize(local, skb, head_need, true)) 1947 if (ieee80211_skb_resize(sdata, skb, head_need, true))
1949 goto fail; 1948 goto fail;
1950 } 1949 }
1951 1950
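
The ieee80211_skb_resize() change stops reserving encryption tailroom unconditionally: sdata->crypto_tx_tailroom_needed_cnt (presumably maintained by the key code as keys needing software MMIC/ICV space come and go) lets fully hardware-offloaded setups skip the reservation, while the KEY_FLAG_TAINTED check above drops frames that would otherwise go out on a stale key. The new decision, restated:

	/* sketch of the tailroom decision (mirrors the hunk above) */
	if (may_encrypt && sdata->crypto_tx_tailroom_needed_cnt)
		tail_need = max_t(int, IEEE80211_ENCRYPT_TAILROOM -
					skb_tailroom(skb), 0);
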
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index d3fe2d23748..5bfb80cba63 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1018,7 +1018,8 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
1018struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata, 1018struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
1019 u8 *dst, 1019 u8 *dst,
1020 const u8 *ssid, size_t ssid_len, 1020 const u8 *ssid, size_t ssid_len,
1021 const u8 *ie, size_t ie_len) 1021 const u8 *ie, size_t ie_len,
1022 bool directed)
1022{ 1023{
1023 struct ieee80211_local *local = sdata->local; 1024 struct ieee80211_local *local = sdata->local;
1024 struct sk_buff *skb; 1025 struct sk_buff *skb;
@@ -1035,8 +1036,16 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
1035 return NULL; 1036 return NULL;
1036 } 1037 }
1037 1038
1038 chan = ieee80211_frequency_to_channel( 1039 /*
1039 local->hw.conf.channel->center_freq); 1040 * Do not send DS Channel parameter for directed probe requests
1041 * in order to maximize the chance that we get a response. Some
1042 * badly-behaved APs don't respond when this parameter is included.
1043 */
1044 if (directed)
1045 chan = 0;
1046 else
1047 chan = ieee80211_frequency_to_channel(
1048 local->hw.conf.channel->center_freq);
1040 1049
1041 buf_len = ieee80211_build_preq_ies(local, buf, ie, ie_len, 1050 buf_len = ieee80211_build_preq_ies(local, buf, ie, ie_len,
1042 local->hw.conf.channel->band, 1051 local->hw.conf.channel->band,
@@ -1062,11 +1071,13 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
1062 1071
1063void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst, 1072void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
1064 const u8 *ssid, size_t ssid_len, 1073 const u8 *ssid, size_t ssid_len,
1065 const u8 *ie, size_t ie_len) 1074 const u8 *ie, size_t ie_len,
1075 bool directed)
1066{ 1076{
1067 struct sk_buff *skb; 1077 struct sk_buff *skb;
1068 1078
1069 skb = ieee80211_build_probe_req(sdata, dst, ssid, ssid_len, ie, ie_len); 1079 skb = ieee80211_build_probe_req(sdata, dst, ssid, ssid_len, ie, ie_len,
1080 directed);
1070 if (skb) 1081 if (skb)
1071 ieee80211_tx_skb(sdata, skb); 1082 ieee80211_tx_skb(sdata, skb);
1072} 1083}
@@ -1276,7 +1287,9 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1276 if (ieee80211_sdata_running(sdata)) 1287 if (ieee80211_sdata_running(sdata))
1277 ieee80211_enable_keys(sdata); 1288 ieee80211_enable_keys(sdata);
1278 1289
1290#ifdef CONFIG_PM
1279 wake_up: 1291 wake_up:
1292#endif
1280 ieee80211_wake_queues_by_reason(hw, 1293 ieee80211_wake_queues_by_reason(hw,
1281 IEEE80211_QUEUE_STOP_REASON_SUSPEND); 1294 IEEE80211_QUEUE_STOP_REASON_SUSPEND);
1282 1295
@@ -1321,6 +1334,33 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1321 return 0; 1334 return 0;
1322} 1335}
1323 1336
1337void ieee80211_resume_disconnect(struct ieee80211_vif *vif)
1338{
1339 struct ieee80211_sub_if_data *sdata;
1340 struct ieee80211_local *local;
1341 struct ieee80211_key *key;
1342
1343 if (WARN_ON(!vif))
1344 return;
1345
1346 sdata = vif_to_sdata(vif);
1347 local = sdata->local;
1348
1349 if (WARN_ON(!local->resuming))
1350 return;
1351
1352 if (WARN_ON(vif->type != NL80211_IFTYPE_STATION))
1353 return;
1354
1355 sdata->flags |= IEEE80211_SDATA_DISCONNECT_RESUME;
1356
1357 mutex_lock(&local->key_mtx);
1358 list_for_each_entry(key, &sdata->key_list, list)
1359 key->flags |= KEY_FLAG_TAINTED;
1360 mutex_unlock(&local->key_mtx);
1361}
1362EXPORT_SYMBOL_GPL(ieee80211_resume_disconnect);
1363
1324static int check_mgd_smps(struct ieee80211_if_managed *ifmgd, 1364static int check_mgd_smps(struct ieee80211_if_managed *ifmgd,
1325 enum ieee80211_smps_mode *smps_mode) 1365 enum ieee80211_smps_mode *smps_mode)
1326{ 1366{
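
ieee80211_resume_disconnect() gives WoWLAN drivers a clean way to force a disconnect from within their resume callback: it flags the station interface for disconnection and marks every key KEY_FLAG_TAINTED, which the tx.c and rx.c hunks above turn into TX_DROP/RX_DROP_MONITOR so no traffic uses stale key material. Illustrative caller (my_hw_lost_state() and priv->vif are hypothetical):

	static int my_drv_resume(struct ieee80211_hw *hw)
	{
		struct my_priv *priv = hw->priv;

		/* firmware lost the association state across suspend */
		if (my_hw_lost_state(priv))
			ieee80211_resume_disconnect(priv->vif);

		return 0;
	}
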
@@ -1437,3 +1477,43 @@ size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset)
1437 1477
1438 return pos; 1478 return pos;
1439} 1479}
1480
1481static void _ieee80211_enable_rssi_reports(struct ieee80211_sub_if_data *sdata,
1482 int rssi_min_thold,
1483 int rssi_max_thold)
1484{
1485 trace_api_enable_rssi_reports(sdata, rssi_min_thold, rssi_max_thold);
1486
1487 if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION))
1488 return;
1489
1490 /*
1491 * Scale up threshold values before storing it, as the RSSI averaging
1492 * algorithm uses a scaled up value as well. Change this scaling
1493 * factor if the RSSI averaging algorithm changes.
1494 */
1495 sdata->u.mgd.rssi_min_thold = rssi_min_thold*16;
1496 sdata->u.mgd.rssi_max_thold = rssi_max_thold*16;
1497}
1498
1499void ieee80211_enable_rssi_reports(struct ieee80211_vif *vif,
1500 int rssi_min_thold,
1501 int rssi_max_thold)
1502{
1503 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
1504
1505 WARN_ON(rssi_min_thold == rssi_max_thold ||
1506 rssi_min_thold > rssi_max_thold);
1507
1508 _ieee80211_enable_rssi_reports(sdata, rssi_min_thold,
1509 rssi_max_thold);
1510}
1511EXPORT_SYMBOL(ieee80211_enable_rssi_reports);
1512
1513void ieee80211_disable_rssi_reports(struct ieee80211_vif *vif)
1514{
1515 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
1516
1517 _ieee80211_enable_rssi_reports(sdata, 0, 0);
1518}
1519EXPORT_SYMBOL(ieee80211_disable_rssi_reports);
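
The RSSI-report API lets a station driver ask to be notified when the averaged signal leaves a window; thresholds are stored scaled by 16 to match mac80211's averaging algorithm. Example use (illustrative dBm values):

	/* report when the rolling average leaves [-85, -60] dBm */
	ieee80211_enable_rssi_reports(vif, -85, -60);

	/* and later, to stop the callbacks: */
	ieee80211_disable_rssi_reports(vif);
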
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 28bc084dbfb..7a49532f14c 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -151,8 +151,7 @@ void ieee80211_set_qos_hdr(struct ieee80211_local *local, struct sk_buff *skb)
151 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; 151 tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
152 152
153 if (unlikely(local->wifi_wme_noack_test)) 153 if (unlikely(local->wifi_wme_noack_test))
154 ack_policy |= QOS_CONTROL_ACK_POLICY_NOACK << 154 ack_policy |= IEEE80211_QOS_CTL_ACK_POLICY_NOACK;
155 QOS_CONTROL_ACK_POLICY_SHIFT;
156 /* qos header is 2 bytes, second reserved */ 155 /* qos header is 2 bytes, second reserved */
157 *p++ = ack_policy | tid; 156 *p++ = ack_policy | tid;
158 *p = 0; 157 *p = 0;
diff --git a/net/mac80211/wme.h b/net/mac80211/wme.h
index 6053b1c9fee..faead6d0202 100644
--- a/net/mac80211/wme.h
+++ b/net/mac80211/wme.h
@@ -13,11 +13,6 @@
13#include <linux/netdevice.h> 13#include <linux/netdevice.h>
14#include "ieee80211_i.h" 14#include "ieee80211_i.h"
15 15
16#define QOS_CONTROL_ACK_POLICY_NORMAL 0
17#define QOS_CONTROL_ACK_POLICY_NOACK 1
18
19#define QOS_CONTROL_ACK_POLICY_SHIFT 5
20
21extern const int ieee802_1d_to_ac[8]; 16extern const int ieee802_1d_to_ac[8];
22 17
23u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata, 18u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
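
The local QOS_CONTROL_* macros in wme.h are dropped in favour of the shared definition in include/linux/ieee80211.h, which is already shifted into place within the QoS control field, hence the disappearing shift in wme.c:

	/* shared definition (pre-shifted ack-policy bits 5-6): */
	#define IEEE80211_QOS_CTL_ACK_POLICY_NOACK	0x0020
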
diff --git a/net/mac80211/work.c b/net/mac80211/work.c
index d2e7f0e8667..edf8583280c 100644
--- a/net/mac80211/work.c
+++ b/net/mac80211/work.c
@@ -450,7 +450,7 @@ ieee80211_direct_probe(struct ieee80211_work *wk)
450 * will not answer to direct packet in unassociated state. 450 * will not answer to direct packet in unassociated state.
451 */ 451 */
452 ieee80211_send_probe_req(sdata, NULL, wk->probe_auth.ssid, 452 ieee80211_send_probe_req(sdata, NULL, wk->probe_auth.ssid,
453 wk->probe_auth.ssid_len, NULL, 0); 453 wk->probe_auth.ssid_len, NULL, 0, true);
454 454
455 wk->timeout = jiffies + IEEE80211_AUTH_TIMEOUT; 455 wk->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
456 run_again(local, wk->timeout); 456 run_again(local, wk->timeout);
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 9dc3b5f26e8..7bc8702808f 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -15,6 +15,7 @@
15#include <linux/gfp.h> 15#include <linux/gfp.h>
16#include <asm/unaligned.h> 16#include <asm/unaligned.h>
17#include <net/mac80211.h> 17#include <net/mac80211.h>
18#include <crypto/aes.h>
18 19
19#include "ieee80211_i.h" 20#include "ieee80211_i.h"
20#include "michael.h" 21#include "michael.h"
@@ -148,13 +149,19 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
148 149
149update_iv: 150update_iv:
150 /* update IV in key information to be able to detect replays */ 151 /* update IV in key information to be able to detect replays */
151 rx->key->u.tkip.rx[rx->queue].iv32 = rx->tkip_iv32; 152 rx->key->u.tkip.rx[rx->security_idx].iv32 = rx->tkip_iv32;
152 rx->key->u.tkip.rx[rx->queue].iv16 = rx->tkip_iv16; 153 rx->key->u.tkip.rx[rx->security_idx].iv16 = rx->tkip_iv16;
153 154
154 return RX_CONTINUE; 155 return RX_CONTINUE;
155 156
156mic_fail: 157mic_fail:
157 mac80211_ev_michael_mic_failure(rx->sdata, rx->key->conf.keyidx, 158 /*
159 * In some cases the key can be unset - e.g. a multicast packet, in
160 * a driver that supports HW encryption. Send up the key idx only if
161 * the key is set.
162 */
163 mac80211_ev_michael_mic_failure(rx->sdata,
164 rx->key ? rx->key->conf.keyidx : -1,
158 (void *) skb->data, NULL, GFP_ATOMIC); 165 (void *) skb->data, NULL, GFP_ATOMIC);
159 return RX_DROP_UNUSABLE; 166 return RX_DROP_UNUSABLE;
160} 167}
@@ -165,6 +172,7 @@ static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
165 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 172 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
166 struct ieee80211_key *key = tx->key; 173 struct ieee80211_key *key = tx->key;
167 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 174 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
175 unsigned long flags;
168 unsigned int hdrlen; 176 unsigned int hdrlen;
169 int len, tail; 177 int len, tail;
170 u8 *pos; 178 u8 *pos;
@@ -192,11 +200,12 @@ static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
192 pos += hdrlen; 200 pos += hdrlen;
193 201
194 /* Increase IV for the frame */ 202 /* Increase IV for the frame */
203 spin_lock_irqsave(&key->u.tkip.txlock, flags);
195 key->u.tkip.tx.iv16++; 204 key->u.tkip.tx.iv16++;
196 if (key->u.tkip.tx.iv16 == 0) 205 if (key->u.tkip.tx.iv16 == 0)
197 key->u.tkip.tx.iv32++; 206 key->u.tkip.tx.iv32++;
198 207 pos = ieee80211_tkip_add_iv(pos, key);
199 pos = ieee80211_tkip_add_iv(pos, key, key->u.tkip.tx.iv16); 208 spin_unlock_irqrestore(&key->u.tkip.txlock, flags);
200 209
201 /* hwaccel - with software IV */ 210 /* hwaccel - with software IV */
202 if (info->control.hw_key) 211 if (info->control.hw_key)
@@ -205,9 +214,8 @@ static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
205 /* Add room for ICV */ 214 /* Add room for ICV */
206 skb_put(skb, TKIP_ICV_LEN); 215 skb_put(skb, TKIP_ICV_LEN);
207 216
208 hdr = (struct ieee80211_hdr *) skb->data;
209 return ieee80211_tkip_encrypt_data(tx->local->wep_tx_tfm, 217 return ieee80211_tkip_encrypt_data(tx->local->wep_tx_tfm,
210 key, pos, len, hdr->addr2); 218 key, skb, pos, len);
211} 219}
212 220
213 221
@@ -255,7 +263,7 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
255 res = ieee80211_tkip_decrypt_data(rx->local->wep_rx_tfm, 263 res = ieee80211_tkip_decrypt_data(rx->local->wep_rx_tfm,
256 key, skb->data + hdrlen, 264 key, skb->data + hdrlen,
257 skb->len - hdrlen, rx->sta->sta.addr, 265 skb->len - hdrlen, rx->sta->sta.addr,
258 hdr->addr1, hwaccel, rx->queue, 266 hdr->addr1, hwaccel, rx->security_idx,
259 &rx->tkip_iv32, 267 &rx->tkip_iv32,
260 &rx->tkip_iv16); 268 &rx->tkip_iv16);
261 if (res != TKIP_DECRYPT_OK) 269 if (res != TKIP_DECRYPT_OK)
@@ -283,8 +291,10 @@ static void ccmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *scratch,
283 unsigned int hdrlen; 291 unsigned int hdrlen;
284 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 292 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
285 293
286 b_0 = scratch + 3 * AES_BLOCK_LEN; 294 memset(scratch, 0, 6 * AES_BLOCK_SIZE);
287 aad = scratch + 4 * AES_BLOCK_LEN; 295
296 b_0 = scratch + 3 * AES_BLOCK_SIZE;
297 aad = scratch + 4 * AES_BLOCK_SIZE;
288 298
289 /* 299 /*
290 * Mask FC: zero subtype b4 b5 b6 (if not mgmt) 300 * Mask FC: zero subtype b4 b5 b6 (if not mgmt)
@@ -373,8 +383,10 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
373 struct ieee80211_key *key = tx->key; 383 struct ieee80211_key *key = tx->key;
374 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 384 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
375 int hdrlen, len, tail; 385 int hdrlen, len, tail;
376 u8 *pos, *pn; 386 u8 *pos;
377 int i; 387 u8 pn[6];
388 u64 pn64;
389 u8 scratch[6 * AES_BLOCK_SIZE];
378 390
379 if (info->control.hw_key && 391 if (info->control.hw_key &&
380 !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV)) { 392 !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV)) {
@@ -402,14 +414,14 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
402 hdr = (struct ieee80211_hdr *) pos; 414 hdr = (struct ieee80211_hdr *) pos;
403 pos += hdrlen; 415 pos += hdrlen;
404 416
405 /* PN = PN + 1 */ 417 pn64 = atomic64_inc_return(&key->u.ccmp.tx_pn);
406 pn = key->u.ccmp.tx_pn;
407 418
408 for (i = CCMP_PN_LEN - 1; i >= 0; i--) { 419 pn[5] = pn64;
409 pn[i]++; 420 pn[4] = pn64 >> 8;
410 if (pn[i]) 421 pn[3] = pn64 >> 16;
411 break; 422 pn[2] = pn64 >> 24;
412 } 423 pn[1] = pn64 >> 32;
424 pn[0] = pn64 >> 40;
413 425
414 ccmp_pn2hdr(pos, pn, key->conf.keyidx); 426 ccmp_pn2hdr(pos, pn, key->conf.keyidx);
415 427
@@ -418,8 +430,8 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
418 return 0; 430 return 0;
419 431
420 pos += CCMP_HDR_LEN; 432 pos += CCMP_HDR_LEN;
421 ccmp_special_blocks(skb, pn, key->u.ccmp.tx_crypto_buf, 0); 433 ccmp_special_blocks(skb, pn, scratch, 0);
422 ieee80211_aes_ccm_encrypt(key->u.ccmp.tfm, key->u.ccmp.tx_crypto_buf, pos, len, 434 ieee80211_aes_ccm_encrypt(key->u.ccmp.tfm, scratch, pos, len,
423 pos, skb_put(skb, CCMP_MIC_LEN)); 435 pos, skb_put(skb, CCMP_MIC_LEN));
424 436
425 return 0; 437 return 0;
@@ -466,8 +478,7 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
466 478
467 ccmp_hdr2pn(pn, skb->data + hdrlen); 479 ccmp_hdr2pn(pn, skb->data + hdrlen);
468 480
469 queue = ieee80211_is_mgmt(hdr->frame_control) ? 481 queue = rx->security_idx;
470 NUM_RX_DATA_QUEUES : rx->queue;
471 482
472 if (memcmp(pn, key->u.ccmp.rx_pn[queue], CCMP_PN_LEN) <= 0) { 483 if (memcmp(pn, key->u.ccmp.rx_pn[queue], CCMP_PN_LEN) <= 0) {
473 key->u.ccmp.replays++; 484 key->u.ccmp.replays++;
@@ -475,11 +486,12 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
475 } 486 }
476 487
477 if (!(status->flag & RX_FLAG_DECRYPTED)) { 488 if (!(status->flag & RX_FLAG_DECRYPTED)) {
489 u8 scratch[6 * AES_BLOCK_SIZE];
478 /* hardware didn't decrypt/verify MIC */ 490 /* hardware didn't decrypt/verify MIC */
479 ccmp_special_blocks(skb, pn, key->u.ccmp.rx_crypto_buf, 1); 491 ccmp_special_blocks(skb, pn, scratch, 1);
480 492
481 if (ieee80211_aes_ccm_decrypt( 493 if (ieee80211_aes_ccm_decrypt(
482 key->u.ccmp.tfm, key->u.ccmp.rx_crypto_buf, 494 key->u.ccmp.tfm, scratch,
483 skb->data + hdrlen + CCMP_HDR_LEN, data_len, 495 skb->data + hdrlen + CCMP_HDR_LEN, data_len,
484 skb->data + skb->len - CCMP_MIC_LEN, 496 skb->data + skb->len - CCMP_MIC_LEN,
485 skb->data + hdrlen + CCMP_HDR_LEN)) 497 skb->data + hdrlen + CCMP_HDR_LEN))
@@ -510,6 +522,16 @@ static void bip_aad(struct sk_buff *skb, u8 *aad)
510} 522}
511 523
512 524
525static inline void bip_ipn_set64(u8 *d, u64 pn)
526{
527 *d++ = pn;
528 *d++ = pn >> 8;
529 *d++ = pn >> 16;
530 *d++ = pn >> 24;
531 *d++ = pn >> 32;
532 *d = pn >> 40;
533}
534
513static inline void bip_ipn_swap(u8 *d, const u8 *s) 535static inline void bip_ipn_swap(u8 *d, const u8 *s)
514{ 536{
515 *d++ = s[5]; 537 *d++ = s[5];
@@ -528,8 +550,8 @@ ieee80211_crypto_aes_cmac_encrypt(struct ieee80211_tx_data *tx)
528 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 550 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
529 struct ieee80211_key *key = tx->key; 551 struct ieee80211_key *key = tx->key;
530 struct ieee80211_mmie *mmie; 552 struct ieee80211_mmie *mmie;
531 u8 *pn, aad[20]; 553 u8 aad[20];
532 int i; 554 u64 pn64;
533 555
534 if (info->control.hw_key) 556 if (info->control.hw_key)
535 return 0; 557 return 0;
@@ -543,22 +565,17 @@ ieee80211_crypto_aes_cmac_encrypt(struct ieee80211_tx_data *tx)
543 mmie->key_id = cpu_to_le16(key->conf.keyidx); 565 mmie->key_id = cpu_to_le16(key->conf.keyidx);
544 566
545 /* PN = PN + 1 */ 567 /* PN = PN + 1 */
546 pn = key->u.aes_cmac.tx_pn; 568 pn64 = atomic64_inc_return(&key->u.aes_cmac.tx_pn);
547 569
548 for (i = sizeof(key->u.aes_cmac.tx_pn) - 1; i >= 0; i--) { 570 bip_ipn_set64(mmie->sequence_number, pn64);
549 pn[i]++;
550 if (pn[i])
551 break;
552 }
553 bip_ipn_swap(mmie->sequence_number, pn);
554 571
555 bip_aad(skb, aad); 572 bip_aad(skb, aad);
556 573
557 /* 574 /*
558 * MIC = AES-128-CMAC(IGTK, AAD || Management Frame Body || MMIE, 64) 575 * MIC = AES-128-CMAC(IGTK, AAD || Management Frame Body || MMIE, 64)
559 */ 576 */
560 ieee80211_aes_cmac(key->u.aes_cmac.tfm, key->u.aes_cmac.tx_crypto_buf, 577 ieee80211_aes_cmac(key->u.aes_cmac.tfm, aad,
561 aad, skb->data + 24, skb->len - 24, mmie->mic); 578 skb->data + 24, skb->len - 24, mmie->mic);
562 579
563 return TX_CONTINUE; 580 return TX_CONTINUE;
564} 581}
@@ -596,8 +613,7 @@ ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx)
596 if (!(status->flag & RX_FLAG_DECRYPTED)) { 613 if (!(status->flag & RX_FLAG_DECRYPTED)) {
597 /* hardware didn't decrypt/verify MIC */ 614 /* hardware didn't decrypt/verify MIC */
598 bip_aad(skb, aad); 615 bip_aad(skb, aad);
599 ieee80211_aes_cmac(key->u.aes_cmac.tfm, 616 ieee80211_aes_cmac(key->u.aes_cmac.tfm, aad,
600 key->u.aes_cmac.rx_crypto_buf, aad,
601 skb->data + 24, skb->len - 24, mic); 617 skb->data + 24, skb->len - 24, mic);
602 if (memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) { 618 if (memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) {
603 key->u.aes_cmac.icverrors++; 619 key->u.aes_cmac.icverrors++;
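
The CCMP and BIP transmit counters become atomic64_t, so concurrent transmitters take unique packet numbers with a single atomic op instead of a locked byte-array increment, and the scratch buffers move to the stack (6 * AES_BLOCK_SIZE bytes), removing the shared per-key crypto buffers. The unpacking follows the hunks above, most-significant byte first:

	u64 pn64 = atomic64_inc_return(&key->u.ccmp.tx_pn);
	u8 pn[6] = {	/* 48-bit PN, pn[0] = MSB, as ccmp_pn2hdr() expects */
		pn64 >> 40, pn64 >> 32, pn64 >> 24,
		pn64 >> 16, pn64 >> 8, pn64,
	};
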
diff --git a/net/netfilter/ipset/Kconfig b/net/netfilter/ipset/Kconfig
index 2c5b348eb3a..ba36c283d83 100644
--- a/net/netfilter/ipset/Kconfig
+++ b/net/netfilter/ipset/Kconfig
@@ -109,6 +109,16 @@ config IP_SET_HASH_NETPORT
109 109
110 To compile it as a module, choose M here. If unsure, say N. 110 To compile it as a module, choose M here. If unsure, say N.
111 111
112config IP_SET_HASH_NETIFACE
113 tristate "hash:net,iface set support"
114 depends on IP_SET
115 help
116 This option adds the hash:net,iface set type support, by which
117 one can store IPv4/IPv6 network address/prefix and
118 interface name pairs as elements in a set.
119
120 To compile it as a module, choose M here. If unsure, say N.
121
112config IP_SET_LIST_SET 122config IP_SET_LIST_SET
113 tristate "list:set set support" 123 tristate "list:set set support"
114 depends on IP_SET 124 depends on IP_SET
diff --git a/net/netfilter/ipset/Makefile b/net/netfilter/ipset/Makefile
index 5adbdab67bd..6e965ecd544 100644
--- a/net/netfilter/ipset/Makefile
+++ b/net/netfilter/ipset/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_IP_SET_HASH_IPPORTIP) += ip_set_hash_ipportip.o
19obj-$(CONFIG_IP_SET_HASH_IPPORTNET) += ip_set_hash_ipportnet.o 19obj-$(CONFIG_IP_SET_HASH_IPPORTNET) += ip_set_hash_ipportnet.o
20obj-$(CONFIG_IP_SET_HASH_NET) += ip_set_hash_net.o 20obj-$(CONFIG_IP_SET_HASH_NET) += ip_set_hash_net.o
21obj-$(CONFIG_IP_SET_HASH_NETPORT) += ip_set_hash_netport.o 21obj-$(CONFIG_IP_SET_HASH_NETPORT) += ip_set_hash_netport.o
22obj-$(CONFIG_IP_SET_HASH_NETIFACE) += ip_set_hash_netiface.o
22 23
23# list types 24# list types
24obj-$(CONFIG_IP_SET_LIST_SET) += ip_set_list_set.o 25obj-$(CONFIG_IP_SET_LIST_SET) += ip_set_list_set.o
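
The new hash:net,iface set type stores network-prefix/interface pairs. Typical userspace usage with a matching ipset binary (illustrative):

	ipset create trusted hash:net,iface
	ipset add trusted 192.168.0.0/24,eth0
	iptables -A INPUT -m set --match-set trusted src,src -j ACCEPT
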
diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c
index ba2d16607f4..e3e73997c3b 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ip.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ip.c
@@ -54,7 +54,7 @@ ip_to_id(const struct bitmap_ip *m, u32 ip)
54} 54}
55 55
56static int 56static int
57bitmap_ip_test(struct ip_set *set, void *value, u32 timeout) 57bitmap_ip_test(struct ip_set *set, void *value, u32 timeout, u32 flags)
58{ 58{
59 const struct bitmap_ip *map = set->data; 59 const struct bitmap_ip *map = set->data;
60 u16 id = *(u16 *)value; 60 u16 id = *(u16 *)value;
@@ -63,7 +63,7 @@ bitmap_ip_test(struct ip_set *set, void *value, u32 timeout)
63} 63}
64 64
65static int 65static int
66bitmap_ip_add(struct ip_set *set, void *value, u32 timeout) 66bitmap_ip_add(struct ip_set *set, void *value, u32 timeout, u32 flags)
67{ 67{
68 struct bitmap_ip *map = set->data; 68 struct bitmap_ip *map = set->data;
69 u16 id = *(u16 *)value; 69 u16 id = *(u16 *)value;
@@ -75,7 +75,7 @@ bitmap_ip_add(struct ip_set *set, void *value, u32 timeout)
75} 75}
76 76
77static int 77static int
78bitmap_ip_del(struct ip_set *set, void *value, u32 timeout) 78bitmap_ip_del(struct ip_set *set, void *value, u32 timeout, u32 flags)
79{ 79{
80 struct bitmap_ip *map = set->data; 80 struct bitmap_ip *map = set->data;
81 u16 id = *(u16 *)value; 81 u16 id = *(u16 *)value;
@@ -131,7 +131,7 @@ nla_put_failure:
131/* Timeout variant */ 131/* Timeout variant */
132 132
133static int 133static int
134bitmap_ip_ttest(struct ip_set *set, void *value, u32 timeout) 134bitmap_ip_ttest(struct ip_set *set, void *value, u32 timeout, u32 flags)
135{ 135{
136 const struct bitmap_ip *map = set->data; 136 const struct bitmap_ip *map = set->data;
137 const unsigned long *members = map->members; 137 const unsigned long *members = map->members;
@@ -141,13 +141,13 @@ bitmap_ip_ttest(struct ip_set *set, void *value, u32 timeout)
141} 141}
142 142
143static int 143static int
144bitmap_ip_tadd(struct ip_set *set, void *value, u32 timeout) 144bitmap_ip_tadd(struct ip_set *set, void *value, u32 timeout, u32 flags)
145{ 145{
146 struct bitmap_ip *map = set->data; 146 struct bitmap_ip *map = set->data;
147 unsigned long *members = map->members; 147 unsigned long *members = map->members;
148 u16 id = *(u16 *)value; 148 u16 id = *(u16 *)value;
149 149
150 if (ip_set_timeout_test(members[id])) 150 if (ip_set_timeout_test(members[id]) && !(flags & IPSET_FLAG_EXIST))
151 return -IPSET_ERR_EXIST; 151 return -IPSET_ERR_EXIST;
152 152
153 members[id] = ip_set_timeout_set(timeout); 153 members[id] = ip_set_timeout_set(timeout);
@@ -156,7 +156,7 @@ bitmap_ip_tadd(struct ip_set *set, void *value, u32 timeout)
156} 156}
157 157
158static int 158static int
159bitmap_ip_tdel(struct ip_set *set, void *value, u32 timeout) 159bitmap_ip_tdel(struct ip_set *set, void *value, u32 timeout, u32 flags)
160{ 160{
161 struct bitmap_ip *map = set->data; 161 struct bitmap_ip *map = set->data;
162 unsigned long *members = map->members; 162 unsigned long *members = map->members;
@@ -219,24 +219,25 @@ nla_put_failure:
219 219
220static int 220static int
221bitmap_ip_kadt(struct ip_set *set, const struct sk_buff *skb, 221bitmap_ip_kadt(struct ip_set *set, const struct sk_buff *skb,
222 enum ipset_adt adt, u8 pf, u8 dim, u8 flags) 222 const struct xt_action_param *par,
223 enum ipset_adt adt, const struct ip_set_adt_opt *opt)
223{ 224{
224 struct bitmap_ip *map = set->data; 225 struct bitmap_ip *map = set->data;
225 ipset_adtfn adtfn = set->variant->adt[adt]; 226 ipset_adtfn adtfn = set->variant->adt[adt];
226 u32 ip; 227 u32 ip;
227 228
228 ip = ntohl(ip4addr(skb, flags & IPSET_DIM_ONE_SRC)); 229 ip = ntohl(ip4addr(skb, opt->flags & IPSET_DIM_ONE_SRC));
229 if (ip < map->first_ip || ip > map->last_ip) 230 if (ip < map->first_ip || ip > map->last_ip)
230 return -IPSET_ERR_BITMAP_RANGE; 231 return -IPSET_ERR_BITMAP_RANGE;
231 232
232 ip = ip_to_id(map, ip); 233 ip = ip_to_id(map, ip);
233 234
234 return adtfn(set, &ip, map->timeout); 235 return adtfn(set, &ip, opt_timeout(opt, map), opt->cmdflags);
235} 236}
236 237
237static int 238static int
238bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[], 239bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[],
239 enum ipset_adt adt, u32 *lineno, u32 flags) 240 enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
240{ 241{
241 struct bitmap_ip *map = set->data; 242 struct bitmap_ip *map = set->data;
242 ipset_adtfn adtfn = set->variant->adt[adt]; 243 ipset_adtfn adtfn = set->variant->adt[adt];
@@ -266,7 +267,7 @@ bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[],
266 267
267 if (adt == IPSET_TEST) { 268 if (adt == IPSET_TEST) {
268 id = ip_to_id(map, ip); 269 id = ip_to_id(map, ip);
269 return adtfn(set, &id, timeout); 270 return adtfn(set, &id, timeout, flags);
270 } 271 }
271 272
272 if (tb[IPSET_ATTR_IP_TO]) { 273 if (tb[IPSET_ATTR_IP_TO]) {
@@ -283,8 +284,7 @@ bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[],
283 284
284 if (cidr > 32) 285 if (cidr > 32)
285 return -IPSET_ERR_INVALID_CIDR; 286 return -IPSET_ERR_INVALID_CIDR;
286 ip &= ip_set_hostmask(cidr); 287 ip_set_mask_from_to(ip, ip_to, cidr);
287 ip_to = ip | ~ip_set_hostmask(cidr);
288 } else 288 } else
289 ip_to = ip; 289 ip_to = ip;
290 290
@@ -293,7 +293,7 @@ bitmap_ip_uadt(struct ip_set *set, struct nlattr *tb[],
293 293
294 for (; !before(ip_to, ip); ip += map->hosts) { 294 for (; !before(ip_to, ip); ip += map->hosts) {
295 id = ip_to_id(map, ip); 295 id = ip_to_id(map, ip);
296 ret = adtfn(set, &id, timeout); 296 ret = adtfn(set, &id, timeout, flags);
297 297
298 if (ret && !ip_set_eexist(ret, flags)) 298 if (ret && !ip_set_eexist(ret, flags))
299 return ret; 299 return ret;
@@ -478,7 +478,7 @@ bitmap_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
478 478
479 if (cidr >= 32) 479 if (cidr >= 32)
480 return -IPSET_ERR_INVALID_CIDR; 480 return -IPSET_ERR_INVALID_CIDR;
481 last_ip = first_ip | ~ip_set_hostmask(cidr); 481 ip_set_mask_from_to(first_ip, last_ip, cidr);
482 } else 482 } else
483 return -IPSET_ERR_PROTOCOL; 483 return -IPSET_ERR_PROTOCOL;
484 484
@@ -551,7 +551,8 @@ static struct ip_set_type bitmap_ip_type __read_mostly = {
551 .features = IPSET_TYPE_IP, 551 .features = IPSET_TYPE_IP,
552 .dimension = IPSET_DIM_ONE, 552 .dimension = IPSET_DIM_ONE,
553 .family = AF_INET, 553 .family = AF_INET,
554 .revision = 0, 554 .revision_min = 0,
555 .revision_max = 0,
555 .create = bitmap_ip_create, 556 .create = bitmap_ip_create,
556 .create_policy = { 557 .create_policy = {
557 [IPSET_ATTR_IP] = { .type = NLA_NESTED }, 558 [IPSET_ATTR_IP] = { .type = NLA_NESTED },
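
Across the bitmap types, the add/del/test backends grow a flags argument so userspace's IPSET_FLAG_EXIST (ipset's -exist option) can turn a duplicate add into a timeout refresh instead of an -IPSET_ERR_EXIST error, and the kadt path now receives the xt_action_param plus an ip_set_adt_opt (carrying per-rule timeout and command flags) instead of loose pf/dim/flags arguments. The refresh semantics in a nutshell (restating the tadd hunk above):

	if (ip_set_timeout_test(members[id]) && !(flags & IPSET_FLAG_EXIST))
		return -IPSET_ERR_EXIST;	/* duplicate, and no -exist */

	members[id] = ip_set_timeout_set(timeout);	/* (re)arm the TTL */
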
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
index a274300b6a5..56096f54497 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
@@ -99,7 +99,7 @@ bitmap_ipmac_exist(const struct ipmac_telem *elem)
99/* Base variant */ 99/* Base variant */
100 100
101static int 101static int
102bitmap_ipmac_test(struct ip_set *set, void *value, u32 timeout) 102bitmap_ipmac_test(struct ip_set *set, void *value, u32 timeout, u32 flags)
103{ 103{
104 const struct bitmap_ipmac *map = set->data; 104 const struct bitmap_ipmac *map = set->data;
105 const struct ipmac *data = value; 105 const struct ipmac *data = value;
@@ -117,7 +117,7 @@ bitmap_ipmac_test(struct ip_set *set, void *value, u32 timeout)
117} 117}
118 118
119static int 119static int
120bitmap_ipmac_add(struct ip_set *set, void *value, u32 timeout) 120bitmap_ipmac_add(struct ip_set *set, void *value, u32 timeout, u32 flags)
121{ 121{
122 struct bitmap_ipmac *map = set->data; 122 struct bitmap_ipmac *map = set->data;
123 const struct ipmac *data = value; 123 const struct ipmac *data = value;
@@ -146,7 +146,7 @@ bitmap_ipmac_add(struct ip_set *set, void *value, u32 timeout)
146} 146}
147 147
148static int 148static int
149bitmap_ipmac_del(struct ip_set *set, void *value, u32 timeout) 149bitmap_ipmac_del(struct ip_set *set, void *value, u32 timeout, u32 flags)
150{ 150{
151 struct bitmap_ipmac *map = set->data; 151 struct bitmap_ipmac *map = set->data;
152 const struct ipmac *data = value; 152 const struct ipmac *data = value;
@@ -212,7 +212,7 @@ nla_put_failure:
212/* Timeout variant */ 212/* Timeout variant */
213 213
214static int 214static int
215bitmap_ipmac_ttest(struct ip_set *set, void *value, u32 timeout) 215bitmap_ipmac_ttest(struct ip_set *set, void *value, u32 timeout, u32 flags)
216{ 216{
 	const struct bitmap_ipmac *map = set->data;
 	const struct ipmac *data = value;
@@ -231,15 +231,16 @@ bitmap_ipmac_ttest(struct ip_set *set, void *value, u32 timeout)
 }
 
 static int
-bitmap_ipmac_tadd(struct ip_set *set, void *value, u32 timeout)
+bitmap_ipmac_tadd(struct ip_set *set, void *value, u32 timeout, u32 flags)
 {
 	struct bitmap_ipmac *map = set->data;
 	const struct ipmac *data = value;
 	struct ipmac_telem *elem = bitmap_ipmac_elem(map, data->id);
+	bool flag_exist = flags & IPSET_FLAG_EXIST;
 
 	switch (elem->match) {
 	case MAC_UNSET:
-		if (!data->ether)
+		if (!(data->ether || flag_exist))
 			/* Already added without ethernet address */
 			return -IPSET_ERR_EXIST;
 		/* Fill the MAC address and activate the timer */
@@ -251,7 +252,7 @@ bitmap_ipmac_tadd(struct ip_set *set, void *value, u32 timeout)
 		elem->timeout = ip_set_timeout_set(timeout);
 		break;
 	case MAC_FILLED:
-		if (!bitmap_expired(map, data->id))
+		if (!(bitmap_expired(map, data->id) || flag_exist))
 			return -IPSET_ERR_EXIST;
 		/* Fall through */
 	case MAC_EMPTY:
@@ -273,7 +274,7 @@ bitmap_ipmac_tadd(struct ip_set *set, void *value, u32 timeout)
 }
 
 static int
-bitmap_ipmac_tdel(struct ip_set *set, void *value, u32 timeout)
+bitmap_ipmac_tdel(struct ip_set *set, void *value, u32 timeout, u32 flags)
 {
 	struct bitmap_ipmac *map = set->data;
 	const struct ipmac *data = value;
@@ -337,17 +338,18 @@ nla_put_failure:
 
 static int
 bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
-		  enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+		  const struct xt_action_param *par,
+		  enum ipset_adt adt, const struct ip_set_adt_opt *opt)
 {
 	struct bitmap_ipmac *map = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct ipmac data;
 
 	/* MAC can be src only */
-	if (!(flags & IPSET_DIM_TWO_SRC))
+	if (!(opt->flags & IPSET_DIM_TWO_SRC))
 		return 0;
 
-	data.id = ntohl(ip4addr(skb, flags & IPSET_DIM_ONE_SRC));
+	data.id = ntohl(ip4addr(skb, opt->flags & IPSET_DIM_ONE_SRC));
 	if (data.id < map->first_ip || data.id > map->last_ip)
 		return -IPSET_ERR_BITMAP_RANGE;
 
@@ -359,12 +361,12 @@ bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
 	data.id -= map->first_ip;
 	data.ether = eth_hdr(skb)->h_source;
 
-	return adtfn(set, &data, map->timeout);
+	return adtfn(set, &data, opt_timeout(opt, map), opt->cmdflags);
 }
 
 static int
 bitmap_ipmac_uadt(struct ip_set *set, struct nlattr *tb[],
-		  enum ipset_adt adt, u32 *lineno, u32 flags)
+		  enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
 {
 	const struct bitmap_ipmac *map = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
@@ -399,7 +401,7 @@ bitmap_ipmac_uadt(struct ip_set *set, struct nlattr *tb[],
 
 	data.id -= map->first_ip;
 
-	ret = adtfn(set, &data, timeout);
+	ret = adtfn(set, &data, timeout, flags);
 
 	return ip_set_eexist(ret, flags) ? 0 : ret;
 }
@@ -577,7 +579,7 @@ bitmap_ipmac_create(struct ip_set *set, struct nlattr *tb[],
 
 		if (cidr >= 32)
 			return -IPSET_ERR_INVALID_CIDR;
-		last_ip = first_ip | ~ip_set_hostmask(cidr);
+		ip_set_mask_from_to(first_ip, last_ip, cidr);
 	} else
 		return -IPSET_ERR_PROTOCOL;
 
@@ -622,7 +624,8 @@ static struct ip_set_type bitmap_ipmac_type = {
 	.features	= IPSET_TYPE_IP | IPSET_TYPE_MAC,
 	.dimension	= IPSET_DIM_TWO,
 	.family		= AF_INET,
-	.revision	= 0,
+	.revision_min	= 0,
+	.revision_max	= 0,
 	.create		= bitmap_ipmac_create,
 	.create_policy	= {
 		[IPSET_ATTR_IP]		= { .type = NLA_NESTED },
@@ -632,7 +635,8 @@ static struct ip_set_type bitmap_ipmac_type = {
 	},
 	.adt_policy	= {
 		[IPSET_ATTR_IP]		= { .type = NLA_NESTED },
-		[IPSET_ATTR_ETHER]	= { .type = NLA_BINARY, .len = ETH_ALEN },
+		[IPSET_ATTR_ETHER]	= { .type = NLA_BINARY,
+					    .len  = ETH_ALEN },
 		[IPSET_ATTR_TIMEOUT]	= { .type = NLA_U32 },
 		[IPSET_ATTR_LINENO]	= { .type = NLA_U32 },
 	},
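Note on the hunks above: the extra u32 flags argument threaded through the add/del/test callbacks carries the command-level flags of the netlink request, and the only bit consulted here is IPSET_FLAG_EXIST, which userspace sets via "ipset -exist" so that re-adding an existing element refreshes it (for example its timeout) instead of failing. A minimal userspace sketch of that decision follows; the constant values and the helper name are illustrative, not the kernel's.

/* Sketch: how IPSET_FLAG_EXIST changes the "already added" check.
 * Names and values are illustrative placeholders. */
#include <stdio.h>
#include <stdbool.h>

#define IPSET_FLAG_EXIST  (1 << 0)   /* assumed bit position */
#define IPSET_ERR_EXIST   4101       /* placeholder error code */

static int add_elem(bool already_present, unsigned flags)
{
	bool flag_exist = flags & IPSET_FLAG_EXIST;

	/* Before this patch any re-add failed; after it, a re-add
	 * succeeds when -exist was requested and the element is
	 * refreshed instead. */
	if (already_present && !flag_exist)
		return -IPSET_ERR_EXIST;
	return 0;	/* (re)store the element */
}

int main(void)
{
	printf("re-add, no flag: %d\n", add_elem(true, 0));
	printf("re-add, -exist:  %d\n", add_elem(true, IPSET_FLAG_EXIST));
	printf("fresh add:       %d\n", add_elem(false, 0));
	return 0;
}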
diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c
index 6b38eb8f6ed..29ba93bb94b 100644
--- a/net/netfilter/ipset/ip_set_bitmap_port.c
+++ b/net/netfilter/ipset/ip_set_bitmap_port.c
@@ -40,7 +40,7 @@ struct bitmap_port {
 /* Base variant */
 
 static int
-bitmap_port_test(struct ip_set *set, void *value, u32 timeout)
+bitmap_port_test(struct ip_set *set, void *value, u32 timeout, u32 flags)
 {
 	const struct bitmap_port *map = set->data;
 	u16 id = *(u16 *)value;
@@ -49,7 +49,7 @@ bitmap_port_test(struct ip_set *set, void *value, u32 timeout)
 }
 
 static int
-bitmap_port_add(struct ip_set *set, void *value, u32 timeout)
+bitmap_port_add(struct ip_set *set, void *value, u32 timeout, u32 flags)
 {
 	struct bitmap_port *map = set->data;
 	u16 id = *(u16 *)value;
@@ -61,7 +61,7 @@ bitmap_port_add(struct ip_set *set, void *value, u32 timeout)
 }
 
 static int
-bitmap_port_del(struct ip_set *set, void *value, u32 timeout)
+bitmap_port_del(struct ip_set *set, void *value, u32 timeout, u32 flags)
 {
 	struct bitmap_port *map = set->data;
 	u16 id = *(u16 *)value;
@@ -119,7 +119,7 @@ nla_put_failure:
 /* Timeout variant */
 
 static int
-bitmap_port_ttest(struct ip_set *set, void *value, u32 timeout)
+bitmap_port_ttest(struct ip_set *set, void *value, u32 timeout, u32 flags)
 {
 	const struct bitmap_port *map = set->data;
 	const unsigned long *members = map->members;
@@ -129,13 +129,13 @@ bitmap_port_ttest(struct ip_set *set, void *value, u32 timeout)
 }
 
 static int
-bitmap_port_tadd(struct ip_set *set, void *value, u32 timeout)
+bitmap_port_tadd(struct ip_set *set, void *value, u32 timeout, u32 flags)
 {
 	struct bitmap_port *map = set->data;
 	unsigned long *members = map->members;
 	u16 id = *(u16 *)value;
 
-	if (ip_set_timeout_test(members[id]))
+	if (ip_set_timeout_test(members[id]) && !(flags & IPSET_FLAG_EXIST))
 		return -IPSET_ERR_EXIST;
 
 	members[id] = ip_set_timeout_set(timeout);
@@ -144,7 +144,7 @@ bitmap_port_tadd(struct ip_set *set, void *value, u32 timeout)
 }
 
 static int
-bitmap_port_tdel(struct ip_set *set, void *value, u32 timeout)
+bitmap_port_tdel(struct ip_set *set, void *value, u32 timeout, u32 flags)
 {
 	struct bitmap_port *map = set->data;
 	unsigned long *members = map->members;
@@ -208,14 +208,16 @@ nla_put_failure:
 
 static int
 bitmap_port_kadt(struct ip_set *set, const struct sk_buff *skb,
-		 enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+		 const struct xt_action_param *par,
+		 enum ipset_adt adt, const struct ip_set_adt_opt *opt)
 {
 	struct bitmap_port *map = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	__be16 __port;
 	u16 port = 0;
 
-	if (!ip_set_get_ip_port(skb, pf, flags & IPSET_DIM_ONE_SRC, &__port))
+	if (!ip_set_get_ip_port(skb, opt->family,
+				opt->flags & IPSET_DIM_ONE_SRC, &__port))
 		return -EINVAL;
 
 	port = ntohs(__port);
@@ -225,12 +227,12 @@ bitmap_port_kadt(struct ip_set *set, const struct sk_buff *skb,
 
 	port -= map->first_port;
 
-	return adtfn(set, &port, map->timeout);
+	return adtfn(set, &port, opt_timeout(opt, map), opt->cmdflags);
 }
 
 static int
 bitmap_port_uadt(struct ip_set *set, struct nlattr *tb[],
-		 enum ipset_adt adt, u32 *lineno, u32 flags)
+		 enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
 {
 	struct bitmap_port *map = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
@@ -259,7 +261,7 @@ bitmap_port_uadt(struct ip_set *set, struct nlattr *tb[],
 
 	if (adt == IPSET_TEST) {
 		id = port - map->first_port;
-		return adtfn(set, &id, timeout);
+		return adtfn(set, &id, timeout, flags);
 	}
 
 	if (tb[IPSET_ATTR_PORT_TO]) {
@@ -277,7 +279,7 @@ bitmap_port_uadt(struct ip_set *set, struct nlattr *tb[],
 
 	for (; port <= port_to; port++) {
 		id = port - map->first_port;
-		ret = adtfn(set, &id, timeout);
+		ret = adtfn(set, &id, timeout, flags);
 
 		if (ret && !ip_set_eexist(ret, flags))
 			return ret;
@@ -482,7 +484,8 @@ static struct ip_set_type bitmap_port_type = {
 	.features	= IPSET_TYPE_PORT,
 	.dimension	= IPSET_DIM_ONE,
 	.family		= AF_UNSPEC,
-	.revision	= 0,
+	.revision_min	= 0,
+	.revision_max	= 0,
 	.create		= bitmap_port_create,
 	.create_policy	= {
 		[IPSET_ATTR_PORT]	= { .type = NLA_U16 },
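The kadt signature change visible in both bitmap types above replaces the loose family/dim/flags arguments with the match's xt_action_param plus a single ip_set_adt_opt block. Judging only by the accessors used in this diff (opt->family, opt->dim, opt->flags, opt->cmdflags and opt_timeout()), the option block looks roughly like the sketch below; the authoritative definition lives in include/linux/netfilter/ipset/ip_set.h, so treat the field types and layout as assumptions.

#include <stdint.h>

typedef uint8_t u8;
typedef uint32_t u32;

/* Approximate shape, inferred from the call sites in this diff. */
struct ip_set_adt_opt_sketch {
	u8  family;	/* protocol family of the packet, was the "pf" arg */
	u8  dim;	/* dimension of the match, was the "dim" arg */
	u8  flags;	/* src/dst direction bits, was the "flags" arg */
	u32 cmdflags;	/* command-level flags, e.g. IPSET_FLAG_EXIST */
	u32 timeout;	/* per-rule timeout override */
};

/* opt_timeout(opt, map) as used above presumably picks the per-rule
 * timeout when one was given and the set's default (map->timeout)
 * otherwise. */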
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 42aa64b6b0b..d7e86ef9d23 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -17,10 +17,10 @@
 #include <linux/spinlock.h>
 #include <linux/netlink.h>
 #include <linux/rculist.h>
-#include <linux/version.h>
 #include <net/netlink.h>
 
 #include <linux/netfilter.h>
+#include <linux/netfilter/x_tables.h>
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/ipset/ip_set.h>
 
@@ -70,7 +70,8 @@ find_set_type(const char *name, u8 family, u8 revision)
 	list_for_each_entry_rcu(type, &ip_set_type_list, list)
 		if (STREQ(type->name, name) &&
 		    (type->family == family || type->family == AF_UNSPEC) &&
-		    type->revision == revision)
+		    revision >= type->revision_min &&
+		    revision <= type->revision_max)
 			return type;
 	return NULL;
 }
@@ -135,10 +136,10 @@ find_set_type_minmax(const char *name, u8 family, u8 *min, u8 *max)
 		if (STREQ(type->name, name) &&
 		    (type->family == family || type->family == AF_UNSPEC)) {
 			found = true;
-			if (type->revision < *min)
-				*min = type->revision;
-			if (type->revision > *max)
-				*max = type->revision;
+			if (type->revision_min < *min)
+				*min = type->revision_min;
+			if (type->revision_max > *max)
+				*max = type->revision_max;
 		}
 	rcu_read_unlock();
 	if (found)
@@ -159,25 +160,27 @@ ip_set_type_register(struct ip_set_type *type)
 	int ret = 0;
 
 	if (type->protocol != IPSET_PROTOCOL) {
-		pr_warning("ip_set type %s, family %s, revision %u uses "
+		pr_warning("ip_set type %s, family %s, revision %u:%u uses "
 			   "wrong protocol version %u (want %u)\n",
 			   type->name, family_name(type->family),
-			   type->revision, type->protocol, IPSET_PROTOCOL);
+			   type->revision_min, type->revision_max,
+			   type->protocol, IPSET_PROTOCOL);
 		return -EINVAL;
 	}
 
 	ip_set_type_lock();
-	if (find_set_type(type->name, type->family, type->revision)) {
+	if (find_set_type(type->name, type->family, type->revision_min)) {
 		/* Duplicate! */
-		pr_warning("ip_set type %s, family %s, revision %u "
+		pr_warning("ip_set type %s, family %s with revision min %u "
 			   "already registered!\n", type->name,
-			   family_name(type->family), type->revision);
+			   family_name(type->family), type->revision_min);
 		ret = -EINVAL;
 		goto unlock;
 	}
 	list_add_rcu(&type->list, &ip_set_type_list);
-	pr_debug("type %s, family %s, revision %u registered.\n",
-		 type->name, family_name(type->family), type->revision);
+	pr_debug("type %s, family %s, revision %u:%u registered.\n",
+		 type->name, family_name(type->family),
+		 type->revision_min, type->revision_max);
 unlock:
 	ip_set_type_unlock();
 	return ret;
@@ -189,15 +192,15 @@ void
 ip_set_type_unregister(struct ip_set_type *type)
 {
 	ip_set_type_lock();
-	if (!find_set_type(type->name, type->family, type->revision)) {
-		pr_warning("ip_set type %s, family %s, revision %u "
+	if (!find_set_type(type->name, type->family, type->revision_min)) {
+		pr_warning("ip_set type %s, family %s with revision min %u "
 			   "not registered\n", type->name,
-			   family_name(type->family), type->revision);
+			   family_name(type->family), type->revision_min);
 		goto unlock;
 	}
 	list_del_rcu(&type->list);
-	pr_debug("type %s, family %s, revision %u unregistered.\n",
-		 type->name, family_name(type->family), type->revision);
+	pr_debug("type %s, family %s with revision min %u unregistered.\n",
+		 type->name, family_name(type->family), type->revision_min);
 unlock:
 	ip_set_type_unlock();
 
@@ -325,7 +328,8 @@ __ip_set_put(ip_set_id_t index)
 
 int
 ip_set_test(ip_set_id_t index, const struct sk_buff *skb,
-	    u8 family, u8 dim, u8 flags)
+	    const struct xt_action_param *par,
+	    const struct ip_set_adt_opt *opt)
 {
 	struct ip_set *set = ip_set_list[index];
 	int ret = 0;
@@ -333,19 +337,19 @@ ip_set_test(ip_set_id_t index, const struct sk_buff *skb,
 	BUG_ON(set == NULL);
 	pr_debug("set %s, index %u\n", set->name, index);
 
-	if (dim < set->type->dimension ||
-	    !(family == set->family || set->family == AF_UNSPEC))
+	if (opt->dim < set->type->dimension ||
+	    !(opt->family == set->family || set->family == AF_UNSPEC))
 		return 0;
 
 	read_lock_bh(&set->lock);
-	ret = set->variant->kadt(set, skb, IPSET_TEST, family, dim, flags);
+	ret = set->variant->kadt(set, skb, par, IPSET_TEST, opt);
 	read_unlock_bh(&set->lock);
 
 	if (ret == -EAGAIN) {
 		/* Type requests element to be completed */
 		pr_debug("element must be competed, ADD is triggered\n");
 		write_lock_bh(&set->lock);
-		set->variant->kadt(set, skb, IPSET_ADD, family, dim, flags);
+		set->variant->kadt(set, skb, par, IPSET_ADD, opt);
 		write_unlock_bh(&set->lock);
 		ret = 1;
 	}
@@ -357,7 +361,8 @@ EXPORT_SYMBOL_GPL(ip_set_test);
 
 int
 ip_set_add(ip_set_id_t index, const struct sk_buff *skb,
-	   u8 family, u8 dim, u8 flags)
+	   const struct xt_action_param *par,
+	   const struct ip_set_adt_opt *opt)
 {
 	struct ip_set *set = ip_set_list[index];
 	int ret;
@@ -365,12 +370,12 @@ ip_set_add(ip_set_id_t index, const struct sk_buff *skb,
 	BUG_ON(set == NULL);
 	pr_debug("set %s, index %u\n", set->name, index);
 
-	if (dim < set->type->dimension ||
-	    !(family == set->family || set->family == AF_UNSPEC))
+	if (opt->dim < set->type->dimension ||
+	    !(opt->family == set->family || set->family == AF_UNSPEC))
 		return 0;
 
 	write_lock_bh(&set->lock);
-	ret = set->variant->kadt(set, skb, IPSET_ADD, family, dim, flags);
+	ret = set->variant->kadt(set, skb, par, IPSET_ADD, opt);
 	write_unlock_bh(&set->lock);
 
 	return ret;
@@ -379,7 +384,8 @@ EXPORT_SYMBOL_GPL(ip_set_add);
 
 int
 ip_set_del(ip_set_id_t index, const struct sk_buff *skb,
-	   u8 family, u8 dim, u8 flags)
+	   const struct xt_action_param *par,
+	   const struct ip_set_adt_opt *opt)
 {
 	struct ip_set *set = ip_set_list[index];
 	int ret = 0;
@@ -387,12 +393,12 @@ ip_set_del(ip_set_id_t index, const struct sk_buff *skb,
 	BUG_ON(set == NULL);
 	pr_debug("set %s, index %u\n", set->name, index);
 
-	if (dim < set->type->dimension ||
-	    !(family == set->family || set->family == AF_UNSPEC))
+	if (opt->dim < set->type->dimension ||
+	    !(opt->family == set->family || set->family == AF_UNSPEC))
 		return 0;
 
 	write_lock_bh(&set->lock);
-	ret = set->variant->kadt(set, skb, IPSET_DEL, family, dim, flags);
+	ret = set->variant->kadt(set, skb, par, IPSET_DEL, opt);
 	write_unlock_bh(&set->lock);
 
 	return ret;
@@ -656,6 +662,7 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb,
 	rwlock_init(&set->lock);
 	strlcpy(set->name, name, IPSET_MAXNAMELEN);
 	set->family = family;
+	set->revision = revision;
 
 	/*
 	 * Next, check that we know the type, and take
@@ -675,8 +682,8 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb,
 	if (attr[IPSET_ATTR_DATA] &&
 	    nla_parse_nested(tb, IPSET_ATTR_CREATE_MAX, attr[IPSET_ATTR_DATA],
 			     set->type->create_policy)) {
-	    	ret = -IPSET_ERR_PROTOCOL;
-	    	goto put_out;
+		ret = -IPSET_ERR_PROTOCOL;
+		goto put_out;
 	}
 
 	ret = set->type->create(set, tb, flags);
@@ -696,7 +703,8 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb,
 	    (flags & IPSET_FLAG_EXIST) &&
 	    STREQ(set->type->name, clash->type->name) &&
 	    set->type->family == clash->type->family &&
-	    set->type->revision == clash->type->revision &&
+	    set->type->revision_min == clash->type->revision_min &&
+	    set->type->revision_max == clash->type->revision_max &&
 	    set->variant->same_set(set, clash))
 		ret = 0;
 	goto cleanup;
@@ -939,10 +947,13 @@ ip_set_swap(struct sock *ctnl, struct sk_buff *skb,
 
 /* List/save set data */
 
-#define DUMP_INIT	0L
-#define DUMP_ALL	1L
-#define DUMP_ONE	2L
-#define DUMP_LAST	3L
+#define DUMP_INIT	0
+#define DUMP_ALL	1
+#define DUMP_ONE	2
+#define DUMP_LAST	3
+
+#define DUMP_TYPE(arg)		(((u32)(arg)) & 0x0000FFFF)
+#define DUMP_FLAGS(arg)		(((u32)(arg)) >> 16)
 
 static int
 ip_set_dump_done(struct netlink_callback *cb)
@@ -973,6 +984,7 @@ dump_init(struct netlink_callback *cb)
 	int min_len = NLMSG_SPACE(sizeof(struct nfgenmsg));
 	struct nlattr *cda[IPSET_ATTR_CMD_MAX+1];
 	struct nlattr *attr = (void *)nlh + min_len;
+	u32 dump_type;
 	ip_set_id_t index;
 
 	/* Second pass, so parser can't fail */
@@ -984,17 +996,22 @@
 	 * [..]: type specific
 	 */
 
-	if (!cda[IPSET_ATTR_SETNAME]) {
-		cb->args[0] = DUMP_ALL;
-		return 0;
-	}
+	if (cda[IPSET_ATTR_SETNAME]) {
+		index = find_set_id(nla_data(cda[IPSET_ATTR_SETNAME]));
+		if (index == IPSET_INVALID_ID)
+			return -ENOENT;
 
-	index = find_set_id(nla_data(cda[IPSET_ATTR_SETNAME]));
-	if (index == IPSET_INVALID_ID)
-		return -ENOENT;
+		dump_type = DUMP_ONE;
+		cb->args[1] = index;
+	} else
+		dump_type = DUMP_ALL;
+
+	if (cda[IPSET_ATTR_FLAGS]) {
+		u32 f = ip_set_get_h32(cda[IPSET_ATTR_FLAGS]);
+		dump_type |= (f << 16);
+	}
+	cb->args[0] = dump_type;
 
-	cb->args[0] = DUMP_ONE;
-	cb->args[1] = index;
 	return 0;
 }
@@ -1005,9 +1022,10 @@ ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
 	struct ip_set *set = NULL;
 	struct nlmsghdr *nlh = NULL;
 	unsigned int flags = NETLINK_CB(cb->skb).pid ? NLM_F_MULTI : 0;
+	u32 dump_type, dump_flags;
 	int ret = 0;
 
-	if (cb->args[0] == DUMP_INIT) {
+	if (!cb->args[0]) {
 		ret = dump_init(cb);
 		if (ret < 0) {
 			nlh = nlmsg_hdr(cb->skb);
@@ -1022,14 +1040,17 @@ ip_set_dump_start(struct sk_buff *skb, struct netlink_callback *cb)
 	if (cb->args[1] >= ip_set_max)
 		goto out;
 
-	max = cb->args[0] == DUMP_ONE ? cb->args[1] + 1 : ip_set_max;
+	dump_type = DUMP_TYPE(cb->args[0]);
+	dump_flags = DUMP_FLAGS(cb->args[0]);
+	max = dump_type == DUMP_ONE ? cb->args[1] + 1 : ip_set_max;
 dump_last:
-	pr_debug("args[0]: %ld args[1]: %ld\n", cb->args[0], cb->args[1]);
+	pr_debug("args[0]: %u %u args[1]: %ld\n",
+		 dump_type, dump_flags, cb->args[1]);
 	for (; cb->args[1] < max; cb->args[1]++) {
 		index = (ip_set_id_t) cb->args[1];
 		set = ip_set_list[index];
 		if (set == NULL) {
-			if (cb->args[0] == DUMP_ONE) {
+			if (dump_type == DUMP_ONE) {
 				ret = -ENOENT;
 				goto out;
 			}
@@ -1038,8 +1059,8 @@ dump_last:
 		/* When dumping all sets, we must dump "sorted"
 		 * so that lists (unions of sets) are dumped last.
 		 */
-		if (cb->args[0] != DUMP_ONE &&
-		    ((cb->args[0] == DUMP_ALL) ==
+		if (dump_type != DUMP_ONE &&
+		    ((dump_type == DUMP_ALL) ==
 		     !!(set->type->features & IPSET_DUMP_LAST)))
 			continue;
 		pr_debug("List set: %s\n", set->name);
@@ -1057,6 +1078,8 @@ dump_last:
 		}
 		NLA_PUT_U8(skb, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL);
 		NLA_PUT_STRING(skb, IPSET_ATTR_SETNAME, set->name);
+		if (dump_flags & IPSET_FLAG_LIST_SETNAME)
+			goto next_set;
 		switch (cb->args[2]) {
 		case 0:
 			/* Core header data */
@@ -1065,28 +1088,27 @@ dump_last:
 			NLA_PUT_U8(skb, IPSET_ATTR_FAMILY,
 				   set->family);
 			NLA_PUT_U8(skb, IPSET_ATTR_REVISION,
-				   set->type->revision);
+				   set->revision);
 			ret = set->variant->head(set, skb);
 			if (ret < 0)
 				goto release_refcount;
+			if (dump_flags & IPSET_FLAG_LIST_HEADER)
+				goto next_set;
 			/* Fall through and add elements */
 		default:
 			read_lock_bh(&set->lock);
 			ret = set->variant->list(set, skb, cb);
 			read_unlock_bh(&set->lock);
-			if (!cb->args[2]) {
+			if (!cb->args[2])
 				/* Set is done, proceed with next one */
-				if (cb->args[0] == DUMP_ONE)
-					cb->args[1] = IPSET_INVALID_ID;
-				else
-					cb->args[1]++;
-			}
+				goto next_set;
 			goto release_refcount;
 		}
 	}
 	/* If we dump all sets, continue with dumping last ones */
-	if (cb->args[0] == DUMP_ALL) {
-		cb->args[0] = DUMP_LAST;
+	if (dump_type == DUMP_ALL) {
+		dump_type = DUMP_LAST;
+		cb->args[0] = dump_type | (dump_flags << 16);
 		cb->args[1] = 0;
 		goto dump_last;
 	}
@@ -1094,6 +1116,11 @@ dump_last:
 
 nla_put_failure:
 	ret = -EFAULT;
+next_set:
+	if (dump_type == DUMP_ONE)
+		cb->args[1] = IPSET_INVALID_ID;
+	else
+		cb->args[1]++;
 release_refcount:
 	/* If there was an error or set is done, release set */
 	if (ret || !cb->args[2]) {
@@ -1120,7 +1147,7 @@ ip_set_dump(struct sock *ctnl, struct sk_buff *skb,
 
 	return netlink_dump_start(ctnl, skb, nlh,
 				  ip_set_dump_start,
-				  ip_set_dump_done);
+				  ip_set_dump_done, 0);
 }
 
 /* Add, del and test */
@@ -1139,17 +1166,18 @@ call_ad(struct sock *ctnl, struct sk_buff *skb, struct ip_set *set,
 	struct nlattr *tb[], enum ipset_adt adt,
 	u32 flags, bool use_lineno)
 {
-	int ret, retried = 0;
+	int ret;
 	u32 lineno = 0;
-	bool eexist = flags & IPSET_FLAG_EXIST;
+	bool eexist = flags & IPSET_FLAG_EXIST, retried = false;
 
 	do {
 		write_lock_bh(&set->lock);
-		ret = set->variant->uadt(set, tb, adt, &lineno, flags);
+		ret = set->variant->uadt(set, tb, adt, &lineno, flags, retried);
 		write_unlock_bh(&set->lock);
+		retried = true;
 	} while (ret == -EAGAIN &&
 		 set->variant->resize &&
-		 (ret = set->variant->resize(set, retried++)) == 0);
+		 (ret = set->variant->resize(set, retried)) == 0);
 
 	if (!ret || (ret == -IPSET_ERR_EXIST && eexist))
 		return 0;
@@ -1322,7 +1350,7 @@ ip_set_utest(struct sock *ctnl, struct sk_buff *skb,
 		return -IPSET_ERR_PROTOCOL;
 
 	read_lock_bh(&set->lock);
-	ret = set->variant->uadt(set, tb, IPSET_TEST, NULL, 0);
+	ret = set->variant->uadt(set, tb, IPSET_TEST, NULL, 0, 0);
 	read_unlock_bh(&set->lock);
 	/* Userspace can't trigger element to be re-added */
 	if (ret == -EAGAIN)
@@ -1365,7 +1393,7 @@ ip_set_header(struct sock *ctnl, struct sk_buff *skb,
 	NLA_PUT_STRING(skb2, IPSET_ATTR_SETNAME, set->name);
 	NLA_PUT_STRING(skb2, IPSET_ATTR_TYPENAME, set->type->name);
 	NLA_PUT_U8(skb2, IPSET_ATTR_FAMILY, set->family);
-	NLA_PUT_U8(skb2, IPSET_ATTR_REVISION, set->type->revision);
+	NLA_PUT_U8(skb2, IPSET_ATTR_REVISION, set->revision);
 	nlmsg_end(skb2, nlh2);
 
 	ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
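Because struct netlink_callback offers only a few unsigned long args[] slots to persist state between dump invocations, the patch packs two 16-bit values into cb->args[0]: the dump type in the low half and the listing flags (IPSET_FLAG_LIST_SETNAME, IPSET_FLAG_LIST_HEADER) in the high half, unpacked with the DUMP_TYPE/DUMP_FLAGS macros added above. A standalone demonstration of the encoding, with the macros copied from the diff and an arbitrary flag value:

#include <stdio.h>
#include <stdint.h>

#define DUMP_ONE	2
#define DUMP_TYPE(arg)	(((uint32_t)(arg)) & 0x0000FFFF)
#define DUMP_FLAGS(arg)	(((uint32_t)(arg)) >> 16)

int main(void)
{
	uint32_t flags = 0x3;	/* sample listing flags */
	uint32_t packed = DUMP_ONE | (flags << 16);	/* as in dump_init() */

	/* prints: type=2 flags=0x3 */
	printf("type=%u flags=0x%x\n", DUMP_TYPE(packed), DUMP_FLAGS(packed));
	return 0;
}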
diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c
index 43bcce20012..f2d576e6b76 100644
--- a/net/netfilter/ipset/ip_set_hash_ip.c
+++ b/net/netfilter/ipset/ip_set_hash_ip.c
@@ -53,7 +53,8 @@ struct hash_ip4_telem {
 
 static inline bool
 hash_ip4_data_equal(const struct hash_ip4_elem *ip1,
-		    const struct hash_ip4_elem *ip2)
+		    const struct hash_ip4_elem *ip2,
+		    u32 *multi)
 {
 	return ip1->ip == ip2->ip;
 }
@@ -108,25 +109,32 @@ nla_put_failure:
 #define HOST_MASK	32
 #include <linux/netfilter/ipset/ip_set_ahash.h>
 
+static inline void
+hash_ip4_data_next(struct ip_set_hash *h, const struct hash_ip4_elem *d)
+{
+	h->next.ip = ntohl(d->ip);
+}
+
 static int
 hash_ip4_kadt(struct ip_set *set, const struct sk_buff *skb,
-	      enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+	      const struct xt_action_param *par,
+	      enum ipset_adt adt, const struct ip_set_adt_opt *opt)
 {
 	const struct ip_set_hash *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	__be32 ip;
 
-	ip4addrptr(skb, flags & IPSET_DIM_ONE_SRC, &ip);
+	ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &ip);
 	ip &= ip_set_netmask(h->netmask);
 	if (ip == 0)
 		return -EINVAL;
 
-	return adtfn(set, &ip, h->timeout);
+	return adtfn(set, &ip, opt_timeout(opt, h), opt->cmdflags);
 }
 
 static int
 hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
-	      enum ipset_adt adt, u32 *lineno, u32 flags)
+	      enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
 {
 	const struct ip_set_hash *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
@@ -157,7 +165,7 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
 		nip = htonl(ip);
 		if (nip == 0)
 			return -IPSET_ERR_HASH_ELEM;
-		return adtfn(set, &nip, timeout);
+		return adtfn(set, &nip, timeout, flags);
 	}
 
 	if (tb[IPSET_ATTR_IP_TO]) {
@@ -171,18 +179,19 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
 
 		if (cidr > 32)
 			return -IPSET_ERR_INVALID_CIDR;
-		ip &= ip_set_hostmask(cidr);
-		ip_to = ip | ~ip_set_hostmask(cidr);
+		ip_set_mask_from_to(ip, ip_to, cidr);
 	} else
 		ip_to = ip;
 
 	hosts = h->netmask == 32 ? 1 : 2 << (32 - h->netmask - 1);
 
+	if (retried)
+		ip = h->next.ip;
 	for (; !before(ip_to, ip); ip += hosts) {
 		nip = htonl(ip);
 		if (nip == 0)
 			return -IPSET_ERR_HASH_ELEM;
-		ret = adtfn(set, &nip, timeout);
+		ret = adtfn(set, &nip, timeout, flags);
 
 		if (ret && !ip_set_eexist(ret, flags))
 			return ret;
@@ -217,7 +226,8 @@ struct hash_ip6_telem {
 
 static inline bool
 hash_ip6_data_equal(const struct hash_ip6_elem *ip1,
-		    const struct hash_ip6_elem *ip2)
+		    const struct hash_ip6_elem *ip2,
+		    u32 *multi)
 {
 	return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0;
 }
@@ -281,20 +291,26 @@ nla_put_failure:
 #define HOST_MASK	128
 #include <linux/netfilter/ipset/ip_set_ahash.h>
 
+static inline void
+hash_ip6_data_next(struct ip_set_hash *h, const struct hash_ip6_elem *d)
+{
+}
+
 static int
 hash_ip6_kadt(struct ip_set *set, const struct sk_buff *skb,
-	      enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+	      const struct xt_action_param *par,
+	      enum ipset_adt adt, const struct ip_set_adt_opt *opt)
 {
 	const struct ip_set_hash *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	union nf_inet_addr ip;
 
-	ip6addrptr(skb, flags & IPSET_DIM_ONE_SRC, &ip.in6);
+	ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &ip.in6);
 	ip6_netmask(&ip, h->netmask);
 	if (ipv6_addr_any(&ip.in6))
 		return -EINVAL;
 
-	return adtfn(set, &ip, h->timeout);
+	return adtfn(set, &ip, opt_timeout(opt, h), opt->cmdflags);
 }
 
 static const struct nla_policy hash_ip6_adt_policy[IPSET_ATTR_ADT_MAX + 1] = {
@@ -305,7 +321,7 @@ static const struct nla_policy hash_ip6_adt_policy[IPSET_ATTR_ADT_MAX + 1] = {
 
 static int
 hash_ip6_uadt(struct ip_set *set, struct nlattr *tb[],
-	      enum ipset_adt adt, u32 *lineno, u32 flags)
+	      enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
 {
 	const struct ip_set_hash *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
@@ -336,7 +352,7 @@ hash_ip6_uadt(struct ip_set *set, struct nlattr *tb[],
 		timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
 	}
 
-	ret = adtfn(set, &ip, timeout);
+	ret = adtfn(set, &ip, timeout, flags);
 
 	return ip_set_eexist(ret, flags) ? 0 : ret;
 }
@@ -428,7 +444,8 @@ static struct ip_set_type hash_ip_type __read_mostly = {
 	.features	= IPSET_TYPE_IP,
 	.dimension	= IPSET_DIM_ONE,
 	.family		= AF_UNSPEC,
-	.revision	= 0,
+	.revision_min	= 0,
+	.revision_max	= 0,
 	.create		= hash_ip_create,
 	.create_policy	= {
		[IPSET_ATTR_HASHSIZE]	= { .type = NLA_U32 },
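The retried/h->next pair introduced for the hash types implements a resume protocol: when adding a range overflows the hash, uadt returns -EAGAIN, call_ad() resizes the set and re-enters uadt with retried set, and the loop restarts from the position recorded by the type's data_next() hook instead of from the beginning of the range, so elements already inserted are not walked again. A deliberately simplified userspace model of the idea (the state handling is condensed and the failure point is faked):

#include <stdio.h>
#include <stdbool.h>

struct state { unsigned next_ip; };

static int add_range(struct state *h, unsigned from, unsigned to, bool retried)
{
	unsigned ip = retried ? h->next_ip : from;

	for (; ip <= to; ip++) {
		h->next_ip = ip;          /* remember the resume point */
		if (ip == 5 && !retried)  /* pretend the table filled up */
			return -1;        /* stands in for -EAGAIN */
		printf("added %u\n", ip);
	}
	return 0;
}

int main(void)
{
	struct state h = { 0 };

	if (add_range(&h, 1, 8, false) != 0)   /* adds 1..4, then "fails" */
		add_range(&h, 1, 8, true);     /* resumes at 5 after "resize" */
	return 0;
}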
diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c
index 14281b6b807..6ee10f5d59b 100644
--- a/net/netfilter/ipset/ip_set_hash_ipport.c
+++ b/net/netfilter/ipset/ip_set_hash_ipport.c
@@ -60,7 +60,8 @@ struct hash_ipport4_telem {
 
 static inline bool
 hash_ipport4_data_equal(const struct hash_ipport4_elem *ip1,
-			const struct hash_ipport4_elem *ip2)
+			const struct hash_ipport4_elem *ip2,
+			u32 *multi)
 {
 	return ip1->ip == ip2->ip &&
 	       ip1->port == ip2->port &&
@@ -124,31 +125,40 @@ nla_put_failure:
 #define HOST_MASK	32
 #include <linux/netfilter/ipset/ip_set_ahash.h>
 
+static inline void
+hash_ipport4_data_next(struct ip_set_hash *h,
+		       const struct hash_ipport4_elem *d)
+{
+	h->next.ip = ntohl(d->ip);
+	h->next.port = ntohs(d->port);
+}
+
 static int
 hash_ipport4_kadt(struct ip_set *set, const struct sk_buff *skb,
-		  enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+		  const struct xt_action_param *par,
+		  enum ipset_adt adt, const struct ip_set_adt_opt *opt)
 {
 	const struct ip_set_hash *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_ipport4_elem data = { };
 
-	if (!ip_set_get_ip4_port(skb, flags & IPSET_DIM_TWO_SRC,
+	if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
 				 &data.port, &data.proto))
 		return -EINVAL;
 
-	ip4addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip);
+	ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip);
 
-	return adtfn(set, &data, h->timeout);
+	return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags);
 }
 
 static int
 hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
-		  enum ipset_adt adt, u32 *lineno, u32 flags)
+		  enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
 {
 	const struct ip_set_hash *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_ipport4_elem data = { };
-	u32 ip, ip_to, p, port, port_to;
+	u32 ip, ip_to, p = 0, port, port_to;
 	u32 timeout = h->timeout;
 	bool with_ports = false;
 	int ret;
@@ -192,7 +202,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
 	if (adt == IPSET_TEST ||
 	    !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR] ||
 	      tb[IPSET_ATTR_PORT_TO])) {
-		ret = adtfn(set, &data, timeout);
+		ret = adtfn(set, &data, timeout, flags);
 		return ip_set_eexist(ret, flags) ? 0 : ret;
 	}
 
@@ -208,8 +218,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
 
 		if (cidr > 32)
 			return -IPSET_ERR_INVALID_CIDR;
-		ip &= ip_set_hostmask(cidr);
-		ip_to = ip | ~ip_set_hostmask(cidr);
+		ip_set_mask_from_to(ip, ip_to, cidr);
 	} else
 		ip_to = ip;
 
@@ -220,17 +229,21 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
 			swap(port, port_to);
 	}
 
-	for (; !before(ip_to, ip); ip++)
-		for (p = port; p <= port_to; p++) {
+	if (retried)
+		ip = h->next.ip;
+	for (; !before(ip_to, ip); ip++) {
+		p = retried && ip == h->next.ip ? h->next.port : port;
+		for (; p <= port_to; p++) {
 			data.ip = htonl(ip);
 			data.port = htons(p);
-			ret = adtfn(set, &data, timeout);
+			ret = adtfn(set, &data, timeout, flags);
 
 			if (ret && !ip_set_eexist(ret, flags))
 				return ret;
 			else
 				ret = 0;
 		}
+	}
 	return ret;
 }
 
@@ -264,7 +277,8 @@ struct hash_ipport6_telem {
 
 static inline bool
 hash_ipport6_data_equal(const struct hash_ipport6_elem *ip1,
-			const struct hash_ipport6_elem *ip2)
+			const struct hash_ipport6_elem *ip2,
+			u32 *multi)
 {
 	return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 &&
 	       ip1->port == ip2->port &&
@@ -328,26 +342,34 @@ nla_put_failure:
 #define HOST_MASK	128
 #include <linux/netfilter/ipset/ip_set_ahash.h>
 
+static inline void
+hash_ipport6_data_next(struct ip_set_hash *h,
+		       const struct hash_ipport6_elem *d)
+{
+	h->next.port = ntohs(d->port);
+}
+
 static int
 hash_ipport6_kadt(struct ip_set *set, const struct sk_buff *skb,
-		  enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+		  const struct xt_action_param *par,
+		  enum ipset_adt adt, const struct ip_set_adt_opt *opt)
 {
 	const struct ip_set_hash *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_ipport6_elem data = { };
 
-	if (!ip_set_get_ip6_port(skb, flags & IPSET_DIM_TWO_SRC,
+	if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
 				 &data.port, &data.proto))
 		return -EINVAL;
 
-	ip6addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
+	ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
 
-	return adtfn(set, &data, h->timeout);
+	return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags);
 }
 
 static int
 hash_ipport6_uadt(struct ip_set *set, struct nlattr *tb[],
-		  enum ipset_adt adt, u32 *lineno, u32 flags)
+		  enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
 {
 	const struct ip_set_hash *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
@@ -396,7 +418,7 @@ hash_ipport6_uadt(struct ip_set *set, struct nlattr *tb[],
 	}
 
 	if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) {
-		ret = adtfn(set, &data, timeout);
+		ret = adtfn(set, &data, timeout, flags);
 		return ip_set_eexist(ret, flags) ? 0 : ret;
 	}
 
@@ -405,9 +427,11 @@ hash_ipport6_uadt(struct ip_set *set, struct nlattr *tb[],
 	if (port > port_to)
 		swap(port, port_to);
 
+	if (retried)
+		port = h->next.port;
 	for (; port <= port_to; port++) {
 		data.port = htons(port);
-		ret = adtfn(set, &data, timeout);
+		ret = adtfn(set, &data, timeout, flags);
 
 		if (ret && !ip_set_eexist(ret, flags))
 			return ret;
@@ -491,7 +515,8 @@ static struct ip_set_type hash_ipport_type __read_mostly = {
 	.features	= IPSET_TYPE_IP | IPSET_TYPE_PORT,
 	.dimension	= IPSET_DIM_TWO,
 	.family		= AF_UNSPEC,
-	.revision	= 1,
+	.revision_min	= 0,
+	.revision_max	= 1,	/* SCTP and UDPLITE support added */
 	.create		= hash_ipport_create,
 	.create_policy	= {
		[IPSET_ATTR_HASHSIZE]	= { .type = NLA_U32 },
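With the revision_min/revision_max pair, a single module now answers for every revision in its declared window, as the find_set_type() change in the core diff shows; hash:ip,port above keeps revision 0 compatibility while advertising revision 1 for the SCTP/UDPLITE extension. The acceptance test reduces to a range check; the sample values below are hypothetical:

#include <stdio.h>
#include <stdint.h>

struct set_type { const char *name; uint8_t rev_min, rev_max; };

static int serves(const struct set_type *t, uint8_t revision)
{
	/* mirrors the revision check added to find_set_type() */
	return revision >= t->rev_min && revision <= t->rev_max;
}

int main(void)
{
	struct set_type t = { "hash:ip,port", 0, 1 };

	/* prints: rev 0: 1, rev 1: 1, rev 2: 0 */
	printf("rev 0: %d, rev 1: %d, rev 2: %d\n",
	       serves(&t, 0), serves(&t, 1), serves(&t, 2));
	return 0;
}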
diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c
index 401c8a2531d..fb90e344e90 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportip.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportip.c
@@ -62,7 +62,8 @@ struct hash_ipportip4_telem {
 
 static inline bool
 hash_ipportip4_data_equal(const struct hash_ipportip4_elem *ip1,
-			  const struct hash_ipportip4_elem *ip2)
+			  const struct hash_ipportip4_elem *ip2,
+			  u32 *multi)
 {
 	return ip1->ip == ip2->ip &&
 	       ip1->ip2 == ip2->ip2 &&
@@ -127,32 +128,41 @@ nla_put_failure:
 #define HOST_MASK	32
 #include <linux/netfilter/ipset/ip_set_ahash.h>
 
+static inline void
+hash_ipportip4_data_next(struct ip_set_hash *h,
+			 const struct hash_ipportip4_elem *d)
+{
+	h->next.ip = ntohl(d->ip);
+	h->next.port = ntohs(d->port);
+}
+
 static int
 hash_ipportip4_kadt(struct ip_set *set, const struct sk_buff *skb,
-		    enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+		    const struct xt_action_param *par,
+		    enum ipset_adt adt, const struct ip_set_adt_opt *opt)
 {
 	const struct ip_set_hash *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_ipportip4_elem data = { };
 
-	if (!ip_set_get_ip4_port(skb, flags & IPSET_DIM_TWO_SRC,
+	if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
 				 &data.port, &data.proto))
 		return -EINVAL;
 
-	ip4addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip);
-	ip4addrptr(skb, flags & IPSET_DIM_THREE_SRC, &data.ip2);
+	ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip);
+	ip4addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &data.ip2);
 
-	return adtfn(set, &data, h->timeout);
+	return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags);
 }
 
 static int
 hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
-		    enum ipset_adt adt, u32 *lineno, u32 flags)
+		    enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
 {
 	const struct ip_set_hash *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_ipportip4_elem data = { };
-	u32 ip, ip_to, p, port, port_to;
+	u32 ip, ip_to, p = 0, port, port_to;
 	u32 timeout = h->timeout;
 	bool with_ports = false;
 	int ret;
@@ -200,7 +210,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
 	if (adt == IPSET_TEST ||
 	    !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR] ||
 	      tb[IPSET_ATTR_PORT_TO])) {
-		ret = adtfn(set, &data, timeout);
+		ret = adtfn(set, &data, timeout, flags);
 		return ip_set_eexist(ret, flags) ? 0 : ret;
 	}
 
@@ -216,8 +226,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
 
 		if (cidr > 32)
 			return -IPSET_ERR_INVALID_CIDR;
-		ip &= ip_set_hostmask(cidr);
-		ip_to = ip | ~ip_set_hostmask(cidr);
+		ip_set_mask_from_to(ip, ip_to, cidr);
 	} else
 		ip_to = ip;
 
@@ -228,17 +237,21 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
 			swap(port, port_to);
 	}
 
-	for (; !before(ip_to, ip); ip++)
-		for (p = port; p <= port_to; p++) {
+	if (retried)
+		ip = h->next.ip;
+	for (; !before(ip_to, ip); ip++) {
+		p = retried && ip == h->next.ip ? h->next.port : port;
+		for (; p <= port_to; p++) {
 			data.ip = htonl(ip);
 			data.port = htons(p);
-			ret = adtfn(set, &data, timeout);
+			ret = adtfn(set, &data, timeout, flags);
 
 			if (ret && !ip_set_eexist(ret, flags))
 				return ret;
 			else
 				ret = 0;
 		}
+	}
 	return ret;
 }
 
@@ -274,7 +287,8 @@ struct hash_ipportip6_telem {
 
 static inline bool
 hash_ipportip6_data_equal(const struct hash_ipportip6_elem *ip1,
-			  const struct hash_ipportip6_elem *ip2)
+			  const struct hash_ipportip6_elem *ip2,
+			  u32 *multi)
 {
 	return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 &&
 	       ipv6_addr_cmp(&ip1->ip2.in6, &ip2->ip2.in6) == 0 &&
@@ -341,27 +355,35 @@ nla_put_failure:
 #define HOST_MASK	128
 #include <linux/netfilter/ipset/ip_set_ahash.h>
 
+static inline void
+hash_ipportip6_data_next(struct ip_set_hash *h,
+			 const struct hash_ipportip6_elem *d)
+{
+	h->next.port = ntohs(d->port);
+}
+
 static int
 hash_ipportip6_kadt(struct ip_set *set, const struct sk_buff *skb,
-		    enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+		    const struct xt_action_param *par,
+		    enum ipset_adt adt, const struct ip_set_adt_opt *opt)
 {
 	const struct ip_set_hash *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_ipportip6_elem data = { };
 
-	if (!ip_set_get_ip6_port(skb, flags & IPSET_DIM_TWO_SRC,
+	if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
 				 &data.port, &data.proto))
 		return -EINVAL;
 
-	ip6addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
-	ip6addrptr(skb, flags & IPSET_DIM_THREE_SRC, &data.ip2.in6);
+	ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
+	ip6addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &data.ip2.in6);
 
-	return adtfn(set, &data, h->timeout);
+	return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags);
 }
 
 static int
 hash_ipportip6_uadt(struct ip_set *set, struct nlattr *tb[],
-		    enum ipset_adt adt, u32 *lineno, u32 flags)
+		    enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
 {
 	const struct ip_set_hash *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
@@ -414,7 +436,7 @@ hash_ipportip6_uadt(struct ip_set *set, struct nlattr *tb[],
 	}
 
 	if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) {
-		ret = adtfn(set, &data, timeout);
+		ret = adtfn(set, &data, timeout, flags);
 		return ip_set_eexist(ret, flags) ? 0 : ret;
 	}
 
@@ -423,9 +445,11 @@ hash_ipportip6_uadt(struct ip_set *set, struct nlattr *tb[],
 	if (port > port_to)
 		swap(port, port_to);
 
+	if (retried)
+		port = h->next.port;
 	for (; port <= port_to; port++) {
 		data.port = htons(port);
-		ret = adtfn(set, &data, timeout);
+		ret = adtfn(set, &data, timeout, flags);
 
 		if (ret && !ip_set_eexist(ret, flags))
 			return ret;
@@ -509,7 +533,8 @@ static struct ip_set_type hash_ipportip_type __read_mostly = {
 	.features	= IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2,
 	.dimension	= IPSET_DIM_THREE,
 	.family		= AF_UNSPEC,
-	.revision	= 1,
+	.revision_min	= 0,
+	.revision_max	= 1,	/* SCTP and UDPLITE support added */
 	.create		= hash_ipportip_create,
 	.create_policy	= {
		[IPSET_ATTR_HASHSIZE]	= { .type = NLA_U32 },
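Several call sites in this patch replace the open-coded pair "ip &= ip_set_hostmask(cidr); ip_to = ip | ~ip_set_hostmask(cidr);" with ip_set_mask_from_to(from, to, cidr). The helper's definition is not part of these hunks, but from the call sites it evidently computes the first and last address of the CIDR block; a standalone model under that assumption:

#include <stdio.h>
#include <stdint.h>

static uint32_t hostmask(uint8_t cidr)
{
	/* network part of a /cidr mask; cidr == 0 yields an empty mask */
	return cidr ? ~((1u << (32 - cidr)) - 1) : 0;
}

static void mask_from_to(uint32_t *from, uint32_t *to, uint8_t cidr)
{
	*from &= hostmask(cidr);	/* first address of the block */
	*to = *from | ~hostmask(cidr);	/* last address of the block */
}

int main(void)
{
	uint32_t from = 0xC0A80177, to;	/* 192.168.1.119, as /24 */

	mask_from_to(&from, &to, 24);
	printf("%08x .. %08x\n", from, to);	/* c0a80100 .. c0a801ff */
	return 0;
}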
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c
index 565a7c5b881..deb3e3dfa5f 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c
@@ -62,7 +62,8 @@ struct hash_ipportnet4_telem {
 
 static inline bool
 hash_ipportnet4_data_equal(const struct hash_ipportnet4_elem *ip1,
-			   const struct hash_ipportnet4_elem *ip2)
+			   const struct hash_ipportnet4_elem *ip2,
+			   u32 *multi)
 {
 	return ip1->ip == ip2->ip &&
 	       ip1->ip2 == ip2->ip2 &&
@@ -140,9 +141,19 @@ nla_put_failure:
 #define HOST_MASK	32
 #include <linux/netfilter/ipset/ip_set_ahash.h>
 
+static inline void
+hash_ipportnet4_data_next(struct ip_set_hash *h,
+			  const struct hash_ipportnet4_elem *d)
+{
+	h->next.ip = ntohl(d->ip);
+	h->next.port = ntohs(d->port);
+	h->next.ip2 = ntohl(d->ip2);
+}
+
 static int
 hash_ipportnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
-		     enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+		     const struct xt_action_param *par,
+		     enum ipset_adt adt, const struct ip_set_adt_opt *opt)
 {
 	const struct ip_set_hash *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
@@ -155,25 +166,26 @@ hash_ipportnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
 	if (adt == IPSET_TEST)
 		data.cidr = HOST_MASK;
 
-	if (!ip_set_get_ip4_port(skb, flags & IPSET_DIM_TWO_SRC,
+	if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
 				 &data.port, &data.proto))
 		return -EINVAL;
 
-	ip4addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip);
-	ip4addrptr(skb, flags & IPSET_DIM_THREE_SRC, &data.ip2);
+	ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip);
+	ip4addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &data.ip2);
 	data.ip2 &= ip_set_netmask(data.cidr);
 
-	return adtfn(set, &data, h->timeout);
+	return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags);
 }
 
 static int
 hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
-		     enum ipset_adt adt, u32 *lineno, u32 flags)
+		     enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
 {
 	const struct ip_set_hash *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_ipportnet4_elem data = { .cidr = HOST_MASK };
-	u32 ip, ip_to, p, port, port_to;
+	u32 ip, ip_to, p = 0, port, port_to;
+	u32 ip2_from = 0, ip2_to, ip2_last, ip2;
 	u32 timeout = h->timeout;
 	bool with_ports = false;
 	int ret;
@@ -187,21 +199,19 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
 	if (tb[IPSET_ATTR_LINENO])
 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
 
-	ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &data.ip);
+	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
 	if (ret)
 		return ret;
 
-	ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP2], &data.ip2);
+	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2], &ip2_from);
 	if (ret)
 		return ret;
 
-	if (tb[IPSET_ATTR_CIDR2])
+	if (tb[IPSET_ATTR_CIDR2]) {
 		data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
-
-	if (!data.cidr)
-		return -IPSET_ERR_INVALID_CIDR;
-
-	data.ip2 &= ip_set_netmask(data.cidr);
+		if (!data.cidr)
+			return -IPSET_ERR_INVALID_CIDR;
+	}
 
 	if (tb[IPSET_ATTR_PORT])
 		data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
@@ -226,14 +236,16 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
 		timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
 	}
 
+	with_ports = with_ports && tb[IPSET_ATTR_PORT_TO];
 	if (adt == IPSET_TEST ||
-	    !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR] ||
-	      tb[IPSET_ATTR_PORT_TO])) {
-		ret = adtfn(set, &data, timeout);
+	    !(tb[IPSET_ATTR_CIDR] || tb[IPSET_ATTR_IP_TO] || with_ports ||
+	      tb[IPSET_ATTR_IP2_TO])) {
+		data.ip = htonl(ip);
+		data.ip2 = htonl(ip2_from & ip_set_hostmask(data.cidr));
+		ret = adtfn(set, &data, timeout, flags);
 		return ip_set_eexist(ret, flags) ? 0 : ret;
 	}
 
-	ip = ntohl(data.ip);
 	if (tb[IPSET_ATTR_IP_TO]) {
 		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
 		if (ret)
@@ -245,29 +257,50 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
 
 		if (cidr > 32)
 			return -IPSET_ERR_INVALID_CIDR;
-		ip &= ip_set_hostmask(cidr);
-		ip_to = ip | ~ip_set_hostmask(cidr);
-	} else
-		ip_to = ip;
+		ip_set_mask_from_to(ip, ip_to, cidr);
+	}
 
 	port_to = port = ntohs(data.port);
-	if (with_ports && tb[IPSET_ATTR_PORT_TO]) {
+	if (tb[IPSET_ATTR_PORT_TO]) {
 		port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
 		if (port > port_to)
 			swap(port, port_to);
 	}
+	if (tb[IPSET_ATTR_IP2_TO]) {
+		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2_TO], &ip2_to);
+		if (ret)
+			return ret;
+		if (ip2_from > ip2_to)
+			swap(ip2_from, ip2_to);
+		if (ip2_from + UINT_MAX == ip2_to)
+			return -IPSET_ERR_HASH_RANGE;
+	} else {
+		ip_set_mask_from_to(ip2_from, ip2_to, data.cidr);
+	}
 
-	for (; !before(ip_to, ip); ip++)
-		for (p = port; p <= port_to; p++) {
-			data.ip = htonl(ip);
+	if (retried)
+		ip = h->next.ip;
+	for (; !before(ip_to, ip); ip++) {
+		data.ip = htonl(ip);
+		p = retried && ip == h->next.ip ? h->next.port : port;
+		for (; p <= port_to; p++) {
 			data.port = htons(p);
-			ret = adtfn(set, &data, timeout);
-
-			if (ret && !ip_set_eexist(ret, flags))
-				return ret;
-			else
-				ret = 0;
+			ip2 = retried && ip == h->next.ip && p == h->next.port
+				? h->next.ip2 : ip2_from;
+			while (!after(ip2, ip2_to)) {
+				data.ip2 = htonl(ip2);
+				ip2_last = ip_set_range_to_cidr(ip2, ip2_to,
+								&data.cidr);
+				ret = adtfn(set, &data, timeout, flags);
+
+				if (ret && !ip_set_eexist(ret, flags))
+					return ret;
+				else
+					ret = 0;
+				ip2 = ip2_last + 1;
+			}
 		}
+	}
 	return ret;
 }
 
@@ -303,7 +336,8 @@ struct hash_ipportnet6_telem {
 
 static inline bool
 hash_ipportnet6_data_equal(const struct hash_ipportnet6_elem *ip1,
-			   const struct hash_ipportnet6_elem *ip2)
+			   const struct hash_ipportnet6_elem *ip2,
+			   u32 *multi)
 {
 	return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 &&
 	       ipv6_addr_cmp(&ip1->ip2.in6, &ip2->ip2.in6) == 0 &&
@@ -389,9 +423,17 @@ nla_put_failure:
 #define HOST_MASK	128
 #include <linux/netfilter/ipset/ip_set_ahash.h>
 
+static inline void
+hash_ipportnet6_data_next(struct ip_set_hash *h,
+			  const struct hash_ipportnet6_elem *d)
+{
+	h->next.port = ntohs(d->port);
+}
+
 static int
 hash_ipportnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
-		     enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+		     const struct xt_action_param *par,
+		     enum ipset_adt adt, const struct ip_set_adt_opt *opt)
 {
 	const struct ip_set_hash *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
@@ -404,20 +446,20 @@ hash_ipportnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
 	if (adt == IPSET_TEST)
 		data.cidr = HOST_MASK;
 
-	if (!ip_set_get_ip6_port(skb, flags & IPSET_DIM_TWO_SRC,
+	if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
 				 &data.port, &data.proto))
 		return -EINVAL;
 
-	ip6addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
-	ip6addrptr(skb, flags & IPSET_DIM_THREE_SRC, &data.ip2.in6);
+	ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
+	ip6addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &data.ip2.in6);
 	ip6_netmask(&data.ip2, data.cidr);
 
-	return adtfn(set, &data, h->timeout);
+	return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags);
 }
 
 static int
 hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
-		     enum ipset_adt adt, u32 *lineno, u32 flags)
+		     enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
 {
 	const struct ip_set_hash *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
@@ -434,6 +476,8 @@ hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
 		     tb[IPSET_ATTR_IP_TO] ||
 		     tb[IPSET_ATTR_CIDR]))
 		return -IPSET_ERR_PROTOCOL;
+	if (unlikely(tb[IPSET_ATTR_IP_TO]))
+		return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
 
 	if (tb[IPSET_ATTR_LINENO])
 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
@@ -478,7 +522,7 @@ hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
 	}
 
 	if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) {
-		ret = adtfn(set, &data, timeout);
+		ret = adtfn(set, &data, timeout, flags);
 		return ip_set_eexist(ret, flags) ? 0 : ret;
 	}
 
@@ -487,9 +531,11 @@ hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
 	if (port > port_to)
 		swap(port, port_to);
 
+	if (retried)
+		port = h->next.port;
 	for (; port <= port_to; port++) {
 		data.port = htons(port);
-		ret = adtfn(set, &data, timeout);
+		ret = adtfn(set, &data, timeout, flags);
 
 		if (ret && !ip_set_eexist(ret, flags))
 			return ret;
@@ -576,7 +622,9 @@ static struct ip_set_type hash_ipportnet_type __read_mostly = {
 	.features	= IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2,
 	.dimension	= IPSET_DIM_THREE,
 	.family		= AF_UNSPEC,
-	.revision	= 1,
+	.revision_min	= 0,
+	/*		  1	   SCTP and UDPLITE support added */
+	.revision_max	= 2,	/* Range as input support for IPv4 added */
 	.create		= hash_ipportnet_create,
 	.create_policy	= {
 		[IPSET_ATTR_HASHSIZE]	= { .type = NLA_U32 },
@@ -589,6 +637,7 @@ static struct ip_set_type hash_ipportnet_type __read_mostly = {
 		[IPSET_ATTR_IP]		= { .type = NLA_NESTED },
 		[IPSET_ATTR_IP_TO]	= { .type = NLA_NESTED },
 		[IPSET_ATTR_IP2]	= { .type = NLA_NESTED },
+		[IPSET_ATTR_IP2_TO]	= { .type = NLA_NESTED },
 		[IPSET_ATTR_PORT]	= { .type = NLA_U16 },
 		[IPSET_ATTR_PORT_TO]	= { .type = NLA_U16 },
 		[IPSET_ATTR_CIDR]	= { .type = NLA_U8 },
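
The IPv4 uadt handlers above all follow the same pattern: a user-supplied from-to range is decomposed on the fly into the largest aligned blocks with ip_set_range_to_cidr(), and the h->next fields record the resume point, so a request that is retried after a hash resize continues where it stopped instead of starting over. A minimal userspace sketch of that greedy range-to-CIDR split; demo_range_to_cidr() is a made-up stand-in that mirrors the kernel helper's contract, not the kernel function itself:

#include <stdint.h>
#include <stdio.h>

/* Return the last address of the largest CIDR block that starts at
 * 'from', is aligned to its own size, and does not run past 'to';
 * store the prefix length in *cidr. */
static uint32_t demo_range_to_cidr(uint32_t from, uint32_t to, uint8_t *cidr)
{
	for (uint8_t i = 1; i < 32; i++) {
		uint32_t mask = ~0U << (32 - i);	/* prefix of length i */
		uint32_t last = from | ~mask;		/* last addr of the block */

		if ((from & mask) == from && last <= to) {
			*cidr = i;
			return last;
		}
	}
	*cidr = 32;		/* single address */
	return from;
}

int main(void)
{
	uint32_t ip = 0xc0a80001, to = 0xc0a80101;	/* 192.168.0.1 - 192.168.1.1 */
	uint8_t cidr;

	while (ip && ip <= to) {
		uint32_t last = demo_range_to_cidr(ip, to, &cidr);

		printf("%u.%u.%u.%u/%u\n", ip >> 24, (ip >> 16) & 255,
		       (ip >> 8) & 255, ip & 255, cidr);
		ip = last + 1;	/* would wrap to 0 on a full /0 range */
	}
	return 0;
}

This decomposition is what lets the range-capable revisions added here accept inputs like 192.168.0.1-192.168.1.1 and store them as a handful of net elements.
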
diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c
index 2aeeabcd5a2..60d016541c5 100644
--- a/net/netfilter/ipset/ip_set_hash_net.c
+++ b/net/netfilter/ipset/ip_set_hash_net.c
@@ -58,7 +58,8 @@ struct hash_net4_telem {
 
 static inline bool
 hash_net4_data_equal(const struct hash_net4_elem *ip1,
-		     const struct hash_net4_elem *ip2)
+		     const struct hash_net4_elem *ip2,
+		     u32 *multi)
 {
 	return ip1->ip == ip2->ip && ip1->cidr == ip2->cidr;
 }
@@ -125,9 +126,17 @@ nla_put_failure:
 #define HOST_MASK	32
 #include <linux/netfilter/ipset/ip_set_ahash.h>
 
+static inline void
+hash_net4_data_next(struct ip_set_hash *h,
+		    const struct hash_net4_elem *d)
+{
+	h->next.ip = ntohl(d->ip);
+}
+
 static int
 hash_net4_kadt(struct ip_set *set, const struct sk_buff *skb,
-	       enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+	       const struct xt_action_param *par,
+	       enum ipset_adt adt, const struct ip_set_adt_opt *opt)
 {
 	const struct ip_set_hash *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
@@ -140,20 +149,21 @@ hash_net4_kadt(struct ip_set *set, const struct sk_buff *skb,
 	if (adt == IPSET_TEST)
 		data.cidr = HOST_MASK;
 
-	ip4addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip);
+	ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip);
 	data.ip &= ip_set_netmask(data.cidr);
 
-	return adtfn(set, &data, h->timeout);
+	return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags);
 }
 
 static int
 hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
-	       enum ipset_adt adt, u32 *lineno, u32 flags)
+	       enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
 {
 	const struct ip_set_hash *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_net4_elem data = { .cidr = HOST_MASK };
 	u32 timeout = h->timeout;
+	u32 ip = 0, ip_to, last;
 	int ret;
 
 	if (unlikely(!tb[IPSET_ATTR_IP] ||
@@ -163,17 +173,15 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
 	if (tb[IPSET_ATTR_LINENO])
 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
 
-	ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &data.ip);
+	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
 	if (ret)
 		return ret;
 
-	if (tb[IPSET_ATTR_CIDR])
+	if (tb[IPSET_ATTR_CIDR]) {
 		data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
-
-	if (!data.cidr)
-		return -IPSET_ERR_INVALID_CIDR;
-
-	data.ip &= ip_set_netmask(data.cidr);
+		if (!data.cidr)
+			return -IPSET_ERR_INVALID_CIDR;
+	}
 
 	if (tb[IPSET_ATTR_TIMEOUT]) {
 		if (!with_timeout(h->timeout))
@@ -181,9 +189,35 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
 		timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
 	}
 
-	ret = adtfn(set, &data, timeout);
+	if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) {
+		data.ip = htonl(ip & ip_set_hostmask(data.cidr));
+		ret = adtfn(set, &data, timeout, flags);
+		return ip_set_eexist(ret, flags) ? 0 : ret;
+	}
 
-	return ip_set_eexist(ret, flags) ? 0 : ret;
+	ip_to = ip;
+	if (tb[IPSET_ATTR_IP_TO]) {
+		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
+		if (ret)
+			return ret;
+		if (ip_to < ip)
+			swap(ip, ip_to);
+		if (ip + UINT_MAX == ip_to)
+			return -IPSET_ERR_HASH_RANGE;
+	}
+	if (retried)
+		ip = h->next.ip;
+	while (!after(ip, ip_to)) {
+		data.ip = htonl(ip);
+		last = ip_set_range_to_cidr(ip, ip_to, &data.cidr);
+		ret = adtfn(set, &data, timeout, flags);
+		if (ret && !ip_set_eexist(ret, flags))
+			return ret;
+		else
+			ret = 0;
+		ip = last + 1;
+	}
+	return ret;
 }
 
 static bool
@@ -216,7 +250,8 @@ struct hash_net6_telem {
 
 static inline bool
 hash_net6_data_equal(const struct hash_net6_elem *ip1,
-		     const struct hash_net6_elem *ip2)
+		     const struct hash_net6_elem *ip2,
+		     u32 *multi)
 {
 	return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 &&
 	       ip1->cidr == ip2->cidr;
@@ -292,9 +327,16 @@ nla_put_failure:
 #define HOST_MASK	128
 #include <linux/netfilter/ipset/ip_set_ahash.h>
 
+static inline void
+hash_net6_data_next(struct ip_set_hash *h,
+		    const struct hash_net6_elem *d)
+{
+}
+
 static int
 hash_net6_kadt(struct ip_set *set, const struct sk_buff *skb,
-	       enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+	       const struct xt_action_param *par,
+	       enum ipset_adt adt, const struct ip_set_adt_opt *opt)
 {
 	const struct ip_set_hash *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
@@ -307,15 +349,15 @@ hash_net6_kadt(struct ip_set *set, const struct sk_buff *skb,
 	if (adt == IPSET_TEST)
 		data.cidr = HOST_MASK;
 
-	ip6addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
+	ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
 	ip6_netmask(&data.ip, data.cidr);
 
-	return adtfn(set, &data, h->timeout);
+	return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags);
 }
 
 static int
 hash_net6_uadt(struct ip_set *set, struct nlattr *tb[],
-	       enum ipset_adt adt, u32 *lineno, u32 flags)
+	       enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
 {
 	const struct ip_set_hash *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
@@ -326,6 +368,8 @@ hash_net6_uadt(struct ip_set *set, struct nlattr *tb[],
 	if (unlikely(!tb[IPSET_ATTR_IP] ||
 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
 		return -IPSET_ERR_PROTOCOL;
+	if (unlikely(tb[IPSET_ATTR_IP_TO]))
+		return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
 
 	if (tb[IPSET_ATTR_LINENO])
 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
@@ -348,7 +392,7 @@ hash_net6_uadt(struct ip_set *set, struct nlattr *tb[],
 		timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
 	}
 
-	ret = adtfn(set, &data, timeout);
+	ret = adtfn(set, &data, timeout, flags);
 
 	return ip_set_eexist(ret, flags) ? 0 : ret;
 }
@@ -429,7 +473,8 @@ static struct ip_set_type hash_net_type __read_mostly = {
 	.features	= IPSET_TYPE_IP,
 	.dimension	= IPSET_DIM_ONE,
 	.family		= AF_UNSPEC,
-	.revision	= 0,
+	.revision_min	= 0,
+	.revision_max	= 1,	/* Range as input support for IPv4 added */
 	.create		= hash_net_create,
 	.create_policy	= {
 		[IPSET_ATTR_HASHSIZE]	= { .type = NLA_U32 },
@@ -440,6 +485,7 @@ static struct ip_set_type hash_net_type __read_mostly = {
 	},
 	.adt_policy	= {
 		[IPSET_ATTR_IP]		= { .type = NLA_NESTED },
+		[IPSET_ATTR_IP_TO]	= { .type = NLA_NESTED },
 		[IPSET_ATTR_CIDR]	= { .type = NLA_U8 },
 		[IPSET_ATTR_TIMEOUT]	= { .type = NLA_U32 },
 	},
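
One subtlety in the range parsing above: once swap() has ensured ip <= ip_to, the guard 'ip + UINT_MAX == ip_to' matches exactly one input, the full 0.0.0.0-255.255.255.255 range, which must be refused because the iteration step ip = last + 1 would wrap back to zero and never terminate. A toy demonstration of why only that range trips the guard (plain C, not kernel code):

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ip = 0x00000000u, ip_to = 0xffffffffu;

	/* ip + UINT_MAX is computed modulo 2^32, i.e. it equals ip - 1;
	 * with ip <= ip_to that can only match when ip == 0 and
	 * ip_to == UINT_MAX (assumes 32-bit unsigned int). */
	if (ip + UINT_MAX == ip_to)
		puts("rejected: -IPSET_ERR_HASH_RANGE");
	return 0;
}
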
diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c
new file mode 100644
index 00000000000..e13095deb50
--- /dev/null
+++ b/net/netfilter/ipset/ip_set_hash_netiface.c
@@ -0,0 +1,786 @@
+/* Copyright (C) 2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the hash:net,iface type */
+
+#include <linux/jhash.h>
+#include <linux/module.h>
+#include <linux/ip.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/random.h>
+#include <linux/rbtree.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/netlink.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/ipset/pfxlen.h>
+#include <linux/netfilter/ipset/ip_set.h>
+#include <linux/netfilter/ipset/ip_set_timeout.h>
+#include <linux/netfilter/ipset/ip_set_hash.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_DESCRIPTION("hash:net,iface type of IP sets");
+MODULE_ALIAS("ip_set_hash:net,iface");
+
+/* Interface name rbtree */
+
+struct iface_node {
+	struct rb_node node;
+	char iface[IFNAMSIZ];
+};
+
+#define iface_data(n)	(rb_entry(n, struct iface_node, node)->iface)
+
+static inline long
+ifname_compare(const char *_a, const char *_b)
+{
+	const long *a = (const long *)_a;
+	const long *b = (const long *)_b;
+
+	BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long));
+	if (a[0] != b[0])
+		return a[0] - b[0];
+	if (IFNAMSIZ > sizeof(long)) {
+		if (a[1] != b[1])
+			return a[1] - b[1];
+	}
+	if (IFNAMSIZ > 2 * sizeof(long)) {
+		if (a[2] != b[2])
+			return a[2] - b[2];
+	}
+	if (IFNAMSIZ > 3 * sizeof(long)) {
+		if (a[3] != b[3])
+			return a[3] - b[3];
+	}
+	return 0;
+}
+
+static void
+rbtree_destroy(struct rb_root *root)
+{
+	struct rb_node *p, *n = root->rb_node;
+	struct iface_node *node;
+
+	/* Non-recursive destroy, like in ext3 */
+	while (n) {
+		if (n->rb_left) {
+			n = n->rb_left;
+			continue;
+		}
+		if (n->rb_right) {
+			n = n->rb_right;
+			continue;
+		}
+		p = rb_parent(n);
+		node = rb_entry(n, struct iface_node, node);
+		if (!p)
+			*root = RB_ROOT;
+		else if (p->rb_left == n)
+			p->rb_left = NULL;
+		else if (p->rb_right == n)
+			p->rb_right = NULL;
+
+		kfree(node);
+		n = p;
+	}
+}
+
+static int
+iface_test(struct rb_root *root, const char **iface)
+{
+	struct rb_node *n = root->rb_node;
+
+	while (n) {
+		const char *d = iface_data(n);
+		long res = ifname_compare(*iface, d);
+
+		if (res < 0)
+			n = n->rb_left;
+		else if (res > 0)
+			n = n->rb_right;
+		else {
+			*iface = d;
+			return 1;
+		}
+	}
+	return 0;
+}
+
+static int
+iface_add(struct rb_root *root, const char **iface)
+{
+	struct rb_node **n = &(root->rb_node), *p = NULL;
+	struct iface_node *d;
+
+	while (*n) {
+		char *ifname = iface_data(*n);
+		long res = ifname_compare(*iface, ifname);
+
+		p = *n;
+		if (res < 0)
+			n = &((*n)->rb_left);
+		else if (res > 0)
+			n = &((*n)->rb_right);
+		else {
+			*iface = ifname;
+			return 0;
+		}
+	}
+
+	d = kzalloc(sizeof(*d), GFP_ATOMIC);
+	if (!d)
+		return -ENOMEM;
+	strcpy(d->iface, *iface);
+
+	rb_link_node(&d->node, p, n);
+	rb_insert_color(&d->node, root);
+
+	*iface = d->iface;
+	return 0;
+}
+
+/* Type specific function prefix */
+#define TYPE		hash_netiface
+
+static bool
+hash_netiface_same_set(const struct ip_set *a, const struct ip_set *b);
+
+#define hash_netiface4_same_set	hash_netiface_same_set
+#define hash_netiface6_same_set	hash_netiface_same_set
+
+#define STREQ(a, b)	(strcmp(a, b) == 0)
+
+/* The type variant functions: IPv4 */
+
+struct hash_netiface4_elem_hashed {
+	__be32 ip;
+	u8 physdev;
+	u8 cidr;
+	u16 padding;
+};
+
+#define HKEY_DATALEN	sizeof(struct hash_netiface4_elem_hashed)
+
+/* Member elements without timeout */
+struct hash_netiface4_elem {
+	__be32 ip;
+	u8 physdev;
+	u8 cidr;
+	u16 padding;
+	const char *iface;
+};
+
+/* Member elements with timeout support */
+struct hash_netiface4_telem {
+	__be32 ip;
+	u8 physdev;
+	u8 cidr;
+	u16 padding;
+	const char *iface;
+	unsigned long timeout;
+};
+
+static inline bool
+hash_netiface4_data_equal(const struct hash_netiface4_elem *ip1,
+			  const struct hash_netiface4_elem *ip2,
+			  u32 *multi)
+{
+	return ip1->ip == ip2->ip &&
+	       ip1->cidr == ip2->cidr &&
+	       (++*multi) &&
+	       ip1->physdev == ip2->physdev &&
+	       ip1->iface == ip2->iface;
+}
+
+static inline bool
+hash_netiface4_data_isnull(const struct hash_netiface4_elem *elem)
+{
+	return elem->cidr == 0;
+}
+
+static inline void
+hash_netiface4_data_copy(struct hash_netiface4_elem *dst,
+			 const struct hash_netiface4_elem *src) {
+	dst->ip = src->ip;
+	dst->cidr = src->cidr;
+	dst->physdev = src->physdev;
+	dst->iface = src->iface;
+}
+
+static inline void
+hash_netiface4_data_netmask(struct hash_netiface4_elem *elem, u8 cidr)
+{
+	elem->ip &= ip_set_netmask(cidr);
+	elem->cidr = cidr;
+}
+
+static inline void
+hash_netiface4_data_zero_out(struct hash_netiface4_elem *elem)
+{
+	elem->cidr = 0;
+}
+
+static bool
+hash_netiface4_data_list(struct sk_buff *skb,
+			 const struct hash_netiface4_elem *data)
+{
+	u32 flags = data->physdev ? IPSET_FLAG_PHYSDEV : 0;
+
+	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
+	NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
+	NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface);
+	if (flags)
+		NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, flags);
+	return 0;
+
+nla_put_failure:
+	return 1;
+}
+
+static bool
+hash_netiface4_data_tlist(struct sk_buff *skb,
+			  const struct hash_netiface4_elem *data)
+{
+	const struct hash_netiface4_telem *tdata =
+		(const struct hash_netiface4_telem *)data;
+	u32 flags = data->physdev ? IPSET_FLAG_PHYSDEV : 0;
+
+	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
+	NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
+	NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface);
+	if (flags)
+		NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, flags);
+	NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+		      htonl(ip_set_timeout_get(tdata->timeout)));
+
+	return 0;
+
+nla_put_failure:
+	return 1;
+}
+
+#define IP_SET_HASH_WITH_NETS
+#define IP_SET_HASH_WITH_RBTREE
+#define IP_SET_HASH_WITH_MULTI
+
+#define PF		4
+#define HOST_MASK	32
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static inline void
+hash_netiface4_data_next(struct ip_set_hash *h,
+			 const struct hash_netiface4_elem *d)
+{
+	h->next.ip = ntohl(d->ip);
+}
+
+static int
+hash_netiface4_kadt(struct ip_set *set, const struct sk_buff *skb,
+		    const struct xt_action_param *par,
+		    enum ipset_adt adt, const struct ip_set_adt_opt *opt)
+{
+	struct ip_set_hash *h = set->data;
+	ipset_adtfn adtfn = set->variant->adt[adt];
+	struct hash_netiface4_elem data = {
+		.cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
+	};
+	int ret;
+
+	if (data.cidr == 0)
+		return -EINVAL;
+	if (adt == IPSET_TEST)
+		data.cidr = HOST_MASK;
+
+	ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip);
+	data.ip &= ip_set_netmask(data.cidr);
+
+#define IFACE(dir)	(par->dir ? par->dir->name : NULL)
+#define PHYSDEV(dir)	(nf_bridge->dir ? nf_bridge->dir->name : NULL)
+#define SRCDIR		(opt->flags & IPSET_DIM_TWO_SRC)
+
+	if (opt->cmdflags & IPSET_FLAG_PHYSDEV) {
+#ifdef CONFIG_BRIDGE_NETFILTER
+		const struct nf_bridge_info *nf_bridge = skb->nf_bridge;
+
+		if (!nf_bridge)
+			return -EINVAL;
+		data.iface = SRCDIR ? PHYSDEV(physindev) : PHYSDEV(physoutdev);
+		data.physdev = 1;
+#else
+		data.iface = NULL;
+#endif
+	} else
+		data.iface = SRCDIR ? IFACE(in) : IFACE(out);
+
+	if (!data.iface)
+		return -EINVAL;
+	ret = iface_test(&h->rbtree, &data.iface);
+	if (adt == IPSET_ADD) {
+		if (!ret) {
+			ret = iface_add(&h->rbtree, &data.iface);
+			if (ret)
+				return ret;
+		}
+	} else if (!ret)
+		return ret;
+
+	return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags);
+}
+
+static int
+hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
+		    enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
+{
+	struct ip_set_hash *h = set->data;
+	ipset_adtfn adtfn = set->variant->adt[adt];
+	struct hash_netiface4_elem data = { .cidr = HOST_MASK };
+	u32 ip = 0, ip_to, last;
+	u32 timeout = h->timeout;
+	char iface[IFNAMSIZ] = {};
+	int ret;
+
+	if (unlikely(!tb[IPSET_ATTR_IP] ||
+		     !tb[IPSET_ATTR_IFACE] ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
+		return -IPSET_ERR_PROTOCOL;
+
+	if (tb[IPSET_ATTR_LINENO])
+		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
+	if (ret)
+		return ret;
+
+	if (tb[IPSET_ATTR_CIDR]) {
+		data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+		if (!data.cidr)
+			return -IPSET_ERR_INVALID_CIDR;
+	}
+
+	if (tb[IPSET_ATTR_TIMEOUT]) {
+		if (!with_timeout(h->timeout))
+			return -IPSET_ERR_TIMEOUT;
+		timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+	}
+
+	strcpy(iface, nla_data(tb[IPSET_ATTR_IFACE]));
+	data.iface = iface;
+	ret = iface_test(&h->rbtree, &data.iface);
+	if (adt == IPSET_ADD) {
+		if (!ret) {
+			ret = iface_add(&h->rbtree, &data.iface);
+			if (ret)
+				return ret;
+		}
+	} else if (!ret)
+		return ret;
+
+	if (tb[IPSET_ATTR_CADT_FLAGS]) {
+		u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+		if (cadt_flags & IPSET_FLAG_PHYSDEV)
+			data.physdev = 1;
+	}
+
+	if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) {
+		data.ip = htonl(ip & ip_set_hostmask(data.cidr));
+		ret = adtfn(set, &data, timeout, flags);
+		return ip_set_eexist(ret, flags) ? 0 : ret;
+	}
+
+	if (tb[IPSET_ATTR_IP_TO]) {
+		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
+		if (ret)
+			return ret;
+		if (ip_to < ip)
+			swap(ip, ip_to);
+		if (ip + UINT_MAX == ip_to)
+			return -IPSET_ERR_HASH_RANGE;
+	} else {
+		ip_set_mask_from_to(ip, ip_to, data.cidr);
+	}
+
+	if (retried)
+		ip = h->next.ip;
+	while (!after(ip, ip_to)) {
+		data.ip = htonl(ip);
+		last = ip_set_range_to_cidr(ip, ip_to, &data.cidr);
+		ret = adtfn(set, &data, timeout, flags);
+
+		if (ret && !ip_set_eexist(ret, flags))
+			return ret;
+		else
+			ret = 0;
+		ip = last + 1;
+	}
+	return ret;
+}
+
+static bool
+hash_netiface_same_set(const struct ip_set *a, const struct ip_set *b)
+{
+	const struct ip_set_hash *x = a->data;
+	const struct ip_set_hash *y = b->data;
+
+	/* Resizing changes htable_bits, so we ignore it */
+	return x->maxelem == y->maxelem &&
+	       x->timeout == y->timeout;
+}
+
+/* The type variant functions: IPv6 */
+
+struct hash_netiface6_elem_hashed {
+	union nf_inet_addr ip;
+	u8 physdev;
+	u8 cidr;
+	u16 padding;
+};
+
+#define HKEY_DATALEN	sizeof(struct hash_netiface6_elem_hashed)
+
+struct hash_netiface6_elem {
+	union nf_inet_addr ip;
+	u8 physdev;
+	u8 cidr;
+	u16 padding;
+	const char *iface;
+};
+
+struct hash_netiface6_telem {
+	union nf_inet_addr ip;
+	u8 physdev;
+	u8 cidr;
+	u16 padding;
+	const char *iface;
+	unsigned long timeout;
+};
+
+static inline bool
+hash_netiface6_data_equal(const struct hash_netiface6_elem *ip1,
+			  const struct hash_netiface6_elem *ip2,
+			  u32 *multi)
+{
+	return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 &&
+	       ip1->cidr == ip2->cidr &&
+	       (++*multi) &&
+	       ip1->physdev == ip2->physdev &&
+	       ip1->iface == ip2->iface;
+}
+
+static inline bool
+hash_netiface6_data_isnull(const struct hash_netiface6_elem *elem)
+{
+	return elem->cidr == 0;
+}
+
+static inline void
+hash_netiface6_data_copy(struct hash_netiface6_elem *dst,
+			 const struct hash_netiface6_elem *src)
+{
+	memcpy(dst, src, sizeof(*dst));
+}
+
+static inline void
+hash_netiface6_data_zero_out(struct hash_netiface6_elem *elem)
+{
+}
+
+static inline void
+ip6_netmask(union nf_inet_addr *ip, u8 prefix)
+{
+	ip->ip6[0] &= ip_set_netmask6(prefix)[0];
+	ip->ip6[1] &= ip_set_netmask6(prefix)[1];
+	ip->ip6[2] &= ip_set_netmask6(prefix)[2];
+	ip->ip6[3] &= ip_set_netmask6(prefix)[3];
+}
+
+static inline void
+hash_netiface6_data_netmask(struct hash_netiface6_elem *elem, u8 cidr)
+{
+	ip6_netmask(&elem->ip, cidr);
+	elem->cidr = cidr;
+}
+
+static bool
+hash_netiface6_data_list(struct sk_buff *skb,
+			 const struct hash_netiface6_elem *data)
+{
+	u32 flags = data->physdev ? IPSET_FLAG_PHYSDEV : 0;
+
+	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
+	NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
+	NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface);
+	if (flags)
+		NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, flags);
+	return 0;
+
+nla_put_failure:
+	return 1;
+}
+
+static bool
+hash_netiface6_data_tlist(struct sk_buff *skb,
+			  const struct hash_netiface6_elem *data)
+{
+	const struct hash_netiface6_telem *e =
+		(const struct hash_netiface6_telem *)data;
+	u32 flags = data->physdev ? IPSET_FLAG_PHYSDEV : 0;
+
+	NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
+	NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
+	NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface);
+	if (flags)
+		NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, flags);
+	NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
+		      htonl(ip_set_timeout_get(e->timeout)));
+	return 0;
+
+nla_put_failure:
+	return 1;
+}
+
+#undef PF
+#undef HOST_MASK
+
+#define PF		6
+#define HOST_MASK	128
+#include <linux/netfilter/ipset/ip_set_ahash.h>
+
+static inline void
+hash_netiface6_data_next(struct ip_set_hash *h,
+			 const struct hash_netiface6_elem *d)
+{
+}
+
+static int
+hash_netiface6_kadt(struct ip_set *set, const struct sk_buff *skb,
+		    const struct xt_action_param *par,
+		    enum ipset_adt adt, const struct ip_set_adt_opt *opt)
+{
+	struct ip_set_hash *h = set->data;
+	ipset_adtfn adtfn = set->variant->adt[adt];
+	struct hash_netiface6_elem data = {
+		.cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
+	};
+	int ret;
+
+	if (data.cidr == 0)
+		return -EINVAL;
+	if (adt == IPSET_TEST)
+		data.cidr = HOST_MASK;
+
+	ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
+	ip6_netmask(&data.ip, data.cidr);
+
+	if (opt->cmdflags & IPSET_FLAG_PHYSDEV) {
+#ifdef CONFIG_BRIDGE_NETFILTER
+		const struct nf_bridge_info *nf_bridge = skb->nf_bridge;
+
+		if (!nf_bridge)
+			return -EINVAL;
+		data.iface = SRCDIR ? PHYSDEV(physindev) : PHYSDEV(physoutdev);
+		data.physdev = 1;
+#else
+		data.iface = NULL;
+#endif
+	} else
+		data.iface = SRCDIR ? IFACE(in) : IFACE(out);
+
+	if (!data.iface)
+		return -EINVAL;
+	ret = iface_test(&h->rbtree, &data.iface);
+	if (adt == IPSET_ADD) {
+		if (!ret) {
+			ret = iface_add(&h->rbtree, &data.iface);
+			if (ret)
+				return ret;
+		}
+	} else if (!ret)
+		return ret;
+
+	return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags);
+}
+
+static int
+hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[],
+		    enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
+{
+	struct ip_set_hash *h = set->data;
+	ipset_adtfn adtfn = set->variant->adt[adt];
+	struct hash_netiface6_elem data = { .cidr = HOST_MASK };
+	u32 timeout = h->timeout;
+	char iface[IFNAMSIZ] = {};
+	int ret;
+
+	if (unlikely(!tb[IPSET_ATTR_IP] ||
+		     !tb[IPSET_ATTR_IFACE] ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
+		return -IPSET_ERR_PROTOCOL;
+	if (unlikely(tb[IPSET_ATTR_IP_TO]))
+		return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
+
+	if (tb[IPSET_ATTR_LINENO])
+		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
+	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip);
+	if (ret)
+		return ret;
+
+	if (tb[IPSET_ATTR_CIDR])
+		data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+	if (!data.cidr)
+		return -IPSET_ERR_INVALID_CIDR;
+	ip6_netmask(&data.ip, data.cidr);
+
+	if (tb[IPSET_ATTR_TIMEOUT]) {
+		if (!with_timeout(h->timeout))
+			return -IPSET_ERR_TIMEOUT;
+		timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+	}
+
+	strcpy(iface, nla_data(tb[IPSET_ATTR_IFACE]));
+	data.iface = iface;
+	ret = iface_test(&h->rbtree, &data.iface);
+	if (adt == IPSET_ADD) {
+		if (!ret) {
+			ret = iface_add(&h->rbtree, &data.iface);
+			if (ret)
+				return ret;
+		}
+	} else if (!ret)
+		return ret;
+
+	if (tb[IPSET_ATTR_CADT_FLAGS]) {
+		u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+		if (cadt_flags & IPSET_FLAG_PHYSDEV)
+			data.physdev = 1;
+	}
+
+	ret = adtfn(set, &data, timeout, flags);
+
+	return ip_set_eexist(ret, flags) ? 0 : ret;
+}
+
+/* Create hash:ip type of sets */
+
+static int
+hash_netiface_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
+{
+	struct ip_set_hash *h;
+	u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
+	u8 hbits;
+
+	if (!(set->family == AF_INET || set->family == AF_INET6))
+		return -IPSET_ERR_INVALID_FAMILY;
+
+	if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
+		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+		return -IPSET_ERR_PROTOCOL;
+
+	if (tb[IPSET_ATTR_HASHSIZE]) {
+		hashsize = ip_set_get_h32(tb[IPSET_ATTR_HASHSIZE]);
+		if (hashsize < IPSET_MIMINAL_HASHSIZE)
+			hashsize = IPSET_MIMINAL_HASHSIZE;
+	}
+
+	if (tb[IPSET_ATTR_MAXELEM])
+		maxelem = ip_set_get_h32(tb[IPSET_ATTR_MAXELEM]);
+
+	h = kzalloc(sizeof(*h)
+		    + sizeof(struct ip_set_hash_nets)
+		      * (set->family == AF_INET ? 32 : 128), GFP_KERNEL);
+	if (!h)
+		return -ENOMEM;
+
+	h->maxelem = maxelem;
+	get_random_bytes(&h->initval, sizeof(h->initval));
+	h->timeout = IPSET_NO_TIMEOUT;
+	h->ahash_max = AHASH_MAX_SIZE;
+
+	hbits = htable_bits(hashsize);
+	h->table = ip_set_alloc(
+			sizeof(struct htable)
+			+ jhash_size(hbits) * sizeof(struct hbucket));
+	if (!h->table) {
+		kfree(h);
+		return -ENOMEM;
+	}
+	h->table->htable_bits = hbits;
+	h->rbtree = RB_ROOT;
+
+	set->data = h;
+
+	if (tb[IPSET_ATTR_TIMEOUT]) {
+		h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
+
+		set->variant = set->family == AF_INET
+			? &hash_netiface4_tvariant : &hash_netiface6_tvariant;
+
+		if (set->family == AF_INET)
+			hash_netiface4_gc_init(set);
+		else
+			hash_netiface6_gc_init(set);
+	} else {
+		set->variant = set->family == AF_INET
+			? &hash_netiface4_variant : &hash_netiface6_variant;
+	}
+
+	pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
+		 set->name, jhash_size(h->table->htable_bits),
+		 h->table->htable_bits, h->maxelem, set->data, h->table);
+
+	return 0;
+}
+
+static struct ip_set_type hash_netiface_type __read_mostly = {
+	.name		= "hash:net,iface",
+	.protocol	= IPSET_PROTOCOL,
+	.features	= IPSET_TYPE_IP | IPSET_TYPE_IFACE,
+	.dimension	= IPSET_DIM_TWO,
+	.family		= AF_UNSPEC,
+	.revision_min	= 0,
+	.create		= hash_netiface_create,
+	.create_policy	= {
+		[IPSET_ATTR_HASHSIZE]	= { .type = NLA_U32 },
+		[IPSET_ATTR_MAXELEM]	= { .type = NLA_U32 },
+		[IPSET_ATTR_PROBES]	= { .type = NLA_U8 },
+		[IPSET_ATTR_RESIZE]	= { .type = NLA_U8 },
+		[IPSET_ATTR_PROTO]	= { .type = NLA_U8 },
+		[IPSET_ATTR_TIMEOUT]	= { .type = NLA_U32 },
+	},
+	.adt_policy	= {
+		[IPSET_ATTR_IP]		= { .type = NLA_NESTED },
+		[IPSET_ATTR_IP_TO]	= { .type = NLA_NESTED },
+		[IPSET_ATTR_IFACE]	= { .type = NLA_NUL_STRING,
+					    .len = IPSET_MAXNAMELEN - 1 },
+		[IPSET_ATTR_CADT_FLAGS]	= { .type = NLA_U32 },
+		[IPSET_ATTR_CIDR]	= { .type = NLA_U8 },
+		[IPSET_ATTR_TIMEOUT]	= { .type = NLA_U32 },
+		[IPSET_ATTR_LINENO]	= { .type = NLA_U32 },
+	},
+	.me		= THIS_MODULE,
+};
+
+static int __init
+hash_netiface_init(void)
+{
+	return ip_set_type_register(&hash_netiface_type);
+}
+
+static void __exit
+hash_netiface_fini(void)
+{
+	ip_set_type_unregister(&hash_netiface_type);
+}
+
+module_init(hash_netiface_init);
+module_exit(hash_netiface_fini);
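
Two implementation details of the new hash:net,iface type are worth spelling out. Interface names are interned into the per-set rbtree, so each hash element stores only a pointer and hash_netiface*_data_equal() can compare interfaces with a single pointer equality test; and ifname_compare() relies on IFNAMSIZ buffers being NUL-padded to compare names one machine word at a time instead of byte by byte. A rough userspace sketch of the word-at-a-time idea, assuming 16-byte names and 64-bit longs; name_cmp() is an illustrative stand-in, not the kernel function:

#include <stdio.h>
#include <string.h>

#define IFNAMSIZ 16

static long name_cmp(const char *a_, const char *b_)
{
	long a[2], b[2];

	/* memcpy() sidesteps the aliasing cast the kernel version uses. */
	memcpy(a, a_, sizeof(a));
	memcpy(b, b_, sizeof(b));
	if (a[0] != b[0])
		return a[0] - b[0];
	return a[1] - b[1];
}

int main(void)
{
	char x[IFNAMSIZ] = "eth0", y[IFNAMSIZ] = "eth0";

	printf("%s\n", name_cmp(x, y) == 0 ? "equal" : "different");
	return 0;
}

Interning also explains why iface_add() allocates with GFP_ATOMIC: it can be reached from the packet path via the kadt functions, where an add may fail with -ENOMEM.
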
diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c
index e50d9bb8820..8f9de7207ec 100644
--- a/net/netfilter/ipset/ip_set_hash_netport.c
+++ b/net/netfilter/ipset/ip_set_hash_netport.c
@@ -59,7 +59,8 @@ struct hash_netport4_telem {
 
 static inline bool
 hash_netport4_data_equal(const struct hash_netport4_elem *ip1,
-			 const struct hash_netport4_elem *ip2)
+			 const struct hash_netport4_elem *ip2,
+			 u32 *multi)
 {
 	return ip1->ip == ip2->ip &&
 	       ip1->port == ip2->port &&
@@ -137,9 +138,18 @@ nla_put_failure:
 #define HOST_MASK	32
 #include <linux/netfilter/ipset/ip_set_ahash.h>
 
+static inline void
+hash_netport4_data_next(struct ip_set_hash *h,
+			const struct hash_netport4_elem *d)
+{
+	h->next.ip = ntohl(d->ip);
+	h->next.port = ntohs(d->port);
+}
+
 static int
 hash_netport4_kadt(struct ip_set *set, const struct sk_buff *skb,
-		   enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+		   const struct xt_action_param *par,
+		   enum ipset_adt adt, const struct ip_set_adt_opt *opt)
 {
 	const struct ip_set_hash *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
@@ -152,24 +162,24 @@ hash_netport4_kadt(struct ip_set *set, const struct sk_buff *skb,
 	if (adt == IPSET_TEST)
 		data.cidr = HOST_MASK;
 
-	if (!ip_set_get_ip4_port(skb, flags & IPSET_DIM_TWO_SRC,
+	if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
 				 &data.port, &data.proto))
 		return -EINVAL;
 
-	ip4addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip);
+	ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip);
 	data.ip &= ip_set_netmask(data.cidr);
 
-	return adtfn(set, &data, h->timeout);
+	return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags);
 }
 
 static int
 hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
-		   enum ipset_adt adt, u32 *lineno, u32 flags)
+		   enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
 {
 	const struct ip_set_hash *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
 	struct hash_netport4_elem data = { .cidr = HOST_MASK };
-	u32 port, port_to;
+	u32 port, port_to, p = 0, ip = 0, ip_to, last;
 	u32 timeout = h->timeout;
 	bool with_ports = false;
 	int ret;
@@ -183,15 +193,15 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
 	if (tb[IPSET_ATTR_LINENO])
 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
 
-	ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &data.ip);
+	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
 	if (ret)
 		return ret;
 
-	if (tb[IPSET_ATTR_CIDR])
+	if (tb[IPSET_ATTR_CIDR]) {
 		data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
 		if (!data.cidr)
 			return -IPSET_ERR_INVALID_CIDR;
-	data.ip &= ip_set_netmask(data.cidr);
+	}
 
 	if (tb[IPSET_ATTR_PORT])
 		data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
@@ -216,24 +226,47 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
 		timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
 	}
 
-	if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) {
-		ret = adtfn(set, &data, timeout);
+	with_ports = with_ports && tb[IPSET_ATTR_PORT_TO];
+	if (adt == IPSET_TEST || !(with_ports || tb[IPSET_ATTR_IP_TO])) {
+		data.ip = htonl(ip & ip_set_hostmask(data.cidr));
+		ret = adtfn(set, &data, timeout, flags);
 		return ip_set_eexist(ret, flags) ? 0 : ret;
 	}
 
-	port = ntohs(data.port);
-	port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
-	if (port > port_to)
-		swap(port, port_to);
-
-	for (; port <= port_to; port++) {
-		data.port = htons(port);
-		ret = adtfn(set, &data, timeout);
-
-		if (ret && !ip_set_eexist(ret, flags))
-			return ret;
-		else
-			ret = 0;
+	port = port_to = ntohs(data.port);
+	if (tb[IPSET_ATTR_PORT_TO]) {
+		port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
+		if (port_to < port)
+			swap(port, port_to);
+	}
+	if (tb[IPSET_ATTR_IP_TO]) {
+		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
+		if (ret)
+			return ret;
+		if (ip_to < ip)
+			swap(ip, ip_to);
+		if (ip + UINT_MAX == ip_to)
+			return -IPSET_ERR_HASH_RANGE;
+	} else {
+		ip_set_mask_from_to(ip, ip_to, data.cidr);
+	}
+
+	if (retried)
+		ip = h->next.ip;
+	while (!after(ip, ip_to)) {
+		data.ip = htonl(ip);
+		last = ip_set_range_to_cidr(ip, ip_to, &data.cidr);
+		p = retried && ip == h->next.ip ? h->next.port : port;
+		for (; p <= port_to; p++) {
+			data.port = htons(p);
+			ret = adtfn(set, &data, timeout, flags);
+
+			if (ret && !ip_set_eexist(ret, flags))
+				return ret;
+			else
+				ret = 0;
+		}
+		ip = last + 1;
 	}
 	return ret;
 }
@@ -268,7 +301,8 @@ struct hash_netport6_telem {
 
 static inline bool
 hash_netport6_data_equal(const struct hash_netport6_elem *ip1,
-			 const struct hash_netport6_elem *ip2)
+			 const struct hash_netport6_elem *ip2,
+			 u32 *multi)
 {
 	return ipv6_addr_cmp(&ip1->ip.in6, &ip2->ip.in6) == 0 &&
 	       ip1->port == ip2->port &&
@@ -351,9 +385,17 @@ nla_put_failure:
 #define HOST_MASK	128
 #include <linux/netfilter/ipset/ip_set_ahash.h>
 
+static inline void
+hash_netport6_data_next(struct ip_set_hash *h,
+			const struct hash_netport6_elem *d)
+{
+	h->next.port = ntohs(d->port);
+}
+
 static int
 hash_netport6_kadt(struct ip_set *set, const struct sk_buff *skb,
-		   enum ipset_adt adt, u8 pf, u8 dim, u8 flags)
+		   const struct xt_action_param *par,
+		   enum ipset_adt adt, const struct ip_set_adt_opt *opt)
 {
 	const struct ip_set_hash *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
@@ -366,19 +408,19 @@ hash_netport6_kadt(struct ip_set *set, const struct sk_buff *skb,
 	if (adt == IPSET_TEST)
 		data.cidr = HOST_MASK;
 
-	if (!ip_set_get_ip6_port(skb, flags & IPSET_DIM_TWO_SRC,
+	if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
 				 &data.port, &data.proto))
 		return -EINVAL;
 
-	ip6addrptr(skb, flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
+	ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
 	ip6_netmask(&data.ip, data.cidr);
 
-	return adtfn(set, &data, h->timeout);
+	return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags);
 }
 
 static int
 hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[],
-		   enum ipset_adt adt, u32 *lineno, u32 flags)
+		   enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
 {
 	const struct ip_set_hash *h = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
@@ -393,6 +435,8 @@ hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[],
 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
 		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
 		return -IPSET_ERR_PROTOCOL;
+	if (unlikely(tb[IPSET_ATTR_IP_TO]))
+		return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
 
 	if (tb[IPSET_ATTR_LINENO])
 		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
@@ -431,7 +475,7 @@ hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[],
 	}
 
 	if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) {
-		ret = adtfn(set, &data, timeout);
+		ret = adtfn(set, &data, timeout, flags);
 		return ip_set_eexist(ret, flags) ? 0 : ret;
 	}
 
@@ -440,9 +484,11 @@ hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[],
 	if (port > port_to)
 		swap(port, port_to);
 
+	if (retried)
+		port = h->next.port;
 	for (; port <= port_to; port++) {
 		data.port = htons(port);
-		ret = adtfn(set, &data, timeout);
+		ret = adtfn(set, &data, timeout, flags);
 
 		if (ret && !ip_set_eexist(ret, flags))
 			return ret;
@@ -528,7 +574,9 @@ static struct ip_set_type hash_netport_type __read_mostly = {
 	.features	= IPSET_TYPE_IP | IPSET_TYPE_PORT,
 	.dimension	= IPSET_DIM_TWO,
 	.family		= AF_UNSPEC,
-	.revision	= 1,
+	.revision_min	= 0,
+	/*		  1	   SCTP and UDPLITE support added */
+	.revision_max	= 2,	/* Range as input support for IPv4 added */
 	.create		= hash_netport_create,
 	.create_policy	= {
		[IPSET_ATTR_HASHSIZE]	= { .type = NLA_U32 },
@@ -540,6 +588,7 @@ static struct ip_set_type hash_netport_type __read_mostly = {
 	},
 	.adt_policy	= {
 		[IPSET_ATTR_IP]		= { .type = NLA_NESTED },
+		[IPSET_ATTR_IP_TO]	= { .type = NLA_NESTED },
 		[IPSET_ATTR_PORT]	= { .type = NLA_U16 },
 		[IPSET_ATTR_PORT_TO]	= { .type = NLA_U16 },
 		[IPSET_ATTR_PROTO]	= { .type = NLA_U8 },
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index e9159e99fc4..4d10819d462 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -72,7 +72,8 @@ list_set_expired(const struct list_set *map, u32 id)
72 72
73static int 73static int
74list_set_kadt(struct ip_set *set, const struct sk_buff *skb, 74list_set_kadt(struct ip_set *set, const struct sk_buff *skb,
75 enum ipset_adt adt, u8 pf, u8 dim, u8 flags) 75 const struct xt_action_param *par,
76 enum ipset_adt adt, const struct ip_set_adt_opt *opt)
76{ 77{
77 struct list_set *map = set->data; 78 struct list_set *map = set->data;
78 struct set_elem *elem; 79 struct set_elem *elem;
@@ -87,17 +88,17 @@ list_set_kadt(struct ip_set *set, const struct sk_buff *skb,
87 continue; 88 continue;
88 switch (adt) { 89 switch (adt) {
89 case IPSET_TEST: 90 case IPSET_TEST:
90 ret = ip_set_test(elem->id, skb, pf, dim, flags); 91 ret = ip_set_test(elem->id, skb, par, opt);
91 if (ret > 0) 92 if (ret > 0)
92 return ret; 93 return ret;
93 break; 94 break;
94 case IPSET_ADD: 95 case IPSET_ADD:
95 ret = ip_set_add(elem->id, skb, pf, dim, flags); 96 ret = ip_set_add(elem->id, skb, par, opt);
96 if (ret == 0) 97 if (ret == 0)
97 return ret; 98 return ret;
98 break; 99 break;
99 case IPSET_DEL: 100 case IPSET_DEL:
100 ret = ip_set_del(elem->id, skb, pf, dim, flags); 101 ret = ip_set_del(elem->id, skb, par, opt);
101 if (ret == 0) 102 if (ret == 0)
102 return ret; 103 return ret;
103 break; 104 break;
@@ -109,15 +110,28 @@ list_set_kadt(struct ip_set *set, const struct sk_buff *skb,
109} 110}
110 111
111static bool 112static bool
112next_id_eq(const struct list_set *map, u32 i, ip_set_id_t id) 113id_eq(const struct list_set *map, u32 i, ip_set_id_t id)
113{ 114{
114 const struct set_elem *elem; 115 const struct set_elem *elem;
115 116
116 if (i + 1 < map->size) { 117 if (i < map->size) {
117 elem = list_set_elem(map, i + 1); 118 elem = list_set_elem(map, i);
119 return elem->id == id;
120 }
121
122 return 0;
123}
124
125static bool
126id_eq_timeout(const struct list_set *map, u32 i, ip_set_id_t id)
127{
128 const struct set_elem *elem;
129
130 if (i < map->size) {
131 elem = list_set_elem(map, i);
118 return !!(elem->id == id && 132 return !!(elem->id == id &&
119 !(with_timeout(map->timeout) && 133 !(with_timeout(map->timeout) &&
120 list_set_expired(map, i + 1))); 134 list_set_expired(map, i)));
121 } 135 }
122 136
123 return 0; 137 return 0;
@@ -190,12 +204,26 @@ list_set_del(struct list_set *map, u32 i)
190 return 0; 204 return 0;
191} 205}
192 206
207static void
208cleanup_entries(struct list_set *map)
209{
210 struct set_telem *e;
211 u32 i;
212
213 for (i = 0; i < map->size; i++) {
214 e = list_set_telem(map, i);
215 if (e->id != IPSET_INVALID_ID && list_set_expired(map, i))
216 list_set_del(map, i);
217 }
218}
219
193static int 220static int
194list_set_uadt(struct ip_set *set, struct nlattr *tb[], 221list_set_uadt(struct ip_set *set, struct nlattr *tb[],
195 enum ipset_adt adt, u32 *lineno, u32 flags) 222 enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
196{ 223{
197 struct list_set *map = set->data; 224 struct list_set *map = set->data;
198 bool with_timeout = with_timeout(map->timeout); 225 bool with_timeout = with_timeout(map->timeout);
226 bool flag_exist = flags & IPSET_FLAG_EXIST;
199 int before = 0; 227 int before = 0;
200 u32 timeout = map->timeout; 228 u32 timeout = map->timeout;
201 ip_set_id_t id, refid = IPSET_INVALID_ID; 229 ip_set_id_t id, refid = IPSET_INVALID_ID;
@@ -248,6 +276,8 @@ list_set_uadt(struct ip_set *set, struct nlattr *tb[],
248 } 276 }
249 timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); 277 timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
250 } 278 }
279 if (with_timeout && adt != IPSET_TEST)
280 cleanup_entries(map);
251 281
252 switch (adt) { 282 switch (adt) {
253 case IPSET_TEST: 283 case IPSET_TEST:
@@ -259,22 +289,37 @@ list_set_uadt(struct ip_set *set, struct nlattr *tb[],
259 else if (with_timeout && list_set_expired(map, i)) 289 else if (with_timeout && list_set_expired(map, i))
260 continue; 290 continue;
261 else if (before > 0 && elem->id == id) 291 else if (before > 0 && elem->id == id)
262 ret = next_id_eq(map, i, refid); 292 ret = id_eq_timeout(map, i + 1, refid);
263 else if (before < 0 && elem->id == refid) 293 else if (before < 0 && elem->id == refid)
264 ret = next_id_eq(map, i, id); 294 ret = id_eq_timeout(map, i + 1, id);
265 else if (before == 0 && elem->id == id) 295 else if (before == 0 && elem->id == id)
266 ret = 1; 296 ret = 1;
267 } 297 }
268 break; 298 break;
269 case IPSET_ADD: 299 case IPSET_ADD:
270 for (i = 0; i < map->size && !ret; i++) { 300 for (i = 0; i < map->size; i++) {
271 elem = list_set_elem(map, i); 301 elem = list_set_elem(map, i);
272 if (elem->id == id && 302 if (elem->id != id)
273 !(with_timeout && list_set_expired(map, i))) 303 continue;
304 if (!(with_timeout && flag_exist)) {
274 ret = -IPSET_ERR_EXIST; 305 ret = -IPSET_ERR_EXIST;
306 goto finish;
307 } else {
308 struct set_telem *e = list_set_telem(map, i);
309
310 if ((before > 1 &&
311 !id_eq(map, i + 1, refid)) ||
312 (before < 0 &&
313 (i == 0 || !id_eq(map, i - 1, refid)))) {
314 ret = -IPSET_ERR_EXIST;
315 goto finish;
316 }
317 e->timeout = ip_set_timeout_set(timeout);
318 ip_set_put_byindex(id);
319 ret = 0;
320 goto finish;
321 }
275 } 322 }
276 if (ret == -IPSET_ERR_EXIST)
277 break;
278 ret = -IPSET_ERR_LIST_FULL; 323 ret = -IPSET_ERR_LIST_FULL;
279 for (i = 0; i < map->size && ret == -IPSET_ERR_LIST_FULL; i++) { 324 for (i = 0; i < map->size && ret == -IPSET_ERR_LIST_FULL; i++) {
280 elem = list_set_elem(map, i); 325 elem = list_set_elem(map, i);
@@ -283,9 +328,7 @@ list_set_uadt(struct ip_set *set, struct nlattr *tb[],
283 : list_set_add(map, i, id, timeout); 328 : list_set_add(map, i, id, timeout);
284 else if (elem->id != refid) 329 else if (elem->id != refid)
285 continue; 330 continue;
286 else if (with_timeout && list_set_expired(map, i)) 331 else if (before > 0)
287 ret = -IPSET_ERR_REF_EXIST;
288 else if (before)
289 ret = list_set_add(map, i, id, timeout); 332 ret = list_set_add(map, i, id, timeout);
290 else if (i + 1 < map->size) 333 else if (i + 1 < map->size)
291 ret = list_set_add(map, i + 1, id, timeout); 334 ret = list_set_add(map, i + 1, id, timeout);
@@ -299,16 +342,12 @@ list_set_uadt(struct ip_set *set, struct nlattr *tb[],
299 ret = before != 0 ? -IPSET_ERR_REF_EXIST 342 ret = before != 0 ? -IPSET_ERR_REF_EXIST
300 : -IPSET_ERR_EXIST; 343 : -IPSET_ERR_EXIST;
301 break; 344 break;
302 } else if (with_timeout && list_set_expired(map, i)) 345 } else if (elem->id == id &&
303 continue; 346 (before == 0 ||
304 else if (elem->id == id && 347 (before > 0 && id_eq(map, i + 1, refid))))
305 (before == 0 ||
306 (before > 0 &&
307 next_id_eq(map, i, refid))))
308 ret = list_set_del(map, i); 348 ret = list_set_del(map, i);
309 else if (before < 0 && 349 else if (elem->id == refid &&
310 elem->id == refid && 350 before < 0 && id_eq(map, i + 1, id))
311 next_id_eq(map, i, id))
312 ret = list_set_del(map, i + 1); 351 ret = list_set_del(map, i + 1);
313 } 352 }
314 break; 353 break;
@@ -454,15 +493,9 @@ list_set_gc(unsigned long ul_set)
454{ 493{
455 struct ip_set *set = (struct ip_set *) ul_set; 494 struct ip_set *set = (struct ip_set *) ul_set;
456 struct list_set *map = set->data; 495 struct list_set *map = set->data;
457 struct set_telem *e;
458 u32 i;
459 496
460 write_lock_bh(&set->lock); 497 write_lock_bh(&set->lock);
461 for (i = 0; i < map->size; i++) { 498 cleanup_entries(map);
462 e = list_set_telem(map, i);
463 if (e->id != IPSET_INVALID_ID && list_set_expired(map, i))
464 list_set_del(map, i);
465 }
466 write_unlock_bh(&set->lock); 499 write_unlock_bh(&set->lock);
467 500
468 map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ; 501 map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
@@ -543,7 +576,8 @@ static struct ip_set_type list_set_type __read_mostly = {
543 .features = IPSET_TYPE_NAME | IPSET_DUMP_LAST, 576 .features = IPSET_TYPE_NAME | IPSET_DUMP_LAST,
544 .dimension = IPSET_DIM_ONE, 577 .dimension = IPSET_DIM_ONE,
545 .family = AF_UNSPEC, 578 .family = AF_UNSPEC,
546 .revision = 0, 579 .revision_min = 0,
580 .revision_max = 0,
547 .create = list_set_create, 581 .create = list_set_create,
548 .create_policy = { 582 .create_policy = {
549 [IPSET_ATTR_SIZE] = { .type = NLA_U32 }, 583 [IPSET_ATTR_SIZE] = { .type = NLA_U32 },
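
The list:set changes above centralize expiry handling: cleanup_entries() sweeps the whole array once, and list_set_uadt() now runs that sweep before any add or delete on a set with timeouts, instead of testing list_set_expired() at each comparison site. A minimal userspace sketch of the same sweep over an array-backed list follows; struct slot and the slot_del()/sweep_expired() names are invented for illustration and only approximate list_set_del()/cleanup_entries():

#include <stdint.h>
#include <time.h>

#define INVALID_ID 0xffffffffu

struct slot {
	uint32_t id;		/* INVALID_ID marks an unused slot */
	time_t expires;		/* absolute expiry; 0 means no timeout */
};

/* Shift the tail up by one slot, the way list_set_del() compacts
 * the array with memmove(), preserving the remaining order. */
static void slot_del(struct slot *list, uint32_t size, uint32_t i)
{
	for (; i + 1 < size; i++)
		list[i] = list[i + 1];
	list[size - 1].id = INVALID_ID;
	list[size - 1].expires = 0;
}

/* One sweep, mirroring cleanup_entries() above. Like the kernel
 * loop it does not revisit slot i after the deletion shifts a new
 * entry in; anything still expired there is caught by the next
 * sweep or by the list_set_gc() timer. */
static void sweep_expired(struct slot *list, uint32_t size)
{
	time_t now = time(NULL);
	uint32_t i;

	for (i = 0; i < size; i++)
		if (list[i].id != INVALID_ID &&
		    list[i].expires != 0 && list[i].expires <= now)
			slot_del(list, size, i);
}

int main(void)
{
	struct slot list[4] = {
		{ 1, 1 },		/* expired long ago */
		{ 2, 0 },		/* permanent entry */
		{ INVALID_ID, 0 },
		{ INVALID_ID, 0 },
	};

	sweep_expired(list, 4);		/* id 2 now sits in slot 0 */
	return list[0].id == 2 ? 0 : 1;
}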
diff --git a/net/netfilter/ipset/pfxlen.c b/net/netfilter/ipset/pfxlen.c
index 23f8c816221..bd13d66220f 100644
--- a/net/netfilter/ipset/pfxlen.c
+++ b/net/netfilter/ipset/pfxlen.c
@@ -148,7 +148,7 @@ const union nf_inet_addr ip_set_netmask_map[] = {
148EXPORT_SYMBOL_GPL(ip_set_netmask_map); 148EXPORT_SYMBOL_GPL(ip_set_netmask_map);
149 149
150#undef E 150#undef E
151#define E(a, b, c, d) \ 151#define E(a, b, c, d) \
152 {.ip6 = { (__force __be32) a, (__force __be32) b, \ 152 {.ip6 = { (__force __be32) a, (__force __be32) b, \
153 (__force __be32) c, (__force __be32) d, \ 153 (__force __be32) c, (__force __be32) d, \
154 } } 154 } }
@@ -289,3 +289,24 @@ const union nf_inet_addr ip_set_hostmask_map[] = {
289 E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF), 289 E(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF),
290}; 290};
291EXPORT_SYMBOL_GPL(ip_set_hostmask_map); 291EXPORT_SYMBOL_GPL(ip_set_hostmask_map);
292
293/* Find the largest network which matches the range from left, in host order. */
294u32
295ip_set_range_to_cidr(u32 from, u32 to, u8 *cidr)
296{
297 u32 last;
298 u8 i;
299
300 for (i = 1; i < 32; i++) {
301 if ((from & ip_set_hostmask(i)) != from)
302 continue;
303 last = from | ~ip_set_hostmask(i);
304 if (!after(last, to)) {
305 *cidr = i;
306 return last;
307 }
308 }
309 *cidr = 32;
310 return from;
311}
312EXPORT_SYMBOL_GPL(ip_set_range_to_cidr);
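
ip_set_range_to_cidr() greedily picks, at each step, the largest network that starts exactly at 'from' and whose last address does not pass 'to'; callers cover an arbitrary range by repeating with from = last + 1. A self-contained sketch under that reading, where hostmask() and the main() driver are illustrative stand-ins for the kernel's ip_set_hostmask() and after():

#include <stdint.h>
#include <stdio.h>

/* Host-order mask for a prefix length: hostmask(8) == 0xff000000. */
static uint32_t hostmask(uint8_t cidr)
{
	return cidr ? ~((1u << (32 - cidr)) - 1) : 0;
}

/* Greedy step of ip_set_range_to_cidr(): return the last address of
 * the largest network that begins exactly at 'from' and does not
 * extend past 'to'; store its prefix length in *cidr. */
static uint32_t range_to_cidr(uint32_t from, uint32_t to, uint8_t *cidr)
{
	uint8_t i;

	for (i = 1; i < 32; i++) {
		if ((from & hostmask(i)) != from)
			continue;		/* 'from' not aligned to a /i */
		uint32_t last = from | ~hostmask(i);
		if (last <= to) {
			*cidr = i;
			return last;
		}
	}
	*cidr = 32;				/* fall back to a single host */
	return from;
}

int main(void)
{
	/* 10.0.0.0-10.0.0.255 (0x0a000000-0x0a0000ff) collapses to a /24;
	 * an unaligned start such as 10.0.0.1 would begin with a /32. */
	uint8_t cidr;
	uint32_t last = range_to_cidr(0x0a000000u, 0x0a0000ffu, &cidr);

	printf("/%u, last=0x%08x\n", cidr, last);
	return 0;
}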
diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
index 059af3120be..fe6cb4304d7 100644
--- a/net/netfilter/ipvs/ip_vs_app.c
+++ b/net/netfilter/ipvs/ip_vs_app.c
@@ -576,7 +576,7 @@ static const struct file_operations ip_vs_app_fops = {
576}; 576};
577#endif 577#endif
578 578
579int __net_init __ip_vs_app_init(struct net *net) 579int __net_init ip_vs_app_net_init(struct net *net)
580{ 580{
581 struct netns_ipvs *ipvs = net_ipvs(net); 581 struct netns_ipvs *ipvs = net_ipvs(net);
582 582
@@ -585,17 +585,7 @@ int __net_init __ip_vs_app_init(struct net *net)
585 return 0; 585 return 0;
586} 586}
587 587
588void __net_exit __ip_vs_app_cleanup(struct net *net) 588void __net_exit ip_vs_app_net_cleanup(struct net *net)
589{ 589{
590 proc_net_remove(net, "ip_vs_app"); 590 proc_net_remove(net, "ip_vs_app");
591} 591}
592
593int __init ip_vs_app_init(void)
594{
595 return 0;
596}
597
598
599void ip_vs_app_cleanup(void)
600{
601}
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 782db275ac5..12571fb2881 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -1255,7 +1255,7 @@ flush_again:
1255/* 1255/*
1256 * per netns init and exit 1256 * per netns init and exit
1257 */ 1257 */
1258int __net_init __ip_vs_conn_init(struct net *net) 1258int __net_init ip_vs_conn_net_init(struct net *net)
1259{ 1259{
1260 struct netns_ipvs *ipvs = net_ipvs(net); 1260 struct netns_ipvs *ipvs = net_ipvs(net);
1261 1261
@@ -1266,7 +1266,7 @@ int __net_init __ip_vs_conn_init(struct net *net)
1266 return 0; 1266 return 0;
1267} 1267}
1268 1268
1269void __net_exit __ip_vs_conn_cleanup(struct net *net) 1269void __net_exit ip_vs_conn_net_cleanup(struct net *net)
1270{ 1270{
1271 /* flush all the connection entries first */ 1271 /* flush all the connection entries first */
1272 ip_vs_conn_flush(net); 1272 ip_vs_conn_flush(net);
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 24c28d238dc..4f77bb16d22 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -852,7 +852,7 @@ static int ip_vs_out_icmp(struct sk_buff *skb, int *related,
852 *related = 1; 852 *related = 1;
853 853
854 /* reassemble IP fragments */ 854 /* reassemble IP fragments */
855 if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) { 855 if (ip_is_fragment(ip_hdr(skb))) {
856 if (ip_vs_gather_frags(skb, ip_vs_defrag_user(hooknum))) 856 if (ip_vs_gather_frags(skb, ip_vs_defrag_user(hooknum)))
857 return NF_STOLEN; 857 return NF_STOLEN;
858 } 858 }
@@ -1156,8 +1156,7 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
1156 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph); 1156 ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
1157 } else 1157 } else
1158#endif 1158#endif
1159 if (unlikely(ip_hdr(skb)->frag_off & htons(IP_MF|IP_OFFSET) && 1159 if (unlikely(ip_is_fragment(ip_hdr(skb)) && !pp->dont_defrag)) {
1160 !pp->dont_defrag)) {
1161 if (ip_vs_gather_frags(skb, 1160 if (ip_vs_gather_frags(skb,
1162 ip_vs_defrag_user(hooknum))) 1161 ip_vs_defrag_user(hooknum)))
1163 return NF_STOLEN; 1162 return NF_STOLEN;
@@ -1310,7 +1309,7 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
1310 *related = 1; 1309 *related = 1;
1311 1310
1312 /* reassemble IP fragments */ 1311 /* reassemble IP fragments */
1313 if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) { 1312 if (ip_is_fragment(ip_hdr(skb))) {
1314 if (ip_vs_gather_frags(skb, ip_vs_defrag_user(hooknum))) 1313 if (ip_vs_gather_frags(skb, ip_vs_defrag_user(hooknum)))
1315 return NF_STOLEN; 1314 return NF_STOLEN;
1316 } 1315 }
@@ -1384,7 +1383,7 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
1384 offset += 2 * sizeof(__u16); 1383 offset += 2 * sizeof(__u16);
1385 verdict = ip_vs_icmp_xmit(skb, cp, pp, offset, hooknum); 1384 verdict = ip_vs_icmp_xmit(skb, cp, pp, offset, hooknum);
1386 1385
1387 out: 1386out:
1388 __ip_vs_conn_put(cp); 1387 __ip_vs_conn_put(cp);
1389 1388
1390 return verdict; 1389 return verdict;
@@ -1891,22 +1890,22 @@ static int __net_init __ip_vs_init(struct net *net)
1891 atomic_inc(&ipvs_netns_cnt); 1890 atomic_inc(&ipvs_netns_cnt);
1892 net->ipvs = ipvs; 1891 net->ipvs = ipvs;
1893 1892
1894 if (__ip_vs_estimator_init(net) < 0) 1893 if (ip_vs_estimator_net_init(net) < 0)
1895 goto estimator_fail; 1894 goto estimator_fail;
1896 1895
1897 if (__ip_vs_control_init(net) < 0) 1896 if (ip_vs_control_net_init(net) < 0)
1898 goto control_fail; 1897 goto control_fail;
1899 1898
1900 if (__ip_vs_protocol_init(net) < 0) 1899 if (ip_vs_protocol_net_init(net) < 0)
1901 goto protocol_fail; 1900 goto protocol_fail;
1902 1901
1903 if (__ip_vs_app_init(net) < 0) 1902 if (ip_vs_app_net_init(net) < 0)
1904 goto app_fail; 1903 goto app_fail;
1905 1904
1906 if (__ip_vs_conn_init(net) < 0) 1905 if (ip_vs_conn_net_init(net) < 0)
1907 goto conn_fail; 1906 goto conn_fail;
1908 1907
1909 if (__ip_vs_sync_init(net) < 0) 1908 if (ip_vs_sync_net_init(net) < 0)
1910 goto sync_fail; 1909 goto sync_fail;
1911 1910
1912 printk(KERN_INFO "IPVS: Creating netns size=%zu id=%d\n", 1911 printk(KERN_INFO "IPVS: Creating netns size=%zu id=%d\n",
@@ -1917,27 +1916,27 @@ static int __net_init __ip_vs_init(struct net *net)
1917 */ 1916 */
1918 1917
1919sync_fail: 1918sync_fail:
1920 __ip_vs_conn_cleanup(net); 1919 ip_vs_conn_net_cleanup(net);
1921conn_fail: 1920conn_fail:
1922 __ip_vs_app_cleanup(net); 1921 ip_vs_app_net_cleanup(net);
1923app_fail: 1922app_fail:
1924 __ip_vs_protocol_cleanup(net); 1923 ip_vs_protocol_net_cleanup(net);
1925protocol_fail: 1924protocol_fail:
1926 __ip_vs_control_cleanup(net); 1925 ip_vs_control_net_cleanup(net);
1927control_fail: 1926control_fail:
1928 __ip_vs_estimator_cleanup(net); 1927 ip_vs_estimator_net_cleanup(net);
1929estimator_fail: 1928estimator_fail:
1930 return -ENOMEM; 1929 return -ENOMEM;
1931} 1930}
1932 1931
1933static void __net_exit __ip_vs_cleanup(struct net *net) 1932static void __net_exit __ip_vs_cleanup(struct net *net)
1934{ 1933{
1935 __ip_vs_service_cleanup(net); /* ip_vs_flush() with locks */ 1934 ip_vs_service_net_cleanup(net); /* ip_vs_flush() with locks */
1936 __ip_vs_conn_cleanup(net); 1935 ip_vs_conn_net_cleanup(net);
1937 __ip_vs_app_cleanup(net); 1936 ip_vs_app_net_cleanup(net);
1938 __ip_vs_protocol_cleanup(net); 1937 ip_vs_protocol_net_cleanup(net);
1939 __ip_vs_control_cleanup(net); 1938 ip_vs_control_net_cleanup(net);
1940 __ip_vs_estimator_cleanup(net); 1939 ip_vs_estimator_net_cleanup(net);
1941 IP_VS_DBG(2, "ipvs netns %d released\n", net_ipvs(net)->gen); 1940 IP_VS_DBG(2, "ipvs netns %d released\n", net_ipvs(net)->gen);
1942} 1941}
1943 1942
@@ -1946,7 +1945,7 @@ static void __net_exit __ip_vs_dev_cleanup(struct net *net)
1946 EnterFunction(2); 1945 EnterFunction(2);
1947 net_ipvs(net)->enable = 0; /* Disable packet reception */ 1946 net_ipvs(net)->enable = 0; /* Disable packet reception */
1948 smp_wmb(); 1947 smp_wmb();
1949 __ip_vs_sync_cleanup(net); 1948 ip_vs_sync_net_cleanup(net);
1950 LeaveFunction(2); 1949 LeaveFunction(2);
1951} 1950}
1952 1951
@@ -1968,36 +1967,23 @@ static int __init ip_vs_init(void)
1968{ 1967{
1969 int ret; 1968 int ret;
1970 1969
1971 ip_vs_estimator_init();
1972 ret = ip_vs_control_init(); 1970 ret = ip_vs_control_init();
1973 if (ret < 0) { 1971 if (ret < 0) {
1974 pr_err("can't setup control.\n"); 1972 pr_err("can't setup control.\n");
1975 goto cleanup_estimator; 1973 goto exit;
1976 } 1974 }
1977 1975
1978 ip_vs_protocol_init(); 1976 ip_vs_protocol_init();
1979 1977
1980 ret = ip_vs_app_init();
1981 if (ret < 0) {
1982 pr_err("can't setup application helper.\n");
1983 goto cleanup_protocol;
1984 }
1985
1986 ret = ip_vs_conn_init(); 1978 ret = ip_vs_conn_init();
1987 if (ret < 0) { 1979 if (ret < 0) {
1988 pr_err("can't setup connection table.\n"); 1980 pr_err("can't setup connection table.\n");
1989 goto cleanup_app; 1981 goto cleanup_protocol;
1990 }
1991
1992 ret = ip_vs_sync_init();
1993 if (ret < 0) {
1994 pr_err("can't setup sync data.\n");
1995 goto cleanup_conn;
1996 } 1982 }
1997 1983
1998 ret = register_pernet_subsys(&ipvs_core_ops); /* Alloc ip_vs struct */ 1984 ret = register_pernet_subsys(&ipvs_core_ops); /* Alloc ip_vs struct */
1999 if (ret < 0) 1985 if (ret < 0)
2000 goto cleanup_sync; 1986 goto cleanup_conn;
2001 1987
2002 ret = register_pernet_device(&ipvs_core_dev_ops); 1988 ret = register_pernet_device(&ipvs_core_dev_ops);
2003 if (ret < 0) 1989 if (ret < 0)
@@ -2017,17 +2003,12 @@ cleanup_dev:
2017 unregister_pernet_device(&ipvs_core_dev_ops); 2003 unregister_pernet_device(&ipvs_core_dev_ops);
2018cleanup_sub: 2004cleanup_sub:
2019 unregister_pernet_subsys(&ipvs_core_ops); 2005 unregister_pernet_subsys(&ipvs_core_ops);
2020cleanup_sync: 2006cleanup_conn:
2021 ip_vs_sync_cleanup();
2022 cleanup_conn:
2023 ip_vs_conn_cleanup(); 2007 ip_vs_conn_cleanup();
2024 cleanup_app: 2008cleanup_protocol:
2025 ip_vs_app_cleanup();
2026 cleanup_protocol:
2027 ip_vs_protocol_cleanup(); 2009 ip_vs_protocol_cleanup();
2028 ip_vs_control_cleanup(); 2010 ip_vs_control_cleanup();
2029 cleanup_estimator: 2011exit:
2030 ip_vs_estimator_cleanup();
2031 return ret; 2012 return ret;
2032} 2013}
2033 2014
@@ -2036,12 +2017,9 @@ static void __exit ip_vs_cleanup(void)
2036 nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops)); 2017 nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
2037 unregister_pernet_device(&ipvs_core_dev_ops); 2018 unregister_pernet_device(&ipvs_core_dev_ops);
2038 unregister_pernet_subsys(&ipvs_core_ops); /* free ip_vs struct */ 2019 unregister_pernet_subsys(&ipvs_core_ops); /* free ip_vs struct */
2039 ip_vs_sync_cleanup();
2040 ip_vs_conn_cleanup(); 2020 ip_vs_conn_cleanup();
2041 ip_vs_app_cleanup();
2042 ip_vs_protocol_cleanup(); 2021 ip_vs_protocol_cleanup();
2043 ip_vs_control_cleanup(); 2022 ip_vs_control_cleanup();
2044 ip_vs_estimator_cleanup();
2045 pr_info("ipvs unloaded.\n"); 2023 pr_info("ipvs unloaded.\n");
2046} 2024}
2047 2025
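
The init-path rework above deletes the now-empty global ip_vs_{estimator,app,sync}_init() stubs and renumbers the error labels in ip_vs_init(). What survives is the usual goto ladder: a failure at step N falls through labels that undo steps N-1..1 in reverse order. A compact, runnable sketch of that idiom, with hypothetical est/ctl/conn hooks standing in for the IPVS subsystems:

#include <stdio.h>

/* Hypothetical hooks standing in for the ip_vs_*_init() family;
 * each either fully succeeds or leaves nothing behind to undo. */
static int est_init(void)   { return 0; }
static void est_exit(void)  { }
static int ctl_init(void)   { return 0; }
static void ctl_exit(void)  { }
static int conn_init(void)  { return 0; }

/* The ladder kept by ip_vs_init() after the patch: on failure at
 * step N, fall through the labels to undo the earlier steps in
 * reverse order; deleted no-op steps simply vanish from the chain. */
static int subsys_init(void)
{
	int ret;

	ret = est_init();
	if (ret < 0)
		goto exit;
	ret = ctl_init();
	if (ret < 0)
		goto cleanup_est;
	ret = conn_init();
	if (ret < 0)
		goto cleanup_ctl;
	return 0;

cleanup_ctl:
	ctl_exit();
cleanup_est:
	est_exit();
exit:
	return ret;
}

int main(void)
{
	return subsys_init() ? (printf("init failed\n"), 1) : 0;
}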
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 699c79a5565..be43fd805bd 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -1334,9 +1334,9 @@ ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u)
1334 ip_vs_bind_pe(svc, pe); 1334 ip_vs_bind_pe(svc, pe);
1335 } 1335 }
1336 1336
1337 out_unlock: 1337out_unlock:
1338 write_unlock_bh(&__ip_vs_svc_lock); 1338 write_unlock_bh(&__ip_vs_svc_lock);
1339 out: 1339out:
1340 ip_vs_scheduler_put(old_sched); 1340 ip_vs_scheduler_put(old_sched);
1341 ip_vs_pe_put(old_pe); 1341 ip_vs_pe_put(old_pe);
1342 return ret; 1342 return ret;
@@ -1483,7 +1483,7 @@ static int ip_vs_flush(struct net *net)
1483 * Delete service by {netns} in the service table. 1483 * Delete service by {netns} in the service table.
1484 * Called by __ip_vs_cleanup() 1484 * Called by __ip_vs_cleanup()
1485 */ 1485 */
1486void __ip_vs_service_cleanup(struct net *net) 1486void ip_vs_service_net_cleanup(struct net *net)
1487{ 1487{
1488 EnterFunction(2); 1488 EnterFunction(2);
1489 /* Check for "full" addressed entries */ 1489 /* Check for "full" addressed entries */
@@ -1662,7 +1662,7 @@ proc_do_sync_mode(ctl_table *table, int write,
1662/* 1662/*
1663 * IPVS sysctl table (under the /proc/sys/net/ipv4/vs/) 1663 * IPVS sysctl table (under the /proc/sys/net/ipv4/vs/)
1664 * Do not change order or insert new entries without 1664 * Do not change order or insert new entries without
1665 * align with netns init in __ip_vs_control_init() 1665 * align with netns init in ip_vs_control_net_init()
1666 */ 1666 */
1667 1667
1668static struct ctl_table vs_vars[] = { 1668static struct ctl_table vs_vars[] = {
@@ -2469,7 +2469,7 @@ __ip_vs_get_service_entries(struct net *net,
2469 count++; 2469 count++;
2470 } 2470 }
2471 } 2471 }
2472 out: 2472out:
2473 return ret; 2473 return ret;
2474} 2474}
2475 2475
@@ -2707,7 +2707,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2707 ret = -EINVAL; 2707 ret = -EINVAL;
2708 } 2708 }
2709 2709
2710 out: 2710out:
2711 mutex_unlock(&__ip_vs_mutex); 2711 mutex_unlock(&__ip_vs_mutex);
2712 return ret; 2712 return ret;
2713} 2713}
@@ -3595,7 +3595,7 @@ static void ip_vs_genl_unregister(void)
3595 * per netns intit/exit func. 3595 * per netns intit/exit func.
3596 */ 3596 */
3597#ifdef CONFIG_SYSCTL 3597#ifdef CONFIG_SYSCTL
3598int __net_init __ip_vs_control_init_sysctl(struct net *net) 3598int __net_init ip_vs_control_net_init_sysctl(struct net *net)
3599{ 3599{
3600 int idx; 3600 int idx;
3601 struct netns_ipvs *ipvs = net_ipvs(net); 3601 struct netns_ipvs *ipvs = net_ipvs(net);
@@ -3654,7 +3654,7 @@ int __net_init __ip_vs_control_init_sysctl(struct net *net)
3654 return 0; 3654 return 0;
3655} 3655}
3656 3656
3657void __net_init __ip_vs_control_cleanup_sysctl(struct net *net) 3657void __net_init ip_vs_control_net_cleanup_sysctl(struct net *net)
3658{ 3658{
3659 struct netns_ipvs *ipvs = net_ipvs(net); 3659 struct netns_ipvs *ipvs = net_ipvs(net);
3660 3660
@@ -3665,8 +3665,8 @@ void __net_init __ip_vs_control_cleanup_sysctl(struct net *net)
3665 3665
3666#else 3666#else
3667 3667
3668int __net_init __ip_vs_control_init_sysctl(struct net *net) { return 0; } 3668int __net_init ip_vs_control_net_init_sysctl(struct net *net) { return 0; }
3669void __net_init __ip_vs_control_cleanup_sysctl(struct net *net) { } 3669void __net_init ip_vs_control_net_cleanup_sysctl(struct net *net) { }
3670 3670
3671#endif 3671#endif
3672 3672
@@ -3674,7 +3674,7 @@ static struct notifier_block ip_vs_dst_notifier = {
3674 .notifier_call = ip_vs_dst_event, 3674 .notifier_call = ip_vs_dst_event,
3675}; 3675};
3676 3676
3677int __net_init __ip_vs_control_init(struct net *net) 3677int __net_init ip_vs_control_net_init(struct net *net)
3678{ 3678{
3679 int idx; 3679 int idx;
3680 struct netns_ipvs *ipvs = net_ipvs(net); 3680 struct netns_ipvs *ipvs = net_ipvs(net);
@@ -3702,7 +3702,7 @@ int __net_init __ip_vs_control_init(struct net *net)
3702 proc_net_fops_create(net, "ip_vs_stats_percpu", 0, 3702 proc_net_fops_create(net, "ip_vs_stats_percpu", 0,
3703 &ip_vs_stats_percpu_fops); 3703 &ip_vs_stats_percpu_fops);
3704 3704
3705 if (__ip_vs_control_init_sysctl(net)) 3705 if (ip_vs_control_net_init_sysctl(net))
3706 goto err; 3706 goto err;
3707 3707
3708 return 0; 3708 return 0;
@@ -3712,13 +3712,13 @@ err:
3712 return -ENOMEM; 3712 return -ENOMEM;
3713} 3713}
3714 3714
3715void __net_exit __ip_vs_control_cleanup(struct net *net) 3715void __net_exit ip_vs_control_net_cleanup(struct net *net)
3716{ 3716{
3717 struct netns_ipvs *ipvs = net_ipvs(net); 3717 struct netns_ipvs *ipvs = net_ipvs(net);
3718 3718
3719 ip_vs_trash_cleanup(net); 3719 ip_vs_trash_cleanup(net);
3720 ip_vs_stop_estimator(net, &ipvs->tot_stats); 3720 ip_vs_stop_estimator(net, &ipvs->tot_stats);
3721 __ip_vs_control_cleanup_sysctl(net); 3721 ip_vs_control_net_cleanup_sysctl(net);
3722 proc_net_remove(net, "ip_vs_stats_percpu"); 3722 proc_net_remove(net, "ip_vs_stats_percpu");
3723 proc_net_remove(net, "ip_vs_stats"); 3723 proc_net_remove(net, "ip_vs_stats");
3724 proc_net_remove(net, "ip_vs"); 3724 proc_net_remove(net, "ip_vs");
diff --git a/net/netfilter/ipvs/ip_vs_est.c b/net/netfilter/ipvs/ip_vs_est.c
index 508cce98777..0fac6017b6f 100644
--- a/net/netfilter/ipvs/ip_vs_est.c
+++ b/net/netfilter/ipvs/ip_vs_est.c
@@ -192,7 +192,7 @@ void ip_vs_read_estimator(struct ip_vs_stats_user *dst,
192 dst->outbps = (e->outbps + 0xF) >> 5; 192 dst->outbps = (e->outbps + 0xF) >> 5;
193} 193}
194 194
195int __net_init __ip_vs_estimator_init(struct net *net) 195int __net_init ip_vs_estimator_net_init(struct net *net)
196{ 196{
197 struct netns_ipvs *ipvs = net_ipvs(net); 197 struct netns_ipvs *ipvs = net_ipvs(net);
198 198
@@ -203,16 +203,7 @@ int __net_init __ip_vs_estimator_init(struct net *net)
203 return 0; 203 return 0;
204} 204}
205 205
206void __net_exit __ip_vs_estimator_cleanup(struct net *net) 206void __net_exit ip_vs_estimator_net_cleanup(struct net *net)
207{ 207{
208 del_timer_sync(&net_ipvs(net)->est_timer); 208 del_timer_sync(&net_ipvs(net)->est_timer);
209} 209}
210
211int __init ip_vs_estimator_init(void)
212{
213 return 0;
214}
215
216void ip_vs_estimator_cleanup(void)
217{
218}
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index af63553fa33..4490a32ad5b 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -44,8 +44,8 @@
44#include <net/ip_vs.h> 44#include <net/ip_vs.h>
45 45
46 46
47#define SERVER_STRING "227 Entering Passive Mode (" 47#define SERVER_STRING "227 "
48#define CLIENT_STRING "PORT " 48#define CLIENT_STRING "PORT"
49 49
50 50
51/* 51/*
@@ -79,14 +79,17 @@ ip_vs_ftp_done_conn(struct ip_vs_app *app, struct ip_vs_conn *cp)
79 79
80/* 80/*
81 * Get <addr,port> from the string "xxx.xxx.xxx.xxx,ppp,ppp", started 81 * Get <addr,port> from the string "xxx.xxx.xxx.xxx,ppp,ppp", started
82 * with the "pattern" and terminated with the "term" character. 82 * with the "pattern", ignoring before "skip" and terminated with
83 * the "term" character.
83 * <addr,port> is in network order. 84 * <addr,port> is in network order.
84 */ 85 */
85static int ip_vs_ftp_get_addrport(char *data, char *data_limit, 86static int ip_vs_ftp_get_addrport(char *data, char *data_limit,
86 const char *pattern, size_t plen, char term, 87 const char *pattern, size_t plen,
88 char skip, char term,
87 __be32 *addr, __be16 *port, 89 __be32 *addr, __be16 *port,
88 char **start, char **end) 90 char **start, char **end)
89{ 91{
92 char *s, c;
90 unsigned char p[6]; 93 unsigned char p[6];
91 int i = 0; 94 int i = 0;
92 95
@@ -101,19 +104,38 @@ static int ip_vs_ftp_get_addrport(char *data, char *data_limit,
101 if (strnicmp(data, pattern, plen) != 0) { 104 if (strnicmp(data, pattern, plen) != 0) {
102 return 0; 105 return 0;
103 } 106 }
104 *start = data + plen; 107 s = data + plen;
108 if (skip) {
109 int found = 0;
110
111 for (;; s++) {
112 if (s == data_limit)
113 return -1;
114 if (!found) {
115 if (*s == skip)
116 found = 1;
117 } else if (*s != skip) {
118 break;
119 }
120 }
121 }
105 122
106 for (data = *start; *data != term; data++) { 123 for (data = s; ; data++) {
107 if (data == data_limit) 124 if (data == data_limit)
108 return -1; 125 return -1;
126 if (*data == term)
127 break;
109 } 128 }
110 *end = data; 129 *end = data;
111 130
112 memset(p, 0, sizeof(p)); 131 memset(p, 0, sizeof(p));
113 for (data = *start; data != *end; data++) { 132 for (data = s; ; data++) {
114 if (*data >= '0' && *data <= '9') { 133 c = *data;
115 p[i] = p[i]*10 + *data - '0'; 134 if (c == term)
116 } else if (*data == ',' && i < 5) { 135 break;
136 if (c >= '0' && c <= '9') {
137 p[i] = p[i]*10 + c - '0';
138 } else if (c == ',' && i < 5) {
117 i++; 139 i++;
118 } else { 140 } else {
119 /* unexpected character */ 141 /* unexpected character */
@@ -124,8 +146,9 @@ static int ip_vs_ftp_get_addrport(char *data, char *data_limit,
124 if (i != 5) 146 if (i != 5)
125 return -1; 147 return -1;
126 148
127 *addr = get_unaligned((__be32 *)p); 149 *start = s;
128 *port = get_unaligned((__be16 *)(p + 4)); 150 *addr = get_unaligned((__be32 *) p);
151 *port = get_unaligned((__be16 *) (p + 4));
129 return 1; 152 return 1;
130} 153}
131 154
@@ -185,7 +208,8 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
185 208
186 if (ip_vs_ftp_get_addrport(data, data_limit, 209 if (ip_vs_ftp_get_addrport(data, data_limit,
187 SERVER_STRING, 210 SERVER_STRING,
188 sizeof(SERVER_STRING)-1, ')', 211 sizeof(SERVER_STRING)-1,
212 '(', ')',
189 &from.ip, &port, 213 &from.ip, &port,
190 &start, &end) != 1) 214 &start, &end) != 1)
191 return 1; 215 return 1;
@@ -345,7 +369,7 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
345 */ 369 */
346 if (ip_vs_ftp_get_addrport(data_start, data_limit, 370 if (ip_vs_ftp_get_addrport(data_start, data_limit,
347 CLIENT_STRING, sizeof(CLIENT_STRING)-1, 371 CLIENT_STRING, sizeof(CLIENT_STRING)-1,
348 '\r', &to.ip, &port, 372 ' ', '\r', &to.ip, &port,
349 &start, &end) != 1) 373 &start, &end) != 1)
350 return 1; 374 return 1;
351 375
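
The reworked ip_vs_ftp_get_addrport() gains a 'skip' character so one parser covers both directions: for the "227" reply it skips ahead to the '(' opening the address, and for "PORT" it skips the separating space. A standalone sketch of the same scan; get_addrport() here is a simplification that drops the kernel's unaligned-access plumbing and just fills the six byte fields:

#include <stdio.h>
#include <string.h>

/* Simplified ip_vs_ftp_get_addrport(): match "pattern", advance past
 * the first run of 'skip' characters, then collect six decimal
 * fields separated by commas until 'term'.
 * Returns 1 on success, 0 if the pattern does not match, -1 on error. */
static int get_addrport(const char *data, const char *limit,
			const char *pattern, char skip, char term,
			unsigned char p[6])
{
	size_t plen = strlen(pattern);
	const char *s;
	int i = 0, found = 0;

	if ((size_t)(limit - data) < plen ||
	    strncmp(data, pattern, plen) != 0)
		return 0;
	s = data + plen;
	if (skip) {
		for (;; s++) {		/* seek past the skip character(s) */
			if (s == limit)
				return -1;
			if (!found) {
				if (*s == skip)
					found = 1;
			} else if (*s != skip)
				break;
		}
	}
	memset(p, 0, 6);
	for (; s != limit; s++) {
		if (*s == term)
			return i == 5 ? 1 : -1;
		if (*s >= '0' && *s <= '9')
			p[i] = p[i] * 10 + (*s - '0');
		else if (*s == ',' && i < 5)
			i++;
		else
			return -1;	/* unexpected character */
	}
	return -1;			/* hit the limit before 'term' */
}

int main(void)
{
	const char *msg = "227 Entering Passive Mode (192,168,0,1,200,21)\r\n";
	unsigned char p[6];

	if (get_addrport(msg, msg + strlen(msg), "227 ", '(', ')', p) == 1)
		printf("%u.%u.%u.%u port %u\n",
		       p[0], p[1], p[2], p[3], p[4] * 256 + p[5]);
	return 0;
}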
diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c
index eb86028536f..52d073c105e 100644
--- a/net/netfilter/ipvs/ip_vs_proto.c
+++ b/net/netfilter/ipvs/ip_vs_proto.c
@@ -316,7 +316,7 @@ ip_vs_tcpudp_debug_packet(int af, struct ip_vs_protocol *pp,
316/* 316/*
317 * per network name-space init 317 * per network name-space init
318 */ 318 */
319int __net_init __ip_vs_protocol_init(struct net *net) 319int __net_init ip_vs_protocol_net_init(struct net *net)
320{ 320{
321#ifdef CONFIG_IP_VS_PROTO_TCP 321#ifdef CONFIG_IP_VS_PROTO_TCP
322 register_ip_vs_proto_netns(net, &ip_vs_protocol_tcp); 322 register_ip_vs_proto_netns(net, &ip_vs_protocol_tcp);
@@ -336,7 +336,7 @@ int __net_init __ip_vs_protocol_init(struct net *net)
336 return 0; 336 return 0;
337} 337}
338 338
339void __net_exit __ip_vs_protocol_cleanup(struct net *net) 339void __net_exit ip_vs_protocol_net_cleanup(struct net *net)
340{ 340{
341 struct netns_ipvs *ipvs = net_ipvs(net); 341 struct netns_ipvs *ipvs = net_ipvs(net);
342 struct ip_vs_proto_data *pd; 342 struct ip_vs_proto_data *pd;
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index e292e5bddc7..7ee7215b8ba 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -1663,7 +1663,7 @@ int stop_sync_thread(struct net *net, int state)
1663/* 1663/*
1664 * Initialize data struct for each netns 1664 * Initialize data struct for each netns
1665 */ 1665 */
1666int __net_init __ip_vs_sync_init(struct net *net) 1666int __net_init ip_vs_sync_net_init(struct net *net)
1667{ 1667{
1668 struct netns_ipvs *ipvs = net_ipvs(net); 1668 struct netns_ipvs *ipvs = net_ipvs(net);
1669 1669
@@ -1677,7 +1677,7 @@ int __net_init __ip_vs_sync_init(struct net *net)
1677 return 0; 1677 return 0;
1678} 1678}
1679 1679
1680void __ip_vs_sync_cleanup(struct net *net) 1680void ip_vs_sync_net_cleanup(struct net *net)
1681{ 1681{
1682 int retc; 1682 int retc;
1683 1683
@@ -1689,12 +1689,3 @@ void __ip_vs_sync_cleanup(struct net *net)
1689 if (retc && retc != -ESRCH) 1689 if (retc && retc != -ESRCH)
1690 pr_err("Failed to stop Backup Daemon\n"); 1690 pr_err("Failed to stop Backup Daemon\n");
1691} 1691}
1692
1693int __init ip_vs_sync_init(void)
1694{
1695 return 0;
1696}
1697
1698void ip_vs_sync_cleanup(void)
1699{
1700}
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 482e90c6185..7dec88a1755 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -970,7 +970,7 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
970 970
971 if (nlh->nlmsg_flags & NLM_F_DUMP) 971 if (nlh->nlmsg_flags & NLM_F_DUMP)
972 return netlink_dump_start(ctnl, skb, nlh, ctnetlink_dump_table, 972 return netlink_dump_start(ctnl, skb, nlh, ctnetlink_dump_table,
973 ctnetlink_done); 973 ctnetlink_done, 0);
974 974
975 err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone); 975 err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
976 if (err < 0) 976 if (err < 0)
@@ -1840,7 +1840,7 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
1840 if (nlh->nlmsg_flags & NLM_F_DUMP) { 1840 if (nlh->nlmsg_flags & NLM_F_DUMP) {
1841 return netlink_dump_start(ctnl, skb, nlh, 1841 return netlink_dump_start(ctnl, skb, nlh,
1842 ctnetlink_exp_dump_table, 1842 ctnetlink_exp_dump_table,
1843 ctnetlink_exp_done); 1843 ctnetlink_exp_done, 0);
1844 } 1844 }
1845 1845
1846 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone); 1846 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index b4a4532823e..1905976b513 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -37,7 +37,7 @@ MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NETFILTER);
37 37
38static char __initdata nfversion[] = "0.30"; 38static char __initdata nfversion[] = "0.30";
39 39
40static const struct nfnetlink_subsystem *subsys_table[NFNL_SUBSYS_COUNT]; 40static const struct nfnetlink_subsystem __rcu *subsys_table[NFNL_SUBSYS_COUNT];
41static DEFINE_MUTEX(nfnl_mutex); 41static DEFINE_MUTEX(nfnl_mutex);
42 42
43void nfnl_lock(void) 43void nfnl_lock(void)
@@ -59,7 +59,7 @@ int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n)
59 nfnl_unlock(); 59 nfnl_unlock();
60 return -EBUSY; 60 return -EBUSY;
61 } 61 }
62 subsys_table[n->subsys_id] = n; 62 rcu_assign_pointer(subsys_table[n->subsys_id], n);
63 nfnl_unlock(); 63 nfnl_unlock();
64 64
65 return 0; 65 return 0;
@@ -71,7 +71,7 @@ int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n)
71 nfnl_lock(); 71 nfnl_lock();
72 subsys_table[n->subsys_id] = NULL; 72 subsys_table[n->subsys_id] = NULL;
73 nfnl_unlock(); 73 nfnl_unlock();
74 74 synchronize_rcu();
75 return 0; 75 return 0;
76} 76}
77EXPORT_SYMBOL_GPL(nfnetlink_subsys_unregister); 77EXPORT_SYMBOL_GPL(nfnetlink_subsys_unregister);
@@ -83,7 +83,7 @@ static inline const struct nfnetlink_subsystem *nfnetlink_get_subsys(u_int16_t t
83 if (subsys_id >= NFNL_SUBSYS_COUNT) 83 if (subsys_id >= NFNL_SUBSYS_COUNT)
84 return NULL; 84 return NULL;
85 85
86 return subsys_table[subsys_id]; 86 return rcu_dereference(subsys_table[subsys_id]);
87} 87}
88 88
89static inline const struct nfnl_callback * 89static inline const struct nfnl_callback *
@@ -139,21 +139,27 @@ static int nfnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
139 139
140 type = nlh->nlmsg_type; 140 type = nlh->nlmsg_type;
141replay: 141replay:
142 rcu_read_lock();
142 ss = nfnetlink_get_subsys(type); 143 ss = nfnetlink_get_subsys(type);
143 if (!ss) { 144 if (!ss) {
144#ifdef CONFIG_MODULES 145#ifdef CONFIG_MODULES
145 nfnl_unlock(); 146 rcu_read_unlock();
146 request_module("nfnetlink-subsys-%d", NFNL_SUBSYS_ID(type)); 147 request_module("nfnetlink-subsys-%d", NFNL_SUBSYS_ID(type));
147 nfnl_lock(); 148 rcu_read_lock();
148 ss = nfnetlink_get_subsys(type); 149 ss = nfnetlink_get_subsys(type);
149 if (!ss) 150 if (!ss)
150#endif 151#endif
152 {
153 rcu_read_unlock();
151 return -EINVAL; 154 return -EINVAL;
155 }
152 } 156 }
153 157
154 nc = nfnetlink_find_client(type, ss); 158 nc = nfnetlink_find_client(type, ss);
155 if (!nc) 159 if (!nc) {
160 rcu_read_unlock();
156 return -EINVAL; 161 return -EINVAL;
162 }
157 163
158 { 164 {
159 int min_len = NLMSG_SPACE(sizeof(struct nfgenmsg)); 165 int min_len = NLMSG_SPACE(sizeof(struct nfgenmsg));
@@ -167,7 +173,23 @@ replay:
167 if (err < 0) 173 if (err < 0)
168 return err; 174 return err;
169 175
170 err = nc->call(net->nfnl, skb, nlh, (const struct nlattr **)cda); 176 if (nc->call_rcu) {
177 err = nc->call_rcu(net->nfnl, skb, nlh,
178 (const struct nlattr **)cda);
179 rcu_read_unlock();
180 } else {
181 rcu_read_unlock();
182 nfnl_lock();
183 if (rcu_dereference_protected(
184 subsys_table[NFNL_SUBSYS_ID(type)],
185 lockdep_is_held(&nfnl_mutex)) != ss ||
186 nfnetlink_find_client(type, ss) != nc)
187 err = -EAGAIN;
188 else
189 err = nc->call(net->nfnl, skb, nlh,
190 (const struct nlattr **)cda);
191 nfnl_unlock();
192 }
171 if (err == -EAGAIN) 193 if (err == -EAGAIN)
172 goto replay; 194 goto replay;
173 return err; 195 return err;
@@ -176,9 +198,7 @@ replay:
176 198
177static void nfnetlink_rcv(struct sk_buff *skb) 199static void nfnetlink_rcv(struct sk_buff *skb)
178{ 200{
179 nfnl_lock();
180 netlink_rcv_skb(skb, &nfnetlink_rcv_msg); 201 netlink_rcv_skb(skb, &nfnetlink_rcv_msg);
181 nfnl_unlock();
182} 202}
183 203
184static int __net_init nfnetlink_net_init(struct net *net) 204static int __net_init nfnetlink_net_init(struct net *net)
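
The nfnetlink change above converts subsys_table[] to an __rcu pointer array so message dispatch can run under rcu_read_lock(): handlers that declare .call_rcu are invoked on the read side, while legacy .call handlers drop out of it, retake nfnl_mutex, revalidate that the subsystem and callback are still the ones looked up, and replay via -EAGAIN otherwise. As a rough userspace model of the pointer-publication half, rcu_assign_pointer() behaves like a release store and rcu_dereference() like an acquire load; the grace-period wait (the synchronize_rcu() added to unregister) has no analogue in this sketch:

#include <stdatomic.h>
#include <stddef.h>

#define NSUBSYS 32

struct subsys {
	int id;
	int (*call)(void *msg);
};

/* Model of the __rcu subsys_table[]. */
static _Atomic(struct subsys *) subsys_table[NSUBSYS];

static void subsys_register(struct subsys *ss)
{
	/* rcu_assign_pointer(): publish with release semantics, so a
	 * reader that sees the pointer also sees the initialized ss */
	atomic_store_explicit(&subsys_table[ss->id], ss,
			      memory_order_release);
}

static void subsys_unregister(const struct subsys *ss)
{
	atomic_store_explicit(&subsys_table[ss->id], NULL,
			      memory_order_relaxed);
	/* the kernel follows this with synchronize_rcu(), so no reader
	 * still holds the pointer when the caller frees it */
}

static struct subsys *subsys_lookup(int id)
{
	if (id < 0 || id >= NSUBSYS)
		return NULL;
	/* rcu_dereference(): acquire load, pairs with the release above */
	return atomic_load_explicit(&subsys_table[id],
				    memory_order_acquire);
}

int main(void)
{
	static struct subsys nfq = { .id = 3 };

	subsys_register(&nfq);
	struct subsys *ss = subsys_lookup(3);	/* == &nfq */
	subsys_unregister(&nfq);
	return ss == &nfq ? 0 : 1;
}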
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index fdd2fafe0a1..49132bddd73 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -58,7 +58,7 @@ struct nfqnl_instance {
58 */ 58 */
59 spinlock_t lock; 59 spinlock_t lock;
60 unsigned int queue_total; 60 unsigned int queue_total;
61 atomic_t id_sequence; /* 'sequence' of pkt ids */ 61 unsigned int id_sequence; /* 'sequence' of pkt ids */
62 struct list_head queue_list; /* packets in queue */ 62 struct list_head queue_list; /* packets in queue */
63}; 63};
64 64
@@ -171,6 +171,13 @@ __enqueue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
171 queue->queue_total++; 171 queue->queue_total++;
172} 172}
173 173
174static void
175__dequeue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
176{
177 list_del(&entry->list);
178 queue->queue_total--;
179}
180
174static struct nf_queue_entry * 181static struct nf_queue_entry *
175find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id) 182find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id)
176{ 183{
@@ -185,10 +192,8 @@ find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id)
185 } 192 }
186 } 193 }
187 194
188 if (entry) { 195 if (entry)
189 list_del(&entry->list); 196 __dequeue_entry(queue, entry);
190 queue->queue_total--;
191 }
192 197
193 spin_unlock_bh(&queue->lock); 198 spin_unlock_bh(&queue->lock);
194 199
@@ -213,13 +218,15 @@ nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data)
213 218
214static struct sk_buff * 219static struct sk_buff *
215nfqnl_build_packet_message(struct nfqnl_instance *queue, 220nfqnl_build_packet_message(struct nfqnl_instance *queue,
216 struct nf_queue_entry *entry) 221 struct nf_queue_entry *entry,
222 __be32 **packet_id_ptr)
217{ 223{
218 sk_buff_data_t old_tail; 224 sk_buff_data_t old_tail;
219 size_t size; 225 size_t size;
220 size_t data_len = 0; 226 size_t data_len = 0;
221 struct sk_buff *skb; 227 struct sk_buff *skb;
222 struct nfqnl_msg_packet_hdr pmsg; 228 struct nlattr *nla;
229 struct nfqnl_msg_packet_hdr *pmsg;
223 struct nlmsghdr *nlh; 230 struct nlmsghdr *nlh;
224 struct nfgenmsg *nfmsg; 231 struct nfgenmsg *nfmsg;
225 struct sk_buff *entskb = entry->skb; 232 struct sk_buff *entskb = entry->skb;
@@ -272,12 +279,11 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
272 nfmsg->version = NFNETLINK_V0; 279 nfmsg->version = NFNETLINK_V0;
273 nfmsg->res_id = htons(queue->queue_num); 280 nfmsg->res_id = htons(queue->queue_num);
274 281
275 entry->id = atomic_inc_return(&queue->id_sequence); 282 nla = __nla_reserve(skb, NFQA_PACKET_HDR, sizeof(*pmsg));
276 pmsg.packet_id = htonl(entry->id); 283 pmsg = nla_data(nla);
277 pmsg.hw_protocol = entskb->protocol; 284 pmsg->hw_protocol = entskb->protocol;
278 pmsg.hook = entry->hook; 285 pmsg->hook = entry->hook;
279 286 *packet_id_ptr = &pmsg->packet_id;
280 NLA_PUT(skb, NFQA_PACKET_HDR, sizeof(pmsg), &pmsg);
281 287
282 indev = entry->indev; 288 indev = entry->indev;
283 if (indev) { 289 if (indev) {
@@ -389,6 +395,7 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
389 struct sk_buff *nskb; 395 struct sk_buff *nskb;
390 struct nfqnl_instance *queue; 396 struct nfqnl_instance *queue;
391 int err = -ENOBUFS; 397 int err = -ENOBUFS;
398 __be32 *packet_id_ptr;
392 399
393 /* rcu_read_lock()ed by nf_hook_slow() */ 400 /* rcu_read_lock()ed by nf_hook_slow() */
394 queue = instance_lookup(queuenum); 401 queue = instance_lookup(queuenum);
@@ -402,7 +409,7 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
402 goto err_out; 409 goto err_out;
403 } 410 }
404 411
405 nskb = nfqnl_build_packet_message(queue, entry); 412 nskb = nfqnl_build_packet_message(queue, entry, &packet_id_ptr);
406 if (nskb == NULL) { 413 if (nskb == NULL) {
407 err = -ENOMEM; 414 err = -ENOMEM;
408 goto err_out; 415 goto err_out;
@@ -421,6 +428,8 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
421 queue->queue_total); 428 queue->queue_total);
422 goto err_out_free_nskb; 429 goto err_out_free_nskb;
423 } 430 }
431 entry->id = ++queue->id_sequence;
432 *packet_id_ptr = htonl(entry->id);
424 433
425 /* nfnetlink_unicast will either free the nskb or add it to a socket */ 434 /* nfnetlink_unicast will either free the nskb or add it to a socket */
426 err = nfnetlink_unicast(nskb, &init_net, queue->peer_pid, MSG_DONTWAIT); 435 err = nfnetlink_unicast(nskb, &init_net, queue->peer_pid, MSG_DONTWAIT);
@@ -608,6 +617,92 @@ static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = {
608 [NFQA_PAYLOAD] = { .type = NLA_UNSPEC }, 617 [NFQA_PAYLOAD] = { .type = NLA_UNSPEC },
609}; 618};
610 619
620static const struct nla_policy nfqa_verdict_batch_policy[NFQA_MAX+1] = {
621 [NFQA_VERDICT_HDR] = { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
622 [NFQA_MARK] = { .type = NLA_U32 },
623};
624
625static struct nfqnl_instance *verdict_instance_lookup(u16 queue_num, int nlpid)
626{
627 struct nfqnl_instance *queue;
628
629 queue = instance_lookup(queue_num);
630 if (!queue)
631 return ERR_PTR(-ENODEV);
632
633 if (queue->peer_pid != nlpid)
634 return ERR_PTR(-EPERM);
635
636 return queue;
637}
638
639static struct nfqnl_msg_verdict_hdr*
640verdicthdr_get(const struct nlattr * const nfqa[])
641{
642 struct nfqnl_msg_verdict_hdr *vhdr;
643 unsigned int verdict;
644
645 if (!nfqa[NFQA_VERDICT_HDR])
646 return NULL;
647
648 vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]);
649 verdict = ntohl(vhdr->verdict);
650 if ((verdict & NF_VERDICT_MASK) > NF_MAX_VERDICT)
651 return NULL;
652 return vhdr;
653}
654
655static int nfq_id_after(unsigned int id, unsigned int max)
656{
657 return (int)(id - max) > 0;
658}
659
660static int
661nfqnl_recv_verdict_batch(struct sock *ctnl, struct sk_buff *skb,
662 const struct nlmsghdr *nlh,
663 const struct nlattr * const nfqa[])
664{
665 struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
666 struct nf_queue_entry *entry, *tmp;
667 unsigned int verdict, maxid;
668 struct nfqnl_msg_verdict_hdr *vhdr;
669 struct nfqnl_instance *queue;
670 LIST_HEAD(batch_list);
671 u16 queue_num = ntohs(nfmsg->res_id);
672
673 queue = verdict_instance_lookup(queue_num, NETLINK_CB(skb).pid);
674 if (IS_ERR(queue))
675 return PTR_ERR(queue);
676
677 vhdr = verdicthdr_get(nfqa);
678 if (!vhdr)
679 return -EINVAL;
680
681 verdict = ntohl(vhdr->verdict);
682 maxid = ntohl(vhdr->id);
683
684 spin_lock_bh(&queue->lock);
685
686 list_for_each_entry_safe(entry, tmp, &queue->queue_list, list) {
687 if (nfq_id_after(entry->id, maxid))
688 break;
689 __dequeue_entry(queue, entry);
690 list_add_tail(&entry->list, &batch_list);
691 }
692
693 spin_unlock_bh(&queue->lock);
694
695 if (list_empty(&batch_list))
696 return -ENOENT;
697
698 list_for_each_entry_safe(entry, tmp, &batch_list, list) {
699 if (nfqa[NFQA_MARK])
700 entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));
701 nf_reinject(entry, verdict);
702 }
703 return 0;
704}
705
611static int 706static int
612nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb, 707nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
613 const struct nlmsghdr *nlh, 708 const struct nlmsghdr *nlh,
@@ -620,39 +715,23 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
620 struct nfqnl_instance *queue; 715 struct nfqnl_instance *queue;
621 unsigned int verdict; 716 unsigned int verdict;
622 struct nf_queue_entry *entry; 717 struct nf_queue_entry *entry;
623 int err;
624 718
625 rcu_read_lock();
626 queue = instance_lookup(queue_num); 719 queue = instance_lookup(queue_num);
627 if (!queue) { 720 if (!queue)
628 err = -ENODEV;
629 goto err_out_unlock;
630 }
631 721
632 if (queue->peer_pid != NETLINK_CB(skb).pid) { 722 queue = verdict_instance_lookup(queue_num, NETLINK_CB(skb).pid);
633 err = -EPERM; 723 if (IS_ERR(queue))
634 goto err_out_unlock; 724 return PTR_ERR(queue);
635 }
636 725
637 if (!nfqa[NFQA_VERDICT_HDR]) { 726 vhdr = verdicthdr_get(nfqa);
638 err = -EINVAL; 727 if (!vhdr)
639 goto err_out_unlock; 728 return -EINVAL;
640 }
641 729
642 vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]);
643 verdict = ntohl(vhdr->verdict); 730 verdict = ntohl(vhdr->verdict);
644 731
645 if ((verdict & NF_VERDICT_MASK) > NF_MAX_VERDICT) {
646 err = -EINVAL;
647 goto err_out_unlock;
648 }
649
650 entry = find_dequeue_entry(queue, ntohl(vhdr->id)); 732 entry = find_dequeue_entry(queue, ntohl(vhdr->id));
651 if (entry == NULL) { 733 if (entry == NULL)
652 err = -ENOENT; 734 return -ENOENT;
653 goto err_out_unlock;
654 }
655 rcu_read_unlock();
656 735
657 if (nfqa[NFQA_PAYLOAD]) { 736 if (nfqa[NFQA_PAYLOAD]) {
658 if (nfqnl_mangle(nla_data(nfqa[NFQA_PAYLOAD]), 737 if (nfqnl_mangle(nla_data(nfqa[NFQA_PAYLOAD]),
@@ -665,10 +744,6 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
665 744
666 nf_reinject(entry, verdict); 745 nf_reinject(entry, verdict);
667 return 0; 746 return 0;
668
669err_out_unlock:
670 rcu_read_unlock();
671 return err;
672} 747}
673 748
674static int 749static int
@@ -781,14 +856,17 @@ err_out_unlock:
781} 856}
782 857
783static const struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = { 858static const struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = {
784 [NFQNL_MSG_PACKET] = { .call = nfqnl_recv_unsupp, 859 [NFQNL_MSG_PACKET] = { .call_rcu = nfqnl_recv_unsupp,
785 .attr_count = NFQA_MAX, }, 860 .attr_count = NFQA_MAX, },
786 [NFQNL_MSG_VERDICT] = { .call = nfqnl_recv_verdict, 861 [NFQNL_MSG_VERDICT] = { .call_rcu = nfqnl_recv_verdict,
787 .attr_count = NFQA_MAX, 862 .attr_count = NFQA_MAX,
788 .policy = nfqa_verdict_policy }, 863 .policy = nfqa_verdict_policy },
789 [NFQNL_MSG_CONFIG] = { .call = nfqnl_recv_config, 864 [NFQNL_MSG_CONFIG] = { .call = nfqnl_recv_config,
790 .attr_count = NFQA_CFG_MAX, 865 .attr_count = NFQA_CFG_MAX,
791 .policy = nfqa_cfg_policy }, 866 .policy = nfqa_cfg_policy },
867 [NFQNL_MSG_VERDICT_BATCH]={ .call_rcu = nfqnl_recv_verdict_batch,
868 .attr_count = NFQA_MAX,
869 .policy = nfqa_verdict_batch_policy },
792}; 870};
793 871
794static const struct nfnetlink_subsystem nfqnl_subsys = { 872static const struct nfnetlink_subsystem nfqnl_subsys = {
@@ -870,7 +948,7 @@ static int seq_show(struct seq_file *s, void *v)
870 inst->peer_pid, inst->queue_total, 948 inst->peer_pid, inst->queue_total,
871 inst->copy_mode, inst->copy_range, 949 inst->copy_mode, inst->copy_range,
872 inst->queue_dropped, inst->queue_user_dropped, 950 inst->queue_dropped, inst->queue_user_dropped,
873 atomic_read(&inst->id_sequence), 1); 951 inst->id_sequence, 1);
874} 952}
875 953
876static const struct seq_operations nfqnl_seq_ops = { 954static const struct seq_operations nfqnl_seq_ops = {
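
NFQNL_MSG_VERDICT_BATCH applies one verdict to every queued packet whose id is not after the id in the verdict header: matching entries are spliced onto a private list under queue->lock, then reinjected outside it. The ordering test nfq_id_after() is the classic wraparound-safe sequence comparison; a self-checking copy (assuming 32-bit two's-complement int, as the kernel does):

#include <assert.h>

/* nfq_id_after(): the unsigned difference reinterpreted as signed,
 * so ids less than 2^31 apart order correctly across a 32-bit wrap. */
static int id_after(unsigned int id, unsigned int max)
{
	return (int)(id - max) > 0;
}

int main(void)
{
	assert(!id_after(5, 10));		/* 5 is not after 10 */
	assert(id_after(11, 10));		/* plain case */
	/* around the wrap: 3 follows 0xfffffffe although 3 < 0xfffffffe */
	assert(id_after(3, 0xfffffffeu));
	assert(!id_after(0xfffffffeu, 3));
	return 0;
}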
diff --git a/net/netfilter/xt_AUDIT.c b/net/netfilter/xt_AUDIT.c
index 363a99ec063..4bca15a0c38 100644
--- a/net/netfilter/xt_AUDIT.c
+++ b/net/netfilter/xt_AUDIT.c
@@ -163,6 +163,11 @@ audit_tg(struct sk_buff *skb, const struct xt_action_param *par)
163 break; 163 break;
164 } 164 }
165 165
166#ifdef CONFIG_NETWORK_SECMARK
167 if (skb->secmark)
168 audit_log_secctx(ab, skb->secmark);
169#endif
170
166 audit_log_end(ab); 171 audit_log_end(ab);
167 172
168errout: 173errout:
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 782e51986a6..0221d10de75 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -5,7 +5,7 @@
5 * it under the terms of the GNU General Public License version 2 as 5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation. 6 * published by the Free Software Foundation.
7 */ 7 */
8 8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/gfp.h> 10#include <linux/gfp.h>
11#include <linux/skbuff.h> 11#include <linux/skbuff.h>
@@ -95,8 +95,11 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par)
95 if (info->helper[0]) { 95 if (info->helper[0]) {
96 ret = -ENOENT; 96 ret = -ENOENT;
97 proto = xt_ct_find_proto(par); 97 proto = xt_ct_find_proto(par);
98 if (!proto) 98 if (!proto) {
99 pr_info("You must specify a L4 protocol, "
100 "and not use inversions on it.\n");
99 goto err3; 101 goto err3;
102 }
100 103
101 ret = -ENOMEM; 104 ret = -ENOMEM;
102 help = nf_ct_helper_ext_add(ct, GFP_KERNEL); 105 help = nf_ct_helper_ext_add(ct, GFP_KERNEL);
@@ -107,8 +110,10 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par)
107 help->helper = nf_conntrack_helper_try_module_get(info->helper, 110 help->helper = nf_conntrack_helper_try_module_get(info->helper,
108 par->family, 111 par->family,
109 proto); 112 proto);
110 if (help->helper == NULL) 113 if (help->helper == NULL) {
114 pr_info("No such helper \"%s\"\n", info->helper);
111 goto err3; 115 goto err3;
116 }
112 } 117 }
113 118
114 __set_bit(IPS_TEMPLATE_BIT, &ct->status); 119 __set_bit(IPS_TEMPLATE_BIT, &ct->status);
diff --git a/net/netfilter/xt_HL.c b/net/netfilter/xt_HL.c
index 95b084800fc..1535e87ed9b 100644
--- a/net/netfilter/xt_HL.c
+++ b/net/netfilter/xt_HL.c
@@ -38,22 +38,22 @@ ttl_tg(struct sk_buff *skb, const struct xt_action_param *par)
38 iph = ip_hdr(skb); 38 iph = ip_hdr(skb);
39 39
40 switch (info->mode) { 40 switch (info->mode) {
41 case IPT_TTL_SET: 41 case IPT_TTL_SET:
42 new_ttl = info->ttl; 42 new_ttl = info->ttl;
43 break; 43 break;
44 case IPT_TTL_INC: 44 case IPT_TTL_INC:
45 new_ttl = iph->ttl + info->ttl; 45 new_ttl = iph->ttl + info->ttl;
46 if (new_ttl > 255) 46 if (new_ttl > 255)
47 new_ttl = 255; 47 new_ttl = 255;
48 break; 48 break;
49 case IPT_TTL_DEC: 49 case IPT_TTL_DEC:
50 new_ttl = iph->ttl - info->ttl; 50 new_ttl = iph->ttl - info->ttl;
51 if (new_ttl < 0) 51 if (new_ttl < 0)
52 new_ttl = 0; 52 new_ttl = 0;
53 break; 53 break;
54 default: 54 default:
55 new_ttl = iph->ttl; 55 new_ttl = iph->ttl;
56 break; 56 break;
57 } 57 }
58 58
59 if (new_ttl != iph->ttl) { 59 if (new_ttl != iph->ttl) {
@@ -78,22 +78,22 @@ hl_tg6(struct sk_buff *skb, const struct xt_action_param *par)
78 ip6h = ipv6_hdr(skb); 78 ip6h = ipv6_hdr(skb);
79 79
80 switch (info->mode) { 80 switch (info->mode) {
81 case IP6T_HL_SET: 81 case IP6T_HL_SET:
82 new_hl = info->hop_limit; 82 new_hl = info->hop_limit;
83 break; 83 break;
84 case IP6T_HL_INC: 84 case IP6T_HL_INC:
85 new_hl = ip6h->hop_limit + info->hop_limit; 85 new_hl = ip6h->hop_limit + info->hop_limit;
86 if (new_hl > 255) 86 if (new_hl > 255)
87 new_hl = 255; 87 new_hl = 255;
88 break; 88 break;
89 case IP6T_HL_DEC: 89 case IP6T_HL_DEC:
90 new_hl = ip6h->hop_limit - info->hop_limit; 90 new_hl = ip6h->hop_limit - info->hop_limit;
91 if (new_hl < 0) 91 if (new_hl < 0)
92 new_hl = 0; 92 new_hl = 0;
93 break; 93 break;
94 default: 94 default:
95 new_hl = ip6h->hop_limit; 95 new_hl = ip6h->hop_limit;
96 break; 96 break;
97 } 97 }
98 98
99 ip6h->hop_limit = new_hl; 99 ip6h->hop_limit = new_hl;
diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c
index de079abd5bc..f264032b8c5 100644
--- a/net/netfilter/xt_RATEEST.c
+++ b/net/netfilter/xt_RATEEST.c
@@ -60,11 +60,6 @@ struct xt_rateest *xt_rateest_lookup(const char *name)
60} 60}
61EXPORT_SYMBOL_GPL(xt_rateest_lookup); 61EXPORT_SYMBOL_GPL(xt_rateest_lookup);
62 62
63static void xt_rateest_free_rcu(struct rcu_head *head)
64{
65 kfree(container_of(head, struct xt_rateest, rcu));
66}
67
68void xt_rateest_put(struct xt_rateest *est) 63void xt_rateest_put(struct xt_rateest *est)
69{ 64{
70 mutex_lock(&xt_rateest_mutex); 65 mutex_lock(&xt_rateest_mutex);
@@ -75,7 +70,7 @@ void xt_rateest_put(struct xt_rateest *est)
75 * gen_estimator est_timer() might access est->lock or bstats, 70 * gen_estimator est_timer() might access est->lock or bstats,
76 * wait a RCU grace period before freeing 'est' 71 * wait a RCU grace period before freeing 'est'
77 */ 72 */
78 call_rcu(&est->rcu, xt_rateest_free_rcu); 73 kfree_rcu(est, rcu);
79 } 74 }
80 mutex_unlock(&xt_rateest_mutex); 75 mutex_unlock(&xt_rateest_mutex);
81} 76}
@@ -188,7 +183,6 @@ static int __init xt_rateest_tg_init(void)
188static void __exit xt_rateest_tg_fini(void) 183static void __exit xt_rateest_tg_fini(void)
189{ 184{
190 xt_unregister_target(&xt_rateest_tg_reg); 185 xt_unregister_target(&xt_rateest_tg_reg);
191 rcu_barrier(); /* Wait for completion of call_rcu()'s (xt_rateest_free_rcu) */
192} 186}
193 187
194 188
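
The xt_RATEEST change is the stock kfree_rcu() conversion: when an RCU callback only did kfree(container_of(head, type, member)), kfree_rcu(ptr, member) expresses the same offset-based free without a hand-written callback or the rcu_barrier() at module exit. A userspace model of what the deleted callback computed; rcu_head, container_of and the direct call are stand-ins, since the kernel defers the free past a grace period:

#include <stddef.h>
#include <stdlib.h>

struct rcu_head {			/* stand-in for the kernel type */
	void (*func)(struct rcu_head *);
};

struct est {
	int rate;
	struct rcu_head rcu;		/* embedded, as in struct xt_rateest */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* What xt_rateest_free_rcu() did: recover the object from its
 * embedded rcu_head and free it. kfree_rcu(est, rcu) frees via the
 * same member offset, minus the hand-written callback. */
static void est_free_rcu(struct rcu_head *head)
{
	free(container_of(head, struct est, rcu));
}

int main(void)
{
	struct est *e = calloc(1, sizeof(*e));

	if (!e)
		return 1;
	/* stand-in for call_rcu(&e->rcu, est_free_rcu) */
	est_free_rcu(&e->rcu);
	return 0;
}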
diff --git a/net/netfilter/xt_hl.c b/net/netfilter/xt_hl.c
index 7d12221ead8..003951149c9 100644
--- a/net/netfilter/xt_hl.c
+++ b/net/netfilter/xt_hl.c
@@ -31,14 +31,14 @@ static bool ttl_mt(const struct sk_buff *skb, struct xt_action_param *par)
31 const u8 ttl = ip_hdr(skb)->ttl; 31 const u8 ttl = ip_hdr(skb)->ttl;
32 32
33 switch (info->mode) { 33 switch (info->mode) {
34 case IPT_TTL_EQ: 34 case IPT_TTL_EQ:
35 return ttl == info->ttl; 35 return ttl == info->ttl;
36 case IPT_TTL_NE: 36 case IPT_TTL_NE:
37 return ttl != info->ttl; 37 return ttl != info->ttl;
38 case IPT_TTL_LT: 38 case IPT_TTL_LT:
39 return ttl < info->ttl; 39 return ttl < info->ttl;
40 case IPT_TTL_GT: 40 case IPT_TTL_GT:
41 return ttl > info->ttl; 41 return ttl > info->ttl;
42 } 42 }
43 43
44 return false; 44 return false;
@@ -50,14 +50,14 @@ static bool hl_mt6(const struct sk_buff *skb, struct xt_action_param *par)
50 const struct ipv6hdr *ip6h = ipv6_hdr(skb); 50 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
51 51
52 switch (info->mode) { 52 switch (info->mode) {
53 case IP6T_HL_EQ: 53 case IP6T_HL_EQ:
54 return ip6h->hop_limit == info->hop_limit; 54 return ip6h->hop_limit == info->hop_limit;
55 case IP6T_HL_NE: 55 case IP6T_HL_NE:
56 return ip6h->hop_limit != info->hop_limit; 56 return ip6h->hop_limit != info->hop_limit;
57 case IP6T_HL_LT: 57 case IP6T_HL_LT:
58 return ip6h->hop_limit < info->hop_limit; 58 return ip6h->hop_limit < info->hop_limit;
59 case IP6T_HL_GT: 59 case IP6T_HL_GT:
60 return ip6h->hop_limit > info->hop_limit; 60 return ip6h->hop_limit > info->hop_limit;
61 } 61 }
62 62
63 return false; 63 return false;
diff --git a/net/netfilter/xt_set.c b/net/netfilter/xt_set.c
index b3babaed771..0ec8138aa47 100644
--- a/net/netfilter/xt_set.c
+++ b/net/netfilter/xt_set.c
@@ -13,7 +13,6 @@
13 13
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/skbuff.h> 15#include <linux/skbuff.h>
16#include <linux/version.h>
17 16
18#include <linux/netfilter/x_tables.h> 17#include <linux/netfilter/x_tables.h>
19#include <linux/netfilter/xt_set.h> 18#include <linux/netfilter/xt_set.h>
@@ -29,23 +28,33 @@ MODULE_ALIAS("ip6t_SET");
29 28
30static inline int 29static inline int
31match_set(ip_set_id_t index, const struct sk_buff *skb, 30match_set(ip_set_id_t index, const struct sk_buff *skb,
32 u8 pf, u8 dim, u8 flags, int inv) 31 const struct xt_action_param *par,
32 const struct ip_set_adt_opt *opt, int inv)
33{ 33{
34 if (ip_set_test(index, skb, pf, dim, flags)) 34 if (ip_set_test(index, skb, par, opt))
35 inv = !inv; 35 inv = !inv;
36 return inv; 36 return inv;
37} 37}
38 38
39#define ADT_OPT(n, f, d, fs, cfs, t) \
40const struct ip_set_adt_opt n = { \
41 .family = f, \
42 .dim = d, \
43 .flags = fs, \
44 .cmdflags = cfs, \
45 .timeout = t, \
46}
47
39/* Revision 0 interface: backward compatible with netfilter/iptables */ 48/* Revision 0 interface: backward compatible with netfilter/iptables */
40 49
41static bool 50static bool
42set_match_v0(const struct sk_buff *skb, struct xt_action_param *par) 51set_match_v0(const struct sk_buff *skb, struct xt_action_param *par)
43{ 52{
44 const struct xt_set_info_match_v0 *info = par->matchinfo; 53 const struct xt_set_info_match_v0 *info = par->matchinfo;
54 ADT_OPT(opt, par->family, info->match_set.u.compat.dim,
55 info->match_set.u.compat.flags, 0, UINT_MAX);
45 56
46 return match_set(info->match_set.index, skb, par->family, 57 return match_set(info->match_set.index, skb, par, &opt,
47 info->match_set.u.compat.dim,
48 info->match_set.u.compat.flags,
49 info->match_set.u.compat.flags & IPSET_INV_MATCH); 58 info->match_set.u.compat.flags & IPSET_INV_MATCH);
50} 59}
51 60
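
ADT_OPT() bundles the former five scalar arguments of ip_set_test()/ip_set_add()/ip_set_del() into one const struct ip_set_adt_opt on the stack, which is what lets revision 2 of the SET target thread per-rule timeout and command flags through unchanged call sites. A compile-and-run sketch of the macro shape; struct adt_opt and ip_set_test_stub() are local stand-ins, not the kernel definitions:

#include <stdint.h>
#include <stdio.h>

/* Local stand-in for struct ip_set_adt_opt. */
struct adt_opt {
	uint8_t  family;
	uint8_t  dim;
	uint8_t  flags;
	uint32_t cmdflags;
	uint32_t timeout;
};

/* Same shape as the ADT_OPT() macro in the hunk above: declare a
 * const, fully initialized options struct on the stack under the
 * chosen name, so each call site passes one pointer instead of
 * five scalar arguments. */
#define ADT_OPT(n, f, d, fs, cfs, t)	\
const struct adt_opt n = {		\
	.family   = f,			\
	.dim      = d,			\
	.flags    = fs,			\
	.cmdflags = cfs,		\
	.timeout  = t,			\
}

static void ip_set_test_stub(const struct adt_opt *opt)
{
	printf("family=%u dim=%u timeout=%u\n",
	       opt->family, opt->dim, opt->timeout);
}

int main(void)
{
	ADT_OPT(opt, 2 /* NFPROTO_IPV4 */, 1, 0, 0, UINT32_MAX);

	ip_set_test_stub(&opt);
	return 0;
}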
@@ -103,15 +112,15 @@ static unsigned int
103set_target_v0(struct sk_buff *skb, const struct xt_action_param *par) 112set_target_v0(struct sk_buff *skb, const struct xt_action_param *par)
104{ 113{
105 const struct xt_set_info_target_v0 *info = par->targinfo; 114 const struct xt_set_info_target_v0 *info = par->targinfo;
115 ADT_OPT(add_opt, par->family, info->add_set.u.compat.dim,
116 info->add_set.u.compat.flags, 0, UINT_MAX);
117 ADT_OPT(del_opt, par->family, info->del_set.u.compat.dim,
118 info->del_set.u.compat.flags, 0, UINT_MAX);
106 119
107 if (info->add_set.index != IPSET_INVALID_ID) 120 if (info->add_set.index != IPSET_INVALID_ID)
108 ip_set_add(info->add_set.index, skb, par->family, 121 ip_set_add(info->add_set.index, skb, par, &add_opt);
109 info->add_set.u.compat.dim,
110 info->add_set.u.compat.flags);
111 if (info->del_set.index != IPSET_INVALID_ID) 122 if (info->del_set.index != IPSET_INVALID_ID)
112 ip_set_del(info->del_set.index, skb, par->family, 123 ip_set_del(info->del_set.index, skb, par, &del_opt);
113 info->del_set.u.compat.dim,
114 info->del_set.u.compat.flags);
115 124
116 return XT_CONTINUE; 125 return XT_CONTINUE;
117} 126}
@@ -170,23 +179,23 @@ set_target_v0_destroy(const struct xt_tgdtor_param *par)
170 ip_set_nfnl_put(info->del_set.index); 179 ip_set_nfnl_put(info->del_set.index);
171} 180}
172 181
173/* Revision 1: current interface to netfilter/iptables */ 182/* Revision 1 match and target */
174 183
175static bool 184static bool
176set_match(const struct sk_buff *skb, struct xt_action_param *par) 185set_match_v1(const struct sk_buff *skb, struct xt_action_param *par)
177{ 186{
178 const struct xt_set_info_match *info = par->matchinfo; 187 const struct xt_set_info_match_v1 *info = par->matchinfo;
188 ADT_OPT(opt, par->family, info->match_set.dim,
189 info->match_set.flags, 0, UINT_MAX);
179 190
180 return match_set(info->match_set.index, skb, par->family, 191 return match_set(info->match_set.index, skb, par, &opt,
181 info->match_set.dim,
182 info->match_set.flags,
183 info->match_set.flags & IPSET_INV_MATCH); 192 info->match_set.flags & IPSET_INV_MATCH);
184} 193}
185 194
186static int 195static int
187set_match_checkentry(const struct xt_mtchk_param *par) 196set_match_v1_checkentry(const struct xt_mtchk_param *par)
188{ 197{
189 struct xt_set_info_match *info = par->matchinfo; 198 struct xt_set_info_match_v1 *info = par->matchinfo;
190 ip_set_id_t index; 199 ip_set_id_t index;
191 200
192 index = ip_set_nfnl_get_byindex(info->match_set.index); 201 index = ip_set_nfnl_get_byindex(info->match_set.index);
@@ -207,36 +216,34 @@ set_match_checkentry(const struct xt_mtchk_param *par)
207} 216}
208 217
209static void 218static void
210set_match_destroy(const struct xt_mtdtor_param *par) 219set_match_v1_destroy(const struct xt_mtdtor_param *par)
211{ 220{
212 struct xt_set_info_match *info = par->matchinfo; 221 struct xt_set_info_match_v1 *info = par->matchinfo;
213 222
214 ip_set_nfnl_put(info->match_set.index); 223 ip_set_nfnl_put(info->match_set.index);
215} 224}
216 225
217static unsigned int 226static unsigned int
218set_target(struct sk_buff *skb, const struct xt_action_param *par) 227set_target_v1(struct sk_buff *skb, const struct xt_action_param *par)
219{ 228{
220 const struct xt_set_info_target *info = par->targinfo; 229 const struct xt_set_info_target_v1 *info = par->targinfo;
230 ADT_OPT(add_opt, par->family, info->add_set.dim,
231 info->add_set.flags, 0, UINT_MAX);
232 ADT_OPT(del_opt, par->family, info->del_set.dim,
233 info->del_set.flags, 0, UINT_MAX);
221 234
222 if (info->add_set.index != IPSET_INVALID_ID) 235 if (info->add_set.index != IPSET_INVALID_ID)
223 ip_set_add(info->add_set.index, 236 ip_set_add(info->add_set.index, skb, par, &add_opt);
224 skb, par->family,
225 info->add_set.dim,
226 info->add_set.flags);
227 if (info->del_set.index != IPSET_INVALID_ID) 237 if (info->del_set.index != IPSET_INVALID_ID)
228 ip_set_del(info->del_set.index, 238 ip_set_del(info->del_set.index, skb, par, &del_opt);
229 skb, par->family,
230 info->del_set.dim,
231 info->del_set.flags);
232 239
233 return XT_CONTINUE; 240 return XT_CONTINUE;
234} 241}
235 242
236static int 243static int
237set_target_checkentry(const struct xt_tgchk_param *par) 244set_target_v1_checkentry(const struct xt_tgchk_param *par)
238{ 245{
239 const struct xt_set_info_target *info = par->targinfo; 246 const struct xt_set_info_target_v1 *info = par->targinfo;
240 ip_set_id_t index; 247 ip_set_id_t index;
241 248
242 if (info->add_set.index != IPSET_INVALID_ID) { 249 if (info->add_set.index != IPSET_INVALID_ID) {
@@ -273,9 +280,9 @@ set_target_checkentry(const struct xt_tgchk_param *par)
273} 280}
274 281
275static void 282static void
276set_target_destroy(const struct xt_tgdtor_param *par) 283set_target_v1_destroy(const struct xt_tgdtor_param *par)
277{ 284{
278 const struct xt_set_info_target *info = par->targinfo; 285 const struct xt_set_info_target_v1 *info = par->targinfo;
279 286
280 if (info->add_set.index != IPSET_INVALID_ID) 287 if (info->add_set.index != IPSET_INVALID_ID)
281 ip_set_nfnl_put(info->add_set.index); 288 ip_set_nfnl_put(info->add_set.index);
@@ -283,6 +290,28 @@ set_target_destroy(const struct xt_tgdtor_param *par)
283 ip_set_nfnl_put(info->del_set.index); 290 ip_set_nfnl_put(info->del_set.index);
284} 291}
285 292
293/* Revision 2 target */
294
295static unsigned int
296set_target_v2(struct sk_buff *skb, const struct xt_action_param *par)
297{
298 const struct xt_set_info_target_v2 *info = par->targinfo;
299 ADT_OPT(add_opt, par->family, info->add_set.dim,
300 info->add_set.flags, info->flags, info->timeout);
301 ADT_OPT(del_opt, par->family, info->del_set.dim,
302 info->del_set.flags, 0, UINT_MAX);
303
304 if (info->add_set.index != IPSET_INVALID_ID)
305 ip_set_add(info->add_set.index, skb, par, &add_opt);
306 if (info->del_set.index != IPSET_INVALID_ID)
307 ip_set_del(info->del_set.index, skb, par, &del_opt);
308
309 return XT_CONTINUE;
310}
311
312#define set_target_v2_checkentry set_target_v1_checkentry
313#define set_target_v2_destroy set_target_v1_destroy
314
286static struct xt_match set_matches[] __read_mostly = { 315static struct xt_match set_matches[] __read_mostly = {
287 { 316 {
288 .name = "set", 317 .name = "set",
@@ -298,20 +327,20 @@ static struct xt_match set_matches[] __read_mostly = {
298 .name = "set", 327 .name = "set",
299 .family = NFPROTO_IPV4, 328 .family = NFPROTO_IPV4,
300 .revision = 1, 329 .revision = 1,
301 .match = set_match, 330 .match = set_match_v1,
302 .matchsize = sizeof(struct xt_set_info_match), 331 .matchsize = sizeof(struct xt_set_info_match_v1),
303 .checkentry = set_match_checkentry, 332 .checkentry = set_match_v1_checkentry,
304 .destroy = set_match_destroy, 333 .destroy = set_match_v1_destroy,
305 .me = THIS_MODULE 334 .me = THIS_MODULE
306 }, 335 },
307 { 336 {
308 .name = "set", 337 .name = "set",
309 .family = NFPROTO_IPV6, 338 .family = NFPROTO_IPV6,
310 .revision = 1, 339 .revision = 1,
311 .match = set_match, 340 .match = set_match_v1,
312 .matchsize = sizeof(struct xt_set_info_match), 341 .matchsize = sizeof(struct xt_set_info_match_v1),
313 .checkentry = set_match_checkentry, 342 .checkentry = set_match_v1_checkentry,
314 .destroy = set_match_destroy, 343 .destroy = set_match_v1_destroy,
315 .me = THIS_MODULE 344 .me = THIS_MODULE
316 }, 345 },
317}; 346};
@@ -331,20 +360,40 @@ static struct xt_target set_targets[] __read_mostly = {
331 .name = "SET", 360 .name = "SET",
332 .revision = 1, 361 .revision = 1,
333 .family = NFPROTO_IPV4, 362 .family = NFPROTO_IPV4,
334 .target = set_target, 363 .target = set_target_v1,
335 .targetsize = sizeof(struct xt_set_info_target), 364 .targetsize = sizeof(struct xt_set_info_target_v1),
336 .checkentry = set_target_checkentry, 365 .checkentry = set_target_v1_checkentry,
337 .destroy = set_target_destroy, 366 .destroy = set_target_v1_destroy,
338 .me = THIS_MODULE 367 .me = THIS_MODULE
339 }, 368 },
340 { 369 {
341 .name = "SET", 370 .name = "SET",
342 .revision = 1, 371 .revision = 1,
343 .family = NFPROTO_IPV6, 372 .family = NFPROTO_IPV6,
344 .target = set_target, 373 .target = set_target_v1,
345 .targetsize = sizeof(struct xt_set_info_target), 374 .targetsize = sizeof(struct xt_set_info_target_v1),
346 .checkentry = set_target_checkentry, 375 .checkentry = set_target_v1_checkentry,
347 .destroy = set_target_destroy, 376 .destroy = set_target_v1_destroy,
377 .me = THIS_MODULE
378 },
379 {
380 .name = "SET",
381 .revision = 2,
382 .family = NFPROTO_IPV4,
383 .target = set_target_v2,
384 .targetsize = sizeof(struct xt_set_info_target_v2),
385 .checkentry = set_target_v2_checkentry,
386 .destroy = set_target_v2_destroy,
387 .me = THIS_MODULE
388 },
389 {
390 .name = "SET",
391 .revision = 2,
392 .family = NFPROTO_IPV6,
393 .target = set_target_v2,
394 .targetsize = sizeof(struct xt_set_info_target_v2),
395 .checkentry = set_target_v2_checkentry,
396 .destroy = set_target_v2_destroy,
348 .me = THIS_MODULE 397 .me = THIS_MODULE
349 }, 398 },
350}; 399};
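
For context, the revision-1 and revision-2 entries above share one registration path; the module init that wires both arrays into x_tables presumably looks like this (a sketch, body assumed from the standard xt module pattern):

	static int __init xt_set_init(void)
	{
		int ret = xt_register_matches(set_matches, ARRAY_SIZE(set_matches));

		if (ret)
			return ret;

		ret = xt_register_targets(set_targets, ARRAY_SIZE(set_targets));
		if (ret)
			/* roll back the matches if the targets cannot register */
			xt_unregister_matches(set_matches, ARRAY_SIZE(set_matches));

		return ret;
	}

Userspace iptables selects the highest revision the kernel reports, so existing rulesets keep loading against revision 0/1 while new ones can use the revision-2 timeout support.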
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index 9c38658fba8..8efd061a0ae 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -426,10 +426,9 @@ int netlbl_unlhsh_add(struct net *net,
426 audit_info); 426 audit_info);
427 switch (addr_len) { 427 switch (addr_len) {
428 case sizeof(struct in_addr): { 428 case sizeof(struct in_addr): {
429 struct in_addr *addr4, *mask4; 429 const struct in_addr *addr4 = addr;
430 const struct in_addr *mask4 = mask;
430 431
431 addr4 = (struct in_addr *)addr;
432 mask4 = (struct in_addr *)mask;
433 ret_val = netlbl_unlhsh_add_addr4(iface, addr4, mask4, secid); 432 ret_val = netlbl_unlhsh_add_addr4(iface, addr4, mask4, secid);
434 if (audit_buf != NULL) 433 if (audit_buf != NULL)
435 netlbl_af4list_audit_addr(audit_buf, 1, 434 netlbl_af4list_audit_addr(audit_buf, 1,
@@ -440,10 +439,9 @@ int netlbl_unlhsh_add(struct net *net,
440 } 439 }
441#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 440#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
442 case sizeof(struct in6_addr): { 441 case sizeof(struct in6_addr): {
443 struct in6_addr *addr6, *mask6; 442 const struct in6_addr *addr6 = addr;
443 const struct in6_addr *mask6 = mask;
444 444
445 addr6 = (struct in6_addr *)addr;
446 mask6 = (struct in6_addr *)mask;
447 ret_val = netlbl_unlhsh_add_addr6(iface, addr6, mask6, secid); 445 ret_val = netlbl_unlhsh_add_addr6(iface, addr6, mask6, secid);
448 if (audit_buf != NULL) 446 if (audit_buf != NULL)
449 netlbl_af6list_audit_addr(audit_buf, 1, 447 netlbl_af6list_audit_addr(audit_buf, 1,
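
The rewrite relies on C's implicit conversion from const void * to any const object pointer, which is why the explicit casts could simply be dropped; a minimal sketch of the idiom (hypothetical helper):

	#include <linux/in.h>

	static __be32 masked_v4(const void *addr, const void *mask)
	{
		const struct in_addr *addr4 = addr;	/* no cast needed */
		const struct in_addr *mask4 = mask;

		return addr4->s_addr & mask4->s_addr;
	}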
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 6ef64adf736..0a4db0211da 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1659,13 +1659,10 @@ static int netlink_dump(struct sock *sk)
1659{ 1659{
1660 struct netlink_sock *nlk = nlk_sk(sk); 1660 struct netlink_sock *nlk = nlk_sk(sk);
1661 struct netlink_callback *cb; 1661 struct netlink_callback *cb;
1662 struct sk_buff *skb; 1662 struct sk_buff *skb = NULL;
1663 struct nlmsghdr *nlh; 1663 struct nlmsghdr *nlh;
1664 int len, err = -ENOBUFS; 1664 int len, err = -ENOBUFS;
1665 1665 int alloc_size;
1666 skb = sock_rmalloc(sk, NLMSG_GOODSIZE, 0, GFP_KERNEL);
1667 if (!skb)
1668 goto errout;
1669 1666
1670 mutex_lock(nlk->cb_mutex); 1667 mutex_lock(nlk->cb_mutex);
1671 1668
@@ -1675,6 +1672,12 @@ static int netlink_dump(struct sock *sk)
1675 goto errout_skb; 1672 goto errout_skb;
1676 } 1673 }
1677 1674
1675 alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
1676
1677 skb = sock_rmalloc(sk, alloc_size, 0, GFP_KERNEL);
1678 if (!skb)
1679 goto errout_skb;
1680
1678 len = cb->dump(skb, cb); 1681 len = cb->dump(skb, cb);
1679 1682
1680 if (len > 0) { 1683 if (len > 0) {
@@ -1693,6 +1696,8 @@ static int netlink_dump(struct sock *sk)
1693 if (!nlh) 1696 if (!nlh)
1694 goto errout_skb; 1697 goto errout_skb;
1695 1698
1699 nl_dump_check_consistent(cb, nlh);
1700
1696 memcpy(nlmsg_data(nlh), &len, sizeof(len)); 1701 memcpy(nlmsg_data(nlh), &len, sizeof(len));
1697 1702
1698 if (sk_filter(sk, skb)) 1703 if (sk_filter(sk, skb))
@@ -1713,7 +1718,6 @@ static int netlink_dump(struct sock *sk)
1713errout_skb: 1718errout_skb:
1714 mutex_unlock(nlk->cb_mutex); 1719 mutex_unlock(nlk->cb_mutex);
1715 kfree_skb(skb); 1720 kfree_skb(skb);
1716errout:
1717 return err; 1721 return err;
1718} 1722}
1719 1723
@@ -1721,7 +1725,8 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
1721 const struct nlmsghdr *nlh, 1725 const struct nlmsghdr *nlh,
1722 int (*dump)(struct sk_buff *skb, 1726 int (*dump)(struct sk_buff *skb,
1723 struct netlink_callback *), 1727 struct netlink_callback *),
1724 int (*done)(struct netlink_callback *)) 1728 int (*done)(struct netlink_callback *),
1729 u16 min_dump_alloc)
1725{ 1730{
1726 struct netlink_callback *cb; 1731 struct netlink_callback *cb;
1727 struct sock *sk; 1732 struct sock *sk;
@@ -1735,6 +1740,7 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
1735 cb->dump = dump; 1740 cb->dump = dump;
1736 cb->done = done; 1741 cb->done = done;
1737 cb->nlh = nlh; 1742 cb->nlh = nlh;
1743 cb->min_dump_alloc = min_dump_alloc;
1738 atomic_inc(&skb->users); 1744 atomic_inc(&skb->users);
1739 cb->skb = skb; 1745 cb->skb = skb;
1740 1746
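
The extra parameter lets a dumper pre-size the first skb instead of always getting NLMSG_GOODSIZE; existing callers pass 0 to keep the old sizing. A hypothetical caller (my_dump_cb/my_done_cb are placeholder names):

	static int my_dump_cb(struct sk_buff *skb, struct netlink_callback *cb);
	static int my_done_cb(struct netlink_callback *cb);

	static int my_dump_request(struct net *net, struct sk_buff *skb,
				   const struct nlmsghdr *nlh)
	{
		/* ask for at least 16 KiB up front; capped by the u16 type */
		return netlink_dump_start(net->rtnl, skb, nlh,
					  my_dump_cb, my_done_cb, 16384);
	}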
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 1781d99145e..482fa571b4e 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -525,7 +525,7 @@ static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
525 525
526 genl_unlock(); 526 genl_unlock();
527 err = netlink_dump_start(net->genl_sock, skb, nlh, 527 err = netlink_dump_start(net->genl_sock, skb, nlh,
528 ops->dumpit, ops->done); 528 ops->dumpit, ops->done, 0);
529 genl_lock(); 529 genl_lock();
530 return err; 530 return err;
531 } 531 }
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index 44059d0c8dd..cd5ddb2ebc4 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -257,9 +257,12 @@ static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic,
257 case 3: 257 case 3:
258 if (nr_node->routes[1].quality > nr_node->routes[0].quality) { 258 if (nr_node->routes[1].quality > nr_node->routes[0].quality) {
259 switch (nr_node->which) { 259 switch (nr_node->which) {
260 case 0: nr_node->which = 1; break; 260 case 0:
261 case 1: nr_node->which = 0; break; 261 nr_node->which = 1;
262 default: break; 262 break;
263 case 1:
264 nr_node->which = 0;
265 break;
263 } 266 }
264 nr_route = nr_node->routes[0]; 267 nr_route = nr_node->routes[0];
265 nr_node->routes[0] = nr_node->routes[1]; 268 nr_node->routes[0] = nr_node->routes[1];
@@ -505,12 +508,13 @@ static int nr_dec_obs(void)
505 s->count--; 508 s->count--;
506 509
507 switch (i) { 510 switch (i) {
508 case 0: 511 case 0:
509 s->routes[0] = s->routes[1]; 512 s->routes[0] = s->routes[1];
510 case 1: 513 /* Fallthrough */
511 s->routes[1] = s->routes[2]; 514 case 1:
512 case 2: 515 s->routes[1] = s->routes[2];
513 break; 516 case 2:
517 break;
514 } 518 }
515 break; 519 break;
516 520
diff --git a/net/nfc/Kconfig b/net/nfc/Kconfig
new file mode 100644
index 00000000000..33e095b124b
--- /dev/null
+++ b/net/nfc/Kconfig
@@ -0,0 +1,16 @@
1#
2# NFC subsystem configuration
3#
4
5menuconfig NFC
6 depends on NET && EXPERIMENTAL
7 tristate "NFC subsystem support (EXPERIMENTAL)"
8 default n
9 help
10 Say Y here if you want to build support for NFC (Near field
11 communication) devices.
12
13 To compile this support as a module, choose M here: the module will
14 be called nfc.
15
16source "drivers/nfc/Kconfig"
diff --git a/net/nfc/Makefile b/net/nfc/Makefile
new file mode 100644
index 00000000000..16250c35385
--- /dev/null
+++ b/net/nfc/Makefile
@@ -0,0 +1,7 @@
1#
2# Makefile for the Linux NFC subsystem.
3#
4
5obj-$(CONFIG_NFC) += nfc.o
6
7nfc-objs := core.o netlink.o af_nfc.o rawsock.o
diff --git a/net/nfc/af_nfc.c b/net/nfc/af_nfc.c
new file mode 100644
index 00000000000..e982cef8f49
--- /dev/null
+++ b/net/nfc/af_nfc.c
@@ -0,0 +1,98 @@
1/*
2 * Copyright (C) 2011 Instituto Nokia de Tecnologia
3 *
4 * Authors:
5 * Aloisio Almeida Jr <aloisio.almeida@openbossa.org>
6 * Lauro Ramos Venancio <lauro.venancio@openbossa.org>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the
20 * Free Software Foundation, Inc.,
21 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
22 */
23
24#include <linux/nfc.h>
25
26#include "nfc.h"
27
28static DEFINE_RWLOCK(proto_tab_lock);
29static const struct nfc_protocol *proto_tab[NFC_SOCKPROTO_MAX];
30
31static int nfc_sock_create(struct net *net, struct socket *sock, int proto,
32 int kern)
33{
34 int rc = -EPROTONOSUPPORT;
35
36 if (net != &init_net)
37 return -EAFNOSUPPORT;
38
39 if (proto < 0 || proto >= NFC_SOCKPROTO_MAX)
40 return -EINVAL;
41
42 read_lock(&proto_tab_lock);
43 if (proto_tab[proto] && try_module_get(proto_tab[proto]->owner)) {
44 rc = proto_tab[proto]->create(net, sock, proto_tab[proto]);
45 module_put(proto_tab[proto]->owner);
46 }
47 read_unlock(&proto_tab_lock);
48
49 return rc;
50}
51
52static struct net_proto_family nfc_sock_family_ops = {
53 .owner = THIS_MODULE,
54 .family = PF_NFC,
55 .create = nfc_sock_create,
56};
57
58int nfc_proto_register(const struct nfc_protocol *nfc_proto)
59{
60 int rc;
61
62 if (nfc_proto->id < 0 || nfc_proto->id >= NFC_SOCKPROTO_MAX)
63 return -EINVAL;
64
65 rc = proto_register(nfc_proto->proto, 0);
66 if (rc)
67 return rc;
68
69 write_lock(&proto_tab_lock);
70 if (proto_tab[nfc_proto->id])
71 rc = -EBUSY;
72 else
73 proto_tab[nfc_proto->id] = nfc_proto;
74 write_unlock(&proto_tab_lock);
75
76 return rc;
77}
78EXPORT_SYMBOL(nfc_proto_register);
79
80void nfc_proto_unregister(const struct nfc_protocol *nfc_proto)
81{
82 write_lock(&proto_tab_lock);
83 proto_tab[nfc_proto->id] = NULL;
84 write_unlock(&proto_tab_lock);
85
86 proto_unregister(nfc_proto->proto);
87}
88EXPORT_SYMBOL(nfc_proto_unregister);
89
90int __init af_nfc_init(void)
91{
92 return sock_register(&nfc_sock_family_ops);
93}
94
95void af_nfc_exit(void)
96{
97 sock_unregister(PF_NFC);
98}
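
A protocol plugs into proto_tab by handing this file a struct nfc_protocol; rawsock.c later in this patch is the in-tree user, but the shape is roughly (all demo_* names hypothetical):

	static struct proto demo_proto = {
		.name     = "NFC_DEMO",
		.owner    = THIS_MODULE,
		.obj_size = sizeof(struct sock),
	};

	static int demo_create(struct net *net, struct socket *sock,
			       const struct nfc_protocol *nfc_proto)
	{
		return -EPROTONOSUPPORT;	/* stub */
	}

	static const struct nfc_protocol demo_nfc_proto = {
		.id     = NFC_SOCKPROTO_RAW,	/* must be < NFC_SOCKPROTO_MAX */
		.proto  = &demo_proto,
		.owner  = THIS_MODULE,
		.create = demo_create,
	};

After nfc_proto_register(&demo_nfc_proto), socket(AF_NFC, type, id) calls reach demo_create() through nfc_sock_create() above.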
diff --git a/net/nfc/core.c b/net/nfc/core.c
new file mode 100644
index 00000000000..b6fd4e1f205
--- /dev/null
+++ b/net/nfc/core.c
@@ -0,0 +1,468 @@
1/*
2 * Copyright (C) 2011 Instituto Nokia de Tecnologia
3 *
4 * Authors:
5 * Lauro Ramos Venancio <lauro.venancio@openbossa.org>
6 * Aloisio Almeida Jr <aloisio.almeida@openbossa.org>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the
20 * Free Software Foundation, Inc.,
21 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
22 */
23
24#include <linux/init.h>
25#include <linux/kernel.h>
26#include <linux/module.h>
27#include <linux/slab.h>
28
29#include "nfc.h"
30
31#define VERSION "0.1"
32
33int nfc_devlist_generation;
34DEFINE_MUTEX(nfc_devlist_mutex);
35
36int nfc_printk(const char *level, const char *format, ...)
37{
38 struct va_format vaf;
39 va_list args;
40 int r;
41
42 va_start(args, format);
43
44 vaf.fmt = format;
45 vaf.va = &args;
46
47 r = printk("%sNFC: %pV\n", level, &vaf);
48
49 va_end(args);
50
51 return r;
52}
53EXPORT_SYMBOL(nfc_printk);
54
55/**
56 * nfc_start_poll - start polling for nfc targets
57 *
58 * @dev: The nfc device that must start polling
59 * @protocols: bitset of nfc protocols that must be used for polling
60 *
61 * The device remains polling for targets until a target is found or
62 * the nfc_stop_poll function is called.
63 */
64int nfc_start_poll(struct nfc_dev *dev, u32 protocols)
65{
66 int rc;
67
68 nfc_dbg("dev_name=%s protocols=0x%x", dev_name(&dev->dev), protocols);
69
70 if (!protocols)
71 return -EINVAL;
72
73 device_lock(&dev->dev);
74
75 if (!device_is_registered(&dev->dev)) {
76 rc = -ENODEV;
77 goto error;
78 }
79
80 if (dev->polling) {
81 rc = -EBUSY;
82 goto error;
83 }
84
85 rc = dev->ops->start_poll(dev, protocols);
86 if (!rc)
87 dev->polling = true;
88
89error:
90 device_unlock(&dev->dev);
91 return rc;
92}
93
94/**
95 * nfc_stop_poll - stop polling for nfc targets
96 *
97 * @dev: The nfc device that must stop polling
98 */
99int nfc_stop_poll(struct nfc_dev *dev)
100{
101 int rc = 0;
102
103 nfc_dbg("dev_name=%s", dev_name(&dev->dev));
104
105 device_lock(&dev->dev);
106
107 if (!device_is_registered(&dev->dev)) {
108 rc = -ENODEV;
109 goto error;
110 }
111
112 if (!dev->polling) {
113 rc = -EINVAL;
114 goto error;
115 }
116
117 dev->ops->stop_poll(dev);
118 dev->polling = false;
119
120error:
121 device_unlock(&dev->dev);
122 return rc;
123}
124
125/**
126 * nfc_activate_target - prepare the target for data exchange
127 *
128 * @dev: The nfc device that found the target
129 * @target_idx: index of the target that must be activated
130 * @protocol: nfc protocol that will be used for data exchange
131 */
132int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol)
133{
134 int rc;
135
136 nfc_dbg("dev_name=%s target_idx=%u protocol=%u", dev_name(&dev->dev),
137 target_idx, protocol);
138
139 device_lock(&dev->dev);
140
141 if (!device_is_registered(&dev->dev)) {
142 rc = -ENODEV;
143 goto error;
144 }
145
146 rc = dev->ops->activate_target(dev, target_idx, protocol);
147
148error:
149 device_unlock(&dev->dev);
150 return rc;
151}
152
153/**
154 * nfc_deactivate_target - deactivate a nfc target
155 *
156 * @dev: The nfc device that found the target
157 * @target_idx: index of the target that must be deactivated
158 */
159int nfc_deactivate_target(struct nfc_dev *dev, u32 target_idx)
160{
161 int rc = 0;
162
163 nfc_dbg("dev_name=%s target_idx=%u", dev_name(&dev->dev), target_idx);
164
165 device_lock(&dev->dev);
166
167 if (!device_is_registered(&dev->dev)) {
168 rc = -ENODEV;
169 goto error;
170 }
171
172 dev->ops->deactivate_target(dev, target_idx);
173
174error:
175 device_unlock(&dev->dev);
176 return rc;
177}
178
179/**
180 * nfc_data_exchange - transceive data
181 *
182 * @dev: The nfc device that found the target
183 * @target_idx: index of the target
184 * @skb: data to be sent
185 * @cb: callback called when the response is received
186 * @cb_context: parameter for the callback function
187 *
188 * The user must wait for the callback before calling this function again.
189 */
190int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx,
191 struct sk_buff *skb,
192 data_exchange_cb_t cb,
193 void *cb_context)
194{
195 int rc;
196
197 nfc_dbg("dev_name=%s target_idx=%u skb->len=%u", dev_name(&dev->dev),
198 target_idx, skb->len);
199
200 device_lock(&dev->dev);
201
202 if (!device_is_registered(&dev->dev)) {
203 rc = -ENODEV;
204 kfree_skb(skb);
205 goto error;
206 }
207
208 rc = dev->ops->data_exchange(dev, target_idx, skb, cb, cb_context);
209
210error:
211 device_unlock(&dev->dev);
212 return rc;
213}
214
215/**
216 * nfc_alloc_skb - allocate a skb for data exchange responses
217 *
218 * @size: size to allocate
219 * @gfp: gfp flags
220 */
221struct sk_buff *nfc_alloc_skb(unsigned int size, gfp_t gfp)
222{
223 struct sk_buff *skb;
224 unsigned int total_size;
225
226 total_size = size + 1;
227 skb = alloc_skb(total_size, gfp);
228
229 if (skb)
230 skb_reserve(skb, 1);
231
232 return skb;
233}
234EXPORT_SYMBOL(nfc_alloc_skb);
235
236/**
237 * nfc_targets_found - inform that targets were found
238 *
239 * @dev: The nfc device that found the targets
240 * @targets: array of nfc targets found
241 * @ntargets: targets array size
242 *
243 * The device driver must call this function when one or many nfc targets
244 * are found. After calling this function, the device driver must stop
245 * polling for targets.
246 */
247int nfc_targets_found(struct nfc_dev *dev, struct nfc_target *targets,
248 int n_targets)
249{
250 int i;
251
252 nfc_dbg("dev_name=%s n_targets=%d", dev_name(&dev->dev), n_targets);
253
254 dev->polling = false;
255
256 for (i = 0; i < n_targets; i++)
257 targets[i].idx = dev->target_idx++;
258
259 spin_lock_bh(&dev->targets_lock);
260
261 dev->targets_generation++;
262
263 kfree(dev->targets);
264 dev->targets = kmemdup(targets, n_targets * sizeof(struct nfc_target),
265 GFP_ATOMIC);
266
267 if (!dev->targets) {
268 dev->n_targets = 0;
269 spin_unlock_bh(&dev->targets_lock);
270 return -ENOMEM;
271 }
272
273 dev->n_targets = n_targets;
274 spin_unlock_bh(&dev->targets_lock);
275
276 nfc_genl_targets_found(dev);
277
278 return 0;
279}
280EXPORT_SYMBOL(nfc_targets_found);
281
282static void nfc_release(struct device *d)
283{
284 struct nfc_dev *dev = to_nfc_dev(d);
285
286 nfc_dbg("dev_name=%s", dev_name(&dev->dev));
287
288 nfc_genl_data_exit(&dev->genl_data);
289 kfree(dev->targets);
290 kfree(dev);
291}
292
293struct class nfc_class = {
294 .name = "nfc",
295 .dev_release = nfc_release,
296};
297EXPORT_SYMBOL(nfc_class);
298
299static int match_idx(struct device *d, void *data)
300{
301 struct nfc_dev *dev = to_nfc_dev(d);
302 unsigned *idx = data;
303
304 return dev->idx == *idx;
305}
306
307struct nfc_dev *nfc_get_device(unsigned idx)
308{
309 struct device *d;
310
311 d = class_find_device(&nfc_class, NULL, &idx, match_idx);
312 if (!d)
313 return NULL;
314
315 return to_nfc_dev(d);
316}
317
318/**
319 * nfc_allocate_device - allocate a new nfc device
320 *
321 * @ops: device operations
322 * @supported_protocols: NFC protocols supported by the device
323 */
324struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
325 u32 supported_protocols)
326{
327 static atomic_t dev_no = ATOMIC_INIT(0);
328 struct nfc_dev *dev;
329
330 if (!ops->start_poll || !ops->stop_poll || !ops->activate_target ||
331 !ops->deactivate_target || !ops->data_exchange)
332 return NULL;
333
334 if (!supported_protocols)
335 return NULL;
336
337 dev = kzalloc(sizeof(struct nfc_dev), GFP_KERNEL);
338 if (!dev)
339 return NULL;
340
341 dev->dev.class = &nfc_class;
342 dev->idx = atomic_inc_return(&dev_no) - 1;
343 dev_set_name(&dev->dev, "nfc%d", dev->idx);
344 device_initialize(&dev->dev);
345
346 dev->ops = ops;
347 dev->supported_protocols = supported_protocols;
348
349 spin_lock_init(&dev->targets_lock);
350 nfc_genl_data_init(&dev->genl_data);
351
352 /* first generation must not be 0 */
353 dev->targets_generation = 1;
354
355 return dev;
356}
357EXPORT_SYMBOL(nfc_allocate_device);
358
359/**
360 * nfc_register_device - register a nfc device in the nfc subsystem
361 *
362 * @dev: The nfc device to register
363 */
364int nfc_register_device(struct nfc_dev *dev)
365{
366 int rc;
367
368 nfc_dbg("dev_name=%s", dev_name(&dev->dev));
369
370 mutex_lock(&nfc_devlist_mutex);
371 nfc_devlist_generation++;
372 rc = device_add(&dev->dev);
373 mutex_unlock(&nfc_devlist_mutex);
374
375 if (rc < 0)
376 return rc;
377
378 rc = nfc_genl_device_added(dev);
379 if (rc)
380 nfc_dbg("The userspace won't be notified that the device %s was"
381 " added", dev_name(&dev->dev));
382
383
384 return 0;
385}
386EXPORT_SYMBOL(nfc_register_device);
387
388/**
389 * nfc_unregister_device - unregister a nfc device in the nfc subsystem
390 *
391 * @dev: The nfc device to unregister
392 */
393void nfc_unregister_device(struct nfc_dev *dev)
394{
395 int rc;
396
397 nfc_dbg("dev_name=%s", dev_name(&dev->dev));
398
399 mutex_lock(&nfc_devlist_mutex);
400 nfc_devlist_generation++;
401
402 /* lock to avoid unregistering a device while an operation
403 is in progress */
404 device_lock(&dev->dev);
405 device_del(&dev->dev);
406 device_unlock(&dev->dev);
407
408 mutex_unlock(&nfc_devlist_mutex);
409
410 rc = nfc_genl_device_removed(dev);
411 if (rc)
412 nfc_dbg("The userspace won't be notified that the device %s"
413 " was removed", dev_name(&dev->dev));
414
415}
416EXPORT_SYMBOL(nfc_unregister_device);
417
418static int __init nfc_init(void)
419{
420 int rc;
421
422 nfc_info("NFC Core ver %s", VERSION);
423
424 rc = class_register(&nfc_class);
425 if (rc)
426 return rc;
427
428 rc = nfc_genl_init();
429 if (rc)
430 goto err_genl;
431
432 /* the first generation must not be 0 */
433 nfc_devlist_generation = 1;
434
435 rc = rawsock_init();
436 if (rc)
437 goto err_rawsock;
438
439 rc = af_nfc_init();
440 if (rc)
441 goto err_af_nfc;
442
443 return 0;
444
445err_af_nfc:
446 rawsock_exit();
447err_rawsock:
448 nfc_genl_exit();
449err_genl:
450 class_unregister(&nfc_class);
451 return rc;
452}
453
454static void __exit nfc_exit(void)
455{
456 af_nfc_exit();
457 rawsock_exit();
458 nfc_genl_exit();
459 class_unregister(&nfc_class);
460}
461
462subsys_initcall(nfc_init);
463module_exit(nfc_exit);
464
465MODULE_AUTHOR("Lauro Ramos Venancio <lauro.venancio@openbossa.org>");
466MODULE_DESCRIPTION("NFC Core ver " VERSION);
467MODULE_VERSION(VERSION);
468MODULE_LICENSE("GPL");
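
From a driver's point of view, the minimum needed to appear as nfc0 is one nfc_ops with all five callbacks (nfc_allocate_device() rejects anything less) plus a registration call. A sketch with stubbed callbacks; the demo_* names are hypothetical, the callback signatures are inferred from the calls in core.c above, and NFC_PROTO_MIFARE_MASK is assumed from the UAPI of this series:

	static int demo_start_poll(struct nfc_dev *dev, u32 protocols)
	{
		return 0;	/* hardware kick-off would go here */
	}

	static void demo_stop_poll(struct nfc_dev *dev)
	{
	}

	static int demo_activate_target(struct nfc_dev *dev, u32 target_idx,
					u32 protocol)
	{
		return 0;
	}

	static void demo_deactivate_target(struct nfc_dev *dev, u32 target_idx)
	{
	}

	static int demo_data_exchange(struct nfc_dev *dev, u32 target_idx,
				      struct sk_buff *skb,
				      data_exchange_cb_t cb, void *cb_context)
	{
		kfree_skb(skb);
		return -ENODEV;	/* stub: no hardware behind it */
	}

	static struct nfc_ops demo_ops = {
		.start_poll        = demo_start_poll,
		.stop_poll         = demo_stop_poll,
		.activate_target   = demo_activate_target,
		.deactivate_target = demo_deactivate_target,
		.data_exchange     = demo_data_exchange,
	};

	static struct nfc_dev *demo_dev;

	static int __init demo_init(void)
	{
		demo_dev = nfc_allocate_device(&demo_ops, NFC_PROTO_MIFARE_MASK);
		if (!demo_dev)
			return -ENOMEM;
		return nfc_register_device(demo_dev);
	}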
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
new file mode 100644
index 00000000000..ccdff7953f7
--- /dev/null
+++ b/net/nfc/netlink.c
@@ -0,0 +1,537 @@
1/*
2 * Copyright (C) 2011 Instituto Nokia de Tecnologia
3 *
4 * Authors:
5 * Lauro Ramos Venancio <lauro.venancio@openbossa.org>
6 * Aloisio Almeida Jr <aloisio.almeida@openbossa.org>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the
20 * Free Software Foundation, Inc.,
21 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
22 */
23
24#include <net/genetlink.h>
25#include <linux/nfc.h>
26#include <linux/slab.h>
27
28#include "nfc.h"
29
30static struct genl_multicast_group nfc_genl_event_mcgrp = {
31 .name = NFC_GENL_MCAST_EVENT_NAME,
32};
33
34struct genl_family nfc_genl_family = {
35 .id = GENL_ID_GENERATE,
36 .hdrsize = 0,
37 .name = NFC_GENL_NAME,
38 .version = NFC_GENL_VERSION,
39 .maxattr = NFC_ATTR_MAX,
40};
41
42static const struct nla_policy nfc_genl_policy[NFC_ATTR_MAX + 1] = {
43 [NFC_ATTR_DEVICE_INDEX] = { .type = NLA_U32 },
44 [NFC_ATTR_DEVICE_NAME] = { .type = NLA_STRING,
45 .len = NFC_DEVICE_NAME_MAXSIZE },
46 [NFC_ATTR_PROTOCOLS] = { .type = NLA_U32 },
47};
48
49static int nfc_genl_send_target(struct sk_buff *msg, struct nfc_target *target,
50 struct netlink_callback *cb, int flags)
51{
52 void *hdr;
53
54 nfc_dbg("entry");
55
56 hdr = genlmsg_put(msg, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
57 &nfc_genl_family, flags, NFC_CMD_GET_TARGET);
58 if (!hdr)
59 return -EMSGSIZE;
60
61 genl_dump_check_consistent(cb, hdr, &nfc_genl_family);
62
63 NLA_PUT_U32(msg, NFC_ATTR_TARGET_INDEX, target->idx);
64 NLA_PUT_U32(msg, NFC_ATTR_PROTOCOLS,
65 target->supported_protocols);
66 NLA_PUT_U16(msg, NFC_ATTR_TARGET_SENS_RES, target->sens_res);
67 NLA_PUT_U8(msg, NFC_ATTR_TARGET_SEL_RES, target->sel_res);
68
69 return genlmsg_end(msg, hdr);
70
71nla_put_failure:
72 genlmsg_cancel(msg, hdr);
73 return -EMSGSIZE;
74}
75
76static struct nfc_dev *__get_device_from_cb(struct netlink_callback *cb)
77{
78 struct nfc_dev *dev;
79 int rc;
80 u32 idx;
81
82 rc = nlmsg_parse(cb->nlh, GENL_HDRLEN + nfc_genl_family.hdrsize,
83 nfc_genl_family.attrbuf,
84 nfc_genl_family.maxattr,
85 nfc_genl_policy);
86 if (rc < 0)
87 return ERR_PTR(rc);
88
89 if (!nfc_genl_family.attrbuf[NFC_ATTR_DEVICE_INDEX])
90 return ERR_PTR(-EINVAL);
91
92 idx = nla_get_u32(nfc_genl_family.attrbuf[NFC_ATTR_DEVICE_INDEX]);
93
94 dev = nfc_get_device(idx);
95 if (!dev)
96 return ERR_PTR(-ENODEV);
97
98 return dev;
99}
100
101static int nfc_genl_dump_targets(struct sk_buff *skb,
102 struct netlink_callback *cb)
103{
104 int i = cb->args[0];
105 struct nfc_dev *dev = (struct nfc_dev *) cb->args[1];
106 int rc;
107
108 nfc_dbg("entry");
109
110 if (!dev) {
111 dev = __get_device_from_cb(cb);
112 if (IS_ERR(dev))
113 return PTR_ERR(dev);
114
115 cb->args[1] = (long) dev;
116 }
117
118 spin_lock_bh(&dev->targets_lock);
119
120 cb->seq = dev->targets_generation;
121
122 while (i < dev->n_targets) {
123 rc = nfc_genl_send_target(skb, &dev->targets[i], cb,
124 NLM_F_MULTI);
125 if (rc < 0)
126 break;
127
128 i++;
129 }
130
131 spin_unlock_bh(&dev->targets_lock);
132
133 cb->args[0] = i;
134
135 return skb->len;
136}
137
138static int nfc_genl_dump_targets_done(struct netlink_callback *cb)
139{
140 struct nfc_dev *dev = (struct nfc_dev *) cb->args[1];
141
142 nfc_dbg("entry");
143
144 if (dev)
145 nfc_put_device(dev);
146
147 return 0;
148}
149
150int nfc_genl_targets_found(struct nfc_dev *dev)
151{
152 struct sk_buff *msg;
153 void *hdr;
154
155 nfc_dbg("entry");
156
157 dev->genl_data.poll_req_pid = 0;
158
159 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
160 if (!msg)
161 return -ENOMEM;
162
163 hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
164 NFC_EVENT_TARGETS_FOUND);
165 if (!hdr)
166 goto free_msg;
167
168 NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx);
169
170 genlmsg_end(msg, hdr);
171
172 return genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_ATOMIC);
173
174nla_put_failure:
175 genlmsg_cancel(msg, hdr);
176free_msg:
177 nlmsg_free(msg);
178 return -EMSGSIZE;
179}
180
181int nfc_genl_device_added(struct nfc_dev *dev)
182{
183 struct sk_buff *msg;
184 void *hdr;
185
186 nfc_dbg("entry");
187
188 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
189 if (!msg)
190 return -ENOMEM;
191
192 hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
193 NFC_EVENT_DEVICE_ADDED);
194 if (!hdr)
195 goto free_msg;
196
197 NLA_PUT_STRING(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev));
198 NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx);
199 NLA_PUT_U32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols);
200
201 genlmsg_end(msg, hdr);
202
203 genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_KERNEL);
204
205 return 0;
206
207nla_put_failure:
208 genlmsg_cancel(msg, hdr);
209free_msg:
210 nlmsg_free(msg);
211 return -EMSGSIZE;
212}
213
214int nfc_genl_device_removed(struct nfc_dev *dev)
215{
216 struct sk_buff *msg;
217 void *hdr;
218
219 nfc_dbg("entry");
220
221 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
222 if (!msg)
223 return -ENOMEM;
224
225 hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
226 NFC_EVENT_DEVICE_REMOVED);
227 if (!hdr)
228 goto free_msg;
229
230 NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx);
231
232 genlmsg_end(msg, hdr);
233
234 genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_KERNEL);
235
236 return 0;
237
238nla_put_failure:
239 genlmsg_cancel(msg, hdr);
240free_msg:
241 nlmsg_free(msg);
242 return -EMSGSIZE;
243}
244
245static int nfc_genl_send_device(struct sk_buff *msg, struct nfc_dev *dev,
246 u32 pid, u32 seq,
247 struct netlink_callback *cb,
248 int flags)
249{
250 void *hdr;
251
252 nfc_dbg("entry");
253
254 hdr = genlmsg_put(msg, pid, seq, &nfc_genl_family, flags,
255 NFC_CMD_GET_DEVICE);
256 if (!hdr)
257 return -EMSGSIZE;
258
259 if (cb)
260 genl_dump_check_consistent(cb, hdr, &nfc_genl_family);
261
262 NLA_PUT_STRING(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev));
263 NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx);
264 NLA_PUT_U32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols);
265
266 return genlmsg_end(msg, hdr);
267
268nla_put_failure:
269 genlmsg_cancel(msg, hdr);
270 return -EMSGSIZE;
271}
272
273static int nfc_genl_dump_devices(struct sk_buff *skb,
274 struct netlink_callback *cb)
275{
276 struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0];
277 struct nfc_dev *dev = (struct nfc_dev *) cb->args[1];
278 bool first_call = false;
279
280 nfc_dbg("entry");
281
282 if (!iter) {
283 first_call = true;
284 iter = kmalloc(sizeof(struct class_dev_iter), GFP_KERNEL);
285 if (!iter)
286 return -ENOMEM;
287 cb->args[0] = (long) iter;
288 }
289
290 mutex_lock(&nfc_devlist_mutex);
291
292 cb->seq = nfc_devlist_generation;
293
294 if (first_call) {
295 nfc_device_iter_init(iter);
296 dev = nfc_device_iter_next(iter);
297 }
298
299 while (dev) {
300 int rc;
301
302 rc = nfc_genl_send_device(skb, dev, NETLINK_CB(cb->skb).pid,
303 cb->nlh->nlmsg_seq,
304 cb, NLM_F_MULTI);
305 if (rc < 0)
306 break;
307
308 dev = nfc_device_iter_next(iter);
309 }
310
311 mutex_unlock(&nfc_devlist_mutex);
312
313 cb->args[1] = (long) dev;
314
315 return skb->len;
316}
317
318static int nfc_genl_dump_devices_done(struct netlink_callback *cb)
319{
320 struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0];
321
322 nfc_dbg("entry");
323
324 nfc_device_iter_exit(iter);
325 kfree(iter);
326
327 return 0;
328}
329
330static int nfc_genl_get_device(struct sk_buff *skb, struct genl_info *info)
331{
332 struct sk_buff *msg;
333 struct nfc_dev *dev;
334 u32 idx;
335 int rc = -ENOBUFS;
336
337 nfc_dbg("entry");
338
339 if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
340 return -EINVAL;
341
342 idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
343
344 dev = nfc_get_device(idx);
345 if (!dev)
346 return -ENODEV;
347
348 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
349 if (!msg) {
350 rc = -ENOMEM;
351 goto out_putdev;
352 }
353
354 rc = nfc_genl_send_device(msg, dev, info->snd_pid, info->snd_seq,
355 NULL, 0);
356 if (rc < 0)
357 goto out_free;
358
359 nfc_put_device(dev);
360
361 return genlmsg_reply(msg, info);
362
363out_free:
364 nlmsg_free(msg);
365out_putdev:
366 nfc_put_device(dev);
367 return rc;
368}
369
370static int nfc_genl_start_poll(struct sk_buff *skb, struct genl_info *info)
371{
372 struct nfc_dev *dev;
373 int rc;
374 u32 idx;
375 u32 protocols;
376
377 nfc_dbg("entry");
378
379 if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
380 !info->attrs[NFC_ATTR_PROTOCOLS])
381 return -EINVAL;
382
383 idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
384 protocols = nla_get_u32(info->attrs[NFC_ATTR_PROTOCOLS]);
385
386 dev = nfc_get_device(idx);
387 if (!dev)
388 return -ENODEV;
389
390 mutex_lock(&dev->genl_data.genl_data_mutex);
391
392 rc = nfc_start_poll(dev, protocols);
393 if (!rc)
394 dev->genl_data.poll_req_pid = info->snd_pid;
395
396 mutex_unlock(&dev->genl_data.genl_data_mutex);
397
398 nfc_put_device(dev);
399 return rc;
400}
401
402static int nfc_genl_stop_poll(struct sk_buff *skb, struct genl_info *info)
403{
404 struct nfc_dev *dev;
405 int rc;
406 u32 idx;
407
408 nfc_dbg("entry");
409
410 if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
411 return -EINVAL;
412
413 idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
414
415 dev = nfc_get_device(idx);
416 if (!dev)
417 return -ENODEV;
418
419 mutex_lock(&dev->genl_data.genl_data_mutex);
420
421 if (dev->genl_data.poll_req_pid != info->snd_pid) {
422 rc = -EBUSY;
423 goto out;
424 }
425
426 rc = nfc_stop_poll(dev);
427 dev->genl_data.poll_req_pid = 0;
428
429out:
430 mutex_unlock(&dev->genl_data.genl_data_mutex);
431 nfc_put_device(dev);
432 return rc;
433}
434
435static struct genl_ops nfc_genl_ops[] = {
436 {
437 .cmd = NFC_CMD_GET_DEVICE,
438 .doit = nfc_genl_get_device,
439 .dumpit = nfc_genl_dump_devices,
440 .done = nfc_genl_dump_devices_done,
441 .policy = nfc_genl_policy,
442 },
443 {
444 .cmd = NFC_CMD_START_POLL,
445 .doit = nfc_genl_start_poll,
446 .policy = nfc_genl_policy,
447 },
448 {
449 .cmd = NFC_CMD_STOP_POLL,
450 .doit = nfc_genl_stop_poll,
451 .policy = nfc_genl_policy,
452 },
453 {
454 .cmd = NFC_CMD_GET_TARGET,
455 .dumpit = nfc_genl_dump_targets,
456 .done = nfc_genl_dump_targets_done,
457 .policy = nfc_genl_policy,
458 },
459};
460
461static int nfc_genl_rcv_nl_event(struct notifier_block *this,
462 unsigned long event, void *ptr)
463{
464 struct netlink_notify *n = ptr;
465 struct class_dev_iter iter;
466 struct nfc_dev *dev;
467
468 if (event != NETLINK_URELEASE || n->protocol != NETLINK_GENERIC)
469 goto out;
470
471 nfc_dbg("NETLINK_URELEASE event from id %d", n->pid);
472
473 nfc_device_iter_init(&iter);
474 dev = nfc_device_iter_next(&iter);
475
476 while (dev) {
477 mutex_lock(&dev->genl_data.genl_data_mutex);
478 if (dev->genl_data.poll_req_pid == n->pid) {
479 nfc_stop_poll(dev);
480 dev->genl_data.poll_req_pid = 0;
481 }
482 mutex_unlock(&dev->genl_data.genl_data_mutex);
483 dev = nfc_device_iter_next(&iter);
484 }
485
486 nfc_device_iter_exit(&iter);
487
488out:
489 return NOTIFY_DONE;
490}
491
492void nfc_genl_data_init(struct nfc_genl_data *genl_data)
493{
494 genl_data->poll_req_pid = 0;
495 mutex_init(&genl_data->genl_data_mutex);
496}
497
498void nfc_genl_data_exit(struct nfc_genl_data *genl_data)
499{
500 mutex_destroy(&genl_data->genl_data_mutex);
501}
502
503static struct notifier_block nl_notifier = {
504 .notifier_call = nfc_genl_rcv_nl_event,
505};
506
507/**
508 * nfc_genl_init() - Initialize netlink interface
509 *
510 * This initialization function registers the nfc netlink family.
511 */
512int __init nfc_genl_init(void)
513{
514 int rc;
515
516 rc = genl_register_family_with_ops(&nfc_genl_family, nfc_genl_ops,
517 ARRAY_SIZE(nfc_genl_ops));
518 if (rc)
519 return rc;
520
521 rc = genl_register_mc_group(&nfc_genl_family, &nfc_genl_event_mcgrp);
522
523 netlink_register_notifier(&nl_notifier);
524
525 return rc;
526}
527
528/**
529 * nfc_genl_exit() - Deinitialize netlink interface
530 *
531 * This exit function unregisters the nfc netlink family.
532 */
533void nfc_genl_exit(void)
534{
535 netlink_unregister_notifier(&nl_notifier);
536 genl_unregister_family(&nfc_genl_family);
537}
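
The NLA_PUT_* macros used throughout this file hide an error jump, which is why every caller provides an nla_put_failure label; in the netlink headers of this era they expand roughly to:

	#define NLA_PUT(skb, attrtype, attrlen, data) \
		do { \
			if (unlikely(nla_put(skb, attrtype, attrlen, data) < 0)) \
				goto nla_put_failure; \
		} while (0)

	#define NLA_PUT_TYPE(skb, type, attrtype, value) \
		do { \
			type __tmp = value; \
			NLA_PUT(skb, attrtype, sizeof(type), &__tmp); \
		} while (0)

	#define NLA_PUT_U32(skb, attrtype, value) \
		NLA_PUT_TYPE(skb, u32, attrtype, value)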
diff --git a/net/nfc/nfc.h b/net/nfc/nfc.h
new file mode 100644
index 00000000000..aaf9832298f
--- /dev/null
+++ b/net/nfc/nfc.h
@@ -0,0 +1,117 @@
1/*
2 * Copyright (C) 2011 Instituto Nokia de Tecnologia
3 *
4 * Authors:
5 * Lauro Ramos Venancio <lauro.venancio@openbossa.org>
6 * Aloisio Almeida Jr <aloisio.almeida@openbossa.org>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the
20 * Free Software Foundation, Inc.,
21 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
22 */
23
24#ifndef __LOCAL_NFC_H
25#define __LOCAL_NFC_H
26
27#include <net/nfc.h>
28#include <net/sock.h>
29
30__attribute__((format (printf, 2, 3)))
31int nfc_printk(const char *level, const char *fmt, ...);
32
33#define nfc_info(fmt, arg...) nfc_printk(KERN_INFO, fmt, ##arg)
34#define nfc_err(fmt, arg...) nfc_printk(KERN_ERR, fmt, ##arg)
35#define nfc_dbg(fmt, arg...) pr_debug(fmt "\n", ##arg)
36
37struct nfc_protocol {
38 int id;
39 struct proto *proto;
40 struct module *owner;
41 int (*create)(struct net *net, struct socket *sock,
42 const struct nfc_protocol *nfc_proto);
43};
44
45struct nfc_rawsock {
46 struct sock sk;
47 struct nfc_dev *dev;
48 u32 target_idx;
49 struct work_struct tx_work;
50 bool tx_work_scheduled;
51};
52#define nfc_rawsock(sk) ((struct nfc_rawsock *) sk)
53#define to_rawsock_sk(_tx_work) \
54 ((struct sock *) container_of(_tx_work, struct nfc_rawsock, tx_work))
55
56int __init rawsock_init(void);
57void rawsock_exit(void);
58
59int __init af_nfc_init(void);
60void af_nfc_exit(void);
61int nfc_proto_register(const struct nfc_protocol *nfc_proto);
62void nfc_proto_unregister(const struct nfc_protocol *nfc_proto);
63
64extern int nfc_devlist_generation;
65extern struct mutex nfc_devlist_mutex;
66
67int __init nfc_genl_init(void);
68void nfc_genl_exit(void);
69
70void nfc_genl_data_init(struct nfc_genl_data *genl_data);
71void nfc_genl_data_exit(struct nfc_genl_data *genl_data);
72
73int nfc_genl_targets_found(struct nfc_dev *dev);
74
75int nfc_genl_device_added(struct nfc_dev *dev);
76int nfc_genl_device_removed(struct nfc_dev *dev);
77
78struct nfc_dev *nfc_get_device(unsigned idx);
79
80static inline void nfc_put_device(struct nfc_dev *dev)
81{
82 put_device(&dev->dev);
83}
84
85static inline void nfc_device_iter_init(struct class_dev_iter *iter)
86{
87 class_dev_iter_init(iter, &nfc_class, NULL, NULL);
88}
89
90static inline struct nfc_dev *nfc_device_iter_next(struct class_dev_iter *iter)
91{
92 struct device *d = class_dev_iter_next(iter);
93 if (!d)
94 return NULL;
95
96 return to_nfc_dev(d);
97}
98
99static inline void nfc_device_iter_exit(struct class_dev_iter *iter)
100{
101 class_dev_iter_exit(iter);
102}
103
104int nfc_start_poll(struct nfc_dev *dev, u32 protocols);
105
106int nfc_stop_poll(struct nfc_dev *dev);
107
108int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol);
109
110int nfc_deactivate_target(struct nfc_dev *dev, u32 target_idx);
111
112int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx,
113 struct sk_buff *skb,
114 data_exchange_cb_t cb,
115 void *cb_context);
116
117#endif /* __LOCAL_NFC_H */
diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
new file mode 100644
index 00000000000..52de84a5511
--- /dev/null
+++ b/net/nfc/rawsock.c
@@ -0,0 +1,354 @@
1/*
2 * Copyright (C) 2011 Instituto Nokia de Tecnologia
3 *
4 * Authors:
5 * Aloisio Almeida Jr <aloisio.almeida@openbossa.org>
6 * Lauro Ramos Venancio <lauro.venancio@openbossa.org>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the
20 * Free Software Foundation, Inc.,
21 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
22 */
23
24#include <net/tcp_states.h>
25#include <linux/nfc.h>
26
27#include "nfc.h"
28
29static void rawsock_write_queue_purge(struct sock *sk)
30{
31 nfc_dbg("sk=%p", sk);
32
33 spin_lock_bh(&sk->sk_write_queue.lock);
34 __skb_queue_purge(&sk->sk_write_queue);
35 nfc_rawsock(sk)->tx_work_scheduled = false;
36 spin_unlock_bh(&sk->sk_write_queue.lock);
37}
38
39static void rawsock_report_error(struct sock *sk, int err)
40{
41 nfc_dbg("sk=%p err=%d", sk, err);
42
43 sk->sk_shutdown = SHUTDOWN_MASK;
44 sk->sk_err = -err;
45 sk->sk_error_report(sk);
46
47 rawsock_write_queue_purge(sk);
48}
49
50static int rawsock_release(struct socket *sock)
51{
52 struct sock *sk = sock->sk;
53
54 nfc_dbg("sock=%p", sock);
55
56 sock_orphan(sk);
57 sock_put(sk);
58
59 return 0;
60}
61
62static int rawsock_connect(struct socket *sock, struct sockaddr *_addr,
63 int len, int flags)
64{
65 struct sock *sk = sock->sk;
66 struct sockaddr_nfc *addr = (struct sockaddr_nfc *)_addr;
67 struct nfc_dev *dev;
68 int rc = 0;
69
70 nfc_dbg("sock=%p sk=%p flags=%d", sock, sk, flags);
71
72 if (!addr || len < sizeof(struct sockaddr_nfc) ||
73 addr->sa_family != AF_NFC)
74 return -EINVAL;
75
76 nfc_dbg("addr dev_idx=%u target_idx=%u protocol=%u", addr->dev_idx,
77 addr->target_idx, addr->nfc_protocol);
78
79 lock_sock(sk);
80
81 if (sock->state == SS_CONNECTED) {
82 rc = -EISCONN;
83 goto error;
84 }
85
86 dev = nfc_get_device(addr->dev_idx);
87 if (!dev) {
88 rc = -ENODEV;
89 goto error;
90 }
91
 92 if (addr->target_idx > dev->target_idx - 1 ||
 93 addr->target_idx < dev->target_idx - dev->n_targets) {
 94 rc = -EINVAL;
 95 goto put_dev;
 96 }
103
104 rc = nfc_activate_target(dev, addr->target_idx, addr->nfc_protocol);
105 if (rc)
106 goto put_dev;
107
108 nfc_rawsock(sk)->dev = dev;
109 nfc_rawsock(sk)->target_idx = addr->target_idx;
110 sock->state = SS_CONNECTED;
111 sk->sk_state = TCP_ESTABLISHED;
112 sk->sk_state_change(sk);
113
114 release_sock(sk);
115 return 0;
116
117put_dev:
118 nfc_put_device(dev);
119error:
120 release_sock(sk);
121 return rc;
122}
123
124static int rawsock_add_header(struct sk_buff *skb)
125{
126
127 if (skb_cow_head(skb, 1))
128 return -ENOMEM;
129
130 *skb_push(skb, 1) = 0;
131
132 return 0;
133}
134
135static void rawsock_data_exchange_complete(void *context, struct sk_buff *skb,
136 int err)
137{
138 struct sock *sk = (struct sock *) context;
139
140 BUG_ON(in_irq());
141
142 nfc_dbg("sk=%p err=%d", sk, err);
143
144 if (err)
145 goto error;
146
147 err = rawsock_add_header(skb);
148 if (err)
149 goto error;
150
151 err = sock_queue_rcv_skb(sk, skb);
152 if (err)
153 goto error;
154
155 spin_lock_bh(&sk->sk_write_queue.lock);
156 if (!skb_queue_empty(&sk->sk_write_queue))
157 schedule_work(&nfc_rawsock(sk)->tx_work);
158 else
159 nfc_rawsock(sk)->tx_work_scheduled = false;
160 spin_unlock_bh(&sk->sk_write_queue.lock);
161
162 sock_put(sk);
163 return;
164
165error:
166 rawsock_report_error(sk, err);
167 sock_put(sk);
168}
169
170static void rawsock_tx_work(struct work_struct *work)
171{
172 struct sock *sk = to_rawsock_sk(work);
173 struct nfc_dev *dev = nfc_rawsock(sk)->dev;
174 u32 target_idx = nfc_rawsock(sk)->target_idx;
175 struct sk_buff *skb;
176 int rc;
177
178 nfc_dbg("sk=%p target_idx=%u", sk, target_idx);
179
180 if (sk->sk_shutdown & SEND_SHUTDOWN) {
181 rawsock_write_queue_purge(sk);
182 return;
183 }
184
185 skb = skb_dequeue(&sk->sk_write_queue);
186
187 sock_hold(sk);
188 rc = nfc_data_exchange(dev, target_idx, skb,
189 rawsock_data_exchange_complete, sk);
190 if (rc) {
191 rawsock_report_error(sk, rc);
192 sock_put(sk);
193 }
194}
195
196static int rawsock_sendmsg(struct kiocb *iocb, struct socket *sock,
197 struct msghdr *msg, size_t len)
198{
199 struct sock *sk = sock->sk;
200 struct sk_buff *skb;
201 int rc;
202
203 nfc_dbg("sock=%p sk=%p len=%zu", sock, sk, len);
204
205 if (msg->msg_namelen)
206 return -EOPNOTSUPP;
207
208 if (sock->state != SS_CONNECTED)
209 return -ENOTCONN;
210
211 skb = sock_alloc_send_skb(sk, len, msg->msg_flags & MSG_DONTWAIT,
212 &rc);
213 if (!skb)
214 return rc;
215
216 rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
217 if (rc < 0) {
218 kfree_skb(skb);
219 return rc;
220 }
221
222 spin_lock_bh(&sk->sk_write_queue.lock);
223 __skb_queue_tail(&sk->sk_write_queue, skb);
224 if (!nfc_rawsock(sk)->tx_work_scheduled) {
225 schedule_work(&nfc_rawsock(sk)->tx_work);
226 nfc_rawsock(sk)->tx_work_scheduled = true;
227 }
228 spin_unlock_bh(&sk->sk_write_queue.lock);
229
230 return len;
231}
232
233static int rawsock_recvmsg(struct kiocb *iocb, struct socket *sock,
234 struct msghdr *msg, size_t len, int flags)
235{
236 int noblock = flags & MSG_DONTWAIT;
237 struct sock *sk = sock->sk;
238 struct sk_buff *skb;
239 int copied;
240 int rc;
241
242 nfc_dbg("sock=%p sk=%p len=%zu flags=%d", sock, sk, len, flags);
243
244 skb = skb_recv_datagram(sk, flags, noblock, &rc);
245 if (!skb)
246 return rc;
247
248 msg->msg_namelen = 0;
249
250 copied = skb->len;
251 if (len < copied) {
252 msg->msg_flags |= MSG_TRUNC;
253 copied = len;
254 }
255
256 rc = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
257
258 skb_free_datagram(sk, skb);
259
260 return rc ? : copied;
261}
262
263
264static const struct proto_ops rawsock_ops = {
265 .family = PF_NFC,
266 .owner = THIS_MODULE,
267 .release = rawsock_release,
268 .bind = sock_no_bind,
269 .connect = rawsock_connect,
270 .socketpair = sock_no_socketpair,
271 .accept = sock_no_accept,
272 .getname = sock_no_getname,
273 .poll = datagram_poll,
274 .ioctl = sock_no_ioctl,
275 .listen = sock_no_listen,
276 .shutdown = sock_no_shutdown,
277 .setsockopt = sock_no_setsockopt,
278 .getsockopt = sock_no_getsockopt,
279 .sendmsg = rawsock_sendmsg,
280 .recvmsg = rawsock_recvmsg,
281 .mmap = sock_no_mmap,
282};
283
284static void rawsock_destruct(struct sock *sk)
285{
286 nfc_dbg("sk=%p", sk);
287
288 if (sk->sk_state == TCP_ESTABLISHED) {
289 nfc_deactivate_target(nfc_rawsock(sk)->dev,
290 nfc_rawsock(sk)->target_idx);
291 nfc_put_device(nfc_rawsock(sk)->dev);
292 }
293
294 skb_queue_purge(&sk->sk_receive_queue);
295
296 if (!sock_flag(sk, SOCK_DEAD)) {
297 nfc_err("Freeing alive NFC raw socket %p", sk);
298 return;
299 }
300}
301
302static int rawsock_create(struct net *net, struct socket *sock,
303 const struct nfc_protocol *nfc_proto)
304{
305 struct sock *sk;
306
307 nfc_dbg("sock=%p", sock);
308
309 if (sock->type != SOCK_SEQPACKET)
310 return -ESOCKTNOSUPPORT;
311
312 sock->ops = &rawsock_ops;
313
314 sk = sk_alloc(net, PF_NFC, GFP_KERNEL, nfc_proto->proto);
315 if (!sk)
316 return -ENOMEM;
317
318 sock_init_data(sock, sk);
319 sk->sk_protocol = nfc_proto->id;
320 sk->sk_destruct = rawsock_destruct;
321 sock->state = SS_UNCONNECTED;
322
323 INIT_WORK(&nfc_rawsock(sk)->tx_work, rawsock_tx_work);
324 nfc_rawsock(sk)->tx_work_scheduled = false;
325
326 return 0;
327}
328
329static struct proto rawsock_proto = {
330 .name = "NFC_RAW",
331 .owner = THIS_MODULE,
332 .obj_size = sizeof(struct nfc_rawsock),
333};
334
335static const struct nfc_protocol rawsock_nfc_proto = {
336 .id = NFC_SOCKPROTO_RAW,
337 .proto = &rawsock_proto,
338 .owner = THIS_MODULE,
339 .create = rawsock_create
340};
341
342int __init rawsock_init(void)
343{
344 int rc;
345
346 rc = nfc_proto_register(&rawsock_nfc_proto);
347
348 return rc;
349}
350
351void rawsock_exit(void)
352{
353 nfc_proto_unregister(&rawsock_nfc_proto);
354}
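
From userspace, the whole path above boils down to a SOCK_SEQPACKET socket plus a connect() with a sockaddr_nfc; a hedged sketch against the UAPI this patch introduces (field names taken from rawsock_connect() above):

	#include <string.h>
	#include <unistd.h>
	#include <sys/socket.h>
	#include <linux/nfc.h>

	static int open_raw_target(unsigned int dev_idx, unsigned int target_idx,
				   unsigned int nfc_protocol)
	{
		struct sockaddr_nfc addr;
		int fd;

		fd = socket(AF_NFC, SOCK_SEQPACKET, NFC_SOCKPROTO_RAW);
		if (fd < 0)
			return -1;

		memset(&addr, 0, sizeof(addr));
		addr.sa_family = AF_NFC;
		addr.dev_idx = dev_idx;
		addr.target_idx = target_idx;
		addr.nfc_protocol = nfc_protocol;

		if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
			close(fd);
			return -1;
		}
		/* read()/write() now transceive frames; note the one-byte
		 * header prepended by rawsock_add_header() on receive. */
		return fd;
	}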
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index c0c3cda1971..c698cec0a44 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -187,9 +187,11 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
187 187
188static void packet_flush_mclist(struct sock *sk); 188static void packet_flush_mclist(struct sock *sk);
189 189
190struct packet_fanout;
190struct packet_sock { 191struct packet_sock {
191 /* struct sock has to be the first member of packet_sock */ 192 /* struct sock has to be the first member of packet_sock */
192 struct sock sk; 193 struct sock sk;
194 struct packet_fanout *fanout;
193 struct tpacket_stats stats; 195 struct tpacket_stats stats;
194 struct packet_ring_buffer rx_ring; 196 struct packet_ring_buffer rx_ring;
195 struct packet_ring_buffer tx_ring; 197 struct packet_ring_buffer tx_ring;
@@ -212,6 +214,24 @@ struct packet_sock {
212 struct packet_type prot_hook ____cacheline_aligned_in_smp; 214 struct packet_type prot_hook ____cacheline_aligned_in_smp;
213}; 215};
214 216
217#define PACKET_FANOUT_MAX 256
218
219struct packet_fanout {
220#ifdef CONFIG_NET_NS
221 struct net *net;
222#endif
223 unsigned int num_members;
224 u16 id;
225 u8 type;
226 u8 defrag;
227 atomic_t rr_cur;
228 struct list_head list;
229 struct sock *arr[PACKET_FANOUT_MAX];
230 spinlock_t lock;
231 atomic_t sk_ref;
232 struct packet_type prot_hook ____cacheline_aligned_in_smp;
233};
234
215struct packet_skb_cb { 235struct packet_skb_cb {
216 unsigned int origlen; 236 unsigned int origlen;
217 union { 237 union {
@@ -222,6 +242,64 @@ struct packet_skb_cb {
222 242
223#define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb)) 243#define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb))
224 244
245static inline struct packet_sock *pkt_sk(struct sock *sk)
246{
247 return (struct packet_sock *)sk;
248}
249
250static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
251static void __fanout_link(struct sock *sk, struct packet_sock *po);
252
253/* register_prot_hook must be invoked with the po->bind_lock held,
254 * or from a context in which asynchronous accesses to the packet
255 * socket is not possible (packet_create()).
256 */
257static void register_prot_hook(struct sock *sk)
258{
259 struct packet_sock *po = pkt_sk(sk);
260 if (!po->running) {
261 if (po->fanout)
262 __fanout_link(sk, po);
263 else
264 dev_add_pack(&po->prot_hook);
265 sock_hold(sk);
266 po->running = 1;
267 }
268}
269
270/* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
271 * held. If the sync parameter is true, we will temporarily drop
272 * the po->bind_lock and do a synchronize_net to make sure no
273 * asynchronous packet processing paths still refer to the elements
274 * of po->prot_hook. If the sync parameter is false, it is the
275 * callers responsibility to take care of this.
276 */
277static void __unregister_prot_hook(struct sock *sk, bool sync)
278{
279 struct packet_sock *po = pkt_sk(sk);
280
281 po->running = 0;
282 if (po->fanout)
283 __fanout_unlink(sk, po);
284 else
285 __dev_remove_pack(&po->prot_hook);
286 __sock_put(sk);
287
288 if (sync) {
289 spin_unlock(&po->bind_lock);
290 synchronize_net();
291 spin_lock(&po->bind_lock);
292 }
293}
294
295static void unregister_prot_hook(struct sock *sk, bool sync)
296{
297 struct packet_sock *po = pkt_sk(sk);
298
299 if (po->running)
300 __unregister_prot_hook(sk, sync);
301}
302
225static inline __pure struct page *pgv_to_page(void *addr) 303static inline __pure struct page *pgv_to_page(void *addr)
226{ 304{
227 if (is_vmalloc_addr(addr)) 305 if (is_vmalloc_addr(addr))
@@ -324,11 +402,6 @@ static inline void packet_increment_head(struct packet_ring_buffer *buff)
324 buff->head = buff->head != buff->frame_max ? buff->head+1 : 0; 402 buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
325} 403}
326 404
327static inline struct packet_sock *pkt_sk(struct sock *sk)
328{
329 return (struct packet_sock *)sk;
330}
331
332static void packet_sock_destruct(struct sock *sk) 405static void packet_sock_destruct(struct sock *sk)
333{ 406{
334 skb_queue_purge(&sk->sk_error_queue); 407 skb_queue_purge(&sk->sk_error_queue);
@@ -344,6 +417,240 @@ static void packet_sock_destruct(struct sock *sk)
344 sk_refcnt_debug_dec(sk); 417 sk_refcnt_debug_dec(sk);
345} 418}
346 419
420static int fanout_rr_next(struct packet_fanout *f, unsigned int num)
421{
422 int x = atomic_read(&f->rr_cur) + 1;
423
424 if (x >= num)
425 x = 0;
426
427 return x;
428}
429
430static struct sock *fanout_demux_hash(struct packet_fanout *f, struct sk_buff *skb, unsigned int num)
431{
432 u32 idx, hash = skb->rxhash;
433
434 idx = ((u64)hash * num) >> 32;
435
436 return f->arr[idx];
437}
438
439static struct sock *fanout_demux_lb(struct packet_fanout *f, struct sk_buff *skb, unsigned int num)
440{
441 int cur, old;
442
443 cur = atomic_read(&f->rr_cur);
444 while ((old = atomic_cmpxchg(&f->rr_cur, cur,
445 fanout_rr_next(f, num))) != cur)
446 cur = old;
447 return f->arr[cur];
448}
449
450static struct sock *fanout_demux_cpu(struct packet_fanout *f, struct sk_buff *skb, unsigned int num)
451{
452 unsigned int cpu = smp_processor_id();
453
454 return f->arr[cpu % num];
455}
456
457static struct sk_buff *fanout_check_defrag(struct sk_buff *skb)
458{
459#ifdef CONFIG_INET
460 const struct iphdr *iph;
461 u32 len;
462
463 if (skb->protocol != htons(ETH_P_IP))
464 return skb;
465
466 if (!pskb_may_pull(skb, sizeof(struct iphdr)))
467 return skb;
468
469 iph = ip_hdr(skb);
470 if (iph->ihl < 5 || iph->version != 4)
471 return skb;
472 if (!pskb_may_pull(skb, iph->ihl*4))
473 return skb;
474 iph = ip_hdr(skb);
475 len = ntohs(iph->tot_len);
476 if (skb->len < len || len < (iph->ihl * 4))
477 return skb;
478
479 if (ip_is_fragment(ip_hdr(skb))) {
480 skb = skb_share_check(skb, GFP_ATOMIC);
481 if (skb) {
482 if (pskb_trim_rcsum(skb, len))
483 return skb;
484 memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
485 if (ip_defrag(skb, IP_DEFRAG_AF_PACKET))
486 return NULL;
487 skb->rxhash = 0;
488 }
489 }
490#endif
491 return skb;
492}
493
494static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
495 struct packet_type *pt, struct net_device *orig_dev)
496{
497 struct packet_fanout *f = pt->af_packet_priv;
498 unsigned int num = f->num_members;
499 struct packet_sock *po;
500 struct sock *sk;
501
502 if (!net_eq(dev_net(dev), read_pnet(&f->net)) ||
503 !num) {
504 kfree_skb(skb);
505 return 0;
506 }
507
508 switch (f->type) {
509 case PACKET_FANOUT_HASH:
510 default:
511 if (f->defrag) {
512 skb = fanout_check_defrag(skb);
513 if (!skb)
514 return 0;
515 }
516 skb_get_rxhash(skb);
517 sk = fanout_demux_hash(f, skb, num);
518 break;
519 case PACKET_FANOUT_LB:
520 sk = fanout_demux_lb(f, skb, num);
521 break;
522 case PACKET_FANOUT_CPU:
523 sk = fanout_demux_cpu(f, skb, num);
524 break;
525 }
526
527 po = pkt_sk(sk);
528
529 return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
530}
531
532static DEFINE_MUTEX(fanout_mutex);
533static LIST_HEAD(fanout_list);
534
535static void __fanout_link(struct sock *sk, struct packet_sock *po)
536{
537 struct packet_fanout *f = po->fanout;
538
539 spin_lock(&f->lock);
540 f->arr[f->num_members] = sk;
541 smp_wmb();
542 f->num_members++;
543 spin_unlock(&f->lock);
544}
545
546static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
547{
548 struct packet_fanout *f = po->fanout;
549 int i;
550
551 spin_lock(&f->lock);
552 for (i = 0; i < f->num_members; i++) {
553 if (f->arr[i] == sk)
554 break;
555 }
556 BUG_ON(i >= f->num_members);
557 f->arr[i] = f->arr[f->num_members - 1];
558 f->num_members--;
559 spin_unlock(&f->lock);
560}
561
562static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
563{
564 struct packet_sock *po = pkt_sk(sk);
565 struct packet_fanout *f, *match;
566 u8 type = type_flags & 0xff;
567 u8 defrag = (type_flags & PACKET_FANOUT_FLAG_DEFRAG) ? 1 : 0;
568 int err;
569
570 switch (type) {
571 case PACKET_FANOUT_HASH:
572 case PACKET_FANOUT_LB:
573 case PACKET_FANOUT_CPU:
574 break;
575 default:
576 return -EINVAL;
577 }
578
579 if (!po->running)
580 return -EINVAL;
581
582 if (po->fanout)
583 return -EALREADY;
584
585 mutex_lock(&fanout_mutex);
586 match = NULL;
587 list_for_each_entry(f, &fanout_list, list) {
588 if (f->id == id &&
589 read_pnet(&f->net) == sock_net(sk)) {
590 match = f;
591 break;
592 }
593 }
594 err = -EINVAL;
595 if (match && match->defrag != defrag)
596 goto out;
597 if (!match) {
598 err = -ENOMEM;
599 match = kzalloc(sizeof(*match), GFP_KERNEL);
600 if (!match)
601 goto out;
602 write_pnet(&match->net, sock_net(sk));
603 match->id = id;
604 match->type = type;
605 match->defrag = defrag;
606 atomic_set(&match->rr_cur, 0);
607 INIT_LIST_HEAD(&match->list);
608 spin_lock_init(&match->lock);
609 atomic_set(&match->sk_ref, 0);
610 match->prot_hook.type = po->prot_hook.type;
611 match->prot_hook.dev = po->prot_hook.dev;
612 match->prot_hook.func = packet_rcv_fanout;
613 match->prot_hook.af_packet_priv = match;
614 dev_add_pack(&match->prot_hook);
615 list_add(&match->list, &fanout_list);
616 }
617 err = -EINVAL;
618 if (match->type == type &&
619 match->prot_hook.type == po->prot_hook.type &&
620 match->prot_hook.dev == po->prot_hook.dev) {
621 err = -ENOSPC;
622 if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
623 __dev_remove_pack(&po->prot_hook);
624 po->fanout = match;
625 atomic_inc(&match->sk_ref);
626 __fanout_link(sk, po);
627 err = 0;
628 }
629 }
630out:
631 mutex_unlock(&fanout_mutex);
632 return err;
633}
634
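With fanout_add() in place, user space joins a group through the PACKET_FANOUT socket option, packing the 16-bit group id into the low half of the argument and the mode (plus flags such as PACKET_FANOUT_FLAG_DEFRAG) into the high half, matching the val & 0xffff / val >> 16 split above. A hedged sketch; the fallback constants are taken from this patch series and only apply if the installed linux/if_packet.h predates the feature:

    #include <arpa/inet.h>
    #include <linux/if_ether.h>
    #include <linux/if_packet.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/socket.h>

    #ifndef PACKET_FANOUT
    #define PACKET_FANOUT      18
    #define PACKET_FANOUT_HASH 0
    #endif

    int main(void)
    {
            uint16_t id = 42;
            uint32_t fanout_arg = id | ((uint32_t)PACKET_FANOUT_HASH << 16);
            int i, fds[2];

            for (i = 0; i < 2; i++) {
                    /* AF_PACKET sockets need CAP_NET_RAW. Creating with a
                     * non-zero protocol registers the hook, so po->running
                     * is already set when fanout_add() checks it. */
                    fds[i] = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_IP));
                    if (fds[i] < 0) {
                            perror("socket");
                            return 1;
                    }
                    /* Members must match protocol and device, or
                     * fanout_add() fails with -EINVAL; a full group
                     * (PACKET_FANOUT_MAX) yields -ENOSPC. */
                    if (setsockopt(fds[i], SOL_PACKET, PACKET_FANOUT,
                                   &fanout_arg, sizeof(fanout_arg)) < 0) {
                            perror("setsockopt(PACKET_FANOUT)");
                            return 1;
                    }
            }
            /* IPv4 traffic is now spread across fds[0] and fds[1] by
             * flow hash; read frames with recvfrom() as usual. */
            return 0;
    }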
635static void fanout_release(struct sock *sk)
636{
637 struct packet_sock *po = pkt_sk(sk);
638 struct packet_fanout *f;
639
640 f = po->fanout;
641 if (!f)
642 return;
643
644 po->fanout = NULL;
645
646 mutex_lock(&fanout_mutex);
647 if (atomic_dec_and_test(&f->sk_ref)) {
648 list_del(&f->list);
649 dev_remove_pack(&f->prot_hook);
650 kfree(f);
651 }
652 mutex_unlock(&fanout_mutex);
653}
347 654
348static const struct proto_ops packet_ops; 655static const struct proto_ops packet_ops;
349 656
@@ -822,7 +1129,6 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
822 else 1129 else
823 sll->sll_ifindex = dev->ifindex; 1130 sll->sll_ifindex = dev->ifindex;
824 1131
825 __packet_set_status(po, h.raw, status);
826 smp_mb(); 1132 smp_mb();
827#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1 1133#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
828 { 1134 {
@@ -831,8 +1137,10 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
831 end = (u8 *)PAGE_ALIGN((unsigned long)h.raw + macoff + snaplen); 1137 end = (u8 *)PAGE_ALIGN((unsigned long)h.raw + macoff + snaplen);
832 for (start = h.raw; start < end; start += PAGE_SIZE) 1138 for (start = h.raw; start < end; start += PAGE_SIZE)
833 flush_dcache_page(pgv_to_page(start)); 1139 flush_dcache_page(pgv_to_page(start));
1140 smp_wmb();
834 } 1141 }
835#endif 1142#endif
1143 __packet_set_status(po, h.raw, status);
836 1144
837 sk->sk_data_ready(sk, 0); 1145 sk->sk_data_ready(sk, 0);
838 1146
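This hunk is an ordering fix: __packet_set_status() moves after the dcache flush, with an smp_wmb() in between, so TP_STATUS_USER is published only once the frame contents are visible to user space. The ring consumer needs the matching read-side discipline; a hedged sketch for a TPACKET_V1-style RX ring (mapping setup omitted, and __sync_synchronize() stands in for a properly paired read barrier):

    #include <linux/if_packet.h>
    #include <poll.h>

    /* Sketch of the reader-side contract for one RX ring frame; 'hdr'
     * points into a PACKET_RX_RING mapping. */
    static void consume_frame(volatile struct tpacket_hdr *hdr, int fd)
    {
            struct pollfd pfd = { .fd = fd, .events = POLLIN };

            while (!(hdr->tp_status & TP_STATUS_USER))
                    poll(&pfd, 1, -1);      /* wait for the kernel */

            __sync_synchronize();           /* status read before data read,
                                             * pairing with the writer's
                                             * flush + barrier above */
            /* ... process hdr->tp_len bytes at (char *)hdr + hdr->tp_mac ... */

            hdr->tp_status = TP_STATUS_KERNEL;  /* hand the slot back */
            __sync_synchronize();
    }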
@@ -975,7 +1283,8 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
975 struct sk_buff *skb; 1283 struct sk_buff *skb;
976 struct net_device *dev; 1284 struct net_device *dev;
977 __be16 proto; 1285 __be16 proto;
978 int ifindex, err, reserve = 0; 1286 bool need_rls_dev = false;
1287 int err, reserve = 0;
979 void *ph; 1288 void *ph;
980 struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name; 1289 struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
981 int tp_len, size_max; 1290 int tp_len, size_max;
@@ -987,7 +1296,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
987 1296
988 err = -EBUSY; 1297 err = -EBUSY;
989 if (saddr == NULL) { 1298 if (saddr == NULL) {
990 ifindex = po->ifindex; 1299 dev = po->prot_hook.dev;
991 proto = po->num; 1300 proto = po->num;
992 addr = NULL; 1301 addr = NULL;
993 } else { 1302 } else {
@@ -998,12 +1307,12 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
998 + offsetof(struct sockaddr_ll, 1307 + offsetof(struct sockaddr_ll,
999 sll_addr))) 1308 sll_addr)))
1000 goto out; 1309 goto out;
1001 ifindex = saddr->sll_ifindex;
1002 proto = saddr->sll_protocol; 1310 proto = saddr->sll_protocol;
1003 addr = saddr->sll_addr; 1311 addr = saddr->sll_addr;
1312 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
1313 need_rls_dev = true;
1004 } 1314 }
1005 1315
1006 dev = dev_get_by_index(sock_net(&po->sk), ifindex);
1007 err = -ENXIO; 1316 err = -ENXIO;
1008 if (unlikely(dev == NULL)) 1317 if (unlikely(dev == NULL))
1009 goto out; 1318 goto out;
@@ -1089,7 +1398,8 @@ out_status:
1089 __packet_set_status(po, ph, status); 1398 __packet_set_status(po, ph, status);
1090 kfree_skb(skb); 1399 kfree_skb(skb);
1091out_put: 1400out_put:
1092 dev_put(dev); 1401 if (need_rls_dev)
1402 dev_put(dev);
1093out: 1403out:
1094 mutex_unlock(&po->pg_vec_lock); 1404 mutex_unlock(&po->pg_vec_lock);
1095 return err; 1405 return err;
@@ -1127,8 +1437,9 @@ static int packet_snd(struct socket *sock,
1127 struct sk_buff *skb; 1437 struct sk_buff *skb;
1128 struct net_device *dev; 1438 struct net_device *dev;
1129 __be16 proto; 1439 __be16 proto;
1440 bool need_rls_dev = false;
1130 unsigned char *addr; 1441 unsigned char *addr;
1131 int ifindex, err, reserve = 0; 1442 int err, reserve = 0;
1132 struct virtio_net_hdr vnet_hdr = { 0 }; 1443 struct virtio_net_hdr vnet_hdr = { 0 };
1133 int offset = 0; 1444 int offset = 0;
1134 int vnet_hdr_len; 1445 int vnet_hdr_len;
@@ -1140,7 +1451,7 @@ static int packet_snd(struct socket *sock,
1140 */ 1451 */
1141 1452
1142 if (saddr == NULL) { 1453 if (saddr == NULL) {
1143 ifindex = po->ifindex; 1454 dev = po->prot_hook.dev;
1144 proto = po->num; 1455 proto = po->num;
1145 addr = NULL; 1456 addr = NULL;
1146 } else { 1457 } else {
@@ -1149,13 +1460,12 @@ static int packet_snd(struct socket *sock,
1149 goto out; 1460 goto out;
1150 if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr))) 1461 if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
1151 goto out; 1462 goto out;
1152 ifindex = saddr->sll_ifindex;
1153 proto = saddr->sll_protocol; 1463 proto = saddr->sll_protocol;
1154 addr = saddr->sll_addr; 1464 addr = saddr->sll_addr;
1465 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
1466 need_rls_dev = true;
1155 } 1467 }
1156 1468
1157
1158 dev = dev_get_by_index(sock_net(sk), ifindex);
1159 err = -ENXIO; 1469 err = -ENXIO;
1160 if (dev == NULL) 1470 if (dev == NULL)
1161 goto out_unlock; 1471 goto out_unlock;
@@ -1286,14 +1596,15 @@ static int packet_snd(struct socket *sock,
1286 if (err > 0 && (err = net_xmit_errno(err)) != 0) 1596 if (err > 0 && (err = net_xmit_errno(err)) != 0)
1287 goto out_unlock; 1597 goto out_unlock;
1288 1598
1289 dev_put(dev); 1599 if (need_rls_dev)
1600 dev_put(dev);
1290 1601
1291 return len; 1602 return len;
1292 1603
1293out_free: 1604out_free:
1294 kfree_skb(skb); 1605 kfree_skb(skb);
1295out_unlock: 1606out_unlock:
1296 if (dev) 1607 if (dev && need_rls_dev)
1297 dev_put(dev); 1608 dev_put(dev);
1298out: 1609out:
1299 return err; 1610 return err;
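Both send paths now follow the same device-reference rule: with no msg_name, the device pointer is borrowed from po->prot_hook.dev, whose reference is owned by the bind logic, while a device looked up from saddr->sll_ifindex carries a temporary reference the sender must drop. A condensed sketch of the pattern the need_rls_dev flag encodes (names taken from the hunks above):

    bool need_rls_dev = false;
    struct net_device *dev;

    if (saddr == NULL) {
            dev = po->prot_hook.dev;        /* borrowed; ref held since bind */
    } else {
            dev = dev_get_by_index(net, saddr->sll_ifindex); /* takes a ref */
            need_rls_dev = true;            /* this path must dev_put() */
    }
    /* ... build and transmit the skb ... */
    if (need_rls_dev)
            dev_put(dev);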
@@ -1334,14 +1645,10 @@ static int packet_release(struct socket *sock)
1334 spin_unlock_bh(&net->packet.sklist_lock); 1645 spin_unlock_bh(&net->packet.sklist_lock);
1335 1646
1336 spin_lock(&po->bind_lock); 1647 spin_lock(&po->bind_lock);
1337 if (po->running) { 1648 unregister_prot_hook(sk, false);
1338 /* 1649 if (po->prot_hook.dev) {
1339 * Remove from protocol table 1650 dev_put(po->prot_hook.dev);
1340 */ 1651 po->prot_hook.dev = NULL;
1341 po->running = 0;
1342 po->num = 0;
1343 __dev_remove_pack(&po->prot_hook);
1344 __sock_put(sk);
1345 } 1652 }
1346 spin_unlock(&po->bind_lock); 1653 spin_unlock(&po->bind_lock);
1347 1654
@@ -1355,6 +1662,8 @@ static int packet_release(struct socket *sock)
1355 if (po->tx_ring.pg_vec) 1662 if (po->tx_ring.pg_vec)
1356 packet_set_ring(sk, &req, 1, 1); 1663 packet_set_ring(sk, &req, 1, 1);
1357 1664
1665 fanout_release(sk);
1666
1358 synchronize_net(); 1667 synchronize_net();
1359 /* 1668 /*
1360 * Now the socket is dead. No more input will appear. 1669 * Now the socket is dead. No more input will appear.
@@ -1378,24 +1687,18 @@ static int packet_release(struct socket *sock)
1378static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protocol) 1687static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protocol)
1379{ 1688{
1380 struct packet_sock *po = pkt_sk(sk); 1689 struct packet_sock *po = pkt_sk(sk);
1381 /* 1690
1382 * Detach an existing hook if present. 1691 if (po->fanout)
1383 */ 1692 return -EINVAL;
1384 1693
1385 lock_sock(sk); 1694 lock_sock(sk);
1386 1695
1387 spin_lock(&po->bind_lock); 1696 spin_lock(&po->bind_lock);
1388 if (po->running) { 1697 unregister_prot_hook(sk, true);
1389 __sock_put(sk);
1390 po->running = 0;
1391 po->num = 0;
1392 spin_unlock(&po->bind_lock);
1393 dev_remove_pack(&po->prot_hook);
1394 spin_lock(&po->bind_lock);
1395 }
1396
1397 po->num = protocol; 1698 po->num = protocol;
1398 po->prot_hook.type = protocol; 1699 po->prot_hook.type = protocol;
1700 if (po->prot_hook.dev)
1701 dev_put(po->prot_hook.dev);
1399 po->prot_hook.dev = dev; 1702 po->prot_hook.dev = dev;
1400 1703
1401 po->ifindex = dev ? dev->ifindex : 0; 1704 po->ifindex = dev ? dev->ifindex : 0;
@@ -1404,9 +1707,7 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protoc
1404 goto out_unlock; 1707 goto out_unlock;
1405 1708
1406 if (!dev || (dev->flags & IFF_UP)) { 1709 if (!dev || (dev->flags & IFF_UP)) {
1407 dev_add_pack(&po->prot_hook); 1710 register_prot_hook(sk);
1408 sock_hold(sk);
1409 po->running = 1;
1410 } else { 1711 } else {
1411 sk->sk_err = ENETDOWN; 1712 sk->sk_err = ENETDOWN;
1412 if (!sock_flag(sk, SOCK_DEAD)) 1713 if (!sock_flag(sk, SOCK_DEAD))
@@ -1440,10 +1741,8 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
1440 strlcpy(name, uaddr->sa_data, sizeof(name)); 1741 strlcpy(name, uaddr->sa_data, sizeof(name));
1441 1742
1442 dev = dev_get_by_name(sock_net(sk), name); 1743 dev = dev_get_by_name(sock_net(sk), name);
1443 if (dev) { 1744 if (dev)
1444 err = packet_do_bind(sk, dev, pkt_sk(sk)->num); 1745 err = packet_do_bind(sk, dev, pkt_sk(sk)->num);
1445 dev_put(dev);
1446 }
1447 return err; 1746 return err;
1448} 1747}
1449 1748
@@ -1471,8 +1770,6 @@ static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len
1471 goto out; 1770 goto out;
1472 } 1771 }
1473 err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num); 1772 err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);
1474 if (dev)
1475 dev_put(dev);
1476 1773
1477out: 1774out:
1478 return err; 1775 return err;
@@ -1537,9 +1834,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
1537 1834
1538 if (proto) { 1835 if (proto) {
1539 po->prot_hook.type = proto; 1836 po->prot_hook.type = proto;
1540 dev_add_pack(&po->prot_hook); 1837 register_prot_hook(sk);
1541 sock_hold(sk);
1542 po->running = 1;
1543 } 1838 }
1544 1839
1545 spin_lock_bh(&net->packet.sklist_lock); 1840 spin_lock_bh(&net->packet.sklist_lock);
@@ -1681,6 +1976,8 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
1681 vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; 1976 vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
1682 vnet_hdr.csum_start = skb_checksum_start_offset(skb); 1977 vnet_hdr.csum_start = skb_checksum_start_offset(skb);
1683 vnet_hdr.csum_offset = skb->csum_offset; 1978 vnet_hdr.csum_offset = skb->csum_offset;
1979 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1980 vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID;
1684 } /* else everything is zero */ 1981 } /* else everything is zero */
1685 1982
1686 err = memcpy_toiovec(msg->msg_iov, (void *)&vnet_hdr, 1983 err = memcpy_toiovec(msg->msg_iov, (void *)&vnet_hdr,
@@ -2102,6 +2399,17 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
2102 po->tp_tstamp = val; 2399 po->tp_tstamp = val;
2103 return 0; 2400 return 0;
2104 } 2401 }
2402 case PACKET_FANOUT:
2403 {
2404 int val;
2405
2406 if (optlen != sizeof(val))
2407 return -EINVAL;
2408 if (copy_from_user(&val, optval, sizeof(val)))
2409 return -EFAULT;
2410
2411 return fanout_add(sk, val & 0xffff, val >> 16);
2412 }
2105 default: 2413 default:
2106 return -ENOPROTOOPT; 2414 return -ENOPROTOOPT;
2107 } 2415 }
@@ -2200,6 +2508,15 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
2200 val = po->tp_tstamp; 2508 val = po->tp_tstamp;
2201 data = &val; 2509 data = &val;
2202 break; 2510 break;
2511 case PACKET_FANOUT:
2512 if (len > sizeof(int))
2513 len = sizeof(int);
2514 val = (po->fanout ?
2515 ((u32)po->fanout->id |
2516 ((u32)po->fanout->type << 16)) :
2517 0);
2518 data = &val;
2519 break;
2203 default: 2520 default:
2204 return -ENOPROTOOPT; 2521 return -ENOPROTOOPT;
2205 } 2522 }
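The getsockopt side mirrors the setsockopt encoding, id | (type << 16), so a reader recovers both halves with a mask and a shift. A short sketch:

    #include <linux/if_packet.h>
    #include <stdio.h>
    #include <sys/socket.h>

    /* Recover group id and mode from a fanout member socket. */
    static void show_fanout(int fd)
    {
            int val = 0;
            socklen_t len = sizeof(val);

            if (getsockopt(fd, SOL_PACKET, PACKET_FANOUT, &val, &len) == 0)
                    /* Caveat: val == 0 means "no fanout group", but is
                     * indistinguishable from a HASH-mode group with id 0. */
                    printf("fanout id %u, type %u\n", val & 0xffff, val >> 16);
    }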
@@ -2233,15 +2550,15 @@ static int packet_notifier(struct notifier_block *this, unsigned long msg, void
2233 if (dev->ifindex == po->ifindex) { 2550 if (dev->ifindex == po->ifindex) {
2234 spin_lock(&po->bind_lock); 2551 spin_lock(&po->bind_lock);
2235 if (po->running) { 2552 if (po->running) {
2236 __dev_remove_pack(&po->prot_hook); 2553 __unregister_prot_hook(sk, false);
2237 __sock_put(sk);
2238 po->running = 0;
2239 sk->sk_err = ENETDOWN; 2554 sk->sk_err = ENETDOWN;
2240 if (!sock_flag(sk, SOCK_DEAD)) 2555 if (!sock_flag(sk, SOCK_DEAD))
2241 sk->sk_error_report(sk); 2556 sk->sk_error_report(sk);
2242 } 2557 }
2243 if (msg == NETDEV_UNREGISTER) { 2558 if (msg == NETDEV_UNREGISTER) {
2244 po->ifindex = -1; 2559 po->ifindex = -1;
2560 if (po->prot_hook.dev)
2561 dev_put(po->prot_hook.dev);
2245 po->prot_hook.dev = NULL; 2562 po->prot_hook.dev = NULL;
2246 } 2563 }
2247 spin_unlock(&po->bind_lock); 2564 spin_unlock(&po->bind_lock);
@@ -2250,11 +2567,8 @@ static int packet_notifier(struct notifier_block *this, unsigned long msg, void
2250 case NETDEV_UP: 2567 case NETDEV_UP:
2251 if (dev->ifindex == po->ifindex) { 2568 if (dev->ifindex == po->ifindex) {
2252 spin_lock(&po->bind_lock); 2569 spin_lock(&po->bind_lock);
2253 if (po->num && !po->running) { 2570 if (po->num)
2254 dev_add_pack(&po->prot_hook); 2571 register_prot_hook(sk);
2255 sock_hold(sk);
2256 po->running = 1;
2257 }
2258 spin_unlock(&po->bind_lock); 2572 spin_unlock(&po->bind_lock);
2259 } 2573 }
2260 break; 2574 break;
@@ -2521,10 +2835,8 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
2521 was_running = po->running; 2835 was_running = po->running;
2522 num = po->num; 2836 num = po->num;
2523 if (was_running) { 2837 if (was_running) {
2524 __dev_remove_pack(&po->prot_hook);
2525 po->num = 0; 2838 po->num = 0;
2526 po->running = 0; 2839 __unregister_prot_hook(sk, false);
2527 __sock_put(sk);
2528 } 2840 }
2529 spin_unlock(&po->bind_lock); 2841 spin_unlock(&po->bind_lock);
2530 2842
@@ -2555,11 +2867,9 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
2555 mutex_unlock(&po->pg_vec_lock); 2867 mutex_unlock(&po->pg_vec_lock);
2556 2868
2557 spin_lock(&po->bind_lock); 2869 spin_lock(&po->bind_lock);
2558 if (was_running && !po->running) { 2870 if (was_running) {
2559 sock_hold(sk);
2560 po->running = 1;
2561 po->num = num; 2871 po->num = num;
2562 dev_add_pack(&po->prot_hook); 2872 register_prot_hook(sk);
2563 } 2873 }
2564 spin_unlock(&po->bind_lock); 2874 spin_unlock(&po->bind_lock);
2565 2875
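Running through all of the af_packet hunks above is one refactor: every open-coded dev_add_pack()/sock_hold()/po->running sequence becomes a call to register_prot_hook() or (__)unregister_prot_hook(), whose tail is visible at the very top of this diff re-taking bind_lock after a synchronize_net(). Reconstructed from the call sites here, the helpers look roughly like the following; this is a reconstruction from context, and the committed helpers may differ in detail (callers hold po->bind_lock):

    static void register_prot_hook(struct sock *sk)
    {
            struct packet_sock *po = pkt_sk(sk);

            if (!po->running) {
                    dev_add_pack(&po->prot_hook);
                    sock_hold(sk);
                    po->running = 1;
            }
    }

    static void __unregister_prot_hook(struct sock *sk, bool sync)
    {
            struct packet_sock *po = pkt_sk(sk);

            po->running = 0;
            __dev_remove_pack(&po->prot_hook);
            __sock_put(sk);

            if (sync) {
                    spin_unlock(&po->bind_lock);
                    synchronize_net();
                    spin_lock(&po->bind_lock);
            }
    }

Centralizing the sequence is what makes the fanout work possible: there is now a single place where a socket's hook is attached or detached, instead of five slightly different copies.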
diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c
index 438accb7a5a..d61f6761777 100644
--- a/net/phonet/pn_netlink.c
+++ b/net/phonet/pn_netlink.c
@@ -289,15 +289,16 @@ out:
289 289
290int __init phonet_netlink_register(void) 290int __init phonet_netlink_register(void)
291{ 291{
292 int err = __rtnl_register(PF_PHONET, RTM_NEWADDR, addr_doit, NULL); 292 int err = __rtnl_register(PF_PHONET, RTM_NEWADDR, addr_doit,
293 NULL, NULL);
293 if (err) 294 if (err)
294 return err; 295 return err;
295 296
296 /* Further __rtnl_register() cannot fail */ 297 /* Further __rtnl_register() cannot fail */
297 __rtnl_register(PF_PHONET, RTM_DELADDR, addr_doit, NULL); 298 __rtnl_register(PF_PHONET, RTM_DELADDR, addr_doit, NULL, NULL);
298 __rtnl_register(PF_PHONET, RTM_GETADDR, NULL, getaddr_dumpit); 299 __rtnl_register(PF_PHONET, RTM_GETADDR, NULL, getaddr_dumpit, NULL);
299 __rtnl_register(PF_PHONET, RTM_NEWROUTE, route_doit, NULL); 300 __rtnl_register(PF_PHONET, RTM_NEWROUTE, route_doit, NULL, NULL);
300 __rtnl_register(PF_PHONET, RTM_DELROUTE, route_doit, NULL); 301 __rtnl_register(PF_PHONET, RTM_DELROUTE, route_doit, NULL, NULL);
301 __rtnl_register(PF_PHONET, RTM_GETROUTE, NULL, route_dumpit); 302 __rtnl_register(PF_PHONET, RTM_GETROUTE, NULL, route_dumpit, NULL);
302 return 0; 303 return 0;
303} 304}
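Every rtnl_register()/__rtnl_register() call in this tree gains a trailing argument because the rtnetlink core grew a third per-message callback (a dump-size calculator, calcit, alongside doit and dumpit); all converted call sites in this diff simply pass NULL for it. The call shape, per the phonet hunk above:

    /* Before: __rtnl_register(family, msgtype, doit, dumpit);
     * After:  __rtnl_register(family, msgtype, doit, dumpit, calcit);
     * calcit may be NULL when no dump-size estimate is needed. */
    err = __rtnl_register(PF_PHONET, RTM_NEWADDR, addr_doit, NULL, NULL);

The same mechanical change recurs below in net/sched (tc_action_init, tc_filter_init, pktsched_init).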
diff --git a/net/rds/bind.c b/net/rds/bind.c
index 2f6b3fcc79f..637bde56c9d 100644
--- a/net/rds/bind.c
+++ b/net/rds/bind.c
@@ -35,6 +35,7 @@
35#include <linux/in.h> 35#include <linux/in.h>
36#include <linux/if_arp.h> 36#include <linux/if_arp.h>
37#include <linux/jhash.h> 37#include <linux/jhash.h>
38#include <linux/ratelimit.h>
38#include "rds.h" 39#include "rds.h"
39 40
40#define BIND_HASH_SIZE 1024 41#define BIND_HASH_SIZE 1024
@@ -185,8 +186,7 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
185 if (!trans) { 186 if (!trans) {
186 ret = -EADDRNOTAVAIL; 187 ret = -EADDRNOTAVAIL;
187 rds_remove_bound(rs); 188 rds_remove_bound(rs);
188 if (printk_ratelimit()) 189 printk_ratelimited(KERN_INFO "RDS: rds_bind() could not find a transport, "
189 printk(KERN_INFO "RDS: rds_bind() could not find a transport, "
190 "load rds_tcp or rds_rdma?\n"); 190 "load rds_tcp or rds_rdma?\n");
191 goto out; 191 goto out;
192 } 192 }
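The RDS hunks in this series all make the same conversion: the two-step "if (printk_ratelimit()) printk(...)" pattern, which shares one global ratelimit state across every caller, becomes a single printk_ratelimited() call with per-callsite state. Sketch of the shape (kernel context assumed):

    #include <linux/ratelimit.h>

    /* Before (shared, racy ratelimit state):
     *     if (printk_ratelimit())
     *             printk(KERN_INFO "RDS: ...\n");
     * After (one statement, static per-callsite ratelimit state): */
    printk_ratelimited(KERN_INFO "RDS: rds_bind() could not find a transport, "
                       "load rds_tcp or rds_rdma?\n");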
diff --git a/net/rds/ib.h b/net/rds/ib.h
index 4297d92788d..edfaaaf164e 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -3,6 +3,7 @@
3 3
4#include <rdma/ib_verbs.h> 4#include <rdma/ib_verbs.h>
5#include <rdma/rdma_cm.h> 5#include <rdma/rdma_cm.h>
6#include <linux/interrupt.h>
6#include <linux/pci.h> 7#include <linux/pci.h>
7#include <linux/slab.h> 8#include <linux/slab.h>
8#include "rds.h" 9#include "rds.h"
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index fd453dd5124..cd67026be2d 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -34,6 +34,7 @@
34#include <linux/in.h> 34#include <linux/in.h>
35#include <linux/slab.h> 35#include <linux/slab.h>
36#include <linux/vmalloc.h> 36#include <linux/vmalloc.h>
37#include <linux/ratelimit.h>
37 38
38#include "rds.h" 39#include "rds.h"
39#include "ib.h" 40#include "ib.h"
@@ -435,13 +436,12 @@ static u32 rds_ib_protocol_compatible(struct rdma_cm_event *event)
435 version = RDS_PROTOCOL_3_0; 436 version = RDS_PROTOCOL_3_0;
436 while ((common >>= 1) != 0) 437 while ((common >>= 1) != 0)
437 version++; 438 version++;
438 } else if (printk_ratelimit()) { 439 }
439 printk(KERN_NOTICE "RDS: Connection from %pI4 using " 440 printk_ratelimited(KERN_NOTICE "RDS: Connection from %pI4 using "
440 "incompatible protocol version %u.%u\n", 441 "incompatible protocol version %u.%u\n",
441 &dp->dp_saddr, 442 &dp->dp_saddr,
442 dp->dp_protocol_major, 443 dp->dp_protocol_major,
443 dp->dp_protocol_minor); 444 dp->dp_protocol_minor);
444 }
445 return version; 445 return version;
446} 446}
447 447
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index 7c4dce8fa5e..e5909498117 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -34,6 +34,7 @@
34#include <linux/in.h> 34#include <linux/in.h>
35#include <linux/device.h> 35#include <linux/device.h>
36#include <linux/dmapool.h> 36#include <linux/dmapool.h>
37#include <linux/ratelimit.h>
37 38
38#include "rds.h" 39#include "rds.h"
39#include "ib.h" 40#include "ib.h"
@@ -207,8 +208,7 @@ static struct rds_message *rds_ib_send_unmap_op(struct rds_ib_connection *ic,
207 } 208 }
208 break; 209 break;
209 default: 210 default:
210 if (printk_ratelimit()) 211 printk_ratelimited(KERN_NOTICE
211 printk(KERN_NOTICE
212 "RDS/IB: %s: unexpected opcode 0x%x in WR!\n", 212 "RDS/IB: %s: unexpected opcode 0x%x in WR!\n",
213 __func__, send->s_wr.opcode); 213 __func__, send->s_wr.opcode);
214 break; 214 break;
diff --git a/net/rds/iw.h b/net/rds/iw.h
index 90151922178..04ce3b193f7 100644
--- a/net/rds/iw.h
+++ b/net/rds/iw.h
@@ -1,6 +1,7 @@
1#ifndef _RDS_IW_H 1#ifndef _RDS_IW_H
2#define _RDS_IW_H 2#define _RDS_IW_H
3 3
4#include <linux/interrupt.h>
4#include <rdma/ib_verbs.h> 5#include <rdma/ib_verbs.h>
5#include <rdma/rdma_cm.h> 6#include <rdma/rdma_cm.h>
6#include "rds.h" 7#include "rds.h"
diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
index c12db66f24c..9556d2895f7 100644
--- a/net/rds/iw_cm.c
+++ b/net/rds/iw_cm.c
@@ -34,6 +34,7 @@
34#include <linux/in.h> 34#include <linux/in.h>
35#include <linux/slab.h> 35#include <linux/slab.h>
36#include <linux/vmalloc.h> 36#include <linux/vmalloc.h>
37#include <linux/ratelimit.h>
37 38
38#include "rds.h" 39#include "rds.h"
39#include "iw.h" 40#include "iw.h"
@@ -258,8 +259,7 @@ static int rds_iw_setup_qp(struct rds_connection *conn)
258 */ 259 */
259 rds_iwdev = ib_get_client_data(dev, &rds_iw_client); 260 rds_iwdev = ib_get_client_data(dev, &rds_iw_client);
260 if (!rds_iwdev) { 261 if (!rds_iwdev) {
261 if (printk_ratelimit()) 262 printk_ratelimited(KERN_NOTICE "RDS/IW: No client_data for device %s\n",
262 printk(KERN_NOTICE "RDS/IW: No client_data for device %s\n",
263 dev->name); 263 dev->name);
264 return -EOPNOTSUPP; 264 return -EOPNOTSUPP;
265 } 265 }
@@ -365,13 +365,12 @@ static u32 rds_iw_protocol_compatible(const struct rds_iw_connect_private *dp)
365 version = RDS_PROTOCOL_3_0; 365 version = RDS_PROTOCOL_3_0;
366 while ((common >>= 1) != 0) 366 while ((common >>= 1) != 0)
367 version++; 367 version++;
368 } else if (printk_ratelimit()) { 368 }
369 printk(KERN_NOTICE "RDS: Connection from %pI4 using " 369 printk_ratelimited(KERN_NOTICE "RDS: Connection from %pI4 using "
370 "incompatible protocol version %u.%u\n", 370 "incompatible protocol version %u.%u\n",
371 &dp->dp_saddr, 371 &dp->dp_saddr,
372 dp->dp_protocol_major, 372 dp->dp_protocol_major,
373 dp->dp_protocol_minor); 373 dp->dp_protocol_minor);
374 }
375 return version; 374 return version;
376} 375}
377 376
diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c
index 6deaa77495e..8b77edbab27 100644
--- a/net/rds/iw_rdma.c
+++ b/net/rds/iw_rdma.c
@@ -32,6 +32,7 @@
32 */ 32 */
33#include <linux/kernel.h> 33#include <linux/kernel.h>
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <linux/ratelimit.h>
35 36
36#include "rds.h" 37#include "rds.h"
37#include "iw.h" 38#include "iw.h"
@@ -729,8 +730,8 @@ static int rds_iw_rdma_build_fastreg(struct rds_iw_mapping *mapping)
729 failed_wr = &f_wr; 730 failed_wr = &f_wr;
730 ret = ib_post_send(ibmr->cm_id->qp, &f_wr, &failed_wr); 731 ret = ib_post_send(ibmr->cm_id->qp, &f_wr, &failed_wr);
731 BUG_ON(failed_wr != &f_wr); 732 BUG_ON(failed_wr != &f_wr);
732 if (ret && printk_ratelimit()) 733 if (ret)
733 printk(KERN_WARNING "RDS/IW: %s:%d ib_post_send returned %d\n", 734 printk_ratelimited(KERN_WARNING "RDS/IW: %s:%d ib_post_send returned %d\n",
734 __func__, __LINE__, ret); 735 __func__, __LINE__, ret);
735 return ret; 736 return ret;
736} 737}
@@ -751,8 +752,8 @@ static int rds_iw_rdma_fastreg_inv(struct rds_iw_mr *ibmr)
751 752
752 failed_wr = &s_wr; 753 failed_wr = &s_wr;
753 ret = ib_post_send(ibmr->cm_id->qp, &s_wr, &failed_wr); 754 ret = ib_post_send(ibmr->cm_id->qp, &s_wr, &failed_wr);
754 if (ret && printk_ratelimit()) { 755 if (ret) {
755 printk(KERN_WARNING "RDS/IW: %s:%d ib_post_send returned %d\n", 756 printk_ratelimited(KERN_WARNING "RDS/IW: %s:%d ib_post_send returned %d\n",
756 __func__, __LINE__, ret); 757 __func__, __LINE__, ret);
757 goto out; 758 goto out;
758 } 759 }
diff --git a/net/rds/iw_send.c b/net/rds/iw_send.c
index 545d8ee3efb..e40c3c5db2c 100644
--- a/net/rds/iw_send.c
+++ b/net/rds/iw_send.c
@@ -34,6 +34,7 @@
34#include <linux/in.h> 34#include <linux/in.h>
35#include <linux/device.h> 35#include <linux/device.h>
36#include <linux/dmapool.h> 36#include <linux/dmapool.h>
37#include <linux/ratelimit.h>
37 38
38#include "rds.h" 39#include "rds.h"
39#include "iw.h" 40#include "iw.h"
@@ -258,8 +259,7 @@ void rds_iw_send_cq_comp_handler(struct ib_cq *cq, void *context)
258 * when the SEND completes. */ 259 * when the SEND completes. */
259 break; 260 break;
260 default: 261 default:
261 if (printk_ratelimit()) 262 printk_ratelimited(KERN_NOTICE
262 printk(KERN_NOTICE
263 "RDS/IW: %s: unexpected opcode 0x%x in WR!\n", 263 "RDS/IW: %s: unexpected opcode 0x%x in WR!\n",
264 __func__, send->s_wr.opcode); 264 __func__, send->s_wr.opcode);
265 break; 265 break;
diff --git a/net/rds/send.c b/net/rds/send.c
index d58ae5f9339..aa57e22539e 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -35,6 +35,7 @@
35#include <net/sock.h> 35#include <net/sock.h>
36#include <linux/in.h> 36#include <linux/in.h>
37#include <linux/list.h> 37#include <linux/list.h>
38#include <linux/ratelimit.h>
38 39
39#include "rds.h" 40#include "rds.h"
40 41
@@ -1006,16 +1007,14 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
1006 goto out; 1007 goto out;
1007 1008
1008 if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) { 1009 if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
1009 if (printk_ratelimit()) 1010 printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
1010 printk(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
1011 &rm->rdma, conn->c_trans->xmit_rdma); 1011 &rm->rdma, conn->c_trans->xmit_rdma);
1012 ret = -EOPNOTSUPP; 1012 ret = -EOPNOTSUPP;
1013 goto out; 1013 goto out;
1014 } 1014 }
1015 1015
1016 if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) { 1016 if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
1017 if (printk_ratelimit()) 1017 printk_ratelimited(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
1018 printk(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
1019 &rm->atomic, conn->c_trans->xmit_atomic); 1018 &rm->atomic, conn->c_trans->xmit_atomic);
1020 ret = -EOPNOTSUPP; 1019 ret = -EOPNOTSUPP;
1021 goto out; 1020 goto out;
diff --git a/net/rds/tcp_stats.c b/net/rds/tcp_stats.c
index d5898d03cd6..f8a7954f1f5 100644
--- a/net/rds/tcp_stats.c
+++ b/net/rds/tcp_stats.c
@@ -40,7 +40,7 @@
40DEFINE_PER_CPU(struct rds_tcp_statistics, rds_tcp_stats) 40DEFINE_PER_CPU(struct rds_tcp_statistics, rds_tcp_stats)
41 ____cacheline_aligned; 41 ____cacheline_aligned;
42 42
43static const char const *rds_tcp_stat_names[] = { 43static const char * const rds_tcp_stat_names[] = {
44 "tcp_data_ready_calls", 44 "tcp_data_ready_calls",
45 "tcp_write_space_calls", 45 "tcp_write_space_calls",
46 "tcp_sndbuf_full", 46 "tcp_sndbuf_full",
diff --git a/net/rose/rose_link.c b/net/rose/rose_link.c
index fa5f5641a2c..7a02bd1cc5a 100644
--- a/net/rose/rose_link.c
+++ b/net/rose/rose_link.c
@@ -266,13 +266,6 @@ void rose_transmit_link(struct sk_buff *skb, struct rose_neigh *neigh)
266{ 266{
267 unsigned char *dptr; 267 unsigned char *dptr;
268 268
269#if 0
270 if (call_fw_firewall(PF_ROSE, skb->dev, skb->data, NULL, &skb) != FW_ACCEPT) {
271 kfree_skb(skb);
272 return;
273 }
274#endif
275
276 if (neigh->loopback) { 269 if (neigh->loopback) {
277 rose_loopback_queue(skb, neigh); 270 rose_loopback_queue(skb, neigh);
278 return; 271 return;
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index 479cae57d18..d389de19708 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -864,11 +864,6 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
864 int res = 0; 864 int res = 0;
865 char buf[11]; 865 char buf[11];
866 866
867#if 0
868 if (call_in_firewall(PF_ROSE, skb->dev, skb->data, NULL, &skb) != FW_ACCEPT)
869 return res;
870#endif
871
872 if (skb->len < ROSE_MIN_LEN) 867 if (skb->len < ROSE_MIN_LEN)
873 return res; 868 return res;
874 frametype = skb->data[2]; 869 frametype = skb->data[2];
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index a606025814a..f2fb67e701a 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -365,10 +365,10 @@ static struct tc_action_ops *tc_lookup_action_id(u32 type)
365} 365}
366#endif 366#endif
367 367
368int tcf_action_exec(struct sk_buff *skb, struct tc_action *act, 368int tcf_action_exec(struct sk_buff *skb, const struct tc_action *act,
369 struct tcf_result *res) 369 struct tcf_result *res)
370{ 370{
371 struct tc_action *a; 371 const struct tc_action *a;
372 int ret = -1; 372 int ret = -1;
373 373
374 if (skb->tc_verd & TC_NCLS) { 374 if (skb->tc_verd & TC_NCLS) {
@@ -1115,9 +1115,10 @@ nlmsg_failure:
1115 1115
1116static int __init tc_action_init(void) 1116static int __init tc_action_init(void)
1117{ 1117{
1118 rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL); 1118 rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL, NULL);
1119 rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL); 1119 rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL, NULL);
1120 rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action); 1120 rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action,
1121 NULL);
1121 1122
1122 return 0; 1123 return 0;
1123} 1124}
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 6cdf9abe475..453a73431ac 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -500,7 +500,7 @@ fail:
500} 500}
501 501
502static int tcf_csum(struct sk_buff *skb, 502static int tcf_csum(struct sk_buff *skb,
503 struct tc_action *a, struct tcf_result *res) 503 const struct tc_action *a, struct tcf_result *res)
504{ 504{
505 struct tcf_csum *p = a->priv; 505 struct tcf_csum *p = a->priv;
506 int action; 506 int action;
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index 2b4ab4b05ce..b77f5a06a65 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -125,7 +125,8 @@ static int tcf_gact_cleanup(struct tc_action *a, int bind)
125 return 0; 125 return 0;
126} 126}
127 127
128static int tcf_gact(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res) 128static int tcf_gact(struct sk_buff *skb, const struct tc_action *a,
129 struct tcf_result *res)
129{ 130{
130 struct tcf_gact *gact = a->priv; 131 struct tcf_gact *gact = a->priv;
131 int action = TC_ACT_SHOT; 132 int action = TC_ACT_SHOT;
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 9fc211a1b20..60f8f616e8f 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -195,7 +195,7 @@ static int tcf_ipt_cleanup(struct tc_action *a, int bind)
195 return tcf_ipt_release(ipt, bind); 195 return tcf_ipt_release(ipt, bind);
196} 196}
197 197
198static int tcf_ipt(struct sk_buff *skb, struct tc_action *a, 198static int tcf_ipt(struct sk_buff *skb, const struct tc_action *a,
199 struct tcf_result *res) 199 struct tcf_result *res)
200{ 200{
201 int ret = 0, result = 0; 201 int ret = 0, result = 0;
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 961386e2f2c..102fc212cd6 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -154,7 +154,7 @@ static int tcf_mirred_cleanup(struct tc_action *a, int bind)
154 return 0; 154 return 0;
155} 155}
156 156
157static int tcf_mirred(struct sk_buff *skb, struct tc_action *a, 157static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
158 struct tcf_result *res) 158 struct tcf_result *res)
159{ 159{
160 struct tcf_mirred *m = a->priv; 160 struct tcf_mirred *m = a->priv;
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 762b027650a..001d1b35486 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -102,7 +102,7 @@ static int tcf_nat_cleanup(struct tc_action *a, int bind)
102 return tcf_hash_release(&p->common, bind, &nat_hash_info); 102 return tcf_hash_release(&p->common, bind, &nat_hash_info);
103} 103}
104 104
105static int tcf_nat(struct sk_buff *skb, struct tc_action *a, 105static int tcf_nat(struct sk_buff *skb, const struct tc_action *a,
106 struct tcf_result *res) 106 struct tcf_result *res)
107{ 107{
108 struct tcf_nat *p = a->priv; 108 struct tcf_nat *p = a->priv;
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 7affe9a9275..10d3aed8656 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -120,7 +120,7 @@ static int tcf_pedit_cleanup(struct tc_action *a, int bind)
120 return 0; 120 return 0;
121} 121}
122 122
123static int tcf_pedit(struct sk_buff *skb, struct tc_action *a, 123static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a,
124 struct tcf_result *res) 124 struct tcf_result *res)
125{ 125{
126 struct tcf_pedit *p = a->priv; 126 struct tcf_pedit *p = a->priv;
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index b3b9b32f4e0..6fb3f5af0f8 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -282,7 +282,7 @@ static int tcf_act_police_cleanup(struct tc_action *a, int bind)
282 return ret; 282 return ret;
283} 283}
284 284
285static int tcf_act_police(struct sk_buff *skb, struct tc_action *a, 285static int tcf_act_police(struct sk_buff *skb, const struct tc_action *a,
286 struct tcf_result *res) 286 struct tcf_result *res)
287{ 287{
288 struct tcf_police *police = a->priv; 288 struct tcf_police *police = a->priv;
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index a34a22de60b..73e0a3ab4d5 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -36,7 +36,8 @@ static struct tcf_hashinfo simp_hash_info = {
36}; 36};
37 37
38#define SIMP_MAX_DATA 32 38#define SIMP_MAX_DATA 32
39static int tcf_simp(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res) 39static int tcf_simp(struct sk_buff *skb, const struct tc_action *a,
40 struct tcf_result *res)
40{ 41{
41 struct tcf_defact *d = a->priv; 42 struct tcf_defact *d = a->priv;
42 43
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 5f6f0c7c390..35dbbe91027 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -39,7 +39,7 @@ static struct tcf_hashinfo skbedit_hash_info = {
39 .lock = &skbedit_lock, 39 .lock = &skbedit_lock,
40}; 40};
41 41
42static int tcf_skbedit(struct sk_buff *skb, struct tc_action *a, 42static int tcf_skbedit(struct sk_buff *skb, const struct tc_action *a,
43 struct tcf_result *res) 43 struct tcf_result *res)
44{ 44{
45 struct tcf_skbedit *d = a->priv; 45 struct tcf_skbedit *d = a->priv;
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index bb2c523f815..a69d44f1dac 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -40,9 +40,9 @@ static DEFINE_RWLOCK(cls_mod_lock);
40 40
41/* Find classifier type by string name */ 41/* Find classifier type by string name */
42 42
43static struct tcf_proto_ops *tcf_proto_lookup_ops(struct nlattr *kind) 43static const struct tcf_proto_ops *tcf_proto_lookup_ops(struct nlattr *kind)
44{ 44{
45 struct tcf_proto_ops *t = NULL; 45 const struct tcf_proto_ops *t = NULL;
46 46
47 if (kind) { 47 if (kind) {
48 read_lock(&cls_mod_lock); 48 read_lock(&cls_mod_lock);
@@ -132,7 +132,7 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
132 struct Qdisc *q; 132 struct Qdisc *q;
133 struct tcf_proto **back, **chain; 133 struct tcf_proto **back, **chain;
134 struct tcf_proto *tp; 134 struct tcf_proto *tp;
135 struct tcf_proto_ops *tp_ops; 135 const struct tcf_proto_ops *tp_ops;
136 const struct Qdisc_class_ops *cops; 136 const struct Qdisc_class_ops *cops;
137 unsigned long cl; 137 unsigned long cl;
138 unsigned long fh; 138 unsigned long fh;
@@ -610,10 +610,10 @@ EXPORT_SYMBOL(tcf_exts_dump_stats);
610 610
611static int __init tc_filter_init(void) 611static int __init tc_filter_init(void)
612{ 612{
613 rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_ctl_tfilter, NULL); 613 rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_ctl_tfilter, NULL, NULL);
614 rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_ctl_tfilter, NULL); 614 rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_ctl_tfilter, NULL, NULL);
615 rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_ctl_tfilter, 615 rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_ctl_tfilter,
616 tc_dump_tfilter); 616 tc_dump_tfilter, NULL);
617 617
618 return 0; 618 return 0;
619} 619}
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index 8be8872dd57..ea1f70b5a5f 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -39,7 +39,7 @@ static const struct tcf_ext_map basic_ext_map = {
39 .police = TCA_BASIC_POLICE 39 .police = TCA_BASIC_POLICE
40}; 40};
41 41
42static int basic_classify(struct sk_buff *skb, struct tcf_proto *tp, 42static int basic_classify(struct sk_buff *skb, const struct tcf_proto *tp,
43 struct tcf_result *res) 43 struct tcf_result *res)
44{ 44{
45 int r; 45 int r;
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index 32a335194ca..f84fdc3a7f2 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -101,7 +101,7 @@ struct cls_cgroup_head {
101 struct tcf_ematch_tree ematches; 101 struct tcf_ematch_tree ematches;
102}; 102};
103 103
104static int cls_cgroup_classify(struct sk_buff *skb, struct tcf_proto *tp, 104static int cls_cgroup_classify(struct sk_buff *skb, const struct tcf_proto *tp,
105 struct tcf_result *res) 105 struct tcf_result *res)
106{ 106{
107 struct cls_cgroup_head *head = tp->root; 107 struct cls_cgroup_head *head = tp->root;
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 8ec01391d98..6994214db8f 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -121,7 +121,7 @@ static u32 flow_get_proto_src(struct sk_buff *skb)
121 if (!pskb_network_may_pull(skb, sizeof(*iph))) 121 if (!pskb_network_may_pull(skb, sizeof(*iph)))
122 break; 122 break;
123 iph = ip_hdr(skb); 123 iph = ip_hdr(skb);
124 if (iph->frag_off & htons(IP_MF | IP_OFFSET)) 124 if (ip_is_fragment(iph))
125 break; 125 break;
126 poff = proto_ports_offset(iph->protocol); 126 poff = proto_ports_offset(iph->protocol);
127 if (poff >= 0 && 127 if (poff >= 0 &&
@@ -163,7 +163,7 @@ static u32 flow_get_proto_dst(struct sk_buff *skb)
163 if (!pskb_network_may_pull(skb, sizeof(*iph))) 163 if (!pskb_network_may_pull(skb, sizeof(*iph)))
164 break; 164 break;
165 iph = ip_hdr(skb); 165 iph = ip_hdr(skb);
166 if (iph->frag_off & htons(IP_MF | IP_OFFSET)) 166 if (ip_is_fragment(iph))
167 break; 167 break;
168 poff = proto_ports_offset(iph->protocol); 168 poff = proto_ports_offset(iph->protocol);
169 if (poff >= 0 && 169 if (poff >= 0 &&
@@ -356,7 +356,7 @@ static u32 flow_key_get(struct sk_buff *skb, int key)
356 } 356 }
357} 357}
358 358
359static int flow_classify(struct sk_buff *skb, struct tcf_proto *tp, 359static int flow_classify(struct sk_buff *skb, const struct tcf_proto *tp,
360 struct tcf_result *res) 360 struct tcf_result *res)
361{ 361{
362 struct flow_head *head = tp->root; 362 struct flow_head *head = tp->root;
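The frag_off tests replaced throughout net/sched (here, and in cls_rsvp.h, sch_choke, and sch_sfq below) all become the ip_is_fragment() helper added to include/net/ip.h in the same series. Its definition, reproduced for reference, is equivalent to the removed open-coded expression:

    /* From include/net/ip.h: true when either the More-Fragments bit
     * or a non-zero fragment offset is set. */
    static inline bool ip_is_fragment(const struct iphdr *iph)
    {
            return (iph->frag_off & htons(IP_MF | IP_OFFSET)) != 0;
    }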
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index 26e7bc4ffb7..389af152ec4 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -77,7 +77,7 @@ static inline int fw_hash(u32 handle)
77 return handle & (HTSIZE - 1); 77 return handle & (HTSIZE - 1);
78} 78}
79 79
80static int fw_classify(struct sk_buff *skb, struct tcf_proto *tp, 80static int fw_classify(struct sk_buff *skb, const struct tcf_proto *tp,
81 struct tcf_result *res) 81 struct tcf_result *res)
82{ 82{
83 struct fw_head *head = (struct fw_head *)tp->root; 83 struct fw_head *head = (struct fw_head *)tp->root;
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index a907905376d..13ab66e9df5 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -125,7 +125,7 @@ static inline int route4_hash_wild(void)
125 return 0; \ 125 return 0; \
126} 126}
127 127
128static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp, 128static int route4_classify(struct sk_buff *skb, const struct tcf_proto *tp,
129 struct tcf_result *res) 129 struct tcf_result *res)
130{ 130{
131 struct route4_head *head = (struct route4_head *)tp->root; 131 struct route4_head *head = (struct route4_head *)tp->root;
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index 402c44b241a..be4505ee67a 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -130,7 +130,7 @@ static struct tcf_ext_map rsvp_ext_map = {
130 return r; \ 130 return r; \
131} 131}
132 132
133static int rsvp_classify(struct sk_buff *skb, struct tcf_proto *tp, 133static int rsvp_classify(struct sk_buff *skb, const struct tcf_proto *tp,
134 struct tcf_result *res) 134 struct tcf_result *res)
135{ 135{
136 struct rsvp_session **sht = ((struct rsvp_head *)tp->root)->ht; 136 struct rsvp_session **sht = ((struct rsvp_head *)tp->root)->ht;
@@ -167,7 +167,7 @@ restart:
167 dst = &nhptr->daddr; 167 dst = &nhptr->daddr;
168 protocol = nhptr->protocol; 168 protocol = nhptr->protocol;
169 xprt = ((u8 *)nhptr) + (nhptr->ihl<<2); 169 xprt = ((u8 *)nhptr) + (nhptr->ihl<<2);
170 if (nhptr->frag_off & htons(IP_MF | IP_OFFSET)) 170 if (ip_is_fragment(nhptr))
171 return -1; 171 return -1;
172#endif 172#endif
173 173
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index 36667fa6423..dbe199234c6 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -79,7 +79,7 @@ tcindex_lookup(struct tcindex_data *p, u16 key)
79} 79}
80 80
81 81
82static int tcindex_classify(struct sk_buff *skb, struct tcf_proto *tp, 82static int tcindex_classify(struct sk_buff *skb, const struct tcf_proto *tp,
83 struct tcf_result *res) 83 struct tcf_result *res)
84{ 84{
85 struct tcindex_data *p = PRIV(tp); 85 struct tcindex_data *p = PRIV(tp);
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 3b93fc0c895..939b627b479 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -93,7 +93,7 @@ static inline unsigned int u32_hash_fold(__be32 key,
93 return h; 93 return h;
94} 94}
95 95
96static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_result *res) 96static int u32_classify(struct sk_buff *skb, const struct tcf_proto *tp, struct tcf_result *res)
97{ 97{
98 struct { 98 struct {
99 struct tc_u_knode *knode; 99 struct tc_u_knode *knode;
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 49130e8abff..1363bf14e61 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -404,12 +404,6 @@ META_COLLECTOR(int_sk_alloc)
404 dst->value = (__force int) skb->sk->sk_allocation; 404 dst->value = (__force int) skb->sk->sk_allocation;
405} 405}
406 406
407META_COLLECTOR(int_sk_route_caps)
408{
409 SKIP_NONLOCAL(skb);
410 dst->value = skb->sk->sk_route_caps;
411}
412
413META_COLLECTOR(int_sk_hash) 407META_COLLECTOR(int_sk_hash)
414{ 408{
415 SKIP_NONLOCAL(skb); 409 SKIP_NONLOCAL(skb);
@@ -530,7 +524,6 @@ static struct meta_ops __meta_ops[TCF_META_TYPE_MAX + 1][TCF_META_ID_MAX + 1] =
530 [META_ID(SK_ERR_QLEN)] = META_FUNC(int_sk_err_qlen), 524 [META_ID(SK_ERR_QLEN)] = META_FUNC(int_sk_err_qlen),
531 [META_ID(SK_FORWARD_ALLOCS)] = META_FUNC(int_sk_fwd_alloc), 525 [META_ID(SK_FORWARD_ALLOCS)] = META_FUNC(int_sk_fwd_alloc),
532 [META_ID(SK_ALLOCS)] = META_FUNC(int_sk_alloc), 526 [META_ID(SK_ALLOCS)] = META_FUNC(int_sk_alloc),
533 [META_ID(SK_ROUTE_CAPS)] = META_FUNC(int_sk_route_caps),
534 [META_ID(SK_HASH)] = META_FUNC(int_sk_hash), 527 [META_ID(SK_HASH)] = META_FUNC(int_sk_hash),
535 [META_ID(SK_LINGERTIME)] = META_FUNC(int_sk_lingertime), 528 [META_ID(SK_LINGERTIME)] = META_FUNC(int_sk_lingertime),
536 [META_ID(SK_ACK_BACKLOG)] = META_FUNC(int_sk_ack_bl), 529 [META_ID(SK_ACK_BACKLOG)] = META_FUNC(int_sk_ack_bl),
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 6b8627661c9..dca6c1a576f 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1644,7 +1644,7 @@ done:
1644 * to this qdisc, (optionally) tests for protocol and asks 1644 * to this qdisc, (optionally) tests for protocol and asks
1645 * specific classifiers. 1645 * specific classifiers.
1646 */ 1646 */
1647int tc_classify_compat(struct sk_buff *skb, struct tcf_proto *tp, 1647int tc_classify_compat(struct sk_buff *skb, const struct tcf_proto *tp,
1648 struct tcf_result *res) 1648 struct tcf_result *res)
1649{ 1649{
1650 __be16 protocol = skb->protocol; 1650 __be16 protocol = skb->protocol;
@@ -1668,12 +1668,12 @@ int tc_classify_compat(struct sk_buff *skb, struct tcf_proto *tp,
1668} 1668}
1669EXPORT_SYMBOL(tc_classify_compat); 1669EXPORT_SYMBOL(tc_classify_compat);
1670 1670
1671int tc_classify(struct sk_buff *skb, struct tcf_proto *tp, 1671int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
1672 struct tcf_result *res) 1672 struct tcf_result *res)
1673{ 1673{
1674 int err = 0; 1674 int err = 0;
1675#ifdef CONFIG_NET_CLS_ACT 1675#ifdef CONFIG_NET_CLS_ACT
1676 struct tcf_proto *otp = tp; 1676 const struct tcf_proto *otp = tp;
1677reclassify: 1677reclassify:
1678#endif 1678#endif
1679 1679
@@ -1792,12 +1792,12 @@ static int __init pktsched_init(void)
1792 register_qdisc(&pfifo_head_drop_qdisc_ops); 1792 register_qdisc(&pfifo_head_drop_qdisc_ops);
1793 register_qdisc(&mq_qdisc_ops); 1793 register_qdisc(&mq_qdisc_ops);
1794 1794
1795 rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL); 1795 rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, NULL);
1796 rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL); 1796 rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, NULL);
1797 rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc); 1797 rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc, NULL);
1798 rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL); 1798 rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, NULL);
1799 rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL); 1799 rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, NULL);
1800 rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass); 1800 rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass, NULL);
1801 1801
1802 return 0; 1802 return 0;
1803} 1803}
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 3f08158b868..e25e49061a0 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -5,6 +5,7 @@
5#include <linux/module.h> 5#include <linux/module.h>
6#include <linux/slab.h> 6#include <linux/slab.h>
7#include <linux/init.h> 7#include <linux/init.h>
8#include <linux/interrupt.h>
8#include <linux/string.h> 9#include <linux/string.h>
9#include <linux/errno.h> 10#include <linux/errno.h>
10#include <linux/skbuff.h> 11#include <linux/skbuff.h>
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index 06afbaeb4c8..3422b25df9e 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -181,7 +181,7 @@ static bool choke_match_flow(struct sk_buff *skb1,
181 ip1->saddr != ip2->saddr || ip1->daddr != ip2->daddr) 181 ip1->saddr != ip2->saddr || ip1->daddr != ip2->daddr)
182 return false; 182 return false;
183 183
184 if ((ip1->frag_off | ip2->frag_off) & htons(IP_MF | IP_OFFSET)) 184 if (ip_is_fragment(ip1) | ip_is_fragment(ip2))
185 ip_proto = 0; 185 ip_proto = 0;
186 off1 += ip1->ihl * 4; 186 off1 += ip1->ihl * 4;
187 off2 += ip2->ihl * 4; 187 off2 += ip2->ihl * 4;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index b4c680900d7..69fca279880 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -189,15 +189,15 @@ static inline int qdisc_restart(struct Qdisc *q)
189 189
190void __qdisc_run(struct Qdisc *q) 190void __qdisc_run(struct Qdisc *q)
191{ 191{
192 unsigned long start_time = jiffies; 192 int quota = weight_p;
193 193
194 while (qdisc_restart(q)) { 194 while (qdisc_restart(q)) {
195 /* 195 /*
196 * Postpone processing if 196 * Ordered by possible occurrence: Postpone processing if
197 * 1. another process needs the CPU; 197 * 1. we've exceeded packet quota
198 * 2. we've been doing it for too long. 198 * 2. another process needs the CPU;
199 */ 199 */
200 if (need_resched() || jiffies != start_time) { 200 if (--quota <= 0 || need_resched()) {
201 __netif_schedule(q); 201 __netif_schedule(q);
202 break; 202 break;
203 } 203 }
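The jiffies test could let __qdisc_run() hog the CPU for up to a full tick, or yield almost immediately if it started just before a tick boundary. The replacement spends a fixed per-invocation packet budget, weight_p (the variable behind net.core.dev_weight, default 64), before deferring. A runnable illustration of the budgeted-work pattern, with BUDGET standing in for weight_p:

    #include <stdbool.h>
    #include <stdio.h>

    enum { BUDGET = 64 };   /* plays the role of weight_p */

    static bool process_one(int *remaining)
    {
            if (*remaining == 0)
                    return false;   /* queue empty */
            (*remaining)--;
            return true;
    }

    int main(void)
    {
            int backlog = 200, rounds = 0;

            while (backlog > 0) {
                    int quota = BUDGET;

                    rounds++;
                    while (process_one(&backlog))
                            if (--quota <= 0)
                                    break;  /* yield; reschedule the rest,
                                             * as __netif_schedule(q) does */
            }
            printf("drained in %d scheduling rounds\n", rounds);
            return 0;
    }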
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 69c35f6cd13..eb3b9a86c6e 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -13,6 +13,7 @@
13 * Catalin(ux aka Dino) BOIE <catab at umbrella dot ro> 13 * Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
14 */ 14 */
15 15
16#include <linux/mm.h>
16#include <linux/module.h> 17#include <linux/module.h>
17#include <linux/slab.h> 18#include <linux/slab.h>
18#include <linux/types.h> 19#include <linux/types.h>
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index b6ea6afa55b..4536ee64383 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -157,7 +157,7 @@ static unsigned int sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
157 iph = ip_hdr(skb); 157 iph = ip_hdr(skb);
158 h = (__force u32)iph->daddr; 158 h = (__force u32)iph->daddr;
159 h2 = (__force u32)iph->saddr ^ iph->protocol; 159 h2 = (__force u32)iph->saddr ^ iph->protocol;
160 if (iph->frag_off & htons(IP_MF | IP_OFFSET)) 160 if (ip_is_fragment(iph))
161 break; 161 break;
162 poff = proto_ports_offset(iph->protocol); 162 poff = proto_ports_offset(iph->protocol);
163 if (poff >= 0 && 163 if (poff >= 0 &&
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 45cd30098e3..a3b7120fcc7 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -229,7 +229,7 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *
229{ 229{
230 struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, 0); 230 struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, 0);
231 struct teql_sched_data *q = qdisc_priv(dev_queue->qdisc); 231 struct teql_sched_data *q = qdisc_priv(dev_queue->qdisc);
232 struct neighbour *mn = skb_dst(skb)->neighbour; 232 struct neighbour *mn = dst_get_neighbour(skb_dst(skb));
233 struct neighbour *n = q->ncache; 233 struct neighbour *n = q->ncache;
234 234
235 if (mn->tbl == NULL) 235 if (mn->tbl == NULL)
@@ -270,7 +270,7 @@ static inline int teql_resolve(struct sk_buff *skb,
270 270
271 if (dev->header_ops == NULL || 271 if (dev->header_ops == NULL ||
272 skb_dst(skb) == NULL || 272 skb_dst(skb) == NULL ||
273 skb_dst(skb)->neighbour == NULL) 273 dst_get_neighbour(skb_dst(skb)) == NULL)
274 return 0; 274 return 0;
275 return __teql_resolve(skb, skb_res, dev); 275 return __teql_resolve(skb, skb_res, dev);
276} 276}
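The teql hunks swap direct skb_dst(skb)->neighbour dereferences for the dst_get_neighbour() accessor, so every read of the neighbour pointer funnels through one helper that can later change its internals (locking, RCU) without touching callers. At this point the accessor is believed to be a thin wrapper, roughly (hedged reconstruction; the real definition lives in include/net/dst.h):

    static inline struct neighbour *dst_get_neighbour(struct dst_entry *dst)
    {
            return dst->_neighbour;
    }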
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 4a62888f2e4..dc16b90ddb6 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -280,6 +280,8 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
280 asoc->peer.asconf_capable = 0; 280 asoc->peer.asconf_capable = 0;
281 if (sctp_addip_noauth) 281 if (sctp_addip_noauth)
282 asoc->peer.asconf_capable = 1; 282 asoc->peer.asconf_capable = 1;
283 asoc->asconf_addr_del_pending = NULL;
284 asoc->src_out_of_asoc_ok = 0;
283 285
284 /* Create an input queue. */ 286 /* Create an input queue. */
285 sctp_inq_init(&asoc->base.inqueue); 287 sctp_inq_init(&asoc->base.inqueue);
@@ -446,6 +448,10 @@ void sctp_association_free(struct sctp_association *asoc)
446 448
447 sctp_asconf_queue_teardown(asoc); 449 sctp_asconf_queue_teardown(asoc);
448 450
451 /* Free pending address space being deleted */
452 if (asoc->asconf_addr_del_pending != NULL)
453 kfree(asoc->asconf_addr_del_pending);
454
449 /* AUTH - Free the endpoint shared keys */ 455 /* AUTH - Free the endpoint shared keys */
450 sctp_auth_destroy_keys(&asoc->endpoint_shared_keys); 456 sctp_auth_destroy_keys(&asoc->endpoint_shared_keys);
451 457
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index 83e3011c19c..4ece451c8d2 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -430,7 +430,7 @@ union sctp_addr *sctp_find_unmatch_addr(struct sctp_bind_addr *bp,
430 list_for_each_entry(laddr, &bp->address_list, list) { 430 list_for_each_entry(laddr, &bp->address_list, list) {
431 addr_buf = (union sctp_addr *)addrs; 431 addr_buf = (union sctp_addr *)addrs;
432 for (i = 0; i < addrcnt; i++) { 432 for (i = 0; i < addrcnt; i++) {
433 addr = (union sctp_addr *)addr_buf; 433 addr = addr_buf;
434 af = sctp_get_af_specific(addr->v4.sin_family); 434 af = sctp_get_af_specific(addr->v4.sin_family);
435 if (!af) 435 if (!af)
436 break; 436 break;
@@ -534,6 +534,21 @@ int sctp_in_scope(const union sctp_addr *addr, sctp_scope_t scope)
534 return 0; 534 return 0;
535} 535}
536 536
537int sctp_is_ep_boundall(struct sock *sk)
538{
539 struct sctp_bind_addr *bp;
540 struct sctp_sockaddr_entry *addr;
541
542 bp = &sctp_sk(sk)->ep->base.bind_addr;
543 if (sctp_list_single_entry(&bp->address_list)) {
544 addr = list_entry(bp->address_list.next,
545 struct sctp_sockaddr_entry, list);
546 if (sctp_is_any(sk, &addr->a))
547 return 1;
548 }
549 return 0;
550}
551
537/******************************************************************** 552/********************************************************************
538 * 3rd Level Abstractions 553 * 3rd Level Abstractions
539 ********************************************************************/ 554 ********************************************************************/
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 741ed164883..b7692aab6e9 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -510,8 +510,7 @@ struct sock *sctp_err_lookup(int family, struct sk_buff *skb,
510 * discard the packet. 510 * discard the packet.
511 */ 511 */
512 if (vtag == 0) { 512 if (vtag == 0) {
513 chunkhdr = (struct sctp_init_chunk *)((void *)sctphdr 513 chunkhdr = (void *)sctphdr + sizeof(struct sctphdr);
514 + sizeof(struct sctphdr));
515 if (len < sizeof(struct sctphdr) + sizeof(sctp_chunkhdr_t) 514 if (len < sizeof(struct sctphdr) + sizeof(sctp_chunkhdr_t)
516 + sizeof(__be32) || 515 + sizeof(__be32) ||
517 chunkhdr->chunk_hdr.type != SCTP_CID_INIT || 516 chunkhdr->chunk_hdr.type != SCTP_CID_INIT ||
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 0bb0d7cb9f1..aabaee41dd3 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -112,6 +112,7 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
112 addr->valid = 1; 112 addr->valid = 1;
113 spin_lock_bh(&sctp_local_addr_lock); 113 spin_lock_bh(&sctp_local_addr_lock);
114 list_add_tail_rcu(&addr->list, &sctp_local_addr_list); 114 list_add_tail_rcu(&addr->list, &sctp_local_addr_list);
115 sctp_addr_wq_mgmt(addr, SCTP_ADDR_NEW);
115 spin_unlock_bh(&sctp_local_addr_lock); 116 spin_unlock_bh(&sctp_local_addr_lock);
116 } 117 }
117 break; 118 break;
@@ -122,6 +123,7 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
122 if (addr->a.sa.sa_family == AF_INET6 && 123 if (addr->a.sa.sa_family == AF_INET6 &&
123 ipv6_addr_equal(&addr->a.v6.sin6_addr, 124 ipv6_addr_equal(&addr->a.v6.sin6_addr,
124 &ifa->addr)) { 125 &ifa->addr)) {
126 sctp_addr_wq_mgmt(addr, SCTP_ADDR_DEL);
125 found = 1; 127 found = 1;
126 addr->valid = 0; 128 addr->valid = 0;
127 list_del_rcu(&addr->list); 129 list_del_rcu(&addr->list);
diff --git a/net/sctp/output.c b/net/sctp/output.c
index b4f3cf06d8d..08b3cead650 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -500,23 +500,20 @@ int sctp_packet_transmit(struct sctp_packet *packet)
500 * Note: Adler-32 is no longer applicable, as has been replaced 500 * Note: Adler-32 is no longer applicable, as has been replaced
501 * by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>. 501 * by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>.
502 */ 502 */
503 if (!sctp_checksum_disable && 503 if (!sctp_checksum_disable) {
504 !(dst->dev->features & (NETIF_F_NO_CSUM | NETIF_F_SCTP_CSUM))) { 504 if (!(dst->dev->features & NETIF_F_SCTP_CSUM)) {
505 __u32 crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len); 505 __u32 crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len);
506 506
507 /* 3) Put the resultant value into the checksum field in the 507 /* 3) Put the resultant value into the checksum field in the
508 * common header, and leave the rest of the bits unchanged. 508 * common header, and leave the rest of the bits unchanged.
509 */ 509 */
510 sh->checksum = sctp_end_cksum(crc32); 510 sh->checksum = sctp_end_cksum(crc32);
511 } else { 511 } else {
512 if (dst->dev->features & NETIF_F_SCTP_CSUM) {
513 /* no need to seed pseudo checksum for SCTP */ 512 /* no need to seed pseudo checksum for SCTP */
514 nskb->ip_summed = CHECKSUM_PARTIAL; 513 nskb->ip_summed = CHECKSUM_PARTIAL;
515 nskb->csum_start = (skb_transport_header(nskb) - 514 nskb->csum_start = (skb_transport_header(nskb) -
516 nskb->head); 515 nskb->head);
517 nskb->csum_offset = offsetof(struct sctphdr, checksum); 516 nskb->csum_offset = offsetof(struct sctphdr, checksum);
518 } else {
519 nskb->ip_summed = CHECKSUM_UNNECESSARY;
520 } 517 }
521 } 518 }
522 519
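The restructured branch above picks between two checksum strategies: if the output device lacks NETIF_F_SCTP_CSUM, the stack computes the CRC32-C itself; otherwise it marks the skb CHECKSUM_PARTIAL and records where the checksum field sits so the hardware can fill it in. A condensed sketch of the two paths, using only helpers visible in this hunk (an illustration, not the patch text itself):

	if (!(dst->dev->features & NETIF_F_SCTP_CSUM)) {
		/* Software fallback: CRC32-C over the common header and chunks */
		__u32 crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len);

		sh->checksum = sctp_end_cksum(crc32);
	} else {
		/* Offload: tell the device where the checksum field lives */
		nskb->ip_summed = CHECKSUM_PARTIAL;
		nskb->csum_start = skb_transport_header(nskb) - nskb->head;
		nskb->csum_offset = offsetof(struct sctphdr, checksum);
	}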
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 1c88c8911dc..a6d27bf563a 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -754,6 +754,16 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
754 */ 754 */
755 755
756 list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) { 756 list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
757 /* RFC 5061, 5.3
758 * F1) This means that until such time as the ASCONF
759 * containing the add is acknowledged, the sender MUST
760 * NOT use the new IP address as a source for ANY SCTP
761 * packet except on carrying an ASCONF Chunk.
762 */
763 if (asoc->src_out_of_asoc_ok &&
764 chunk->chunk_hdr->type != SCTP_CID_ASCONF)
765 continue;
766
757 list_del_init(&chunk->list); 767 list_del_init(&chunk->list);
758 768
759 /* Pick the right transport to use. */ 769 /* Pick the right transport to use. */
@@ -881,6 +891,9 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
881 } 891 }
882 } 892 }
883 893
894 if (q->asoc->src_out_of_asoc_ok)
895 goto sctp_flush_out;
896
884 /* Is it OK to send data chunks? */ 897 /* Is it OK to send data chunks? */
885 switch (asoc->state) { 898 switch (asoc->state) {
886 case SCTP_STATE_COOKIE_ECHOED: 899 case SCTP_STATE_COOKIE_ECHOED:
@@ -1582,6 +1595,8 @@ static void sctp_check_transmitted(struct sctp_outq *q,
1582#endif /* SCTP_DEBUG */ 1595#endif /* SCTP_DEBUG */
1583 if (transport) { 1596 if (transport) {
1584 if (bytes_acked) { 1597 if (bytes_acked) {
1598 struct sctp_association *asoc = transport->asoc;
1599
1585 /* We may have counted DATA that was migrated 1600 /* We may have counted DATA that was migrated
1586 * to this transport due to DEL-IP operation. 1601 * to this transport due to DEL-IP operation.
1587			 * Subtract those bytes, since they were never	1602			 * Subtract those bytes, since they were never
@@ -1600,6 +1615,17 @@ static void sctp_check_transmitted(struct sctp_outq *q,
1600 transport->error_count = 0; 1615 transport->error_count = 0;
1601 transport->asoc->overall_error_count = 0; 1616 transport->asoc->overall_error_count = 0;
1602 1617
1618 /*
1619 * While in SHUTDOWN PENDING, we may have started
1620 * the T5 shutdown guard timer after reaching the
1621 * retransmission limit. Stop that timer as soon
1622 * as the receiver acknowledged any data.
1623 */
1624 if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING &&
1625 del_timer(&asoc->timers
1626 [SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]))
1627 sctp_association_put(asoc);
1628
1603 /* Mark the destination transport address as 1629 /* Mark the destination transport address as
1604 * active if it is not so marked. 1630 * active if it is not so marked.
1605 */ 1631 */
@@ -1629,10 +1655,15 @@ static void sctp_check_transmitted(struct sctp_outq *q,
1629 * A sender is doing zero window probing when the 1655 * A sender is doing zero window probing when the
1630 * receiver's advertised window is zero, and there is 1656 * receiver's advertised window is zero, and there is
1631 * only one data chunk in flight to the receiver. 1657 * only one data chunk in flight to the receiver.
1658 *
1659 * Allow the association to timeout while in SHUTDOWN
1660 * PENDING or SHUTDOWN RECEIVED in case the receiver
1661 * stays in zero window mode forever.
1632 */ 1662 */
1633 if (!q->asoc->peer.rwnd && 1663 if (!q->asoc->peer.rwnd &&
1634 !list_empty(&tlist) && 1664 !list_empty(&tlist) &&
1635 (sack_ctsn+2 == q->asoc->next_tsn)) { 1665 (sack_ctsn+2 == q->asoc->next_tsn) &&
1666 q->asoc->state < SCTP_STATE_SHUTDOWN_PENDING) {
1636 SCTP_DEBUG_PRINTK("%s: SACK received for zero " 1667 SCTP_DEBUG_PRINTK("%s: SACK received for zero "
1637 "window probe: %u\n", 1668 "window probe: %u\n",
1638 __func__, sack_ctsn); 1669 __func__, sack_ctsn);
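The control-chunk filter added at the top of sctp_outq_flush() enforces the RFC 5061 rule quoted in the hunk. Restated as a stand-alone predicate (hypothetical example_ helper; every field and constant is taken from this diff):

/* While an ASCONF adding the out-of-association source address is
 * still unacknowledged, only ASCONF chunks may be transmitted.
 */
static bool example_may_flush_control(const struct sctp_association *asoc,
				      const struct sctp_chunk *chunk)
{
	if (!asoc->src_out_of_asoc_ok)
		return true;
	return chunk->chunk_hdr->type == SCTP_CID_ASCONF;
}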
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 67380a29e2e..91784f44a2e 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -503,7 +503,9 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
503 sctp_v4_dst_saddr(&dst_saddr, fl4, htons(bp->port)); 503 sctp_v4_dst_saddr(&dst_saddr, fl4, htons(bp->port));
504 rcu_read_lock(); 504 rcu_read_lock();
505 list_for_each_entry_rcu(laddr, &bp->address_list, list) { 505 list_for_each_entry_rcu(laddr, &bp->address_list, list) {
506 if (!laddr->valid || (laddr->state != SCTP_ADDR_SRC)) 506 if (!laddr->valid || (laddr->state == SCTP_ADDR_DEL) ||
507 (laddr->state != SCTP_ADDR_SRC &&
508 !asoc->src_out_of_asoc_ok))
507 continue; 509 continue;
508 if (sctp_v4_cmp_addr(&dst_saddr, &laddr->a)) 510 if (sctp_v4_cmp_addr(&dst_saddr, &laddr->a))
509 goto out_unlock; 511 goto out_unlock;
@@ -623,6 +625,143 @@ static void sctp_v4_ecn_capable(struct sock *sk)
623 INET_ECN_xmit(sk); 625 INET_ECN_xmit(sk);
624} 626}
625 627
628void sctp_addr_wq_timeout_handler(unsigned long arg)
629{
630 struct sctp_sockaddr_entry *addrw, *temp;
631 struct sctp_sock *sp;
632
633 spin_lock_bh(&sctp_addr_wq_lock);
634
635 list_for_each_entry_safe(addrw, temp, &sctp_addr_waitq, list) {
636 SCTP_DEBUG_PRINTK_IPADDR("sctp_addrwq_timo_handler: the first ent in wq %p is ",
637 " for cmd %d at entry %p\n", &sctp_addr_waitq, &addrw->a, addrw->state,
638 addrw);
639
640#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
641 /* Now we send an ASCONF for each association */
642		/* Note: we currently don't handle link-local IPv6 addresses */
643 if (addrw->a.sa.sa_family == AF_INET6) {
644 struct in6_addr *in6;
645
646 if (ipv6_addr_type(&addrw->a.v6.sin6_addr) &
647 IPV6_ADDR_LINKLOCAL)
648 goto free_next;
649
650 in6 = (struct in6_addr *)&addrw->a.v6.sin6_addr;
651 if (ipv6_chk_addr(&init_net, in6, NULL, 0) == 0 &&
652 addrw->state == SCTP_ADDR_NEW) {
653 unsigned long timeo_val;
654
655 SCTP_DEBUG_PRINTK("sctp_timo_handler: this is on DAD, trying %d sec later\n",
656 SCTP_ADDRESS_TICK_DELAY);
657 timeo_val = jiffies;
658 timeo_val += msecs_to_jiffies(SCTP_ADDRESS_TICK_DELAY);
659 mod_timer(&sctp_addr_wq_timer, timeo_val);
660 break;
661 }
662 }
663#endif
664 list_for_each_entry(sp, &sctp_auto_asconf_splist, auto_asconf_list) {
665 struct sock *sk;
666
667 sk = sctp_opt2sk(sp);
668 /* ignore bound-specific endpoints */
669 if (!sctp_is_ep_boundall(sk))
670 continue;
671 sctp_bh_lock_sock(sk);
672 if (sctp_asconf_mgmt(sp, addrw) < 0)
673 SCTP_DEBUG_PRINTK("sctp_addrwq_timo_handler: sctp_asconf_mgmt failed\n");
674 sctp_bh_unlock_sock(sk);
675 }
676free_next:
677 list_del(&addrw->list);
678 kfree(addrw);
679 }
680 spin_unlock_bh(&sctp_addr_wq_lock);
681}
682
683static void sctp_free_addr_wq(void)
684{
685 struct sctp_sockaddr_entry *addrw;
686 struct sctp_sockaddr_entry *temp;
687
688 spin_lock_bh(&sctp_addr_wq_lock);
689 del_timer(&sctp_addr_wq_timer);
690 list_for_each_entry_safe(addrw, temp, &sctp_addr_waitq, list) {
691 list_del(&addrw->list);
692 kfree(addrw);
693 }
694 spin_unlock_bh(&sctp_addr_wq_lock);
695}
696
697/* lookup the entry for the same address in the addr_waitq
698 * sctp_addr_wq MUST be locked
699 */
700static struct sctp_sockaddr_entry *sctp_addr_wq_lookup(struct sctp_sockaddr_entry *addr)
701{
702 struct sctp_sockaddr_entry *addrw;
703
704 list_for_each_entry(addrw, &sctp_addr_waitq, list) {
705 if (addrw->a.sa.sa_family != addr->a.sa.sa_family)
706 continue;
707 if (addrw->a.sa.sa_family == AF_INET) {
708 if (addrw->a.v4.sin_addr.s_addr ==
709 addr->a.v4.sin_addr.s_addr)
710 return addrw;
711 } else if (addrw->a.sa.sa_family == AF_INET6) {
712 if (ipv6_addr_equal(&addrw->a.v6.sin6_addr,
713 &addr->a.v6.sin6_addr))
714 return addrw;
715 }
716 }
717 return NULL;
718}
719
720void sctp_addr_wq_mgmt(struct sctp_sockaddr_entry *addr, int cmd)
721{
722 struct sctp_sockaddr_entry *addrw;
723 unsigned long timeo_val;
724
725	/* First, we check if an opposite message already exists in the queue.
726	 * If we find such a message, it is removed.
727	 * This operation is a bit stupid, but the DHCP client attaches the
728	 * new address after a couple of additions and deletions of that address.
729 */
730
731 spin_lock_bh(&sctp_addr_wq_lock);
732	/* Offset (cancel out) existing events in addr_wq */
733 addrw = sctp_addr_wq_lookup(addr);
734 if (addrw) {
735 if (addrw->state != cmd) {
736 SCTP_DEBUG_PRINTK_IPADDR("sctp_addr_wq_mgmt offsets existing entry for %d ",
737 " in wq %p\n", addrw->state, &addrw->a,
738 &sctp_addr_waitq);
739 list_del(&addrw->list);
740 kfree(addrw);
741 }
742 spin_unlock_bh(&sctp_addr_wq_lock);
743 return;
744 }
745
746 /* OK, we have to add the new address to the wait queue */
747 addrw = kmemdup(addr, sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
748 if (addrw == NULL) {
749 spin_unlock_bh(&sctp_addr_wq_lock);
750 return;
751 }
752 addrw->state = cmd;
753 list_add_tail(&addrw->list, &sctp_addr_waitq);
754 SCTP_DEBUG_PRINTK_IPADDR("sctp_addr_wq_mgmt add new entry for cmd:%d ",
755 " in wq %p\n", addrw->state, &addrw->a, &sctp_addr_waitq);
756
757 if (!timer_pending(&sctp_addr_wq_timer)) {
758 timeo_val = jiffies;
759 timeo_val += msecs_to_jiffies(SCTP_ADDRESS_TICK_DELAY);
760 mod_timer(&sctp_addr_wq_timer, timeo_val);
761 }
762 spin_unlock_bh(&sctp_addr_wq_lock);
763}
764
626/* Event handler for inet address addition/deletion events. 765/* Event handler for inet address addition/deletion events.
627 * The sctp_local_addr_list needs to be protected by a spin lock since	766 * The sctp_local_addr_list needs to be protected by a spin lock since
628 * multiple notifiers (say IPv4 and IPv6) may be running at the same 767 * multiple notifiers (say IPv4 and IPv6) may be running at the same
@@ -650,6 +789,7 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
650 addr->valid = 1; 789 addr->valid = 1;
651 spin_lock_bh(&sctp_local_addr_lock); 790 spin_lock_bh(&sctp_local_addr_lock);
652 list_add_tail_rcu(&addr->list, &sctp_local_addr_list); 791 list_add_tail_rcu(&addr->list, &sctp_local_addr_list);
792 sctp_addr_wq_mgmt(addr, SCTP_ADDR_NEW);
653 spin_unlock_bh(&sctp_local_addr_lock); 793 spin_unlock_bh(&sctp_local_addr_lock);
654 } 794 }
655 break; 795 break;
@@ -660,6 +800,7 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
660 if (addr->a.sa.sa_family == AF_INET && 800 if (addr->a.sa.sa_family == AF_INET &&
661 addr->a.v4.sin_addr.s_addr == 801 addr->a.v4.sin_addr.s_addr ==
662 ifa->ifa_local) { 802 ifa->ifa_local) {
803 sctp_addr_wq_mgmt(addr, SCTP_ADDR_DEL);
663 found = 1; 804 found = 1;
664 addr->valid = 0; 805 addr->valid = 0;
665 list_del_rcu(&addr->list); 806 list_del_rcu(&addr->list);
@@ -1058,7 +1199,6 @@ SCTP_STATIC __init int sctp_init(void)
1058 int status = -EINVAL; 1199 int status = -EINVAL;
1059 unsigned long goal; 1200 unsigned long goal;
1060 unsigned long limit; 1201 unsigned long limit;
1061 unsigned long nr_pages;
1062 int max_share; 1202 int max_share;
1063 int order; 1203 int order;
1064 1204
@@ -1148,15 +1288,7 @@ SCTP_STATIC __init int sctp_init(void)
1148 /* Initialize handle used for association ids. */ 1288 /* Initialize handle used for association ids. */
1149 idr_init(&sctp_assocs_id); 1289 idr_init(&sctp_assocs_id);
1150 1290
1151 /* Set the pressure threshold to be a fraction of global memory that 1291 limit = nr_free_buffer_pages() / 8;
1152 * is up to 1/2 at 256 MB, decreasing toward zero with the amount of
1153 * memory, with a floor of 128 pages.
1154 * Note this initializes the data in sctpv6_prot too
1155 * Unabashedly stolen from tcp_init
1156 */
1157 nr_pages = totalram_pages - totalhigh_pages;
1158 limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
1159 limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
1160 limit = max(limit, 128UL); 1292 limit = max(limit, 128UL);
1161 sysctl_sctp_mem[0] = limit / 4 * 3; 1293 sysctl_sctp_mem[0] = limit / 4 * 3;
1162 sysctl_sctp_mem[1] = limit; 1294 sysctl_sctp_mem[1] = limit;
@@ -1242,6 +1374,7 @@ SCTP_STATIC __init int sctp_init(void)
1242 /* Disable ADDIP by default. */ 1374 /* Disable ADDIP by default. */
1243 sctp_addip_enable = 0; 1375 sctp_addip_enable = 0;
1244 sctp_addip_noauth = 0; 1376 sctp_addip_noauth = 0;
1377 sctp_default_auto_asconf = 0;
1245 1378
1246 /* Enable PR-SCTP by default. */ 1379 /* Enable PR-SCTP by default. */
1247 sctp_prsctp_enable = 1; 1380 sctp_prsctp_enable = 1;
@@ -1266,6 +1399,13 @@ SCTP_STATIC __init int sctp_init(void)
1266 spin_lock_init(&sctp_local_addr_lock); 1399 spin_lock_init(&sctp_local_addr_lock);
1267 sctp_get_local_addr_list(); 1400 sctp_get_local_addr_list();
1268 1401
1402 /* Initialize the address event list */
1403 INIT_LIST_HEAD(&sctp_addr_waitq);
1404 INIT_LIST_HEAD(&sctp_auto_asconf_splist);
1405 spin_lock_init(&sctp_addr_wq_lock);
1406 sctp_addr_wq_timer.expires = 0;
1407 setup_timer(&sctp_addr_wq_timer, sctp_addr_wq_timeout_handler, 0);
1408
1269 status = sctp_v4_protosw_init(); 1409 status = sctp_v4_protosw_init();
1270 1410
1271 if (status) 1411 if (status)
@@ -1337,6 +1477,7 @@ SCTP_STATIC __exit void sctp_exit(void)
1337 /* Unregister with inet6/inet layers. */ 1477 /* Unregister with inet6/inet layers. */
1338 sctp_v6_del_protocol(); 1478 sctp_v6_del_protocol();
1339 sctp_v4_del_protocol(); 1479 sctp_v4_del_protocol();
1480 sctp_free_addr_wq();
1340 1481
1341 /* Free the control endpoint. */ 1482 /* Free the control endpoint. */
1342 inet_ctl_sock_destroy(sctp_ctl_sock); 1483 inet_ctl_sock_destroy(sctp_ctl_sock);
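The new address waitqueue in protocol.c follows a classic coalesce-and-defer pattern: sctp_addr_wq_mgmt() drops a queued event when an opposite one arrives for the same address (absorbing DHCP-style DEL/NEW flapping) and arms a one-shot timer only when none is pending, so bursts of address events are handled in a single sctp_addr_wq_timeout_handler() run. The enqueue side reduces to this fragment, lifted and trimmed from the hunk above for illustration:

	spin_lock_bh(&sctp_addr_wq_lock);
	/* An opposite NEW/DEL event for the same address cancels out; see
	 * sctp_addr_wq_lookup() above. Otherwise, queue and arm the timer.
	 */
	list_add_tail(&addrw->list, &sctp_addr_waitq);
	if (!timer_pending(&sctp_addr_wq_timer))
		mod_timer(&sctp_addr_wq_timer,
			  jiffies + msecs_to_jiffies(SCTP_ADDRESS_TICK_DELAY));
	spin_unlock_bh(&sctp_addr_wq_lock);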
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 58eb27fed4b..81db4e38535 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2768,11 +2768,12 @@ struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *asoc,
2768 int addr_param_len = 0; 2768 int addr_param_len = 0;
2769 int totallen = 0; 2769 int totallen = 0;
2770 int i; 2770 int i;
2771 int del_pickup = 0;
2771 2772
2772 /* Get total length of all the address parameters. */ 2773 /* Get total length of all the address parameters. */
2773 addr_buf = addrs; 2774 addr_buf = addrs;
2774 for (i = 0; i < addrcnt; i++) { 2775 for (i = 0; i < addrcnt; i++) {
2775 addr = (union sctp_addr *)addr_buf; 2776 addr = addr_buf;
2776 af = sctp_get_af_specific(addr->v4.sin_family); 2777 af = sctp_get_af_specific(addr->v4.sin_family);
2777 addr_param_len = af->to_addr_param(addr, &addr_param); 2778 addr_param_len = af->to_addr_param(addr, &addr_param);
2778 2779
@@ -2780,6 +2781,13 @@ struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *asoc,
2780 totallen += addr_param_len; 2781 totallen += addr_param_len;
2781 2782
2782 addr_buf += af->sockaddr_len; 2783 addr_buf += af->sockaddr_len;
2784 if (asoc->asconf_addr_del_pending && !del_pickup) {
2785			/* reuse the parameter length from the same-scope one */
2786 totallen += paramlen;
2787 totallen += addr_param_len;
2788 del_pickup = 1;
2789 SCTP_DEBUG_PRINTK("mkasconf_update_ip: picked same-scope del_pending addr, totallen for all addresses is %d\n", totallen);
2790 }
2783 } 2791 }
2784 2792
2785 /* Create an asconf chunk with the required length. */ 2793 /* Create an asconf chunk with the required length. */
@@ -2790,7 +2798,7 @@ struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *asoc,
2790 /* Add the address parameters to the asconf chunk. */ 2798 /* Add the address parameters to the asconf chunk. */
2791 addr_buf = addrs; 2799 addr_buf = addrs;
2792 for (i = 0; i < addrcnt; i++) { 2800 for (i = 0; i < addrcnt; i++) {
2793 addr = (union sctp_addr *)addr_buf; 2801 addr = addr_buf;
2794 af = sctp_get_af_specific(addr->v4.sin_family); 2802 af = sctp_get_af_specific(addr->v4.sin_family);
2795 addr_param_len = af->to_addr_param(addr, &addr_param); 2803 addr_param_len = af->to_addr_param(addr, &addr_param);
2796 param.param_hdr.type = flags; 2804 param.param_hdr.type = flags;
@@ -2802,6 +2810,17 @@ struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *asoc,
2802 2810
2803 addr_buf += af->sockaddr_len; 2811 addr_buf += af->sockaddr_len;
2804 } 2812 }
2813 if (flags == SCTP_PARAM_ADD_IP && del_pickup) {
2814 addr = asoc->asconf_addr_del_pending;
2815 af = sctp_get_af_specific(addr->v4.sin_family);
2816 addr_param_len = af->to_addr_param(addr, &addr_param);
2817 param.param_hdr.type = SCTP_PARAM_DEL_IP;
2818 param.param_hdr.length = htons(paramlen + addr_param_len);
2819 param.crr_id = i;
2820
2821 sctp_addto_chunk(retval, paramlen, &param);
2822 sctp_addto_chunk(retval, addr_param_len, &addr_param);
2823 }
2805 return retval; 2824 return retval;
2806} 2825}
2807 2826
@@ -2939,8 +2958,7 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
2939 union sctp_addr addr; 2958 union sctp_addr addr;
2940 union sctp_addr_param *addr_param; 2959 union sctp_addr_param *addr_param;
2941 2960
2942 addr_param = (union sctp_addr_param *) 2961 addr_param = (void *)asconf_param + sizeof(sctp_addip_param_t);
2943 ((void *)asconf_param + sizeof(sctp_addip_param_t));
2944 2962
2945 if (asconf_param->param_hdr.type != SCTP_PARAM_ADD_IP && 2963 if (asconf_param->param_hdr.type != SCTP_PARAM_ADD_IP &&
2946 asconf_param->param_hdr.type != SCTP_PARAM_DEL_IP && 2964 asconf_param->param_hdr.type != SCTP_PARAM_DEL_IP &&
@@ -3014,7 +3032,7 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
3014 * an Error Cause TLV set to the new error code 'Request to 3032 * an Error Cause TLV set to the new error code 'Request to
3015 * Delete Source IP Address' 3033 * Delete Source IP Address'
3016 */ 3034 */
3017 if (sctp_cmp_addr_exact(sctp_source(asconf), &addr)) 3035 if (sctp_cmp_addr_exact(&asconf->source, &addr))
3018 return SCTP_ERROR_DEL_SRC_IP; 3036 return SCTP_ERROR_DEL_SRC_IP;
3019 3037
3020 /* Section 4.2.2 3038 /* Section 4.2.2
@@ -3125,7 +3143,7 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
3125 * asconf parameter. 3143 * asconf parameter.
3126 */ 3144 */
3127 length = ntohs(addr_param->p.length); 3145 length = ntohs(addr_param->p.length);
3128 asconf_param = (sctp_addip_param_t *)((void *)addr_param + length); 3146 asconf_param = (void *)addr_param + length;
3129 chunk_len -= length; 3147 chunk_len -= length;
3130 3148
3131 /* create an ASCONF_ACK chunk. 3149 /* create an ASCONF_ACK chunk.
@@ -3166,8 +3184,7 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
3166 3184
3167 /* Move to the next ASCONF param. */ 3185 /* Move to the next ASCONF param. */
3168 length = ntohs(asconf_param->param_hdr.length); 3186 length = ntohs(asconf_param->param_hdr.length);
3169 asconf_param = (sctp_addip_param_t *)((void *)asconf_param + 3187 asconf_param = (void *)asconf_param + length;
3170 length);
3171 chunk_len -= length; 3188 chunk_len -= length;
3172 } 3189 }
3173 3190
@@ -3197,8 +3214,7 @@ static void sctp_asconf_param_success(struct sctp_association *asoc,
3197 struct sctp_transport *transport; 3214 struct sctp_transport *transport;
3198 struct sctp_sockaddr_entry *saddr; 3215 struct sctp_sockaddr_entry *saddr;
3199 3216
3200 addr_param = (union sctp_addr_param *) 3217 addr_param = (void *)asconf_param + sizeof(sctp_addip_param_t);
3201 ((void *)asconf_param + sizeof(sctp_addip_param_t));
3202 3218
3203 /* We have checked the packet before, so we do not check again. */ 3219 /* We have checked the packet before, so we do not check again. */
3204 af = sctp_get_af_specific(param_type2af(addr_param->p.type)); 3220 af = sctp_get_af_specific(param_type2af(addr_param->p.type));
@@ -3224,6 +3240,11 @@ static void sctp_asconf_param_success(struct sctp_association *asoc,
3224 case SCTP_PARAM_DEL_IP: 3240 case SCTP_PARAM_DEL_IP:
3225 local_bh_disable(); 3241 local_bh_disable();
3226 sctp_del_bind_addr(bp, &addr); 3242 sctp_del_bind_addr(bp, &addr);
3243 if (asoc->asconf_addr_del_pending != NULL &&
3244 sctp_cmp_addr_exact(asoc->asconf_addr_del_pending, &addr)) {
3245 kfree(asoc->asconf_addr_del_pending);
3246 asoc->asconf_addr_del_pending = NULL;
3247 }
3227 local_bh_enable(); 3248 local_bh_enable();
3228 list_for_each_entry(transport, &asoc->peer.transport_addr_list, 3249 list_for_each_entry(transport, &asoc->peer.transport_addr_list,
3229 transports) { 3250 transports) {
@@ -3278,8 +3299,7 @@ static __be16 sctp_get_asconf_response(struct sctp_chunk *asconf_ack,
3278 return SCTP_ERROR_NO_ERROR; 3299 return SCTP_ERROR_NO_ERROR;
3279 case SCTP_PARAM_ERR_CAUSE: 3300 case SCTP_PARAM_ERR_CAUSE:
3280 length = sizeof(sctp_addip_param_t); 3301 length = sizeof(sctp_addip_param_t);
3281 err_param = (sctp_errhdr_t *) 3302 err_param = (void *)asconf_ack_param + length;
3282 ((void *)asconf_ack_param + length);
3283 asconf_ack_len -= length; 3303 asconf_ack_len -= length;
3284 if (asconf_ack_len > 0) 3304 if (asconf_ack_len > 0)
3285 return err_param->cause; 3305 return err_param->cause;
@@ -3292,8 +3312,7 @@ static __be16 sctp_get_asconf_response(struct sctp_chunk *asconf_ack,
3292 } 3312 }
3293 3313
3294 length = ntohs(asconf_ack_param->param_hdr.length); 3314 length = ntohs(asconf_ack_param->param_hdr.length);
3295 asconf_ack_param = (sctp_addip_param_t *) 3315 asconf_ack_param = (void *)asconf_ack_param + length;
3296 ((void *)asconf_ack_param + length);
3297 asconf_ack_len -= length; 3316 asconf_ack_len -= length;
3298 } 3317 }
3299 3318
@@ -3325,7 +3344,7 @@ int sctp_process_asconf_ack(struct sctp_association *asoc,
3325 * pointer to the first asconf parameter. 3344 * pointer to the first asconf parameter.
3326 */ 3345 */
3327 length = ntohs(addr_param->p.length); 3346 length = ntohs(addr_param->p.length);
3328 asconf_param = (sctp_addip_param_t *)((void *)addr_param + length); 3347 asconf_param = (void *)addr_param + length;
3329 asconf_len -= length; 3348 asconf_len -= length;
3330 3349
3331 /* ADDIP 4.1 3350 /* ADDIP 4.1
@@ -3376,11 +3395,13 @@ int sctp_process_asconf_ack(struct sctp_association *asoc,
3376 * one. 3395 * one.
3377 */ 3396 */
3378 length = ntohs(asconf_param->param_hdr.length); 3397 length = ntohs(asconf_param->param_hdr.length);
3379 asconf_param = (sctp_addip_param_t *)((void *)asconf_param + 3398 asconf_param = (void *)asconf_param + length;
3380 length);
3381 asconf_len -= length; 3399 asconf_len -= length;
3382 } 3400 }
3383 3401
3402 if (no_err && asoc->src_out_of_asoc_ok)
3403 asoc->src_out_of_asoc_ok = 0;
3404
3384 /* Free the cached last sent asconf chunk. */ 3405 /* Free the cached last sent asconf chunk. */
3385 list_del_init(&asconf->transmitted_list); 3406 list_del_init(&asconf->transmitted_list);
3386 sctp_chunk_free(asconf); 3407 sctp_chunk_free(asconf);
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 534c2e5feb0..167c880cf8d 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -670,10 +670,19 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
670 /* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of the 670 /* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of the
671 * HEARTBEAT should clear the error counter of the destination 671 * HEARTBEAT should clear the error counter of the destination
672 * transport address to which the HEARTBEAT was sent. 672 * transport address to which the HEARTBEAT was sent.
673 * The association's overall error count is also cleared.
674 */ 673 */
675 t->error_count = 0; 674 t->error_count = 0;
676 t->asoc->overall_error_count = 0; 675
676 /*
677 * Although RFC4960 specifies that the overall error count must
678 * be cleared when a HEARTBEAT ACK is received, we make an
679 * exception while in SHUTDOWN PENDING. If the peer keeps its
680 * window shut forever, we may never be able to transmit our
681	 * outstanding data and rely on the retransmission limit being reached
682	 * to shut down the association.
683 */
684 if (t->asoc->state != SCTP_STATE_SHUTDOWN_PENDING)
685 t->asoc->overall_error_count = 0;
677 686
678 /* Clear the hb_sent flag to signal that we had a good 687 /* Clear the hb_sent flag to signal that we had a good
679 * acknowledgement. 688 * acknowledgement.
@@ -1201,7 +1210,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1201 int local_cork = 0; 1210 int local_cork = 0;
1202 1211
1203 if (SCTP_EVENT_T_TIMEOUT != event_type) 1212 if (SCTP_EVENT_T_TIMEOUT != event_type)
1204 chunk = (struct sctp_chunk *) event_arg; 1213 chunk = event_arg;
1205 1214
1206 /* Note: This whole file is a huge candidate for rework. 1215 /* Note: This whole file is a huge candidate for rework.
1207 * For example, each command could either have its own handler, so 1216 * For example, each command could either have its own handler, so
@@ -1437,6 +1446,13 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1437 sctp_cmd_setup_t2(commands, asoc, cmd->obj.ptr); 1446 sctp_cmd_setup_t2(commands, asoc, cmd->obj.ptr);
1438 break; 1447 break;
1439 1448
1449 case SCTP_CMD_TIMER_START_ONCE:
1450 timer = &asoc->timers[cmd->obj.to];
1451
1452 if (timer_pending(timer))
1453 break;
1454 /* fall through */
1455
1440 case SCTP_CMD_TIMER_START: 1456 case SCTP_CMD_TIMER_START:
1441 timer = &asoc->timers[cmd->obj.to]; 1457 timer = &asoc->timers[cmd->obj.to];
1442 timeout = asoc->timeouts[cmd->obj.to]; 1458 timeout = asoc->timeouts[cmd->obj.to];
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index a297283154d..49b847b00f9 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -4008,31 +4008,32 @@ sctp_disposition_t sctp_sf_eat_auth(const struct sctp_endpoint *ep,
4008 auth_hdr = (struct sctp_authhdr *)chunk->skb->data; 4008 auth_hdr = (struct sctp_authhdr *)chunk->skb->data;
4009 error = sctp_sf_authenticate(ep, asoc, type, chunk); 4009 error = sctp_sf_authenticate(ep, asoc, type, chunk);
4010 switch (error) { 4010 switch (error) {
4011 case SCTP_IERROR_AUTH_BAD_HMAC: 4011 case SCTP_IERROR_AUTH_BAD_HMAC:
4012 /* Generate the ERROR chunk and discard the rest 4012 /* Generate the ERROR chunk and discard the rest
4013 * of the packet 4013 * of the packet
4014 */ 4014 */
4015 err_chunk = sctp_make_op_error(asoc, chunk, 4015 err_chunk = sctp_make_op_error(asoc, chunk,
4016 SCTP_ERROR_UNSUP_HMAC, 4016 SCTP_ERROR_UNSUP_HMAC,
4017 &auth_hdr->hmac_id, 4017 &auth_hdr->hmac_id,
4018 sizeof(__u16), 0); 4018 sizeof(__u16), 0);
4019 if (err_chunk) { 4019 if (err_chunk) {
4020 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, 4020 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
4021 SCTP_CHUNK(err_chunk)); 4021 SCTP_CHUNK(err_chunk));
4022 } 4022 }
4023 /* Fall Through */ 4023 /* Fall Through */
4024 case SCTP_IERROR_AUTH_BAD_KEYID: 4024 case SCTP_IERROR_AUTH_BAD_KEYID:
4025 case SCTP_IERROR_BAD_SIG: 4025 case SCTP_IERROR_BAD_SIG:
4026 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 4026 return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
4027 break; 4027
4028 case SCTP_IERROR_PROTO_VIOLATION: 4028 case SCTP_IERROR_PROTO_VIOLATION:
4029 return sctp_sf_violation_chunklen(ep, asoc, type, arg, 4029 return sctp_sf_violation_chunklen(ep, asoc, type, arg,
4030 commands); 4030 commands);
4031 break; 4031
4032 case SCTP_IERROR_NOMEM: 4032 case SCTP_IERROR_NOMEM:
4033 return SCTP_DISPOSITION_NOMEM; 4033 return SCTP_DISPOSITION_NOMEM;
4034 default: 4034
4035 break; 4035 default: /* Prevent gcc warnings */
4036 break;
4036 } 4037 }
4037 4038
4038 if (asoc->active_key_id != ntohs(auth_hdr->shkey_id)) { 4039 if (asoc->active_key_id != ntohs(auth_hdr->shkey_id)) {
@@ -5154,7 +5155,7 @@ sctp_disposition_t sctp_sf_do_9_2_start_shutdown(
5154 * The sender of the SHUTDOWN MAY also start an overall guard timer 5155 * The sender of the SHUTDOWN MAY also start an overall guard timer
5155 * 'T5-shutdown-guard' to bound the overall time for shutdown sequence. 5156 * 'T5-shutdown-guard' to bound the overall time for shutdown sequence.
5156 */ 5157 */
5157 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START, 5158 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
5158 SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD)); 5159 SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
5159 5160
5160 if (asoc->autoclose) 5161 if (asoc->autoclose)
@@ -5299,14 +5300,28 @@ sctp_disposition_t sctp_sf_do_6_3_3_rtx(const struct sctp_endpoint *ep,
5299 SCTP_INC_STATS(SCTP_MIB_T3_RTX_EXPIREDS); 5300 SCTP_INC_STATS(SCTP_MIB_T3_RTX_EXPIREDS);
5300 5301
5301 if (asoc->overall_error_count >= asoc->max_retrans) { 5302 if (asoc->overall_error_count >= asoc->max_retrans) {
5302 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, 5303 if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING) {
5303 SCTP_ERROR(ETIMEDOUT)); 5304 /*
5304 /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */ 5305 * We are here likely because the receiver had its rwnd
5305 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, 5306 * closed for a while and we have not been able to
5306 SCTP_PERR(SCTP_ERROR_NO_ERROR)); 5307 * transmit the locally queued data within the maximum
5307 SCTP_INC_STATS(SCTP_MIB_ABORTEDS); 5308 * retransmission attempts limit. Start the T5
5308 SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); 5309 * shutdown guard timer to give the receiver one last
5309 return SCTP_DISPOSITION_DELETE_TCB; 5310 * chance and some additional time to recover before
5311 * aborting.
5312 */
5313 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START_ONCE,
5314 SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
5315 } else {
5316 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
5317 SCTP_ERROR(ETIMEDOUT));
5318 /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
5319 sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
5320 SCTP_PERR(SCTP_ERROR_NO_ERROR));
5321 SCTP_INC_STATS(SCTP_MIB_ABORTEDS);
5322 SCTP_DEC_STATS(SCTP_MIB_CURRESTAB);
5323 return SCTP_DISPOSITION_DELETE_TCB;
5324 }
5310 } 5325 }
5311 5326
5312 /* E1) For the destination address for which the timer 5327 /* E1) For the destination address for which the timer
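SCTP_CMD_TIMER_START_ONCE (handled in the sm_sideeffect.c hunk above) differs from a plain SCTP_CMD_TIMER_START only in that it refuses to re-arm a pending timer, so repeated T3-rtx expirations in SHUTDOWN-PENDING cannot keep pushing the T5 guard deadline out. A reduced sketch of that semantics as a hypothetical example_ helper; the matching del_timer()/sctp_association_put() pairing in the outqueue.c hunk shows why a reference is taken when the timer is armed:

static void example_timer_start_once(struct sctp_association *asoc,
				     int to, unsigned long timeout)
{
	struct timer_list *timer = &asoc->timers[to];

	if (timer_pending(timer))
		return;		/* already armed: keep the old deadline */
	timer->expires = jiffies + timeout;
	sctp_association_hold(asoc);	/* dropped when the timer is stopped */
	add_timer(timer);
}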
diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c
index 0338dc6fdc9..7c211a7f90f 100644
--- a/net/sctp/sm_statetable.c
+++ b/net/sctp/sm_statetable.c
@@ -827,7 +827,7 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_
827 /* SCTP_STATE_ESTABLISHED */ \ 827 /* SCTP_STATE_ESTABLISHED */ \
828 TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ 828 TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
829 /* SCTP_STATE_SHUTDOWN_PENDING */ \ 829 /* SCTP_STATE_SHUTDOWN_PENDING */ \
830 TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \ 830 TYPE_SCTP_FUNC(sctp_sf_t5_timer_expire), \
831 /* SCTP_STATE_SHUTDOWN_SENT */ \ 831 /* SCTP_STATE_SHUTDOWN_SENT */ \
832 TYPE_SCTP_FUNC(sctp_sf_t5_timer_expire), \ 832 TYPE_SCTP_FUNC(sctp_sf_t5_timer_expire), \
833 /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ 833 /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 6766913a53e..836aa63ee12 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -476,7 +476,7 @@ static int sctp_bindx_add(struct sock *sk, struct sockaddr *addrs, int addrcnt)
476 /* The list may contain either IPv4 or IPv6 address; 476 /* The list may contain either IPv4 or IPv6 address;
477 * determine the address length for walking thru the list. 477 * determine the address length for walking thru the list.
478 */ 478 */
479 sa_addr = (struct sockaddr *)addr_buf; 479 sa_addr = addr_buf;
480 af = sctp_get_af_specific(sa_addr->sa_family); 480 af = sctp_get_af_specific(sa_addr->sa_family);
481 if (!af) { 481 if (!af) {
482 retval = -EINVAL; 482 retval = -EINVAL;
@@ -555,7 +555,7 @@ static int sctp_send_asconf_add_ip(struct sock *sk,
555 */ 555 */
556 addr_buf = addrs; 556 addr_buf = addrs;
557 for (i = 0; i < addrcnt; i++) { 557 for (i = 0; i < addrcnt; i++) {
558 addr = (union sctp_addr *)addr_buf; 558 addr = addr_buf;
559 af = sctp_get_af_specific(addr->v4.sin_family); 559 af = sctp_get_af_specific(addr->v4.sin_family);
560 if (!af) { 560 if (!af) {
561 retval = -EINVAL; 561 retval = -EINVAL;
@@ -583,22 +583,35 @@ static int sctp_send_asconf_add_ip(struct sock *sk,
583 goto out; 583 goto out;
584 } 584 }
585 585
586 retval = sctp_send_asconf(asoc, chunk);
587 if (retval)
588 goto out;
589
590 /* Add the new addresses to the bind address list with 586 /* Add the new addresses to the bind address list with
591 * use_as_src set to 0. 587 * use_as_src set to 0.
592 */ 588 */
593 addr_buf = addrs; 589 addr_buf = addrs;
594 for (i = 0; i < addrcnt; i++) { 590 for (i = 0; i < addrcnt; i++) {
595 addr = (union sctp_addr *)addr_buf; 591 addr = addr_buf;
596 af = sctp_get_af_specific(addr->v4.sin_family); 592 af = sctp_get_af_specific(addr->v4.sin_family);
597 memcpy(&saveaddr, addr, af->sockaddr_len); 593 memcpy(&saveaddr, addr, af->sockaddr_len);
598 retval = sctp_add_bind_addr(bp, &saveaddr, 594 retval = sctp_add_bind_addr(bp, &saveaddr,
599 SCTP_ADDR_NEW, GFP_ATOMIC); 595 SCTP_ADDR_NEW, GFP_ATOMIC);
600 addr_buf += af->sockaddr_len; 596 addr_buf += af->sockaddr_len;
601 } 597 }
598 if (asoc->src_out_of_asoc_ok) {
599 struct sctp_transport *trans;
600
601 list_for_each_entry(trans,
602 &asoc->peer.transport_addr_list, transports) {
603 /* Clear the source and route cache */
604 dst_release(trans->dst);
605 trans->cwnd = min(4*asoc->pathmtu, max_t(__u32,
606 2*asoc->pathmtu, 4380));
607 trans->ssthresh = asoc->peer.i.a_rwnd;
608 trans->rto = asoc->rto_initial;
609 trans->rtt = trans->srtt = trans->rttvar = 0;
610 sctp_transport_route(trans, NULL,
611 sctp_sk(asoc->base.sk));
612 }
613 }
614 retval = sctp_send_asconf(asoc, chunk);
602 } 615 }
603 616
604out: 617out:
@@ -646,7 +659,7 @@ static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt)
646 goto err_bindx_rem; 659 goto err_bindx_rem;
647 } 660 }
648 661
649 sa_addr = (union sctp_addr *)addr_buf; 662 sa_addr = addr_buf;
650 af = sctp_get_af_specific(sa_addr->sa.sa_family); 663 af = sctp_get_af_specific(sa_addr->sa.sa_family);
651 if (!af) { 664 if (!af) {
652 retval = -EINVAL; 665 retval = -EINVAL;
@@ -715,7 +728,9 @@ static int sctp_send_asconf_del_ip(struct sock *sk,
715 struct sctp_sockaddr_entry *saddr; 728 struct sctp_sockaddr_entry *saddr;
716 int i; 729 int i;
717 int retval = 0; 730 int retval = 0;
731 int stored = 0;
718 732
733 chunk = NULL;
719 if (!sctp_addip_enable) 734 if (!sctp_addip_enable)
720 return retval; 735 return retval;
721 736
@@ -743,7 +758,7 @@ static int sctp_send_asconf_del_ip(struct sock *sk,
743 */ 758 */
744 addr_buf = addrs; 759 addr_buf = addrs;
745 for (i = 0; i < addrcnt; i++) { 760 for (i = 0; i < addrcnt; i++) {
746 laddr = (union sctp_addr *)addr_buf; 761 laddr = addr_buf;
747 af = sctp_get_af_specific(laddr->v4.sin_family); 762 af = sctp_get_af_specific(laddr->v4.sin_family);
748 if (!af) { 763 if (!af) {
749 retval = -EINVAL; 764 retval = -EINVAL;
@@ -766,8 +781,37 @@ static int sctp_send_asconf_del_ip(struct sock *sk,
766 bp = &asoc->base.bind_addr; 781 bp = &asoc->base.bind_addr;
767 laddr = sctp_find_unmatch_addr(bp, (union sctp_addr *)addrs, 782 laddr = sctp_find_unmatch_addr(bp, (union sctp_addr *)addrs,
768 addrcnt, sp); 783 addrcnt, sp);
769 if (!laddr) 784 if ((laddr == NULL) && (addrcnt == 1)) {
770 continue; 785 if (asoc->asconf_addr_del_pending)
786 continue;
787 asoc->asconf_addr_del_pending =
788 kzalloc(sizeof(union sctp_addr), GFP_ATOMIC);
789 if (asoc->asconf_addr_del_pending == NULL) {
790 retval = -ENOMEM;
791 goto out;
792 }
793 asoc->asconf_addr_del_pending->sa.sa_family =
794 addrs->sa_family;
795 asoc->asconf_addr_del_pending->v4.sin_port =
796 htons(bp->port);
797 if (addrs->sa_family == AF_INET) {
798 struct sockaddr_in *sin;
799
800 sin = (struct sockaddr_in *)addrs;
801 asoc->asconf_addr_del_pending->v4.sin_addr.s_addr = sin->sin_addr.s_addr;
802 } else if (addrs->sa_family == AF_INET6) {
803 struct sockaddr_in6 *sin6;
804
805 sin6 = (struct sockaddr_in6 *)addrs;
806 ipv6_addr_copy(&asoc->asconf_addr_del_pending->v6.sin6_addr, &sin6->sin6_addr);
807 }
808 SCTP_DEBUG_PRINTK_IPADDR("send_asconf_del_ip: keep the last address asoc: %p ",
809 " at %p\n", asoc, asoc->asconf_addr_del_pending,
810 asoc->asconf_addr_del_pending);
811 asoc->src_out_of_asoc_ok = 1;
812 stored = 1;
813 goto skip_mkasconf;
814 }
771 815
772 /* We do not need RCU protection throughout this loop 816 /* We do not need RCU protection throughout this loop
773 * because this is done under a socket lock from the 817 * because this is done under a socket lock from the
@@ -780,12 +824,13 @@ static int sctp_send_asconf_del_ip(struct sock *sk,
780 goto out; 824 goto out;
781 } 825 }
782 826
827skip_mkasconf:
783 /* Reset use_as_src flag for the addresses in the bind address 828 /* Reset use_as_src flag for the addresses in the bind address
784 * list that are to be deleted. 829 * list that are to be deleted.
785 */ 830 */
786 addr_buf = addrs; 831 addr_buf = addrs;
787 for (i = 0; i < addrcnt; i++) { 832 for (i = 0; i < addrcnt; i++) {
788 laddr = (union sctp_addr *)addr_buf; 833 laddr = addr_buf;
789 af = sctp_get_af_specific(laddr->v4.sin_family); 834 af = sctp_get_af_specific(laddr->v4.sin_family);
790 list_for_each_entry(saddr, &bp->address_list, list) { 835 list_for_each_entry(saddr, &bp->address_list, list) {
791 if (sctp_cmp_addr_exact(&saddr->a, laddr)) 836 if (sctp_cmp_addr_exact(&saddr->a, laddr))
@@ -805,12 +850,37 @@ static int sctp_send_asconf_del_ip(struct sock *sk,
805 sctp_sk(asoc->base.sk)); 850 sctp_sk(asoc->base.sk));
806 } 851 }
807 852
853 if (stored)
854 /* We don't need to transmit ASCONF */
855 continue;
808 retval = sctp_send_asconf(asoc, chunk); 856 retval = sctp_send_asconf(asoc, chunk);
809 } 857 }
810out: 858out:
811 return retval; 859 return retval;
812} 860}
813 861
862/* Propagate address events to the assocs in the endpoint. ep and addr_wq must be locked */
863int sctp_asconf_mgmt(struct sctp_sock *sp, struct sctp_sockaddr_entry *addrw)
864{
865 struct sock *sk = sctp_opt2sk(sp);
866 union sctp_addr *addr;
867 struct sctp_af *af;
868
869 /* It is safe to write port space in caller. */
870 addr = &addrw->a;
871 addr->v4.sin_port = htons(sp->ep->base.bind_addr.port);
872 af = sctp_get_af_specific(addr->sa.sa_family);
873 if (!af)
874 return -EINVAL;
875 if (sctp_verify_addr(sk, addr, af->sockaddr_len))
876 return -EINVAL;
877
878 if (addrw->state == SCTP_ADDR_NEW)
879 return sctp_send_asconf_add_ip(sk, (struct sockaddr *)addr, 1);
880 else
881 return sctp_send_asconf_del_ip(sk, (struct sockaddr *)addr, 1);
882}
883
814/* Helper for tunneling sctp_bindx() requests through sctp_setsockopt() 884/* Helper for tunneling sctp_bindx() requests through sctp_setsockopt()
815 * 885 *
816 * API 8.1 886 * API 8.1
@@ -927,7 +997,7 @@ SCTP_STATIC int sctp_setsockopt_bindx(struct sock* sk,
927 return -EINVAL; 997 return -EINVAL;
928 } 998 }
929 999
930 sa_addr = (struct sockaddr *)addr_buf; 1000 sa_addr = addr_buf;
931 af = sctp_get_af_specific(sa_addr->sa_family); 1001 af = sctp_get_af_specific(sa_addr->sa_family);
932 1002
933 /* If the address family is not supported or if this address 1003 /* If the address family is not supported or if this address
@@ -1018,7 +1088,7 @@ static int __sctp_connect(struct sock* sk,
1018 goto out_free; 1088 goto out_free;
1019 } 1089 }
1020 1090
1021 sa_addr = (union sctp_addr *)addr_buf; 1091 sa_addr = addr_buf;
1022 af = sctp_get_af_specific(sa_addr->sa.sa_family); 1092 af = sctp_get_af_specific(sa_addr->sa.sa_family);
1023 1093
1024 /* If the address family is not supported or if this address 1094 /* If the address family is not supported or if this address
@@ -1384,6 +1454,7 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
1384 struct sctp_endpoint *ep; 1454 struct sctp_endpoint *ep;
1385 struct sctp_association *asoc; 1455 struct sctp_association *asoc;
1386 struct list_head *pos, *temp; 1456 struct list_head *pos, *temp;
1457 unsigned int data_was_unread;
1387 1458
1388 SCTP_DEBUG_PRINTK("sctp_close(sk: 0x%p, timeout:%ld)\n", sk, timeout); 1459 SCTP_DEBUG_PRINTK("sctp_close(sk: 0x%p, timeout:%ld)\n", sk, timeout);
1389 1460
@@ -1393,6 +1464,10 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
1393 1464
1394 ep = sctp_sk(sk)->ep; 1465 ep = sctp_sk(sk)->ep;
1395 1466
1467 /* Clean up any skbs sitting on the receive queue. */
1468 data_was_unread = sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
1469 data_was_unread += sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);
1470
1396 /* Walk all associations on an endpoint. */ 1471 /* Walk all associations on an endpoint. */
1397 list_for_each_safe(pos, temp, &ep->asocs) { 1472 list_for_each_safe(pos, temp, &ep->asocs) {
1398 asoc = list_entry(pos, struct sctp_association, asocs); 1473 asoc = list_entry(pos, struct sctp_association, asocs);
@@ -1410,7 +1485,9 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
1410 } 1485 }
1411 } 1486 }
1412 1487
1413 if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) { 1488 if (data_was_unread || !skb_queue_empty(&asoc->ulpq.lobby) ||
1489 !skb_queue_empty(&asoc->ulpq.reasm) ||
1490 (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)) {
1414 struct sctp_chunk *chunk; 1491 struct sctp_chunk *chunk;
1415 1492
1416 chunk = sctp_make_abort_user(asoc, NULL, 0); 1493 chunk = sctp_make_abort_user(asoc, NULL, 0);
@@ -1420,10 +1497,6 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
1420 sctp_primitive_SHUTDOWN(asoc, NULL); 1497 sctp_primitive_SHUTDOWN(asoc, NULL);
1421 } 1498 }
1422 1499
1423 /* Clean up any skbs sitting on the receive queue. */
1424 sctp_queue_purge_ulpevents(&sk->sk_receive_queue);
1425 sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby);
1426
1427 /* On a TCP-style socket, block for at most linger_time if set. */ 1500 /* On a TCP-style socket, block for at most linger_time if set. */
1428 if (sctp_style(sk, TCP) && timeout) 1501 if (sctp_style(sk, TCP) && timeout)
1429 sctp_wait_for_close(sk, timeout); 1502 sctp_wait_for_close(sk, timeout);
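The reordered close path above now counts unread data before deciding how to tear an association down, with a TCP-like effect: closing while data is still unread (or still buried in the reassembly and ordering queues) aborts the association instead of performing the graceful SHUTDOWN handshake. The decision reduces to this predicate (hypothetical example_ helper built only from fields appearing in this hunk):

static bool example_close_should_abort(const struct sock *sk,
				       const struct sctp_association *asoc,
				       unsigned int data_was_unread)
{
	return data_was_unread ||
	       !skb_queue_empty(&asoc->ulpq.lobby) ||
	       !skb_queue_empty(&asoc->ulpq.reasm) ||
	       (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime);
}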
@@ -2073,10 +2146,33 @@ static int sctp_setsockopt_disable_fragments(struct sock *sk,
2073static int sctp_setsockopt_events(struct sock *sk, char __user *optval, 2146static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
2074 unsigned int optlen) 2147 unsigned int optlen)
2075{ 2148{
2149 struct sctp_association *asoc;
2150 struct sctp_ulpevent *event;
2151
2076 if (optlen > sizeof(struct sctp_event_subscribe)) 2152 if (optlen > sizeof(struct sctp_event_subscribe))
2077 return -EINVAL; 2153 return -EINVAL;
2078 if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen)) 2154 if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen))
2079 return -EFAULT; 2155 return -EFAULT;
2156
2157 /*
2158 * At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT,
2159	 * if there is no data to be sent or retransmitted, the stack will
2160 * immediately send up this notification.
2161 */
2162 if (sctp_ulpevent_type_enabled(SCTP_SENDER_DRY_EVENT,
2163 &sctp_sk(sk)->subscribe)) {
2164 asoc = sctp_id2assoc(sk, 0);
2165
2166 if (asoc && sctp_outq_is_empty(&asoc->outqueue)) {
2167 event = sctp_ulpevent_make_sender_dry_event(asoc,
2168 GFP_ATOMIC);
2169 if (!event)
2170 return -ENOMEM;
2171
2172 sctp_ulpq_tail_event(&asoc->ulpq, event);
2173 }
2174 }
2175
2080 return 0; 2176 return 0;
2081} 2177}
2082 2178
@@ -3187,11 +3283,11 @@ static int sctp_setsockopt_auth_chunk(struct sock *sk,
3187 return -EFAULT; 3283 return -EFAULT;
3188 3284
3189 switch (val.sauth_chunk) { 3285 switch (val.sauth_chunk) {
3190 case SCTP_CID_INIT: 3286 case SCTP_CID_INIT:
3191 case SCTP_CID_INIT_ACK: 3287 case SCTP_CID_INIT_ACK:
3192 case SCTP_CID_SHUTDOWN_COMPLETE: 3288 case SCTP_CID_SHUTDOWN_COMPLETE:
3193 case SCTP_CID_AUTH: 3289 case SCTP_CID_AUTH:
3194 return -EINVAL; 3290 return -EINVAL;
3195 } 3291 }
3196 3292
3197 /* add this chunk id to the endpoint */ 3293 /* add this chunk id to the endpoint */
@@ -3334,6 +3430,46 @@ static int sctp_setsockopt_del_key(struct sock *sk,
3334 3430
3335} 3431}
3336 3432
3433/*
3434 * 8.1.23 SCTP_AUTO_ASCONF
3435 *
3436 * This option will enable or disable the use of the automatic generation of
3437 * ASCONF chunks to add and delete addresses to an existing association. Note
3438 * that this option has two caveats namely: a) it only affects sockets that
3439 * are bound to all addresses available to the SCTP stack, and b) the system
3440 * administrator may have an overriding control that turns the ASCONF feature
3441 * off no matter what setting the socket option may have.
3442 * This option expects an integer boolean flag, where a non-zero value turns on
3443 * the option, and a zero value turns off the option.
3444 * Note: in this implementation, the socket option overrides the default
3445 * parameter set by sysctl, as the FreeBSD implementation does.
3446 */
3447static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval,
3448 unsigned int optlen)
3449{
3450 int val;
3451 struct sctp_sock *sp = sctp_sk(sk);
3452
3453 if (optlen < sizeof(int))
3454 return -EINVAL;
3455 if (get_user(val, (int __user *)optval))
3456 return -EFAULT;
3457 if (!sctp_is_ep_boundall(sk) && val)
3458 return -EINVAL;
3459 if ((val && sp->do_auto_asconf) || (!val && !sp->do_auto_asconf))
3460 return 0;
3461
3462 if (val == 0 && sp->do_auto_asconf) {
3463 list_del(&sp->auto_asconf_list);
3464 sp->do_auto_asconf = 0;
3465 } else if (val && !sp->do_auto_asconf) {
3466 list_add_tail(&sp->auto_asconf_list,
3467 &sctp_auto_asconf_splist);
3468 sp->do_auto_asconf = 1;
3469 }
3470 return 0;
3471}
3472
3337 3473
3338/* API 6.2 setsockopt(), getsockopt() 3474/* API 6.2 setsockopt(), getsockopt()
3339 * 3475 *
@@ -3481,6 +3617,9 @@ SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname,
3481 case SCTP_AUTH_DELETE_KEY: 3617 case SCTP_AUTH_DELETE_KEY:
3482 retval = sctp_setsockopt_del_key(sk, optval, optlen); 3618 retval = sctp_setsockopt_del_key(sk, optval, optlen);
3483 break; 3619 break;
3620 case SCTP_AUTO_ASCONF:
3621 retval = sctp_setsockopt_auto_asconf(sk, optval, optlen);
3622 break;
3484 default: 3623 default:
3485 retval = -ENOPROTOOPT; 3624 retval = -ENOPROTOOPT;
3486 break; 3625 break;
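Putting the pieces together from userspace: SCTP_AUTO_ASCONF, described in the option comment above, takes a plain integer boolean and is rejected on sockets that are not bound to the wildcard address. A hedged sketch, assuming the SCTP_AUTO_ASCONF constant exported by the kernel's SCTP headers is visible to the application:

#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

static int example_enable_auto_asconf(int fd)
{
	int on = 1;

	/* Fails with EINVAL unless the socket is bound to the wildcard
	 * address (see sctp_setsockopt_auto_asconf above).
	 */
	return setsockopt(fd, IPPROTO_SCTP, SCTP_AUTO_ASCONF,
			  &on, sizeof(on));
}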
@@ -3763,6 +3902,12 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
3763 local_bh_disable(); 3902 local_bh_disable();
3764 percpu_counter_inc(&sctp_sockets_allocated); 3903 percpu_counter_inc(&sctp_sockets_allocated);
3765 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); 3904 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
3905 if (sctp_default_auto_asconf) {
3906 list_add_tail(&sp->auto_asconf_list,
3907 &sctp_auto_asconf_splist);
3908 sp->do_auto_asconf = 1;
3909 } else
3910 sp->do_auto_asconf = 0;
3766 local_bh_enable(); 3911 local_bh_enable();
3767 3912
3768 return 0; 3913 return 0;
@@ -3771,13 +3916,17 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
3771/* Cleanup any SCTP per socket resources. */ 3916/* Cleanup any SCTP per socket resources. */
3772SCTP_STATIC void sctp_destroy_sock(struct sock *sk) 3917SCTP_STATIC void sctp_destroy_sock(struct sock *sk)
3773{ 3918{
3774 struct sctp_endpoint *ep; 3919 struct sctp_sock *sp;
3775 3920
3776 SCTP_DEBUG_PRINTK("sctp_destroy_sock(sk: %p)\n", sk); 3921 SCTP_DEBUG_PRINTK("sctp_destroy_sock(sk: %p)\n", sk);
3777 3922
3778 /* Release our hold on the endpoint. */ 3923 /* Release our hold on the endpoint. */
3779 ep = sctp_sk(sk)->ep; 3924 sp = sctp_sk(sk);
3780 sctp_endpoint_free(ep); 3925 if (sp->do_auto_asconf) {
3926 sp->do_auto_asconf = 0;
3927 list_del(&sp->auto_asconf_list);
3928 }
3929 sctp_endpoint_free(sp->ep);
3781 local_bh_disable(); 3930 local_bh_disable();
3782 percpu_counter_dec(&sctp_sockets_allocated); 3931 percpu_counter_dec(&sctp_sockets_allocated);
3783 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); 3932 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
@@ -5277,6 +5426,28 @@ static int sctp_getsockopt_assoc_number(struct sock *sk, int len,
5277} 5426}
5278 5427
5279/* 5428/*
5429 * 8.1.23 SCTP_AUTO_ASCONF
5430 * See the corresponding setsockopt entry for a description
5431 */
5432static int sctp_getsockopt_auto_asconf(struct sock *sk, int len,
5433 char __user *optval, int __user *optlen)
5434{
5435 int val = 0;
5436
5437 if (len < sizeof(int))
5438 return -EINVAL;
5439
5440 len = sizeof(int);
5441 if (sctp_sk(sk)->do_auto_asconf && sctp_is_ep_boundall(sk))
5442 val = 1;
5443 if (put_user(len, optlen))
5444 return -EFAULT;
5445 if (copy_to_user(optval, &val, len))
5446 return -EFAULT;
5447 return 0;
5448}
5449
5450/*
5280 * 8.2.6. Get the Current Identifiers of Associations 5451 * 8.2.6. Get the Current Identifiers of Associations
5281 * (SCTP_GET_ASSOC_ID_LIST) 5452 * (SCTP_GET_ASSOC_ID_LIST)
5282 * 5453 *
@@ -5460,6 +5631,9 @@ SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
5460 case SCTP_GET_ASSOC_ID_LIST: 5631 case SCTP_GET_ASSOC_ID_LIST:
5461 retval = sctp_getsockopt_assoc_ids(sk, len, optval, optlen); 5632 retval = sctp_getsockopt_assoc_ids(sk, len, optval, optlen);
5462 break; 5633 break;
5634 case SCTP_AUTO_ASCONF:
5635 retval = sctp_getsockopt_auto_asconf(sk, len, optval, optlen);
5636 break;
5463 default: 5637 default:
5464 retval = -ENOPROTOOPT; 5638 retval = -ENOPROTOOPT;
5465 break; 5639 break;
@@ -6512,6 +6686,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
6512 struct sk_buff *skb, *tmp; 6686 struct sk_buff *skb, *tmp;
6513 struct sctp_ulpevent *event; 6687 struct sctp_ulpevent *event;
6514 struct sctp_bind_hashbucket *head; 6688 struct sctp_bind_hashbucket *head;
6689 struct list_head tmplist;
6515 6690
6516 /* Migrate socket buffer sizes and all the socket level options to the 6691 /* Migrate socket buffer sizes and all the socket level options to the
6517 * new socket. 6692 * new socket.
@@ -6519,7 +6694,12 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
6519 newsk->sk_sndbuf = oldsk->sk_sndbuf; 6694 newsk->sk_sndbuf = oldsk->sk_sndbuf;
6520 newsk->sk_rcvbuf = oldsk->sk_rcvbuf; 6695 newsk->sk_rcvbuf = oldsk->sk_rcvbuf;
6521 /* Brute force copy old sctp opt. */ 6696 /* Brute force copy old sctp opt. */
6522 inet_sk_copy_descendant(newsk, oldsk); 6697 if (oldsp->do_auto_asconf) {
6698 memcpy(&tmplist, &newsp->auto_asconf_list, sizeof(tmplist));
6699 inet_sk_copy_descendant(newsk, oldsk);
6700 memcpy(&newsp->auto_asconf_list, &tmplist, sizeof(tmplist));
6701 } else
6702 inet_sk_copy_descendant(newsk, oldsk);
6523 6703
6524 /* Restore the ep value that was overwritten with the above structure 6704 /* Restore the ep value that was overwritten with the above structure
6525 * copy. 6705 * copy.
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 50cb57f0919..6b3952961b8 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -183,6 +183,13 @@ static ctl_table sctp_table[] = {
183 .proc_handler = proc_dointvec, 183 .proc_handler = proc_dointvec,
184 }, 184 },
185 { 185 {
186 .procname = "default_auto_asconf",
187 .data = &sctp_default_auto_asconf,
188 .maxlen = sizeof(int),
189 .mode = 0644,
190 .proc_handler = proc_dointvec,
191 },
192 {
186 .procname = "prsctp_enable", 193 .procname = "prsctp_enable",
187 .data = &sctp_prsctp_enable, 194 .data = &sctp_prsctp_enable,
188 .maxlen = sizeof(int), 195 .maxlen = sizeof(int),
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index e70e5fc8789..8a84017834c 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -1081,9 +1081,19 @@ void sctp_ulpevent_free(struct sctp_ulpevent *event)
1081} 1081}
1082 1082
1083/* Purge the skb lists holding ulpevents. */ 1083/* Purge the skb lists holding ulpevents. */
1084void sctp_queue_purge_ulpevents(struct sk_buff_head *list) 1084unsigned int sctp_queue_purge_ulpevents(struct sk_buff_head *list)
1085{ 1085{
1086 struct sk_buff *skb; 1086 struct sk_buff *skb;
1087 while ((skb = skb_dequeue(list)) != NULL) 1087 unsigned int data_unread = 0;
1088 sctp_ulpevent_free(sctp_skb2event(skb)); 1088
1089 while ((skb = skb_dequeue(list)) != NULL) {
1090 struct sctp_ulpevent *event = sctp_skb2event(skb);
1091
1092 if (!sctp_ulpevent_is_notification(event))
1093 data_unread += skb->len;
1094
1095 sctp_ulpevent_free(event);
1096 }
1097
1098 return data_unread;
1089} 1099}
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index cd6e4aa19db..727e506cacd 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -626,7 +626,7 @@ rpcauth_refreshcred(struct rpc_task *task)
626 if (err < 0) 626 if (err < 0)
627 goto out; 627 goto out;
628 cred = task->tk_rqstp->rq_cred; 628 cred = task->tk_rqstp->rq_cred;
629 }; 629 }
630 dprintk("RPC: %5u refreshing %s cred %p\n", 630 dprintk("RPC: %5u refreshing %s cred %p\n",
631 task->tk_pid, cred->cr_auth->au_ops->au_name, cred); 631 task->tk_pid, cred->cr_auth->au_ops->au_name, cred);
632 632
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 5daf6cc4fae..364eb45e989 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1421,18 +1421,16 @@ gss_wrap_req(struct rpc_task *task,
1421 goto out; 1421 goto out;
1422 } 1422 }
1423 switch (gss_cred->gc_service) { 1423 switch (gss_cred->gc_service) {
1424 case RPC_GSS_SVC_NONE: 1424 case RPC_GSS_SVC_NONE:
1425 gss_wrap_req_encode(encode, rqstp, p, obj); 1425 gss_wrap_req_encode(encode, rqstp, p, obj);
1426 status = 0; 1426 status = 0;
1427 break; 1427 break;
1428 case RPC_GSS_SVC_INTEGRITY: 1428 case RPC_GSS_SVC_INTEGRITY:
1429 status = gss_wrap_req_integ(cred, ctx, encode, 1429 status = gss_wrap_req_integ(cred, ctx, encode, rqstp, p, obj);
1430 rqstp, p, obj); 1430 break;
1431 break; 1431 case RPC_GSS_SVC_PRIVACY:
1432 case RPC_GSS_SVC_PRIVACY: 1432 status = gss_wrap_req_priv(cred, ctx, encode, rqstp, p, obj);
1433 status = gss_wrap_req_priv(cred, ctx, encode, 1433 break;
1434 rqstp, p, obj);
1435 break;
1436 } 1434 }
1437out: 1435out:
1438 gss_put_ctx(ctx); 1436 gss_put_ctx(ctx);
@@ -1531,18 +1529,18 @@ gss_unwrap_resp(struct rpc_task *task,
1531 if (ctx->gc_proc != RPC_GSS_PROC_DATA) 1529 if (ctx->gc_proc != RPC_GSS_PROC_DATA)
1532 goto out_decode; 1530 goto out_decode;
1533 switch (gss_cred->gc_service) { 1531 switch (gss_cred->gc_service) {
1534 case RPC_GSS_SVC_NONE: 1532 case RPC_GSS_SVC_NONE:
1535 break; 1533 break;
1536 case RPC_GSS_SVC_INTEGRITY: 1534 case RPC_GSS_SVC_INTEGRITY:
1537 status = gss_unwrap_resp_integ(cred, ctx, rqstp, &p); 1535 status = gss_unwrap_resp_integ(cred, ctx, rqstp, &p);
1538 if (status) 1536 if (status)
1539 goto out; 1537 goto out;
1540 break; 1538 break;
1541 case RPC_GSS_SVC_PRIVACY: 1539 case RPC_GSS_SVC_PRIVACY:
1542 status = gss_unwrap_resp_priv(cred, ctx, rqstp, &p); 1540 status = gss_unwrap_resp_priv(cred, ctx, rqstp, &p);
1543 if (status) 1541 if (status)
1544 goto out; 1542 goto out;
1545 break; 1543 break;
1546 } 1544 }
1547 /* take into account extra slack for integrity and privacy cases: */ 1545 /* take into account extra slack for integrity and privacy cases: */
1548 cred->cr_auth->au_rslack = cred->cr_auth->au_verfsize + (p - savedp) 1546 cred->cr_auth->au_rslack = cred->cr_auth->au_verfsize + (p - savedp)
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index d3fe866f57a..c5347d29cfb 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -97,8 +97,7 @@ static int
97rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name) 97rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name)
98{ 98{
99 static uint32_t clntid; 99 static uint32_t clntid;
100 struct nameidata nd; 100 struct path path, dir;
101 struct path path;
102 char name[15]; 101 char name[15];
103 struct qstr q = { 102 struct qstr q = {
104 .name = name, 103 .name = name,
@@ -113,7 +112,7 @@ rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name)
113 path.mnt = rpc_get_mount(); 112 path.mnt = rpc_get_mount();
114 if (IS_ERR(path.mnt)) 113 if (IS_ERR(path.mnt))
115 return PTR_ERR(path.mnt); 114 return PTR_ERR(path.mnt);
116 error = vfs_path_lookup(path.mnt->mnt_root, path.mnt, dir_name, 0, &nd); 115 error = vfs_path_lookup(path.mnt->mnt_root, path.mnt, dir_name, 0, &dir);
117 if (error) 116 if (error)
118 goto err; 117 goto err;
119 118
@@ -121,7 +120,7 @@ rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name)
121 q.len = snprintf(name, sizeof(name), "clnt%x", (unsigned int)clntid++); 120 q.len = snprintf(name, sizeof(name), "clnt%x", (unsigned int)clntid++);
122 name[sizeof(name) - 1] = '\0'; 121 name[sizeof(name) - 1] = '\0';
123 q.hash = full_name_hash(q.name, q.len); 122 q.hash = full_name_hash(q.name, q.len);
124 path.dentry = rpc_create_client_dir(nd.path.dentry, &q, clnt); 123 path.dentry = rpc_create_client_dir(dir.dentry, &q, clnt);
125 if (!IS_ERR(path.dentry)) 124 if (!IS_ERR(path.dentry))
126 break; 125 break;
127 error = PTR_ERR(path.dentry); 126 error = PTR_ERR(path.dentry);
@@ -132,11 +131,11 @@ rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name)
132 goto err_path_put; 131 goto err_path_put;
133 } 132 }
134 } 133 }
135 path_put(&nd.path); 134 path_put(&dir);
136 clnt->cl_path = path; 135 clnt->cl_path = path;
137 return 0; 136 return 0;
138err_path_put: 137err_path_put:
139 path_put(&nd.path); 138 path_put(&dir);
140err: 139err:
141 rpc_put_mount(); 140 rpc_put_mount();
142 return error; 141 return error;
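The rpc_setup_pipedir() hunks above are fallout from the VFS lookup API change: vfs_path_lookup() now fills in a plain struct path instead of a struct nameidata, so the caller keeps one fewer object around and releases the result with a symmetric path_put(). A minimal sketch of the pattern, assuming kernel context and the signature shown in the hunk (the helper name is illustrative):

    #include <linux/namei.h>
    #include <linux/path.h>

    /* Illustrative helper: resolve dir_name under mnt, use it, release it. */
    static int lookup_dir_sketch(struct vfsmount *mnt, const char *dir_name)
    {
            struct path dir;
            int error;

            error = vfs_path_lookup(mnt->mnt_root, mnt, dir_name, 0, &dir);
            if (error)
                    return error;
            /* ... use dir.dentry / dir.mnt ... */
            path_put(&dir);         /* drop the reference the lookup took */
            return 0;
    }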
@@ -1665,19 +1664,18 @@ rpc_verify_header(struct rpc_task *task)
1665 if (--len < 0) 1664 if (--len < 0)
1666 goto out_overflow; 1665 goto out_overflow;
1667 switch ((n = ntohl(*p++))) { 1666 switch ((n = ntohl(*p++))) {
1668 case RPC_AUTH_ERROR: 1667 case RPC_AUTH_ERROR:
1669 break; 1668 break;
1670 case RPC_MISMATCH: 1669 case RPC_MISMATCH:
1671 dprintk("RPC: %5u %s: RPC call version " 1670 dprintk("RPC: %5u %s: RPC call version mismatch!\n",
1672 "mismatch!\n", 1671 task->tk_pid, __func__);
1673 task->tk_pid, __func__); 1672 error = -EPROTONOSUPPORT;
1674 error = -EPROTONOSUPPORT; 1673 goto out_err;
1675 goto out_err; 1674 default:
1676 default: 1675 dprintk("RPC: %5u %s: RPC call rejected, "
1677 dprintk("RPC: %5u %s: RPC call rejected, " 1676 "unknown error: %x\n",
1678 "unknown error: %x\n", 1677 task->tk_pid, __func__, n);
1679 task->tk_pid, __func__, n); 1678 goto out_eio;
1680 goto out_eio;
1681 } 1679 }
1682 if (--len < 0) 1680 if (--len < 0)
1683 goto out_overflow; 1681 goto out_overflow;
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 72bc5368396..b181e344132 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -456,13 +456,13 @@ rpc_get_inode(struct super_block *sb, umode_t mode)
456 inode->i_ino = get_next_ino(); 456 inode->i_ino = get_next_ino();
457 inode->i_mode = mode; 457 inode->i_mode = mode;
458 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; 458 inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
459 switch(mode & S_IFMT) { 459 switch (mode & S_IFMT) {
460 case S_IFDIR: 460 case S_IFDIR:
461 inode->i_fop = &simple_dir_operations; 461 inode->i_fop = &simple_dir_operations;
462 inode->i_op = &simple_dir_inode_operations; 462 inode->i_op = &simple_dir_inode_operations;
463 inc_nlink(inode); 463 inc_nlink(inode);
464 default: 464 default:
465 break; 465 break;
466 } 466 }
467 return inode; 467 return inode;
468} 468}
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index c3c232a88d9..a385430c722 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -42,6 +42,7 @@
42#include <linux/sunrpc/svc_xprt.h> 42#include <linux/sunrpc/svc_xprt.h>
43#include <linux/sunrpc/debug.h> 43#include <linux/sunrpc/debug.h>
44#include <linux/sunrpc/rpc_rdma.h> 44#include <linux/sunrpc/rpc_rdma.h>
45#include <linux/interrupt.h>
45#include <linux/sched.h> 46#include <linux/sched.h>
46#include <linux/slab.h> 47#include <linux/slab.h>
47#include <linux/spinlock.h> 48#include <linux/spinlock.h>
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 80f8da344df..28236bab57f 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -47,6 +47,7 @@
47 * o buffer memory 47 * o buffer memory
48 */ 48 */
49 49
50#include <linux/interrupt.h>
50#include <linux/pci.h> /* for Tavor hack below */ 51#include <linux/pci.h> /* for Tavor hack below */
51#include <linux/slab.h> 52#include <linux/slab.h>
52 53
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index fa68d1e9ff4..759b318b5ff 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -552,12 +552,16 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
552 if (likely(!msg_non_seq(buf_msg(buf)))) { 552 if (likely(!msg_non_seq(buf_msg(buf)))) {
553 struct tipc_msg *msg; 553 struct tipc_msg *msg;
554 554
555 assert(tipc_bcast_nmap.count != 0);
556 bcbuf_set_acks(buf, tipc_bcast_nmap.count); 555 bcbuf_set_acks(buf, tipc_bcast_nmap.count);
557 msg = buf_msg(buf); 556 msg = buf_msg(buf);
558 msg_set_non_seq(msg, 1); 557 msg_set_non_seq(msg, 1);
559 msg_set_mc_netid(msg, tipc_net_id); 558 msg_set_mc_netid(msg, tipc_net_id);
560 bcl->stats.sent_info++; 559 bcl->stats.sent_info++;
560
561 if (WARN_ON(!tipc_bcast_nmap.count)) {
562 dump_stack();
563 return 0;
564 }
561 } 565 }
562 566
563 /* Send buffer over bearers until all targets reached */ 567 /* Send buffer over bearers until all targets reached */
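This hunk, together with the core.h hunk below that deletes the assert() macro, converts a fatal sanity check into a recoverable one: tipc's assert() was BUG_ON() in disguise, while WARN_ON() logs a warning with a backtrace and lets the caller fail soft. WARN_ON() already prints a stack trace, so the explicit dump_stack() in the hunk is belt and braces. The pattern, with a hypothetical stand-in type:

    #include <linux/types.h>
    #include <linux/bug.h>

    struct bcast_map { u32 count; }; /* hypothetical stand-in for tipc_bcast_nmap */

    static int map_usable(struct bcast_map *map)
    {
            /* WARN_ON() evaluates to its condition, so it can gate the bailout. */
            if (WARN_ON(!map->count))
                    return 0;       /* drop the buffer instead of crashing */
            return 1;
    }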
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 85209eadfae..85eba9c08ee 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -402,7 +402,6 @@ void tipc_bearer_lock_push(struct tipc_bearer *b_ptr)
402void tipc_continue(struct tipc_bearer *b_ptr) 402void tipc_continue(struct tipc_bearer *b_ptr)
403{ 403{
404 spin_lock_bh(&b_ptr->lock); 404 spin_lock_bh(&b_ptr->lock);
405 b_ptr->continue_count++;
406 if (!list_empty(&b_ptr->cong_links)) 405 if (!list_empty(&b_ptr->cong_links))
407 tipc_k_signal((Handler)tipc_bearer_lock_push, (unsigned long)b_ptr); 406 tipc_k_signal((Handler)tipc_bearer_lock_push, (unsigned long)b_ptr);
408 b_ptr->blocked = 0; 407 b_ptr->blocked = 0;
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index 31d6172b20f..5ad70eff1eb 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -107,7 +107,6 @@ struct media {
107 * @link_req: ptr to (optional) structure making periodic link setup requests 107 * @link_req: ptr to (optional) structure making periodic link setup requests
108 * @links: list of non-congested links associated with bearer 108 * @links: list of non-congested links associated with bearer
109 * @cong_links: list of congested links associated with bearer 109 * @cong_links: list of congested links associated with bearer
110 * @continue_count: # of times bearer has resumed after congestion or blocking
111 * @active: non-zero if bearer structure is represents a bearer 110 * @active: non-zero if bearer structure is represents a bearer
112 * @net_plane: network plane ('A' through 'H') currently associated with bearer 111 * @net_plane: network plane ('A' through 'H') currently associated with bearer
113 * @nodes: indicates which nodes in cluster can be reached through bearer 112 * @nodes: indicates which nodes in cluster can be reached through bearer
@@ -129,7 +128,6 @@ struct tipc_bearer {
129 struct link_req *link_req; 128 struct link_req *link_req;
130 struct list_head links; 129 struct list_head links;
131 struct list_head cong_links; 130 struct list_head cong_links;
132 u32 continue_count;
133 int active; 131 int active;
134 char net_plane; 132 char net_plane;
135 struct tipc_node_map nodes; 133 struct tipc_node_map nodes;
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 436dda1159d..d234a98a460 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -62,12 +62,6 @@ struct tipc_msg; /* msg.h */
62struct print_buf; /* log.h */ 62struct print_buf; /* log.h */
63 63
64/* 64/*
65 * TIPC sanity test macros
66 */
67
68#define assert(i) BUG_ON(!(i))
69
70/*
71 * TIPC system monitoring code 65 * TIPC system monitoring code
72 */ 66 */
73 67
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 5ed4b4f7452..f89570c54f5 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1572,7 +1572,7 @@ static struct sk_buff *link_insert_deferred_queue(struct link *l_ptr,
1572static int link_recv_buf_validate(struct sk_buff *buf) 1572static int link_recv_buf_validate(struct sk_buff *buf)
1573{ 1573{
1574 static u32 min_data_hdr_size[8] = { 1574 static u32 min_data_hdr_size[8] = {
1575 SHORT_H_SIZE, MCAST_H_SIZE, LONG_H_SIZE, DIR_MSG_H_SIZE, 1575 SHORT_H_SIZE, MCAST_H_SIZE, NAMED_H_SIZE, BASIC_H_SIZE,
1576 MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE 1576 MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE
1577 }; 1577 };
1578 1578
@@ -2553,7 +2553,7 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
2553 u32 msg_sz = msg_size(imsg); 2553 u32 msg_sz = msg_size(imsg);
2554 u32 fragm_sz = msg_data_sz(fragm); 2554 u32 fragm_sz = msg_data_sz(fragm);
2555 u32 exp_fragm_cnt = msg_sz/fragm_sz + !!(msg_sz % fragm_sz); 2555 u32 exp_fragm_cnt = msg_sz/fragm_sz + !!(msg_sz % fragm_sz);
2556 u32 max = TIPC_MAX_USER_MSG_SIZE + LONG_H_SIZE; 2556 u32 max = TIPC_MAX_USER_MSG_SIZE + NAMED_H_SIZE;
2557 if (msg_type(imsg) == TIPC_MCAST_MSG) 2557 if (msg_type(imsg) == TIPC_MCAST_MSG)
2558 max = TIPC_MAX_USER_MSG_SIZE + MCAST_H_SIZE; 2558 max = TIPC_MAX_USER_MSG_SIZE + MCAST_H_SIZE;
2559 if (msg_size(imsg) > max) { 2559 if (msg_size(imsg) > max) {
@@ -2882,7 +2882,7 @@ static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
2882 profile_total = 1; 2882 profile_total = 1;
2883 tipc_printf(&pb, " TX profile sample:%u packets average:%u octets\n" 2883 tipc_printf(&pb, " TX profile sample:%u packets average:%u octets\n"
2884 " 0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% " 2884 " 0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% "
2885 "-16354:%u%% -32768:%u%% -66000:%u%%\n", 2885 "-16384:%u%% -32768:%u%% -66000:%u%%\n",
2886 l_ptr->stats.msg_length_counts, 2886 l_ptr->stats.msg_length_counts,
2887 l_ptr->stats.msg_lengths_total / profile_total, 2887 l_ptr->stats.msg_lengths_total / profile_total,
2888 percent(l_ptr->stats.msg_length_profile[0], profile_total), 2888 percent(l_ptr->stats.msg_length_profile[0], profile_total),
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 03e57bf92c7..83d50967910 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -61,10 +61,8 @@ void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type,
61 msg_set_size(m, hsize); 61 msg_set_size(m, hsize);
62 msg_set_prevnode(m, tipc_own_addr); 62 msg_set_prevnode(m, tipc_own_addr);
63 msg_set_type(m, type); 63 msg_set_type(m, type);
64 if (!msg_short(m)) { 64 msg_set_orignode(m, tipc_own_addr);
65 msg_set_orignode(m, tipc_own_addr); 65 msg_set_destnode(m, destnode);
66 msg_set_destnode(m, destnode);
67 }
68} 66}
69 67
70/** 68/**
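With the msg_short() special case gone, tipc_msg_init() stamps the origin and destination node words unconditionally. That is only safe if every header built here is at least 32 bytes (words 6 and 7 hold the node addresses), which appears to be the point of the header-size rework elsewhere in this patch: the remaining callers pass BASIC_H_SIZE or larger. A sketch of the resulting initializer, assuming the era's field helpers:

    void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type,
                       u32 hsize, u32 destnode)
    {
            memset(m, 0, hsize);
            msg_set_version(m);
            msg_set_user(m, user);
            msg_set_hdr_sz(m, hsize);
            msg_set_size(m, hsize);
            msg_set_prevnode(m, tipc_own_addr);
            msg_set_type(m, type);
            msg_set_orignode(m, tipc_own_addr); /* word 6: needs hsize >= 32 */
            msg_set_destnode(m, destnode);      /* word 7 */
    }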
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 8452454731f..d93178f2e85 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -68,10 +68,10 @@
68 * Message header sizes 68 * Message header sizes
69 */ 69 */
70 70
71#define SHORT_H_SIZE 24 /* Connected, in-cluster messages */ 71#define SHORT_H_SIZE 24 /* In-cluster basic payload message */
72#define DIR_MSG_H_SIZE 32 /* Directly addressed messages */ 72#define BASIC_H_SIZE 32 /* Basic payload message */
73#define LONG_H_SIZE 40 /* Named messages */ 73#define NAMED_H_SIZE 40 /* Named payload message */
74#define MCAST_H_SIZE 44 /* Multicast messages */ 74#define MCAST_H_SIZE 44 /* Multicast payload message */
75#define INT_H_SIZE 40 /* Internal messages */ 75#define INT_H_SIZE 40 /* Internal messages */
76#define MIN_H_SIZE 24 /* Smallest legal TIPC header size */ 76#define MIN_H_SIZE 24 /* Smallest legal TIPC header size */
77#define MAX_H_SIZE 60 /* Largest possible TIPC header size */ 77#define MAX_H_SIZE 60 /* Largest possible TIPC header size */
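The renames above keep every value and only sharpen the names: each constant now says what kind of payload message it heads rather than how "long" it is. For reference:

    /* Payload header sizes, values unchanged:
     *   SHORT_H_SIZE    24  in-cluster basic payload   (name kept)
     *   DIR_MSG_H_SIZE  32  -> BASIC_H_SIZE   basic payload
     *   LONG_H_SIZE     40  -> NAMED_H_SIZE   named payload
     *   MCAST_H_SIZE    44  multicast payload          (name kept)
     * Internal traffic (name_distr.c and the port protocol messages)
     * switches from LONG_H_SIZE to INT_H_SIZE (40) further down.
     */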
@@ -311,26 +311,6 @@ static inline void msg_set_seqno(struct tipc_msg *m, u32 n)
311} 311}
312 312
313/* 313/*
314 * TIPC may utilize the "link ack #" and "link seq #" fields of a short
315 * message header to hold the destination node for the message, since the
316 * normal "dest node" field isn't present. This cache is only referenced
317 * when required, so populating the cache of a longer message header is
318 * harmless (as long as the header has the two link sequence fields present).
319 *
320 * Note: Host byte order is OK here, since the info never goes off-card.
321 */
322
323static inline u32 msg_destnode_cache(struct tipc_msg *m)
324{
325 return m->hdr[2];
326}
327
328static inline void msg_set_destnode_cache(struct tipc_msg *m, u32 dnode)
329{
330 m->hdr[2] = dnode;
331}
332
333/*
334 * Words 3-10 314 * Words 3-10
335 */ 315 */
336 316
@@ -377,7 +357,7 @@ static inline void msg_set_mc_netid(struct tipc_msg *m, u32 p)
377 357
378static inline int msg_short(struct tipc_msg *m) 358static inline int msg_short(struct tipc_msg *m)
379{ 359{
380 return msg_hdr_sz(m) == 24; 360 return msg_hdr_sz(m) == SHORT_H_SIZE;
381} 361}
382 362
383static inline u32 msg_orignode(struct tipc_msg *m) 363static inline u32 msg_orignode(struct tipc_msg *m)
@@ -635,7 +615,7 @@ static inline u32 msg_link_selector(struct tipc_msg *m)
635 615
636static inline void msg_set_link_selector(struct tipc_msg *m, u32 n) 616static inline void msg_set_link_selector(struct tipc_msg *m, u32 n)
637{ 617{
638 msg_set_bits(m, 4, 0, 1, (n & 1)); 618 msg_set_bits(m, 4, 0, 1, n);
639} 619}
640 620
641/* 621/*
@@ -659,7 +639,7 @@ static inline u32 msg_probe(struct tipc_msg *m)
659 639
660static inline void msg_set_probe(struct tipc_msg *m, u32 val) 640static inline void msg_set_probe(struct tipc_msg *m, u32 val)
661{ 641{
662 msg_set_bits(m, 5, 0, 1, (val & 1)); 642 msg_set_bits(m, 5, 0, 1, val);
663} 643}
664 644
665static inline char msg_net_plane(struct tipc_msg *m) 645static inline char msg_net_plane(struct tipc_msg *m)
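Dropping the callers' explicit "& 1" is safe because msg_set_bits() masks the value against the field mask itself before shifting; the open-coded masking was redundant. A sketch of the helper as it reads in this era's msg.h (reproduced from memory, so treat as illustrative):

    static inline void msg_set_bits(struct tipc_msg *m, u32 w,
                                    u32 pos, u32 mask, u32 val)
    {
            val = (val & mask) << pos;      /* mask applied here */
            mask = mask << pos;
            m->hdr[w] &= ~htonl(mask);
            m->hdr[w] |= htonl(val);
    }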
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index 80025a1b3bf..cd356e50433 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -94,13 +94,13 @@ static void publ_to_item(struct distr_item *i, struct publication *p)
94 94
95static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest) 95static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest)
96{ 96{
97 struct sk_buff *buf = tipc_buf_acquire(LONG_H_SIZE + size); 97 struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size);
98 struct tipc_msg *msg; 98 struct tipc_msg *msg;
99 99
100 if (buf != NULL) { 100 if (buf != NULL) {
101 msg = buf_msg(buf); 101 msg = buf_msg(buf);
102 tipc_msg_init(msg, NAME_DISTRIBUTOR, type, LONG_H_SIZE, dest); 102 tipc_msg_init(msg, NAME_DISTRIBUTOR, type, INT_H_SIZE, dest);
103 msg_set_size(msg, LONG_H_SIZE + size); 103 msg_set_size(msg, INT_H_SIZE + size);
104 } 104 }
105 return buf; 105 return buf;
106} 106}
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index 205ed4a4e18..46e6b6c2ecc 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -2,7 +2,7 @@
2 * net/tipc/name_table.c: TIPC name table code 2 * net/tipc/name_table.c: TIPC name table code
3 * 3 *
4 * Copyright (c) 2000-2006, Ericsson AB 4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2004-2008, Wind River Systems 5 * Copyright (c) 2004-2008, 2010-2011, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -44,9 +44,7 @@
44static int tipc_nametbl_size = 1024; /* must be a power of 2 */ 44static int tipc_nametbl_size = 1024; /* must be a power of 2 */
45 45
46/** 46/**
47 * struct sub_seq - container for all published instances of a name sequence 47 * struct name_info - name sequence publication info
48 * @lower: name sequence lower bound
49 * @upper: name sequence upper bound
50 * @node_list: circular list of publications made by own node 48 * @node_list: circular list of publications made by own node
51 * @cluster_list: circular list of publications made by own cluster 49 * @cluster_list: circular list of publications made by own cluster
52 * @zone_list: circular list of publications made by own zone 50 * @zone_list: circular list of publications made by own zone
@@ -59,18 +57,29 @@ static int tipc_nametbl_size = 1024; /* must be a power of 2 */
59 * (The cluster and node lists may be empty.) 57 * (The cluster and node lists may be empty.)
60 */ 58 */
61 59
62struct sub_seq { 60struct name_info {
63 u32 lower; 61 struct list_head node_list;
64 u32 upper; 62 struct list_head cluster_list;
65 struct publication *node_list; 63 struct list_head zone_list;
66 struct publication *cluster_list;
67 struct publication *zone_list;
68 u32 node_list_size; 64 u32 node_list_size;
69 u32 cluster_list_size; 65 u32 cluster_list_size;
70 u32 zone_list_size; 66 u32 zone_list_size;
71}; 67};
72 68
73/** 69/**
70 * struct sub_seq - container for all published instances of a name sequence
71 * @lower: name sequence lower bound
72 * @upper: name sequence upper bound
73 * @info: pointer to name sequence publication info
74 */
75
76struct sub_seq {
77 u32 lower;
78 u32 upper;
79 struct name_info *info;
80};
81
82/**
74 * struct name_seq - container for all published instances of a name type 83 * struct name_seq - container for all published instances of a name type
75 * @type: 32 bit 'type' value for name sequence 84 * @type: 32 bit 'type' value for name sequence
76 * @sseq: pointer to dynamically-sized array of sub-sequences of this 'type'; 85 * @sseq: pointer to dynamically-sized array of sub-sequences of this 'type';
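The structural idea in this hunk: the three publication lists move out of struct sub_seq into a separately allocated struct name_info, and sub_seq shrinks to a range plus a pointer. That matters because the sub-sequence table is a flat array that gets compacted with memmove(); a struct list_head embedded directly in a moved array element would be corrupt, since its neighbours still point at the old address. Keeping the heads behind a stable kzalloc'd pointer sidesteps that:

    #include <linux/list.h>

    /* What embedding the heads would look like, and why it is avoided: */
    struct bad_sub_seq {
            u32 lower, upper;
            struct list_head zone_list;     /* would dangle after memmove() */
    };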
@@ -246,6 +255,7 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
246 struct subscription *st; 255 struct subscription *st;
247 struct publication *publ; 256 struct publication *publ;
248 struct sub_seq *sseq; 257 struct sub_seq *sseq;
258 struct name_info *info;
249 int created_subseq = 0; 259 int created_subseq = 0;
250 260
251 sseq = nameseq_find_subseq(nseq, lower); 261 sseq = nameseq_find_subseq(nseq, lower);
@@ -258,6 +268,8 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
258 type, lower, upper); 268 type, lower, upper);
259 return NULL; 269 return NULL;
260 } 270 }
271
272 info = sseq->info;
261 } else { 273 } else {
262 u32 inspos; 274 u32 inspos;
263 struct sub_seq *freesseq; 275 struct sub_seq *freesseq;
@@ -292,6 +304,17 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
292 nseq->alloc *= 2; 304 nseq->alloc *= 2;
293 } 305 }
294 306
307 info = kzalloc(sizeof(*info), GFP_ATOMIC);
308 if (!info) {
309 warn("Cannot publish {%u,%u,%u}, no memory\n",
310 type, lower, upper);
311 return NULL;
312 }
313
314 INIT_LIST_HEAD(&info->node_list);
315 INIT_LIST_HEAD(&info->cluster_list);
316 INIT_LIST_HEAD(&info->zone_list);
317
295 /* Insert new sub-sequence */ 318 /* Insert new sub-sequence */
296 319
297 sseq = &nseq->sseqs[inspos]; 320 sseq = &nseq->sseqs[inspos];
@@ -301,6 +324,7 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
301 nseq->first_free++; 324 nseq->first_free++;
302 sseq->lower = lower; 325 sseq->lower = lower;
303 sseq->upper = upper; 326 sseq->upper = upper;
327 sseq->info = info;
304 created_subseq = 1; 328 created_subseq = 1;
305 } 329 }
306 330
@@ -310,33 +334,17 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
310 if (!publ) 334 if (!publ)
311 return NULL; 335 return NULL;
312 336
313 sseq->zone_list_size++; 337 list_add(&publ->zone_list, &info->zone_list);
314 if (!sseq->zone_list) 338 info->zone_list_size++;
315 sseq->zone_list = publ->zone_list_next = publ;
316 else {
317 publ->zone_list_next = sseq->zone_list->zone_list_next;
318 sseq->zone_list->zone_list_next = publ;
319 }
320 339
321 if (in_own_cluster(node)) { 340 if (in_own_cluster(node)) {
322 sseq->cluster_list_size++; 341 list_add(&publ->cluster_list, &info->cluster_list);
323 if (!sseq->cluster_list) 342 info->cluster_list_size++;
324 sseq->cluster_list = publ->cluster_list_next = publ;
325 else {
326 publ->cluster_list_next =
327 sseq->cluster_list->cluster_list_next;
328 sseq->cluster_list->cluster_list_next = publ;
329 }
330 } 343 }
331 344
332 if (node == tipc_own_addr) { 345 if (node == tipc_own_addr) {
333 sseq->node_list_size++; 346 list_add(&publ->node_list, &info->node_list);
334 if (!sseq->node_list) 347 info->node_list_size++;
335 sseq->node_list = publ->node_list_next = publ;
336 else {
337 publ->node_list_next = sseq->node_list->node_list_next;
338 sseq->node_list->node_list_next = publ;
339 }
340 } 348 }
341 349
342 /* 350 /*
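Publication insert is now standard list plumbing: a new sub-sequence allocates its name_info with GFP_ATOMIC (the name-table lock is held), initializes the three heads, and each publication is hooked on with list_add() while a per-scope counter is bumped. Condensed, using the structures from the hunk above:

    #include <linux/list.h>
    #include <linux/slab.h>

    static struct name_info *name_info_alloc(void)
    {
            struct name_info *info = kzalloc(sizeof(*info), GFP_ATOMIC);

            if (!info)
                    return NULL;
            INIT_LIST_HEAD(&info->node_list);
            INIT_LIST_HEAD(&info->cluster_list);
            INIT_LIST_HEAD(&info->zone_list);
            return info;
    }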
@@ -370,9 +378,8 @@ static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 i
370 u32 node, u32 ref, u32 key) 378 u32 node, u32 ref, u32 key)
371{ 379{
372 struct publication *publ; 380 struct publication *publ;
373 struct publication *curr;
374 struct publication *prev;
375 struct sub_seq *sseq = nameseq_find_subseq(nseq, inst); 381 struct sub_seq *sseq = nameseq_find_subseq(nseq, inst);
382 struct name_info *info;
376 struct sub_seq *free; 383 struct sub_seq *free;
377 struct subscription *s, *st; 384 struct subscription *s, *st;
378 int removed_subseq = 0; 385 int removed_subseq = 0;
@@ -380,96 +387,41 @@ static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 i
380 if (!sseq) 387 if (!sseq)
381 return NULL; 388 return NULL;
382 389
383 /* Remove publication from zone scope list */ 390 info = sseq->info;
384 391
385 prev = sseq->zone_list; 392 /* Locate publication, if it exists */
386 publ = sseq->zone_list->zone_list_next;
387 while ((publ->key != key) || (publ->ref != ref) ||
388 (publ->node && (publ->node != node))) {
389 prev = publ;
390 publ = publ->zone_list_next;
391 if (prev == sseq->zone_list) {
392 393
393 /* Prevent endless loop if publication not found */ 394 list_for_each_entry(publ, &info->zone_list, zone_list) {
394 395 if ((publ->key == key) && (publ->ref == ref) &&
395 return NULL; 396 (!publ->node || (publ->node == node)))
396 } 397 goto found;
397 }
398 if (publ != sseq->zone_list)
399 prev->zone_list_next = publ->zone_list_next;
400 else if (publ->zone_list_next != publ) {
401 prev->zone_list_next = publ->zone_list_next;
402 sseq->zone_list = publ->zone_list_next;
403 } else {
404 sseq->zone_list = NULL;
405 } 398 }
406 sseq->zone_list_size--; 399 return NULL;
400
401found:
402 /* Remove publication from zone scope list */
403
404 list_del(&publ->zone_list);
405 info->zone_list_size--;
407 406
408 /* Remove publication from cluster scope list, if present */ 407 /* Remove publication from cluster scope list, if present */
409 408
410 if (in_own_cluster(node)) { 409 if (in_own_cluster(node)) {
411 prev = sseq->cluster_list; 410 list_del(&publ->cluster_list);
412 curr = sseq->cluster_list->cluster_list_next; 411 info->cluster_list_size--;
413 while (curr != publ) {
414 prev = curr;
415 curr = curr->cluster_list_next;
416 if (prev == sseq->cluster_list) {
417
418 /* Prevent endless loop for malformed list */
419
420 err("Unable to de-list cluster publication\n"
421 "{%u%u}, node=0x%x, ref=%u, key=%u)\n",
422 publ->type, publ->lower, publ->node,
423 publ->ref, publ->key);
424 goto end_cluster;
425 }
426 }
427 if (publ != sseq->cluster_list)
428 prev->cluster_list_next = publ->cluster_list_next;
429 else if (publ->cluster_list_next != publ) {
430 prev->cluster_list_next = publ->cluster_list_next;
431 sseq->cluster_list = publ->cluster_list_next;
432 } else {
433 sseq->cluster_list = NULL;
434 }
435 sseq->cluster_list_size--;
436 } 412 }
437end_cluster:
438 413
439 /* Remove publication from node scope list, if present */ 414 /* Remove publication from node scope list, if present */
440 415
441 if (node == tipc_own_addr) { 416 if (node == tipc_own_addr) {
442 prev = sseq->node_list; 417 list_del(&publ->node_list);
443 curr = sseq->node_list->node_list_next; 418 info->node_list_size--;
444 while (curr != publ) {
445 prev = curr;
446 curr = curr->node_list_next;
447 if (prev == sseq->node_list) {
448
449 /* Prevent endless loop for malformed list */
450
451 err("Unable to de-list node publication\n"
452 "{%u%u}, node=0x%x, ref=%u, key=%u)\n",
453 publ->type, publ->lower, publ->node,
454 publ->ref, publ->key);
455 goto end_node;
456 }
457 }
458 if (publ != sseq->node_list)
459 prev->node_list_next = publ->node_list_next;
460 else if (publ->node_list_next != publ) {
461 prev->node_list_next = publ->node_list_next;
462 sseq->node_list = publ->node_list_next;
463 } else {
464 sseq->node_list = NULL;
465 }
466 sseq->node_list_size--;
467 } 419 }
468end_node:
469 420
470 /* Contract subseq list if no more publications for that subseq */ 421 /* Contract subseq list if no more publications for that subseq */
471 422
472 if (!sseq->zone_list) { 423 if (list_empty(&info->zone_list)) {
424 kfree(info);
473 free = &nseq->sseqs[nseq->first_free--]; 425 free = &nseq->sseqs[nseq->first_free--];
474 memmove(sseq, sseq + 1, (free - (sseq + 1)) * sizeof(*sseq)); 426 memmove(sseq, sseq + 1, (free - (sseq + 1)) * sizeof(*sseq));
475 removed_subseq = 1; 427 removed_subseq = 1;
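Removal shows the real payoff: the old hand-rolled circular walks, complete with "prevent endless loop" guards and the end_cluster/end_node labels, collapse into list_for_each_entry() to find the entry and one list_del() per scope. When the zone list (the superset) empties, the name_info is freed and the sub_seq slot compacted as before. The find-and-unlink shape, with a pared-down publication for illustration:

    #include <linux/list.h>
    #include <linux/types.h>

    struct publ_sketch { u32 ref, key; struct list_head zone_list; };

    static struct publ_sketch *find_unlink(struct list_head *zone,
                                           u32 ref, u32 key)
    {
            struct publ_sketch *p;

            list_for_each_entry(p, zone, zone_list)
                    if (p->ref == ref && p->key == key)
                            goto found;
            return NULL;
    found:
            list_del(&p->zone_list);        /* O(1), no walk needed */
            return p;
    }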
@@ -506,12 +458,12 @@ static void tipc_nameseq_subscribe(struct name_seq *nseq, struct subscription *s
506 return; 458 return;
507 459
508 while (sseq != &nseq->sseqs[nseq->first_free]) { 460 while (sseq != &nseq->sseqs[nseq->first_free]) {
509 struct publication *zl = sseq->zone_list; 461 if (tipc_subscr_overlap(s, sseq->lower, sseq->upper)) {
510 if (zl && tipc_subscr_overlap(s, sseq->lower, sseq->upper)) { 462 struct publication *crs;
511 struct publication *crs = zl; 463 struct name_info *info = sseq->info;
512 int must_report = 1; 464 int must_report = 1;
513 465
514 do { 466 list_for_each_entry(crs, &info->zone_list, zone_list) {
515 tipc_subscr_report_overlap(s, 467 tipc_subscr_report_overlap(s,
516 sseq->lower, 468 sseq->lower,
517 sseq->upper, 469 sseq->upper,
@@ -520,8 +472,7 @@ static void tipc_nameseq_subscribe(struct name_seq *nseq, struct subscription *s
520 crs->node, 472 crs->node,
521 must_report); 473 must_report);
522 must_report = 0; 474 must_report = 0;
523 crs = crs->zone_list_next; 475 }
524 } while (crs != zl);
525 } 476 }
526 sseq++; 477 sseq++;
527 } 478 }
@@ -591,9 +542,10 @@ struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower,
591u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode) 542u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode)
592{ 543{
593 struct sub_seq *sseq; 544 struct sub_seq *sseq;
594 struct publication *publ = NULL; 545 struct name_info *info;
546 struct publication *publ;
595 struct name_seq *seq; 547 struct name_seq *seq;
596 u32 ref; 548 u32 ref = 0;
597 549
598 if (!tipc_in_scope(*destnode, tipc_own_addr)) 550 if (!tipc_in_scope(*destnode, tipc_own_addr))
599 return 0; 551 return 0;
@@ -606,55 +558,57 @@ u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode)
606 if (unlikely(!sseq)) 558 if (unlikely(!sseq))
607 goto not_found; 559 goto not_found;
608 spin_lock_bh(&seq->lock); 560 spin_lock_bh(&seq->lock);
561 info = sseq->info;
609 562
610 /* Closest-First Algorithm: */ 563 /* Closest-First Algorithm: */
611 if (likely(!*destnode)) { 564 if (likely(!*destnode)) {
612 publ = sseq->node_list; 565 if (!list_empty(&info->node_list)) {
613 if (publ) { 566 publ = list_first_entry(&info->node_list,
614 sseq->node_list = publ->node_list_next; 567 struct publication,
615found: 568 node_list);
616 ref = publ->ref; 569 list_move_tail(&publ->node_list,
617 *destnode = publ->node; 570 &info->node_list);
618 spin_unlock_bh(&seq->lock); 571 } else if (!list_empty(&info->cluster_list)) {
619 read_unlock_bh(&tipc_nametbl_lock); 572 publ = list_first_entry(&info->cluster_list,
620 return ref; 573 struct publication,
621 } 574 cluster_list);
622 publ = sseq->cluster_list; 575 list_move_tail(&publ->cluster_list,
623 if (publ) { 576 &info->cluster_list);
624 sseq->cluster_list = publ->cluster_list_next; 577 } else {
625 goto found; 578 publ = list_first_entry(&info->zone_list,
626 } 579 struct publication,
627 publ = sseq->zone_list; 580 zone_list);
628 if (publ) { 581 list_move_tail(&publ->zone_list,
629 sseq->zone_list = publ->zone_list_next; 582 &info->zone_list);
630 goto found;
631 } 583 }
632 } 584 }
633 585
634 /* Round-Robin Algorithm: */ 586 /* Round-Robin Algorithm: */
635 else if (*destnode == tipc_own_addr) { 587 else if (*destnode == tipc_own_addr) {
636 publ = sseq->node_list; 588 if (list_empty(&info->node_list))
637 if (publ) { 589 goto no_match;
638 sseq->node_list = publ->node_list_next; 590 publ = list_first_entry(&info->node_list, struct publication,
639 goto found; 591 node_list);
640 } 592 list_move_tail(&publ->node_list, &info->node_list);
641 } else if (in_own_cluster(*destnode)) { 593 } else if (in_own_cluster(*destnode)) {
642 publ = sseq->cluster_list; 594 if (list_empty(&info->cluster_list))
643 if (publ) { 595 goto no_match;
644 sseq->cluster_list = publ->cluster_list_next; 596 publ = list_first_entry(&info->cluster_list, struct publication,
645 goto found; 597 cluster_list);
646 } 598 list_move_tail(&publ->cluster_list, &info->cluster_list);
647 } else { 599 } else {
648 publ = sseq->zone_list; 600 publ = list_first_entry(&info->zone_list, struct publication,
649 if (publ) { 601 zone_list);
650 sseq->zone_list = publ->zone_list_next; 602 list_move_tail(&publ->zone_list, &info->zone_list);
651 goto found;
652 }
653 } 603 }
604
605 ref = publ->ref;
606 *destnode = publ->node;
607no_match:
654 spin_unlock_bh(&seq->lock); 608 spin_unlock_bh(&seq->lock);
655not_found: 609not_found:
656 read_unlock_bh(&tipc_nametbl_lock); 610 read_unlock_bh(&tipc_nametbl_lock);
657 return 0; 611 return ref;
658} 612}
659 613
660/** 614/**
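tipc_nametbl_translate() keeps its two selection policies but loses all the head-repointing bookkeeping. Closest-first tries the node, cluster and zone lists in that order; whichever list answers, list_first_entry() takes the front publication and list_move_tail() rotates it to the back, so repeated lookups round-robin over the publishers for free. Hit and miss now also funnel through a single return of ref (0 when nothing matched). The rotation idiom, shown for the zone list (the node and cluster variants differ only in the member name):

    #include <linux/list.h>
    #include <linux/types.h>

    struct publ_sketch { u32 ref, node; struct list_head zone_list; };

    static struct publ_sketch *rr_pick(struct list_head *head)
    {
            struct publ_sketch *publ;

            if (list_empty(head))
                    return NULL;
            publ = list_first_entry(head, struct publ_sketch, zone_list);
            list_move_tail(&publ->zone_list, head); /* next caller gets the next one */
            return publ;
    }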
@@ -676,6 +630,7 @@ int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
676 struct name_seq *seq; 630 struct name_seq *seq;
677 struct sub_seq *sseq; 631 struct sub_seq *sseq;
678 struct sub_seq *sseq_stop; 632 struct sub_seq *sseq_stop;
633 struct name_info *info;
679 int res = 0; 634 int res = 0;
680 635
681 read_lock_bh(&tipc_nametbl_lock); 636 read_lock_bh(&tipc_nametbl_lock);
@@ -693,16 +648,13 @@ int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
693 if (sseq->lower > upper) 648 if (sseq->lower > upper)
694 break; 649 break;
695 650
696 publ = sseq->node_list; 651 info = sseq->info;
697 if (publ) { 652 list_for_each_entry(publ, &info->node_list, node_list) {
698 do { 653 if (publ->scope <= limit)
699 if (publ->scope <= limit) 654 tipc_port_list_add(dports, publ->ref);
700 tipc_port_list_add(dports, publ->ref);
701 publ = publ->node_list_next;
702 } while (publ != sseq->node_list);
703 } 655 }
704 656
705 if (sseq->cluster_list_size != sseq->node_list_size) 657 if (info->cluster_list_size != info->node_list_size)
706 res = 1; 658 res = 1;
707 } 659 }
708 660
@@ -840,16 +792,19 @@ static void subseq_list(struct sub_seq *sseq, struct print_buf *buf, u32 depth,
840{ 792{
841 char portIdStr[27]; 793 char portIdStr[27];
842 const char *scope_str[] = {"", " zone", " cluster", " node"}; 794 const char *scope_str[] = {"", " zone", " cluster", " node"};
843 struct publication *publ = sseq->zone_list; 795 struct publication *publ;
796 struct name_info *info;
844 797
845 tipc_printf(buf, "%-10u %-10u ", sseq->lower, sseq->upper); 798 tipc_printf(buf, "%-10u %-10u ", sseq->lower, sseq->upper);
846 799
847 if (depth == 2 || !publ) { 800 if (depth == 2) {
848 tipc_printf(buf, "\n"); 801 tipc_printf(buf, "\n");
849 return; 802 return;
850 } 803 }
851 804
852 do { 805 info = sseq->info;
806
807 list_for_each_entry(publ, &info->zone_list, zone_list) {
853 sprintf(portIdStr, "<%u.%u.%u:%u>", 808 sprintf(portIdStr, "<%u.%u.%u:%u>",
854 tipc_zone(publ->node), tipc_cluster(publ->node), 809 tipc_zone(publ->node), tipc_cluster(publ->node),
855 tipc_node(publ->node), publ->ref); 810 tipc_node(publ->node), publ->ref);
@@ -858,13 +813,9 @@ static void subseq_list(struct sub_seq *sseq, struct print_buf *buf, u32 depth,
858 tipc_printf(buf, "%-10u %s", publ->key, 813 tipc_printf(buf, "%-10u %s", publ->key,
859 scope_str[publ->scope]); 814 scope_str[publ->scope]);
860 } 815 }
861 816 if (!list_is_last(&publ->zone_list, &info->zone_list))
862 publ = publ->zone_list_next; 817 tipc_printf(buf, "\n%33s", " ");
863 if (publ == sseq->zone_list) 818 };
864 break;
865
866 tipc_printf(buf, "\n%33s", " ");
867 } while (1);
868 819
869 tipc_printf(buf, "\n"); 820 tipc_printf(buf, "\n");
870} 821}
diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h
index d228bd68265..62d77e5e902 100644
--- a/net/tipc/name_table.h
+++ b/net/tipc/name_table.h
@@ -2,7 +2,7 @@
2 * net/tipc/name_table.h: Include file for TIPC name table code 2 * net/tipc/name_table.h: Include file for TIPC name table code
3 * 3 *
4 * Copyright (c) 2000-2006, Ericsson AB 4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2004-2005, Wind River Systems 5 * Copyright (c) 2004-2005, 2010-2011, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -61,9 +61,9 @@ struct port_list;
61 * @subscr: subscription to "node down" event (for off-node publications only) 61 * @subscr: subscription to "node down" event (for off-node publications only)
62 * @local_list: adjacent entries in list of publications made by this node 62 * @local_list: adjacent entries in list of publications made by this node
63 * @pport_list: adjacent entries in list of publications made by this port 63 * @pport_list: adjacent entries in list of publications made by this port
64 * @node_list: next matching name seq publication with >= node scope 64 * @node_list: adjacent matching name seq publications with >= node scope
65 * @cluster_list: next matching name seq publication with >= cluster scope 65 * @cluster_list: adjacent matching name seq publications with >= cluster scope
66 * @zone_list: next matching name seq publication with >= zone scope 66 * @zone_list: adjacent matching name seq publications with >= zone scope
67 * 67 *
68 * Note that the node list, cluster list, and zone list are circular lists. 68 * Note that the node list, cluster list, and zone list are circular lists.
69 */ 69 */
@@ -79,9 +79,9 @@ struct publication {
79 struct tipc_node_subscr subscr; 79 struct tipc_node_subscr subscr;
80 struct list_head local_list; 80 struct list_head local_list;
81 struct list_head pport_list; 81 struct list_head pport_list;
82 struct publication *node_list_next; 82 struct list_head node_list;
83 struct publication *cluster_list_next; 83 struct list_head cluster_list;
84 struct publication *zone_list_next; 84 struct list_head zone_list;
85}; 85};
86 86
87 87
diff --git a/net/tipc/port.c b/net/tipc/port.c
index c68dc956a42..54d812a5a4d 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -222,7 +222,7 @@ struct tipc_port *tipc_createport_raw(void *usr_handle,
222 p_ptr->max_pkt = MAX_PKT_DEFAULT; 222 p_ptr->max_pkt = MAX_PKT_DEFAULT;
223 p_ptr->ref = ref; 223 p_ptr->ref = ref;
224 msg = &p_ptr->phdr; 224 msg = &p_ptr->phdr;
225 tipc_msg_init(msg, importance, TIPC_NAMED_MSG, LONG_H_SIZE, 0); 225 tipc_msg_init(msg, importance, TIPC_NAMED_MSG, NAMED_H_SIZE, 0);
226 msg_set_origport(msg, ref); 226 msg_set_origport(msg, ref);
227 INIT_LIST_HEAD(&p_ptr->wait_list); 227 INIT_LIST_HEAD(&p_ptr->wait_list);
228 INIT_LIST_HEAD(&p_ptr->subscription.nodesub_list); 228 INIT_LIST_HEAD(&p_ptr->subscription.nodesub_list);
@@ -327,26 +327,23 @@ int tipc_set_portunreturnable(u32 ref, unsigned int isunrejectable)
327} 327}
328 328
329/* 329/*
330 * port_build_proto_msg(): build a port level protocol 330 * port_build_proto_msg(): create connection protocol message for port
331 * or a connection abortion message. Called with 331 *
332 * tipc_port lock on. 332 * On entry the port must be locked and connected.
333 */ 333 */
334static struct sk_buff *port_build_proto_msg(u32 destport, u32 destnode, 334static struct sk_buff *port_build_proto_msg(struct tipc_port *p_ptr,
335 u32 origport, u32 orignode, 335 u32 type, u32 ack)
336 u32 usr, u32 type, u32 err,
337 u32 ack)
338{ 336{
339 struct sk_buff *buf; 337 struct sk_buff *buf;
340 struct tipc_msg *msg; 338 struct tipc_msg *msg;
341 339
342 buf = tipc_buf_acquire(LONG_H_SIZE); 340 buf = tipc_buf_acquire(INT_H_SIZE);
343 if (buf) { 341 if (buf) {
344 msg = buf_msg(buf); 342 msg = buf_msg(buf);
345 tipc_msg_init(msg, usr, type, LONG_H_SIZE, destnode); 343 tipc_msg_init(msg, CONN_MANAGER, type, INT_H_SIZE,
346 msg_set_errcode(msg, err); 344 port_peernode(p_ptr));
347 msg_set_destport(msg, destport); 345 msg_set_destport(msg, port_peerport(p_ptr));
348 msg_set_origport(msg, origport); 346 msg_set_origport(msg, p_ptr->ref);
349 msg_set_orignode(msg, orignode);
350 msg_set_msgcnt(msg, ack); 347 msg_set_msgcnt(msg, ack);
351 } 348 }
352 return buf; 349 return buf;
@@ -358,45 +355,48 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err)
358 struct sk_buff *rbuf; 355 struct sk_buff *rbuf;
359 struct tipc_msg *rmsg; 356 struct tipc_msg *rmsg;
360 int hdr_sz; 357 int hdr_sz;
361 u32 imp = msg_importance(msg); 358 u32 imp;
362 u32 data_sz = msg_data_sz(msg); 359 u32 data_sz = msg_data_sz(msg);
363 360 u32 src_node;
364 if (data_sz > MAX_REJECT_SIZE) 361 u32 rmsg_sz;
365 data_sz = MAX_REJECT_SIZE;
366 if (msg_connected(msg) && (imp < TIPC_CRITICAL_IMPORTANCE))
367 imp++;
368 362
369 /* discard rejected message if it shouldn't be returned to sender */ 363 /* discard rejected message if it shouldn't be returned to sender */
370 if (msg_errcode(msg) || msg_dest_droppable(msg)) {
371 buf_discard(buf);
372 return data_sz;
373 }
374 364
375 /* construct rejected message */ 365 if (WARN(!msg_isdata(msg),
376 if (msg_mcast(msg)) 366 "attempt to reject message with user=%u", msg_user(msg))) {
377 hdr_sz = MCAST_H_SIZE; 367 dump_stack();
378 else 368 goto exit;
379 hdr_sz = LONG_H_SIZE;
380 rbuf = tipc_buf_acquire(data_sz + hdr_sz);
381 if (rbuf == NULL) {
382 buf_discard(buf);
383 return data_sz;
384 } 369 }
370 if (msg_errcode(msg) || msg_dest_droppable(msg))
371 goto exit;
372
373 /*
374 * construct returned message by copying rejected message header and
375 * data (or subset), then updating header fields that need adjusting
376 */
377
378 hdr_sz = msg_hdr_sz(msg);
379 rmsg_sz = hdr_sz + min_t(u32, data_sz, MAX_REJECT_SIZE);
380
381 rbuf = tipc_buf_acquire(rmsg_sz);
382 if (rbuf == NULL)
383 goto exit;
384
385 rmsg = buf_msg(rbuf); 385 rmsg = buf_msg(rbuf);
386 tipc_msg_init(rmsg, imp, msg_type(msg), hdr_sz, msg_orignode(msg)); 386 skb_copy_to_linear_data(rbuf, msg, rmsg_sz);
387 msg_set_errcode(rmsg, err); 387
388 msg_set_destport(rmsg, msg_origport(msg)); 388 if (msg_connected(rmsg)) {
389 msg_set_origport(rmsg, msg_destport(msg)); 389 imp = msg_importance(rmsg);
390 if (msg_short(msg)) { 390 if (imp < TIPC_CRITICAL_IMPORTANCE)
391 msg_set_orignode(rmsg, tipc_own_addr); 391 msg_set_importance(rmsg, ++imp);
392 /* leave name type & instance as zeroes */
393 } else {
394 msg_set_orignode(rmsg, msg_destnode(msg));
395 msg_set_nametype(rmsg, msg_nametype(msg));
396 msg_set_nameinst(rmsg, msg_nameinst(msg));
397 } 392 }
398 msg_set_size(rmsg, data_sz + hdr_sz); 393 msg_set_non_seq(rmsg, 0);
399 skb_copy_to_linear_data_offset(rbuf, hdr_sz, msg_data(msg), data_sz); 394 msg_set_size(rmsg, rmsg_sz);
395 msg_set_errcode(rmsg, err);
396 msg_set_prevnode(rmsg, tipc_own_addr);
397 msg_swap_words(rmsg, 4, 5);
398 if (!msg_short(rmsg))
399 msg_swap_words(rmsg, 6, 7);
400 400
401 /* send self-abort message when rejecting on a connected port */ 401 /* send self-abort message when rejecting on a connected port */
402 if (msg_connected(msg)) { 402 if (msg_connected(msg)) {
@@ -411,9 +411,15 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err)
411 tipc_net_route_msg(abuf); 411 tipc_net_route_msg(abuf);
412 } 412 }
413 413
414 /* send rejected message */ 414 /* send returned message & dispose of rejected message */
415
416 src_node = msg_prevnode(msg);
417 if (src_node == tipc_own_addr)
418 tipc_port_recv_msg(rbuf);
419 else
420 tipc_link_send(rbuf, src_node, msg_link_selector(rmsg));
421exit:
415 buf_discard(buf); 422 buf_discard(buf);
416 tipc_net_route_msg(rbuf);
417 return data_sz; 423 return data_sz;
418} 424}
419 425
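tipc_reject_msg() stops rebuilding the return header field by field. The rewrite copies the rejected message wholesale (header plus at most MAX_REJECT_SIZE bytes of data) and reverses it in place: bump the importance of connection messages, clear the non-sequenced flag, set the error code, stamp the previous-node word, and swap the origin/destination pairs. In this header layout words 4 and 5 carry the ports and words 6 and 7 the nodes (the node pair exists only in longer headers, hence the msg_short() guard), so the turnaround is two word swaps; the helper is plausibly just:

    /* Sketch of msg_swap_words(); assumed to match this era's msg.h. */
    static inline void msg_swap_words(struct tipc_msg *msg, u32 a, u32 b)
    {
            u32 temp = msg->hdr[a];

            msg->hdr[a] = msg->hdr[b];
            msg->hdr[b] = temp;
    }

Delivery is also short-circuited: a message rejected back to this node goes straight to tipc_port_recv_msg() instead of taking a trip through tipc_net_route_msg().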
@@ -449,14 +455,7 @@ static void port_timeout(unsigned long ref)
449 if (p_ptr->probing_state == PROBING) { 455 if (p_ptr->probing_state == PROBING) {
450 buf = port_build_self_abort_msg(p_ptr, TIPC_ERR_NO_PORT); 456 buf = port_build_self_abort_msg(p_ptr, TIPC_ERR_NO_PORT);
451 } else { 457 } else {
452 buf = port_build_proto_msg(port_peerport(p_ptr), 458 buf = port_build_proto_msg(p_ptr, CONN_PROBE, 0);
453 port_peernode(p_ptr),
454 p_ptr->ref,
455 tipc_own_addr,
456 CONN_MANAGER,
457 CONN_PROBE,
458 TIPC_OK,
459 0);
460 p_ptr->probing_state = PROBING; 459 p_ptr->probing_state = PROBING;
461 k_start_timer(&p_ptr->timer, p_ptr->probing_interval); 460 k_start_timer(&p_ptr->timer, p_ptr->probing_interval);
462 } 461 }
@@ -480,100 +479,94 @@ static void port_handle_node_down(unsigned long ref)
480 479
481static struct sk_buff *port_build_self_abort_msg(struct tipc_port *p_ptr, u32 err) 480static struct sk_buff *port_build_self_abort_msg(struct tipc_port *p_ptr, u32 err)
482{ 481{
483 u32 imp = msg_importance(&p_ptr->phdr); 482 struct sk_buff *buf = port_build_peer_abort_msg(p_ptr, err);
484 483
485 if (!p_ptr->connected) 484 if (buf) {
486 return NULL; 485 struct tipc_msg *msg = buf_msg(buf);
487 if (imp < TIPC_CRITICAL_IMPORTANCE) 486 msg_swap_words(msg, 4, 5);
488 imp++; 487 msg_swap_words(msg, 6, 7);
489 return port_build_proto_msg(p_ptr->ref, 488 }
490 tipc_own_addr, 489 return buf;
491 port_peerport(p_ptr),
492 port_peernode(p_ptr),
493 imp,
494 TIPC_CONN_MSG,
495 err,
496 0);
497} 490}
498 491
499 492
500static struct sk_buff *port_build_peer_abort_msg(struct tipc_port *p_ptr, u32 err) 493static struct sk_buff *port_build_peer_abort_msg(struct tipc_port *p_ptr, u32 err)
501{ 494{
502 u32 imp = msg_importance(&p_ptr->phdr); 495 struct sk_buff *buf;
496 struct tipc_msg *msg;
497 u32 imp;
503 498
504 if (!p_ptr->connected) 499 if (!p_ptr->connected)
505 return NULL; 500 return NULL;
506 if (imp < TIPC_CRITICAL_IMPORTANCE) 501
507 imp++; 502 buf = tipc_buf_acquire(BASIC_H_SIZE);
508 return port_build_proto_msg(port_peerport(p_ptr), 503 if (buf) {
509 port_peernode(p_ptr), 504 msg = buf_msg(buf);
510 p_ptr->ref, 505 memcpy(msg, &p_ptr->phdr, BASIC_H_SIZE);
511 tipc_own_addr, 506 msg_set_hdr_sz(msg, BASIC_H_SIZE);
512 imp, 507 msg_set_size(msg, BASIC_H_SIZE);
513 TIPC_CONN_MSG, 508 imp = msg_importance(msg);
514 err, 509 if (imp < TIPC_CRITICAL_IMPORTANCE)
515 0); 510 msg_set_importance(msg, ++imp);
511 msg_set_errcode(msg, err);
512 }
513 return buf;
516} 514}
517 515
518void tipc_port_recv_proto_msg(struct sk_buff *buf) 516void tipc_port_recv_proto_msg(struct sk_buff *buf)
519{ 517{
520 struct tipc_msg *msg = buf_msg(buf); 518 struct tipc_msg *msg = buf_msg(buf);
521 struct tipc_port *p_ptr = tipc_port_lock(msg_destport(msg)); 519 struct tipc_port *p_ptr;
522 u32 err = TIPC_OK;
523 struct sk_buff *r_buf = NULL; 520 struct sk_buff *r_buf = NULL;
524 struct sk_buff *abort_buf = NULL; 521 u32 orignode = msg_orignode(msg);
525 522 u32 origport = msg_origport(msg);
526 if (!p_ptr) { 523 u32 destport = msg_destport(msg);
527 err = TIPC_ERR_NO_PORT; 524 int wakeable;
528 } else if (p_ptr->connected) { 525
529 if ((port_peernode(p_ptr) != msg_orignode(msg)) || 526 /* Validate connection */
530 (port_peerport(p_ptr) != msg_origport(msg))) { 527
531 err = TIPC_ERR_NO_PORT; 528 p_ptr = tipc_port_lock(destport);
532 } else if (msg_type(msg) == CONN_ACK) { 529 if (!p_ptr || !p_ptr->connected ||
533 int wakeup = tipc_port_congested(p_ptr) && 530 (port_peernode(p_ptr) != orignode) ||
534 p_ptr->congested && 531 (port_peerport(p_ptr) != origport)) {
535 p_ptr->wakeup; 532 r_buf = tipc_buf_acquire(BASIC_H_SIZE);
536 p_ptr->acked += msg_msgcnt(msg); 533 if (r_buf) {
537 if (tipc_port_congested(p_ptr)) 534 msg = buf_msg(r_buf);
538 goto exit; 535 tipc_msg_init(msg, TIPC_HIGH_IMPORTANCE, TIPC_CONN_MSG,
539 p_ptr->congested = 0; 536 BASIC_H_SIZE, orignode);
540 if (!wakeup) 537 msg_set_errcode(msg, TIPC_ERR_NO_PORT);
541 goto exit; 538 msg_set_origport(msg, destport);
542 p_ptr->wakeup(p_ptr); 539 msg_set_destport(msg, origport);
543 goto exit;
544 } 540 }
545 } else if (p_ptr->published) { 541 if (p_ptr)
546 err = TIPC_ERR_NO_PORT; 542 tipc_port_unlock(p_ptr);
547 }
548 if (err) {
549 r_buf = port_build_proto_msg(msg_origport(msg),
550 msg_orignode(msg),
551 msg_destport(msg),
552 tipc_own_addr,
553 TIPC_HIGH_IMPORTANCE,
554 TIPC_CONN_MSG,
555 err,
556 0);
557 goto exit; 543 goto exit;
558 } 544 }
559 545
560 /* All is fine */ 546 /* Process protocol message sent by peer */
561 if (msg_type(msg) == CONN_PROBE) { 547
562 r_buf = port_build_proto_msg(msg_origport(msg), 548 switch (msg_type(msg)) {
563 msg_orignode(msg), 549 case CONN_ACK:
564 msg_destport(msg), 550 wakeable = tipc_port_congested(p_ptr) && p_ptr->congested &&
565 tipc_own_addr, 551 p_ptr->wakeup;
566 CONN_MANAGER, 552 p_ptr->acked += msg_msgcnt(msg);
567 CONN_PROBE_REPLY, 553 if (!tipc_port_congested(p_ptr)) {
568 TIPC_OK, 554 p_ptr->congested = 0;
569 0); 555 if (wakeable)
556 p_ptr->wakeup(p_ptr);
557 }
558 break;
559 case CONN_PROBE:
560 r_buf = port_build_proto_msg(p_ptr, CONN_PROBE_REPLY, 0);
561 break;
562 default:
563 /* CONN_PROBE_REPLY or unrecognized - no action required */
564 break;
570 } 565 }
571 p_ptr->probing_state = CONFIRMED; 566 p_ptr->probing_state = CONFIRMED;
567 tipc_port_unlock(p_ptr);
572exit: 568exit:
573 if (p_ptr)
574 tipc_port_unlock(p_ptr);
575 tipc_net_route_msg(r_buf); 569 tipc_net_route_msg(r_buf);
576 tipc_net_route_msg(abort_buf);
577 buf_discard(buf); 570 buf_discard(buf);
578} 571}
579 572
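The protocol-message handler is restructured into validate-then-dispatch: no such port, port not connected, or a peer mismatch all yield one TIPC_ERR_NO_PORT reply built inline, and the legitimate message types become switch cases. One subtle ordering in CONN_ACK survives from the old code: whether to wake the port owner is sampled before the acknowledgment is applied, so the wakeup fires only when this ack is what cleared the congestion. Condensed from the hunk:

    /* CONN_ACK: sample "was congested and wants waking" before crediting. */
    wakeable = tipc_port_congested(p_ptr) && p_ptr->congested && p_ptr->wakeup;
    p_ptr->acked += msg_msgcnt(msg);
    if (!tipc_port_congested(p_ptr)) {
            p_ptr->congested = 0;
            if (wakeable)
                    p_ptr->wakeup(p_ptr);   /* callback into the owner */
    }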
@@ -889,14 +882,7 @@ void tipc_acknowledge(u32 ref, u32 ack)
889 return; 882 return;
890 if (p_ptr->connected) { 883 if (p_ptr->connected) {
891 p_ptr->conn_unacked -= ack; 884 p_ptr->conn_unacked -= ack;
892 buf = port_build_proto_msg(port_peerport(p_ptr), 885 buf = port_build_proto_msg(p_ptr, CONN_ACK, ack);
893 port_peernode(p_ptr),
894 ref,
895 tipc_own_addr,
896 CONN_MANAGER,
897 CONN_ACK,
898 TIPC_OK,
899 ack);
900 } 886 }
901 tipc_port_unlock(p_ptr); 887 tipc_port_unlock(p_ptr);
902 tipc_net_route_msg(buf); 888 tipc_net_route_msg(buf);
@@ -1140,19 +1126,7 @@ int tipc_shutdown(u32 ref)
1140 if (!p_ptr) 1126 if (!p_ptr)
1141 return -EINVAL; 1127 return -EINVAL;
1142 1128
1143 if (p_ptr->connected) { 1129 buf = port_build_peer_abort_msg(p_ptr, TIPC_CONN_SHUTDOWN);
1144 u32 imp = msg_importance(&p_ptr->phdr);
1145 if (imp < TIPC_CRITICAL_IMPORTANCE)
1146 imp++;
1147 buf = port_build_proto_msg(port_peerport(p_ptr),
1148 port_peernode(p_ptr),
1149 ref,
1150 tipc_own_addr,
1151 imp,
1152 TIPC_CONN_MSG,
1153 TIPC_CONN_SHUTDOWN,
1154 0);
1155 }
1156 tipc_port_unlock(p_ptr); 1130 tipc_port_unlock(p_ptr);
1157 tipc_net_route_msg(buf); 1131 tipc_net_route_msg(buf);
1158 return tipc_disconnect(ref); 1132 return tipc_disconnect(ref);
@@ -1238,7 +1212,7 @@ int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain,
1238 msg_set_type(msg, TIPC_NAMED_MSG); 1212 msg_set_type(msg, TIPC_NAMED_MSG);
1239 msg_set_orignode(msg, tipc_own_addr); 1213 msg_set_orignode(msg, tipc_own_addr);
1240 msg_set_origport(msg, ref); 1214 msg_set_origport(msg, ref);
1241 msg_set_hdr_sz(msg, LONG_H_SIZE); 1215 msg_set_hdr_sz(msg, NAMED_H_SIZE);
1242 msg_set_nametype(msg, name->type); 1216 msg_set_nametype(msg, name->type);
1243 msg_set_nameinst(msg, name->instance); 1217 msg_set_nameinst(msg, name->instance);
1244 msg_set_lookup_scope(msg, tipc_addr_scope(domain)); 1218 msg_set_lookup_scope(msg, tipc_addr_scope(domain));
@@ -1291,7 +1265,7 @@ int tipc_send2port(u32 ref, struct tipc_portid const *dest,
1291 msg_set_origport(msg, ref); 1265 msg_set_origport(msg, ref);
1292 msg_set_destnode(msg, dest->node); 1266 msg_set_destnode(msg, dest->node);
1293 msg_set_destport(msg, dest->ref); 1267 msg_set_destport(msg, dest->ref);
1294 msg_set_hdr_sz(msg, DIR_MSG_H_SIZE); 1268 msg_set_hdr_sz(msg, BASIC_H_SIZE);
1295 1269
1296 if (dest->node == tipc_own_addr) 1270 if (dest->node == tipc_own_addr)
1297 res = tipc_port_recv_sections(p_ptr, num_sect, msg_sect, 1271 res = tipc_port_recv_sections(p_ptr, num_sect, msg_sect,
@@ -1331,13 +1305,13 @@ int tipc_send_buf2port(u32 ref, struct tipc_portid const *dest,
1331 msg_set_origport(msg, ref); 1305 msg_set_origport(msg, ref);
1332 msg_set_destnode(msg, dest->node); 1306 msg_set_destnode(msg, dest->node);
1333 msg_set_destport(msg, dest->ref); 1307 msg_set_destport(msg, dest->ref);
1334 msg_set_hdr_sz(msg, DIR_MSG_H_SIZE); 1308 msg_set_hdr_sz(msg, BASIC_H_SIZE);
1335 msg_set_size(msg, DIR_MSG_H_SIZE + dsz); 1309 msg_set_size(msg, BASIC_H_SIZE + dsz);
1336 if (skb_cow(buf, DIR_MSG_H_SIZE)) 1310 if (skb_cow(buf, BASIC_H_SIZE))
1337 return -ENOMEM; 1311 return -ENOMEM;
1338 1312
1339 skb_push(buf, DIR_MSG_H_SIZE); 1313 skb_push(buf, BASIC_H_SIZE);
1340 skb_copy_to_linear_data(buf, msg, DIR_MSG_H_SIZE); 1314 skb_copy_to_linear_data(buf, msg, BASIC_H_SIZE);
1341 1315
1342 if (dest->node == tipc_own_addr) 1316 if (dest->node == tipc_own_addr)
1343 res = tipc_port_recv_msg(buf); 1317 res = tipc_port_recv_msg(buf);
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 33883739664..adb2eff4a10 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -36,9 +36,6 @@
36 36
37#include <net/sock.h> 37#include <net/sock.h>
38 38
39#include <linux/tipc.h>
40#include <linux/tipc_config.h>
41
42#include "core.h" 39#include "core.h"
43#include "port.h" 40#include "port.h"
44 41
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 0722a25a3a3..ec68e1c05b8 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -808,8 +808,9 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
808 struct net *net = sock_net(sk); 808 struct net *net = sock_net(sk);
809 struct unix_sock *u = unix_sk(sk); 809 struct unix_sock *u = unix_sk(sk);
810 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr; 810 struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
811 char *sun_path = sunaddr->sun_path;
811 struct dentry *dentry = NULL; 812 struct dentry *dentry = NULL;
812 struct nameidata nd; 813 struct path path;
813 int err; 814 int err;
814 unsigned hash; 815 unsigned hash;
815 struct unix_address *addr; 816 struct unix_address *addr;
@@ -845,48 +846,44 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
845 addr->hash = hash ^ sk->sk_type; 846 addr->hash = hash ^ sk->sk_type;
846 atomic_set(&addr->refcnt, 1); 847 atomic_set(&addr->refcnt, 1);
847 848
848 if (sunaddr->sun_path[0]) { 849 if (sun_path[0]) {
849 unsigned int mode; 850 unsigned int mode;
850 err = 0; 851 err = 0;
851 /* 852 /*
852 * Get the parent directory, calculate the hash for last 853 * Get the parent directory, calculate the hash for last
853 * component. 854 * component.
854 */ 855 */
855 err = kern_path_parent(sunaddr->sun_path, &nd); 856 dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
856 if (err)
857 goto out_mknod_parent;
858
859 dentry = lookup_create(&nd, 0);
860 err = PTR_ERR(dentry); 857 err = PTR_ERR(dentry);
861 if (IS_ERR(dentry)) 858 if (IS_ERR(dentry))
862 goto out_mknod_unlock; 859 goto out_mknod_parent;
863 860
864 /* 861 /*
865 * All right, let's create it. 862 * All right, let's create it.
866 */ 863 */
867 mode = S_IFSOCK | 864 mode = S_IFSOCK |
868 (SOCK_INODE(sock)->i_mode & ~current_umask()); 865 (SOCK_INODE(sock)->i_mode & ~current_umask());
869 err = mnt_want_write(nd.path.mnt); 866 err = mnt_want_write(path.mnt);
870 if (err) 867 if (err)
871 goto out_mknod_dput; 868 goto out_mknod_dput;
872 err = security_path_mknod(&nd.path, dentry, mode, 0); 869 err = security_path_mknod(&path, dentry, mode, 0);
873 if (err) 870 if (err)
874 goto out_mknod_drop_write; 871 goto out_mknod_drop_write;
875 err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0); 872 err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
876out_mknod_drop_write: 873out_mknod_drop_write:
877 mnt_drop_write(nd.path.mnt); 874 mnt_drop_write(path.mnt);
878 if (err) 875 if (err)
879 goto out_mknod_dput; 876 goto out_mknod_dput;
880 mutex_unlock(&nd.path.dentry->d_inode->i_mutex); 877 mutex_unlock(&path.dentry->d_inode->i_mutex);
881 dput(nd.path.dentry); 878 dput(path.dentry);
882 nd.path.dentry = dentry; 879 path.dentry = dentry;
883 880
884 addr->hash = UNIX_HASH_SIZE; 881 addr->hash = UNIX_HASH_SIZE;
885 } 882 }
886 883
887 spin_lock(&unix_table_lock); 884 spin_lock(&unix_table_lock);
888 885
889 if (!sunaddr->sun_path[0]) { 886 if (!sun_path[0]) {
890 err = -EADDRINUSE; 887 err = -EADDRINUSE;
891 if (__unix_find_socket_byname(net, sunaddr, addr_len, 888 if (__unix_find_socket_byname(net, sunaddr, addr_len,
892 sk->sk_type, hash)) { 889 sk->sk_type, hash)) {
@@ -897,8 +894,8 @@ out_mknod_drop_write:
897 list = &unix_socket_table[addr->hash]; 894 list = &unix_socket_table[addr->hash];
898 } else { 895 } else {
899 list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)]; 896 list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
900 u->dentry = nd.path.dentry; 897 u->dentry = path.dentry;
901 u->mnt = nd.path.mnt; 898 u->mnt = path.mnt;
902 } 899 }
903 900
904 err = 0; 901 err = 0;
@@ -915,9 +912,8 @@ out:
915 912
916out_mknod_dput: 913out_mknod_dput:
917 dput(dentry); 914 dput(dentry);
918out_mknod_unlock: 915 mutex_unlock(&path.dentry->d_inode->i_mutex);
919 mutex_unlock(&nd.path.dentry->d_inode->i_mutex); 916 path_put(&path);
920 path_put(&nd.path);
921out_mknod_parent: 917out_mknod_parent:
922 if (err == -EEXIST) 918 if (err == -EEXIST)
923 err = -EADDRINUSE; 919 err = -EADDRINUSE;
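unix_bind() drops the two-step kern_path_parent() + lookup_create() dance for kern_path_create(), which resolves the parent, takes its i_mutex and returns the new dentry in one call; that is why the out_mknod_unlock label disappears. The shape of the new sequence, assuming the 3.x-era signature struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path, int is_dir) and with error handling trimmed:

    #include <linux/namei.h>
    #include <linux/mount.h>
    #include <linux/fcntl.h>
    #include <linux/fs.h>

    /* Illustrative: create a socket node at pathname. */
    static int mknod_sock_sketch(const char *pathname, umode_t mode)
    {
            struct path path;
            struct dentry *dentry;
            int err;

            dentry = kern_path_create(AT_FDCWD, pathname, &path, 0);
            if (IS_ERR(dentry))
                    return PTR_ERR(dentry);
            err = mnt_want_write(path.mnt);
            if (!err) {
                    err = vfs_mknod(path.dentry->d_inode, dentry,
                                    mode | S_IFSOCK, 0);
                    mnt_drop_write(path.mnt);
            }
            dput(dentry);
            mutex_unlock(&path.dentry->d_inode->i_mutex);
            path_put(&path);
            return err;
    }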
diff --git a/net/wireless/core.c b/net/wireless/core.c
index c22ef3492ee..880dbe2e6f9 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -366,6 +366,7 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
366 366
367 mutex_init(&rdev->mtx); 367 mutex_init(&rdev->mtx);
368 mutex_init(&rdev->devlist_mtx); 368 mutex_init(&rdev->devlist_mtx);
369 mutex_init(&rdev->sched_scan_mtx);
369 INIT_LIST_HEAD(&rdev->netdev_list); 370 INIT_LIST_HEAD(&rdev->netdev_list);
370 spin_lock_init(&rdev->bss_lock); 371 spin_lock_init(&rdev->bss_lock);
371 INIT_LIST_HEAD(&rdev->bss_list); 372 INIT_LIST_HEAD(&rdev->bss_list);
@@ -701,6 +702,7 @@ void cfg80211_dev_free(struct cfg80211_registered_device *rdev)
701 rfkill_destroy(rdev->rfkill); 702 rfkill_destroy(rdev->rfkill);
702 mutex_destroy(&rdev->mtx); 703 mutex_destroy(&rdev->mtx);
703 mutex_destroy(&rdev->devlist_mtx); 704 mutex_destroy(&rdev->devlist_mtx);
705 mutex_destroy(&rdev->sched_scan_mtx);
704 list_for_each_entry_safe(scan, tmp, &rdev->bss_list, list) 706 list_for_each_entry_safe(scan, tmp, &rdev->bss_list, list)
705 cfg80211_put_bss(&scan->pub); 707 cfg80211_put_bss(&scan->pub);
706 cfg80211_rdev_free_wowlan(rdev); 708 cfg80211_rdev_free_wowlan(rdev);
@@ -737,12 +739,16 @@ static void wdev_cleanup_work(struct work_struct *work)
737 ___cfg80211_scan_done(rdev, true); 739 ___cfg80211_scan_done(rdev, true);
738 } 740 }
739 741
742 cfg80211_unlock_rdev(rdev);
743
744 mutex_lock(&rdev->sched_scan_mtx);
745
740 if (WARN_ON(rdev->sched_scan_req && 746 if (WARN_ON(rdev->sched_scan_req &&
741 rdev->sched_scan_req->dev == wdev->netdev)) { 747 rdev->sched_scan_req->dev == wdev->netdev)) {
742 __cfg80211_stop_sched_scan(rdev, false); 748 __cfg80211_stop_sched_scan(rdev, false);
743 } 749 }
744 750
745 cfg80211_unlock_rdev(rdev); 751 mutex_unlock(&rdev->sched_scan_mtx);
746 752
747 mutex_lock(&rdev->devlist_mtx); 753 mutex_lock(&rdev->devlist_mtx);
748 rdev->opencount--; 754 rdev->opencount--;
@@ -830,9 +836,9 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
830 break; 836 break;
831 case NL80211_IFTYPE_P2P_CLIENT: 837 case NL80211_IFTYPE_P2P_CLIENT:
832 case NL80211_IFTYPE_STATION: 838 case NL80211_IFTYPE_STATION:
833 cfg80211_lock_rdev(rdev); 839 mutex_lock(&rdev->sched_scan_mtx);
834 __cfg80211_stop_sched_scan(rdev, false); 840 __cfg80211_stop_sched_scan(rdev, false);
835 cfg80211_unlock_rdev(rdev); 841 mutex_unlock(&rdev->sched_scan_mtx);
836 842
837 wdev_lock(wdev); 843 wdev_lock(wdev);
838#ifdef CONFIG_CFG80211_WEXT 844#ifdef CONFIG_CFG80211_WEXT
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 3dce1f167eb..a570ff9214e 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -65,6 +65,8 @@ struct cfg80211_registered_device {
65 struct work_struct scan_done_wk; 65 struct work_struct scan_done_wk;
66 struct work_struct sched_scan_results_wk; 66 struct work_struct sched_scan_results_wk;
67 67
68 struct mutex sched_scan_mtx;
69
68#ifdef CONFIG_NL80211_TESTMODE 70#ifdef CONFIG_NL80211_TESTMODE
69 struct genl_info *testmode_info; 71 struct genl_info *testmode_info;
70#endif 72#endif
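The cfg80211 hunks give scheduled-scan state its own mutex rather than piggybacking on the rdev lock: sched_scan_mtx is initialized in wiphy_new(), destroyed in cfg80211_dev_free(), and every touch of rdev->sched_scan_req is now bracketed by it. In wdev_cleanup_work() the rdev lock is released before the new mutex is taken, which reads like a lock-ordering fix as much as a scoping one. The resulting guard pattern, names as in the hunks:

    mutex_lock(&rdev->sched_scan_mtx);
    if (rdev->sched_scan_req &&
        rdev->sched_scan_req->dev == wdev->netdev)
            __cfg80211_stop_sched_scan(rdev, false);
    mutex_unlock(&rdev->sched_scan_mtx);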
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 493b939970c..832f6574e4e 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -170,7 +170,9 @@ void __cfg80211_send_deauth(struct net_device *dev,
 			break;
 		}
 		if (wdev->authtry_bsses[i] &&
-		    memcmp(wdev->authtry_bsses[i]->pub.bssid, bssid, ETH_ALEN) == 0) {
+		    memcmp(wdev->authtry_bsses[i]->pub.bssid, bssid,
+			   ETH_ALEN) == 0 &&
+		    memcmp(mgmt->sa, dev->dev_addr, ETH_ALEN) == 0) {
 			cfg80211_unhold_bss(wdev->authtry_bsses[i]);
 			cfg80211_put_bss(&wdev->authtry_bsses[i]->pub);
 			wdev->authtry_bsses[i] = NULL;
@@ -1082,3 +1084,14 @@ void cfg80211_cqm_pktloss_notify(struct net_device *dev,
 	nl80211_send_cqm_pktloss_notify(rdev, dev, peer, num_packets, gfp);
 }
 EXPORT_SYMBOL(cfg80211_cqm_pktloss_notify);
+
+void cfg80211_gtk_rekey_notify(struct net_device *dev, const u8 *bssid,
+			       const u8 *replay_ctr, gfp_t gfp)
+{
+	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	struct wiphy *wiphy = wdev->wiphy;
+	struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+
+	nl80211_gtk_rekey_notify(rdev, dev, bssid, replay_ctr, gfp);
+}
+EXPORT_SYMBOL(cfg80211_gtk_rekey_notify);
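cfg80211_gtk_rekey_notify() is the new driver-facing entry point wrapped here. A hedged usage sketch (drv_report_rekey is a hypothetical driver helper): after firmware rekeys the GTK on the host's behalf, the driver forwards the BSSID and the latest replay counter so the userspace supplicant stays in sync:

#include <net/cfg80211.h>

static void drv_report_rekey(struct net_device *ndev, const u8 *bssid,
			     const u8 replay_ctr[NL80211_REPLAY_CTR_LEN])
{
	/* gfp choice depends on calling context; GFP_KERNEL here
	 * assumes process context */
	cfg80211_gtk_rekey_notify(ndev, bssid, replay_ctr, GFP_KERNEL);
}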
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 98fa8eb6cc4..6a82c898f83 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -176,6 +176,7 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
 	[NL80211_ATTR_WOWLAN_TRIGGERS] = { .type = NLA_NESTED },
 	[NL80211_ATTR_STA_PLINK_STATE] = { .type = NLA_U8 },
 	[NL80211_ATTR_SCHED_SCAN_INTERVAL] = { .type = NLA_U32 },
+	[NL80211_ATTR_REKEY_DATA] = { .type = NLA_NESTED },
 };
 
 /* policy for the key attributes */
@@ -206,6 +207,14 @@ nl80211_wowlan_policy[NUM_NL80211_WOWLAN_TRIG] = {
 	[NL80211_WOWLAN_TRIG_PKT_PATTERN] = { .type = NLA_NESTED },
 };
 
+/* policy for GTK rekey offload attributes */
+static const struct nla_policy
+nl80211_rekey_policy[NUM_NL80211_REKEY_DATA] = {
+	[NL80211_REKEY_DATA_KEK] = { .len = NL80211_KEK_LEN },
+	[NL80211_REKEY_DATA_KCK] = { .len = NL80211_KCK_LEN },
+	[NL80211_REKEY_DATA_REPLAY_CTR] = { .len = NL80211_REPLAY_CTR_LEN },
+};
+
 /* ifidx get helper */
 static int nl80211_get_ifidx(struct netlink_callback *cb)
 {
@@ -3461,9 +3470,6 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
 	if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
 		return -EINVAL;
 
-	if (rdev->sched_scan_req)
-		return -EINPROGRESS;
-
 	if (!info->attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL])
 		return -EINVAL;
 
@@ -3502,12 +3508,21 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
 	if (ie_len > wiphy->max_scan_ie_len)
 		return -EINVAL;
 
+	mutex_lock(&rdev->sched_scan_mtx);
+
+	if (rdev->sched_scan_req) {
+		err = -EINPROGRESS;
+		goto out;
+	}
+
 	request = kzalloc(sizeof(*request)
 			+ sizeof(*request->ssids) * n_ssids
 			+ sizeof(*request->channels) * n_channels
 			+ ie_len, GFP_KERNEL);
-	if (!request)
-		return -ENOMEM;
+	if (!request) {
+		err = -ENOMEM;
+		goto out;
+	}
 
 	if (n_ssids)
 		request->ssids = (void *)&request->channels[n_channels];
@@ -3605,6 +3620,7 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
 out_free:
 	kfree(request);
 out:
+	mutex_unlock(&rdev->sched_scan_mtx);
 	return err;
 }
 
@@ -3612,15 +3628,21 @@ static int nl80211_stop_sched_scan(struct sk_buff *skb,
 				   struct genl_info *info)
 {
 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
+	int err;
 
 	if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) ||
 	    !rdev->ops->sched_scan_stop)
 		return -EOPNOTSUPP;
 
-	return __cfg80211_stop_sched_scan(rdev, false);
+	mutex_lock(&rdev->sched_scan_mtx);
+	err = __cfg80211_stop_sched_scan(rdev, false);
+	mutex_unlock(&rdev->sched_scan_mtx);
+
+	return err;
 }
 
-static int nl80211_send_bss(struct sk_buff *msg, u32 pid, u32 seq, int flags,
+static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
+			    u32 seq, int flags,
 			    struct cfg80211_registered_device *rdev,
 			    struct wireless_dev *wdev,
 			    struct cfg80211_internal_bss *intbss)
@@ -3632,11 +3654,13 @@ static int nl80211_send_bss(struct sk_buff *msg, u32 pid, u32 seq, int flags,
 
 	ASSERT_WDEV_LOCK(wdev);
 
-	hdr = nl80211hdr_put(msg, pid, seq, flags,
+	hdr = nl80211hdr_put(msg, NETLINK_CB(cb->skb).pid, seq, flags,
 			     NL80211_CMD_NEW_SCAN_RESULTS);
 	if (!hdr)
 		return -1;
 
+	genl_dump_check_consistent(cb, hdr, &nl80211_fam);
+
 	NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, rdev->bss_generation);
 	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex);
 
@@ -3725,11 +3749,12 @@ static int nl80211_dump_scan(struct sk_buff *skb,
 	spin_lock_bh(&rdev->bss_lock);
 	cfg80211_bss_expire(rdev);
 
+	cb->seq = rdev->bss_generation;
+
 	list_for_each_entry(scan, &rdev->bss_list, list) {
 		if (++idx <= start)
 			continue;
-		if (nl80211_send_bss(skb,
-				NETLINK_CB(cb->skb).pid,
+		if (nl80211_send_bss(skb, cb,
 				cb->nlh->nlmsg_seq, NLM_F_MULTI,
 				rdev, wdev, scan) < 0) {
 			idx--;
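The two hunks above wire scan dumps into the netlink dump-consistency machinery: the dump callback stamps cb->seq with the current BSS generation, and each message calls genl_dump_check_consistent() so the kernel can flag messages with NLM_F_DUMP_INTR when the list changed mid-dump. Illustrative shape only (example_mark_message is not cfg80211 code):

static void example_mark_message(struct netlink_callback *cb, void *hdr,
				 struct genl_family *family, u32 generation)
{
	cb->seq = generation;	/* set once, before walking the list */
	/* marks this message NLM_F_DUMP_INTR if the generation moved
	 * since the dump started */
	genl_dump_check_consistent(cb, hdr, family);
}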
@@ -3753,10 +3778,6 @@ static int nl80211_send_survey(struct sk_buff *msg, u32 pid, u32 seq,
 	void *hdr;
 	struct nlattr *infoattr;
 
-	/* Survey without a channel doesn't make sense */
-	if (!survey->channel)
-		return -EINVAL;
-
 	hdr = nl80211hdr_put(msg, pid, seq, flags,
 			     NL80211_CMD_NEW_SURVEY_RESULTS);
 	if (!hdr)
@@ -3819,6 +3840,8 @@ static int nl80211_dump_survey(struct sk_buff *skb,
 	}
 
 	while (1) {
+		struct ieee80211_channel *chan;
+
 		res = dev->ops->dump_survey(&dev->wiphy, netdev, survey_idx,
 					    &survey);
 		if (res == -ENOENT)
@@ -3826,6 +3849,19 @@ static int nl80211_dump_survey(struct sk_buff *skb,
 		if (res)
 			goto out_err;
 
+		/* Survey without a channel doesn't make sense */
+		if (!survey.channel) {
+			res = -EINVAL;
+			goto out;
+		}
+
+		chan = ieee80211_get_channel(&dev->wiphy,
+					     survey.channel->center_freq);
+		if (!chan || chan->flags & IEEE80211_CHAN_DISABLED) {
+			survey_idx++;
+			continue;
+		}
+
 		if (nl80211_send_survey(skb,
 				NETLINK_CB(cb->skb).pid,
 				cb->nlh->nlmsg_seq, NLM_F_MULTI,
@@ -4360,6 +4396,93 @@ static int nl80211_testmode_do(struct sk_buff *skb, struct genl_info *info)
 	return err;
 }
 
+static int nl80211_testmode_dump(struct sk_buff *skb,
+				 struct netlink_callback *cb)
+{
+	struct cfg80211_registered_device *dev;
+	int err;
+	long phy_idx;
+	void *data = NULL;
+	int data_len = 0;
+
+	if (cb->args[0]) {
+		/*
+		 * 0 is a valid index, but not valid for args[0],
+		 * so we need to offset by 1.
+		 */
+		phy_idx = cb->args[0] - 1;
+	} else {
+		err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize,
+				  nl80211_fam.attrbuf, nl80211_fam.maxattr,
+				  nl80211_policy);
+		if (err)
+			return err;
+		if (!nl80211_fam.attrbuf[NL80211_ATTR_WIPHY])
+			return -EINVAL;
+		phy_idx = nla_get_u32(nl80211_fam.attrbuf[NL80211_ATTR_WIPHY]);
+		if (nl80211_fam.attrbuf[NL80211_ATTR_TESTDATA])
+			cb->args[1] =
+				(long)nl80211_fam.attrbuf[NL80211_ATTR_TESTDATA];
+	}
+
+	if (cb->args[1]) {
+		data = nla_data((void *)cb->args[1]);
+		data_len = nla_len((void *)cb->args[1]);
+	}
+
+	mutex_lock(&cfg80211_mutex);
+	dev = cfg80211_rdev_by_wiphy_idx(phy_idx);
+	if (!dev) {
+		mutex_unlock(&cfg80211_mutex);
+		return -ENOENT;
+	}
+	cfg80211_lock_rdev(dev);
+	mutex_unlock(&cfg80211_mutex);
+
+	if (!dev->ops->testmode_dump) {
+		err = -EOPNOTSUPP;
+		goto out_err;
+	}
+
+	while (1) {
+		void *hdr = nl80211hdr_put(skb, NETLINK_CB(cb->skb).pid,
+					   cb->nlh->nlmsg_seq, NLM_F_MULTI,
+					   NL80211_CMD_TESTMODE);
+		struct nlattr *tmdata;
+
+		if (nla_put_u32(skb, NL80211_ATTR_WIPHY, dev->wiphy_idx) < 0) {
+			genlmsg_cancel(skb, hdr);
+			break;
+		}
+
+		tmdata = nla_nest_start(skb, NL80211_ATTR_TESTDATA);
+		if (!tmdata) {
+			genlmsg_cancel(skb, hdr);
+			break;
+		}
+		err = dev->ops->testmode_dump(&dev->wiphy, skb, cb,
+					      data, data_len);
+		nla_nest_end(skb, tmdata);
+
+		if (err == -ENOBUFS || err == -ENOENT) {
+			genlmsg_cancel(skb, hdr);
+			break;
+		} else if (err) {
+			genlmsg_cancel(skb, hdr);
+			goto out_err;
+		}
+
+		genlmsg_end(skb, hdr);
+	}
+
+	err = skb->len;
+	/* see above */
+	cb->args[0] = phy_idx + 1;
+ out_err:
+	cfg80211_unlock_rdev(dev);
+	return err;
+}
+
 static struct sk_buff *
 __cfg80211_testmode_alloc_skb(struct cfg80211_registered_device *rdev,
 			      int approxlen, u32 pid, u32 seq, gfp_t gfp)
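nl80211_testmode_dump() leans on the long-lived netlink_callback: the args[] slots survive between successive invocations of a dumpit handler, and returning skb->len means "more to come". Sketch of the offset-by-one idiom used for args[0] (first_phy_idx() is hypothetical):

static int example_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	long idx = cb->args[0] ? cb->args[0] - 1 : first_phy_idx();

	/* ... emit as many NLM_F_MULTI messages for idx as fit ... */

	cb->args[0] = idx + 1;	/* 0 stays reserved for "fresh dump" */
	return skb->len;	/* non-zero: dump continues */
}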
@@ -5306,6 +5429,57 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
 	return err;
 }
 
+static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info)
+{
+	struct cfg80211_registered_device *rdev = info->user_ptr[0];
+	struct net_device *dev = info->user_ptr[1];
+	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	struct nlattr *tb[NUM_NL80211_REKEY_DATA];
+	struct cfg80211_gtk_rekey_data rekey_data;
+	int err;
+
+	if (!info->attrs[NL80211_ATTR_REKEY_DATA])
+		return -EINVAL;
+
+	err = nla_parse(tb, MAX_NL80211_REKEY_DATA,
+			nla_data(info->attrs[NL80211_ATTR_REKEY_DATA]),
+			nla_len(info->attrs[NL80211_ATTR_REKEY_DATA]),
+			nl80211_rekey_policy);
+	if (err)
+		return err;
+
+	if (nla_len(tb[NL80211_REKEY_DATA_REPLAY_CTR]) != NL80211_REPLAY_CTR_LEN)
+		return -ERANGE;
+	if (nla_len(tb[NL80211_REKEY_DATA_KEK]) != NL80211_KEK_LEN)
+		return -ERANGE;
+	if (nla_len(tb[NL80211_REKEY_DATA_KCK]) != NL80211_KCK_LEN)
+		return -ERANGE;
+
+	memcpy(rekey_data.kek, nla_data(tb[NL80211_REKEY_DATA_KEK]),
+	       NL80211_KEK_LEN);
+	memcpy(rekey_data.kck, nla_data(tb[NL80211_REKEY_DATA_KCK]),
+	       NL80211_KCK_LEN);
+	memcpy(rekey_data.replay_ctr,
+	       nla_data(tb[NL80211_REKEY_DATA_REPLAY_CTR]),
+	       NL80211_REPLAY_CTR_LEN);
+
+	wdev_lock(wdev);
+	if (!wdev->current_bss) {
+		err = -ENOTCONN;
+		goto out;
+	}
+
+	if (!rdev->ops->set_rekey_data) {
+		err = -EOPNOTSUPP;
+		goto out;
+	}
+
+	err = rdev->ops->set_rekey_data(&rdev->wiphy, dev, &rekey_data);
+ out:
+	wdev_unlock(wdev);
+	return err;
+}
+
 #define NL80211_FLAG_NEED_WIPHY		0x01
 #define NL80211_FLAG_NEED_NETDEV	0x02
 #define NL80211_FLAG_NEED_RTNL		0x04
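For completeness, the matching userspace request could be built roughly like this (a sketch using libnl-style calls; the generic netlink message setup with NL80211_CMD_SET_REKEY_OFFLOAD is elided and assumed). Note the kernel side still enforces exact attribute sizes with -ERANGE, since the policy lengths act only as minimums:

struct nl_msg *msg = nlmsg_alloc();
struct nlattr *rekey;

/* ... genlmsg_put(msg, ..., NL80211_CMD_SET_REKEY_OFFLOAD, ...) ... */
rekey = nla_nest_start(msg, NL80211_ATTR_REKEY_DATA);
nla_put(msg, NL80211_REKEY_DATA_KEK, NL80211_KEK_LEN, kek);
nla_put(msg, NL80211_REKEY_DATA_KCK, NL80211_KCK_LEN, kck);
nla_put(msg, NL80211_REKEY_DATA_REPLAY_CTR, NL80211_REPLAY_CTR_LEN,
	replay_ctr);
nla_nest_end(msg, rekey);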
@@ -5657,6 +5831,7 @@ static struct genl_ops nl80211_ops[] = {
 	{
 		.cmd = NL80211_CMD_TESTMODE,
 		.doit = nl80211_testmode_do,
+		.dumpit = nl80211_testmode_dump,
 		.policy = nl80211_policy,
 		.flags = GENL_ADMIN_PERM,
 		.internal_flags = NL80211_FLAG_NEED_WIPHY |
@@ -5836,6 +6011,14 @@ static struct genl_ops nl80211_ops[] = {
 		.internal_flags = NL80211_FLAG_NEED_WIPHY |
 				  NL80211_FLAG_NEED_RTNL,
 	},
+	{
+		.cmd = NL80211_CMD_SET_REKEY_OFFLOAD,
+		.doit = nl80211_set_rekey_data,
+		.policy = nl80211_policy,
+		.flags = GENL_ADMIN_PERM,
+		.internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+				  NL80211_FLAG_NEED_RTNL,
+	},
 };
 
 static struct genl_multicast_group nl80211_mlme_mcgrp = {
@@ -6463,7 +6646,8 @@ void nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev,
 	if (addr)
 		NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr);
 	NLA_PUT_U32(msg, NL80211_ATTR_KEY_TYPE, key_type);
-	NLA_PUT_U8(msg, NL80211_ATTR_KEY_IDX, key_id);
+	if (key_id != -1)
+		NLA_PUT_U8(msg, NL80211_ATTR_KEY_IDX, key_id);
 	if (tsc)
 		NLA_PUT(msg, NL80211_ATTR_KEY_SEQ, 6, tsc);
 
@@ -6779,6 +6963,51 @@ nl80211_send_cqm_rssi_notify(struct cfg80211_registered_device *rdev,
 	nlmsg_free(msg);
 }
 
+void nl80211_gtk_rekey_notify(struct cfg80211_registered_device *rdev,
+			      struct net_device *netdev, const u8 *bssid,
+			      const u8 *replay_ctr, gfp_t gfp)
+{
+	struct sk_buff *msg;
+	struct nlattr *rekey_attr;
+	void *hdr;
+
+	msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
+	if (!msg)
+		return;
+
+	hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_SET_REKEY_OFFLOAD);
+	if (!hdr) {
+		nlmsg_free(msg);
+		return;
+	}
+
+	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
+	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
+	NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid);
+
+	rekey_attr = nla_nest_start(msg, NL80211_ATTR_REKEY_DATA);
+	if (!rekey_attr)
+		goto nla_put_failure;
+
+	NLA_PUT(msg, NL80211_REKEY_DATA_REPLAY_CTR,
+		NL80211_REPLAY_CTR_LEN, replay_ctr);
+
+	nla_nest_end(msg, rekey_attr);
+
+	if (genlmsg_end(msg, hdr) < 0) {
+		nlmsg_free(msg);
+		return;
+	}
+
+	genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
+				nl80211_mlme_mcgrp.id, gfp);
+	return;
+
+ nla_put_failure:
+	genlmsg_cancel(msg, hdr);
+	nlmsg_free(msg);
+}
+
 void
 nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev,
 				struct net_device *netdev, const u8 *peer,
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index 2f1bfb87a65..5d69c56400a 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -109,4 +109,8 @@ nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev,
 				struct net_device *netdev, const u8 *peer,
 				u32 num_packets, gfp_t gfp);
 
+void nl80211_gtk_rekey_notify(struct cfg80211_registered_device *rdev,
+			      struct net_device *netdev, const u8 *bssid,
+			      const u8 *replay_ctr, gfp_t gfp);
+
 #endif /* __NET_WIRELESS_NL80211_H */
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 7a6c67667d7..1c4672e3514 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -100,14 +100,14 @@ void __cfg80211_sched_scan_results(struct work_struct *wk)
 	rdev = container_of(wk, struct cfg80211_registered_device,
 			    sched_scan_results_wk);
 
-	cfg80211_lock_rdev(rdev);
+	mutex_lock(&rdev->sched_scan_mtx);
 
 	/* we don't have sched_scan_req anymore if the scan is stopping */
 	if (rdev->sched_scan_req)
 		nl80211_send_sched_scan_results(rdev,
 						rdev->sched_scan_req->dev);
 
-	cfg80211_unlock_rdev(rdev);
+	mutex_unlock(&rdev->sched_scan_mtx);
 }
112 112
113void cfg80211_sched_scan_results(struct wiphy *wiphy) 113void cfg80211_sched_scan_results(struct wiphy *wiphy)
@@ -123,27 +123,26 @@ void cfg80211_sched_scan_stopped(struct wiphy *wiphy)
 {
 	struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
 
-	cfg80211_lock_rdev(rdev);
+	mutex_lock(&rdev->sched_scan_mtx);
 	__cfg80211_stop_sched_scan(rdev, true);
-	cfg80211_unlock_rdev(rdev);
+	mutex_unlock(&rdev->sched_scan_mtx);
 }
 EXPORT_SYMBOL(cfg80211_sched_scan_stopped);
 
 int __cfg80211_stop_sched_scan(struct cfg80211_registered_device *rdev,
 			       bool driver_initiated)
 {
-	int err;
 	struct net_device *dev;
 
-	ASSERT_RDEV_LOCK(rdev);
+	lockdep_assert_held(&rdev->sched_scan_mtx);
 
 	if (!rdev->sched_scan_req)
-		return 0;
+		return -ENOENT;
 
 	dev = rdev->sched_scan_req->dev;
 
 	if (!driver_initiated) {
-		err = rdev->ops->sched_scan_stop(&rdev->wiphy, dev);
+		int err = rdev->ops->sched_scan_stop(&rdev->wiphy, dev);
 		if (err)
 			return err;
 	}
@@ -153,7 +152,7 @@ int __cfg80211_stop_sched_scan(struct cfg80211_registered_device *rdev,
 	kfree(rdev->sched_scan_req);
 	rdev->sched_scan_req = NULL;
 
-	return err;
+	return 0;
 }
 
 static void bss_release(struct kref *ref)
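Taken together, the scan.c changes define a new caller contract: hold sched_scan_mtx (now asserted via lockdep_assert_held(), a no-op unless lock debugging is enabled) and expect -ENOENT when no scheduled scan is active, instead of a silent 0. Illustrative caller, not in-tree code:

static int example_stop(struct cfg80211_registered_device *rdev)
{
	int err;

	mutex_lock(&rdev->sched_scan_mtx);
	err = __cfg80211_stop_sched_scan(rdev, false);	/* -ENOENT if idle */
	mutex_unlock(&rdev->sched_scan_mtx);
	return err;
}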
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 4680b1e4c79..d30615419b4 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -237,21 +237,21 @@ static int x25_device_event(struct notifier_block *this, unsigned long event,
 #endif
 	 ) {
 		switch (event) {
-			case NETDEV_UP:
-				x25_link_device_up(dev);
-				break;
-			case NETDEV_GOING_DOWN:
-				nb = x25_get_neigh(dev);
-				if (nb) {
-					x25_terminate_link(nb);
-					x25_neigh_put(nb);
-				}
-				break;
-			case NETDEV_DOWN:
-				x25_kill_by_device(dev);
-				x25_route_device_down(dev);
-				x25_link_device_down(dev);
-				break;
+		case NETDEV_UP:
+			x25_link_device_up(dev);
+			break;
+		case NETDEV_GOING_DOWN:
+			nb = x25_get_neigh(dev);
+			if (nb) {
+				x25_terminate_link(nb);
+				x25_neigh_put(nb);
+			}
+			break;
+		case NETDEV_DOWN:
+			x25_kill_by_device(dev);
+			x25_route_device_down(dev);
+			x25_link_device_down(dev);
+			break;
 		}
 	}
 
@@ -1336,256 +1336,253 @@ static int x25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
 	int rc;
 
 	switch (cmd) {
-		case TIOCOUTQ: {
-			int amount;
+	case TIOCOUTQ: {
+		int amount;
 
-			amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
-			if (amount < 0)
-				amount = 0;
-			rc = put_user(amount, (unsigned int __user *)argp);
-			break;
-		}
+		amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
+		if (amount < 0)
+			amount = 0;
+		rc = put_user(amount, (unsigned int __user *)argp);
+		break;
+	}
 
-		case TIOCINQ: {
-			struct sk_buff *skb;
-			int amount = 0;
-			/*
-			 * These two are safe on a single CPU system as
-			 * only user tasks fiddle here
-			 */
-			lock_sock(sk);
-			if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
-				amount = skb->len;
-			release_sock(sk);
-			rc = put_user(amount, (unsigned int __user *)argp);
-			break;
-		}
+	case TIOCINQ: {
+		struct sk_buff *skb;
+		int amount = 0;
+		/*
+		 * These two are safe on a single CPU system as
+		 * only user tasks fiddle here
+		 */
+		lock_sock(sk);
+		if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
+			amount = skb->len;
+		release_sock(sk);
+		rc = put_user(amount, (unsigned int __user *)argp);
+		break;
+	}
 
-		case SIOCGSTAMP:
-			rc = -EINVAL;
-			if (sk)
-				rc = sock_get_timestamp(sk,
-						(struct timeval __user *)argp);
-			break;
-		case SIOCGSTAMPNS:
-			rc = -EINVAL;
-			if (sk)
-				rc = sock_get_timestampns(sk,
-						(struct timespec __user *)argp);
-			break;
-		case SIOCGIFADDR:
-		case SIOCSIFADDR:
-		case SIOCGIFDSTADDR:
-		case SIOCSIFDSTADDR:
-		case SIOCGIFBRDADDR:
-		case SIOCSIFBRDADDR:
-		case SIOCGIFNETMASK:
-		case SIOCSIFNETMASK:
-		case SIOCGIFMETRIC:
-		case SIOCSIFMETRIC:
-			rc = -EINVAL;
-			break;
-		case SIOCADDRT:
-		case SIOCDELRT:
-			rc = -EPERM;
-			if (!capable(CAP_NET_ADMIN))
-				break;
-			rc = x25_route_ioctl(cmd, argp);
-			break;
-		case SIOCX25GSUBSCRIP:
-			rc = x25_subscr_ioctl(cmd, argp);
-			break;
-		case SIOCX25SSUBSCRIP:
-			rc = -EPERM;
-			if (!capable(CAP_NET_ADMIN))
-				break;
-			rc = x25_subscr_ioctl(cmd, argp);
-			break;
-		case SIOCX25GFACILITIES: {
-			lock_sock(sk);
-			rc = copy_to_user(argp, &x25->facilities,
-					  sizeof(x25->facilities))
-					  ? -EFAULT : 0;
-			release_sock(sk);
-			break;
-		}
+	case SIOCGSTAMP:
+		rc = -EINVAL;
+		if (sk)
+			rc = sock_get_timestamp(sk,
+					(struct timeval __user *)argp);
+		break;
+	case SIOCGSTAMPNS:
+		rc = -EINVAL;
+		if (sk)
+			rc = sock_get_timestampns(sk,
+					(struct timespec __user *)argp);
+		break;
+	case SIOCGIFADDR:
+	case SIOCSIFADDR:
+	case SIOCGIFDSTADDR:
+	case SIOCSIFDSTADDR:
+	case SIOCGIFBRDADDR:
+	case SIOCSIFBRDADDR:
+	case SIOCGIFNETMASK:
+	case SIOCSIFNETMASK:
+	case SIOCGIFMETRIC:
+	case SIOCSIFMETRIC:
+		rc = -EINVAL;
+		break;
+	case SIOCADDRT:
+	case SIOCDELRT:
+		rc = -EPERM;
+		if (!capable(CAP_NET_ADMIN))
+			break;
+		rc = x25_route_ioctl(cmd, argp);
+		break;
+	case SIOCX25GSUBSCRIP:
+		rc = x25_subscr_ioctl(cmd, argp);
+		break;
+	case SIOCX25SSUBSCRIP:
+		rc = -EPERM;
+		if (!capable(CAP_NET_ADMIN))
+			break;
+		rc = x25_subscr_ioctl(cmd, argp);
+		break;
+	case SIOCX25GFACILITIES: {
+		lock_sock(sk);
+		rc = copy_to_user(argp, &x25->facilities,
+				  sizeof(x25->facilities))
+			? -EFAULT : 0;
+		release_sock(sk);
+		break;
+	}
 
-		case SIOCX25SFACILITIES: {
-			struct x25_facilities facilities;
-			rc = -EFAULT;
-			if (copy_from_user(&facilities, argp,
-					   sizeof(facilities)))
-				break;
-			rc = -EINVAL;
-			lock_sock(sk);
-			if (sk->sk_state != TCP_LISTEN &&
-			    sk->sk_state != TCP_CLOSE)
-				goto out_fac_release;
-			if (facilities.pacsize_in < X25_PS16 ||
-			    facilities.pacsize_in > X25_PS4096)
-				goto out_fac_release;
-			if (facilities.pacsize_out < X25_PS16 ||
-			    facilities.pacsize_out > X25_PS4096)
-				goto out_fac_release;
-			if (facilities.winsize_in < 1 ||
-			    facilities.winsize_in > 127)
-				goto out_fac_release;
-			if (facilities.throughput) {
-				int out = facilities.throughput & 0xf0;
-				int in = facilities.throughput & 0x0f;
-				if (!out)
-					facilities.throughput |=
-						X25_DEFAULT_THROUGHPUT << 4;
-				else if (out < 0x30 || out > 0xD0)
-					goto out_fac_release;
-				if (!in)
-					facilities.throughput |=
-						X25_DEFAULT_THROUGHPUT;
-				else if (in < 0x03 || in > 0x0D)
-					goto out_fac_release;
-			}
-			if (facilities.reverse &&
-			    (facilities.reverse & 0x81) != 0x81)
-				goto out_fac_release;
-			x25->facilities = facilities;
-			rc = 0;
+	case SIOCX25SFACILITIES: {
+		struct x25_facilities facilities;
+		rc = -EFAULT;
+		if (copy_from_user(&facilities, argp, sizeof(facilities)))
+			break;
+		rc = -EINVAL;
+		lock_sock(sk);
+		if (sk->sk_state != TCP_LISTEN &&
+		    sk->sk_state != TCP_CLOSE)
+			goto out_fac_release;
+		if (facilities.pacsize_in < X25_PS16 ||
+		    facilities.pacsize_in > X25_PS4096)
+			goto out_fac_release;
+		if (facilities.pacsize_out < X25_PS16 ||
+		    facilities.pacsize_out > X25_PS4096)
+			goto out_fac_release;
+		if (facilities.winsize_in < 1 ||
+		    facilities.winsize_in > 127)
+			goto out_fac_release;
+		if (facilities.throughput) {
+			int out = facilities.throughput & 0xf0;
+			int in = facilities.throughput & 0x0f;
+			if (!out)
+				facilities.throughput |=
+					X25_DEFAULT_THROUGHPUT << 4;
+			else if (out < 0x30 || out > 0xD0)
+				goto out_fac_release;
+			if (!in)
+				facilities.throughput |=
+					X25_DEFAULT_THROUGHPUT;
+			else if (in < 0x03 || in > 0x0D)
+				goto out_fac_release;
+		}
+		if (facilities.reverse &&
+		    (facilities.reverse & 0x81) != 0x81)
+			goto out_fac_release;
+		x25->facilities = facilities;
+		rc = 0;
 out_fac_release:
-			release_sock(sk);
-			break;
-		}
+		release_sock(sk);
+		break;
+	}
 
-		case SIOCX25GDTEFACILITIES: {
-			lock_sock(sk);
-			rc = copy_to_user(argp, &x25->dte_facilities,
-					  sizeof(x25->dte_facilities));
-			release_sock(sk);
-			if (rc)
-				rc = -EFAULT;
-			break;
-		}
+	case SIOCX25GDTEFACILITIES: {
+		lock_sock(sk);
+		rc = copy_to_user(argp, &x25->dte_facilities,
+				  sizeof(x25->dte_facilities));
+		release_sock(sk);
+		if (rc)
+			rc = -EFAULT;
+		break;
+	}
 
-		case SIOCX25SDTEFACILITIES: {
-			struct x25_dte_facilities dtefacs;
-			rc = -EFAULT;
-			if (copy_from_user(&dtefacs, argp, sizeof(dtefacs)))
-				break;
-			rc = -EINVAL;
-			lock_sock(sk);
-			if (sk->sk_state != TCP_LISTEN &&
-			    sk->sk_state != TCP_CLOSE)
-				goto out_dtefac_release;
-			if (dtefacs.calling_len > X25_MAX_AE_LEN)
-				goto out_dtefac_release;
-			if (dtefacs.calling_ae == NULL)
-				goto out_dtefac_release;
-			if (dtefacs.called_len > X25_MAX_AE_LEN)
-				goto out_dtefac_release;
-			if (dtefacs.called_ae == NULL)
-				goto out_dtefac_release;
-			x25->dte_facilities = dtefacs;
-			rc = 0;
+	case SIOCX25SDTEFACILITIES: {
+		struct x25_dte_facilities dtefacs;
+		rc = -EFAULT;
+		if (copy_from_user(&dtefacs, argp, sizeof(dtefacs)))
+			break;
+		rc = -EINVAL;
+		lock_sock(sk);
+		if (sk->sk_state != TCP_LISTEN &&
+		    sk->sk_state != TCP_CLOSE)
+			goto out_dtefac_release;
+		if (dtefacs.calling_len > X25_MAX_AE_LEN)
+			goto out_dtefac_release;
+		if (dtefacs.calling_ae == NULL)
+			goto out_dtefac_release;
+		if (dtefacs.called_len > X25_MAX_AE_LEN)
+			goto out_dtefac_release;
+		if (dtefacs.called_ae == NULL)
+			goto out_dtefac_release;
+		x25->dte_facilities = dtefacs;
+		rc = 0;
 out_dtefac_release:
-			release_sock(sk);
-			break;
-		}
+		release_sock(sk);
+		break;
+	}
 
-		case SIOCX25GCALLUSERDATA: {
-			lock_sock(sk);
-			rc = copy_to_user(argp, &x25->calluserdata,
-					  sizeof(x25->calluserdata))
-					  ? -EFAULT : 0;
-			release_sock(sk);
-			break;
-		}
+	case SIOCX25GCALLUSERDATA: {
+		lock_sock(sk);
+		rc = copy_to_user(argp, &x25->calluserdata,
+				  sizeof(x25->calluserdata))
+			? -EFAULT : 0;
+		release_sock(sk);
+		break;
+	}
 
-		case SIOCX25SCALLUSERDATA: {
-			struct x25_calluserdata calluserdata;
+	case SIOCX25SCALLUSERDATA: {
+		struct x25_calluserdata calluserdata;
 
-			rc = -EFAULT;
-			if (copy_from_user(&calluserdata, argp,
-					   sizeof(calluserdata)))
-				break;
-			rc = -EINVAL;
-			if (calluserdata.cudlength > X25_MAX_CUD_LEN)
-				break;
-			lock_sock(sk);
-			x25->calluserdata = calluserdata;
-			release_sock(sk);
-			rc = 0;
-			break;
-		}
+		rc = -EFAULT;
+		if (copy_from_user(&calluserdata, argp, sizeof(calluserdata)))
+			break;
+		rc = -EINVAL;
+		if (calluserdata.cudlength > X25_MAX_CUD_LEN)
+			break;
+		lock_sock(sk);
+		x25->calluserdata = calluserdata;
+		release_sock(sk);
+		rc = 0;
+		break;
+	}
 
-		case SIOCX25GCAUSEDIAG: {
-			lock_sock(sk);
-			rc = copy_to_user(argp, &x25->causediag,
-					  sizeof(x25->causediag))
-					  ? -EFAULT : 0;
-			release_sock(sk);
-			break;
-		}
+	case SIOCX25GCAUSEDIAG: {
+		lock_sock(sk);
+		rc = copy_to_user(argp, &x25->causediag, sizeof(x25->causediag))
+			? -EFAULT : 0;
+		release_sock(sk);
+		break;
+	}
 
-		case SIOCX25SCAUSEDIAG: {
-			struct x25_causediag causediag;
-			rc = -EFAULT;
-			if (copy_from_user(&causediag, argp, sizeof(causediag)))
-				break;
-			lock_sock(sk);
-			x25->causediag = causediag;
-			release_sock(sk);
-			rc = 0;
-			break;
+	case SIOCX25SCAUSEDIAG: {
+		struct x25_causediag causediag;
+		rc = -EFAULT;
+		if (copy_from_user(&causediag, argp, sizeof(causediag)))
+			break;
+		lock_sock(sk);
+		x25->causediag = causediag;
+		release_sock(sk);
+		rc = 0;
+		break;
 
-		}
+	}
 
-		case SIOCX25SCUDMATCHLEN: {
-			struct x25_subaddr sub_addr;
-			rc = -EINVAL;
-			lock_sock(sk);
-			if(sk->sk_state != TCP_CLOSE)
-				goto out_cud_release;
-			rc = -EFAULT;
-			if (copy_from_user(&sub_addr, argp,
-					sizeof(sub_addr)))
-				goto out_cud_release;
-			rc = -EINVAL;
-			if(sub_addr.cudmatchlength > X25_MAX_CUD_LEN)
-				goto out_cud_release;
-			x25->cudmatchlength = sub_addr.cudmatchlength;
-			rc = 0;
+	case SIOCX25SCUDMATCHLEN: {
+		struct x25_subaddr sub_addr;
+		rc = -EINVAL;
+		lock_sock(sk);
+		if(sk->sk_state != TCP_CLOSE)
+			goto out_cud_release;
+		rc = -EFAULT;
+		if (copy_from_user(&sub_addr, argp,
+				   sizeof(sub_addr)))
+			goto out_cud_release;
+		rc = -EINVAL;
+		if (sub_addr.cudmatchlength > X25_MAX_CUD_LEN)
+			goto out_cud_release;
+		x25->cudmatchlength = sub_addr.cudmatchlength;
+		rc = 0;
 out_cud_release:
-			release_sock(sk);
-			break;
-		}
+		release_sock(sk);
+		break;
+	}
 
-		case SIOCX25CALLACCPTAPPRV: {
-			rc = -EINVAL;
-			lock_sock(sk);
-			if (sk->sk_state != TCP_CLOSE)
-				break;
-			clear_bit(X25_ACCPT_APPRV_FLAG, &x25->flags);
-			release_sock(sk);
-			rc = 0;
-			break;
-		}
+	case SIOCX25CALLACCPTAPPRV: {
+		rc = -EINVAL;
+		lock_sock(sk);
+		if (sk->sk_state != TCP_CLOSE)
+			break;
+		clear_bit(X25_ACCPT_APPRV_FLAG, &x25->flags);
+		release_sock(sk);
+		rc = 0;
+		break;
+	}
 
-		case SIOCX25SENDCALLACCPT: {
-			rc = -EINVAL;
-			lock_sock(sk);
-			if (sk->sk_state != TCP_ESTABLISHED)
-				break;
-			/* must call accptapprv above */
-			if (test_bit(X25_ACCPT_APPRV_FLAG, &x25->flags))
-				break;
-			x25_write_internal(sk, X25_CALL_ACCEPTED);
-			x25->state = X25_STATE_3;
-			release_sock(sk);
-			rc = 0;
-			break;
-		}
+	case SIOCX25SENDCALLACCPT: {
+		rc = -EINVAL;
+		lock_sock(sk);
+		if (sk->sk_state != TCP_ESTABLISHED)
+			break;
+		/* must call accptapprv above */
+		if (test_bit(X25_ACCPT_APPRV_FLAG, &x25->flags))
+			break;
+		x25_write_internal(sk, X25_CALL_ACCEPTED);
+		x25->state = X25_STATE_3;
+		release_sock(sk);
+		rc = 0;
+		break;
+	}
 
-		default:
-			rc = -ENOIOCTLCMD;
-			break;
+	default:
+		rc = -ENOIOCTLCMD;
+		break;
 	}
 
 	return rc;
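All of the x25 churn above and below is the same mechanical cleanup: case labels move to the same column as their switch, per Documentation/CodingStyle. The target shape, in miniature:

switch (cmd) {
case TIOCOUTQ:
	/* one tab for the label, two for the body */
	break;
default:
	rc = -ENOIOCTLCMD;
	break;
}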
diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c
index 9005f6daeab..e547ca1578c 100644
--- a/net/x25/x25_dev.c
+++ b/net/x25/x25_dev.c
@@ -146,21 +146,21 @@ void x25_establish_link(struct x25_neigh *nb)
 	unsigned char *ptr;
 
 	switch (nb->dev->type) {
-		case ARPHRD_X25:
-			if ((skb = alloc_skb(1, GFP_ATOMIC)) == NULL) {
-				printk(KERN_ERR "x25_dev: out of memory\n");
-				return;
-			}
-			ptr = skb_put(skb, 1);
-			*ptr = X25_IFACE_CONNECT;
-			break;
+	case ARPHRD_X25:
+		if ((skb = alloc_skb(1, GFP_ATOMIC)) == NULL) {
+			printk(KERN_ERR "x25_dev: out of memory\n");
+			return;
+		}
+		ptr = skb_put(skb, 1);
+		*ptr = X25_IFACE_CONNECT;
+		break;
 
 #if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE)
-		case ARPHRD_ETHER:
-			return;
+	case ARPHRD_ETHER:
+		return;
 #endif
-		default:
-			return;
+	default:
+		return;
 	}
 
 	skb->protocol = htons(ETH_P_X25);
@@ -202,19 +202,19 @@ void x25_send_frame(struct sk_buff *skb, struct x25_neigh *nb)
 	skb_reset_network_header(skb);
 
 	switch (nb->dev->type) {
-		case ARPHRD_X25:
-			dptr = skb_push(skb, 1);
-			*dptr = X25_IFACE_DATA;
-			break;
+	case ARPHRD_X25:
+		dptr = skb_push(skb, 1);
+		*dptr = X25_IFACE_DATA;
+		break;
 
 #if defined(CONFIG_LLC) || defined(CONFIG_LLC_MODULE)
-		case ARPHRD_ETHER:
-			kfree_skb(skb);
-			return;
+	case ARPHRD_ETHER:
+		kfree_skb(skb);
+		return;
 #endif
-		default:
-			kfree_skb(skb);
-			return;
+	default:
+		kfree_skb(skb);
+		return;
 	}
 
 	skb->protocol = htons(ETH_P_X25);
diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c
index 15de65f0471..0b073b51b18 100644
--- a/net/x25/x25_in.c
+++ b/net/x25/x25_in.c
@@ -94,55 +94,55 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp
 	struct x25_sock *x25 = x25_sk(sk);
 
 	switch (frametype) {
-		case X25_CALL_ACCEPTED: {
+	case X25_CALL_ACCEPTED: {
 
-			x25_stop_timer(sk);
-			x25->condition = 0x00;
-			x25->vs        = 0;
-			x25->va        = 0;
-			x25->vr        = 0;
-			x25->vl        = 0;
-			x25->state = X25_STATE_3;
-			sk->sk_state = TCP_ESTABLISHED;
-			/*
-			 * Parse the data in the frame.
-			 */
-			skb_pull(skb, X25_STD_MIN_LEN);
+		x25_stop_timer(sk);
+		x25->condition = 0x00;
+		x25->vs        = 0;
+		x25->va        = 0;
+		x25->vr        = 0;
+		x25->vl        = 0;
+		x25->state = X25_STATE_3;
+		sk->sk_state = TCP_ESTABLISHED;
+		/*
+		 * Parse the data in the frame.
+		 */
+		skb_pull(skb, X25_STD_MIN_LEN);
 
-			len = x25_parse_address_block(skb, &source_addr,
-						      &dest_addr);
-			if (len > 0)
-				skb_pull(skb, len);
-			else if (len < 0)
-				goto out_clear;
+		len = x25_parse_address_block(skb, &source_addr,
+					      &dest_addr);
+		if (len > 0)
+			skb_pull(skb, len);
+		else if (len < 0)
+			goto out_clear;
 
-			len = x25_parse_facilities(skb, &x25->facilities,
-						&x25->dte_facilities,
-						&x25->vc_facil_mask);
-			if (len > 0)
-				skb_pull(skb, len);
-			else if (len < 0)
-				goto out_clear;
-			/*
-			 * Copy any Call User Data.
-			 */
-			if (skb->len > 0) {
-				skb_copy_from_linear_data(skb,
-					x25->calluserdata.cuddata,
-					skb->len);
-				x25->calluserdata.cudlength = skb->len;
-			}
-			if (!sock_flag(sk, SOCK_DEAD))
-				sk->sk_state_change(sk);
-			break;
-		}
-		case X25_CLEAR_REQUEST:
-			x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
-			x25_disconnect(sk, ECONNREFUSED, skb->data[3], skb->data[4]);
-			break;
+		len = x25_parse_facilities(skb, &x25->facilities,
+					   &x25->dte_facilities,
+					   &x25->vc_facil_mask);
+		if (len > 0)
+			skb_pull(skb, len);
+		else if (len < 0)
+			goto out_clear;
+		/*
+		 * Copy any Call User Data.
+		 */
+		if (skb->len > 0) {
+			skb_copy_from_linear_data(skb,
+						  x25->calluserdata.cuddata,
+						  skb->len);
+			x25->calluserdata.cudlength = skb->len;
+		}
+		if (!sock_flag(sk, SOCK_DEAD))
+			sk->sk_state_change(sk);
+		break;
+	}
+	case X25_CLEAR_REQUEST:
+		x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
+		x25_disconnect(sk, ECONNREFUSED, skb->data[3], skb->data[4]);
+		break;
 
-		default:
-			break;
+	default:
+		break;
 	}
 
 	return 0;
@@ -354,18 +354,18 @@ int x25_process_rx_frame(struct sock *sk, struct sk_buff *skb)
 	frametype = x25_decode(sk, skb, &ns, &nr, &q, &d, &m);
 
 	switch (x25->state) {
-		case X25_STATE_1:
-			queued = x25_state1_machine(sk, skb, frametype);
-			break;
-		case X25_STATE_2:
-			queued = x25_state2_machine(sk, skb, frametype);
-			break;
-		case X25_STATE_3:
-			queued = x25_state3_machine(sk, skb, frametype, ns, nr, q, d, m);
-			break;
-		case X25_STATE_4:
-			queued = x25_state4_machine(sk, skb, frametype);
-			break;
+	case X25_STATE_1:
+		queued = x25_state1_machine(sk, skb, frametype);
+		break;
+	case X25_STATE_2:
+		queued = x25_state2_machine(sk, skb, frametype);
+		break;
+	case X25_STATE_3:
+		queued = x25_state3_machine(sk, skb, frametype, ns, nr, q, d, m);
+		break;
+	case X25_STATE_4:
+		queued = x25_state4_machine(sk, skb, frametype);
+		break;
 	}
 
 	x25_kick(sk);
diff --git a/net/x25/x25_link.c b/net/x25/x25_link.c
index 21306928d47..037958ff8ee 100644
--- a/net/x25/x25_link.c
+++ b/net/x25/x25_link.c
@@ -76,30 +76,29 @@ void x25_link_control(struct sk_buff *skb, struct x25_neigh *nb,
 	int confirm;
 
 	switch (frametype) {
-		case X25_RESTART_REQUEST:
-			confirm = !x25_t20timer_pending(nb);
-			x25_stop_t20timer(nb);
-			nb->state = X25_LINK_STATE_3;
-			if (confirm)
-				x25_transmit_restart_confirmation(nb);
-			break;
+	case X25_RESTART_REQUEST:
+		confirm = !x25_t20timer_pending(nb);
+		x25_stop_t20timer(nb);
+		nb->state = X25_LINK_STATE_3;
+		if (confirm)
+			x25_transmit_restart_confirmation(nb);
+		break;
 
-		case X25_RESTART_CONFIRMATION:
-			x25_stop_t20timer(nb);
-			nb->state = X25_LINK_STATE_3;
-			break;
+	case X25_RESTART_CONFIRMATION:
+		x25_stop_t20timer(nb);
+		nb->state = X25_LINK_STATE_3;
+		break;
 
-		case X25_DIAGNOSTIC:
-			printk(KERN_WARNING "x25: diagnostic #%d - "
-			       "%02X %02X %02X\n",
-			       skb->data[3], skb->data[4],
-			       skb->data[5], skb->data[6]);
-			break;
+	case X25_DIAGNOSTIC:
+		printk(KERN_WARNING "x25: diagnostic #%d - %02X %02X %02X\n",
+		       skb->data[3], skb->data[4],
+		       skb->data[5], skb->data[6]);
+		break;
 
-		default:
-			printk(KERN_WARNING "x25: received unknown %02X "
-			       "with LCI 000\n", frametype);
-			break;
+	default:
+		printk(KERN_WARNING "x25: received unknown %02X with LCI 000\n",
+		       frametype);
+		break;
 	}
 
 	if (nb->state == X25_LINK_STATE_3)
@@ -193,18 +192,18 @@ void x25_transmit_clear_request(struct x25_neigh *nb, unsigned int lci,
 void x25_transmit_link(struct sk_buff *skb, struct x25_neigh *nb)
 {
 	switch (nb->state) {
-		case X25_LINK_STATE_0:
-			skb_queue_tail(&nb->queue, skb);
-			nb->state = X25_LINK_STATE_1;
-			x25_establish_link(nb);
-			break;
-		case X25_LINK_STATE_1:
-		case X25_LINK_STATE_2:
-			skb_queue_tail(&nb->queue, skb);
-			break;
-		case X25_LINK_STATE_3:
-			x25_send_frame(skb, nb);
-			break;
+	case X25_LINK_STATE_0:
+		skb_queue_tail(&nb->queue, skb);
+		nb->state = X25_LINK_STATE_1;
+		x25_establish_link(nb);
+		break;
+	case X25_LINK_STATE_1:
+	case X25_LINK_STATE_2:
+		skb_queue_tail(&nb->queue, skb);
+		break;
+	case X25_LINK_STATE_3:
+		x25_send_frame(skb, nb);
+		break;
 	}
 }
 
@@ -214,14 +213,14 @@ void x25_transmit_link(struct sk_buff *skb, struct x25_neigh *nb)
 void x25_link_established(struct x25_neigh *nb)
 {
 	switch (nb->state) {
-		case X25_LINK_STATE_0:
-			nb->state = X25_LINK_STATE_2;
-			break;
-		case X25_LINK_STATE_1:
-			x25_transmit_restart_request(nb);
-			nb->state = X25_LINK_STATE_2;
-			x25_start_t20timer(nb);
-			break;
+	case X25_LINK_STATE_0:
+		nb->state = X25_LINK_STATE_2;
+		break;
+	case X25_LINK_STATE_1:
+		x25_transmit_restart_request(nb);
+		nb->state = X25_LINK_STATE_2;
+		x25_start_t20timer(nb);
+		break;
 	}
 }
 
diff --git a/net/x25/x25_subr.c b/net/x25/x25_subr.c
index dc20cf12f39..24a342ebc7f 100644
--- a/net/x25/x25_subr.c
+++ b/net/x25/x25_subr.c
@@ -126,32 +126,30 @@ void x25_write_internal(struct sock *sk, int frametype)
 	 *	Adjust frame size.
 	 */
 	switch (frametype) {
-		case X25_CALL_REQUEST:
-			len += 1 + X25_ADDR_LEN + X25_MAX_FAC_LEN +
-			       X25_MAX_CUD_LEN;
-			break;
-		case X25_CALL_ACCEPTED: /* fast sel with no restr on resp */
-			if(x25->facilities.reverse & 0x80) {
-				len += 1 + X25_MAX_FAC_LEN + X25_MAX_CUD_LEN;
-			} else {
-				len += 1 + X25_MAX_FAC_LEN;
-			}
-			break;
-		case X25_CLEAR_REQUEST:
-		case X25_RESET_REQUEST:
-			len += 2;
-			break;
-		case X25_RR:
-		case X25_RNR:
-		case X25_REJ:
-		case X25_CLEAR_CONFIRMATION:
-		case X25_INTERRUPT_CONFIRMATION:
-		case X25_RESET_CONFIRMATION:
-			break;
-		default:
-			printk(KERN_ERR "X.25: invalid frame type %02X\n",
-				frametype);
-			return;
+	case X25_CALL_REQUEST:
+		len += 1 + X25_ADDR_LEN + X25_MAX_FAC_LEN + X25_MAX_CUD_LEN;
+		break;
+	case X25_CALL_ACCEPTED: /* fast sel with no restr on resp */
+		if (x25->facilities.reverse & 0x80) {
+			len += 1 + X25_MAX_FAC_LEN + X25_MAX_CUD_LEN;
+		} else {
+			len += 1 + X25_MAX_FAC_LEN;
+		}
+		break;
+	case X25_CLEAR_REQUEST:
+	case X25_RESET_REQUEST:
+		len += 2;
+		break;
+	case X25_RR:
+	case X25_RNR:
+	case X25_REJ:
+	case X25_CLEAR_CONFIRMATION:
+	case X25_INTERRUPT_CONFIRMATION:
+	case X25_RESET_CONFIRMATION:
+		break;
+	default:
+		printk(KERN_ERR "X.25: invalid frame type %02X\n", frametype);
+		return;
 	}
 
 	if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL)
@@ -276,20 +274,20 @@ int x25_decode(struct sock *sk, struct sk_buff *skb, int *ns, int *nr, int *q,
 	*ns = *nr = *q = *d = *m = 0;
 
 	switch (frame[2]) {
-		case X25_CALL_REQUEST:
-		case X25_CALL_ACCEPTED:
-		case X25_CLEAR_REQUEST:
-		case X25_CLEAR_CONFIRMATION:
-		case X25_INTERRUPT:
-		case X25_INTERRUPT_CONFIRMATION:
-		case X25_RESET_REQUEST:
-		case X25_RESET_CONFIRMATION:
-		case X25_RESTART_REQUEST:
-		case X25_RESTART_CONFIRMATION:
-		case X25_REGISTRATION_REQUEST:
-		case X25_REGISTRATION_CONFIRMATION:
-		case X25_DIAGNOSTIC:
-			return frame[2];
+	case X25_CALL_REQUEST:
+	case X25_CALL_ACCEPTED:
+	case X25_CLEAR_REQUEST:
+	case X25_CLEAR_CONFIRMATION:
+	case X25_INTERRUPT:
+	case X25_INTERRUPT_CONFIRMATION:
+	case X25_RESET_REQUEST:
+	case X25_RESET_CONFIRMATION:
+	case X25_RESTART_REQUEST:
+	case X25_RESTART_CONFIRMATION:
+	case X25_REGISTRATION_REQUEST:
+	case X25_REGISTRATION_CONFIRMATION:
+	case X25_DIAGNOSTIC:
+		return frame[2];
 	}
 
 	if (x25->neighbour->extended) {
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 9bec2e8a838..94fdcc7f103 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -50,7 +50,7 @@ static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
 static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);
 static void xfrm_init_pmtu(struct dst_entry *dst);
 static int stale_bundle(struct dst_entry *dst);
-static int xfrm_bundle_ok(struct xfrm_dst *xdst, int family);
+static int xfrm_bundle_ok(struct xfrm_dst *xdst);
 
 
 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
@@ -1497,7 +1497,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
 		goto free_dst;
 
 	/* Copy neighbour for reachability confirmation */
-	dst0->neighbour = neigh_clone(dst->neighbour);
+	dst_set_neighbour(dst0, neigh_clone(dst_get_neighbour(dst)));
 
 	xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
 	xfrm_init_pmtu(dst_prev);
@@ -2241,7 +2241,7 @@ static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
 
 static int stale_bundle(struct dst_entry *dst)
 {
-	return !xfrm_bundle_ok((struct xfrm_dst *)dst, AF_UNSPEC);
+	return !xfrm_bundle_ok((struct xfrm_dst *)dst);
 }
 
 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
@@ -2313,7 +2313,7 @@ static void xfrm_init_pmtu(struct dst_entry *dst)
  * still valid.
  */
 
-static int xfrm_bundle_ok(struct xfrm_dst *first, int family)
+static int xfrm_bundle_ok(struct xfrm_dst *first)
 {
 	struct dst_entry *dst = &first->u.dst;
 	struct xfrm_dst *last;
@@ -2385,6 +2385,11 @@ static unsigned int xfrm_default_mtu(const struct dst_entry *dst)
 	return dst_mtu(dst->path);
 }
 
+static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst, const void *daddr)
+{
+	return dst_neigh_lookup(dst->path, daddr);
+}
+
 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
 {
 	struct net *net;
@@ -2410,6 +2415,8 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
 		dst_ops->negative_advice = xfrm_negative_advice;
 	if (likely(dst_ops->link_failure == NULL))
 		dst_ops->link_failure = xfrm_link_failure;
+	if (likely(dst_ops->neigh_lookup == NULL))
+		dst_ops->neigh_lookup = xfrm_neigh_lookup;
 	if (likely(afinfo->garbage_collect == NULL))
 		afinfo->garbage_collect = __xfrm_garbage_collect;
 	xfrm_policy_afinfo[afinfo->family] = afinfo;
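A dst_ops that leaves .neigh_lookup NULL now inherits xfrm_neigh_lookup(), which resolves the neighbour against the underlying path route. Hedged sketch of what an address family's xfrm dst_ops might look like before registration fills the hook in (example_xfrm_dst_ops is illustrative):

static struct dst_ops example_xfrm_dst_ops = {
	.family		= AF_INET,
	/* .neigh_lookup left NULL here; xfrm_policy_register_afinfo()
	 * fills it in with xfrm_neigh_lookup */
};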
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index d70f85eb786..9414b9c5b1e 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1345,6 +1345,8 @@ out:
 		xfrm_state_check_expire(x1);
 
 		err = 0;
+		x->km.state = XFRM_STATE_DEAD;
+		__xfrm_state_put(x);
 	}
 	spin_unlock_bh(&x1->lock);
 
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index c658cb3bc7c..0256b8a0a7c 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -2299,7 +2299,8 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 		if (link->dump == NULL)
 			return -EINVAL;
 
-		return netlink_dump_start(net->xfrm.nlsk, skb, nlh, link->dump, link->done);
+		return netlink_dump_start(net->xfrm.nlsk, skb, nlh,
+					  link->dump, link->done, 0);
 	}
 
 	err = nlmsg_parse(nlh, xfrm_msg_min[type], attrs, XFRMA_MAX,
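The trailing 0 is the new min_dump_alloc argument: netlink_dump_start() grew a hint for the minimum skb allocation a single dump message may need, and passing 0 keeps the previous behaviour. The prototype as of this merge window, sketched under that assumption:

int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
		       const struct nlmsghdr *nlh,
		       int (*dump)(struct sk_buff *skb,
				   struct netlink_callback *cb),
		       int (*done)(struct netlink_callback *cb),
		       u16 min_dump_alloc);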