Diffstat (limited to 'net')
 net/8021q/vlan.c | 13
 net/8021q/vlan.h | 13
 net/8021q/vlan_core.c | 26
 net/8021q/vlan_dev.c | 161
 net/8021q/vlanproc.c | 16
 net/9p/client.c | 393
 net/9p/protocol.c | 72
 net/9p/trans_fd.c | 2
 net/Kconfig | 13
 net/Makefile | 5
 net/atm/br2684.c | 66
 net/atm/clip.c | 2
 net/atm/common.c | 30
 net/bluetooth/Kconfig | 13
 net/bluetooth/bnep/bnep.h | 8
 net/bluetooth/bnep/netdev.c | 2
 net/bluetooth/hci_conn.c | 39
 net/bluetooth/hci_core.c | 204
 net/bluetooth/hci_event.c | 41
 net/bluetooth/hci_sock.c | 90
 net/bluetooth/hci_sysfs.c | 38
 net/bluetooth/l2cap.c | 677
 net/bluetooth/rfcomm/sock.c | 2
 net/bluetooth/rfcomm/tty.c | 4
 net/bridge/br.c | 2
 net/bridge/br_device.c | 143
 net/bridge/br_fdb.c | 12
 net/bridge/br_forward.c | 38
 net/bridge/br_if.c | 33
 net/bridge/br_input.c | 24
 net/bridge/br_multicast.c | 32
 net/bridge/br_netfilter.c | 63
 net/bridge/br_netlink.c | 9
 net/bridge/br_notify.c | 5
 net/bridge/br_private.h | 67
 net/bridge/br_stp_bpdu.c | 7
 net/bridge/br_sysfs_br.c | 72
 net/bridge/netfilter/ebt_redirect.c | 3
 net/bridge/netfilter/ebt_ulog.c | 8
 net/bridge/netfilter/ebtables.c | 11
 net/caif/Kconfig | 7
 net/caif/Makefile | 14
 net/caif/caif_config_util.c | 5
 net/caif/caif_dev.c | 12
 net/caif/caif_socket.c | 61
 net/caif/cfcnfg.c | 54
 net/caif/cfctrl.c | 8
 net/caif/cfdbgl.c | 2
 net/caif/cfdgml.c | 7
 net/caif/cfpkt_skbuff.c | 5
 net/caif/cfrfml.c | 318
 net/caif/cfserl.c | 7
 net/caif/cfsrvl.c | 26
 net/caif/cfutill.c | 8
 net/caif/cfveil.c | 7
 net/caif/cfvidl.c | 2
 net/caif/chnl_net.c | 67
 net/can/raw.c | 15
 net/compat.c | 53
 net/core/Makefile | 2
 net/core/datagram.c | 8
 net/core/dev.c | 426
 net/core/drop_monitor.c | 33
 net/core/dst.c | 2
 net/core/ethtool.c | 149
 net/core/filter.c | 212
 net/core/flow.c | 9
 net/core/gen_estimator.c | 1
 net/core/gen_stats.c | 14
 net/core/iovec.c | 9
 net/core/link_watch.c | 1
 net/core/neighbour.c | 5
 net/core/net-sysfs.c | 19
 net/core/netevent.c | 5
 net/core/netpoll.c | 182
 net/core/pktgen.c | 212
 net/core/rtnetlink.c | 11
 net/core/scm.c | 33
 net/core/skbuff.c | 12
 net/core/sock.c | 49
 net/core/stream.c | 6
 net/core/timestamping.c | 126
 net/core/utils.c | 3
 net/dccp/ackvec.c | 4
 net/dccp/ccids/ccid3.c | 4
 net/dccp/dccp.h | 12
 net/dccp/input.c | 13
 net/dccp/ipv4.c | 4
 net/dccp/ipv6.c | 30
 net/dccp/options.c | 20
 net/dccp/proto.c | 14
 net/decnet/dn_route.c | 158
 net/dns_resolver/Kconfig | 27
 net/dns_resolver/Makefile | 7
 net/dns_resolver/dns_key.c | 211
 net/dns_resolver/dns_query.c | 160
 net/dns_resolver/internal.h | 44
 net/dsa/Kconfig | 2
 net/dsa/slave.c | 3
 net/econet/af_econet.c | 27
 net/ethernet/eth.c | 5
 net/ethernet/pe2.c | 3
 net/ipv4/af_inet.c | 84
 net/ipv4/arp.c | 51
 net/ipv4/datagram.c | 4
 net/ipv4/devinet.c | 1
 net/ipv4/fib_frontend.c | 13
 net/ipv4/icmp.c | 37
 net/ipv4/igmp.c | 32
 net/ipv4/inet_connection_sock.c | 21
 net/ipv4/inet_fragment.c | 1
 net/ipv4/inet_hashtables.c | 4
 net/ipv4/inetpeer.c | 244
 net/ipv4/ip_forward.c | 10
 net/ipv4/ip_fragment.c | 27
 net/ipv4/ip_gre.c | 16
 net/ipv4/ip_input.c | 26
 net/ipv4/ip_output.c | 87
 net/ipv4/ip_sockglue.c | 45
 net/ipv4/ipconfig.c | 7
 net/ipv4/ipip.c | 8
 net/ipv4/ipmr.c | 16
 net/ipv4/netfilter.c | 12
 net/ipv4/netfilter/arp_tables.c | 22
 net/ipv4/netfilter/ip_queue.c | 57
 net/ipv4/netfilter/ip_tables.c | 16
 net/ipv4/netfilter/ipt_CLUSTERIP.c | 50
 net/ipv4/netfilter/ipt_LOG.c | 54
 net/ipv4/netfilter/ipt_NETMAP.c | 6
 net/ipv4/netfilter/ipt_REJECT.c | 12
 net/ipv4/netfilter/nf_defrag_ipv4.c | 5
 net/ipv4/netfilter/nf_nat_core.c | 29
 net/ipv4/netfilter/nf_nat_proto_common.c | 12
 net/ipv4/netfilter/nf_nat_proto_dccp.c | 6
 net/ipv4/netfilter/nf_nat_proto_gre.c | 12
 net/ipv4/netfilter/nf_nat_proto_icmp.c | 10
 net/ipv4/netfilter/nf_nat_proto_sctp.c | 6
 net/ipv4/netfilter/nf_nat_proto_tcp.c | 5
 net/ipv4/netfilter/nf_nat_proto_udp.c | 5
 net/ipv4/netfilter/nf_nat_proto_udplite.c | 6
 net/ipv4/netfilter/nf_nat_proto_unknown.c | 4
 net/ipv4/netfilter/nf_nat_rule.c | 10
 net/ipv4/netfilter/nf_nat_standalone.c | 10
 net/ipv4/proc.c | 16
 net/ipv4/protocol.c | 3
 net/ipv4/raw.c | 22
 net/ipv4/route.c | 518
 net/ipv4/syncookies.c | 105
 net/ipv4/tcp.c | 79
 net/ipv4/tcp_input.c | 20
 net/ipv4/tcp_ipv4.c | 175
 net/ipv4/tcp_minisocks.c | 9
 net/ipv4/tcp_output.c | 80
 net/ipv4/tcp_timer.c | 1
 net/ipv4/tunnel4.c | 2
 net/ipv4/udp.c | 4
 net/ipv4/udplite.c | 3
 net/ipv4/xfrm4_input.c | 1
 net/ipv4/xfrm4_policy.c | 4
 net/ipv6/addrconf.c | 67
 net/ipv6/addrlabel.c | 6
 net/ipv6/af_inet6.c | 32
 net/ipv6/anycast.c | 96
 net/ipv6/datagram.c | 18
 net/ipv6/exthdrs.c | 34
 net/ipv6/fib6_rules.c | 10
 net/ipv6/inet6_connection_sock.c | 9
 net/ipv6/ip6_fib.c | 30
 net/ipv6/ip6_output.c | 38
 net/ipv6/ip6_tunnel.c | 8
 net/ipv6/ipv6_sockglue.c | 2
 net/ipv6/mcast.c | 190
 net/ipv6/mip6.c | 3
 net/ipv6/ndisc.c | 10
 net/ipv6/netfilter.c | 4
 net/ipv6/netfilter/ip6_queue.c | 57
 net/ipv6/netfilter/ip6_tables.c | 21
 net/ipv6/netfilter/ip6t_LOG.c | 81
 net/ipv6/netfilter/ip6t_REJECT.c | 6
 net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c | 2
 net/ipv6/netfilter/nf_conntrack_reasm.c | 20
 net/ipv6/proc.c | 17
 net/ipv6/raw.c | 32
 net/ipv6/reassembly.c | 21
 net/ipv6/route.c | 319
 net/ipv6/sit.c | 16
 net/ipv6/syncookies.c | 58
 net/ipv6/tcp_ipv6.c | 47
 net/ipv6/udp.c | 11
 net/ipv6/xfrm6_policy.c | 2
 net/irda/irnet/irnet_ppp.c | 10
 net/irda/irttp.c | 14
 net/iucv/iucv.c | 14
 net/l2tp/l2tp_ip.c | 6
 net/mac80211/Kconfig | 8
 net/mac80211/Makefile | 4
 net/mac80211/agg-rx.c | 123
 net/mac80211/agg-tx.c | 554
 net/mac80211/cfg.c | 126
 net/mac80211/debugfs.c | 154
 net/mac80211/debugfs_key.c | 2
 net/mac80211/debugfs_sta.c | 65
 net/mac80211/driver-ops.h | 102
 net/mac80211/driver-trace.h | 210
 net/mac80211/ht.c | 52
 net/mac80211/ibss.c | 171
 net/mac80211/ieee80211_i.h | 81
 net/mac80211/iface.c | 190
 net/mac80211/key.c | 295
 net/mac80211/key.h | 33
 net/mac80211/main.c | 124
 net/mac80211/mesh.c | 73
 net/mac80211/mesh.h | 2
 net/mac80211/mesh_hwmp.c | 4
 net/mac80211/mesh_pathtbl.c | 4
 net/mac80211/mesh_plink.c | 42
 net/mac80211/mlme.c | 284
 net/mac80211/pm.c | 18
 net/mac80211/rate.h | 13
 net/mac80211/rc80211_minstrel.c | 1
 net/mac80211/rc80211_minstrel_ht.c | 827
 net/mac80211/rc80211_minstrel_ht.h | 130
 net/mac80211/rc80211_minstrel_ht_debugfs.c | 118
 net/mac80211/rx.c | 195
 net/mac80211/scan.c | 6
 net/mac80211/sta_info.c | 24
 net/mac80211/sta_info.h | 113
 net/mac80211/status.c | 6
 net/mac80211/tkip.c | 8
 net/mac80211/tkip.h | 2
 net/mac80211/tx.c | 112
 net/mac80211/util.c | 42
 net/mac80211/wep.c | 29
 net/mac80211/wep.h | 2
 net/mac80211/work.c | 47
 net/mac80211/wpa.c | 13
 net/netfilter/Kconfig | 71
 net/netfilter/Makefile | 4
 net/netfilter/ipvs/Kconfig | 11
 net/netfilter/ipvs/ip_vs_app.c | 43
 net/netfilter/ipvs/ip_vs_conn.c | 59
 net/netfilter/ipvs/ip_vs_core.c | 57
 net/netfilter/ipvs/ip_vs_ctl.c | 10
 net/netfilter/ipvs/ip_vs_ftp.c | 176
 net/netfilter/ipvs/ip_vs_lblc.c | 2
 net/netfilter/ipvs/ip_vs_lblcr.c | 2
 net/netfilter/ipvs/ip_vs_proto.c | 1
 net/netfilter/ipvs/ip_vs_proto_sctp.c | 55
 net/netfilter/ipvs/ip_vs_proto_tcp.c | 50
 net/netfilter/ipvs/ip_vs_proto_udp.c | 56
 net/netfilter/ipvs/ip_vs_xmit.c | 115
 net/netfilter/nf_conntrack_acct.c | 14
 net/netfilter/nf_conntrack_core.c | 49
 net/netfilter/nf_conntrack_extend.c | 22
 net/netfilter/nf_conntrack_h323_main.c | 12
 net/netfilter/nf_conntrack_netbios_ns.c | 2
 net/netfilter/nf_conntrack_netlink.c | 2
 net/netfilter/nf_conntrack_proto_tcp.c | 42
 net/netfilter/nfnetlink_log.c | 73
 net/netfilter/nfnetlink_queue.c | 39
 net/netfilter/xt_CHECKSUM.c | 70
 net/netfilter/xt_CT.c | 4
 net/netfilter/xt_IDLETIMER.c | 315
 net/netfilter/xt_NOTRACK.c | 2
 net/netfilter/xt_RATEEST.c | 12
 net/netfilter/xt_TCPMSS.c | 8
 net/netfilter/xt_TEE.c | 8
 net/netfilter/xt_TPROXY.c | 6
 net/netfilter/xt_cluster.c | 2
 net/netfilter/xt_connbytes.c | 10
 net/netfilter/xt_conntrack.c | 11
 net/netfilter/xt_cpu.c | 63
 net/netfilter/xt_ipvs.c | 189
 net/netfilter/xt_quota.c | 12
 net/netfilter/xt_sctp.c | 3
 net/netfilter/xt_socket.c | 2
 net/netfilter/xt_state.c | 14
 net/netfilter/xt_statistic.c | 19
 net/netlink/af_netlink.c | 42
 net/netlink/genetlink.c | 15
 net/packet/af_packet.c | 37
 net/phonet/pep.c | 1
 net/phonet/pn_dev.c | 15
 net/rose/rose_route.c | 4
 net/rxrpc/ar-peer.c | 4
 net/sched/act_api.c | 11
 net/sched/act_mirred.c | 55
 net/sched/act_nat.c | 39
 net/sched/act_pedit.c | 3
 net/sched/act_police.c | 12
 net/sched/act_simple.c | 4
 net/sched/cls_u32.c | 6
 net/sched/sch_atm.c | 98
 net/sched/sch_generic.c | 25
 net/sched/sch_htb.c | 2
 net/sched/sch_teql.c | 1
 net/sctp/associola.c | 2
 net/sctp/protocol.c | 9
 net/sctp/sm_make_chunk.c | 2
 net/socket.c | 177
 net/sunrpc/auth.c | 2
 net/unix/af_unix.c | 99
 net/wanrouter/wanmain.c | 7
 net/wanrouter/wanproc.c | 7
 net/wireless/chan.c | 5
 net/wireless/core.c | 65
 net/wireless/core.h | 1
 net/wireless/genregdb.awk | 1
 net/wireless/ibss.c | 4
 net/wireless/lib80211_crypt_ccmp.c | 1
 net/wireless/lib80211_crypt_tkip.c | 3
 net/wireless/lib80211_crypt_wep.c | 1
 net/wireless/mlme.c | 16
 net/wireless/nl80211.c | 93
 net/wireless/reg.c | 668
 net/wireless/reg.h | 2
 net/wireless/scan.c | 5
 net/wireless/sme.c | 2
 net/wireless/util.c | 4
 net/wireless/wext-compat.c | 11
 net/xfrm/xfrm_policy.c | 21
 321 files changed, 10467 insertions(+), 6091 deletions(-)
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 3c1c8c14e929..a2ad15250575 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -155,9 +155,10 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
 	BUG_ON(!grp);
 
 	/* Take it out of our own structures, but be sure to interlock with
-	 * HW accelerating devices or SW vlan input packet processing.
+	 * HW accelerating devices or SW vlan input packet processing if
+	 * VLAN is not 0 (leave it there for 802.1p).
 	 */
-	if (real_dev->features & NETIF_F_HW_VLAN_FILTER)
+	if (vlan_id && (real_dev->features & NETIF_F_HW_VLAN_FILTER))
 		ops->ndo_vlan_rx_kill_vid(real_dev, vlan_id);
 
 	grp->nr_vlans--;
@@ -419,6 +420,14 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 	if (is_vlan_dev(dev))
 		__vlan_device_event(dev, event);
 
+	if ((event == NETDEV_UP) &&
+	    (dev->features & NETIF_F_HW_VLAN_FILTER) &&
+	    dev->netdev_ops->ndo_vlan_rx_add_vid) {
+		pr_info("8021q: adding VLAN 0 to HW filter on device %s\n",
+			dev->name);
+		dev->netdev_ops->ndo_vlan_rx_add_vid(dev, 0);
+	}
+
 	grp = __vlan_find_group(dev);
 	if (!grp)
 		goto out;
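
The hunks above change the rx contract for VID 0: a priority-tagged (802.1p) frame must no longer be dropped just because no VLAN device is configured for VID 0. What follows is a minimal userspace sketch of that decision table, not kernel API; all names are illustrative.

	#include <stdbool.h>
	#include <stdio.h>

	enum rx_verdict { RX_DROP, RX_VLAN_DEV, RX_PASS_THROUGH };

	/* Mirrors the new logic: deliver to the VLAN device when one exists,
	 * drop unknown non-zero VIDs, and let VID 0 (802.1p priority tagging)
	 * fall through to the real device. */
	static enum rx_verdict vlan_rx_decision(bool have_vlan_dev, unsigned int vlan_id)
	{
		if (have_vlan_dev)
			return RX_VLAN_DEV;
		if (vlan_id)
			return RX_DROP;
		return RX_PASS_THROUGH;
	}

	int main(void)
	{
		printf("%d %d %d\n",
		       vlan_rx_decision(true, 5),	/* 1: to VLAN device */
		       vlan_rx_decision(false, 5),	/* 0: drop */
		       vlan_rx_decision(false, 0));	/* 2: pass through */
		return 0;
	}
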
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index 6abdcac1b2e8..8d9503ad01da 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -2,6 +2,7 @@
 #define __BEN_VLAN_802_1Q_INC__
 
 #include <linux/if_vlan.h>
+#include <linux/u64_stats_sync.h>
 
 
 /**
@@ -21,14 +22,16 @@ struct vlan_priority_tci_mapping {
  * struct vlan_rx_stats - VLAN percpu rx stats
  * @rx_packets: number of received packets
  * @rx_bytes: number of received bytes
- * @multicast: number of received multicast packets
+ * @rx_multicast: number of received multicast packets
+ * @syncp: synchronization point for 64bit counters
  * @rx_errors: number of errors
  */
 struct vlan_rx_stats {
-	unsigned long rx_packets;
-	unsigned long rx_bytes;
-	unsigned long multicast;
-	unsigned long rx_errors;
+	u64			rx_packets;
+	u64			rx_bytes;
+	u64			rx_multicast;
+	struct u64_stats_sync	syncp;
+	unsigned long		rx_errors;
 };
 
 /**
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 50f58f5f1c34..01ddb0472f86 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -8,6 +8,9 @@
 int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
 		      u16 vlan_tci, int polling)
 {
+	struct net_device *vlan_dev;
+	u16 vlan_id;
+
 	if (netpoll_rx(skb))
 		return NET_RX_DROP;
 
@@ -16,9 +19,12 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
 
 	skb->skb_iif = skb->dev->ifindex;
 	__vlan_hwaccel_put_tag(skb, vlan_tci);
-	skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
+	vlan_id = vlan_tci & VLAN_VID_MASK;
+	vlan_dev = vlan_group_get_device(grp, vlan_id);
 
-	if (!skb->dev)
+	if (vlan_dev)
+		skb->dev = vlan_dev;
+	else if (vlan_id)
 		goto drop;
 
 	return (polling ? netif_receive_skb(skb) : netif_rx(skb));
@@ -41,9 +47,9 @@ int vlan_hwaccel_do_receive(struct sk_buff *skb)
 	skb->priority = vlan_get_ingress_priority(dev, skb->vlan_tci);
 	skb->vlan_tci = 0;
 
-	rx_stats = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats,
-			       smp_processor_id());
+	rx_stats = this_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats);
 
+	u64_stats_update_begin(&rx_stats->syncp);
 	rx_stats->rx_packets++;
 	rx_stats->rx_bytes += skb->len;
 
@@ -51,7 +57,7 @@ int vlan_hwaccel_do_receive(struct sk_buff *skb)
 	case PACKET_BROADCAST:
 		break;
 	case PACKET_MULTICAST:
-		rx_stats->multicast++;
+		rx_stats->rx_multicast++;
 		break;
 	case PACKET_OTHERHOST:
 		/* Our lower layer thinks this is not local, let's make sure.
@@ -62,6 +68,7 @@ int vlan_hwaccel_do_receive(struct sk_buff *skb)
 		skb->pkt_type = PACKET_HOST;
 		break;
 	}
+	u64_stats_update_end(&rx_stats->syncp);
 	return 0;
 }
 
@@ -82,15 +89,20 @@ vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
 		unsigned int vlan_tci, struct sk_buff *skb)
 {
 	struct sk_buff *p;
+	struct net_device *vlan_dev;
+	u16 vlan_id;
 
 	if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
 		skb->deliver_no_wcard = 1;
 
 	skb->skb_iif = skb->dev->ifindex;
 	__vlan_hwaccel_put_tag(skb, vlan_tci);
-	skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
+	vlan_id = vlan_tci & VLAN_VID_MASK;
+	vlan_dev = vlan_group_get_device(grp, vlan_id);
 
-	if (!skb->dev)
+	if (vlan_dev)
+		skb->dev = vlan_dev;
+	else if (vlan_id)
 		goto drop;
 
 	for (p = napi->gro_list; p; p = p->next) {
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 529842677817..3d59c9bf8feb 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -142,6 +142,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
 {
 	struct vlan_hdr *vhdr;
 	struct vlan_rx_stats *rx_stats;
+	struct net_device *vlan_dev;
 	u16 vlan_id;
 	u16 vlan_tci;
 
@@ -157,53 +158,71 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
 	vlan_id = vlan_tci & VLAN_VID_MASK;
 
 	rcu_read_lock();
-	skb->dev = __find_vlan_dev(dev, vlan_id);
-	if (!skb->dev) {
-		pr_debug("%s: ERROR: No net_device for VID: %u on dev: %s\n",
-			 __func__, vlan_id, dev->name);
-		goto err_unlock;
-	}
-
-	rx_stats = per_cpu_ptr(vlan_dev_info(skb->dev)->vlan_rx_stats,
-			       smp_processor_id());
-	rx_stats->rx_packets++;
-	rx_stats->rx_bytes += skb->len;
-
-	skb_pull_rcsum(skb, VLAN_HLEN);
-
-	skb->priority = vlan_get_ingress_priority(skb->dev, vlan_tci);
-
-	pr_debug("%s: priority: %u for TCI: %hu\n",
-		 __func__, skb->priority, vlan_tci);
-
-	switch (skb->pkt_type) {
-	case PACKET_BROADCAST: /* Yeah, stats collect these together.. */
-		/* stats->broadcast ++; // no such counter :-( */
-		break;
+	vlan_dev = __find_vlan_dev(dev, vlan_id);
 
-	case PACKET_MULTICAST:
-		rx_stats->multicast++;
-		break;
+	/* If the VLAN device is defined, we use it.
+	 * If not, and the VID is 0, it is a 802.1p packet (not
+	 * really a VLAN), so we will just netif_rx it later to the
+	 * original interface, but with the skb->proto set to the
+	 * wrapped proto: we do nothing here.
+	 */
 
-	case PACKET_OTHERHOST:
-		/* Our lower layer thinks this is not local, let's make sure.
-		 * This allows the VLAN to have a different MAC than the
-		 * underlying device, and still route correctly.
-		 */
-		if (!compare_ether_addr(eth_hdr(skb)->h_dest,
-					skb->dev->dev_addr))
-			skb->pkt_type = PACKET_HOST;
-		break;
-	default:
-		break;
+	if (!vlan_dev) {
+		if (vlan_id) {
+			pr_debug("%s: ERROR: No net_device for VID: %u on dev: %s\n",
+				 __func__, vlan_id, dev->name);
+			goto err_unlock;
+		}
+		rx_stats = NULL;
+	} else {
+		skb->dev = vlan_dev;
+
+		rx_stats = per_cpu_ptr(vlan_dev_info(skb->dev)->vlan_rx_stats,
+					smp_processor_id());
+		u64_stats_update_begin(&rx_stats->syncp);
+		rx_stats->rx_packets++;
+		rx_stats->rx_bytes += skb->len;
+
+		skb->priority = vlan_get_ingress_priority(skb->dev, vlan_tci);
+
+		pr_debug("%s: priority: %u for TCI: %hu\n",
+			 __func__, skb->priority, vlan_tci);
+
+		switch (skb->pkt_type) {
+		case PACKET_BROADCAST:
+			/* Yeah, stats collect these together.. */
+			/* stats->broadcast ++; // no such counter :-( */
+			break;
+
+		case PACKET_MULTICAST:
+			rx_stats->rx_multicast++;
+			break;
+
+		case PACKET_OTHERHOST:
+			/* Our lower layer thinks this is not local, let's make
+			 * sure.
+			 * This allows the VLAN to have a different MAC than the
+			 * underlying device, and still route correctly.
+			 */
+			if (!compare_ether_addr(eth_hdr(skb)->h_dest,
+						skb->dev->dev_addr))
+				skb->pkt_type = PACKET_HOST;
+			break;
+		default:
+			break;
+		}
+		u64_stats_update_end(&rx_stats->syncp);
 	}
 
+	skb_pull_rcsum(skb, VLAN_HLEN);
 	vlan_set_encap_proto(skb, vhdr);
 
-	skb = vlan_check_reorder_header(skb);
-	if (!skb) {
-		rx_stats->rx_errors++;
-		goto err_unlock;
+	if (vlan_dev) {
+		skb = vlan_check_reorder_header(skb);
+		if (!skb) {
+			rx_stats->rx_errors++;
+			goto err_unlock;
+		}
 	}
 
 	netif_rx(skb);
@@ -801,37 +820,65 @@ static u32 vlan_ethtool_get_flags(struct net_device *dev)
 	return dev_ethtool_get_flags(vlan->real_dev);
 }
 
-static struct net_device_stats *vlan_dev_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
-	struct net_device_stats *stats = &dev->stats;
-
 	dev_txq_stats_fold(dev, stats);
 
 	if (vlan_dev_info(dev)->vlan_rx_stats) {
-		struct vlan_rx_stats *p, rx = {0};
+		struct vlan_rx_stats *p, accum = {0};
 		int i;
 
 		for_each_possible_cpu(i) {
+			u64 rxpackets, rxbytes, rxmulticast;
+			unsigned int start;
+
 			p = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats, i);
-			rx.rx_packets += p->rx_packets;
-			rx.rx_bytes += p->rx_bytes;
-			rx.rx_errors += p->rx_errors;
-			rx.multicast += p->multicast;
+			do {
+				start = u64_stats_fetch_begin_bh(&p->syncp);
+				rxpackets	= p->rx_packets;
+				rxbytes		= p->rx_bytes;
+				rxmulticast	= p->rx_multicast;
+			} while (u64_stats_fetch_retry_bh(&p->syncp, start));
+			accum.rx_packets += rxpackets;
+			accum.rx_bytes   += rxbytes;
+			accum.rx_multicast += rxmulticast;
+			/* rx_errors is an ulong, not protected by syncp */
+			accum.rx_errors  += p->rx_errors;
 		}
-		stats->rx_packets = rx.rx_packets;
-		stats->rx_bytes = rx.rx_bytes;
-		stats->rx_errors = rx.rx_errors;
-		stats->multicast = rx.multicast;
+		stats->rx_packets = accum.rx_packets;
+		stats->rx_bytes = accum.rx_bytes;
+		stats->rx_errors = accum.rx_errors;
+		stats->multicast = accum.rx_multicast;
 	}
 	return stats;
 }
 
+static int vlan_ethtool_set_tso(struct net_device *dev, u32 data)
+{
+	if (data) {
+		struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+
+		/* Underlying device must support TSO for VLAN-tagged packets
+		 * and must have TSO enabled now.
+		 */
+		if (!(real_dev->vlan_features & NETIF_F_TSO))
+			return -EOPNOTSUPP;
+		if (!(real_dev->features & NETIF_F_TSO))
+			return -EINVAL;
+		dev->features |= NETIF_F_TSO;
+	} else {
+		dev->features &= ~NETIF_F_TSO;
+	}
+	return 0;
+}
+
 static const struct ethtool_ops vlan_ethtool_ops = {
 	.get_settings		= vlan_ethtool_get_settings,
 	.get_drvinfo		= vlan_ethtool_get_drvinfo,
 	.get_link		= ethtool_op_get_link,
 	.get_rx_csum		= vlan_ethtool_get_rx_csum,
 	.get_flags		= vlan_ethtool_get_flags,
+	.set_tso		= vlan_ethtool_set_tso,
 };
 
 static const struct net_device_ops vlan_netdev_ops = {
@@ -848,7 +895,7 @@ static const struct net_device_ops vlan_netdev_ops = {
 	.ndo_change_rx_flags	= vlan_dev_change_rx_flags,
 	.ndo_do_ioctl		= vlan_dev_ioctl,
 	.ndo_neigh_setup	= vlan_dev_neigh_setup,
-	.ndo_get_stats		= vlan_dev_get_stats,
+	.ndo_get_stats64	= vlan_dev_get_stats64,
 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
 	.ndo_fcoe_ddp_setup	= vlan_dev_fcoe_ddp_setup,
 	.ndo_fcoe_ddp_done	= vlan_dev_fcoe_ddp_done,
@@ -872,7 +919,7 @@ static const struct net_device_ops vlan_netdev_accel_ops = {
 	.ndo_change_rx_flags	= vlan_dev_change_rx_flags,
 	.ndo_do_ioctl		= vlan_dev_ioctl,
 	.ndo_neigh_setup	= vlan_dev_neigh_setup,
-	.ndo_get_stats		= vlan_dev_get_stats,
+	.ndo_get_stats64	= vlan_dev_get_stats64,
#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
 	.ndo_fcoe_ddp_setup	= vlan_dev_fcoe_ddp_setup,
 	.ndo_fcoe_ddp_done	= vlan_dev_fcoe_ddp_done,
@@ -897,7 +944,7 @@ static const struct net_device_ops vlan_netdev_ops_sq = {
 	.ndo_change_rx_flags	= vlan_dev_change_rx_flags,
 	.ndo_do_ioctl		= vlan_dev_ioctl,
 	.ndo_neigh_setup	= vlan_dev_neigh_setup,
-	.ndo_get_stats		= vlan_dev_get_stats,
+	.ndo_get_stats64	= vlan_dev_get_stats64,
 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
 	.ndo_fcoe_ddp_setup	= vlan_dev_fcoe_ddp_setup,
 	.ndo_fcoe_ddp_done	= vlan_dev_fcoe_ddp_done,
@@ -922,7 +969,7 @@ static const struct net_device_ops vlan_netdev_accel_ops_sq = {
 	.ndo_change_rx_flags	= vlan_dev_change_rx_flags,
 	.ndo_do_ioctl		= vlan_dev_ioctl,
 	.ndo_neigh_setup	= vlan_dev_neigh_setup,
-	.ndo_get_stats		= vlan_dev_get_stats,
+	.ndo_get_stats64	= vlan_dev_get_stats64,
 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
 	.ndo_fcoe_ddp_setup	= vlan_dev_fcoe_ddp_setup,
 	.ndo_fcoe_ddp_done	= vlan_dev_fcoe_ddp_done,
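
The stats hunks above widen the per-cpu rx counters to 64 bit and guard them with u64_stats_sync, a seqcount that lets a reader (possibly on a 32-bit machine) retry until it sees an untorn snapshot. Below is a rough userspace analogue of that pattern; memory ordering is simplified for clarity (a production seqlock needs more careful fencing), and nothing here is kernel API.

	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdio.h>

	struct rx_stats {
		atomic_uint seq;	/* odd while the (single) writer is mid-update */
		uint64_t rx_packets;
		uint64_t rx_bytes;
	};

	static void stats_update(struct rx_stats *s, uint64_t bytes)	/* writer */
	{
		atomic_fetch_add(&s->seq, 1);	/* begin: seq becomes odd */
		s->rx_packets++;
		s->rx_bytes += bytes;
		atomic_fetch_add(&s->seq, 1);	/* end: seq becomes even again */
	}

	static void stats_fetch(struct rx_stats *s, uint64_t *pkts, uint64_t *bytes)
	{
		unsigned int start;

		/* retry until a stable, even sequence number brackets the reads */
		do {
			start = atomic_load(&s->seq);
			*pkts = s->rx_packets;
			*bytes = s->rx_bytes;
		} while ((start & 1) || start != atomic_load(&s->seq));
	}

	int main(void)
	{
		struct rx_stats s = {0};
		uint64_t p, b;

		stats_update(&s, 1500);
		stats_update(&s, 60);
		stats_fetch(&s, &p, &b);
		printf("%llu packets, %llu bytes\n",
		       (unsigned long long)p, (unsigned long long)b);
		return 0;
	}
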
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c
index afead353e215..80e280f56686 100644
--- a/net/8021q/vlanproc.c
+++ b/net/8021q/vlanproc.c
@@ -278,25 +278,27 @@ static int vlandev_seq_show(struct seq_file *seq, void *offset)
 {
 	struct net_device *vlandev = (struct net_device *) seq->private;
 	const struct vlan_dev_info *dev_info = vlan_dev_info(vlandev);
-	const struct net_device_stats *stats;
+	struct rtnl_link_stats64 temp;
+	const struct rtnl_link_stats64 *stats;
 	static const char fmt[] = "%30s %12lu\n";
+	static const char fmt64[] = "%30s %12llu\n";
 	int i;
 
 	if (!is_vlan_dev(vlandev))
 		return 0;
 
-	stats = dev_get_stats(vlandev);
+	stats = dev_get_stats(vlandev, &temp);
 	seq_printf(seq,
 		   "%s VID: %d REORDER_HDR: %i dev->priv_flags: %hx\n",
 		   vlandev->name, dev_info->vlan_id,
 		   (int)(dev_info->flags & 1), vlandev->priv_flags);
 
-	seq_printf(seq, fmt, "total frames received", stats->rx_packets);
-	seq_printf(seq, fmt, "total bytes received", stats->rx_bytes);
-	seq_printf(seq, fmt, "Broadcast/Multicast Rcvd", stats->multicast);
+	seq_printf(seq, fmt64, "total frames received", stats->rx_packets);
+	seq_printf(seq, fmt64, "total bytes received", stats->rx_bytes);
+	seq_printf(seq, fmt64, "Broadcast/Multicast Rcvd", stats->multicast);
 	seq_puts(seq, "\n");
-	seq_printf(seq, fmt, "total frames transmitted", stats->tx_packets);
-	seq_printf(seq, fmt, "total bytes transmitted", stats->tx_bytes);
+	seq_printf(seq, fmt64, "total frames transmitted", stats->tx_packets);
+	seq_printf(seq, fmt64, "total bytes transmitted", stats->tx_bytes);
 	seq_printf(seq, fmt, "total headroom inc",
 		   dev_info->cnt_inc_headroom_on_tx);
 	seq_printf(seq, fmt, "total encap on xmit",
diff --git a/net/9p/client.c b/net/9p/client.c
index 37c8da07a80b..dc6f2f26d023 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -460,7 +460,8 @@ static int p9_check_errors(struct p9_client *c, struct p9_req_t *req)
 		return err;
 	}
 
-	if (p9_is_proto_dotu(c))
+	if (p9_is_proto_dotu(c) ||
+		p9_is_proto_dotl(c))
 		err = -ecode;
 
 	if (!err || !IS_ERR_VALUE(err))
@@ -1015,14 +1016,18 @@ int p9_client_open(struct p9_fid *fid, int mode)
 	struct p9_qid qid;
 	int iounit;
 
-	P9_DPRINTK(P9_DEBUG_9P, ">>> TOPEN fid %d mode %d\n", fid->fid, mode);
-	err = 0;
 	clnt = fid->clnt;
+	P9_DPRINTK(P9_DEBUG_9P, ">>> %s fid %d mode %d\n",
+		p9_is_proto_dotl(clnt) ? "TLOPEN" : "TOPEN", fid->fid, mode);
+	err = 0;
 
 	if (fid->mode != -1)
 		return -EINVAL;
 
-	req = p9_client_rpc(clnt, P9_TOPEN, "db", fid->fid, mode);
+	if (p9_is_proto_dotl(clnt))
+		req = p9_client_rpc(clnt, P9_TLOPEN, "dd", fid->fid, mode);
+	else
+		req = p9_client_rpc(clnt, P9_TOPEN, "db", fid->fid, mode);
 	if (IS_ERR(req)) {
 		err = PTR_ERR(req);
 		goto error;
@@ -1034,10 +1039,9 @@ int p9_client_open(struct p9_fid *fid, int mode)
 		goto free_and_error;
 	}
 
-	P9_DPRINTK(P9_DEBUG_9P, "<<< ROPEN qid %x.%llx.%x iounit %x\n",
-				qid.type,
-				(unsigned long long)qid.path,
-				qid.version, iounit);
+	P9_DPRINTK(P9_DEBUG_9P, "<<< %s qid %x.%llx.%x iounit %x\n",
+		p9_is_proto_dotl(clnt) ? "RLOPEN" : "ROPEN", qid.type,
+		(unsigned long long)qid.path, qid.version, iounit);
 
 	fid->mode = mode;
 	fid->iounit = iounit;
@@ -1049,6 +1053,50 @@ error:
 }
 EXPORT_SYMBOL(p9_client_open);
 
+int p9_client_create_dotl(struct p9_fid *ofid, char *name, u32 flags, u32 mode,
+		gid_t gid, struct p9_qid *qid)
+{
+	int err = 0;
+	struct p9_client *clnt;
+	struct p9_req_t *req;
+	int iounit;
+
+	P9_DPRINTK(P9_DEBUG_9P,
+		">>> TLCREATE fid %d name %s flags %d mode %d gid %d\n",
+		ofid->fid, name, flags, mode, gid);
+	clnt = ofid->clnt;
+
+	if (ofid->mode != -1)
+		return -EINVAL;
+
+	req = p9_client_rpc(clnt, P9_TLCREATE, "dsddd", ofid->fid, name, flags,
+			mode, gid);
+	if (IS_ERR(req)) {
+		err = PTR_ERR(req);
+		goto error;
+	}
+
+	err = p9pdu_readf(req->rc, clnt->proto_version, "Qd", qid, &iounit);
+	if (err) {
+		p9pdu_dump(1, req->rc);
+		goto free_and_error;
+	}
+
+	P9_DPRINTK(P9_DEBUG_9P, "<<< RLCREATE qid %x.%llx.%x iounit %x\n",
+			qid->type,
+			(unsigned long long)qid->path,
+			qid->version, iounit);
+
+	ofid->mode = mode;
+	ofid->iounit = iounit;
+
+free_and_error:
+	p9_free_req(clnt, req);
+error:
+	return err;
+}
+EXPORT_SYMBOL(p9_client_create_dotl);
+
 int p9_client_fcreate(struct p9_fid *fid, char *name, u32 perm, int mode,
 		char *extension)
 {
@@ -1094,6 +1142,59 @@ error:
 }
 EXPORT_SYMBOL(p9_client_fcreate);
 
+int p9_client_symlink(struct p9_fid *dfid, char *name, char *symtgt, gid_t gid,
+		struct p9_qid *qid)
+{
+	int err = 0;
+	struct p9_client *clnt;
+	struct p9_req_t *req;
+
+	P9_DPRINTK(P9_DEBUG_9P, ">>> TSYMLINK dfid %d name %s symtgt %s\n",
+			dfid->fid, name, symtgt);
+	clnt = dfid->clnt;
+
+	req = p9_client_rpc(clnt, P9_TSYMLINK, "dssd", dfid->fid, name, symtgt,
+			gid);
+	if (IS_ERR(req)) {
+		err = PTR_ERR(req);
+		goto error;
+	}
+
+	err = p9pdu_readf(req->rc, clnt->proto_version, "Q", qid);
+	if (err) {
+		p9pdu_dump(1, req->rc);
+		goto free_and_error;
+	}
+
+	P9_DPRINTK(P9_DEBUG_9P, "<<< RSYMLINK qid %x.%llx.%x\n",
+			qid->type, (unsigned long long)qid->path, qid->version);
+
+free_and_error:
+	p9_free_req(clnt, req);
+error:
+	return err;
+}
+EXPORT_SYMBOL(p9_client_symlink);
+
+int p9_client_link(struct p9_fid *dfid, struct p9_fid *oldfid, char *newname)
+{
+	struct p9_client *clnt;
+	struct p9_req_t *req;
+
+	P9_DPRINTK(P9_DEBUG_9P, ">>> TLINK dfid %d oldfid %d newname %s\n",
+			dfid->fid, oldfid->fid, newname);
+	clnt = dfid->clnt;
+	req = p9_client_rpc(clnt, P9_TLINK, "dds", dfid->fid, oldfid->fid,
+			newname);
+	if (IS_ERR(req))
+		return PTR_ERR(req);
+
+	P9_DPRINTK(P9_DEBUG_9P, "<<< RLINK\n");
+	p9_free_req(clnt, req);
+	return 0;
+}
+EXPORT_SYMBOL(p9_client_link);
+
 int p9_client_clunk(struct p9_fid *fid)
 {
 	int err;
@@ -1139,9 +1240,8 @@ int p9_client_remove(struct p9_fid *fid)
 	P9_DPRINTK(P9_DEBUG_9P, "<<< RREMOVE fid %d\n", fid->fid);
 
 	p9_free_req(clnt, req);
-	p9_fid_destroy(fid);
-
 error:
+	p9_fid_destroy(fid);
 	return err;
 }
 EXPORT_SYMBOL(p9_client_remove);
@@ -1302,6 +1402,65 @@ error:
 }
 EXPORT_SYMBOL(p9_client_stat);
 
+struct p9_stat_dotl *p9_client_getattr_dotl(struct p9_fid *fid,
+							u64 request_mask)
+{
+	int err;
+	struct p9_client *clnt;
+	struct p9_stat_dotl *ret = kmalloc(sizeof(struct p9_stat_dotl),
+								GFP_KERNEL);
+	struct p9_req_t *req;
+
+	P9_DPRINTK(P9_DEBUG_9P, ">>> TGETATTR fid %d, request_mask %lld\n",
+							fid->fid, request_mask);
+
+	if (!ret)
+		return ERR_PTR(-ENOMEM);
+
+	err = 0;
+	clnt = fid->clnt;
+
+	req = p9_client_rpc(clnt, P9_TGETATTR, "dq", fid->fid, request_mask);
+	if (IS_ERR(req)) {
+		err = PTR_ERR(req);
+		goto error;
+	}
+
+	err = p9pdu_readf(req->rc, clnt->proto_version, "A", ret);
+	if (err) {
+		p9pdu_dump(1, req->rc);
+		p9_free_req(clnt, req);
+		goto error;
+	}
+
+	P9_DPRINTK(P9_DEBUG_9P,
+		"<<< RGETATTR st_result_mask=%lld\n"
+		"<<< qid=%x.%llx.%x\n"
+		"<<< st_mode=%8.8x st_nlink=%llu\n"
+		"<<< st_uid=%d st_gid=%d\n"
+		"<<< st_rdev=%llx st_size=%llx st_blksize=%llu st_blocks=%llu\n"
+		"<<< st_atime_sec=%lld st_atime_nsec=%lld\n"
+		"<<< st_mtime_sec=%lld st_mtime_nsec=%lld\n"
+		"<<< st_ctime_sec=%lld st_ctime_nsec=%lld\n"
+		"<<< st_btime_sec=%lld st_btime_nsec=%lld\n"
+		"<<< st_gen=%lld st_data_version=%lld",
+		ret->st_result_mask, ret->qid.type, ret->qid.path,
+		ret->qid.version, ret->st_mode, ret->st_nlink, ret->st_uid,
+		ret->st_gid, ret->st_rdev, ret->st_size, ret->st_blksize,
+		ret->st_blocks, ret->st_atime_sec, ret->st_atime_nsec,
+		ret->st_mtime_sec, ret->st_mtime_nsec, ret->st_ctime_sec,
+		ret->st_ctime_nsec, ret->st_btime_sec, ret->st_btime_nsec,
+		ret->st_gen, ret->st_data_version);
+
+	p9_free_req(clnt, req);
+	return ret;
+
+error:
+	kfree(ret);
+	return ERR_PTR(err);
+}
+EXPORT_SYMBOL(p9_client_getattr_dotl);
+
 static int p9_client_statsize(struct p9_wstat *wst, int proto_version)
 {
 	int ret;
@@ -1366,6 +1525,36 @@ error:
 }
 EXPORT_SYMBOL(p9_client_wstat);
 
+int p9_client_setattr(struct p9_fid *fid, struct p9_iattr_dotl *p9attr)
+{
+	int err;
+	struct p9_req_t *req;
+	struct p9_client *clnt;
+
+	err = 0;
+	clnt = fid->clnt;
+	P9_DPRINTK(P9_DEBUG_9P, ">>> TSETATTR fid %d\n", fid->fid);
+	P9_DPRINTK(P9_DEBUG_9P,
+		"    valid=%x mode=%x uid=%d gid=%d size=%lld\n"
+		"    atime_sec=%lld atime_nsec=%lld\n"
+		"    mtime_sec=%lld mtime_nsec=%lld\n",
+		p9attr->valid, p9attr->mode, p9attr->uid, p9attr->gid,
+		p9attr->size, p9attr->atime_sec, p9attr->atime_nsec,
+		p9attr->mtime_sec, p9attr->mtime_nsec);
+
+	req = p9_client_rpc(clnt, P9_TSETATTR, "dI", fid->fid, p9attr);
+
+	if (IS_ERR(req)) {
+		err = PTR_ERR(req);
+		goto error;
+	}
+	P9_DPRINTK(P9_DEBUG_9P, "<<< RSETATTR fid %d\n", fid->fid);
+	p9_free_req(clnt, req);
+error:
+	return err;
+}
+EXPORT_SYMBOL(p9_client_setattr);
+
 int p9_client_statfs(struct p9_fid *fid, struct p9_rstatfs *sb)
 {
 	int err;
@@ -1432,3 +1621,187 @@ error:
 }
 EXPORT_SYMBOL(p9_client_rename);
 
+/*
+ * An xattrwalk without @attr_name gives the fid for the lisxattr namespace
+ */
+struct p9_fid *p9_client_xattrwalk(struct p9_fid *file_fid,
+				const char *attr_name, u64 *attr_size)
+{
+	int err;
+	struct p9_req_t *req;
+	struct p9_client *clnt;
+	struct p9_fid *attr_fid;
+
+	err = 0;
+	clnt = file_fid->clnt;
+	attr_fid = p9_fid_create(clnt);
+	if (IS_ERR(attr_fid)) {
+		err = PTR_ERR(attr_fid);
+		attr_fid = NULL;
+		goto error;
+	}
+	P9_DPRINTK(P9_DEBUG_9P,
+		">>> TXATTRWALK file_fid %d, attr_fid %d name %s\n",
+		file_fid->fid, attr_fid->fid, attr_name);
+
+	req = p9_client_rpc(clnt, P9_TXATTRWALK, "dds",
+			file_fid->fid, attr_fid->fid, attr_name);
+	if (IS_ERR(req)) {
+		err = PTR_ERR(req);
+		goto error;
+	}
+	err = p9pdu_readf(req->rc, clnt->proto_version, "q", attr_size);
+	if (err) {
+		p9pdu_dump(1, req->rc);
+		p9_free_req(clnt, req);
+		goto clunk_fid;
+	}
+	p9_free_req(clnt, req);
+	P9_DPRINTK(P9_DEBUG_9P, "<<< RXATTRWALK fid %d size %llu\n",
+		attr_fid->fid, *attr_size);
+	return attr_fid;
+clunk_fid:
+	p9_client_clunk(attr_fid);
+	attr_fid = NULL;
+error:
+	if (attr_fid && (attr_fid != file_fid))
+		p9_fid_destroy(attr_fid);
+
+	return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(p9_client_xattrwalk);
+
+int p9_client_xattrcreate(struct p9_fid *fid, const char *name,
+			u64 attr_size, int flags)
+{
+	int err;
+	struct p9_req_t *req;
+	struct p9_client *clnt;
+
+	P9_DPRINTK(P9_DEBUG_9P,
+		">>> TXATTRCREATE fid %d name %s size %lld flag %d\n",
+		fid->fid, name, (long long)attr_size, flags);
+	err = 0;
+	clnt = fid->clnt;
+	req = p9_client_rpc(clnt, P9_TXATTRCREATE, "dsqd",
+			fid->fid, name, attr_size, flags);
+	if (IS_ERR(req)) {
+		err = PTR_ERR(req);
+		goto error;
+	}
+	P9_DPRINTK(P9_DEBUG_9P, "<<< RXATTRCREATE fid %d\n", fid->fid);
+	p9_free_req(clnt, req);
+error:
+	return err;
+}
+EXPORT_SYMBOL_GPL(p9_client_xattrcreate);
+
+int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
+{
+	int err, rsize, total;
+	struct p9_client *clnt;
+	struct p9_req_t *req;
+	char *dataptr;
+
+	P9_DPRINTK(P9_DEBUG_9P, ">>> TREADDIR fid %d offset %llu count %d\n",
+				fid->fid, (long long unsigned) offset, count);
+
+	err = 0;
+	clnt = fid->clnt;
+	total = 0;
+
+	rsize = fid->iounit;
+	if (!rsize || rsize > clnt->msize-P9_READDIRHDRSZ)
+		rsize = clnt->msize - P9_READDIRHDRSZ;
+
+	if (count < rsize)
+		rsize = count;
+
+	req = p9_client_rpc(clnt, P9_TREADDIR, "dqd", fid->fid, offset, rsize);
+	if (IS_ERR(req)) {
+		err = PTR_ERR(req);
+		goto error;
+	}
+
+	err = p9pdu_readf(req->rc, clnt->proto_version, "D", &count, &dataptr);
+	if (err) {
+		p9pdu_dump(1, req->rc);
+		goto free_and_error;
+	}
+
+	P9_DPRINTK(P9_DEBUG_9P, "<<< RREADDIR count %d\n", count);
+
+	if (data)
+		memmove(data, dataptr, count);
+
+	p9_free_req(clnt, req);
+	return count;
+
+free_and_error:
+	p9_free_req(clnt, req);
+error:
+	return err;
+}
+EXPORT_SYMBOL(p9_client_readdir);
+
+int p9_client_mknod_dotl(struct p9_fid *fid, char *name, int mode,
+			dev_t rdev, gid_t gid, struct p9_qid *qid)
+{
+	int err;
+	struct p9_client *clnt;
+	struct p9_req_t *req;
+
+	err = 0;
+	clnt = fid->clnt;
+	P9_DPRINTK(P9_DEBUG_9P, ">>> TMKNOD fid %d name %s mode %d major %d "
+		"minor %d\n", fid->fid, name, mode, MAJOR(rdev), MINOR(rdev));
+	req = p9_client_rpc(clnt, P9_TMKNOD, "dsdddd", fid->fid, name, mode,
+		MAJOR(rdev), MINOR(rdev), gid);
+	if (IS_ERR(req))
+		return PTR_ERR(req);
+
+	err = p9pdu_readf(req->rc, clnt->proto_version, "Q", qid);
+	if (err) {
+		p9pdu_dump(1, req->rc);
+		goto error;
+	}
+	P9_DPRINTK(P9_DEBUG_9P, "<<< RMKNOD qid %x.%llx.%x\n", qid->type,
+				(unsigned long long)qid->path, qid->version);
+
+error:
+	p9_free_req(clnt, req);
+	return err;
+
+}
+EXPORT_SYMBOL(p9_client_mknod_dotl);
+
+int p9_client_mkdir_dotl(struct p9_fid *fid, char *name, int mode,
+				gid_t gid, struct p9_qid *qid)
+{
+	int err;
+	struct p9_client *clnt;
+	struct p9_req_t *req;
+
+	err = 0;
+	clnt = fid->clnt;
+	P9_DPRINTK(P9_DEBUG_9P, ">>> TMKDIR fid %d name %s mode %d gid %d\n",
+		 fid->fid, name, mode, gid);
+	req = p9_client_rpc(clnt, P9_TMKDIR, "dsdd", fid->fid, name, mode,
+		gid);
+	if (IS_ERR(req))
+		return PTR_ERR(req);
+
+	err = p9pdu_readf(req->rc, clnt->proto_version, "Q", qid);
+	if (err) {
+		p9pdu_dump(1, req->rc);
+		goto error;
+	}
+	P9_DPRINTK(P9_DEBUG_9P, "<<< RMKDIR qid %x.%llx.%x\n", qid->type,
+				(unsigned long long)qid->path, qid->version);
+
+error:
+	p9_free_req(clnt, req);
+	return err;
+
+}
+EXPORT_SYMBOL(p9_client_mkdir_dotl);
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index 149f82160130..3acd3afb20c8 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -141,6 +141,7 @@ pdu_write_u(struct p9_fcall *pdu, const char __user *udata, size_t size)
 	D - data blob (int32_t size followed by void *, results are not freed)
 	T - array of strings (int16_t count, followed by strings)
 	R - array of qids (int16_t count, followed by qids)
+	A - stat for 9p2000.L (p9_stat_dotl)
 	? - if optional = 1, continue parsing
 */
 
@@ -340,6 +341,33 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
 				}
 			}
 			break;
+		case 'A': {
+				struct p9_stat_dotl *stbuf =
+					va_arg(ap, struct p9_stat_dotl *);
+
+				memset(stbuf, 0, sizeof(struct p9_stat_dotl));
+				errcode =
+					p9pdu_readf(pdu, proto_version,
+						"qQdddqqqqqqqqqqqqqqq",
+						&stbuf->st_result_mask,
+						&stbuf->qid,
+						&stbuf->st_mode,
+						&stbuf->st_uid, &stbuf->st_gid,
+						&stbuf->st_nlink,
+						&stbuf->st_rdev, &stbuf->st_size,
+						&stbuf->st_blksize, &stbuf->st_blocks,
+						&stbuf->st_atime_sec,
+						&stbuf->st_atime_nsec,
+						&stbuf->st_mtime_sec,
+						&stbuf->st_mtime_nsec,
+						&stbuf->st_ctime_sec,
+						&stbuf->st_ctime_nsec,
+						&stbuf->st_btime_sec,
+						&stbuf->st_btime_nsec,
+						&stbuf->st_gen,
+						&stbuf->st_data_version);
+			}
+			break;
 		case '?':
 			if ((proto_version != p9_proto_2000u) &&
 				(proto_version != p9_proto_2000L))
@@ -488,6 +516,23 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
 				}
 			}
 			break;
+		case 'I':{
+				struct p9_iattr_dotl *p9attr = va_arg(ap,
+							struct p9_iattr_dotl *);
+
+				errcode = p9pdu_writef(pdu, proto_version,
+							"ddddqqqqq",
+							p9attr->valid,
+							p9attr->mode,
+							p9attr->uid,
+							p9attr->gid,
+							p9attr->size,
+							p9attr->atime_sec,
+							p9attr->atime_nsec,
+							p9attr->mtime_sec,
+							p9attr->mtime_nsec);
+			}
+			break;
 		case '?':
 			if ((proto_version != p9_proto_2000u) &&
 				(proto_version != p9_proto_2000L))
@@ -580,3 +625,30 @@ void p9pdu_reset(struct p9_fcall *pdu)
 	pdu->offset = 0;
 	pdu->size = 0;
 }
+
+int p9dirent_read(char *buf, int len, struct p9_dirent *dirent,
+						int proto_version)
+{
+	struct p9_fcall fake_pdu;
+	int ret;
+	char *nameptr;
+
+	fake_pdu.size = len;
+	fake_pdu.capacity = len;
+	fake_pdu.sdata = buf;
+	fake_pdu.offset = 0;
+
+	ret = p9pdu_readf(&fake_pdu, proto_version, "Qqbs", &dirent->qid,
+			&dirent->d_off, &dirent->d_type, &nameptr);
+	if (ret) {
+		P9_DPRINTK(P9_DEBUG_9P, "<<< p9dirent_read failed: %d\n", ret);
+		p9pdu_dump(1, &fake_pdu);
+		goto out;
+	}
+
+	strcpy(dirent->d_name, nameptr);
+
+out:
+	return fake_pdu.offset;
+}
+EXPORT_SYMBOL(p9dirent_read);
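
p9dirent_read() above parses one wire-format dirent from a buffer and returns the offset consumed, so a caller walks a p9_client_readdir() buffer entry by entry. The fragment below is a hypothetical consumer sketch (fid, buf, buflen, offset and clnt are assumed to come from the caller; error handling is trimmed), not code from this patch:

	/* Walk one RREADDIR payload, one wire-format dirent per iteration. */
	int count, n = 0;
	struct p9_dirent dirent;

	count = p9_client_readdir(fid, buf, buflen, offset);
	while (n < count) {
		int used = p9dirent_read(buf + n, count - n, &dirent,
					 clnt->proto_version);
		if (used <= 0)
			break;	/* malformed entry, stop walking */
		pr_info("9p dirent %s type %d off %lld\n", dirent.d_name,
			dirent.d_type, (long long)dirent.d_off);
		n += used;
	}
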
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index 98ce9bcb0e15..c85109d809ca 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -948,7 +948,7 @@ p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)
 
 	csocket = NULL;
 
-	if (strlen(addr) > UNIX_PATH_MAX) {
+	if (strlen(addr) >= UNIX_PATH_MAX) {
 		P9_EPRINTK(KERN_ERR, "p9_trans_unix: address too long: %s\n",
 			addr);
 		return -ENAMETOOLONG;
diff --git a/net/Kconfig b/net/Kconfig
index 0d68b40fc0e6..e330594d3709 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -32,7 +32,7 @@ config WANT_COMPAT_NETLINK_MESSAGES
 config COMPAT_NETLINK_MESSAGES
 	def_bool y
 	depends on COMPAT
-	depends on WIRELESS_EXT || WANT_COMPAT_NETLINK_MESSAGES
+	depends on WEXT_CORE || WANT_COMPAT_NETLINK_MESSAGES
 	help
 	  This option makes it possible to send different netlink messages
 	  to tasks depending on whether the task is a compat task or not. To
@@ -86,6 +86,16 @@ config NETWORK_SECMARK
 	  to nfmark, but designated for security purposes.
 	  If you are unsure how to answer this question, answer N.
 
+config NETWORK_PHY_TIMESTAMPING
+	bool "Timestamping in PHY devices"
+	depends on EXPERIMENTAL
+	help
+	  This allows timestamping of network packets by PHYs with
+	  hardware timestamping capabilities. This option adds some
+	  overhead in the transmit and receive paths.
+
+	  If you are unsure how to answer this question, answer N.
+
 menuconfig NETFILTER
 	bool "Network packet filtering framework (Netfilter)"
 	---help---
@@ -203,6 +213,7 @@ source "net/phonet/Kconfig"
 source "net/ieee802154/Kconfig"
 source "net/sched/Kconfig"
 source "net/dcb/Kconfig"
+source "net/dns_resolver/Kconfig"
 
 config RPS
 	boolean
diff --git a/net/Makefile b/net/Makefile
index cb7bdc1210cb..ea60fbce9b1b 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -50,7 +50,7 @@ endif
 obj-$(CONFIG_IP_DCCP)	+= dccp/
 obj-$(CONFIG_IP_SCTP)	+= sctp/
 obj-$(CONFIG_RDS)	+= rds/
-obj-y			+= wireless/
+obj-$(CONFIG_WIRELESS)	+= wireless/
 obj-$(CONFIG_MAC80211)	+= mac80211/
 obj-$(CONFIG_TIPC)	+= tipc/
 obj-$(CONFIG_NETLABEL)	+= netlabel/
@@ -61,9 +61,10 @@ obj-$(CONFIG_CAIF) += caif/
 ifneq ($(CONFIG_DCB),)
 obj-y			+= dcb/
 endif
-obj-y			+= ieee802154/
+obj-$(CONFIG_IEEE802154)	+= ieee802154/
 
 ifeq ($(CONFIG_NET),y)
 obj-$(CONFIG_SYSCTL)	+= sysctl_net.o
 endif
 obj-$(CONFIG_WIMAX)	+= wimax/
+obj-$(CONFIG_DNS_RESOLVER)	+= dns_resolver/
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index 6719af6a59fa..651babdfab38 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -139,6 +139,43 @@ static struct net_device *br2684_find_dev(const struct br2684_if_spec *s)
 	return NULL;
 }
 
+static int atm_dev_event(struct notifier_block *this, unsigned long event,
+		 void *arg)
+{
+	struct atm_dev *atm_dev = arg;
+	struct list_head *lh;
+	struct net_device *net_dev;
+	struct br2684_vcc *brvcc;
+	struct atm_vcc *atm_vcc;
+	unsigned long flags;
+
+	pr_debug("event=%ld dev=%p\n", event, atm_dev);
+
+	read_lock_irqsave(&devs_lock, flags);
+	list_for_each(lh, &br2684_devs) {
+		net_dev = list_entry_brdev(lh);
+
+		list_for_each_entry(brvcc, &BRPRIV(net_dev)->brvccs, brvccs) {
+			atm_vcc = brvcc->atmvcc;
+			if (atm_vcc && brvcc->atmvcc->dev == atm_dev) {
+
+				if (atm_vcc->dev->signal == ATM_PHY_SIG_LOST)
+					netif_carrier_off(net_dev);
+				else
+					netif_carrier_on(net_dev);
+
+			}
+		}
+	}
+	read_unlock_irqrestore(&devs_lock, flags);
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block atm_dev_notifier = {
+	.notifier_call = atm_dev_event,
+};
+
 /* chained vcc->pop function. Check if we should wake the netif_queue */
 static void br2684_pop(struct atm_vcc *vcc, struct sk_buff *skb)
 {
@@ -362,6 +399,12 @@ static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb)
 		unregister_netdev(net_dev);
 		free_netdev(net_dev);
 	}
+	read_lock_irq(&devs_lock);
+	if (list_empty(&br2684_devs)) {
+		/* last br2684 device */
+		unregister_atmdevice_notifier(&atm_dev_notifier);
+	}
+	read_unlock_irq(&devs_lock);
 	return;
 }
 
@@ -530,6 +573,13 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
 
 		br2684_push(atmvcc, skb);
 	}
+
+	/* initialize netdev carrier state */
+	if (atmvcc->dev->signal == ATM_PHY_SIG_LOST)
+		netif_carrier_off(net_dev);
+	else
+		netif_carrier_on(net_dev);
+
 	__module_get(THIS_MODULE);
 	return 0;
 
@@ -620,9 +670,16 @@ static int br2684_create(void __user *arg)
 	}
 
 	write_lock_irq(&devs_lock);
+
 	brdev->payload = payload;
-	brdev->number = list_empty(&br2684_devs) ? 1 :
-	    BRPRIV(list_entry_brdev(br2684_devs.prev))->number + 1;
+
+	if (list_empty(&br2684_devs)) {
+		/* 1st br2684 device */
+		register_atmdevice_notifier(&atm_dev_notifier);
+		brdev->number = 1;
+	} else
+		brdev->number = BRPRIV(list_entry_brdev(br2684_devs.prev))->number + 1;
+
 	list_add_tail(&brdev->br2684_devs, &br2684_devs);
 	write_unlock_irq(&devs_lock);
 	return 0;
@@ -772,6 +829,11 @@ static void __exit br2684_exit(void)
 	remove_proc_entry("br2684", atm_proc_root);
 #endif
 
+
+	/* if not already empty */
+	if (!list_empty(&br2684_devs))
+		unregister_atmdevice_notifier(&atm_dev_notifier);
+
 	while (!list_empty(&br2684_devs)) {
 		net_dev = list_entry_brdev(br2684_devs.next);
 		brdev = BRPRIV(net_dev);
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 313aba11316b..95fdd1185067 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -522,7 +522,7 @@ static int clip_setentry(struct atm_vcc *vcc, __be32 ip)
 	error = ip_route_output_key(&init_net, &rt, &fl);
 	if (error)
 		return error;
-	neigh = __neigh_lookup(&clip_tbl, &ip, rt->u.dst.dev, 1);
+	neigh = __neigh_lookup(&clip_tbl, &ip, rt->dst.dev, 1);
 	ip_rt_put(rt);
 	if (!neigh)
 		return -ENOMEM;
diff --git a/net/atm/common.c b/net/atm/common.c
index b43feb1a3995..940404a73b3d 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -37,6 +37,8 @@ EXPORT_SYMBOL(vcc_hash);
37DEFINE_RWLOCK(vcc_sklist_lock); 37DEFINE_RWLOCK(vcc_sklist_lock);
38EXPORT_SYMBOL(vcc_sklist_lock); 38EXPORT_SYMBOL(vcc_sklist_lock);
39 39
40static ATOMIC_NOTIFIER_HEAD(atm_dev_notify_chain);
41
40static void __vcc_insert_socket(struct sock *sk) 42static void __vcc_insert_socket(struct sock *sk)
41{ 43{
42 struct atm_vcc *vcc = atm_sk(sk); 44 struct atm_vcc *vcc = atm_sk(sk);
@@ -212,6 +214,22 @@ void vcc_release_async(struct atm_vcc *vcc, int reply)
212} 214}
213EXPORT_SYMBOL(vcc_release_async); 215EXPORT_SYMBOL(vcc_release_async);
214 216
217void atm_dev_signal_change(struct atm_dev *dev, char signal)
218{
219 pr_debug("%s signal=%d dev=%p number=%d dev->signal=%d\n",
220 __func__, signal, dev, dev->number, dev->signal);
221
222 /* atm driver sending invalid signal */
223 WARN_ON(signal < ATM_PHY_SIG_LOST || signal > ATM_PHY_SIG_FOUND);
224
225 if (dev->signal == signal)
226 return; /* no change */
227
228 dev->signal = signal;
229
230 atomic_notifier_call_chain(&atm_dev_notify_chain, signal, dev);
231}
232EXPORT_SYMBOL(atm_dev_signal_change);
215 233
216void atm_dev_release_vccs(struct atm_dev *dev) 234void atm_dev_release_vccs(struct atm_dev *dev)
217{ 235{
@@ -781,6 +799,18 @@ int vcc_getsockopt(struct socket *sock, int level, int optname,
781 return vcc->dev->ops->getsockopt(vcc, level, optname, optval, len); 799 return vcc->dev->ops->getsockopt(vcc, level, optname, optval, len);
782} 800}
783 801
802int register_atmdevice_notifier(struct notifier_block *nb)
803{
804 return atomic_notifier_chain_register(&atm_dev_notify_chain, nb);
805}
806EXPORT_SYMBOL_GPL(register_atmdevice_notifier);
807
808void unregister_atmdevice_notifier(struct notifier_block *nb)
809{
810 atomic_notifier_chain_unregister(&atm_dev_notify_chain, nb);
811}
812EXPORT_SYMBOL_GPL(unregister_atmdevice_notifier);
813
784static int __init atm_init(void) 814static int __init atm_init(void)
785{ 815{
786 int error; 816 int error;
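With the notifier chain in place, ATM hardware drivers report PHY state through the new helper. A hedged usage sketch (the card/atm_dev fields are illustrative, not part of this patch):

	/* in a driver's link-status interrupt handler */
	if (link_up)
		atm_dev_signal_change(card->atm_dev, ATM_PHY_SIG_FOUND);
	else
		atm_dev_signal_change(card->atm_dev, ATM_PHY_SIG_LOST);

Since the helper discards no-change updates itself, drivers may call it unconditionally on every status interrupt.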
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
index ee3b3049d385..ed371684c133 100644
--- a/net/bluetooth/Kconfig
+++ b/net/bluetooth/Kconfig
@@ -43,19 +43,6 @@ config BT_L2CAP
43 Say Y here to compile L2CAP support into the kernel or say M to 43 Say Y here to compile L2CAP support into the kernel or say M to
44 compile it as module (l2cap). 44 compile it as module (l2cap).
45 45
46config BT_L2CAP_EXT_FEATURES
47 bool "L2CAP Extended Features support (EXPERIMENTAL)"
48 depends on BT_L2CAP && EXPERIMENTAL
49 help
50 This option enables the L2CAP Extended Features support. These
51 new features include the Enhanced Retransmission and Streaming
52 Modes, the Frame Check Sequence (FCS), and Segmentation and
      53          Reassembly (SAR) for L2CAP packets. They are required for the
54 new Alternate MAC/PHY and the Bluetooth Medical Profile.
55
56 You should say N unless you know what you are doing. Note that
      57          this is still in an experimental state.
58
59config BT_SCO 46config BT_SCO
60 tristate "SCO links support" 47 tristate "SCO links support"
61 depends on BT 48 depends on BT
diff --git a/net/bluetooth/bnep/bnep.h b/net/bluetooth/bnep/bnep.h
index 0d9e506f5d5a..70672544db86 100644
--- a/net/bluetooth/bnep/bnep.h
+++ b/net/bluetooth/bnep/bnep.h
@@ -86,26 +86,26 @@ struct bnep_setup_conn_req {
86 __u8 ctrl; 86 __u8 ctrl;
87 __u8 uuid_size; 87 __u8 uuid_size;
88 __u8 service[0]; 88 __u8 service[0];
89} __attribute__((packed)); 89} __packed;
90 90
91struct bnep_set_filter_req { 91struct bnep_set_filter_req {
92 __u8 type; 92 __u8 type;
93 __u8 ctrl; 93 __u8 ctrl;
94 __be16 len; 94 __be16 len;
95 __u8 list[0]; 95 __u8 list[0];
96} __attribute__((packed)); 96} __packed;
97 97
98struct bnep_control_rsp { 98struct bnep_control_rsp {
99 __u8 type; 99 __u8 type;
100 __u8 ctrl; 100 __u8 ctrl;
101 __be16 resp; 101 __be16 resp;
102} __attribute__((packed)); 102} __packed;
103 103
104struct bnep_ext_hdr { 104struct bnep_ext_hdr {
105 __u8 type; 105 __u8 type;
106 __u8 len; 106 __u8 len;
107 __u8 data[0]; 107 __u8 data[0];
108} __attribute__((packed)); 108} __packed;
109 109
110/* BNEP ioctl defines */ 110/* BNEP ioctl defines */
111#define BNEPCONNADD _IOW('B', 200, int) 111#define BNEPCONNADD _IOW('B', 200, int)
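The __packed spellings above rely on the kernel-wide shorthand from <linux/compiler-gcc.h>, which expands to exactly the attribute the old code spelled out:

	#define __packed __attribute__((packed))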
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index 0faad5ce6dc4..8c100c9dae28 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -104,6 +104,8 @@ static void bnep_net_set_mc_list(struct net_device *dev)
104 break; 104 break;
105 memcpy(__skb_put(skb, ETH_ALEN), ha->addr, ETH_ALEN); 105 memcpy(__skb_put(skb, ETH_ALEN), ha->addr, ETH_ALEN);
106 memcpy(__skb_put(skb, ETH_ALEN), ha->addr, ETH_ALEN); 106 memcpy(__skb_put(skb, ETH_ALEN), ha->addr, ETH_ALEN);
107
108 i++;
107 } 109 }
108 r->len = htons(skb->len - len); 110 r->len = htons(skb->len - len);
109 } 111 }
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index b10e3cdb08f8..0b1e460fe440 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -1,6 +1,6 @@
1/* 1/*
2 BlueZ - Bluetooth protocol stack for Linux 2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated 3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4 4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> 5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 6
@@ -155,6 +155,27 @@ void hci_setup_sync(struct hci_conn *conn, __u16 handle)
155 hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp); 155 hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp);
156} 156}
157 157
158/* Device _must_ be locked */
159void hci_sco_setup(struct hci_conn *conn, __u8 status)
160{
161 struct hci_conn *sco = conn->link;
162
163 BT_DBG("%p", conn);
164
165 if (!sco)
166 return;
167
168 if (!status) {
169 if (lmp_esco_capable(conn->hdev))
170 hci_setup_sync(sco, conn->handle);
171 else
172 hci_add_sco(sco, conn->handle);
173 } else {
174 hci_proto_connect_cfm(sco, status);
175 hci_conn_del(sco);
176 }
177}
178
158static void hci_conn_timeout(unsigned long arg) 179static void hci_conn_timeout(unsigned long arg)
159{ 180{
160 struct hci_conn *conn = (void *) arg; 181 struct hci_conn *conn = (void *) arg;
@@ -358,6 +379,11 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
358 acl->sec_level = sec_level; 379 acl->sec_level = sec_level;
359 acl->auth_type = auth_type; 380 acl->auth_type = auth_type;
360 hci_acl_connect(acl); 381 hci_acl_connect(acl);
382 } else {
383 if (acl->sec_level < sec_level)
384 acl->sec_level = sec_level;
385 if (acl->auth_type < auth_type)
386 acl->auth_type = auth_type;
361 } 387 }
362 388
363 if (type == ACL_LINK) 389 if (type == ACL_LINK)
@@ -380,10 +406,13 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
380 acl->power_save = 1; 406 acl->power_save = 1;
381 hci_conn_enter_active_mode(acl); 407 hci_conn_enter_active_mode(acl);
382 408
383 if (lmp_esco_capable(hdev)) 409 if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->pend)) {
384 hci_setup_sync(sco, acl->handle); 410 /* defer SCO setup until mode change completed */
385 else 411 set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->pend);
386 hci_add_sco(sco, acl->handle); 412 return sco;
413 }
414
415 hci_sco_setup(acl, 0x00);
387 } 416 }
388 417
389 return sco; 418 return sco;
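The hunks above replace the inline eSCO/SCO branch with a two-phase handshake. A condensed sketch of the pattern (mirroring the diff, not verbatim kernel code):

	/* connect path: if a mode change is in flight, only record
	 * that SCO setup is still owed */
	if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->pend))
		set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->pend);
	else
		hci_sco_setup(acl, 0x00);

	/* completion path (mode-change and sniff handlers, see the
	 * hci_event.c hunks below): run the deferred setup once */
	if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
		hci_sco_setup(conn, status);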
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 2f768de87011..8303f1c9ef54 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -562,6 +562,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
562 hci_dev_lock_bh(hdev); 562 hci_dev_lock_bh(hdev);
563 inquiry_cache_flush(hdev); 563 inquiry_cache_flush(hdev);
564 hci_conn_hash_flush(hdev); 564 hci_conn_hash_flush(hdev);
565 hci_blacklist_clear(hdev);
565 hci_dev_unlock_bh(hdev); 566 hci_dev_unlock_bh(hdev);
566 567
567 hci_notify(hdev, HCI_DEV_DOWN); 568 hci_notify(hdev, HCI_DEV_DOWN);
@@ -913,7 +914,7 @@ int hci_register_dev(struct hci_dev *hdev)
913 skb_queue_head_init(&hdev->cmd_q); 914 skb_queue_head_init(&hdev->cmd_q);
914 skb_queue_head_init(&hdev->raw_q); 915 skb_queue_head_init(&hdev->raw_q);
915 916
916 for (i = 0; i < 3; i++) 917 for (i = 0; i < NUM_REASSEMBLY; i++)
917 hdev->reassembly[i] = NULL; 918 hdev->reassembly[i] = NULL;
918 919
919 init_waitqueue_head(&hdev->req_wait_q); 920 init_waitqueue_head(&hdev->req_wait_q);
@@ -923,6 +924,8 @@ int hci_register_dev(struct hci_dev *hdev)
923 924
924 hci_conn_hash_init(hdev); 925 hci_conn_hash_init(hdev);
925 926
927 INIT_LIST_HEAD(&hdev->blacklist.list);
928
926 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats)); 929 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
927 930
928 atomic_set(&hdev->promisc, 0); 931 atomic_set(&hdev->promisc, 0);
@@ -970,7 +973,7 @@ int hci_unregister_dev(struct hci_dev *hdev)
970 973
971 hci_dev_do_close(hdev); 974 hci_dev_do_close(hdev);
972 975
973 for (i = 0; i < 3; i++) 976 for (i = 0; i < NUM_REASSEMBLY; i++)
974 kfree_skb(hdev->reassembly[i]); 977 kfree_skb(hdev->reassembly[i]);
975 978
976 hci_notify(hdev, HCI_DEV_UNREG); 979 hci_notify(hdev, HCI_DEV_UNREG);
@@ -1030,89 +1033,170 @@ int hci_recv_frame(struct sk_buff *skb)
1030} 1033}
1031EXPORT_SYMBOL(hci_recv_frame); 1034EXPORT_SYMBOL(hci_recv_frame);
1032 1035
1033/* Receive packet type fragment */ 1036static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1034#define __reassembly(hdev, type) ((hdev)->reassembly[(type) - 2]) 1037 int count, __u8 index, gfp_t gfp_mask)
1035
1036int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1037{ 1038{
1038 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) 1039 int len = 0;
1040 int hlen = 0;
1041 int remain = count;
1042 struct sk_buff *skb;
1043 struct bt_skb_cb *scb;
1044
1045 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1046 index >= NUM_REASSEMBLY)
1039 return -EILSEQ; 1047 return -EILSEQ;
1040 1048
1049 skb = hdev->reassembly[index];
1050
1051 if (!skb) {
1052 switch (type) {
1053 case HCI_ACLDATA_PKT:
1054 len = HCI_MAX_FRAME_SIZE;
1055 hlen = HCI_ACL_HDR_SIZE;
1056 break;
1057 case HCI_EVENT_PKT:
1058 len = HCI_MAX_EVENT_SIZE;
1059 hlen = HCI_EVENT_HDR_SIZE;
1060 break;
1061 case HCI_SCODATA_PKT:
1062 len = HCI_MAX_SCO_SIZE;
1063 hlen = HCI_SCO_HDR_SIZE;
1064 break;
1065 }
1066
1067 skb = bt_skb_alloc(len, gfp_mask);
1068 if (!skb)
1069 return -ENOMEM;
1070
1071 scb = (void *) skb->cb;
1072 scb->expect = hlen;
1073 scb->pkt_type = type;
1074
1075 skb->dev = (void *) hdev;
1076 hdev->reassembly[index] = skb;
1077 }
1078
1041 while (count) { 1079 while (count) {
1042 struct sk_buff *skb = __reassembly(hdev, type); 1080 scb = (void *) skb->cb;
1043 struct { int expect; } *scb; 1081 len = min(scb->expect, (__u16)count);
1044 int len = 0;
1045 1082
1046 if (!skb) { 1083 memcpy(skb_put(skb, len), data, len);
1047 /* Start of the frame */
1048 1084
1049 switch (type) { 1085 count -= len;
1050 case HCI_EVENT_PKT: 1086 data += len;
1051 if (count >= HCI_EVENT_HDR_SIZE) { 1087 scb->expect -= len;
1052 struct hci_event_hdr *h = data; 1088 remain = count;
1053 len = HCI_EVENT_HDR_SIZE + h->plen;
1054 } else
1055 return -EILSEQ;
1056 break;
1057 1089
1058 case HCI_ACLDATA_PKT: 1090 switch (type) {
1059 if (count >= HCI_ACL_HDR_SIZE) { 1091 case HCI_EVENT_PKT:
1060 struct hci_acl_hdr *h = data; 1092 if (skb->len == HCI_EVENT_HDR_SIZE) {
1061 len = HCI_ACL_HDR_SIZE + __le16_to_cpu(h->dlen); 1093 struct hci_event_hdr *h = hci_event_hdr(skb);
1062 } else 1094 scb->expect = h->plen;
1063 return -EILSEQ; 1095
1064 break; 1096 if (skb_tailroom(skb) < scb->expect) {
1097 kfree_skb(skb);
1098 hdev->reassembly[index] = NULL;
1099 return -ENOMEM;
1100 }
1101 }
1102 break;
1065 1103
1066 case HCI_SCODATA_PKT: 1104 case HCI_ACLDATA_PKT:
1067 if (count >= HCI_SCO_HDR_SIZE) { 1105 if (skb->len == HCI_ACL_HDR_SIZE) {
1068 struct hci_sco_hdr *h = data; 1106 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1069 len = HCI_SCO_HDR_SIZE + h->dlen; 1107 scb->expect = __le16_to_cpu(h->dlen);
1070 } else 1108
1071 return -EILSEQ; 1109 if (skb_tailroom(skb) < scb->expect) {
1072 break; 1110 kfree_skb(skb);
1111 hdev->reassembly[index] = NULL;
1112 return -ENOMEM;
1113 }
1073 } 1114 }
1115 break;
1074 1116
1075 skb = bt_skb_alloc(len, GFP_ATOMIC); 1117 case HCI_SCODATA_PKT:
1076 if (!skb) { 1118 if (skb->len == HCI_SCO_HDR_SIZE) {
1077 BT_ERR("%s no memory for packet", hdev->name); 1119 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1078 return -ENOMEM; 1120 scb->expect = h->dlen;
1121
1122 if (skb_tailroom(skb) < scb->expect) {
1123 kfree_skb(skb);
1124 hdev->reassembly[index] = NULL;
1125 return -ENOMEM;
1126 }
1079 } 1127 }
1128 break;
1129 }
1130
1131 if (scb->expect == 0) {
1132 /* Complete frame */
1080 1133
1081 skb->dev = (void *) hdev;
1082 bt_cb(skb)->pkt_type = type; 1134 bt_cb(skb)->pkt_type = type;
1135 hci_recv_frame(skb);
1083 1136
1084 __reassembly(hdev, type) = skb; 1137 hdev->reassembly[index] = NULL;
1138 return remain;
1139 }
1140 }
1085 1141
1086 scb = (void *) skb->cb; 1142 return remain;
1087 scb->expect = len; 1143}
1088 } else {
1089 /* Continuation */
1090 1144
1091 scb = (void *) skb->cb; 1145int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1092 len = scb->expect; 1146{
1093 } 1147 int rem = 0;
1094 1148
1095 len = min(len, count); 1149 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1150 return -EILSEQ;
1096 1151
1097 memcpy(skb_put(skb, len), data, len); 1152 while (count) {
1153 rem = hci_reassembly(hdev, type, data, count,
1154 type - 1, GFP_ATOMIC);
1155 if (rem < 0)
1156 return rem;
1098 1157
1099 scb->expect -= len; 1158 data += (count - rem);
1159 count = rem;
    1160        }
1100 1161
1101 if (scb->expect == 0) { 1162 return rem;
1102 /* Complete frame */ 1163}
1164EXPORT_SYMBOL(hci_recv_fragment);
1103 1165
1104 __reassembly(hdev, type) = NULL; 1166#define STREAM_REASSEMBLY 0
1105 1167
1106 bt_cb(skb)->pkt_type = type; 1168int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1107 hci_recv_frame(skb); 1169{
1108 } 1170 int type;
1171 int rem = 0;
1109 1172
1110 count -= len; data += len; 1173 while (count) {
1111 } 1174 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1112 1175
1113 return 0; 1176 if (!skb) {
1177 struct { char type; } *pkt;
1178
1179 /* Start of the frame */
1180 pkt = data;
1181 type = pkt->type;
1182
1183 data++;
1184 count--;
1185 } else
1186 type = bt_cb(skb)->pkt_type;
1187
1188 rem = hci_reassembly(hdev, type, data,
1189 count, STREAM_REASSEMBLY, GFP_ATOMIC);
1190 if (rem < 0)
1191 return rem;
1192
1193 data += (count - rem);
1194 count = rem;
    1195        }
1196
1197 return rem;
1114} 1198}
1115EXPORT_SYMBOL(hci_recv_fragment); 1199EXPORT_SYMBOL(hci_recv_stream_fragment);
1116 1200
1117/* ---- Interface to upper protocols ---- */ 1201/* ---- Interface to upper protocols ---- */
1118 1202
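hci_recv_stream_fragment() targets byte-stream transports where the packet-type indicator travels inline with the data. A hedged caller sketch, assuming hdev and the receive buffer come from a UART-style driver:

	static void example_uart_rx(struct hci_dev *hdev, void *buf, int count)
	{
		int err = hci_recv_stream_fragment(hdev, buf, count);

		if (err < 0)
			BT_ERR("%s reassembly failed (%d)", hdev->name, err);
	}

The helper strips the leading type byte itself and parks partial frames in hdev->reassembly[STREAM_REASSEMBLY] between calls, so the driver needs no framing logic of its own.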
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 6c57fc71c7e2..bfef5bae0b3a 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -1,6 +1,6 @@
1/* 1/*
2 BlueZ - Bluetooth protocol stack for Linux 2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated 3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4 4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> 5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 6
@@ -584,7 +584,7 @@ static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
584 conn->out = 1; 584 conn->out = 1;
585 conn->link_mode |= HCI_LM_MASTER; 585 conn->link_mode |= HCI_LM_MASTER;
586 } else 586 } else
587 BT_ERR("No memmory for new connection"); 587 BT_ERR("No memory for new connection");
588 } 588 }
589 } 589 }
590 590
@@ -785,9 +785,13 @@ static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
785 hci_dev_lock(hdev); 785 hci_dev_lock(hdev);
786 786
787 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 787 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
788 if (conn) 788 if (conn) {
789 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend); 789 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);
790 790
791 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
792 hci_sco_setup(conn, status);
793 }
794
791 hci_dev_unlock(hdev); 795 hci_dev_unlock(hdev);
792} 796}
793 797
@@ -808,9 +812,13 @@ static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
808 hci_dev_lock(hdev); 812 hci_dev_lock(hdev);
809 813
810 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); 814 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
811 if (conn) 815 if (conn) {
812 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend); 816 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);
813 817
818 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
819 hci_sco_setup(conn, status);
820 }
821
814 hci_dev_unlock(hdev); 822 hci_dev_unlock(hdev);
815} 823}
816 824
@@ -915,20 +923,8 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
915 } else 923 } else
916 conn->state = BT_CLOSED; 924 conn->state = BT_CLOSED;
917 925
918 if (conn->type == ACL_LINK) { 926 if (conn->type == ACL_LINK)
919 struct hci_conn *sco = conn->link; 927 hci_sco_setup(conn, ev->status);
920 if (sco) {
921 if (!ev->status) {
922 if (lmp_esco_capable(hdev))
923 hci_setup_sync(sco, conn->handle);
924 else
925 hci_add_sco(sco, conn->handle);
926 } else {
927 hci_proto_connect_cfm(sco, ev->status);
928 hci_conn_del(sco);
929 }
930 }
931 }
932 928
933 if (ev->status) { 929 if (ev->status) {
934 hci_proto_connect_cfm(conn, ev->status); 930 hci_proto_connect_cfm(conn, ev->status);
@@ -952,7 +948,7 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
952 948
953 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type); 949 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);
954 950
955 if (mask & HCI_LM_ACCEPT) { 951 if ((mask & HCI_LM_ACCEPT) && !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
956 /* Connection accepted */ 952 /* Connection accepted */
957 struct inquiry_entry *ie; 953 struct inquiry_entry *ie;
958 struct hci_conn *conn; 954 struct hci_conn *conn;
@@ -965,7 +961,7 @@ static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *sk
965 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); 961 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
966 if (!conn) { 962 if (!conn) {
967 if (!(conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr))) { 963 if (!(conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr))) {
968 BT_ERR("No memmory for new connection"); 964 BT_ERR("No memory for new connection");
969 hci_dev_unlock(hdev); 965 hci_dev_unlock(hdev);
970 return; 966 return;
971 } 967 }
@@ -1049,6 +1045,8 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
1049 if (conn) { 1045 if (conn) {
1050 if (!ev->status) 1046 if (!ev->status)
1051 conn->link_mode |= HCI_LM_AUTH; 1047 conn->link_mode |= HCI_LM_AUTH;
1048 else
1049 conn->sec_level = BT_SECURITY_LOW;
1052 1050
1053 clear_bit(HCI_CONN_AUTH_PEND, &conn->pend); 1051 clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
1054 1052
@@ -1479,6 +1477,9 @@ static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb
1479 else 1477 else
1480 conn->power_save = 0; 1478 conn->power_save = 0;
1481 } 1479 }
1480
1481 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
1482 hci_sco_setup(conn, ev->status);
1482 } 1483 }
1483 1484
1484 hci_dev_unlock(hdev); 1485 hci_dev_unlock(hdev);
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 38f08f6b86f6..4f170a595934 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -165,6 +165,86 @@ static int hci_sock_release(struct socket *sock)
165 return 0; 165 return 0;
166} 166}
167 167
168struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
169{
170 struct list_head *p;
171 struct bdaddr_list *blacklist = &hdev->blacklist;
172
173 list_for_each(p, &blacklist->list) {
174 struct bdaddr_list *b;
175
176 b = list_entry(p, struct bdaddr_list, list);
177
178 if (bacmp(bdaddr, &b->bdaddr) == 0)
179 return b;
180 }
181
182 return NULL;
183}
184
185static int hci_blacklist_add(struct hci_dev *hdev, void __user *arg)
186{
187 bdaddr_t bdaddr;
188 struct bdaddr_list *entry;
189
190 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
191 return -EFAULT;
192
193 if (bacmp(&bdaddr, BDADDR_ANY) == 0)
194 return -EBADF;
195
196 if (hci_blacklist_lookup(hdev, &bdaddr))
197 return -EEXIST;
198
199 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
200 if (!entry)
201 return -ENOMEM;
202
203 bacpy(&entry->bdaddr, &bdaddr);
204
205 list_add(&entry->list, &hdev->blacklist.list);
206
207 return 0;
208}
209
210int hci_blacklist_clear(struct hci_dev *hdev)
211{
212 struct list_head *p, *n;
213 struct bdaddr_list *blacklist = &hdev->blacklist;
214
215 list_for_each_safe(p, n, &blacklist->list) {
216 struct bdaddr_list *b;
217
218 b = list_entry(p, struct bdaddr_list, list);
219
220 list_del(p);
221 kfree(b);
222 }
223
224 return 0;
225}
226
227static int hci_blacklist_del(struct hci_dev *hdev, void __user *arg)
228{
229 bdaddr_t bdaddr;
230 struct bdaddr_list *entry;
231
232 if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
233 return -EFAULT;
234
235 if (bacmp(&bdaddr, BDADDR_ANY) == 0)
236 return hci_blacklist_clear(hdev);
237
238 entry = hci_blacklist_lookup(hdev, &bdaddr);
239 if (!entry)
240 return -ENOENT;
241
242 list_del(&entry->list);
243 kfree(entry);
244
245 return 0;
246}
247
168/* Ioctls that require bound socket */ 248/* Ioctls that require bound socket */
169static inline int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg) 249static inline int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg)
170{ 250{
@@ -194,6 +274,16 @@ static inline int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsign
194 case HCIGETAUTHINFO: 274 case HCIGETAUTHINFO:
195 return hci_get_auth_info(hdev, (void __user *) arg); 275 return hci_get_auth_info(hdev, (void __user *) arg);
196 276
277 case HCIBLOCKADDR:
278 if (!capable(CAP_NET_ADMIN))
279 return -EACCES;
280 return hci_blacklist_add(hdev, (void __user *) arg);
281
282 case HCIUNBLOCKADDR:
283 if (!capable(CAP_NET_ADMIN))
284 return -EACCES;
285 return hci_blacklist_del(hdev, (void __user *) arg);
286
197 default: 287 default:
198 if (hdev->ioctl) 288 if (hdev->ioctl)
199 return hdev->ioctl(hdev, cmd, arg); 289 return hdev->ioctl(hdev, cmd, arg);
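From user space the new ioctls take a bare bdaddr_t. A hypothetical caller, assuming the matching HCIBLOCKADDR/HCIUNBLOCKADDR defines from the header side of this patch, a raw HCI socket bound to the adapter, and CAP_NET_ADMIN:

	bdaddr_t ba;

	str2ba("00:11:22:33:44:55", &ba);	/* BlueZ libbluetooth helper */
	if (ioctl(hci_fd, HCIBLOCKADDR, &ba) < 0)
		perror("HCIBLOCKADDR");

Per hci_blacklist_del() above, passing BDADDR_ANY to HCIUNBLOCKADDR clears the entire list rather than removing a single entry.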
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 463ffa4fe042..ce44c47eeac1 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -436,6 +436,41 @@ static const struct file_operations inquiry_cache_fops = {
436 .release = single_release, 436 .release = single_release,
437}; 437};
438 438
439static int blacklist_show(struct seq_file *f, void *p)
440{
441 struct hci_dev *hdev = f->private;
442 struct bdaddr_list *blacklist = &hdev->blacklist;
443 struct list_head *l;
444
445 hci_dev_lock_bh(hdev);
446
447 list_for_each(l, &blacklist->list) {
448 struct bdaddr_list *b;
449 bdaddr_t bdaddr;
450
451 b = list_entry(l, struct bdaddr_list, list);
452
453 baswap(&bdaddr, &b->bdaddr);
454
455 seq_printf(f, "%s\n", batostr(&bdaddr));
456 }
457
458 hci_dev_unlock_bh(hdev);
459
460 return 0;
461}
462
463static int blacklist_open(struct inode *inode, struct file *file)
464{
465 return single_open(file, blacklist_show, inode->i_private);
466}
467
468static const struct file_operations blacklist_fops = {
469 .open = blacklist_open,
470 .read = seq_read,
471 .llseek = seq_lseek,
472 .release = single_release,
473};
439int hci_register_sysfs(struct hci_dev *hdev) 474int hci_register_sysfs(struct hci_dev *hdev)
440{ 475{
441 struct device *dev = &hdev->dev; 476 struct device *dev = &hdev->dev;
@@ -465,6 +500,9 @@ int hci_register_sysfs(struct hci_dev *hdev)
465 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs, 500 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
466 hdev, &inquiry_cache_fops); 501 hdev, &inquiry_cache_fops);
467 502
503 debugfs_create_file("blacklist", 0444, hdev->debugfs,
504 hdev, &blacklist_fops);
505
468 return 0; 506 return 0;
469} 507}
470 508
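The new debugfs entry prints one address per line. With debugfs mounted in the usual place, a quick read from user space (the hci0 path is illustrative):

	FILE *f = fopen("/sys/kernel/debug/bluetooth/hci0/blacklist", "r");
	char line[32];

	if (f) {
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
	}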
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index 1b682a5aa061..9ba1e8eee37c 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -1,6 +1,8 @@
1/* 1/*
2 BlueZ - Bluetooth protocol stack for Linux 2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated 3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
4 6
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> 7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6 8
@@ -53,15 +55,9 @@
53#include <net/bluetooth/hci_core.h> 55#include <net/bluetooth/hci_core.h>
54#include <net/bluetooth/l2cap.h> 56#include <net/bluetooth/l2cap.h>
55 57
56#define VERSION "2.14" 58#define VERSION "2.15"
57 59
58#ifdef CONFIG_BT_L2CAP_EXT_FEATURES 60static int disable_ertm = 0;
59static int enable_ertm = 1;
60#else
61static int enable_ertm = 0;
62#endif
63static int max_transmit = L2CAP_DEFAULT_MAX_TX;
64static int tx_window = L2CAP_DEFAULT_TX_WINDOW;
65 61
66static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN; 62static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
67static u8 l2cap_fixed_chan[8] = { 0x02, }; 63static u8 l2cap_fixed_chan[8] = { 0x02, };
@@ -80,9 +76,12 @@ static void __l2cap_sock_close(struct sock *sk, int reason);
80static void l2cap_sock_close(struct sock *sk); 76static void l2cap_sock_close(struct sock *sk);
81static void l2cap_sock_kill(struct sock *sk); 77static void l2cap_sock_kill(struct sock *sk);
82 78
79static int l2cap_build_conf_req(struct sock *sk, void *data);
83static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, 80static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
84 u8 code, u8 ident, u16 dlen, void *data); 81 u8 code, u8 ident, u16 dlen, void *data);
85 82
83static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
84
86/* ---- L2CAP timers ---- */ 85/* ---- L2CAP timers ---- */
87static void l2cap_sock_timeout(unsigned long arg) 86static void l2cap_sock_timeout(unsigned long arg)
88{ 87{
@@ -278,6 +277,24 @@ static void l2cap_chan_del(struct sock *sk, int err)
278 parent->sk_data_ready(parent, 0); 277 parent->sk_data_ready(parent, 0);
279 } else 278 } else
280 sk->sk_state_change(sk); 279 sk->sk_state_change(sk);
280
281 skb_queue_purge(TX_QUEUE(sk));
282
283 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
284 struct srej_list *l, *tmp;
285
286 del_timer(&l2cap_pi(sk)->retrans_timer);
287 del_timer(&l2cap_pi(sk)->monitor_timer);
288 del_timer(&l2cap_pi(sk)->ack_timer);
289
290 skb_queue_purge(SREJ_QUEUE(sk));
291 skb_queue_purge(BUSY_QUEUE(sk));
292
293 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
294 list_del(&l->list);
295 kfree(l);
296 }
297 }
281} 298}
282 299
283/* Service level security */ 300/* Service level security */
@@ -351,8 +368,12 @@ static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
351 struct sk_buff *skb; 368 struct sk_buff *skb;
352 struct l2cap_hdr *lh; 369 struct l2cap_hdr *lh;
353 struct l2cap_conn *conn = pi->conn; 370 struct l2cap_conn *conn = pi->conn;
371 struct sock *sk = (struct sock *)pi;
354 int count, hlen = L2CAP_HDR_SIZE + 2; 372 int count, hlen = L2CAP_HDR_SIZE + 2;
355 373
374 if (sk->sk_state != BT_CONNECTED)
375 return;
376
356 if (pi->fcs == L2CAP_FCS_CRC16) 377 if (pi->fcs == L2CAP_FCS_CRC16)
357 hlen += 2; 378 hlen += 2;
358 379
@@ -401,6 +422,11 @@ static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
401 l2cap_send_sframe(pi, control); 422 l2cap_send_sframe(pi, control);
402} 423}
403 424
425static inline int __l2cap_no_conn_pending(struct sock *sk)
426{
427 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
428}
429
404static void l2cap_do_start(struct sock *sk) 430static void l2cap_do_start(struct sock *sk)
405{ 431{
406 struct l2cap_conn *conn = l2cap_pi(sk)->conn; 432 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
@@ -409,12 +435,13 @@ static void l2cap_do_start(struct sock *sk)
409 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)) 435 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
410 return; 436 return;
411 437
412 if (l2cap_check_security(sk)) { 438 if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
413 struct l2cap_conn_req req; 439 struct l2cap_conn_req req;
414 req.scid = cpu_to_le16(l2cap_pi(sk)->scid); 440 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
415 req.psm = l2cap_pi(sk)->psm; 441 req.psm = l2cap_pi(sk)->psm;
416 442
417 l2cap_pi(sk)->ident = l2cap_get_ident(conn); 443 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
444 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
418 445
419 l2cap_send_cmd(conn, l2cap_pi(sk)->ident, 446 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
420 L2CAP_CONN_REQ, sizeof(req), &req); 447 L2CAP_CONN_REQ, sizeof(req), &req);
@@ -434,24 +461,57 @@ static void l2cap_do_start(struct sock *sk)
434 } 461 }
435} 462}
436 463
437static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk) 464static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
465{
466 u32 local_feat_mask = l2cap_feat_mask;
467 if (!disable_ertm)
468 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
469
470 switch (mode) {
471 case L2CAP_MODE_ERTM:
472 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
473 case L2CAP_MODE_STREAMING:
474 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
475 default:
476 return 0x00;
477 }
478}
479
480static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
438{ 481{
439 struct l2cap_disconn_req req; 482 struct l2cap_disconn_req req;
440 483
484 if (!conn)
485 return;
486
487 skb_queue_purge(TX_QUEUE(sk));
488
489 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
490 del_timer(&l2cap_pi(sk)->retrans_timer);
491 del_timer(&l2cap_pi(sk)->monitor_timer);
492 del_timer(&l2cap_pi(sk)->ack_timer);
493 }
494
441 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid); 495 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
442 req.scid = cpu_to_le16(l2cap_pi(sk)->scid); 496 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
443 l2cap_send_cmd(conn, l2cap_get_ident(conn), 497 l2cap_send_cmd(conn, l2cap_get_ident(conn),
444 L2CAP_DISCONN_REQ, sizeof(req), &req); 498 L2CAP_DISCONN_REQ, sizeof(req), &req);
499
500 sk->sk_state = BT_DISCONN;
501 sk->sk_err = err;
445} 502}
446 503
447/* ---- L2CAP connections ---- */ 504/* ---- L2CAP connections ---- */
448static void l2cap_conn_start(struct l2cap_conn *conn) 505static void l2cap_conn_start(struct l2cap_conn *conn)
449{ 506{
450 struct l2cap_chan_list *l = &conn->chan_list; 507 struct l2cap_chan_list *l = &conn->chan_list;
508 struct sock_del_list del, *tmp1, *tmp2;
451 struct sock *sk; 509 struct sock *sk;
452 510
453 BT_DBG("conn %p", conn); 511 BT_DBG("conn %p", conn);
454 512
513 INIT_LIST_HEAD(&del.list);
514
455 read_lock(&l->lock); 515 read_lock(&l->lock);
456 516
457 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) { 517 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
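A worked example of the relocated l2cap_mode_supported() (values illustrative): with ERTM enabled locally (disable_ertm == 0), local_feat_mask is L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING. If the remote feat_mask advertises only L2CAP_FEAT_ERTM:

	l2cap_mode_supported(L2CAP_MODE_ERTM, feat_mask)
		/* returns L2CAP_FEAT_ERTM & feat_mask & local_feat_mask,
		 * i.e. non-zero: ERTM is usable on this link */
	l2cap_mode_supported(L2CAP_MODE_STREAMING, feat_mask)
		/* returns 0: streaming is refused, and l2cap_conn_start()
		 * below queues the socket for closing when
		 * L2CAP_CONF_STATE2_DEVICE is set */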
@@ -464,18 +524,38 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
464 } 524 }
465 525
466 if (sk->sk_state == BT_CONNECT) { 526 if (sk->sk_state == BT_CONNECT) {
467 if (l2cap_check_security(sk)) { 527 struct l2cap_conn_req req;
468 struct l2cap_conn_req req;
469 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
470 req.psm = l2cap_pi(sk)->psm;
471 528
472 l2cap_pi(sk)->ident = l2cap_get_ident(conn); 529 if (!l2cap_check_security(sk) ||
530 !__l2cap_no_conn_pending(sk)) {
531 bh_unlock_sock(sk);
532 continue;
533 }
473 534
474 l2cap_send_cmd(conn, l2cap_pi(sk)->ident, 535 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
475 L2CAP_CONN_REQ, sizeof(req), &req); 536 conn->feat_mask)
537 && l2cap_pi(sk)->conf_state &
538 L2CAP_CONF_STATE2_DEVICE) {
539 tmp1 = kzalloc(sizeof(struct sock_del_list),
540 GFP_ATOMIC);
541 tmp1->sk = sk;
542 list_add_tail(&tmp1->list, &del.list);
543 bh_unlock_sock(sk);
544 continue;
476 } 545 }
546
547 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
548 req.psm = l2cap_pi(sk)->psm;
549
550 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
551 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
552
553 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
554 L2CAP_CONN_REQ, sizeof(req), &req);
555
477 } else if (sk->sk_state == BT_CONNECT2) { 556 } else if (sk->sk_state == BT_CONNECT2) {
478 struct l2cap_conn_rsp rsp; 557 struct l2cap_conn_rsp rsp;
558 char buf[128];
479 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid); 559 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
480 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid); 560 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
481 561
@@ -498,12 +578,31 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
498 578
499 l2cap_send_cmd(conn, l2cap_pi(sk)->ident, 579 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
500 L2CAP_CONN_RSP, sizeof(rsp), &rsp); 580 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
581
582 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
583 rsp.result != L2CAP_CR_SUCCESS) {
584 bh_unlock_sock(sk);
585 continue;
586 }
587
588 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
589 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
590 l2cap_build_conf_req(sk, buf), buf);
591 l2cap_pi(sk)->num_conf_req++;
501 } 592 }
502 593
503 bh_unlock_sock(sk); 594 bh_unlock_sock(sk);
504 } 595 }
505 596
506 read_unlock(&l->lock); 597 read_unlock(&l->lock);
598
599 list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
600 bh_lock_sock(tmp1->sk);
601 __l2cap_sock_close(tmp1->sk, ECONNRESET);
602 bh_unlock_sock(tmp1->sk);
603 list_del(&tmp1->list);
604 kfree(tmp1);
605 }
507} 606}
508 607
509static void l2cap_conn_ready(struct l2cap_conn *conn) 608static void l2cap_conn_ready(struct l2cap_conn *conn)
@@ -732,9 +831,8 @@ static void __l2cap_sock_close(struct sock *sk, int reason)
732 sk->sk_type == SOCK_STREAM) { 831 sk->sk_type == SOCK_STREAM) {
733 struct l2cap_conn *conn = l2cap_pi(sk)->conn; 832 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
734 833
735 sk->sk_state = BT_DISCONN;
736 l2cap_sock_set_timer(sk, sk->sk_sndtimeo); 834 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
737 l2cap_send_disconn_req(conn, sk); 835 l2cap_send_disconn_req(conn, sk, reason);
738 } else 836 } else
739 l2cap_chan_del(sk, reason); 837 l2cap_chan_del(sk, reason);
740 break; 838 break;
@@ -794,6 +892,7 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
794 892
795 pi->imtu = l2cap_pi(parent)->imtu; 893 pi->imtu = l2cap_pi(parent)->imtu;
796 pi->omtu = l2cap_pi(parent)->omtu; 894 pi->omtu = l2cap_pi(parent)->omtu;
895 pi->conf_state = l2cap_pi(parent)->conf_state;
797 pi->mode = l2cap_pi(parent)->mode; 896 pi->mode = l2cap_pi(parent)->mode;
798 pi->fcs = l2cap_pi(parent)->fcs; 897 pi->fcs = l2cap_pi(parent)->fcs;
799 pi->max_tx = l2cap_pi(parent)->max_tx; 898 pi->max_tx = l2cap_pi(parent)->max_tx;
@@ -804,13 +903,15 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
804 } else { 903 } else {
805 pi->imtu = L2CAP_DEFAULT_MTU; 904 pi->imtu = L2CAP_DEFAULT_MTU;
806 pi->omtu = 0; 905 pi->omtu = 0;
807 if (enable_ertm && sk->sk_type == SOCK_STREAM) 906 if (!disable_ertm && sk->sk_type == SOCK_STREAM) {
808 pi->mode = L2CAP_MODE_ERTM; 907 pi->mode = L2CAP_MODE_ERTM;
809 else 908 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
909 } else {
810 pi->mode = L2CAP_MODE_BASIC; 910 pi->mode = L2CAP_MODE_BASIC;
811 pi->max_tx = max_transmit; 911 }
912 pi->max_tx = L2CAP_DEFAULT_MAX_TX;
812 pi->fcs = L2CAP_FCS_CRC16; 913 pi->fcs = L2CAP_FCS_CRC16;
813 pi->tx_win = tx_window; 914 pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
814 pi->sec_level = BT_SECURITY_LOW; 915 pi->sec_level = BT_SECURITY_LOW;
815 pi->role_switch = 0; 916 pi->role_switch = 0;
816 pi->force_reliable = 0; 917 pi->force_reliable = 0;
@@ -1059,7 +1160,7 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al
1059 break; 1160 break;
1060 case L2CAP_MODE_ERTM: 1161 case L2CAP_MODE_ERTM:
1061 case L2CAP_MODE_STREAMING: 1162 case L2CAP_MODE_STREAMING:
1062 if (enable_ertm) 1163 if (!disable_ertm)
1063 break; 1164 break;
1064 /* fall through */ 1165 /* fall through */
1065 default: 1166 default:
@@ -1076,6 +1177,7 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al
1076 1177
1077 case BT_CONNECTED: 1178 case BT_CONNECTED:
1078 /* Already connected */ 1179 /* Already connected */
1180 err = -EISCONN;
1079 goto done; 1181 goto done;
1080 1182
1081 case BT_OPEN: 1183 case BT_OPEN:
@@ -1124,7 +1226,7 @@ static int l2cap_sock_listen(struct socket *sock, int backlog)
1124 break; 1226 break;
1125 case L2CAP_MODE_ERTM: 1227 case L2CAP_MODE_ERTM:
1126 case L2CAP_MODE_STREAMING: 1228 case L2CAP_MODE_STREAMING:
1127 if (enable_ertm) 1229 if (!disable_ertm)
1128 break; 1230 break;
1129 /* fall through */ 1231 /* fall through */
1130 default: 1232 default:
@@ -1277,9 +1379,11 @@ static void l2cap_monitor_timeout(unsigned long arg)
1277{ 1379{
1278 struct sock *sk = (void *) arg; 1380 struct sock *sk = (void *) arg;
1279 1381
1382 BT_DBG("sk %p", sk);
1383
1280 bh_lock_sock(sk); 1384 bh_lock_sock(sk);
1281 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) { 1385 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1282 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk); 1386 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
1283 bh_unlock_sock(sk); 1387 bh_unlock_sock(sk);
1284 return; 1388 return;
1285 } 1389 }
@@ -1295,6 +1399,8 @@ static void l2cap_retrans_timeout(unsigned long arg)
1295{ 1399{
1296 struct sock *sk = (void *) arg; 1400 struct sock *sk = (void *) arg;
1297 1401
1402 BT_DBG("sk %p", sk);
1403
1298 bh_lock_sock(sk); 1404 bh_lock_sock(sk);
1299 l2cap_pi(sk)->retry_count = 1; 1405 l2cap_pi(sk)->retry_count = 1;
1300 __mod_monitor_timer(); 1406 __mod_monitor_timer();
@@ -1333,7 +1439,7 @@ static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1333 hci_send_acl(pi->conn->hcon, skb, 0); 1439 hci_send_acl(pi->conn->hcon, skb, 0);
1334} 1440}
1335 1441
1336static int l2cap_streaming_send(struct sock *sk) 1442static void l2cap_streaming_send(struct sock *sk)
1337{ 1443{
1338 struct sk_buff *skb, *tx_skb; 1444 struct sk_buff *skb, *tx_skb;
1339 struct l2cap_pinfo *pi = l2cap_pi(sk); 1445 struct l2cap_pinfo *pi = l2cap_pi(sk);
@@ -1363,7 +1469,6 @@ static int l2cap_streaming_send(struct sock *sk)
1363 skb = skb_dequeue(TX_QUEUE(sk)); 1469 skb = skb_dequeue(TX_QUEUE(sk));
1364 kfree_skb(skb); 1470 kfree_skb(skb);
1365 } 1471 }
1366 return 0;
1367} 1472}
1368 1473
1369static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq) 1474static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
@@ -1387,15 +1492,22 @@ static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1387 1492
1388 if (pi->remote_max_tx && 1493 if (pi->remote_max_tx &&
1389 bt_cb(skb)->retries == pi->remote_max_tx) { 1494 bt_cb(skb)->retries == pi->remote_max_tx) {
1390 l2cap_send_disconn_req(pi->conn, sk); 1495 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1391 return; 1496 return;
1392 } 1497 }
1393 1498
1394 tx_skb = skb_clone(skb, GFP_ATOMIC); 1499 tx_skb = skb_clone(skb, GFP_ATOMIC);
1395 bt_cb(skb)->retries++; 1500 bt_cb(skb)->retries++;
1396 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE); 1501 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1502
1503 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1504 control |= L2CAP_CTRL_FINAL;
1505 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1506 }
1507
1397 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT) 1508 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1398 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT); 1509 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1510
1399 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE); 1511 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1400 1512
1401 if (pi->fcs == L2CAP_FCS_CRC16) { 1513 if (pi->fcs == L2CAP_FCS_CRC16) {
@@ -1413,15 +1525,14 @@ static int l2cap_ertm_send(struct sock *sk)
1413 u16 control, fcs; 1525 u16 control, fcs;
1414 int nsent = 0; 1526 int nsent = 0;
1415 1527
1416 if (pi->conn_state & L2CAP_CONN_WAIT_F) 1528 if (sk->sk_state != BT_CONNECTED)
1417 return 0; 1529 return -ENOTCONN;
1418 1530
1419 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk)) && 1531 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1420 !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
1421 1532
1422 if (pi->remote_max_tx && 1533 if (pi->remote_max_tx &&
1423 bt_cb(skb)->retries == pi->remote_max_tx) { 1534 bt_cb(skb)->retries == pi->remote_max_tx) {
1424 l2cap_send_disconn_req(pi->conn, sk); 1535 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1425 break; 1536 break;
1426 } 1537 }
1427 1538
@@ -1430,6 +1541,8 @@ static int l2cap_ertm_send(struct sock *sk)
1430 bt_cb(skb)->retries++; 1541 bt_cb(skb)->retries++;
1431 1542
1432 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE); 1543 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1544 control &= L2CAP_CTRL_SAR;
1545
1433 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) { 1546 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1434 control |= L2CAP_CTRL_FINAL; 1547 control |= L2CAP_CTRL_FINAL;
1435 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT; 1548 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
@@ -1470,16 +1583,11 @@ static int l2cap_retransmit_frames(struct sock *sk)
1470 struct l2cap_pinfo *pi = l2cap_pi(sk); 1583 struct l2cap_pinfo *pi = l2cap_pi(sk);
1471 int ret; 1584 int ret;
1472 1585
1473 spin_lock_bh(&pi->send_lock);
1474
1475 if (!skb_queue_empty(TX_QUEUE(sk))) 1586 if (!skb_queue_empty(TX_QUEUE(sk)))
1476 sk->sk_send_head = TX_QUEUE(sk)->next; 1587 sk->sk_send_head = TX_QUEUE(sk)->next;
1477 1588
1478 pi->next_tx_seq = pi->expected_ack_seq; 1589 pi->next_tx_seq = pi->expected_ack_seq;
1479 ret = l2cap_ertm_send(sk); 1590 ret = l2cap_ertm_send(sk);
1480
1481 spin_unlock_bh(&pi->send_lock);
1482
1483 return ret; 1591 return ret;
1484} 1592}
1485 1593
@@ -1487,7 +1595,6 @@ static void l2cap_send_ack(struct l2cap_pinfo *pi)
1487{ 1595{
1488 struct sock *sk = (struct sock *)pi; 1596 struct sock *sk = (struct sock *)pi;
1489 u16 control = 0; 1597 u16 control = 0;
1490 int nframes;
1491 1598
1492 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT; 1599 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1493 1600
@@ -1498,11 +1605,7 @@ static void l2cap_send_ack(struct l2cap_pinfo *pi)
1498 return; 1605 return;
1499 } 1606 }
1500 1607
1501 spin_lock_bh(&pi->send_lock); 1608 if (l2cap_ertm_send(sk) > 0)
1502 nframes = l2cap_ertm_send(sk);
1503 spin_unlock_bh(&pi->send_lock);
1504
1505 if (nframes > 0)
1506 return; 1609 return;
1507 1610
1508 control |= L2CAP_SUPER_RCV_READY; 1611 control |= L2CAP_SUPER_RCV_READY;
@@ -1697,10 +1800,8 @@ static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, siz
1697 size += buflen; 1800 size += buflen;
1698 } 1801 }
1699 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk)); 1802 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1700 spin_lock_bh(&pi->send_lock);
1701 if (sk->sk_send_head == NULL) 1803 if (sk->sk_send_head == NULL)
1702 sk->sk_send_head = sar_queue.next; 1804 sk->sk_send_head = sar_queue.next;
1703 spin_unlock_bh(&pi->send_lock);
1704 1805
1705 return size; 1806 return size;
1706} 1807}
@@ -1745,7 +1846,7 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
1745 case L2CAP_MODE_BASIC: 1846 case L2CAP_MODE_BASIC:
1746 /* Check outgoing MTU */ 1847 /* Check outgoing MTU */
1747 if (len > pi->omtu) { 1848 if (len > pi->omtu) {
1748 err = -EINVAL; 1849 err = -EMSGSIZE;
1749 goto done; 1850 goto done;
1750 } 1851 }
1751 1852
@@ -1772,14 +1873,9 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
1772 } 1873 }
1773 __skb_queue_tail(TX_QUEUE(sk), skb); 1874 __skb_queue_tail(TX_QUEUE(sk), skb);
1774 1875
1775 if (pi->mode == L2CAP_MODE_ERTM)
1776 spin_lock_bh(&pi->send_lock);
1777
1778 if (sk->sk_send_head == NULL) 1876 if (sk->sk_send_head == NULL)
1779 sk->sk_send_head = skb; 1877 sk->sk_send_head = skb;
1780 1878
1781 if (pi->mode == L2CAP_MODE_ERTM)
1782 spin_unlock_bh(&pi->send_lock);
1783 } else { 1879 } else {
1784 /* Segment SDU into multiples PDUs */ 1880 /* Segment SDU into multiples PDUs */
1785 err = l2cap_sar_segment_sdu(sk, msg, len); 1881 err = l2cap_sar_segment_sdu(sk, msg, len);
@@ -1788,11 +1884,14 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
1788 } 1884 }
1789 1885
1790 if (pi->mode == L2CAP_MODE_STREAMING) { 1886 if (pi->mode == L2CAP_MODE_STREAMING) {
1791 err = l2cap_streaming_send(sk); 1887 l2cap_streaming_send(sk);
1792 } else { 1888 } else {
1793 spin_lock_bh(&pi->send_lock); 1889 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY &&
                                                                          1890                        pi->conn_state & L2CAP_CONN_WAIT_F) {
1891 err = len;
1892 break;
1893 }
1794 err = l2cap_ertm_send(sk); 1894 err = l2cap_ertm_send(sk);
1795 spin_unlock_bh(&pi->send_lock);
1796 } 1895 }
1797 1896
1798 if (err >= 0) 1897 if (err >= 0)
@@ -1801,7 +1900,7 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
1801 1900
1802 default: 1901 default:
1803 BT_DBG("bad state %1.1x", pi->mode); 1902 BT_DBG("bad state %1.1x", pi->mode);
1804 err = -EINVAL; 1903 err = -EBADFD;
1805 } 1904 }
1806 1905
1807done: 1906done:
@@ -1817,6 +1916,8 @@ static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct ms
1817 1916
1818 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) { 1917 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1819 struct l2cap_conn_rsp rsp; 1918 struct l2cap_conn_rsp rsp;
1919 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1920 u8 buf[128];
1820 1921
1821 sk->sk_state = BT_CONFIG; 1922 sk->sk_state = BT_CONFIG;
1822 1923
@@ -1827,6 +1928,16 @@ static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct ms
1827 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident, 1928 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1828 L2CAP_CONN_RSP, sizeof(rsp), &rsp); 1929 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1829 1930
1931 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) {
1932 release_sock(sk);
1933 return 0;
1934 }
1935
1936 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1937 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1938 l2cap_build_conf_req(sk, buf), buf);
1939 l2cap_pi(sk)->num_conf_req++;
1940
1830 release_sock(sk); 1941 release_sock(sk);
1831 return 0; 1942 return 0;
1832 } 1943 }
@@ -1863,13 +1974,19 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
1863 break; 1974 break;
1864 } 1975 }
1865 1976
1977 if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
1978 err = -EINVAL;
1979 break;
1980 }
1981
1866 l2cap_pi(sk)->mode = opts.mode; 1982 l2cap_pi(sk)->mode = opts.mode;
1867 switch (l2cap_pi(sk)->mode) { 1983 switch (l2cap_pi(sk)->mode) {
1868 case L2CAP_MODE_BASIC: 1984 case L2CAP_MODE_BASIC:
1985 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
1869 break; 1986 break;
1870 case L2CAP_MODE_ERTM: 1987 case L2CAP_MODE_ERTM:
1871 case L2CAP_MODE_STREAMING: 1988 case L2CAP_MODE_STREAMING:
1872 if (enable_ertm) 1989 if (!disable_ertm)
1873 break; 1990 break;
1874 /* fall through */ 1991 /* fall through */
1875 default: 1992 default:
@@ -2137,6 +2254,10 @@ static int l2cap_sock_shutdown(struct socket *sock, int how)
2137 err = bt_sock_wait_state(sk, BT_CLOSED, 2254 err = bt_sock_wait_state(sk, BT_CLOSED,
2138 sk->sk_lingertime); 2255 sk->sk_lingertime);
2139 } 2256 }
2257
2258 if (!err && sk->sk_err)
2259 err = -sk->sk_err;
2260
2140 release_sock(sk); 2261 release_sock(sk);
2141 return err; 2262 return err;
2142} 2263}
@@ -2357,25 +2478,10 @@ static inline void l2cap_ertm_init(struct sock *sk)
2357 2478
2358 __skb_queue_head_init(SREJ_QUEUE(sk)); 2479 __skb_queue_head_init(SREJ_QUEUE(sk));
2359 __skb_queue_head_init(BUSY_QUEUE(sk)); 2480 __skb_queue_head_init(BUSY_QUEUE(sk));
2360 spin_lock_init(&l2cap_pi(sk)->send_lock);
2361 2481
2362 INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work); 2482 INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
2363}
2364 2483
2365static int l2cap_mode_supported(__u8 mode, __u32 feat_mask) 2484 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
2366{
2367 u32 local_feat_mask = l2cap_feat_mask;
2368 if (enable_ertm)
2369 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2370
2371 switch (mode) {
2372 case L2CAP_MODE_ERTM:
2373 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2374 case L2CAP_MODE_STREAMING:
2375 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
2376 default:
2377 return 0x00;
2378 }
2379} 2485}
2380 2486
2381static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask) 2487static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
@@ -2406,10 +2512,10 @@ static int l2cap_build_conf_req(struct sock *sk, void *data)
2406 switch (pi->mode) { 2512 switch (pi->mode) {
2407 case L2CAP_MODE_STREAMING: 2513 case L2CAP_MODE_STREAMING:
2408 case L2CAP_MODE_ERTM: 2514 case L2CAP_MODE_ERTM:
2409 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE; 2515 if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
2410 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask)) 2516 break;
2411 l2cap_send_disconn_req(pi->conn, sk); 2517
2412 break; 2518 /* fall through */
2413 default: 2519 default:
2414 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask); 2520 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2415 break; 2521 break;
@@ -2420,6 +2526,14 @@ done:
2420 case L2CAP_MODE_BASIC: 2526 case L2CAP_MODE_BASIC:
2421 if (pi->imtu != L2CAP_DEFAULT_MTU) 2527 if (pi->imtu != L2CAP_DEFAULT_MTU)
2422 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu); 2528 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2529
2530 rfc.mode = L2CAP_MODE_BASIC;
2531 rfc.txwin_size = 0;
2532 rfc.max_transmit = 0;
2533 rfc.retrans_timeout = 0;
2534 rfc.monitor_timeout = 0;
2535 rfc.max_pdu_size = 0;
2536
2423 break; 2537 break;
2424 2538
2425 case L2CAP_MODE_ERTM: 2539 case L2CAP_MODE_ERTM:
@@ -2432,9 +2546,6 @@ done:
2432 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10) 2546 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2433 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10); 2547 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2434 2548
2435 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2436 sizeof(rfc), (unsigned long) &rfc);
2437
2438 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS)) 2549 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2439 break; 2550 break;
2440 2551
@@ -2455,9 +2566,6 @@ done:
2455 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10) 2566 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2456 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10); 2567 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2457 2568
2458 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2459 sizeof(rfc), (unsigned long) &rfc);
2460
2461 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS)) 2569 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2462 break; 2570 break;
2463 2571
@@ -2469,6 +2577,9 @@ done:
2469 break; 2577 break;
2470 } 2578 }
2471 2579
2580 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2581 (unsigned long) &rfc);
2582
2472 /* FIXME: Need actual value of the flush timeout */ 2583 /* FIXME: Need actual value of the flush timeout */
2473 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO) 2584 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2474 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to); 2585 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
@@ -2533,18 +2644,21 @@ static int l2cap_parse_conf_req(struct sock *sk, void *data)
2533 } 2644 }
2534 } 2645 }
2535 2646
2536 if (pi->num_conf_rsp || pi->num_conf_req) 2647 if (pi->num_conf_rsp || pi->num_conf_req > 1)
2537 goto done; 2648 goto done;
2538 2649
2539 switch (pi->mode) { 2650 switch (pi->mode) {
2540 case L2CAP_MODE_STREAMING: 2651 case L2CAP_MODE_STREAMING:
2541 case L2CAP_MODE_ERTM: 2652 case L2CAP_MODE_ERTM:
2542 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE; 2653 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
2543 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask)) 2654 pi->mode = l2cap_select_mode(rfc.mode,
2655 pi->conn->feat_mask);
2656 break;
2657 }
2658
2659 if (pi->mode != rfc.mode)
2544 return -ECONNREFUSED; 2660 return -ECONNREFUSED;
2545 break; 2661
2546 default:
2547 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2548 break; 2662 break;
2549 } 2663 }
2550 2664
@@ -2667,7 +2781,6 @@ static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data,
2667 rfc.mode != pi->mode) 2781 rfc.mode != pi->mode)
2668 return -ECONNREFUSED; 2782 return -ECONNREFUSED;
2669 2783
2670 pi->mode = rfc.mode;
2671 pi->fcs = 0; 2784 pi->fcs = 0;
2672 2785
2673 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, 2786 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
@@ -2676,6 +2789,11 @@ static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data,
2676 } 2789 }
2677 } 2790 }
2678 2791
2792 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
2793 return -ECONNREFUSED;
2794
2795 pi->mode = rfc.mode;
2796
2679 if (*result == L2CAP_CONF_SUCCESS) { 2797 if (*result == L2CAP_CONF_SUCCESS) {
2680 switch (rfc.mode) { 2798 switch (rfc.mode) {
2681 case L2CAP_MODE_ERTM: 2799 case L2CAP_MODE_ERTM:
@@ -2770,7 +2888,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd
2770 struct l2cap_chan_list *list = &conn->chan_list; 2888 struct l2cap_chan_list *list = &conn->chan_list;
2771 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data; 2889 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2772 struct l2cap_conn_rsp rsp; 2890 struct l2cap_conn_rsp rsp;
2773 struct sock *sk, *parent; 2891 struct sock *parent, *uninitialized_var(sk);
2774 int result, status = L2CAP_CS_NO_INFO; 2892 int result, status = L2CAP_CS_NO_INFO;
2775 2893
2776 u16 dcid = 0, scid = __le16_to_cpu(req->scid); 2894 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
@@ -2879,6 +2997,15 @@ sendresp:
2879 L2CAP_INFO_REQ, sizeof(info), &info); 2997 L2CAP_INFO_REQ, sizeof(info), &info);
2880 } 2998 }
2881 2999
3000 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
3001 result == L2CAP_CR_SUCCESS) {
3002 u8 buf[128];
3003 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
3004 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3005 l2cap_build_conf_req(sk, buf), buf);
3006 l2cap_pi(sk)->num_conf_req++;
3007 }
3008
2882 return 0; 3009 return 0;
2883} 3010}
2884 3011
@@ -2899,11 +3026,11 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
2899 if (scid) { 3026 if (scid) {
2900 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid); 3027 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2901 if (!sk) 3028 if (!sk)
2902 return 0; 3029 return -EFAULT;
2903 } else { 3030 } else {
2904 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident); 3031 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2905 if (!sk) 3032 if (!sk)
2906 return 0; 3033 return -EFAULT;
2907 } 3034 }
2908 3035
2909 switch (result) { 3036 switch (result) {
@@ -2911,10 +3038,13 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
2911 sk->sk_state = BT_CONFIG; 3038 sk->sk_state = BT_CONFIG;
2912 l2cap_pi(sk)->ident = 0; 3039 l2cap_pi(sk)->ident = 0;
2913 l2cap_pi(sk)->dcid = dcid; 3040 l2cap_pi(sk)->dcid = dcid;
2914 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2915
2916 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND; 3041 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
2917 3042
3043 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
3044 break;
3045
3046 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
3047
2918 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, 3048 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2919 l2cap_build_conf_req(sk, req), req); 3049 l2cap_build_conf_req(sk, req), req);
2920 l2cap_pi(sk)->num_conf_req++; 3050 l2cap_pi(sk)->num_conf_req++;
@@ -2950,8 +3080,14 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2950 if (!sk) 3080 if (!sk)
2951 return -ENOENT; 3081 return -ENOENT;
2952 3082
2953 if (sk->sk_state == BT_DISCONN) 3083 if (sk->sk_state != BT_CONFIG) {
3084 struct l2cap_cmd_rej rej;
3085
3086 rej.reason = cpu_to_le16(0x0002);
3087 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
3088 sizeof(rej), &rej);
2954 goto unlock; 3089 goto unlock;
3090 }
2955 3091
2956 /* Reject if config buffer is too small. */ 3092 /* Reject if config buffer is too small. */
2957 len = cmd_len - sizeof(*req); 3093 len = cmd_len - sizeof(*req);
@@ -2977,7 +3113,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
2977 /* Complete config. */ 3113 /* Complete config. */
2978 len = l2cap_parse_conf_req(sk, rsp); 3114 len = l2cap_parse_conf_req(sk, rsp);
2979 if (len < 0) { 3115 if (len < 0) {
2980 l2cap_send_disconn_req(conn, sk); 3116 l2cap_send_disconn_req(conn, sk, ECONNRESET);
2981 goto unlock; 3117 goto unlock;
2982 } 3118 }
2983 3119
@@ -3047,7 +3183,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
3047 char req[64]; 3183 char req[64];
3048 3184
3049 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) { 3185 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3050 l2cap_send_disconn_req(conn, sk); 3186 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3051 goto done; 3187 goto done;
3052 } 3188 }
3053 3189
@@ -3056,7 +3192,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
3056 len = l2cap_parse_conf_rsp(sk, rsp->data, 3192 len = l2cap_parse_conf_rsp(sk, rsp->data,
3057 len, req, &result); 3193 len, req, &result);
3058 if (len < 0) { 3194 if (len < 0) {
3059 l2cap_send_disconn_req(conn, sk); 3195 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3060 goto done; 3196 goto done;
3061 } 3197 }
3062 3198
@@ -3069,10 +3205,9 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
3069 } 3205 }
3070 3206
3071 default: 3207 default:
3072 sk->sk_state = BT_DISCONN;
3073 sk->sk_err = ECONNRESET; 3208 sk->sk_err = ECONNRESET;
3074 l2cap_sock_set_timer(sk, HZ * 5); 3209 l2cap_sock_set_timer(sk, HZ * 5);
3075 l2cap_send_disconn_req(conn, sk); 3210 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3076 goto done; 3211 goto done;
3077 } 3212 }
3078 3213
@@ -3123,16 +3258,6 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd
3123 3258
3124 sk->sk_shutdown = SHUTDOWN_MASK; 3259 sk->sk_shutdown = SHUTDOWN_MASK;
3125 3260
3126 skb_queue_purge(TX_QUEUE(sk));
3127
3128 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
3129 skb_queue_purge(SREJ_QUEUE(sk));
3130 skb_queue_purge(BUSY_QUEUE(sk));
3131 del_timer(&l2cap_pi(sk)->retrans_timer);
3132 del_timer(&l2cap_pi(sk)->monitor_timer);
3133 del_timer(&l2cap_pi(sk)->ack_timer);
3134 }
3135
3136 l2cap_chan_del(sk, ECONNRESET); 3261 l2cap_chan_del(sk, ECONNRESET);
3137 bh_unlock_sock(sk); 3262 bh_unlock_sock(sk);
3138 3263
@@ -3155,16 +3280,6 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd
3155 if (!sk) 3280 if (!sk)
3156 return 0; 3281 return 0;
3157 3282
3158 skb_queue_purge(TX_QUEUE(sk));
3159
3160 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
3161 skb_queue_purge(SREJ_QUEUE(sk));
3162 skb_queue_purge(BUSY_QUEUE(sk));
3163 del_timer(&l2cap_pi(sk)->retrans_timer);
3164 del_timer(&l2cap_pi(sk)->monitor_timer);
3165 del_timer(&l2cap_pi(sk)->ack_timer);
3166 }
3167
3168 l2cap_chan_del(sk, 0); 3283 l2cap_chan_del(sk, 0);
3169 bh_unlock_sock(sk); 3284 bh_unlock_sock(sk);
3170 3285
@@ -3187,7 +3302,7 @@ static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cm
3187 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf; 3302 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3188 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK); 3303 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3189 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS); 3304 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3190 if (enable_ertm) 3305 if (!disable_ertm)
3191 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING 3306 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3192 | L2CAP_FEAT_FCS; 3307 | L2CAP_FEAT_FCS;
3193 put_unaligned_le32(feat_mask, rsp->data); 3308 put_unaligned_le32(feat_mask, rsp->data);
@@ -3352,7 +3467,7 @@ static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3352 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size); 3467 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3353 3468
3354 if (our_fcs != rcv_fcs) 3469 if (our_fcs != rcv_fcs)
3355 return -EINVAL; 3470 return -EBADMSG;
3356 } 3471 }
3357 return 0; 3472 return 0;
3358} 3473}
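
The FCS hunk above only changes the error code, but -EBADMSG is the better fit: the frame is well-formed L2CAP whose CRC16 trailer simply does not match. A minimal sketch of the check itself, assuming the kernel's lib/crc16 helper and a little-endian 2-byte FCS at the tail of the PDU (function name illustrative):

#include <linux/crc16.h>
#include <asm/unaligned.h>

/* Verify a CRC16 FCS computed over header + payload; the last two
 * bytes of the frame carry the transmitted value. */
static int check_fcs(const u8 *frame, size_t len)
{
        u16 rcv_fcs, our_fcs;

        if (len < 2)
                return -EBADMSG;

        rcv_fcs = get_unaligned_le16(frame + len - 2);
        our_fcs = crc16(0, frame, len - 2);

        return our_fcs == rcv_fcs ? 0 : -EBADMSG;
}
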
@@ -3363,25 +3478,19 @@ static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
3363 u16 control = 0; 3478 u16 control = 0;
3364 3479
3365 pi->frames_sent = 0; 3480 pi->frames_sent = 0;
3366 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
3367 3481
3368 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT; 3482 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3369 3483
3370 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) { 3484 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3371 control |= L2CAP_SUPER_RCV_NOT_READY | L2CAP_CTRL_FINAL; 3485 control |= L2CAP_SUPER_RCV_NOT_READY;
3372 l2cap_send_sframe(pi, control); 3486 l2cap_send_sframe(pi, control);
3373 pi->conn_state |= L2CAP_CONN_RNR_SENT; 3487 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3374 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
3375 } 3488 }
3376 3489
3377 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY && pi->unacked_frames > 0) 3490 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
3378 __mod_retrans_timer(); 3491 l2cap_retransmit_frames(sk);
3379
3380 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3381 3492
3382 spin_lock_bh(&pi->send_lock);
3383 l2cap_ertm_send(sk); 3493 l2cap_ertm_send(sk);
3384 spin_unlock_bh(&pi->send_lock);
3385 3494
3386 if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) && 3495 if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
3387 pi->frames_sent == 0) { 3496 pi->frames_sent == 0) {
@@ -3393,6 +3502,8 @@ static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
3393static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar) 3502static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3394{ 3503{
3395 struct sk_buff *next_skb; 3504 struct sk_buff *next_skb;
3505 struct l2cap_pinfo *pi = l2cap_pi(sk);
3506 int tx_seq_offset, next_tx_seq_offset;
3396 3507
3397 bt_cb(skb)->tx_seq = tx_seq; 3508 bt_cb(skb)->tx_seq = tx_seq;
3398 bt_cb(skb)->sar = sar; 3509 bt_cb(skb)->sar = sar;
@@ -3403,11 +3514,20 @@ static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_s
3403 return 0; 3514 return 0;
3404 } 3515 }
3405 3516
3517 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3518 if (tx_seq_offset < 0)
3519 tx_seq_offset += 64;
3520
3406 do { 3521 do {
3407 if (bt_cb(next_skb)->tx_seq == tx_seq) 3522 if (bt_cb(next_skb)->tx_seq == tx_seq)
3408 return -EINVAL; 3523 return -EINVAL;
3409 3524
3410 if (bt_cb(next_skb)->tx_seq > tx_seq) { 3525 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
3526 pi->buffer_seq) % 64;
3527 if (next_tx_seq_offset < 0)
3528 next_tx_seq_offset += 64;
3529
3530 if (next_tx_seq_offset > tx_seq_offset) {
3411 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb); 3531 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3412 return 0; 3532 return 0;
3413 } 3533 }
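
The SREJ-queue fix above stops comparing raw sequence numbers and instead compares distances from buffer_seq, because ERTM sequence numbers wrap at 64: after the wrap, a numerically smaller tx_seq can still be the later frame. A runnable sketch of the offset arithmetic (names illustrative):

/* Forward distance from the window start in the 6-bit ERTM sequence
 * space. C's % keeps the dividend's sign, hence the fix-up. */
static int seq_offset(int seq, int window_start)
{
        int off = (seq - window_start) % 64;

        if (off < 0)
                off += 64;
        return off;
}

With window_start = 62, seq 63 maps to offset 1 and seq 1 maps to offset 3, so frame 1 correctly sorts after frame 63 even though 1 < 63 numerically.
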
@@ -3525,11 +3645,51 @@ drop:
3525 pi->sdu = NULL; 3645 pi->sdu = NULL;
3526 3646
3527disconnect: 3647disconnect:
3528 l2cap_send_disconn_req(pi->conn, sk); 3648 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3529 kfree_skb(skb); 3649 kfree_skb(skb);
3530 return 0; 3650 return 0;
3531} 3651}
3532 3652
3653static int l2cap_try_push_rx_skb(struct sock *sk)
3654{
3655 struct l2cap_pinfo *pi = l2cap_pi(sk);
3656 struct sk_buff *skb;
3657 u16 control;
3658 int err;
3659
3660 while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
3661 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3662 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
3663 if (err < 0) {
3664 skb_queue_head(BUSY_QUEUE(sk), skb);
3665 return -EBUSY;
3666 }
3667
3668 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3669 }
3670
3671 if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
3672 goto done;
3673
3674 control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3675 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
3676 l2cap_send_sframe(pi, control);
3677 l2cap_pi(sk)->retry_count = 1;
3678
3679 del_timer(&pi->retrans_timer);
3680 __mod_monitor_timer();
3681
3682 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
3683
3684done:
3685 pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
3686 pi->conn_state &= ~L2CAP_CONN_RNR_SENT;
3687
3688 BT_DBG("sk %p, Exit local busy", sk);
3689
3690 return 0;
3691}
3692
3533static void l2cap_busy_work(struct work_struct *work) 3693static void l2cap_busy_work(struct work_struct *work)
3534{ 3694{
3535 DECLARE_WAITQUEUE(wait, current); 3695 DECLARE_WAITQUEUE(wait, current);
@@ -3538,7 +3698,6 @@ static void l2cap_busy_work(struct work_struct *work)
3538 struct sock *sk = (struct sock *)pi; 3698 struct sock *sk = (struct sock *)pi;
3539 int n_tries = 0, timeo = HZ/5, err; 3699 int n_tries = 0, timeo = HZ/5, err;
3540 struct sk_buff *skb; 3700 struct sk_buff *skb;
3541 u16 control;
3542 3701
3543 lock_sock(sk); 3702 lock_sock(sk);
3544 3703
@@ -3548,8 +3707,8 @@ static void l2cap_busy_work(struct work_struct *work)
3548 3707
3549 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) { 3708 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
3550 err = -EBUSY; 3709 err = -EBUSY;
3551 l2cap_send_disconn_req(pi->conn, sk); 3710 l2cap_send_disconn_req(pi->conn, sk, EBUSY);
3552 goto done; 3711 break;
3553 } 3712 }
3554 3713
3555 if (!timeo) 3714 if (!timeo)
@@ -3557,7 +3716,7 @@ static void l2cap_busy_work(struct work_struct *work)
3557 3716
3558 if (signal_pending(current)) { 3717 if (signal_pending(current)) {
3559 err = sock_intr_errno(timeo); 3718 err = sock_intr_errno(timeo);
3560 goto done; 3719 break;
3561 } 3720 }
3562 3721
3563 release_sock(sk); 3722 release_sock(sk);
@@ -3566,40 +3725,12 @@ static void l2cap_busy_work(struct work_struct *work)
3566 3725
3567 err = sock_error(sk); 3726 err = sock_error(sk);
3568 if (err) 3727 if (err)
3569 goto done; 3728 break;
3570
3571 while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
3572 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3573 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
3574 if (err < 0) {
3575 skb_queue_head(BUSY_QUEUE(sk), skb);
3576 break;
3577 }
3578
3579 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3580 }
3581 3729
3582 if (!skb) 3730 if (l2cap_try_push_rx_skb(sk) == 0)
3583 break; 3731 break;
3584 } 3732 }
3585 3733
3586 if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
3587 goto done;
3588
3589 control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3590 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
3591 l2cap_send_sframe(pi, control);
3592 l2cap_pi(sk)->retry_count = 1;
3593
3594 del_timer(&pi->retrans_timer);
3595 __mod_monitor_timer();
3596
3597 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
3598
3599done:
3600 pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
3601 pi->conn_state &= ~L2CAP_CONN_RNR_SENT;
3602
3603 set_current_state(TASK_RUNNING); 3734 set_current_state(TASK_RUNNING);
3604 remove_wait_queue(sk_sleep(sk), &wait); 3735 remove_wait_queue(sk_sleep(sk), &wait);
3605 3736
@@ -3614,7 +3745,9 @@ static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
3614 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) { 3745 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3615 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT; 3746 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3616 __skb_queue_tail(BUSY_QUEUE(sk), skb); 3747 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3617 return -EBUSY; 3748 return l2cap_try_push_rx_skb(sk);
3749
3750
3618 } 3751 }
3619 3752
3620 err = l2cap_ertm_reassembly_sdu(sk, skb, control); 3753 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
@@ -3624,6 +3757,8 @@ static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
3624 } 3757 }
3625 3758
3626 /* Busy Condition */ 3759 /* Busy Condition */
3760 BT_DBG("sk %p, Enter local busy", sk);
3761
3627 pi->conn_state |= L2CAP_CONN_LOCAL_BUSY; 3762 pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3628 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT; 3763 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3629 __skb_queue_tail(BUSY_QUEUE(sk), skb); 3764 __skb_queue_tail(BUSY_QUEUE(sk), skb);
@@ -3634,6 +3769,8 @@ static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
3634 3769
3635 pi->conn_state |= L2CAP_CONN_RNR_SENT; 3770 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3636 3771
3772 del_timer(&pi->ack_timer);
3773
3637 queue_work(_busy_wq, &pi->busy_work); 3774 queue_work(_busy_wq, &pi->busy_work);
3638 3775
3639 return err; 3776 return err;
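
l2cap_push_rx_skb() now retries via l2cap_try_push_rx_skb() instead of unconditionally reporting -EBUSY, so a transient busy condition can clear inline. The drain loop at its core follows a common pattern, sketched here with illustrative names: pop frames in order and, on failure, return the frame to the head of the queue so sequencing survives the retry:

/* Drain a FIFO of deferred frames; a frame that cannot be delivered
 * goes back to the head (not the tail) to preserve ordering. */
static int drain_deferred(struct sock *sk, struct sk_buff_head *queue)
{
        struct sk_buff *skb;

        while ((skb = skb_dequeue(queue))) {
                if (deliver(sk, skb) < 0) {     /* deliver() is illustrative */
                        skb_queue_head(queue, skb);
                        return -EBUSY;          /* still busy, retry later */
                }
        }
        return 0;                               /* queue drained */
}
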
@@ -3747,7 +3884,7 @@ static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3747 l2cap_ertm_reassembly_sdu(sk, skb, control); 3884 l2cap_ertm_reassembly_sdu(sk, skb, control);
3748 l2cap_pi(sk)->buffer_seq_srej = 3885 l2cap_pi(sk)->buffer_seq_srej =
3749 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64; 3886 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3750 tx_seq++; 3887 tx_seq = (tx_seq + 1) % 64;
3751 } 3888 }
3752} 3889}
3753 3890
@@ -3783,10 +3920,11 @@ static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3783 l2cap_send_sframe(pi, control); 3920 l2cap_send_sframe(pi, control);
3784 3921
3785 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC); 3922 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3786 new->tx_seq = pi->expected_tx_seq++; 3923 new->tx_seq = pi->expected_tx_seq;
3924 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3787 list_add_tail(&new->list, SREJ_LIST(sk)); 3925 list_add_tail(&new->list, SREJ_LIST(sk));
3788 } 3926 }
3789 pi->expected_tx_seq++; 3927 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3790} 3928}
3791 3929
3792static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb) 3930static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
@@ -3795,11 +3933,12 @@ static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, str
3795 u8 tx_seq = __get_txseq(rx_control); 3933 u8 tx_seq = __get_txseq(rx_control);
3796 u8 req_seq = __get_reqseq(rx_control); 3934 u8 req_seq = __get_reqseq(rx_control);
3797 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT; 3935 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3798 u8 tx_seq_offset, expected_tx_seq_offset; 3936 int tx_seq_offset, expected_tx_seq_offset;
3799 int num_to_ack = (pi->tx_win/6) + 1; 3937 int num_to_ack = (pi->tx_win/6) + 1;
3800 int err = 0; 3938 int err = 0;
3801 3939
3802 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len); 3940 BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,
3941 rx_control);
3803 3942
3804 if (L2CAP_CTRL_FINAL & rx_control && 3943 if (L2CAP_CTRL_FINAL & rx_control &&
3805 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) { 3944 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
@@ -3821,7 +3960,7 @@ static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, str
3821 3960
3822 /* invalid tx_seq */ 3961 /* invalid tx_seq */
3823 if (tx_seq_offset >= pi->tx_win) { 3962 if (tx_seq_offset >= pi->tx_win) {
3824 l2cap_send_disconn_req(pi->conn, sk); 3963 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3825 goto drop; 3964 goto drop;
3826 } 3965 }
3827 3966
@@ -3844,6 +3983,7 @@ static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, str
3844 pi->buffer_seq = pi->buffer_seq_srej; 3983 pi->buffer_seq = pi->buffer_seq_srej;
3845 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT; 3984 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3846 l2cap_send_ack(pi); 3985 l2cap_send_ack(pi);
3986 BT_DBG("sk %p, Exit SREJ_SENT", sk);
3847 } 3987 }
3848 } else { 3988 } else {
3849 struct srej_list *l; 3989 struct srej_list *l;
@@ -3872,6 +4012,8 @@ static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, str
3872 4012
3873 pi->conn_state |= L2CAP_CONN_SREJ_SENT; 4013 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3874 4014
4015 BT_DBG("sk %p, Enter SREJ", sk);
4016
3875 INIT_LIST_HEAD(SREJ_LIST(sk)); 4017 INIT_LIST_HEAD(SREJ_LIST(sk));
3876 pi->buffer_seq_srej = pi->buffer_seq; 4018 pi->buffer_seq_srej = pi->buffer_seq;
3877 4019
@@ -3882,6 +4024,8 @@ static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, str
3882 pi->conn_state |= L2CAP_CONN_SEND_PBIT; 4024 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3883 4025
3884 l2cap_send_srejframe(sk, tx_seq); 4026 l2cap_send_srejframe(sk, tx_seq);
4027
4028 del_timer(&pi->ack_timer);
3885 } 4029 }
3886 return 0; 4030 return 0;
3887 4031
@@ -3895,6 +4039,10 @@ expected:
3895 return 0; 4039 return 0;
3896 } 4040 }
3897 4041
4042 err = l2cap_push_rx_skb(sk, skb, rx_control);
4043 if (err < 0)
4044 return 0;
4045
3898 if (rx_control & L2CAP_CTRL_FINAL) { 4046 if (rx_control & L2CAP_CTRL_FINAL) {
3899 if (pi->conn_state & L2CAP_CONN_REJ_ACT) 4047 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3900 pi->conn_state &= ~L2CAP_CONN_REJ_ACT; 4048 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
@@ -3902,10 +4050,6 @@ expected:
3902 l2cap_retransmit_frames(sk); 4050 l2cap_retransmit_frames(sk);
3903 } 4051 }
3904 4052
3905 err = l2cap_push_rx_skb(sk, skb, rx_control);
3906 if (err < 0)
3907 return 0;
3908
3909 __mod_ack_timer(); 4053 __mod_ack_timer();
3910 4054
3911 pi->num_acked = (pi->num_acked + 1) % num_to_ack; 4055 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
@@ -3923,10 +4067,14 @@ static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
3923{ 4067{
3924 struct l2cap_pinfo *pi = l2cap_pi(sk); 4068 struct l2cap_pinfo *pi = l2cap_pi(sk);
3925 4069
4070 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
4071 rx_control);
4072
3926 pi->expected_ack_seq = __get_reqseq(rx_control); 4073 pi->expected_ack_seq = __get_reqseq(rx_control);
3927 l2cap_drop_acked_frames(sk); 4074 l2cap_drop_acked_frames(sk);
3928 4075
3929 if (rx_control & L2CAP_CTRL_POLL) { 4076 if (rx_control & L2CAP_CTRL_POLL) {
4077 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
3930 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) { 4078 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3931 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) && 4079 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3932 (pi->unacked_frames > 0)) 4080 (pi->unacked_frames > 0))
@@ -3955,9 +4103,7 @@ static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
3955 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) { 4103 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3956 l2cap_send_ack(pi); 4104 l2cap_send_ack(pi);
3957 } else { 4105 } else {
3958 spin_lock_bh(&pi->send_lock);
3959 l2cap_ertm_send(sk); 4106 l2cap_ertm_send(sk);
3960 spin_unlock_bh(&pi->send_lock);
3961 } 4107 }
3962 } 4108 }
3963} 4109}
@@ -3967,6 +4113,8 @@ static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
3967 struct l2cap_pinfo *pi = l2cap_pi(sk); 4113 struct l2cap_pinfo *pi = l2cap_pi(sk);
3968 u8 tx_seq = __get_reqseq(rx_control); 4114 u8 tx_seq = __get_reqseq(rx_control);
3969 4115
4116 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4117
3970 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY; 4118 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3971 4119
3972 pi->expected_ack_seq = tx_seq; 4120 pi->expected_ack_seq = tx_seq;
@@ -3989,16 +4137,18 @@ static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
3989 struct l2cap_pinfo *pi = l2cap_pi(sk); 4137 struct l2cap_pinfo *pi = l2cap_pi(sk);
3990 u8 tx_seq = __get_reqseq(rx_control); 4138 u8 tx_seq = __get_reqseq(rx_control);
3991 4139
4140 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4141
3992 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY; 4142 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3993 4143
3994 if (rx_control & L2CAP_CTRL_POLL) { 4144 if (rx_control & L2CAP_CTRL_POLL) {
3995 pi->expected_ack_seq = tx_seq; 4145 pi->expected_ack_seq = tx_seq;
3996 l2cap_drop_acked_frames(sk); 4146 l2cap_drop_acked_frames(sk);
4147
4148 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
3997 l2cap_retransmit_one_frame(sk, tx_seq); 4149 l2cap_retransmit_one_frame(sk, tx_seq);
3998 4150
3999 spin_lock_bh(&pi->send_lock);
4000 l2cap_ertm_send(sk); 4151 l2cap_ertm_send(sk);
4001 spin_unlock_bh(&pi->send_lock);
4002 4152
4003 if (pi->conn_state & L2CAP_CONN_WAIT_F) { 4153 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4004 pi->srej_save_reqseq = tx_seq; 4154 pi->srej_save_reqseq = tx_seq;
@@ -4024,10 +4174,15 @@ static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
4024 struct l2cap_pinfo *pi = l2cap_pi(sk); 4174 struct l2cap_pinfo *pi = l2cap_pi(sk);
4025 u8 tx_seq = __get_reqseq(rx_control); 4175 u8 tx_seq = __get_reqseq(rx_control);
4026 4176
4177 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4178
4027 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY; 4179 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
4028 pi->expected_ack_seq = tx_seq; 4180 pi->expected_ack_seq = tx_seq;
4029 l2cap_drop_acked_frames(sk); 4181 l2cap_drop_acked_frames(sk);
4030 4182
4183 if (rx_control & L2CAP_CTRL_POLL)
4184 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
4185
4031 if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) { 4186 if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
4032 del_timer(&pi->retrans_timer); 4187 del_timer(&pi->retrans_timer);
4033 if (rx_control & L2CAP_CTRL_POLL) 4188 if (rx_control & L2CAP_CTRL_POLL)
@@ -4075,12 +4230,83 @@ static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, str
4075 return 0; 4230 return 0;
4076} 4231}
4077 4232
4233static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
4234{
4235 struct l2cap_pinfo *pi = l2cap_pi(sk);
4236 u16 control;
4237 u8 req_seq;
4238 int len, next_tx_seq_offset, req_seq_offset;
4239
4240 control = get_unaligned_le16(skb->data);
4241 skb_pull(skb, 2);
4242 len = skb->len;
4243
4244 /*
4245 * We can just drop the corrupted I-frame here.
4246 * Receiver will miss it and start proper recovery
4247 * procedures and ask retransmission.
4248 */
4249 if (l2cap_check_fcs(pi, skb))
4250 goto drop;
4251
4252 if (__is_sar_start(control) && __is_iframe(control))
4253 len -= 2;
4254
4255 if (pi->fcs == L2CAP_FCS_CRC16)
4256 len -= 2;
4257
4258 if (len > pi->mps) {
4259 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4260 goto drop;
4261 }
4262
4263 req_seq = __get_reqseq(control);
4264 req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
4265 if (req_seq_offset < 0)
4266 req_seq_offset += 64;
4267
4268 next_tx_seq_offset =
4269 (pi->next_tx_seq - pi->expected_ack_seq) % 64;
4270 if (next_tx_seq_offset < 0)
4271 next_tx_seq_offset += 64;
4272
4273 /* check for invalid req-seq */
4274 if (req_seq_offset > next_tx_seq_offset) {
4275 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4276 goto drop;
4277 }
4278
4279 if (__is_iframe(control)) {
4280 if (len < 0) {
4281 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4282 goto drop;
4283 }
4284
4285 l2cap_data_channel_iframe(sk, control, skb);
4286 } else {
4287 if (len != 0) {
4288 BT_ERR("%d", len);
4289 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4290 goto drop;
4291 }
4292
4293 l2cap_data_channel_sframe(sk, control, skb);
4294 }
4295
4296 return 0;
4297
4298drop:
4299 kfree_skb(skb);
4300 return 0;
4301}
4302
4078static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb) 4303static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4079{ 4304{
4080 struct sock *sk; 4305 struct sock *sk;
4081 struct l2cap_pinfo *pi; 4306 struct l2cap_pinfo *pi;
4082 u16 control, len; 4307 u16 control;
4083 u8 tx_seq, req_seq, next_tx_seq_offset, req_seq_offset; 4308 u8 tx_seq;
4309 int len;
4084 4310
4085 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid); 4311 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
4086 if (!sk) { 4312 if (!sk) {
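
l2cap_ertm_data_rcv() above centralizes the header parsing and sanity checks that were previously inlined in l2cap_data_channel(). Its ReqSeq test maps both ReqSeq and next_tx_seq into offsets from expected_ack_seq: an acknowledgement is only sane if it covers frames that were actually sent. A self-contained sketch of that window test:

/* ReqSeq must fall inside [expected_ack_seq, next_tx_seq] in the
 * modulo-64 sequence space; anything past next_tx_seq would "ack"
 * frames that were never transmitted. */
static int reqseq_valid(u8 req_seq, u8 expected_ack_seq, u8 next_tx_seq)
{
        int req_off  = (req_seq - expected_ack_seq + 64) % 64;
        int next_off = (next_tx_seq - expected_ack_seq + 64) % 64;

        return req_off <= next_off;
}
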
@@ -4110,59 +4336,11 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
4110 break; 4336 break;
4111 4337
4112 case L2CAP_MODE_ERTM: 4338 case L2CAP_MODE_ERTM:
4113 control = get_unaligned_le16(skb->data); 4339 if (!sock_owned_by_user(sk)) {
4114 skb_pull(skb, 2); 4340 l2cap_ertm_data_rcv(sk, skb);
4115 len = skb->len;
4116
4117 if (__is_sar_start(control))
4118 len -= 2;
4119
4120 if (pi->fcs == L2CAP_FCS_CRC16)
4121 len -= 2;
4122
4123 /*
4124 * We can just drop the corrupted I-frame here.
4125 * Receiver will miss it and start proper recovery
4126 * procedures and ask retransmission.
4127 */
4128 if (len > pi->mps) {
4129 l2cap_send_disconn_req(pi->conn, sk);
4130 goto drop;
4131 }
4132
4133 if (l2cap_check_fcs(pi, skb))
4134 goto drop;
4135
4136 req_seq = __get_reqseq(control);
4137 req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
4138 if (req_seq_offset < 0)
4139 req_seq_offset += 64;
4140
4141 next_tx_seq_offset =
4142 (pi->next_tx_seq - pi->expected_ack_seq) % 64;
4143 if (next_tx_seq_offset < 0)
4144 next_tx_seq_offset += 64;
4145
4146 /* check for invalid req-seq */
4147 if (req_seq_offset > next_tx_seq_offset) {
4148 l2cap_send_disconn_req(pi->conn, sk);
4149 goto drop;
4150 }
4151
4152 if (__is_iframe(control)) {
4153 if (len < 4) {
4154 l2cap_send_disconn_req(pi->conn, sk);
4155 goto drop;
4156 }
4157
4158 l2cap_data_channel_iframe(sk, control, skb);
4159 } else { 4341 } else {
4160 if (len != 0) { 4342 if (sk_add_backlog(sk, skb))
4161 l2cap_send_disconn_req(pi->conn, sk);
4162 goto drop; 4343 goto drop;
4163 }
4164
4165 l2cap_data_channel_sframe(sk, control, skb);
4166 } 4344 }
4167 4345
4168 goto done; 4346 goto done;
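
The ERTM receive path above adopts the usual socket-lock discipline: if no process currently owns the socket, the frame is handled directly in softirq context; otherwise it is parked on the socket backlog and replayed from release_sock(). A hedged sketch of the shape of that dispatch, assuming the rest of the patch wires sk->sk_backlog_rcv to the same l2cap_ertm_data_rcv() (that hookup is not visible in this hunk):

static void ertm_dispatch(struct sock *sk, struct sk_buff *skb)
{
        bh_lock_sock(sk);
        if (!sock_owned_by_user(sk)) {
                l2cap_ertm_data_rcv(sk, skb);   /* softirq path */
        } else {
                /* a process holds the lock; release_sock() will run
                 * sk->sk_backlog_rcv on every backlogged skb */
                if (sk_add_backlog(sk, skb))
                        kfree_skb(skb);         /* backlog limit hit */
        }
        bh_unlock_sock(sk);
}
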
@@ -4172,16 +4350,16 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
4172 skb_pull(skb, 2); 4350 skb_pull(skb, 2);
4173 len = skb->len; 4351 len = skb->len;
4174 4352
4353 if (l2cap_check_fcs(pi, skb))
4354 goto drop;
4355
4175 if (__is_sar_start(control)) 4356 if (__is_sar_start(control))
4176 len -= 2; 4357 len -= 2;
4177 4358
4178 if (pi->fcs == L2CAP_FCS_CRC16) 4359 if (pi->fcs == L2CAP_FCS_CRC16)
4179 len -= 2; 4360 len -= 2;
4180 4361
4181 if (len > pi->mps || len < 4 || __is_sframe(control)) 4362 if (len > pi->mps || len < 0 || __is_sframe(control))
4182 goto drop;
4183
4184 if (l2cap_check_fcs(pi, skb))
4185 goto drop; 4363 goto drop;
4186 4364
4187 tx_seq = __get_txseq(control); 4365 tx_seq = __get_txseq(control);
@@ -4281,7 +4459,7 @@ static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4281 struct hlist_node *node; 4459 struct hlist_node *node;
4282 4460
4283 if (type != ACL_LINK) 4461 if (type != ACL_LINK)
4284 return 0; 4462 return -EINVAL;
4285 4463
4286 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr)); 4464 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4287 4465
@@ -4314,7 +4492,7 @@ static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4314 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status); 4492 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4315 4493
4316 if (hcon->type != ACL_LINK) 4494 if (hcon->type != ACL_LINK)
4317 return 0; 4495 return -EINVAL;
4318 4496
4319 if (!status) { 4497 if (!status) {
4320 conn = l2cap_conn_add(hcon, status); 4498 conn = l2cap_conn_add(hcon, status);
@@ -4343,7 +4521,7 @@ static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4343 BT_DBG("hcon %p reason %d", hcon, reason); 4521 BT_DBG("hcon %p reason %d", hcon, reason);
4344 4522
4345 if (hcon->type != ACL_LINK) 4523 if (hcon->type != ACL_LINK)
4346 return 0; 4524 return -EINVAL;
4347 4525
4348 l2cap_conn_del(hcon, bt_err(reason)); 4526 l2cap_conn_del(hcon, bt_err(reason));
4349 4527
@@ -4404,6 +4582,7 @@ static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4404 req.psm = l2cap_pi(sk)->psm; 4582 req.psm = l2cap_pi(sk)->psm;
4405 4583
4406 l2cap_pi(sk)->ident = l2cap_get_ident(conn); 4584 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
4585 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
4407 4586
4408 l2cap_send_cmd(conn, l2cap_pi(sk)->ident, 4587 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4409 L2CAP_CONN_REQ, sizeof(req), &req); 4588 L2CAP_CONN_REQ, sizeof(req), &req);
@@ -4671,14 +4850,8 @@ EXPORT_SYMBOL(l2cap_load);
4671module_init(l2cap_init); 4850module_init(l2cap_init);
4672module_exit(l2cap_exit); 4851module_exit(l2cap_exit);
4673 4852
4674module_param(enable_ertm, bool, 0644); 4853module_param(disable_ertm, bool, 0644);
4675MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode"); 4854MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
4676
4677module_param(max_transmit, uint, 0644);
4678MODULE_PARM_DESC(max_transmit, "Max transmit value (default = 3)");
4679
4680module_param(tx_window, uint, 0644);
4681MODULE_PARM_DESC(tx_window, "Transmission window size value (default = 63)");
4682 4855
4683MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); 4856MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4684MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION); 4857MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
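
The parameter flip above also changes the default: ERTM is now enabled unless explicitly disabled, and the max_transmit/tx_window module knobs disappear (presumably becoming per-channel values settled at configuration time). The inverted-default pattern in miniature (illustrative fragment, not the full driver):

static int disable_ertm;        /* 0 by default, so the feature is on */

module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

static u32 build_feat_mask(u32 feat_mask)
{
        if (!disable_ertm)      /* every former "if (enable_ertm)" gate */
                feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
                                                        | L2CAP_FEAT_FCS;
        return feat_mask;
}
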
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 43fbf6b4b4bf..44a623275951 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -1152,7 +1152,7 @@ error:
1152 return err; 1152 return err;
1153} 1153}
1154 1154
1155void rfcomm_cleanup_sockets(void) 1155void __exit rfcomm_cleanup_sockets(void)
1156{ 1156{
1157 debugfs_remove(rfcomm_sock_debugfs); 1157 debugfs_remove(rfcomm_sock_debugfs);
1158 1158
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index 309b6c261b25..026205c18b78 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -1153,7 +1153,7 @@ static const struct tty_operations rfcomm_ops = {
1153 .tiocmset = rfcomm_tty_tiocmset, 1153 .tiocmset = rfcomm_tty_tiocmset,
1154}; 1154};
1155 1155
1156int rfcomm_init_ttys(void) 1156int __init rfcomm_init_ttys(void)
1157{ 1157{
1158 rfcomm_tty_driver = alloc_tty_driver(RFCOMM_TTY_PORTS); 1158 rfcomm_tty_driver = alloc_tty_driver(RFCOMM_TTY_PORTS);
1159 if (!rfcomm_tty_driver) 1159 if (!rfcomm_tty_driver)
@@ -1183,7 +1183,7 @@ int rfcomm_init_ttys(void)
1183 return 0; 1183 return 0;
1184} 1184}
1185 1185
1186void rfcomm_cleanup_ttys(void) 1186void __exit rfcomm_cleanup_ttys(void)
1187{ 1187{
1188 tty_unregister_driver(rfcomm_tty_driver); 1188 tty_unregister_driver(rfcomm_tty_driver);
1189 put_tty_driver(rfcomm_tty_driver); 1189 put_tty_driver(rfcomm_tty_driver);
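
The new __init/__exit annotations on the rfcomm helpers let the kernel discard code that only runs at load or unload: __init text is freed once initialization finishes, and __exit text is dropped entirely when the code is built in rather than modular. The generic shape (illustrative module, not from this patch):

#include <linux/init.h>
#include <linux/module.h>

static int __init example_init(void)
{
        pr_info("example: loaded\n");
        return 0;
}

static void __exit example_exit(void)
{
        pr_info("example: unloaded\n");
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
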
diff --git a/net/bridge/br.c b/net/bridge/br.c
index 76357b547752..c8436fa31344 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -63,7 +63,6 @@ static int __init br_init(void)
63 goto err_out4; 63 goto err_out4;
64 64
65 brioctl_set(br_ioctl_deviceless_stub); 65 brioctl_set(br_ioctl_deviceless_stub);
66 br_handle_frame_hook = br_handle_frame;
67 66
68#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE) 67#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
69 br_fdb_test_addr_hook = br_fdb_test_addr; 68 br_fdb_test_addr_hook = br_fdb_test_addr;
@@ -100,7 +99,6 @@ static void __exit br_deinit(void)
100 br_fdb_test_addr_hook = NULL; 99 br_fdb_test_addr_hook = NULL;
101#endif 100#endif
102 101
103 br_handle_frame_hook = NULL;
104 br_fdb_fini(); 102 br_fdb_fini();
105} 103}
106 104
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index eedf2c94820e..cf09fe591fc2 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -22,7 +22,7 @@
22#include <asm/uaccess.h> 22#include <asm/uaccess.h>
23#include "br_private.h" 23#include "br_private.h"
24 24
25/* net device transmit always called with no BH (preempt_disabled) */ 25/* net device transmit always called with BH disabled */
26netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev) 26netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
27{ 27{
28 struct net_bridge *br = netdev_priv(dev); 28 struct net_bridge *br = netdev_priv(dev);
@@ -38,17 +38,26 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
38 } 38 }
39#endif 39#endif
40 40
41 u64_stats_update_begin(&brstats->syncp);
41 brstats->tx_packets++; 42 brstats->tx_packets++;
42 brstats->tx_bytes += skb->len; 43 brstats->tx_bytes += skb->len;
44 u64_stats_update_end(&brstats->syncp);
43 45
44 BR_INPUT_SKB_CB(skb)->brdev = dev; 46 BR_INPUT_SKB_CB(skb)->brdev = dev;
45 47
46 skb_reset_mac_header(skb); 48 skb_reset_mac_header(skb);
47 skb_pull(skb, ETH_HLEN); 49 skb_pull(skb, ETH_HLEN);
48 50
51 rcu_read_lock();
49 if (is_multicast_ether_addr(dest)) { 52 if (is_multicast_ether_addr(dest)) {
50 if (br_multicast_rcv(br, NULL, skb)) 53 if (unlikely(netpoll_tx_running(dev))) {
54 br_flood_deliver(br, skb);
55 goto out;
56 }
57 if (br_multicast_rcv(br, NULL, skb)) {
58 kfree_skb(skb);
51 goto out; 59 goto out;
60 }
52 61
53 mdst = br_mdb_get(br, skb); 62 mdst = br_mdb_get(br, skb);
54 if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) 63 if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb))
@@ -61,6 +70,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
61 br_flood_deliver(br, skb); 70 br_flood_deliver(br, skb);
62 71
63out: 72out:
73 rcu_read_unlock();
64 return NETDEV_TX_OK; 74 return NETDEV_TX_OK;
65} 75}
66 76
@@ -92,21 +102,25 @@ static int br_dev_stop(struct net_device *dev)
92 return 0; 102 return 0;
93} 103}
94 104
95static struct net_device_stats *br_get_stats(struct net_device *dev) 105static struct rtnl_link_stats64 *br_get_stats64(struct net_device *dev,
106 struct rtnl_link_stats64 *stats)
96{ 107{
97 struct net_bridge *br = netdev_priv(dev); 108 struct net_bridge *br = netdev_priv(dev);
98 struct net_device_stats *stats = &dev->stats; 109 struct br_cpu_netstats tmp, sum = { 0 };
99 struct br_cpu_netstats sum = { 0 };
100 unsigned int cpu; 110 unsigned int cpu;
101 111
102 for_each_possible_cpu(cpu) { 112 for_each_possible_cpu(cpu) {
113 unsigned int start;
103 const struct br_cpu_netstats *bstats 114 const struct br_cpu_netstats *bstats
104 = per_cpu_ptr(br->stats, cpu); 115 = per_cpu_ptr(br->stats, cpu);
105 116 do {
106 sum.tx_bytes += bstats->tx_bytes; 117 start = u64_stats_fetch_begin(&bstats->syncp);
107 sum.tx_packets += bstats->tx_packets; 118 memcpy(&tmp, bstats, sizeof(tmp));
108 sum.rx_bytes += bstats->rx_bytes; 119 } while (u64_stats_fetch_retry(&bstats->syncp, start));
109 sum.rx_packets += bstats->rx_packets; 120 sum.tx_bytes += tmp.tx_bytes;
121 sum.tx_packets += tmp.tx_packets;
122 sum.rx_bytes += tmp.rx_bytes;
123 sum.rx_packets += tmp.rx_packets;
110 } 124 }
111 125
112 stats->tx_bytes = sum.tx_bytes; 126 stats->tx_bytes = sum.tx_bytes;
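
The u64_stats_sync machinery introduced above matters on 32-bit SMP, where a 64-bit counter update is two stores and a reader can observe a torn value; on 64-bit kernels the begin/end pair compiles away. The pattern, sketched with an illustrative per-CPU stats struct:

#include <linux/u64_stats_sync.h>

struct cpu_stats {
        u64                     tx_packets;
        u64                     tx_bytes;
        struct u64_stats_sync   syncp;
};

/* writer side, e.g. the xmit path */
static void stats_add(struct cpu_stats *s, unsigned int len)
{
        u64_stats_update_begin(&s->syncp);
        s->tx_packets++;
        s->tx_bytes += len;
        u64_stats_update_end(&s->syncp);
}

/* reader side: retry the snapshot if a writer raced us */
static u64 stats_read_packets(struct cpu_stats *s)
{
        unsigned int start;
        u64 packets;

        do {
                start = u64_stats_fetch_begin(&s->syncp);
                packets = s->tx_packets;
        } while (u64_stats_fetch_retry(&s->syncp, start));

        return packets;
}
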
@@ -127,7 +141,7 @@ static int br_change_mtu(struct net_device *dev, int new_mtu)
127 141
128#ifdef CONFIG_BRIDGE_NETFILTER 142#ifdef CONFIG_BRIDGE_NETFILTER
129 /* remember the MTU in the rtable for PMTU */ 143 /* remember the MTU in the rtable for PMTU */
130 br->fake_rtable.u.dst.metrics[RTAX_MTU - 1] = new_mtu; 144 br->fake_rtable.dst.metrics[RTAX_MTU - 1] = new_mtu;
131#endif 145#endif
132 146
133 return 0; 147 return 0;
@@ -199,73 +213,81 @@ static int br_set_tx_csum(struct net_device *dev, u32 data)
199} 213}
200 214
201#ifdef CONFIG_NET_POLL_CONTROLLER 215#ifdef CONFIG_NET_POLL_CONTROLLER
202static bool br_devices_support_netpoll(struct net_bridge *br) 216static void br_poll_controller(struct net_device *br_dev)
203{ 217{
204 struct net_bridge_port *p;
205 bool ret = true;
206 int count = 0;
207 unsigned long flags;
208
209 spin_lock_irqsave(&br->lock, flags);
210 list_for_each_entry(p, &br->port_list, list) {
211 count++;
212 if ((p->dev->priv_flags & IFF_DISABLE_NETPOLL) ||
213 !p->dev->netdev_ops->ndo_poll_controller)
214 ret = false;
215 }
216 spin_unlock_irqrestore(&br->lock, flags);
217 return count != 0 && ret;
218} 218}
219 219
220static void br_poll_controller(struct net_device *br_dev) 220static void br_netpoll_cleanup(struct net_device *dev)
221{ 221{
222 struct netpoll *np = br_dev->npinfo->netpoll; 222 struct net_bridge *br = netdev_priv(dev);
223 struct net_bridge_port *p, *n;
223 224
224 if (np->real_dev != br_dev) 225 list_for_each_entry_safe(p, n, &br->port_list, list) {
225 netpoll_poll_dev(np->real_dev); 226 br_netpoll_disable(p);
227 }
226} 228}
227 229
228void br_netpoll_cleanup(struct net_device *dev) 230static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
229{ 231{
230 struct net_bridge *br = netdev_priv(dev); 232 struct net_bridge *br = netdev_priv(dev);
231 struct net_bridge_port *p, *n; 233 struct net_bridge_port *p, *n;
232 const struct net_device_ops *ops; 234 int err = 0;
233 235
234 br->dev->npinfo = NULL;
235 list_for_each_entry_safe(p, n, &br->port_list, list) { 236 list_for_each_entry_safe(p, n, &br->port_list, list) {
236 if (p->dev) { 237 if (!p->dev)
237 ops = p->dev->netdev_ops; 238 continue;
238 if (ops->ndo_netpoll_cleanup) 239
239 ops->ndo_netpoll_cleanup(p->dev); 240 err = br_netpoll_enable(p);
240 else 241 if (err)
241 p->dev->npinfo = NULL; 242 goto fail;
242 }
243 } 243 }
244
245out:
246 return err;
247
248fail:
249 br_netpoll_cleanup(dev);
250 goto out;
244} 251}
245 252
246void br_netpoll_disable(struct net_bridge *br, 253int br_netpoll_enable(struct net_bridge_port *p)
247 struct net_device *dev)
248{ 254{
249 if (br_devices_support_netpoll(br)) 255 struct netpoll *np;
250 br->dev->priv_flags &= ~IFF_DISABLE_NETPOLL; 256 int err = 0;
251 if (dev->netdev_ops->ndo_netpoll_cleanup) 257
252 dev->netdev_ops->ndo_netpoll_cleanup(dev); 258 np = kzalloc(sizeof(*p->np), GFP_KERNEL);
253 else 259 err = -ENOMEM;
254 dev->npinfo = NULL; 260 if (!np)
261 goto out;
262
263 np->dev = p->dev;
264
265 err = __netpoll_setup(np);
266 if (err) {
267 kfree(np);
268 goto out;
269 }
270
271 p->np = np;
272
273out:
274 return err;
255} 275}
256 276
257void br_netpoll_enable(struct net_bridge *br, 277void br_netpoll_disable(struct net_bridge_port *p)
258 struct net_device *dev)
259{ 278{
260 if (br_devices_support_netpoll(br)) { 279 struct netpoll *np = p->np;
261 br->dev->priv_flags &= ~IFF_DISABLE_NETPOLL; 280
262 if (br->dev->npinfo) 281 if (!np)
263 dev->npinfo = br->dev->npinfo; 282 return;
264 } else if (!(br->dev->priv_flags & IFF_DISABLE_NETPOLL)) { 283
265 br->dev->priv_flags |= IFF_DISABLE_NETPOLL; 284 p->np = NULL;
266 br_info(br,"new device %s does not support netpoll (disabling)", 285
267 dev->name); 286 /* Wait for transmitting packets to finish before freeing. */
268 } 287 synchronize_rcu_bh();
288
289 __netpoll_cleanup(np);
290 kfree(np);
269} 291}
270 292
271#endif 293#endif
@@ -288,12 +310,13 @@ static const struct net_device_ops br_netdev_ops = {
288 .ndo_open = br_dev_open, 310 .ndo_open = br_dev_open,
289 .ndo_stop = br_dev_stop, 311 .ndo_stop = br_dev_stop,
290 .ndo_start_xmit = br_dev_xmit, 312 .ndo_start_xmit = br_dev_xmit,
291 .ndo_get_stats = br_get_stats, 313 .ndo_get_stats64 = br_get_stats64,
292 .ndo_set_mac_address = br_set_mac_address, 314 .ndo_set_mac_address = br_set_mac_address,
293 .ndo_set_multicast_list = br_dev_set_multicast_list, 315 .ndo_set_multicast_list = br_dev_set_multicast_list,
294 .ndo_change_mtu = br_change_mtu, 316 .ndo_change_mtu = br_change_mtu,
295 .ndo_do_ioctl = br_dev_ioctl, 317 .ndo_do_ioctl = br_dev_ioctl,
296#ifdef CONFIG_NET_POLL_CONTROLLER 318#ifdef CONFIG_NET_POLL_CONTROLLER
319 .ndo_netpoll_setup = br_netpoll_setup,
297 .ndo_netpoll_cleanup = br_netpoll_cleanup, 320 .ndo_netpoll_cleanup = br_netpoll_cleanup,
298 .ndo_poll_controller = br_poll_controller, 321 .ndo_poll_controller = br_poll_controller,
299#endif 322#endif
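
The netpoll rework above drops the shared-npinfo juggling in favor of one struct netpoll per bridge port, built on the __netpoll_setup()/__netpoll_cleanup() split this series introduces. Condensed to its essentials (struct my_port is illustrative; the bridge keeps the pointer in net_bridge_port):

static int port_netpoll_enable(struct my_port *p)
{
        struct netpoll *np = kzalloc(sizeof(*np), GFP_KERNEL);
        int err;

        if (!np)
                return -ENOMEM;

        np->dev = p->dev;
        err = __netpoll_setup(np);
        if (err) {
                kfree(np);
                return err;
        }
        p->np = np;
        return 0;
}

static void port_netpoll_disable(struct my_port *p)
{
        struct netpoll *np = p->np;

        if (!np)
                return;
        p->np = NULL;
        synchronize_rcu_bh();   /* wait out in-flight netpoll transmits */
        __netpoll_cleanup(np);
        kfree(np);
}
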
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 26637439965b..90512ccfd3e9 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -128,7 +128,7 @@ void br_fdb_cleanup(unsigned long _data)
128{ 128{
129 struct net_bridge *br = (struct net_bridge *)_data; 129 struct net_bridge *br = (struct net_bridge *)_data;
130 unsigned long delay = hold_time(br); 130 unsigned long delay = hold_time(br);
131 unsigned long next_timer = jiffies + br->forward_delay; 131 unsigned long next_timer = jiffies + br->ageing_time;
132 int i; 132 int i;
133 133
134 spin_lock_bh(&br->hash_lock); 134 spin_lock_bh(&br->hash_lock);
@@ -149,9 +149,7 @@ void br_fdb_cleanup(unsigned long _data)
149 } 149 }
150 spin_unlock_bh(&br->hash_lock); 150 spin_unlock_bh(&br->hash_lock);
151 151
152 /* Add HZ/4 to ensure we round the jiffies upwards to be after the next 152 mod_timer(&br->gc_timer, round_jiffies_up(next_timer));
153 * timer, otherwise we might round down and will have no-op run. */
154 mod_timer(&br->gc_timer, round_jiffies(next_timer + HZ/4));
155} 153}
156 154
157/* Completely flush all dynamic entries in forwarding database.*/ 155/* Completely flush all dynamic entries in forwarding database.*/
@@ -216,7 +214,7 @@ void br_fdb_delete_by_port(struct net_bridge *br,
216 spin_unlock_bh(&br->hash_lock); 214 spin_unlock_bh(&br->hash_lock);
217} 215}
218 216
219/* No locking or refcounting, assumes caller has no preempt (rcu_read_lock) */ 217/* No locking or refcounting, assumes caller has rcu_read_lock */
220struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br, 218struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
221 const unsigned char *addr) 219 const unsigned char *addr)
222{ 220{
@@ -242,11 +240,11 @@ int br_fdb_test_addr(struct net_device *dev, unsigned char *addr)
242 struct net_bridge_fdb_entry *fdb; 240 struct net_bridge_fdb_entry *fdb;
243 int ret; 241 int ret;
244 242
245 if (!dev->br_port) 243 if (!br_port_exists(dev))
246 return 0; 244 return 0;
247 245
248 rcu_read_lock(); 246 rcu_read_lock();
249 fdb = __br_fdb_get(dev->br_port->br, addr); 247 fdb = __br_fdb_get(br_port_get_rcu(dev)->br, addr);
250 ret = fdb && fdb->dst->dev != dev && 248 ret = fdb && fdb->dst->dev != dev &&
251 fdb->dst->state == BR_STATE_FORWARDING; 249 fdb->dst->state == BR_STATE_FORWARDING;
252 rcu_read_unlock(); 250 rcu_read_unlock();
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index a98ef1393097..cbfe87f0f34a 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -50,14 +50,7 @@ int br_dev_queue_push_xmit(struct sk_buff *skb)
50 kfree_skb(skb); 50 kfree_skb(skb);
51 else { 51 else {
52 skb_push(skb, ETH_HLEN); 52 skb_push(skb, ETH_HLEN);
53 53 dev_queue_xmit(skb);
54#ifdef CONFIG_NET_POLL_CONTROLLER
55 if (unlikely(skb->dev->priv_flags & IFF_IN_NETPOLL)) {
56 netpoll_send_skb(skb->dev->npinfo->netpoll, skb);
57 skb->dev->priv_flags &= ~IFF_IN_NETPOLL;
58 } else
59#endif
60 dev_queue_xmit(skb);
61 } 54 }
62 } 55 }
63 56
@@ -73,23 +66,20 @@ int br_forward_finish(struct sk_buff *skb)
73 66
74static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb) 67static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
75{ 68{
76#ifdef CONFIG_NET_POLL_CONTROLLER
77 struct net_bridge *br = to->br;
78 if (unlikely(br->dev->priv_flags & IFF_IN_NETPOLL)) {
79 struct netpoll *np;
80 to->dev->npinfo = skb->dev->npinfo;
81 np = skb->dev->npinfo->netpoll;
82 np->real_dev = np->dev = to->dev;
83 to->dev->priv_flags |= IFF_IN_NETPOLL;
84 }
85#endif
86 skb->dev = to->dev; 69 skb->dev = to->dev;
70
71 if (unlikely(netpoll_tx_running(to->dev))) {
72 if (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb))
73 kfree_skb(skb);
74 else {
75 skb_push(skb, ETH_HLEN);
76 br_netpoll_send_skb(to, skb);
77 }
78 return;
79 }
80
87 NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev, 81 NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
88 br_forward_finish); 82 br_forward_finish);
89#ifdef CONFIG_NET_POLL_CONTROLLER
90 if (skb->dev->npinfo)
91 skb->dev->npinfo->netpoll->dev = br->dev;
92#endif
93} 83}
94 84
95static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb) 85static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
@@ -140,10 +130,10 @@ static int deliver_clone(const struct net_bridge_port *prev,
140 void (*__packet_hook)(const struct net_bridge_port *p, 130 void (*__packet_hook)(const struct net_bridge_port *p,
141 struct sk_buff *skb)) 131 struct sk_buff *skb))
142{ 132{
133 struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
134
143 skb = skb_clone(skb, GFP_ATOMIC); 135 skb = skb_clone(skb, GFP_ATOMIC);
144 if (!skb) { 136 if (!skb) {
145 struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
146
147 dev->stats.tx_dropped++; 137 dev->stats.tx_dropped++;
148 return -ENOMEM; 138 return -ENOMEM;
149 } 139 }
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 18b245e2c00e..c03d2c3ff03e 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -147,14 +147,17 @@ static void del_nbp(struct net_bridge_port *p)
147 147
148 list_del_rcu(&p->list); 148 list_del_rcu(&p->list);
149 149
150 rcu_assign_pointer(dev->br_port, NULL); 150 dev->priv_flags &= ~IFF_BRIDGE_PORT;
151
152 netdev_rx_handler_unregister(dev);
151 153
152 br_multicast_del_port(p); 154 br_multicast_del_port(p);
153 155
154 kobject_uevent(&p->kobj, KOBJ_REMOVE); 156 kobject_uevent(&p->kobj, KOBJ_REMOVE);
155 kobject_del(&p->kobj); 157 kobject_del(&p->kobj);
156 158
157 br_netpoll_disable(br, dev); 159 br_netpoll_disable(p);
160
158 call_rcu(&p->rcu, destroy_nbp_rcu); 161 call_rcu(&p->rcu, destroy_nbp_rcu);
159} 162}
160 163
@@ -167,8 +170,6 @@ static void del_br(struct net_bridge *br, struct list_head *head)
167 del_nbp(p); 170 del_nbp(p);
168 } 171 }
169 172
170 br_netpoll_cleanup(br->dev);
171
172 del_timer_sync(&br->gc_timer); 173 del_timer_sync(&br->gc_timer);
173 174
174 br_sysfs_delbr(br->dev); 175 br_sysfs_delbr(br->dev);
@@ -400,7 +401,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
400 return -ELOOP; 401 return -ELOOP;
401 402
402 /* Device is already being bridged */ 403 /* Device is already being bridged */
403 if (dev->br_port != NULL) 404 if (br_port_exists(dev))
404 return -EBUSY; 405 return -EBUSY;
405 406
406 /* No bridging devices that dislike that (e.g. wireless) */ 407 /* No bridging devices that dislike that (e.g. wireless) */
@@ -428,7 +429,15 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
428 if (err) 429 if (err)
429 goto err2; 430 goto err2;
430 431
431 rcu_assign_pointer(dev->br_port, p); 432 if (br_netpoll_info(br) && ((err = br_netpoll_enable(p))))
433 goto err3;
434
435 err = netdev_rx_handler_register(dev, br_handle_frame, p);
436 if (err)
437 goto err3;
438
439 dev->priv_flags |= IFF_BRIDGE_PORT;
440
432 dev_disable_lro(dev); 441 dev_disable_lro(dev);
433 442
434 list_add_rcu(&p->list, &br->port_list); 443 list_add_rcu(&p->list, &br->port_list);
@@ -448,9 +457,9 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
448 457
449 kobject_uevent(&p->kobj, KOBJ_ADD); 458 kobject_uevent(&p->kobj, KOBJ_ADD);
450 459
451 br_netpoll_enable(br, dev);
452
453 return 0; 460 return 0;
461err3:
462 sysfs_remove_link(br->ifobj, p->dev->name);
454err2: 463err2:
455 br_fdb_delete_by_port(br, p, 1); 464 br_fdb_delete_by_port(br, p, 1);
456err1: 465err1:
@@ -467,9 +476,13 @@ put_back:
467/* called with RTNL */ 476/* called with RTNL */
468int br_del_if(struct net_bridge *br, struct net_device *dev) 477int br_del_if(struct net_bridge *br, struct net_device *dev)
469{ 478{
470 struct net_bridge_port *p = dev->br_port; 479 struct net_bridge_port *p;
480
481 if (!br_port_exists(dev))
482 return -EINVAL;
471 483
472 if (!p || p->br != br) 484 p = br_port_get(dev);
485 if (p->br != br)
473 return -EINVAL; 486 return -EINVAL;
474 487
475 del_nbp(p); 488 del_nbp(p);
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index d36e700f7a26..826cd5221536 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -27,8 +27,10 @@ static int br_pass_frame_up(struct sk_buff *skb)
27 struct net_bridge *br = netdev_priv(brdev); 27 struct net_bridge *br = netdev_priv(brdev);
28 struct br_cpu_netstats *brstats = this_cpu_ptr(br->stats); 28 struct br_cpu_netstats *brstats = this_cpu_ptr(br->stats);
29 29
30 u64_stats_update_begin(&brstats->syncp);
30 brstats->rx_packets++; 31 brstats->rx_packets++;
31 brstats->rx_bytes += skb->len; 32 brstats->rx_bytes += skb->len;
33 u64_stats_update_end(&brstats->syncp);
32 34
33 indev = skb->dev; 35 indev = skb->dev;
34 skb->dev = brdev; 36 skb->dev = brdev;
@@ -37,11 +39,11 @@ static int br_pass_frame_up(struct sk_buff *skb)
37 netif_receive_skb); 39 netif_receive_skb);
38} 40}
39 41
40/* note: already called with rcu_read_lock (preempt_disabled) */ 42/* note: already called with rcu_read_lock */
41int br_handle_frame_finish(struct sk_buff *skb) 43int br_handle_frame_finish(struct sk_buff *skb)
42{ 44{
43 const unsigned char *dest = eth_hdr(skb)->h_dest; 45 const unsigned char *dest = eth_hdr(skb)->h_dest;
44 struct net_bridge_port *p = rcu_dereference(skb->dev->br_port); 46 struct net_bridge_port *p = br_port_get_rcu(skb->dev);
45 struct net_bridge *br; 47 struct net_bridge *br;
46 struct net_bridge_fdb_entry *dst; 48 struct net_bridge_fdb_entry *dst;
47 struct net_bridge_mdb_entry *mdst; 49 struct net_bridge_mdb_entry *mdst;
@@ -108,13 +110,12 @@ drop:
108 goto out; 110 goto out;
109} 111}
110 112
111/* note: already called with rcu_read_lock (preempt_disabled) */ 113/* note: already called with rcu_read_lock */
112static int br_handle_local_finish(struct sk_buff *skb) 114static int br_handle_local_finish(struct sk_buff *skb)
113{ 115{
114 struct net_bridge_port *p = rcu_dereference(skb->dev->br_port); 116 struct net_bridge_port *p = br_port_get_rcu(skb->dev);
115 117
116 if (p) 118 br_fdb_update(p->br, p, eth_hdr(skb)->h_source);
117 br_fdb_update(p->br, p, eth_hdr(skb)->h_source);
118 return 0; /* process further */ 119 return 0; /* process further */
119} 120}
120 121
@@ -131,15 +132,18 @@ static inline int is_link_local(const unsigned char *dest)
131} 132}
132 133
133/* 134/*
134 * Called via br_handle_frame_hook.
135 * Return NULL if skb is handled 135 * Return NULL if skb is handled
136 * note: already called with rcu_read_lock (preempt_disabled) 136 * note: already called with rcu_read_lock
137 */ 137 */
138struct sk_buff *br_handle_frame(struct net_bridge_port *p, struct sk_buff *skb) 138struct sk_buff *br_handle_frame(struct sk_buff *skb)
139{ 139{
140 struct net_bridge_port *p;
140 const unsigned char *dest = eth_hdr(skb)->h_dest; 141 const unsigned char *dest = eth_hdr(skb)->h_dest;
141 int (*rhook)(struct sk_buff *skb); 142 int (*rhook)(struct sk_buff *skb);
142 143
144 if (skb->pkt_type == PACKET_LOOPBACK)
145 return skb;
146
143 if (!is_valid_ether_addr(eth_hdr(skb)->h_source)) 147 if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
144 goto drop; 148 goto drop;
145 149
@@ -147,6 +151,8 @@ struct sk_buff *br_handle_frame(struct net_bridge_port *p, struct sk_buff *skb)
147 if (!skb) 151 if (!skb)
148 return NULL; 152 return NULL;
149 153
154 p = br_port_get_rcu(skb->dev);
155
150 if (unlikely(is_link_local(dest))) { 156 if (unlikely(is_link_local(dest))) {
151 /* Pause frames shouldn't be passed up by driver anyway */ 157 /* Pause frames shouldn't be passed up by driver anyway */
152 if (skb->protocol == htons(ETH_P_PAUSE)) 158 if (skb->protocol == htons(ETH_P_PAUSE))
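
br_handle_frame() now takes only the skb because the bridge hooks in through the generic rx_handler added to struct net_device: __netif_receive_skb() invokes it under rcu_read_lock, and the handler either consumes the skb (returning NULL) or hands it back for normal processing. A hedged sketch of the lifecycle, assuming the 2.6.36-era API where rx_handler_data carries the port pointer:

/* 1. attach, called with RTNL held (see br_add_if above) */
err = netdev_rx_handler_register(dev, br_handle_frame, p);
if (err)
        goto unwind;

/* 2. hot path, under rcu_read_lock; in this series br_port_get_rcu()
 * is essentially rcu_dereference(dev->rx_handler_data) */
p = br_port_get_rcu(skb->dev);

/* 3. detach, before the port struct is freed (see del_nbp above) */
netdev_rx_handler_unregister(dev);
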
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 9d21d98ae5fa..eb5b256ffc88 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -99,6 +99,15 @@ static struct net_bridge_mdb_entry *__br_mdb_ip_get(
99 return NULL; 99 return NULL;
100} 100}
101 101
102static struct net_bridge_mdb_entry *br_mdb_ip_get(
103 struct net_bridge_mdb_htable *mdb, struct br_ip *dst)
104{
105 if (!mdb)
106 return NULL;
107
108 return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst));
109}
110
102static struct net_bridge_mdb_entry *br_mdb_ip4_get( 111static struct net_bridge_mdb_entry *br_mdb_ip4_get(
103 struct net_bridge_mdb_htable *mdb, __be32 dst) 112 struct net_bridge_mdb_htable *mdb, __be32 dst)
104{ 113{
@@ -107,7 +116,7 @@ static struct net_bridge_mdb_entry *br_mdb_ip4_get(
107 br_dst.u.ip4 = dst; 116 br_dst.u.ip4 = dst;
108 br_dst.proto = htons(ETH_P_IP); 117 br_dst.proto = htons(ETH_P_IP);
109 118
110 return __br_mdb_ip_get(mdb, &br_dst, __br_ip4_hash(mdb, dst)); 119 return br_mdb_ip_get(mdb, &br_dst);
111} 120}
112 121
113#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 122#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
@@ -119,23 +128,17 @@ static struct net_bridge_mdb_entry *br_mdb_ip6_get(
119 ipv6_addr_copy(&br_dst.u.ip6, dst); 128 ipv6_addr_copy(&br_dst.u.ip6, dst);
120 br_dst.proto = htons(ETH_P_IPV6); 129 br_dst.proto = htons(ETH_P_IPV6);
121 130
122 return __br_mdb_ip_get(mdb, &br_dst, __br_ip6_hash(mdb, dst)); 131 return br_mdb_ip_get(mdb, &br_dst);
123} 132}
124#endif 133#endif
125 134
126static struct net_bridge_mdb_entry *br_mdb_ip_get(
127 struct net_bridge_mdb_htable *mdb, struct br_ip *dst)
128{
129 return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst));
130}
131
132struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br, 135struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
133 struct sk_buff *skb) 136 struct sk_buff *skb)
134{ 137{
135 struct net_bridge_mdb_htable *mdb = br->mdb; 138 struct net_bridge_mdb_htable *mdb = br->mdb;
136 struct br_ip ip; 139 struct br_ip ip;
137 140
138 if (!mdb || br->multicast_disabled) 141 if (br->multicast_disabled)
139 return NULL; 142 return NULL;
140 143
141 if (BR_INPUT_SKB_CB(skb)->igmp) 144 if (BR_INPUT_SKB_CB(skb)->igmp)
@@ -1432,7 +1435,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
1432 struct icmp6hdr *icmp6h; 1435 struct icmp6hdr *icmp6h;
1433 u8 nexthdr; 1436 u8 nexthdr;
1434 unsigned len; 1437 unsigned len;
1435 unsigned offset; 1438 int offset;
1436 int err; 1439 int err;
1437 1440
1438 if (!pskb_may_pull(skb, sizeof(*ip6h))) 1441 if (!pskb_may_pull(skb, sizeof(*ip6h)))
@@ -1725,13 +1728,9 @@ unlock:
1725int br_multicast_toggle(struct net_bridge *br, unsigned long val) 1728int br_multicast_toggle(struct net_bridge *br, unsigned long val)
1726{ 1729{
1727 struct net_bridge_port *port; 1730 struct net_bridge_port *port;
1728 int err = -ENOENT; 1731 int err = 0;
1729 1732
1730 spin_lock(&br->multicast_lock); 1733 spin_lock(&br->multicast_lock);
1731 if (!netif_running(br->dev))
1732 goto unlock;
1733
1734 err = 0;
1735 if (br->multicast_disabled == !val) 1734 if (br->multicast_disabled == !val)
1736 goto unlock; 1735 goto unlock;
1737 1736
@@ -1739,6 +1738,9 @@ int br_multicast_toggle(struct net_bridge *br, unsigned long val)
1739 if (br->multicast_disabled) 1738 if (br->multicast_disabled)
1740 goto unlock; 1739 goto unlock;
1741 1740
1741 if (!netif_running(br->dev))
1742 goto unlock;
1743
1742 if (br->mdb) { 1744 if (br->mdb) {
1743 if (br->mdb->old) { 1745 if (br->mdb->old) {
1744 err = -EEXIST; 1746 err = -EEXIST;
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 44420992f72f..2c911c0759c2 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -55,6 +55,9 @@ static int brnf_call_arptables __read_mostly = 1;
 static int brnf_filter_vlan_tagged __read_mostly = 0;
 static int brnf_filter_pppoe_tagged __read_mostly = 0;
 #else
+#define brnf_call_iptables 1
+#define brnf_call_ip6tables 1
+#define brnf_call_arptables 1
 #define brnf_filter_vlan_tagged 0
 #define brnf_filter_pppoe_tagged 0
 #endif
@@ -117,26 +120,27 @@ void br_netfilter_rtable_init(struct net_bridge *br)
 {
 	struct rtable *rt = &br->fake_rtable;
 
-	atomic_set(&rt->u.dst.__refcnt, 1);
-	rt->u.dst.dev = br->dev;
-	rt->u.dst.path = &rt->u.dst;
-	rt->u.dst.metrics[RTAX_MTU - 1] = 1500;
-	rt->u.dst.flags = DST_NOXFRM;
-	rt->u.dst.ops = &fake_dst_ops;
+	atomic_set(&rt->dst.__refcnt, 1);
+	rt->dst.dev = br->dev;
+	rt->dst.path = &rt->dst;
+	rt->dst.metrics[RTAX_MTU - 1] = 1500;
+	rt->dst.flags = DST_NOXFRM;
+	rt->dst.ops = &fake_dst_ops;
 }
 
 static inline struct rtable *bridge_parent_rtable(const struct net_device *dev)
 {
-	struct net_bridge_port *port = rcu_dereference(dev->br_port);
-
-	return port ? &port->br->fake_rtable : NULL;
+	if (!br_port_exists(dev))
+		return NULL;
+	return &br_port_get_rcu(dev)->br->fake_rtable;
 }
 
 static inline struct net_device *bridge_parent(const struct net_device *dev)
 {
-	struct net_bridge_port *port = rcu_dereference(dev->br_port);
+	if (!br_port_exists(dev))
+		return NULL;
 
-	return port ? port->br->dev : NULL;
+	return br_port_get_rcu(dev)->br->dev;
 }
 
 static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb)
@@ -244,8 +248,7 @@ static int br_nf_pre_routing_finish_ipv6(struct sk_buff *skb)
 		kfree_skb(skb);
 		return 0;
 	}
-	dst_hold(&rt->u.dst);
-	skb_dst_set(skb, &rt->u.dst);
+	skb_dst_set_noref(skb, &rt->dst);
 
 	skb->dev = nf_bridge->physindev;
 	nf_bridge_update_protocol(skb);
@@ -396,8 +399,7 @@ bridged_dnat:
 			kfree_skb(skb);
 			return 0;
 		}
-		dst_hold(&rt->u.dst);
-		skb_dst_set(skb, &rt->u.dst);
+		skb_dst_set_noref(skb, &rt->dst);
 	}
 
 	skb->dev = nf_bridge->physindev;
@@ -545,25 +547,30 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
 				      const struct net_device *out,
 				      int (*okfn)(struct sk_buff *))
 {
+	struct net_bridge_port *p;
+	struct net_bridge *br;
 	struct iphdr *iph;
 	__u32 len = nf_bridge_encap_header_len(skb);
 
 	if (unlikely(!pskb_may_pull(skb, len)))
 		goto out;
 
+	p = br_port_get_rcu(in);
+	if (p == NULL)
+		goto out;
+	br = p->br;
+
 	if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
 	    IS_PPPOE_IPV6(skb)) {
-#ifdef CONFIG_SYSCTL
-		if (!brnf_call_ip6tables)
+		if (!brnf_call_ip6tables && !br->nf_call_ip6tables)
 			return NF_ACCEPT;
-#endif
+
 		nf_bridge_pull_encap_header_rcsum(skb);
 		return br_nf_pre_routing_ipv6(hook, skb, in, out, okfn);
 	}
-#ifdef CONFIG_SYSCTL
-	if (!brnf_call_iptables)
+
+	if (!brnf_call_iptables && !br->nf_call_iptables)
 		return NF_ACCEPT;
-#endif
 
 	if (skb->protocol != htons(ETH_P_IP) && !IS_VLAN_IP(skb) &&
 	    !IS_PPPOE_IP(skb))
@@ -591,6 +598,9 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
 
 	pskb_trim_rcsum(skb, len);
 
+	/* BUG: Should really parse the IP options here. */
+	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+
 	nf_bridge_put(skb->nf_bridge);
 	if (!nf_bridge_alloc(skb))
 		return NF_DROP;
@@ -716,12 +726,17 @@ static unsigned int br_nf_forward_arp(unsigned int hook, struct sk_buff *skb,
 				   const struct net_device *out,
 				   int (*okfn)(struct sk_buff *))
 {
+	struct net_bridge_port *p;
+	struct net_bridge *br;
 	struct net_device **d = (struct net_device **)(skb->cb);
 
-#ifdef CONFIG_SYSCTL
-	if (!brnf_call_arptables)
+	p = br_port_get_rcu(out);
+	if (p == NULL)
+		return NF_ACCEPT;
+	br = p->br;
+
+	if (!brnf_call_arptables && !br->nf_call_arptables)
 		return NF_ACCEPT;
-#endif
 
 	if (skb->protocol != htons(ETH_P_ARP)) {
 		if (!IS_VLAN_ARP(skb))
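The hooks above now consult both the global brnf_call_* switches (forced to 1 when built without CONFIG_SYSCTL) and the new per-bridge nf_call_* flags. A minimal sketch of the combined gate, using the names this diff introduces (the helper itself is hypothetical):

static bool brnf_should_call_iptables(const struct net_bridge *br)
{
	/* filter if either the global sysctl or the per-bridge flag is set */
	return brnf_call_iptables || br->nf_call_iptables;
}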
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index fe0a79018ab2..4a6a378c84e3 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -120,10 +120,11 @@ static int br_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
 	idx = 0;
 	for_each_netdev(net, dev) {
 		/* not a bridge port */
-		if (dev->br_port == NULL || idx < cb->args[0])
+		if (!br_port_exists(dev) || idx < cb->args[0])
 			goto skip;
 
-		if (br_fill_ifinfo(skb, dev->br_port, NETLINK_CB(cb->skb).pid,
+		if (br_fill_ifinfo(skb, br_port_get(dev),
+				   NETLINK_CB(cb->skb).pid,
 				   cb->nlh->nlmsg_seq, RTM_NEWLINK,
 				   NLM_F_MULTI) < 0)
 			break;
@@ -168,9 +169,9 @@ static int br_rtm_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
 	if (!dev)
 		return -ENODEV;
 
-	p = dev->br_port;
-	if (!p)
+	if (!br_port_exists(dev))
 		return -EINVAL;
+	p = br_port_get(dev);
 
 	/* if kernel STP is running, don't allow changes */
 	if (p->br->stp_enabled == BR_KERNEL_STP)
diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c
index 717e1fd6133c..404d4e14c6a7 100644
--- a/net/bridge/br_notify.c
+++ b/net/bridge/br_notify.c
@@ -32,14 +32,15 @@ struct notifier_block br_device_notifier = {
 static int br_device_event(struct notifier_block *unused, unsigned long event, void *ptr)
 {
 	struct net_device *dev = ptr;
-	struct net_bridge_port *p = dev->br_port;
+	struct net_bridge_port *p = br_port_get(dev);
 	struct net_bridge *br;
 	int err;
 
 	/* not a port of a bridge */
-	if (p == NULL)
+	if (!br_port_exists(dev))
 		return NOTIFY_DONE;
 
+	p = br_port_get(dev);
 	br = p->br;
 
 	switch (event) {
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 0f4a74bc6a9b..75c90edaf7db 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -15,6 +15,8 @@
 
 #include <linux/netdevice.h>
 #include <linux/if_bridge.h>
+#include <linux/netpoll.h>
+#include <linux/u64_stats_sync.h>
 #include <net/route.h>
 
 #define BR_HASH_BITS 8
@@ -143,13 +145,23 @@ struct net_bridge_port
 #ifdef CONFIG_SYSFS
 	char				sysfs_name[IFNAMSIZ];
 #endif
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	struct netpoll			*np;
+#endif
 };
 
+#define br_port_get_rcu(dev) \
+	((struct net_bridge_port *) rcu_dereference(dev->rx_handler_data))
+#define br_port_get(dev) ((struct net_bridge_port *) dev->rx_handler_data)
+#define br_port_exists(dev) (dev->priv_flags & IFF_BRIDGE_PORT)
+
 struct br_cpu_netstats {
-	unsigned long	rx_packets;
-	unsigned long	rx_bytes;
-	unsigned long	tx_packets;
-	unsigned long	tx_bytes;
+	u64			rx_packets;
+	u64			rx_bytes;
+	u64			tx_packets;
+	u64			tx_bytes;
+	struct u64_stats_sync	syncp;
 };
 
 struct net_bridge
@@ -164,6 +176,9 @@ struct net_bridge
 	unsigned long			feature_mask;
 #ifdef CONFIG_BRIDGE_NETFILTER
 	struct rtable			fake_rtable;
+	bool				nf_call_iptables;
+	bool				nf_call_ip6tables;
+	bool				nf_call_arptables;
 #endif
 	unsigned long			flags;
 #define BR_SET_MAC_ADDR		0x00000001
@@ -273,16 +288,41 @@ extern void br_dev_setup(struct net_device *dev);
 extern netdev_tx_t br_dev_xmit(struct sk_buff *skb,
 			       struct net_device *dev);
 #ifdef CONFIG_NET_POLL_CONTROLLER
-extern void br_netpoll_cleanup(struct net_device *dev);
-extern void br_netpoll_enable(struct net_bridge *br,
-			      struct net_device *dev);
-extern void br_netpoll_disable(struct net_bridge *br,
-			       struct net_device *dev);
+static inline struct netpoll_info *br_netpoll_info(struct net_bridge *br)
+{
+	return br->dev->npinfo;
+}
+
+static inline void br_netpoll_send_skb(const struct net_bridge_port *p,
+				       struct sk_buff *skb)
+{
+	struct netpoll *np = p->np;
+
+	if (np)
+		netpoll_send_skb(np, skb);
+}
+
+extern int br_netpoll_enable(struct net_bridge_port *p);
+extern void br_netpoll_disable(struct net_bridge_port *p);
 #else
-#define br_netpoll_cleanup(br)
-#define br_netpoll_enable(br, dev)
-#define br_netpoll_disable(br, dev)
+static inline struct netpoll_info *br_netpoll_info(struct net_bridge *br)
+{
+	return NULL;
+}
+
+static inline void br_netpoll_send_skb(const struct net_bridge_port *p,
+					struct sk_buff *skb)
+{
+}
 
+static inline int br_netpoll_enable(struct net_bridge_port *p)
+{
+	return 0;
+}
+
+static inline void br_netpoll_disable(struct net_bridge_port *p)
+{
+}
 #endif
 
 /* br_fdb.c */
@@ -331,8 +371,7 @@ extern void br_features_recompute(struct net_bridge *br);
 
 /* br_input.c */
 extern int br_handle_frame_finish(struct sk_buff *skb);
-extern struct sk_buff *br_handle_frame(struct net_bridge_port *p,
-				       struct sk_buff *skb);
+extern struct sk_buff *br_handle_frame(struct sk_buff *skb);
 
 /* br_ioctl.c */
 extern int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
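The dev->br_port pointer is replaced by rx_handler_data plus the IFF_BRIDGE_PORT flag. A minimal caller sketch, assuming an rcu_read_lock() section as in the netfilter hooks above (the function name is hypothetical):

static struct net_bridge *bridge_of(struct net_device *dev)
{
	struct net_bridge_port *p;

	if (!br_port_exists(dev))	/* IFF_BRIDGE_PORT not set */
		return NULL;
	p = br_port_get_rcu(dev);	/* rcu_dereference(rx_handler_data) */
	return p->br;
}

Callers that hold RTNL rather than RCU (br_netlink.c and br_notify.c above) use the plain br_port_get() accessor instead.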
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index 217bd225a42f..35cf27087b56 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -131,18 +131,19 @@ void br_send_tcn_bpdu(struct net_bridge_port *p)
 /*
  * Called from llc.
  *
- * NO locks, but rcu_read_lock (preempt_disabled)
+ * NO locks, but rcu_read_lock
  */
 void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
 		struct net_device *dev)
 {
 	const unsigned char *dest = eth_hdr(skb)->h_dest;
-	struct net_bridge_port *p = rcu_dereference(dev->br_port);
+	struct net_bridge_port *p;
 	struct net_bridge *br;
 	const unsigned char *buf;
 
-	if (!p)
+	if (!br_port_exists(dev))
 		goto err;
+	p = br_port_get_rcu(dev);
 
 	if (!pskb_may_pull(skb, 4))
 		goto err;
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index 486b8f3861d2..5c1e5559ebba 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -611,6 +611,73 @@ static DEVICE_ATTR(multicast_startup_query_interval, S_IRUGO | S_IWUSR,
 		   show_multicast_startup_query_interval,
 		   store_multicast_startup_query_interval);
 #endif
+#ifdef CONFIG_BRIDGE_NETFILTER
+static ssize_t show_nf_call_iptables(
+	struct device *d, struct device_attribute *attr, char *buf)
+{
+	struct net_bridge *br = to_bridge(d);
+	return sprintf(buf, "%u\n", br->nf_call_iptables);
+}
+
+static int set_nf_call_iptables(struct net_bridge *br, unsigned long val)
+{
+	br->nf_call_iptables = val ? true : false;
+	return 0;
+}
+
+static ssize_t store_nf_call_iptables(
+	struct device *d, struct device_attribute *attr, const char *buf,
+	size_t len)
+{
+	return store_bridge_parm(d, buf, len, set_nf_call_iptables);
+}
+static DEVICE_ATTR(nf_call_iptables, S_IRUGO | S_IWUSR,
+		   show_nf_call_iptables, store_nf_call_iptables);
+
+static ssize_t show_nf_call_ip6tables(
+	struct device *d, struct device_attribute *attr, char *buf)
+{
+	struct net_bridge *br = to_bridge(d);
+	return sprintf(buf, "%u\n", br->nf_call_ip6tables);
+}
+
+static int set_nf_call_ip6tables(struct net_bridge *br, unsigned long val)
+{
+	br->nf_call_ip6tables = val ? true : false;
+	return 0;
+}
+
+static ssize_t store_nf_call_ip6tables(
+	struct device *d, struct device_attribute *attr, const char *buf,
+	size_t len)
+{
+	return store_bridge_parm(d, buf, len, set_nf_call_ip6tables);
+}
+static DEVICE_ATTR(nf_call_ip6tables, S_IRUGO | S_IWUSR,
+		   show_nf_call_ip6tables, store_nf_call_ip6tables);
+
+static ssize_t show_nf_call_arptables(
+	struct device *d, struct device_attribute *attr, char *buf)
+{
+	struct net_bridge *br = to_bridge(d);
+	return sprintf(buf, "%u\n", br->nf_call_arptables);
+}
+
+static int set_nf_call_arptables(struct net_bridge *br, unsigned long val)
+{
+	br->nf_call_arptables = val ? true : false;
+	return 0;
+}
+
+static ssize_t store_nf_call_arptables(
+	struct device *d, struct device_attribute *attr, const char *buf,
+	size_t len)
+{
+	return store_bridge_parm(d, buf, len, set_nf_call_arptables);
+}
+static DEVICE_ATTR(nf_call_arptables, S_IRUGO | S_IWUSR,
+		   show_nf_call_arptables, store_nf_call_arptables);
+#endif
 
 static struct attribute *bridge_attrs[] = {
 	&dev_attr_forward_delay.attr,
@@ -645,6 +712,11 @@ static struct attribute *bridge_attrs[] = {
 	&dev_attr_multicast_query_response_interval.attr,
 	&dev_attr_multicast_startup_query_interval.attr,
 #endif
+#ifdef CONFIG_BRIDGE_NETFILTER
+	&dev_attr_nf_call_iptables.attr,
+	&dev_attr_nf_call_ip6tables.attr,
+	&dev_attr_nf_call_arptables.attr,
+#endif
 	NULL
 };
 
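Each new attribute pairs a show function that prints the bool with a store function funneled through store_bridge_parm(). That helper is defined earlier in this file; a hedged sketch of the contract assumed here (the real version also performs a capability check):

static ssize_t store_bridge_parm(struct device *d, const char *buf,
				 size_t len,
				 int (*set)(struct net_bridge *, unsigned long))
{
	struct net_bridge *br = to_bridge(d);
	unsigned long val;
	int err;

	if (strict_strtoul(buf, 0, &val))
		return -EINVAL;
	err = set(br, val);
	return err ? err : len;
}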
diff --git a/net/bridge/netfilter/ebt_redirect.c b/net/bridge/netfilter/ebt_redirect.c
index 9e19166ba453..46624bb6d9be 100644
--- a/net/bridge/netfilter/ebt_redirect.c
+++ b/net/bridge/netfilter/ebt_redirect.c
@@ -24,8 +24,9 @@ ebt_redirect_tg(struct sk_buff *skb, const struct xt_action_param *par)
 		return EBT_DROP;
 
 	if (par->hooknum != NF_BR_BROUTING)
+		/* rcu_read_lock()ed by nf_hook_slow */
 		memcpy(eth_hdr(skb)->h_dest,
-		       par->in->br_port->br->dev->dev_addr, ETH_ALEN);
+		       br_port_get_rcu(par->in)->br->dev->dev_addr, ETH_ALEN);
 	else
 		memcpy(eth_hdr(skb)->h_dest, par->in->dev_addr, ETH_ALEN);
 	skb->pkt_type = PACKET_HOST;
diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
index ae3c7cef1484..26377e96fa1c 100644
--- a/net/bridge/netfilter/ebt_ulog.c
+++ b/net/bridge/netfilter/ebt_ulog.c
@@ -177,8 +177,9 @@ static void ebt_ulog_packet(unsigned int hooknr, const struct sk_buff *skb,
 	if (in) {
 		strcpy(pm->physindev, in->name);
 		/* If in isn't a bridge, then physindev==indev */
-		if (in->br_port)
-			strcpy(pm->indev, in->br_port->br->dev->name);
+		if (br_port_exists(in))
+			/* rcu_read_lock()ed by nf_hook_slow */
+			strcpy(pm->indev, br_port_get_rcu(in)->br->dev->name);
 		else
 			strcpy(pm->indev, in->name);
 	} else
@@ -187,7 +188,8 @@ static void ebt_ulog_packet(unsigned int hooknr, const struct sk_buff *skb,
 	if (out) {
 		/* If out exists, then out is a bridge port */
 		strcpy(pm->physoutdev, out->name);
-		strcpy(pm->outdev, out->br_port->br->dev->name);
+		/* rcu_read_lock()ed by nf_hook_slow */
+		strcpy(pm->outdev, br_port_get_rcu(out)->br->dev->name);
 	} else
 		pm->outdev[0] = pm->physoutdev[0] = '\0';
 
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 59ca00e40dec..bcc102e3be4d 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -140,11 +140,14 @@ ebt_basic_match(const struct ebt_entry *e, const struct ethhdr *h,
 		return 1;
 	if (FWINV2(ebt_dev_check(e->out, out), EBT_IOUT))
 		return 1;
-	if ((!in || !in->br_port) ? 0 : FWINV2(ebt_dev_check(
-	   e->logical_in, in->br_port->br->dev), EBT_ILOGICALIN))
+	/* rcu_read_lock()ed by nf_hook_slow */
+	if (in && br_port_exists(in) &&
+	    FWINV2(ebt_dev_check(e->logical_in, br_port_get_rcu(in)->br->dev),
+		   EBT_ILOGICALIN))
 		return 1;
-	if ((!out || !out->br_port) ? 0 : FWINV2(ebt_dev_check(
-	   e->logical_out, out->br_port->br->dev), EBT_ILOGICALOUT))
+	if (out && br_port_exists(out) &&
+	    FWINV2(ebt_dev_check(e->logical_out, br_port_get_rcu(out)->br->dev),
+		   EBT_ILOGICALOUT))
 		return 1;
 
 	if (e->bitmask & EBT_SOURCEMAC) {
diff --git a/net/caif/Kconfig b/net/caif/Kconfig
index ed651786f16b..529750da9624 100644
--- a/net/caif/Kconfig
+++ b/net/caif/Kconfig
@@ -21,19 +21,18 @@ menuconfig CAIF
 	See Documentation/networking/caif for a further explanation on how to
 	use and configure CAIF.
 
-if CAIF
-
 config CAIF_DEBUG
 	bool "Enable Debug"
+	depends on CAIF
 	default n
 	--- help ---
 	Enable the inclusion of debug code in the CAIF stack.
 	Be aware that doing this will impact performance.
 	If unsure say N.
 
-
 config CAIF_NETDEV
 	tristate "CAIF GPRS Network device"
+	depends on CAIF
 	default CAIF
 	---help---
 	Say Y if you will be using a CAIF based GPRS network device.
@@ -41,5 +40,3 @@ config CAIF_NETDEV
 	If you select to build it as a built-in then the main CAIF device must
 	also be a built-in.
 	If unsure say Y.
-
-endif
diff --git a/net/caif/Makefile b/net/caif/Makefile
index 34852af2595e..f87481fb0e65 100644
--- a/net/caif/Makefile
+++ b/net/caif/Makefile
@@ -1,23 +1,13 @@
-ifeq ($(CONFIG_CAIF_DEBUG),1)
-CAIF_DBG_FLAGS := -DDEBUG
+ifeq ($(CONFIG_CAIF_DEBUG),y)
+EXTRA_CFLAGS += -DDEBUG
 endif
 
-ccflags-y := $(CAIF_FLAGS) $(CAIF_DBG_FLAGS)
-
 caif-objs := caif_dev.o \
 	cfcnfg.o cfmuxl.o cfctrl.o \
 	cffrml.o cfveil.o cfdbgl.o\
 	cfserl.o cfdgml.o \
 	cfrfml.o cfvidl.o cfutill.o \
 	cfsrvl.o cfpkt_skbuff.o caif_config_util.o
-clean-dirs:= .tmp_versions
-
-clean-files:= \
-	Module.symvers \
-	modules.order \
-	*.cmd \
-	*.o \
-	*~
 
 obj-$(CONFIG_CAIF) += caif.o
 obj-$(CONFIG_CAIF_NETDEV) += chnl_net.o
diff --git a/net/caif/caif_config_util.c b/net/caif/caif_config_util.c
index 6f36580366f0..76ae68303d3a 100644
--- a/net/caif/caif_config_util.c
+++ b/net/caif/caif_config_util.c
@@ -80,6 +80,11 @@ int connect_req_to_link_param(struct cfcnfg *cnfg,
 			l->u.utility.paramlen);
 
 		break;
+	case CAIFPROTO_DEBUG:
+		l->linktype = CFCTRL_SRV_DBG;
+		l->endpoint = s->sockaddr.u.dbg.service;
+		l->chtype = s->sockaddr.u.dbg.type;
+		break;
 	default:
 		return -EINVAL;
 	}
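The new case maps AF_CAIF addresses onto the modem's debug service. A hypothetical connector would fill the dbg members this branch reads (field names follow the diff; the zero values are placeholders):

struct sockaddr_caif addr;

memset(&addr, 0, sizeof(addr));
addr.family = AF_CAIF;
addr.u.dbg.service = 0;	/* debug service endpoint (placeholder) */
addr.u.dbg.type = 0;	/* debug channel type (placeholder) */
/* a connect() with this address now hits the CAIFPROTO_DEBUG case */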
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index e2b86f1f5a47..0b586e9d1378 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -255,7 +255,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
 			pref = CFPHYPREF_HIGH_BW;
 			break;
 		}
-
+		dev_hold(dev);
 		cfcnfg_add_phy_layer(get_caif_conf(),
 				     phy_type,
 				     dev,
@@ -285,6 +285,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
 			caifd->layer.up->ctrlcmd(caifd->layer.up,
 						 _CAIF_CTRLCMD_PHYIF_DOWN_IND,
 						 caifd->layer.id);
+		might_sleep();
 		res = wait_event_interruptible_timeout(caifd->event,
 					atomic_read(&caifd->in_use) == 0,
 					TIMEOUT);
@@ -300,6 +301,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
 				"Unregistering an active CAIF device: %s\n",
 				__func__, dev->name);
 		cfcnfg_del_phy_layer(get_caif_conf(), &caifd->layer);
+		dev_put(dev);
 		atomic_set(&caifd->state, what);
 		break;
 
@@ -326,7 +328,8 @@ struct cfcnfg *get_caif_conf(void)
 EXPORT_SYMBOL(get_caif_conf);
 
 int caif_connect_client(struct caif_connect_request *conn_req,
-			struct cflayer *client_layer)
+			struct cflayer *client_layer, int *ifindex,
+			int *headroom, int *tailroom)
 {
 	struct cfctrl_link_param param;
 	int ret;
@@ -334,8 +337,9 @@ int caif_connect_client(struct caif_connect_request *conn_req,
 	if (ret)
 		return ret;
 	/* Hook up the adaptation layer. */
-	return cfcnfg_add_adaptation_layer(get_caif_conf(),
-					&param, client_layer);
+	return cfcnfg_add_adaptation_layer(get_caif_conf(), &param,
+					client_layer, ifindex,
+					headroom, tailroom);
 }
 EXPORT_SYMBOL(caif_connect_client);
 
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 3d0e09584fae..8ce904786116 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -28,8 +28,8 @@
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_NETPROTO(AF_CAIF);
 
-#define CAIF_DEF_SNDBUF (CAIF_MAX_PAYLOAD_SIZE*10)
-#define CAIF_DEF_RCVBUF (CAIF_MAX_PAYLOAD_SIZE*100)
+#define CAIF_DEF_SNDBUF (4096*10)
+#define CAIF_DEF_RCVBUF (4096*100)
 
 /*
  * CAIF state is re-using the TCP socket states.
@@ -76,6 +76,7 @@ struct caifsock {
 	struct caif_connect_request conn_req;
 	struct mutex readlock;
 	struct dentry *debugfs_socket_dir;
+	int headroom, tailroom, maxframe;
 };
 
 static int rx_flow_is_on(struct caifsock *cf_sk)
@@ -594,27 +595,32 @@ static int caif_seqpkt_sendmsg(struct kiocb *kiocb, struct socket *sock,
 		goto err;
 	noblock = msg->msg_flags & MSG_DONTWAIT;
 
-	buffer_size = len + CAIF_NEEDED_HEADROOM + CAIF_NEEDED_TAILROOM;
-
-	ret = -EMSGSIZE;
-	if (buffer_size > CAIF_MAX_PAYLOAD_SIZE)
-		goto err;
-
 	timeo = sock_sndtimeo(sk, noblock);
 	timeo = caif_wait_for_flow_on(container_of(sk, struct caifsock, sk),
 				1, timeo, &ret);
 
+	if (ret)
+		goto err;
 	ret = -EPIPE;
 	if (cf_sk->sk.sk_state != CAIF_CONNECTED ||
 	    sock_flag(sk, SOCK_DEAD) ||
 	    (sk->sk_shutdown & RCV_SHUTDOWN))
 		goto err;
 
+	/* Error if trying to write more than maximum frame size. */
+	ret = -EMSGSIZE;
+	if (len > cf_sk->maxframe && cf_sk->sk.sk_protocol != CAIFPROTO_RFM)
+		goto err;
+
+	buffer_size = len + cf_sk->headroom + cf_sk->tailroom;
+
 	ret = -ENOMEM;
 	skb = sock_alloc_send_skb(sk, buffer_size, noblock, &ret);
-	if (!skb)
+
+	if (!skb || skb_tailroom(skb) < buffer_size)
 		goto err;
-	skb_reserve(skb, CAIF_NEEDED_HEADROOM);
+
+	skb_reserve(skb, cf_sk->headroom);
 
 	ret = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
 
@@ -645,7 +651,6 @@ static int caif_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
 	long timeo;
 
 	err = -EOPNOTSUPP;
-
 	if (unlikely(msg->msg_flags&MSG_OOB))
 		goto out_err;
 
@@ -662,8 +667,8 @@
 
 		size = len-sent;
 
-		if (size > CAIF_MAX_PAYLOAD_SIZE)
-			size = CAIF_MAX_PAYLOAD_SIZE;
+		if (size > cf_sk->maxframe)
+			size = cf_sk->maxframe;
 
 		/* If size is more than half of sndbuf, chop up message */
 		if (size > ((sk->sk_sndbuf >> 1) - 64))
@@ -673,14 +678,14 @@
 			size = SKB_MAX_ALLOC;
 
 		skb = sock_alloc_send_skb(sk,
-					size + CAIF_NEEDED_HEADROOM
-					+ CAIF_NEEDED_TAILROOM,
+					size + cf_sk->headroom +
+					cf_sk->tailroom,
 					msg->msg_flags&MSG_DONTWAIT,
 					&err);
 		if (skb == NULL)
 			goto out_err;
 
-		skb_reserve(skb, CAIF_NEEDED_HEADROOM);
+		skb_reserve(skb, cf_sk->headroom);
 		/*
 		 * If you pass two values to the sock_alloc_send_skb
 		 * it tries to grab the large buffer with GFP_NOFS
@@ -821,17 +826,15 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
 	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
 	long timeo;
 	int err;
+	int ifindex, headroom, tailroom;
+	struct net_device *dev;
+
 	lock_sock(sk);
 
 	err = -EAFNOSUPPORT;
 	if (uaddr->sa_family != AF_CAIF)
 		goto out;
 
-	err = -ESOCKTNOSUPPORT;
-	if (unlikely(!(sk->sk_type == SOCK_STREAM &&
-		cf_sk->sk.sk_protocol == CAIFPROTO_AT) &&
-		sk->sk_type != SOCK_SEQPACKET))
-		goto out;
 	switch (sock->state) {
 	case SS_UNCONNECTED:
 		/* Normal case, a fresh connect */
@@ -874,8 +877,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
 	sk_stream_kill_queues(&cf_sk->sk);
 
 	err = -EINVAL;
-	if (addr_len != sizeof(struct sockaddr_caif) ||
-		!uaddr)
+	if (addr_len != sizeof(struct sockaddr_caif))
 		goto out;
 
 	memcpy(&cf_sk->conn_req.sockaddr, uaddr,
@@ -888,12 +890,23 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
 	dbfs_atomic_inc(&cnt.num_connect_req);
 	cf_sk->layer.receive = caif_sktrecv_cb;
 	err = caif_connect_client(&cf_sk->conn_req,
-				&cf_sk->layer);
+				&cf_sk->layer, &ifindex, &headroom, &tailroom);
 	if (err < 0) {
 		cf_sk->sk.sk_socket->state = SS_UNCONNECTED;
 		cf_sk->sk.sk_state = CAIF_DISCONNECTED;
 		goto out;
 	}
+	dev = dev_get_by_index(sock_net(sk), ifindex);
+	cf_sk->headroom = LL_RESERVED_SPACE_EXTRA(dev, headroom);
+	cf_sk->tailroom = tailroom;
+	cf_sk->maxframe = dev->mtu - (headroom + tailroom);
+	dev_put(dev);
+	if (cf_sk->maxframe < 1) {
+		pr_warning("CAIF: %s(): CAIF Interface MTU too small (%d)\n",
+			__func__, dev->mtu);
+		err = -ENODEV;
+		goto out;
+	}
 
 	err = -EINPROGRESS;
 wait_connect:
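The connect path above sizes frames from the underlying device rather than a fixed CAIF_MAX_PAYLOAD_SIZE. A worked example, assuming a 1500-byte link MTU and a link layer that reports 4 bytes of protocol headroom and 2 bytes of tailroom:

int mtu = 1500, headroom = 4, tailroom = 2;
int maxframe = mtu - (headroom + tailroom);	/* 1494-byte payloads */
/* cf_sk->headroom is additionally padded via LL_RESERVED_SPACE_EXTRA() */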
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index df43f264d9fb..1c29189b344d 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
@@ -6,6 +6,7 @@
 #include <linux/kernel.h>
 #include <linux/stddef.h>
 #include <linux/slab.h>
+#include <linux/netdevice.h>
 #include <net/caif/caif_layer.h>
 #include <net/caif/cfpkt.h>
 #include <net/caif/cfcnfg.h>
@@ -22,6 +23,7 @@
 #define PHY_NAME_LEN 20
 
 #define container_obj(layr) container_of(layr, struct cfcnfg, layer)
+#define RFM_FRAGMENT_SIZE 4030
 
 /* Information about CAIF physical interfaces held by Config Module in order
  * to manage physical interfaces
@@ -41,6 +43,15 @@ struct cfcnfg_phyinfo {
 
 	/* Information about the physical device */
 	struct dev_info dev_info;
+
+	/* Interface index */
+	int ifindex;
+
+	/* Use Start of frame extension */
+	bool use_stx;
+
+	/* Use Start of frame checksum */
+	bool use_fcs;
 };
 
 struct cfcnfg {
@@ -248,9 +259,20 @@ static void cfcnfg_linkdestroy_rsp(struct cflayer *layer, u8 channel_id)
 {
 }
 
+int protohead[CFCTRL_SRV_MASK] = {
+	[CFCTRL_SRV_VEI] = 4,
+	[CFCTRL_SRV_DATAGRAM] = 7,
+	[CFCTRL_SRV_UTIL] = 4,
+	[CFCTRL_SRV_RFM] = 3,
+	[CFCTRL_SRV_DBG] = 3,
+};
+
 int cfcnfg_add_adaptation_layer(struct cfcnfg *cnfg,
 				struct cfctrl_link_param *param,
-				struct cflayer *adap_layer)
+				struct cflayer *adap_layer,
+				int *ifindex,
+				int *proto_head,
+				int *proto_tail)
 {
 	struct cflayer *frml;
 	if (adap_layer == NULL) {
@@ -276,6 +298,14 @@ int cfcnfg_add_adaptation_layer(struct cfcnfg *cnfg,
 			param->phyid);
 	caif_assert(cnfg->phy_layers[param->phyid].phy_layer->id ==
 		     param->phyid);
+
+	*ifindex = cnfg->phy_layers[param->phyid].ifindex;
+	*proto_head =
+		protohead[param->linktype]+
+		(cnfg->phy_layers[param->phyid].use_stx ? 1 : 0);
+
+	*proto_tail = 2;
+
 	/* FIXME: ENUMERATE INITIALLY WHEN ACTIVATING PHYSICAL INTERFACE */
 	cfctrl_enum_req(cnfg->ctrl, param->phyid);
 	return cfctrl_linkup_request(cnfg->ctrl, param, adap_layer);
@@ -297,6 +327,8 @@ cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv,
 	struct cfcnfg *cnfg = container_obj(layer);
 	struct cflayer *servicel = NULL;
 	struct cfcnfg_phyinfo *phyinfo;
+	struct net_device *netdev;
+
 	if (adapt_layer == NULL) {
 		pr_debug("CAIF: %s(): link setup response "
 			"but no client exist, send linkdown back\n",
@@ -308,19 +340,15 @@ cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv,
 	caif_assert(cnfg != NULL);
 	caif_assert(phyid != 0);
 	phyinfo = &cnfg->phy_layers[phyid];
-	caif_assert(phyinfo != NULL);
 	caif_assert(phyinfo->id == phyid);
 	caif_assert(phyinfo->phy_layer != NULL);
 	caif_assert(phyinfo->phy_layer->id == phyid);
 
-	if (phyinfo != NULL &&
-	    phyinfo->phy_ref_count++ == 0 &&
-	    phyinfo->phy_layer != NULL &&
+	phyinfo->phy_ref_count++;
+	if (phyinfo->phy_ref_count == 1 &&
 	    phyinfo->phy_layer->modemcmd != NULL) {
-		caif_assert(phyinfo->phy_layer->id == phyid);
 		phyinfo->phy_layer->modemcmd(phyinfo->phy_layer,
 			_CAIF_MODEMCMD_PHYIF_USEFULL);
-
 	}
 	adapt_layer->id = channel_id;
 
@@ -332,7 +360,9 @@ cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv,
 		servicel = cfdgml_create(channel_id, &phyinfo->dev_info);
 		break;
 	case CFCTRL_SRV_RFM:
-		servicel = cfrfml_create(channel_id, &phyinfo->dev_info);
+		netdev = phyinfo->dev_info.dev;
+		servicel = cfrfml_create(channel_id, &phyinfo->dev_info,
+						netdev->mtu);
 		break;
 	case CFCTRL_SRV_UTIL:
 		servicel = cfutill_create(channel_id, &phyinfo->dev_info);
@@ -363,8 +393,8 @@ cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv,
 
 void
 cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
-		     void *dev, struct cflayer *phy_layer, u16 *phyid,
-		     enum cfcnfg_phy_preference pref,
+		     struct net_device *dev, struct cflayer *phy_layer,
+		     u16 *phyid, enum cfcnfg_phy_preference pref,
 		     bool fcs, bool stx)
 {
 	struct cflayer *frml;
@@ -418,6 +448,10 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
 	cnfg->phy_layers[*phyid].dev_info.dev = dev;
 	cnfg->phy_layers[*phyid].phy_layer = phy_layer;
 	cnfg->phy_layers[*phyid].phy_ref_count = 0;
+	cnfg->phy_layers[*phyid].ifindex = dev->ifindex;
+	cnfg->phy_layers[*phyid].use_stx = stx;
+	cnfg->phy_layers[*phyid].use_fcs = fcs;
+
 	phy_layer->type = phy_type;
 	frml = cffrml_create(*phyid, fcs);
 	if (!frml) {
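protohead[] records each service type's CAIF header size; cfcnfg_add_adaptation_layer() hands it back through *proto_head, adding one byte when the PHY uses the start-of-frame extension. For example, a VEI link over an STX-framed PHY (standalone variables here are illustrative):

int head = protohead[CFCTRL_SRV_VEI]	/* 4 */
	+ (use_stx ? 1 : 0);		/* +1 -> 5 bytes of headroom */
int tail = 2;				/* the fixed *proto_tail above */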
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index fcfda98a5e6d..563145fdc4c3 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -19,7 +19,7 @@
 #ifdef CAIF_NO_LOOP
 static int handle_loop(struct cfctrl *ctrl,
 		      int cmd, struct cfpkt *pkt){
-	return CAIF_FAILURE;
+	return -1;
 }
 #else
 static int handle_loop(struct cfctrl *ctrl,
@@ -43,7 +43,7 @@ struct cflayer *cfctrl_create(void)
 	memset(&dev_info, 0, sizeof(dev_info));
 	dev_info.id = 0xff;
 	memset(this, 0, sizeof(*this));
-	cfsrvl_init(&this->serv, 0, &dev_info);
+	cfsrvl_init(&this->serv, 0, &dev_info, false);
 	atomic_set(&this->req_seq_no, 1);
 	atomic_set(&this->rsp_seq_no, 1);
 	this->serv.layer.receive = cfctrl_recv;
@@ -395,7 +395,7 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
 	cmd = cmdrsp & CFCTRL_CMD_MASK;
 	if (cmd != CFCTRL_CMD_LINK_ERR
 	    && CFCTRL_RSP_BIT != (CFCTRL_RSP_BIT & cmdrsp)) {
-		if (handle_loop(cfctrl, cmd, pkt) == CAIF_FAILURE)
+		if (handle_loop(cfctrl, cmd, pkt) != 0)
 			cmdrsp |= CFCTRL_ERR_BIT;
 	}
 
@@ -647,6 +647,6 @@ found:
 	default:
 		break;
 	}
-	return CAIF_SUCCESS;
+	return 0;
 }
 #endif
diff --git a/net/caif/cfdbgl.c b/net/caif/cfdbgl.c
index ab6b6dc34cf8..676648cac8dd 100644
--- a/net/caif/cfdbgl.c
+++ b/net/caif/cfdbgl.c
@@ -22,7 +22,7 @@ struct cflayer *cfdbgl_create(u8 channel_id, struct dev_info *dev_info)
 	}
 	caif_assert(offsetof(struct cfsrvl, layer) == 0);
 	memset(dbg, 0, sizeof(struct cfsrvl));
-	cfsrvl_init(dbg, channel_id, dev_info);
+	cfsrvl_init(dbg, channel_id, dev_info, false);
 	dbg->layer.receive = cfdbgl_receive;
 	dbg->layer.transmit = cfdbgl_transmit;
 	snprintf(dbg->layer.name, CAIF_LAYER_NAME_SZ - 1, "dbg%d", channel_id);
diff --git a/net/caif/cfdgml.c b/net/caif/cfdgml.c
index 53194840ecb6..ed9d53aff280 100644
--- a/net/caif/cfdgml.c
+++ b/net/caif/cfdgml.c
@@ -17,6 +17,7 @@
 #define DGM_FLOW_OFF 0x81
 #define DGM_FLOW_ON 0x80
 #define DGM_CTRL_PKT_SIZE 1
+#define DGM_MTU 1500
 
 static int cfdgml_receive(struct cflayer *layr, struct cfpkt *pkt);
 static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt);
@@ -30,7 +31,7 @@ struct cflayer *cfdgml_create(u8 channel_id, struct dev_info *dev_info)
 	}
 	caif_assert(offsetof(struct cfsrvl, layer) == 0);
 	memset(dgm, 0, sizeof(struct cfsrvl));
-	cfsrvl_init(dgm, channel_id, dev_info);
+	cfsrvl_init(dgm, channel_id, dev_info, true);
 	dgm->layer.receive = cfdgml_receive;
 	dgm->layer.transmit = cfdgml_transmit;
 	snprintf(dgm->layer.name, CAIF_LAYER_NAME_SZ - 1, "dgm%d", channel_id);
@@ -89,6 +90,10 @@ static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt)
 	if (!cfsrvl_ready(service, &ret))
 		return ret;
 
+	/* STE Modem cannot handle more than 1500 bytes datagrams */
+	if (cfpkt_getlen(pkt) > DGM_MTU)
+		return -EMSGSIZE;
+
 	cfpkt_add_head(pkt, &zero, 4);
 
 	/* Add info for MUX-layer to route the packet out. */
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index a6fdf899741a..01f238ff2346 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -9,8 +9,8 @@
 #include <linux/hardirq.h>
 #include <net/caif/cfpkt.h>
 
-#define PKT_PREFIX CAIF_NEEDED_HEADROOM
-#define PKT_POSTFIX CAIF_NEEDED_TAILROOM
+#define PKT_PREFIX 16
+#define PKT_POSTFIX 2
 #define PKT_LEN_WHEN_EXTENDING 128
 #define PKT_ERROR(pkt, errmsg) do {	   \
 	cfpkt_priv(pkt)->erronous = true; \
@@ -338,7 +338,6 @@ struct cfpkt *cfpkt_append(struct cfpkt *dstpkt,
 	u16 dstlen;
 	u16 createlen;
 	if (unlikely(is_erronous(dstpkt) || is_erronous(addpkt))) {
-		cfpkt_destroy(addpkt);
 		return dstpkt;
 	}
 	if (expectlen > addlen)
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index fd27b172fb5d..eb1602022ac0 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -7,102 +7,304 @@
 #include <linux/stddef.h>
 #include <linux/spinlock.h>
 #include <linux/slab.h>
+#include <linux/unaligned/le_byteshift.h>
 #include <net/caif/caif_layer.h>
 #include <net/caif/cfsrvl.h>
 #include <net/caif/cfpkt.h>
 
-#define container_obj(layr) container_of(layr, struct cfsrvl, layer)
-
+#define container_obj(layr) container_of(layr, struct cfrfml, serv.layer)
 #define RFM_SEGMENTATION_BIT 0x01
-#define RFM_PAYLOAD 0x00
-#define RFM_CMD_BIT 0x80
-#define RFM_FLOW_OFF 0x81
-#define RFM_FLOW_ON 0x80
-#define RFM_SET_PIN 0x82
-#define RFM_CTRL_PKT_SIZE 1
+#define RFM_HEAD_SIZE 7
 
 static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt);
 static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt);
-static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl);
 
-struct cflayer *cfrfml_create(u8 channel_id, struct dev_info *dev_info)
+struct cfrfml {
+	struct cfsrvl serv;
+	struct cfpkt *incomplete_frm;
+	int fragment_size;
+	u8 seghead[6];
+	u16 pdu_size;
+	/* Protects serialized processing of packets */
+	spinlock_t sync;
+};
+
+static void cfrfml_release(struct kref *kref)
+{
+	struct cfsrvl *srvl = container_of(kref, struct cfsrvl, ref);
+	struct cfrfml *rfml = container_obj(&srvl->layer);
+
+	if (rfml->incomplete_frm)
+		cfpkt_destroy(rfml->incomplete_frm);
+
+	kfree(srvl);
+}
+
+struct cflayer *cfrfml_create(u8 channel_id, struct dev_info *dev_info,
+				int mtu_size)
 {
-	struct cfsrvl *rfm = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
-	if (!rfm) {
+	int tmp;
+	struct cfrfml *this =
+		kzalloc(sizeof(struct cfrfml), GFP_ATOMIC);
+
+	if (!this) {
 		pr_warning("CAIF: %s(): Out of memory\n", __func__);
 		return NULL;
 	}
-	caif_assert(offsetof(struct cfsrvl, layer) == 0);
-	memset(rfm, 0, sizeof(struct cfsrvl));
-	cfsrvl_init(rfm, channel_id, dev_info);
-	rfm->layer.modemcmd = cfservl_modemcmd;
-	rfm->layer.receive = cfrfml_receive;
-	rfm->layer.transmit = cfrfml_transmit;
-	snprintf(rfm->layer.name, CAIF_LAYER_NAME_SZ, "rfm%d", channel_id);
-	return &rfm->layer;
+
+	cfsrvl_init(&this->serv, channel_id, dev_info, false);
+	this->serv.release = cfrfml_release;
+	this->serv.layer.receive = cfrfml_receive;
+	this->serv.layer.transmit = cfrfml_transmit;
+
+	/* Round down to closest multiple of 16 */
+	tmp = (mtu_size - RFM_HEAD_SIZE - 6) / 16;
+	tmp *= 16;
+
+	this->fragment_size = tmp;
+	spin_lock_init(&this->sync);
+	snprintf(this->serv.layer.name, CAIF_LAYER_NAME_SZ,
+		"rfm%d", channel_id);
+
+	return &this->serv.layer;
 }
 
-static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
+static struct cfpkt *rfm_append(struct cfrfml *rfml, char *seghead,
+			struct cfpkt *pkt, int *err)
 {
-	return -EPROTO;
+	struct cfpkt *tmppkt;
+	*err = -EPROTO;
+	/* n-th but not last segment */
+
+	if (cfpkt_extr_head(pkt, seghead, 6) < 0)
+		return NULL;
+
+	/* Verify correct header */
+	if (memcmp(seghead, rfml->seghead, 6) != 0)
+		return NULL;
+
+	tmppkt = cfpkt_append(rfml->incomplete_frm, pkt,
+			rfml->pdu_size + RFM_HEAD_SIZE);
+
+	/* If cfpkt_append failes input pkts are not freed */
+	*err = -ENOMEM;
+	if (tmppkt == NULL)
+		return NULL;
+
+	*err = 0;
+	return tmppkt;
 }
 
 static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt)
 {
 	u8 tmp;
 	bool segmented;
-	int ret;
+	int err;
+	u8 seghead[6];
+	struct cfrfml *rfml;
+	struct cfpkt *tmppkt = NULL;
+
 	caif_assert(layr->up != NULL);
 	caif_assert(layr->receive != NULL);
+	rfml = container_obj(layr);
+	spin_lock(&rfml->sync);
+
+	err = -EPROTO;
+	if (cfpkt_extr_head(pkt, &tmp, 1) < 0)
+		goto out;
+	segmented = tmp & RFM_SEGMENTATION_BIT;
+
+	if (segmented) {
+		if (rfml->incomplete_frm == NULL) {
+			/* Initial Segment */
+			if (cfpkt_peek_head(pkt, rfml->seghead, 6) < 0)
+				goto out;
+
+			rfml->pdu_size = get_unaligned_le16(rfml->seghead+4);
+
+			if (cfpkt_erroneous(pkt))
+				goto out;
+			rfml->incomplete_frm = pkt;
+			pkt = NULL;
+		} else {
+
+			tmppkt = rfm_append(rfml, seghead, pkt, &err);
+			if (tmppkt == NULL)
+				goto out;
+
+			if (cfpkt_erroneous(tmppkt))
+				goto out;
+
+			rfml->incomplete_frm = tmppkt;
+
+
+			if (cfpkt_erroneous(tmppkt))
+				goto out;
+		}
+		err = 0;
+		goto out;
+	}
+
+	if (rfml->incomplete_frm) {
+
+		/* Last Segment */
+		tmppkt = rfm_append(rfml, seghead, pkt, &err);
+		if (tmppkt == NULL)
+			goto out;
+
+		if (cfpkt_erroneous(tmppkt))
+			goto out;
+
+		rfml->incomplete_frm = NULL;
+		pkt = tmppkt;
+		tmppkt = NULL;
+
+		/* Verify that length is correct */
+		err = EPROTO;
+		if (rfml->pdu_size != cfpkt_getlen(pkt) - RFM_HEAD_SIZE + 1)
+			goto out;
+	}
+
+	err = rfml->serv.layer.up->receive(rfml->serv.layer.up, pkt);
+
+out:
+
+	if (err != 0) {
+		if (tmppkt)
+			cfpkt_destroy(tmppkt);
+		if (pkt)
+			cfpkt_destroy(pkt);
+		if (rfml->incomplete_frm)
+			cfpkt_destroy(rfml->incomplete_frm);
+		rfml->incomplete_frm = NULL;
+
+		pr_info("CAIF: %s(): "
+			"Connection error %d triggered on RFM link\n",
+			__func__, err);
+
+		/* Trigger connection error upon failure.*/
+		layr->up->ctrlcmd(layr->up, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND,
+					rfml->serv.dev_info.id);
+	}
+	spin_unlock(&rfml->sync);
+	return err;
+}
+
+
+static int cfrfml_transmit_segment(struct cfrfml *rfml, struct cfpkt *pkt)
+{
+	caif_assert(cfpkt_getlen(pkt) >= rfml->fragment_size);
+
+	/* Add info for MUX-layer to route the packet out. */
+	cfpkt_info(pkt)->channel_id = rfml->serv.layer.id;
 
 	/*
-	 * RFM is taking care of segmentation and stripping of
-	 * segmentation bit.
+	 * To optimize alignment, we add up the size of CAIF header before
+	 * payload.
 	 */
-	if (cfpkt_extr_head(pkt, &tmp, 1) < 0) {
-		pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
-		cfpkt_destroy(pkt);
-		return -EPROTO;
-	}
-	segmented = tmp & RFM_SEGMENTATION_BIT;
-	caif_assert(!segmented);
+	cfpkt_info(pkt)->hdr_len = RFM_HEAD_SIZE;
+	cfpkt_info(pkt)->dev_info = &rfml->serv.dev_info;
 
-	ret = layr->up->receive(layr->up, pkt);
-	return ret;
+	return rfml->serv.layer.dn->transmit(rfml->serv.layer.dn, pkt);
 }
 
 static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt)
 {
-	u8 tmp = 0;
-	int ret;
-	struct cfsrvl *service = container_obj(layr);
+	int err;
+	u8 seg;
+	u8 head[6];
+	struct cfpkt *rearpkt = NULL;
+	struct cfpkt *frontpkt = pkt;
+	struct cfrfml *rfml = container_obj(layr);
 
 	caif_assert(layr->dn != NULL);
 	caif_assert(layr->dn->transmit != NULL);
 
-	if (!cfsrvl_ready(service, &ret))
-		return ret;
+	if (!cfsrvl_ready(&rfml->serv, &err))
+		return err;
+
+	err = -EPROTO;
+	if (cfpkt_getlen(pkt) <= RFM_HEAD_SIZE-1)
+		goto out;
+
+	err = 0;
+	if (cfpkt_getlen(pkt) > rfml->fragment_size + RFM_HEAD_SIZE)
+		err = cfpkt_peek_head(pkt, head, 6);
+
+	if (err < 0)
+		goto out;
+
+	while (cfpkt_getlen(frontpkt) > rfml->fragment_size + RFM_HEAD_SIZE) {
+
+		seg = 1;
+		err = -EPROTO;
+
+		if (cfpkt_add_head(frontpkt, &seg, 1) < 0)
+			goto out;
+		/*
+		 * On OOM error cfpkt_split returns NULL.
+		 *
+		 * NOTE: Segmented pdu is not correctly aligned.
+		 * This has negative performance impact.
+		 */
+
+		rearpkt = cfpkt_split(frontpkt, rfml->fragment_size);
+		if (rearpkt == NULL)
+			goto out;
+
+		err = cfrfml_transmit_segment(rfml, frontpkt);
+
+		if (err != 0)
+			goto out;
+		frontpkt = rearpkt;
+		rearpkt = NULL;
+
+		err = -ENOMEM;
+		if (frontpkt == NULL)
+			goto out;
+		err = -EPROTO;
+		if (cfpkt_add_head(frontpkt, head, 6) < 0)
+			goto out;
 
-	if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
-		pr_err("CAIF: %s():Packet too large - size=%d\n",
-			__func__, cfpkt_getlen(pkt));
-		return -EOVERFLOW;
 	}
-	if (cfpkt_add_head(pkt, &tmp, 1) < 0) {
-		pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
-		return -EPROTO;
+
+	seg = 0;
+	err = -EPROTO;
+
+	if (cfpkt_add_head(frontpkt, &seg, 1) < 0)
+		goto out;
+
+	err = cfrfml_transmit_segment(rfml, frontpkt);
+
+	frontpkt = NULL;
+out:
+
+	if (err != 0) {
+		pr_info("CAIF: %s(): "
+			"Connection error %d triggered on RFM link\n",
+			__func__, err);
+		/* Trigger connection error upon failure.*/
+
+		layr->up->ctrlcmd(layr->up, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND,
+					rfml->serv.dev_info.id);
+
+		if (rearpkt)
+			cfpkt_destroy(rearpkt);
+
+		if (frontpkt && frontpkt != pkt) {
+
+			cfpkt_destroy(frontpkt);
+			/*
+			 * Socket layer will free the original packet,
+			 * but this packet may already be sent and
+			 * freed. So we have to return 0 in this case
+			 * to avoid socket layer to re-free this packet.
+			 * The return of shutdown indication will
+			 * cause connection to be invalidated anyhow.
+			 */
+			err = 0;
+		}
 	}
 
-	/* Add info for MUX-layer to route the packet out. */
-	cfpkt_info(pkt)->channel_id = service->layer.id;
-	/*
-	 * To optimize alignment, we add up the size of CAIF header before
-	 * payload.
-	 */
-	cfpkt_info(pkt)->hdr_len = 1;
-	cfpkt_info(pkt)->dev_info = &service->dev_info;
-	ret = layr->dn->transmit(layr->dn, pkt);
-	if (ret < 0)
-		cfpkt_extr_head(pkt, &tmp, 1);
-	return ret;
+	return err;
 }
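cfrfml_create() now derives the segment size from the device MTU instead of a global maximum, rounding down to a multiple of 16. A worked example for a 1500-byte MTU:

int mtu_size = 1500;
int tmp = (mtu_size - 7 /* RFM_HEAD_SIZE */ - 6 /* seghead */) / 16;	/* 92 */
int fragment_size = tmp * 16;	/* 1472 bytes of payload per segment */

Frames longer than fragment_size + RFM_HEAD_SIZE are split in the transmit loop above; every intermediate segment re-adds the 6-byte segment header plus a leading flag byte of 1, and the final segment carries flag byte 0.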
diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c
index 965c5baace40..a11fbd68a13d 100644
--- a/net/caif/cfserl.c
+++ b/net/caif/cfserl.c
@@ -14,7 +14,8 @@
 #define container_obj(layr) ((struct cfserl *) layr)
 
 #define CFSERL_STX 0x02
-#define CAIF_MINIUM_PACKET_SIZE 4
+#define SERIAL_MINIUM_PACKET_SIZE 4
+#define SERIAL_MAX_FRAMESIZE 4096
 struct cfserl {
 	struct cflayer layer;
 	struct cfpkt *incomplete_frm;
@@ -119,8 +120,8 @@ static int cfserl_receive(struct cflayer *l, struct cfpkt *newpkt)
 			/*
 			 * Frame error handling
 			 */
-			if (expectlen < CAIF_MINIUM_PACKET_SIZE
-			    || expectlen > CAIF_MAX_FRAMESIZE) {
+			if (expectlen < SERIAL_MINIUM_PACKET_SIZE
+			    || expectlen > SERIAL_MAX_FRAMESIZE) {
 				if (!layr->usestx) {
 					if (pkt != NULL)
 						cfpkt_destroy(pkt);
diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c
index 6e5b7079a684..f40939a91211 100644
--- a/net/caif/cfsrvl.c
+++ b/net/caif/cfsrvl.c
@@ -24,8 +24,10 @@ static void cfservl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
 			    int phyid)
 {
 	struct cfsrvl *service = container_obj(layr);
+
 	caif_assert(layr->up != NULL);
 	caif_assert(layr->up->ctrlcmd != NULL);
+
 	switch (ctrl) {
 	case CAIF_CTRLCMD_INIT_RSP:
 		service->open = true;
@@ -89,9 +91,14 @@
 static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
 {
 	struct cfsrvl *service = container_obj(layr);
+
 	caif_assert(layr != NULL);
 	caif_assert(layr->dn != NULL);
 	caif_assert(layr->dn->transmit != NULL);
+
+	if (!service->supports_flowctrl)
+		return 0;
+
 	switch (ctrl) {
 	case CAIF_MODEMCMD_FLOW_ON_REQ:
 		{
@@ -152,9 +159,17 @@ void cfservl_destroy(struct cflayer *layer)
 	kfree(layer);
 }
 
+void cfsrvl_release(struct kref *kref)
+{
+	struct cfsrvl *service = container_of(kref, struct cfsrvl, ref);
+	kfree(service);
+}
+
 void cfsrvl_init(struct cfsrvl *service,
 		u8 channel_id,
-		struct dev_info *dev_info)
+		struct dev_info *dev_info,
+		bool supports_flowctrl
+		)
 {
 	caif_assert(offsetof(struct cfsrvl, layer) == 0);
 	service->open = false;
@@ -164,14 +179,11 @@ void cfsrvl_init(struct cfsrvl *service,
 	service->layer.ctrlcmd = cfservl_ctrlcmd;
 	service->layer.modemcmd = cfservl_modemcmd;
 	service->dev_info = *dev_info;
+	service->supports_flowctrl = supports_flowctrl;
+	service->release = cfsrvl_release;
 	kref_init(&service->ref);
 }
 
-void cfsrvl_release(struct kref *kref)
-{
-	struct cfsrvl *service = container_of(kref, struct cfsrvl, ref);
-	kfree(service);
-}
 
 bool cfsrvl_ready(struct cfsrvl *service, int *err)
 {
diff --git a/net/caif/cfutill.c b/net/caif/cfutill.c
index 5fd2c9ea8b42..02795aff57a4 100644
--- a/net/caif/cfutill.c
+++ b/net/caif/cfutill.c
@@ -31,7 +31,7 @@ struct cflayer *cfutill_create(u8 channel_id, struct dev_info *dev_info)
 	}
 	caif_assert(offsetof(struct cfsrvl, layer) == 0);
 	memset(util, 0, sizeof(struct cfsrvl));
-	cfsrvl_init(util, channel_id, dev_info);
+	cfsrvl_init(util, channel_id, dev_info, true);
 	util->layer.receive = cfutill_receive;
 	util->layer.transmit = cfutill_transmit;
 	snprintf(util->layer.name, CAIF_LAYER_NAME_SZ - 1, "util1");
@@ -90,12 +90,6 @@ static int cfutill_transmit(struct cflayer *layr, struct cfpkt *pkt)
 	if (!cfsrvl_ready(service, &ret))
 		return ret;
 
-	if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
-		pr_err("CAIF: %s(): packet too large size=%d\n",
-			__func__, cfpkt_getlen(pkt));
-		return -EOVERFLOW;
-	}
-
 	cfpkt_add_head(pkt, &zero, 1);
 	/* Add info for MUX-layer to route the packet out. */
 	info = cfpkt_info(pkt);
diff --git a/net/caif/cfveil.c b/net/caif/cfveil.c
index e04f7d964e83..77cc09faac9a 100644
--- a/net/caif/cfveil.c
+++ b/net/caif/cfveil.c
@@ -30,7 +30,7 @@ struct cflayer *cfvei_create(u8 channel_id, struct dev_info *dev_info)
 	}
 	caif_assert(offsetof(struct cfsrvl, layer) == 0);
 	memset(vei, 0, sizeof(struct cfsrvl));
-	cfsrvl_init(vei, channel_id, dev_info);
+	cfsrvl_init(vei, channel_id, dev_info, true);
 	vei->layer.receive = cfvei_receive;
 	vei->layer.transmit = cfvei_transmit;
 	snprintf(vei->layer.name, CAIF_LAYER_NAME_SZ - 1, "vei%d", channel_id);
@@ -84,11 +84,6 @@ static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt)
 		return ret;
 	caif_assert(layr->dn != NULL);
 	caif_assert(layr->dn->transmit != NULL);
-	if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
-		pr_warning("CAIF: %s(): Packet too large - size=%d\n",
-			__func__, cfpkt_getlen(pkt));
-		return -EOVERFLOW;
-	}
 
 	if (cfpkt_add_head(pkt, &tmp, 1) < 0) {
 		pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
diff --git a/net/caif/cfvidl.c b/net/caif/cfvidl.c
index 89ad4ea239f1..ada6ee2d48f5 100644
--- a/net/caif/cfvidl.c
+++ b/net/caif/cfvidl.c
@@ -27,7 +27,7 @@ struct cflayer *cfvidl_create(u8 channel_id, struct dev_info *dev_info)
27 caif_assert(offsetof(struct cfsrvl, layer) == 0); 27 caif_assert(offsetof(struct cfsrvl, layer) == 0);
28 28
29 memset(vid, 0, sizeof(struct cfsrvl)); 29 memset(vid, 0, sizeof(struct cfsrvl));
30 cfsrvl_init(vid, channel_id, dev_info); 30 cfsrvl_init(vid, channel_id, dev_info, false);
31 vid->layer.receive = cfvidl_receive; 31 vid->layer.receive = cfvidl_receive;
32 vid->layer.transmit = cfvidl_transmit; 32 vid->layer.transmit = cfvidl_transmit;
33 snprintf(vid->layer.name, CAIF_LAYER_NAME_SZ - 1, "vid1"); 33 snprintf(vid->layer.name, CAIF_LAYER_NAME_SZ - 1, "vid1");
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 610966abe2dc..4293e190ec53 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -23,7 +23,7 @@
23#include <net/caif/caif_dev.h> 23#include <net/caif/caif_dev.h>
24 24
25/* GPRS PDP connection has MTU to 1500 */ 25/* GPRS PDP connection has MTU to 1500 */
26#define SIZE_MTU 1500 26#define GPRS_PDP_MTU 1500
27/* 5 sec. connect timeout */ 27/* 5 sec. connect timeout */
28#define CONNECT_TIMEOUT (5 * HZ) 28#define CONNECT_TIMEOUT (5 * HZ)
29#define CAIF_NET_DEFAULT_QUEUE_LEN 500 29#define CAIF_NET_DEFAULT_QUEUE_LEN 500
@@ -232,6 +232,8 @@ static int chnl_net_open(struct net_device *dev)
232{ 232{
233 struct chnl_net *priv = NULL; 233 struct chnl_net *priv = NULL;
234 int result = -1; 234 int result = -1;
235 int llifindex, headroom, tailroom, mtu;
236 struct net_device *lldev;
235 ASSERT_RTNL(); 237 ASSERT_RTNL();
236 priv = netdev_priv(dev); 238 priv = netdev_priv(dev);
237 if (!priv) { 239 if (!priv) {
@@ -241,41 +243,88 @@ static int chnl_net_open(struct net_device *dev)
241 243
242 if (priv->state != CAIF_CONNECTING) { 244 if (priv->state != CAIF_CONNECTING) {
243 priv->state = CAIF_CONNECTING; 245 priv->state = CAIF_CONNECTING;
244 result = caif_connect_client(&priv->conn_req, &priv->chnl); 246 result = caif_connect_client(&priv->conn_req, &priv->chnl,
247 &llifindex, &headroom, &tailroom);
245 if (result != 0) { 248 if (result != 0) {
246 priv->state = CAIF_DISCONNECTED;
247 pr_debug("CAIF: %s(): err: " 249 pr_debug("CAIF: %s(): err: "
248 "Unable to register and open device," 250 "Unable to register and open device,"
249 " Err:%d\n", 251 " Err:%d\n",
250 __func__, 252 __func__,
251 result); 253 result);
252 return result; 254 goto error;
255 }
256
257 lldev = dev_get_by_index(dev_net(dev), llifindex);
258
259 if (lldev == NULL) {
260 pr_debug("CAIF: %s(): no interface?\n", __func__);
261 result = -ENODEV;
262 goto error;
263 }
264
265 dev->needed_tailroom = tailroom + lldev->needed_tailroom;
266 dev->hard_header_len = headroom + lldev->hard_header_len +
267 lldev->needed_tailroom;
268
269 /*
270 * MTU, head-room etc. are not known before we have a
271 * CAIF link layer device available. MTU calculation may
272 * override the initial RTNL configuration.
273 * MTU is the minimum of the current MTU, the link layer MTU
274 * less CAIF head and tail room, and the GPRS PDP context max MTU.
275 */
276 mtu = min_t(int, dev->mtu, lldev->mtu - (headroom + tailroom));
277 mtu = min_t(int, GPRS_PDP_MTU, mtu);
278 dev_set_mtu(dev, mtu);
279 dev_put(lldev);
280
281 if (mtu < 100) {
282 pr_warning("CAIF: %s(): "
283 "CAIF Interface MTU too small (%d)\n",
284 __func__, mtu);
285 result = -ENODEV;
286 goto error;
253 } 287 }
254 } 288 }
255 289
290 rtnl_unlock(); /* Release RTNL lock during connect wait */
291
256 result = wait_event_interruptible_timeout(priv->netmgmt_wq, 292 result = wait_event_interruptible_timeout(priv->netmgmt_wq,
257 priv->state != CAIF_CONNECTING, 293 priv->state != CAIF_CONNECTING,
258 CONNECT_TIMEOUT); 294 CONNECT_TIMEOUT);
259 295
296 rtnl_lock();
297
260 if (result == -ERESTARTSYS) { 298 if (result == -ERESTARTSYS) {
261 pr_debug("CAIF: %s(): wait_event_interruptible" 299 pr_debug("CAIF: %s(): wait_event_interruptible"
262 " woken by a signal\n", __func__); 300 " woken by a signal\n", __func__);
263 return -ERESTARTSYS; 301 result = -ERESTARTSYS;
302 goto error;
264 } 303 }
304
265 if (result == 0) { 305 if (result == 0) {
266 pr_debug("CAIF: %s(): connect timeout\n", __func__); 306 pr_debug("CAIF: %s(): connect timeout\n", __func__);
267 caif_disconnect_client(&priv->chnl); 307 caif_disconnect_client(&priv->chnl);
268 priv->state = CAIF_DISCONNECTED; 308 priv->state = CAIF_DISCONNECTED;
269 pr_debug("CAIF: %s(): state disconnected\n", __func__); 309 pr_debug("CAIF: %s(): state disconnected\n", __func__);
270 return -ETIMEDOUT; 310 result = -ETIMEDOUT;
311 goto error;
271 } 312 }
272 313
273 if (priv->state != CAIF_CONNECTED) { 314 if (priv->state != CAIF_CONNECTED) {
274 pr_debug("CAIF: %s(): connect failed\n", __func__); 315 pr_debug("CAIF: %s(): connect failed\n", __func__);
275 return -ECONNREFUSED; 316 result = -ECONNREFUSED;
317 goto error;
276 } 318 }
277 pr_debug("CAIF: %s(): CAIF Netdevice connected\n", __func__); 319 pr_debug("CAIF: %s(): CAIF Netdevice connected\n", __func__);
278 return 0; 320 return 0;
321
322error:
323 caif_disconnect_client(&priv->chnl);
324 priv->state = CAIF_DISCONNECTED;
325 pr_debug("CAIF: %s(): state disconnected\n", __func__);
326 return result;
327
279} 328}
280 329
281static int chnl_net_stop(struct net_device *dev) 330static int chnl_net_stop(struct net_device *dev)
@@ -321,9 +370,7 @@ static void ipcaif_net_setup(struct net_device *dev)
321 dev->destructor = free_netdev; 370 dev->destructor = free_netdev;
322 dev->flags |= IFF_NOARP; 371 dev->flags |= IFF_NOARP;
323 dev->flags |= IFF_POINTOPOINT; 372 dev->flags |= IFF_POINTOPOINT;
324 dev->needed_headroom = CAIF_NEEDED_HEADROOM; 373 dev->mtu = GPRS_PDP_MTU;
325 dev->needed_tailroom = CAIF_NEEDED_TAILROOM;
326 dev->mtu = SIZE_MTU;
327 dev->tx_queue_len = CAIF_NET_DEFAULT_QUEUE_LEN; 374 dev->tx_queue_len = CAIF_NET_DEFAULT_QUEUE_LEN;
328 375
329 priv = netdev_priv(dev); 376 priv = netdev_priv(dev);
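The MTU clamp in chnl_net_open() rewards a worked example. A sketch of just the arithmetic, with every value invented for illustration:

#include <linux/kernel.h>

#define GPRS_PDP_MTU 1500

static int caif_clamp_mtu(int dev_mtu, int lldev_mtu,
			  int headroom, int tailroom)
{
	int mtu = min_t(int, dev_mtu, lldev_mtu - (headroom + tailroom));

	return min_t(int, GPRS_PDP_MTU, mtu);
}

/* caif_clamp_mtu(9000, 1508, 4, 4) == 1500 (link layer caps it);
 * caif_clamp_mtu(1400, 1508, 4, 4) == 1400 (RTNL-set MTU wins).
 * chnl_net_open() rejects any result below 100 with -ENODEV. */
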
diff --git a/net/can/raw.c b/net/can/raw.c
index da99cf153b33..a10e3338f084 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -436,14 +436,9 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
436 436
437 if (count > 1) { 437 if (count > 1) {
438 /* filter does not fit into dfilter => alloc space */ 438 /* filter does not fit into dfilter => alloc space */
439 filter = kmalloc(optlen, GFP_KERNEL); 439 filter = memdup_user(optval, optlen);
440 if (!filter) 440 if (IS_ERR(filter))
441 return -ENOMEM; 441 return PTR_ERR(filter);
442
443 if (copy_from_user(filter, optval, optlen)) {
444 kfree(filter);
445 return -EFAULT;
446 }
447 } else if (count == 1) { 442 } else if (count == 1) {
448 if (copy_from_user(&sfilter, optval, sizeof(sfilter))) 443 if (copy_from_user(&sfilter, optval, sizeof(sfilter)))
449 return -EFAULT; 444 return -EFAULT;
@@ -655,6 +650,10 @@ static int raw_sendmsg(struct kiocb *iocb, struct socket *sock,
655 err = sock_tx_timestamp(msg, sk, skb_tx(skb)); 650 err = sock_tx_timestamp(msg, sk, skb_tx(skb));
656 if (err < 0) 651 if (err < 0)
657 goto free_skb; 652 goto free_skb;
653
654 /* to be able to check the received tx sock reference in raw_rcv() */
655 skb_tx(skb)->prevent_sk_orphan = 1;
656
658 skb->dev = dev; 657 skb->dev = dev;
659 skb->sk = sk; 658 skb->sk = sk;
660 659
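raw_setsockopt() above swaps the kmalloc()/copy_from_user() pair for memdup_user(), which returns an ERR_PTR on failure and removes the hand-rolled cleanup path. The idiom in isolation (use_user_buffer() is an invented name):

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static int use_user_buffer(const void __user *optval, size_t optlen)
{
	void *buf = memdup_user(optval, optlen);

	if (IS_ERR(buf))
		return PTR_ERR(buf);	/* -ENOMEM or -EFAULT */
	/* ... consume buf ... */
	kfree(buf);
	return 0;
}
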
diff --git a/net/compat.c b/net/compat.c
index ec24d9edb025..63d260e81472 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -81,7 +81,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
81 int tot_len; 81 int tot_len;
82 82
83 if (kern_msg->msg_namelen) { 83 if (kern_msg->msg_namelen) {
84 if (mode==VERIFY_READ) { 84 if (mode == VERIFY_READ) {
85 int err = move_addr_to_kernel(kern_msg->msg_name, 85 int err = move_addr_to_kernel(kern_msg->msg_name,
86 kern_msg->msg_namelen, 86 kern_msg->msg_namelen,
87 kern_address); 87 kern_address);
@@ -354,7 +354,7 @@ static int do_set_attach_filter(struct socket *sock, int level, int optname,
354static int do_set_sock_timeout(struct socket *sock, int level, 354static int do_set_sock_timeout(struct socket *sock, int level,
355 int optname, char __user *optval, unsigned int optlen) 355 int optname, char __user *optval, unsigned int optlen)
356{ 356{
357 struct compat_timeval __user *up = (struct compat_timeval __user *) optval; 357 struct compat_timeval __user *up = (struct compat_timeval __user *)optval;
358 struct timeval ktime; 358 struct timeval ktime;
359 mm_segment_t old_fs; 359 mm_segment_t old_fs;
360 int err; 360 int err;
@@ -367,7 +367,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
367 return -EFAULT; 367 return -EFAULT;
368 old_fs = get_fs(); 368 old_fs = get_fs();
369 set_fs(KERNEL_DS); 369 set_fs(KERNEL_DS);
370 err = sock_setsockopt(sock, level, optname, (char *) &ktime, sizeof(ktime)); 370 err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
371 set_fs(old_fs); 371 set_fs(old_fs);
372 372
373 return err; 373 return err;
@@ -389,11 +389,10 @@ asmlinkage long compat_sys_setsockopt(int fd, int level, int optname,
389 char __user *optval, unsigned int optlen) 389 char __user *optval, unsigned int optlen)
390{ 390{
391 int err; 391 int err;
392 struct socket *sock; 392 struct socket *sock = sockfd_lookup(fd, &err);
393 393
394 if ((sock = sockfd_lookup(fd, &err))!=NULL) 394 if (sock) {
395 { 395 err = security_socket_setsockopt(sock, level, optname);
396 err = security_socket_setsockopt(sock,level,optname);
397 if (err) { 396 if (err) {
398 sockfd_put(sock); 397 sockfd_put(sock);
399 return err; 398 return err;
@@ -453,7 +452,7 @@ static int compat_sock_getsockopt(struct socket *sock, int level, int optname,
453int compat_sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp) 452int compat_sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
454{ 453{
455 struct compat_timeval __user *ctv = 454 struct compat_timeval __user *ctv =
456 (struct compat_timeval __user*) userstamp; 455 (struct compat_timeval __user *) userstamp;
457 int err = -ENOENT; 456 int err = -ENOENT;
458 struct timeval tv; 457 struct timeval tv;
459 458
@@ -477,7 +476,7 @@ EXPORT_SYMBOL(compat_sock_get_timestamp);
477int compat_sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp) 476int compat_sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
478{ 477{
479 struct compat_timespec __user *ctv = 478 struct compat_timespec __user *ctv =
480 (struct compat_timespec __user*) userstamp; 479 (struct compat_timespec __user *) userstamp;
481 int err = -ENOENT; 480 int err = -ENOENT;
482 struct timespec ts; 481 struct timespec ts;
483 482
@@ -502,12 +501,10 @@ asmlinkage long compat_sys_getsockopt(int fd, int level, int optname,
502 char __user *optval, int __user *optlen) 501 char __user *optval, int __user *optlen)
503{ 502{
504 int err; 503 int err;
505 struct socket *sock; 504 struct socket *sock = sockfd_lookup(fd, &err);
506 505
507 if ((sock = sockfd_lookup(fd, &err))!=NULL) 506 if (sock) {
508 { 507 err = security_socket_getsockopt(sock, level, optname);
509 err = security_socket_getsockopt(sock, level,
510 optname);
511 if (err) { 508 if (err) {
512 sockfd_put(sock); 509 sockfd_put(sock);
513 return err; 510 return err;
@@ -531,7 +528,7 @@ struct compat_group_req {
531 __u32 gr_interface; 528 __u32 gr_interface;
532 struct __kernel_sockaddr_storage gr_group 529 struct __kernel_sockaddr_storage gr_group
533 __attribute__ ((aligned(4))); 530 __attribute__ ((aligned(4)));
534} __attribute__ ((packed)); 531} __packed;
535 532
536struct compat_group_source_req { 533struct compat_group_source_req {
537 __u32 gsr_interface; 534 __u32 gsr_interface;
@@ -539,7 +536,7 @@ struct compat_group_source_req {
539 __attribute__ ((aligned(4))); 536 __attribute__ ((aligned(4)));
540 struct __kernel_sockaddr_storage gsr_source 537 struct __kernel_sockaddr_storage gsr_source
541 __attribute__ ((aligned(4))); 538 __attribute__ ((aligned(4)));
542} __attribute__ ((packed)); 539} __packed;
543 540
544struct compat_group_filter { 541struct compat_group_filter {
545 __u32 gf_interface; 542 __u32 gf_interface;
@@ -549,7 +546,7 @@ struct compat_group_filter {
549 __u32 gf_numsrc; 546 __u32 gf_numsrc;
550 struct __kernel_sockaddr_storage gf_slist[1] 547 struct __kernel_sockaddr_storage gf_slist[1]
551 __attribute__ ((aligned(4))); 548 __attribute__ ((aligned(4)));
552} __attribute__ ((packed)); 549} __packed;
553 550
554#define __COMPAT_GF0_SIZE (sizeof(struct compat_group_filter) - \ 551#define __COMPAT_GF0_SIZE (sizeof(struct compat_group_filter) - \
555 sizeof(struct __kernel_sockaddr_storage)) 552 sizeof(struct __kernel_sockaddr_storage))
@@ -557,7 +554,7 @@ struct compat_group_filter {
557 554
558int compat_mc_setsockopt(struct sock *sock, int level, int optname, 555int compat_mc_setsockopt(struct sock *sock, int level, int optname,
559 char __user *optval, unsigned int optlen, 556 char __user *optval, unsigned int optlen,
560 int (*setsockopt)(struct sock *,int,int,char __user *,unsigned int)) 557 int (*setsockopt)(struct sock *, int, int, char __user *, unsigned int))
561{ 558{
562 char __user *koptval = optval; 559 char __user *koptval = optval;
563 int koptlen = optlen; 560 int koptlen = optlen;
@@ -640,12 +637,11 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
640 } 637 }
641 return setsockopt(sock, level, optname, koptval, koptlen); 638 return setsockopt(sock, level, optname, koptval, koptlen);
642} 639}
643
644EXPORT_SYMBOL(compat_mc_setsockopt); 640EXPORT_SYMBOL(compat_mc_setsockopt);
645 641
646int compat_mc_getsockopt(struct sock *sock, int level, int optname, 642int compat_mc_getsockopt(struct sock *sock, int level, int optname,
647 char __user *optval, int __user *optlen, 643 char __user *optval, int __user *optlen,
648 int (*getsockopt)(struct sock *,int,int,char __user *,int __user *)) 644 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
649{ 645{
650 struct compat_group_filter __user *gf32 = (void *)optval; 646 struct compat_group_filter __user *gf32 = (void *)optval;
651 struct group_filter __user *kgf; 647 struct group_filter __user *kgf;
@@ -681,7 +677,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
681 __put_user(interface, &kgf->gf_interface) || 677 __put_user(interface, &kgf->gf_interface) ||
682 __put_user(fmode, &kgf->gf_fmode) || 678 __put_user(fmode, &kgf->gf_fmode) ||
683 __put_user(numsrc, &kgf->gf_numsrc) || 679 __put_user(numsrc, &kgf->gf_numsrc) ||
684 copy_in_user(&kgf->gf_group,&gf32->gf_group,sizeof(kgf->gf_group))) 680 copy_in_user(&kgf->gf_group, &gf32->gf_group, sizeof(kgf->gf_group)))
685 return -EFAULT; 681 return -EFAULT;
686 682
687 err = getsockopt(sock, level, optname, (char __user *)kgf, koptlen); 683 err = getsockopt(sock, level, optname, (char __user *)kgf, koptlen);
@@ -714,21 +710,22 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
714 copylen = numsrc * sizeof(gf32->gf_slist[0]); 710 copylen = numsrc * sizeof(gf32->gf_slist[0]);
715 if (copylen > klen) 711 if (copylen > klen)
716 copylen = klen; 712 copylen = klen;
717 if (copy_in_user(gf32->gf_slist, kgf->gf_slist, copylen)) 713 if (copy_in_user(gf32->gf_slist, kgf->gf_slist, copylen))
718 return -EFAULT; 714 return -EFAULT;
719 } 715 }
720 return err; 716 return err;
721} 717}
722
723EXPORT_SYMBOL(compat_mc_getsockopt); 718EXPORT_SYMBOL(compat_mc_getsockopt);
724 719
725 720
726/* Argument list sizes for compat_sys_socketcall */ 721/* Argument list sizes for compat_sys_socketcall */
727#define AL(x) ((x) * sizeof(u32)) 722#define AL(x) ((x) * sizeof(u32))
728static unsigned char nas[20]={AL(0),AL(3),AL(3),AL(3),AL(2),AL(3), 723static unsigned char nas[20] = {
729 AL(3),AL(3),AL(4),AL(4),AL(4),AL(6), 724 AL(0), AL(3), AL(3), AL(3), AL(2), AL(3),
730 AL(6),AL(2),AL(5),AL(5),AL(3),AL(3), 725 AL(3), AL(3), AL(4), AL(4), AL(4), AL(6),
731 AL(4),AL(5)}; 726 AL(6), AL(2), AL(5), AL(5), AL(3), AL(3),
727 AL(4), AL(5)
728};
732#undef AL 729#undef AL
733 730
734asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned flags) 731asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned flags)
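The reflowed nas[] table maps each socketcall number to the byte size of its 32-bit argument block, so AL(6) == 24 bytes for the six-argument calls. A simplified sketch of how the table is consumed (it mirrors compat_sys_socketcall(), not the verbatim code):

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

static long fetch_socketcall_args(unsigned int call,
				  u32 __user *args, u32 *a)
{
	unsigned int len = nas[call];	/* e.g. nas[SYS_SENDTO] == AL(6) */

	if (len > 6 * sizeof(u32))
		return -EINVAL;
	return copy_from_user(a, args, len) ? -EFAULT : 0;
}
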
@@ -827,7 +824,7 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
827 compat_ptr(a[4]), compat_ptr(a[5])); 824 compat_ptr(a[4]), compat_ptr(a[5]));
828 break; 825 break;
829 case SYS_SHUTDOWN: 826 case SYS_SHUTDOWN:
830 ret = sys_shutdown(a0,a1); 827 ret = sys_shutdown(a0, a1);
831 break; 828 break;
832 case SYS_SETSOCKOPT: 829 case SYS_SETSOCKOPT:
833 ret = compat_sys_setsockopt(a0, a1, a[2], 830 ret = compat_sys_setsockopt(a0, a1, a[2],
diff --git a/net/core/Makefile b/net/core/Makefile
index 51c3eec850ef..8a04dd22cf77 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -18,4 +18,4 @@ obj-$(CONFIG_NET_DMA) += user_dma.o
18obj-$(CONFIG_FIB_RULES) += fib_rules.o 18obj-$(CONFIG_FIB_RULES) += fib_rules.o
19obj-$(CONFIG_TRACEPOINTS) += net-traces.o 19obj-$(CONFIG_TRACEPOINTS) += net-traces.o
20obj-$(CONFIG_NET_DROP_MONITOR) += drop_monitor.o 20obj-$(CONFIG_NET_DROP_MONITOR) += drop_monitor.o
21 21obj-$(CONFIG_NETWORK_PHY_TIMESTAMPING) += timestamping.o
diff --git a/net/core/datagram.c b/net/core/datagram.c
index f5b6f43a4c2e..251997a95483 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -219,6 +219,7 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
219 return __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0), 219 return __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
220 &peeked, err); 220 &peeked, err);
221} 221}
222EXPORT_SYMBOL(skb_recv_datagram);
222 223
223void skb_free_datagram(struct sock *sk, struct sk_buff *skb) 224void skb_free_datagram(struct sock *sk, struct sk_buff *skb)
224{ 225{
@@ -288,7 +289,6 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
288 289
289 return err; 290 return err;
290} 291}
291
292EXPORT_SYMBOL(skb_kill_datagram); 292EXPORT_SYMBOL(skb_kill_datagram);
293 293
294/** 294/**
@@ -373,6 +373,7 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
373fault: 373fault:
374 return -EFAULT; 374 return -EFAULT;
375} 375}
376EXPORT_SYMBOL(skb_copy_datagram_iovec);
376 377
377/** 378/**
378 * skb_copy_datagram_const_iovec - Copy a datagram to an iovec. 379 * skb_copy_datagram_const_iovec - Copy a datagram to an iovec.
@@ -716,6 +717,7 @@ csum_error:
716fault: 717fault:
717 return -EFAULT; 718 return -EFAULT;
718} 719}
720EXPORT_SYMBOL(skb_copy_and_csum_datagram_iovec);
719 721
720/** 722/**
721 * datagram_poll - generic datagram poll 723 * datagram_poll - generic datagram poll
@@ -770,8 +772,4 @@ unsigned int datagram_poll(struct file *file, struct socket *sock,
770 772
771 return mask; 773 return mask;
772} 774}
773
774EXPORT_SYMBOL(datagram_poll); 775EXPORT_SYMBOL(datagram_poll);
775EXPORT_SYMBOL(skb_copy_and_csum_datagram_iovec);
776EXPORT_SYMBOL(skb_copy_datagram_iovec);
777EXPORT_SYMBOL(skb_recv_datagram);
diff --git a/net/core/dev.c b/net/core/dev.c
index 2b3bf53bc687..e1c1cdcc2bb0 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -101,8 +101,6 @@
101#include <linux/proc_fs.h> 101#include <linux/proc_fs.h>
102#include <linux/seq_file.h> 102#include <linux/seq_file.h>
103#include <linux/stat.h> 103#include <linux/stat.h>
104#include <linux/if_bridge.h>
105#include <linux/if_macvlan.h>
106#include <net/dst.h> 104#include <net/dst.h>
107#include <net/pkt_sched.h> 105#include <net/pkt_sched.h>
108#include <net/checksum.h> 106#include <net/checksum.h>
@@ -803,35 +801,31 @@ struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
803EXPORT_SYMBOL(dev_getfirstbyhwtype); 801EXPORT_SYMBOL(dev_getfirstbyhwtype);
804 802
805/** 803/**
806 * dev_get_by_flags - find any device with given flags 804 * dev_get_by_flags_rcu - find any device with given flags
807 * @net: the applicable net namespace 805 * @net: the applicable net namespace
808 * @if_flags: IFF_* values 806 * @if_flags: IFF_* values
809 * @mask: bitmask of bits in if_flags to check 807 * @mask: bitmask of bits in if_flags to check
810 * 808 *
811 * Search for any interface with the given flags. Returns NULL if a device 809 * Search for any interface with the given flags. Returns NULL if a device
812 * is not found or a pointer to the device. The device returned has 810 * is not found or a pointer to the device. Must be called inside
813 * had a reference added and the pointer is safe until the user calls 811 * rcu_read_lock(), and result refcount is unchanged.
814 * dev_put to indicate they have finished with it.
815 */ 812 */
816 813
817struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags, 814struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
818 unsigned short mask) 815 unsigned short mask)
819{ 816{
820 struct net_device *dev, *ret; 817 struct net_device *dev, *ret;
821 818
822 ret = NULL; 819 ret = NULL;
823 rcu_read_lock();
824 for_each_netdev_rcu(net, dev) { 820 for_each_netdev_rcu(net, dev) {
825 if (((dev->flags ^ if_flags) & mask) == 0) { 821 if (((dev->flags ^ if_flags) & mask) == 0) {
826 dev_hold(dev);
827 ret = dev; 822 ret = dev;
828 break; 823 break;
829 } 824 }
830 } 825 }
831 rcu_read_unlock();
832 return ret; 826 return ret;
833} 827}
834EXPORT_SYMBOL(dev_get_by_flags); 828EXPORT_SYMBOL(dev_get_by_flags_rcu);
835 829
836/** 830/**
837 * dev_valid_name - check if name is okay for network device 831 * dev_valid_name - check if name is okay for network device
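The _rcu suffix is the contract change: the lookup no longer takes a reference, so the caller provides the RCU read-side section and holds the device itself if the pointer must outlive it. A sketch, assuming an invented find_up_dev():

#include <linux/netdevice.h>
#include <linux/rcupdate.h>

static struct net_device *find_up_dev(struct net *net)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_flags_rcu(net, IFF_UP, IFF_UP);
	if (dev)
		dev_hold(dev);	/* only because dev escapes the section */
	rcu_read_unlock();
	return dev;		/* caller must dev_put() when done */
}
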
@@ -1488,6 +1482,7 @@ static inline void net_timestamp_check(struct sk_buff *skb)
1488int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) 1482int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1489{ 1483{
1490 skb_orphan(skb); 1484 skb_orphan(skb);
1485 nf_reset(skb);
1491 1486
1492 if (!(dev->flags & IFF_UP) || 1487 if (!(dev->flags & IFF_UP) ||
1493 (skb->len > (dev->mtu + dev->hard_header_len))) { 1488 (skb->len > (dev->mtu + dev->hard_header_len))) {
@@ -1541,7 +1536,8 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1541 if (net_ratelimit()) 1536 if (net_ratelimit())
1542 printk(KERN_CRIT "protocol %04x is " 1537 printk(KERN_CRIT "protocol %04x is "
1543 "buggy, dev %s\n", 1538 "buggy, dev %s\n",
1544 skb2->protocol, dev->name); 1539 ntohs(skb2->protocol),
1540 dev->name);
1545 skb_reset_network_header(skb2); 1541 skb_reset_network_header(skb2);
1546 } 1542 }
1547 1543
@@ -1553,6 +1549,24 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1553 rcu_read_unlock(); 1549 rcu_read_unlock();
1554} 1550}
1555 1551
1552/*
1553 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
1554 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
1555 */
1556void netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
1557{
1558 unsigned int real_num = dev->real_num_tx_queues;
1559
1560 if (unlikely(txq > dev->num_tx_queues))
1561 ;
1562 else if (txq > real_num)
1563 dev->real_num_tx_queues = txq;
1564 else if (txq < real_num) {
1565 dev->real_num_tx_queues = txq;
1566 qdisc_reset_all_tx_gt(dev, txq);
1567 }
1568}
1569EXPORT_SYMBOL(netif_set_real_num_tx_queues);
1556 1570
1557static inline void __netif_reschedule(struct Qdisc *q) 1571static inline void __netif_reschedule(struct Qdisc *q)
1558{ 1572{
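A multiqueue driver shrinking or growing its active queue set would now go through the helper rather than poking real_num_tx_queues directly; shrinking resets any qdisc mapped above the new count. A sketch with an invented driver hook:

#include <linux/netdevice.h>

static void mydrv_set_channels(struct net_device *dev, unsigned int count)
{
	if (count > dev->num_tx_queues)
		count = dev->num_tx_queues;	/* larger values are ignored */
	netif_set_real_num_tx_queues(dev, count);
}
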
@@ -1893,8 +1907,32 @@ static int dev_gso_segment(struct sk_buff *skb)
1893 */ 1907 */
1894static inline void skb_orphan_try(struct sk_buff *skb) 1908static inline void skb_orphan_try(struct sk_buff *skb)
1895{ 1909{
1896 if (!skb_tx(skb)->flags) 1910 struct sock *sk = skb->sk;
1911
1912 if (sk && !skb_tx(skb)->flags) {
1913 /* skb_tx_hash() won't be able to get sk.
1914 * We copy sk_hash into skb->rxhash
1915 */
1916 if (!skb->rxhash)
1917 skb->rxhash = sk->sk_hash;
1897 skb_orphan(skb); 1918 skb_orphan(skb);
1919 }
1920}
1921
1922/*
1923 * Returns true if either:
1924 * 1. skb has frag_list and the device doesn't support FRAGLIST, or
1925 * 2. skb is fragmented and the device does not support SG, or if
1926 * at least one of fragments is in highmem and device does not
1927 * support DMA from it.
1928 */
1929static inline int skb_needs_linearize(struct sk_buff *skb,
1930 struct net_device *dev)
1931{
1932 return skb_is_nonlinear(skb) &&
1933 ((skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
1934 (skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) ||
1935 illegal_highdma(dev, skb))));
1898} 1936}
1899 1937
1900int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, 1938int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
@@ -1921,6 +1959,22 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
1921 goto out_kfree_skb; 1959 goto out_kfree_skb;
1922 if (skb->next) 1960 if (skb->next)
1923 goto gso; 1961 goto gso;
1962 } else {
1963 if (skb_needs_linearize(skb, dev) &&
1964 __skb_linearize(skb))
1965 goto out_kfree_skb;
1966
1967 /* If packet is not checksummed and device does not
1968 * support checksumming for this protocol, complete
1969 * checksumming here.
1970 */
1971 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1972 skb_set_transport_header(skb, skb->csum_start -
1973 skb_headroom(skb));
1974 if (!dev_can_checksum(dev, skb) &&
1975 skb_checksum_help(skb))
1976 goto out_kfree_skb;
1977 }
1924 } 1978 }
1925 1979
1926 rc = ops->ndo_start_xmit(skb, dev); 1980 rc = ops->ndo_start_xmit(skb, dev);
@@ -1980,8 +2034,7 @@ u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
1980 if (skb->sk && skb->sk->sk_hash) 2034 if (skb->sk && skb->sk->sk_hash)
1981 hash = skb->sk->sk_hash; 2035 hash = skb->sk->sk_hash;
1982 else 2036 else
1983 hash = (__force u16) skb->protocol; 2037 hash = (__force u16) skb->protocol ^ skb->rxhash;
1984
1985 hash = jhash_1word(hash, hashrnd); 2038 hash = jhash_1word(hash, hashrnd);
1986 2039
1987 return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32); 2040 return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
@@ -2004,12 +2057,11 @@ static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
2004static struct netdev_queue *dev_pick_tx(struct net_device *dev, 2057static struct netdev_queue *dev_pick_tx(struct net_device *dev,
2005 struct sk_buff *skb) 2058 struct sk_buff *skb)
2006{ 2059{
2007 u16 queue_index; 2060 int queue_index;
2008 struct sock *sk = skb->sk; 2061 struct sock *sk = skb->sk;
2009 2062
2010 if (sk_tx_queue_recorded(sk)) { 2063 queue_index = sk_tx_queue_get(sk);
2011 queue_index = sk_tx_queue_get(sk); 2064 if (queue_index < 0) {
2012 } else {
2013 const struct net_device_ops *ops = dev->netdev_ops; 2065 const struct net_device_ops *ops = dev->netdev_ops;
2014 2066
2015 if (ops->ndo_select_queue) { 2067 if (ops->ndo_select_queue) {
@@ -2038,14 +2090,24 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2038 struct netdev_queue *txq) 2090 struct netdev_queue *txq)
2039{ 2091{
2040 spinlock_t *root_lock = qdisc_lock(q); 2092 spinlock_t *root_lock = qdisc_lock(q);
2093 bool contended = qdisc_is_running(q);
2041 int rc; 2094 int rc;
2042 2095
2096 /*
2097 * Heuristic to force contended enqueues to serialize on a
2098 * separate lock before trying to get qdisc main lock.
2099 * This permits __QDISC_STATE_RUNNING owner to get the lock more often
2100 * and dequeue packets faster.
2101 */
2102 if (unlikely(contended))
2103 spin_lock(&q->busylock);
2104
2043 spin_lock(root_lock); 2105 spin_lock(root_lock);
2044 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) { 2106 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
2045 kfree_skb(skb); 2107 kfree_skb(skb);
2046 rc = NET_XMIT_DROP; 2108 rc = NET_XMIT_DROP;
2047 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) && 2109 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
2048 !test_and_set_bit(__QDISC_STATE_RUNNING, &q->state)) { 2110 qdisc_run_begin(q)) {
2049 /* 2111 /*
2050 * This is a work-conserving queue; there are no old skbs 2112 * This is a work-conserving queue; there are no old skbs
2051 * waiting to be sent out; and the qdisc is not running - 2113 * waiting to be sent out; and the qdisc is not running -
@@ -2054,37 +2116,33 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2054 if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE)) 2116 if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
2055 skb_dst_force(skb); 2117 skb_dst_force(skb);
2056 __qdisc_update_bstats(q, skb->len); 2118 __qdisc_update_bstats(q, skb->len);
2057 if (sch_direct_xmit(skb, q, dev, txq, root_lock)) 2119 if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
2120 if (unlikely(contended)) {
2121 spin_unlock(&q->busylock);
2122 contended = false;
2123 }
2058 __qdisc_run(q); 2124 __qdisc_run(q);
2059 else 2125 } else
2060 clear_bit(__QDISC_STATE_RUNNING, &q->state); 2126 qdisc_run_end(q);
2061 2127
2062 rc = NET_XMIT_SUCCESS; 2128 rc = NET_XMIT_SUCCESS;
2063 } else { 2129 } else {
2064 skb_dst_force(skb); 2130 skb_dst_force(skb);
2065 rc = qdisc_enqueue_root(skb, q); 2131 rc = qdisc_enqueue_root(skb, q);
2066 qdisc_run(q); 2132 if (qdisc_run_begin(q)) {
2133 if (unlikely(contended)) {
2134 spin_unlock(&q->busylock);
2135 contended = false;
2136 }
2137 __qdisc_run(q);
2138 }
2067 } 2139 }
2068 spin_unlock(root_lock); 2140 spin_unlock(root_lock);
2069 2141 if (unlikely(contended))
2142 spin_unlock(&q->busylock);
2070 return rc; 2143 return rc;
2071} 2144}
2072 2145
2073/*
2074 * Returns true if either:
2075 * 1. skb has frag_list and the device doesn't support FRAGLIST, or
2076 * 2. skb is fragmented and the device does not support SG, or if
2077 * at least one of fragments is in highmem and device does not
2078 * support DMA from it.
2079 */
2080static inline int skb_needs_linearize(struct sk_buff *skb,
2081 struct net_device *dev)
2082{
2083 return (skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
2084 (skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) ||
2085 illegal_highdma(dev, skb)));
2086}
2087
2088/** 2146/**
2089 * dev_queue_xmit - transmit a buffer 2147 * dev_queue_xmit - transmit a buffer
2090 * @skb: buffer to transmit 2148 * @skb: buffer to transmit
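The busylock comment compresses a subtle design. Stripped of the enqueue and bypass details, the locking shape is roughly the following (a simplified sketch, not the verbatim __dev_xmit_skb()):

#include <net/sch_generic.h>

static void xmit_contended(struct Qdisc *q, spinlock_t *root_lock)
{
	bool contended = qdisc_is_running(q);

	/* Waiters queue on busylock, so the CPU owning the RUNNING
	 * state retakes root_lock against at most one competitor per
	 * pass and can keep dequeuing packets. */
	if (contended)
		spin_lock(&q->busylock);

	spin_lock(root_lock);
	/* ... enqueue skb, or transmit directly when allowed ... */
	spin_unlock(root_lock);

	if (contended)
		spin_unlock(&q->busylock);
}
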
@@ -2117,25 +2175,6 @@ int dev_queue_xmit(struct sk_buff *skb)
2117 struct Qdisc *q; 2175 struct Qdisc *q;
2118 int rc = -ENOMEM; 2176 int rc = -ENOMEM;
2119 2177
2120 /* GSO will handle the following emulations directly. */
2121 if (netif_needs_gso(dev, skb))
2122 goto gso;
2123
2124 /* Convert a paged skb to linear, if required */
2125 if (skb_needs_linearize(skb, dev) && __skb_linearize(skb))
2126 goto out_kfree_skb;
2127
2128 /* If packet is not checksummed and device does not support
2129 * checksumming for this protocol, complete checksumming here.
2130 */
2131 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2132 skb_set_transport_header(skb, skb->csum_start -
2133 skb_headroom(skb));
2134 if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
2135 goto out_kfree_skb;
2136 }
2137
2138gso:
2139 /* Disable soft irqs for various locks below. Also 2178 /* Disable soft irqs for various locks below. Also
2140 * stops preemption for RCU. 2179 * stops preemption for RCU.
2141 */ 2180 */
@@ -2194,7 +2233,6 @@ gso:
2194 rc = -ENETDOWN; 2233 rc = -ENETDOWN;
2195 rcu_read_unlock_bh(); 2234 rcu_read_unlock_bh();
2196 2235
2197out_kfree_skb:
2198 kfree_skb(skb); 2236 kfree_skb(skb);
2199 return rc; 2237 return rc;
2200out: 2238out:
@@ -2579,70 +2617,14 @@ static inline int deliver_skb(struct sk_buff *skb,
2579 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev); 2617 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
2580} 2618}
2581 2619
2582#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE) 2620#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
2583 2621 (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
2584#if defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE)
2585/* This hook is defined here for ATM LANE */ 2622/* This hook is defined here for ATM LANE */
2586int (*br_fdb_test_addr_hook)(struct net_device *dev, 2623int (*br_fdb_test_addr_hook)(struct net_device *dev,
2587 unsigned char *addr) __read_mostly; 2624 unsigned char *addr) __read_mostly;
2588EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook); 2625EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
2589#endif 2626#endif
2590 2627
2591/*
2592 * If bridge module is loaded call bridging hook.
2593 * returns NULL if packet was consumed.
2594 */
2595struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
2596 struct sk_buff *skb) __read_mostly;
2597EXPORT_SYMBOL_GPL(br_handle_frame_hook);
2598
2599static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
2600 struct packet_type **pt_prev, int *ret,
2601 struct net_device *orig_dev)
2602{
2603 struct net_bridge_port *port;
2604
2605 if (skb->pkt_type == PACKET_LOOPBACK ||
2606 (port = rcu_dereference(skb->dev->br_port)) == NULL)
2607 return skb;
2608
2609 if (*pt_prev) {
2610 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2611 *pt_prev = NULL;
2612 }
2613
2614 return br_handle_frame_hook(port, skb);
2615}
2616#else
2617#define handle_bridge(skb, pt_prev, ret, orig_dev) (skb)
2618#endif
2619
2620#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
2621struct sk_buff *(*macvlan_handle_frame_hook)(struct macvlan_port *p,
2622 struct sk_buff *skb) __read_mostly;
2623EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);
2624
2625static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
2626 struct packet_type **pt_prev,
2627 int *ret,
2628 struct net_device *orig_dev)
2629{
2630 struct macvlan_port *port;
2631
2632 port = rcu_dereference(skb->dev->macvlan_port);
2633 if (!port)
2634 return skb;
2635
2636 if (*pt_prev) {
2637 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2638 *pt_prev = NULL;
2639 }
2640 return macvlan_handle_frame_hook(port, skb);
2641}
2642#else
2643#define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb)
2644#endif
2645
2646#ifdef CONFIG_NET_CLS_ACT 2628#ifdef CONFIG_NET_CLS_ACT
2647/* TODO: Maybe we should just force sch_ingress to be compiled in 2629/* TODO: Maybe we should just force sch_ingress to be compiled in
2648 * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions 2630 * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions
@@ -2660,10 +2642,10 @@ static int ing_filter(struct sk_buff *skb)
2660 int result = TC_ACT_OK; 2642 int result = TC_ACT_OK;
2661 struct Qdisc *q; 2643 struct Qdisc *q;
2662 2644
2663 if (MAX_RED_LOOP < ttl++) { 2645 if (unlikely(MAX_RED_LOOP < ttl++)) {
2664 printk(KERN_WARNING 2646 if (net_ratelimit())
2665 "Redir loop detected Dropping packet (%d->%d)\n", 2647 pr_warning("Redir loop detected, dropping packet (%d->%d)\n",
2666 skb->skb_iif, dev->ifindex); 2648 skb->skb_iif, dev->ifindex);
2667 return TC_ACT_SHOT; 2649 return TC_ACT_SHOT;
2668 } 2650 }
2669 2651
@@ -2693,9 +2675,6 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb,
2693 if (*pt_prev) { 2675 if (*pt_prev) {
2694 *ret = deliver_skb(skb, *pt_prev, orig_dev); 2676 *ret = deliver_skb(skb, *pt_prev, orig_dev);
2695 *pt_prev = NULL; 2677 *pt_prev = NULL;
2696 } else {
2697 /* Huh? Why does turning on AF_PACKET affect this? */
2698 skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
2699 } 2678 }
2700 2679
2701 switch (ing_filter(skb)) { 2680 switch (ing_filter(skb)) {
@@ -2738,6 +2717,51 @@ void netif_nit_deliver(struct sk_buff *skb)
2738 rcu_read_unlock(); 2717 rcu_read_unlock();
2739} 2718}
2740 2719
2720/**
2721 * netdev_rx_handler_register - register receive handler
2722 * @dev: device to register a handler for
2723 * @rx_handler: receive handler to register
2724 * @rx_handler_data: data pointer that is used by rx handler
2725 *
2726 * Register a receive handler for a device. This handler will then be
2727 * called from __netif_receive_skb. A negative errno code is returned
2728 * on a failure.
2729 *
2730 * The caller must hold the rtnl_mutex.
2731 */
2732int netdev_rx_handler_register(struct net_device *dev,
2733 rx_handler_func_t *rx_handler,
2734 void *rx_handler_data)
2735{
2736 ASSERT_RTNL();
2737
2738 if (dev->rx_handler)
2739 return -EBUSY;
2740
2741 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
2742 rcu_assign_pointer(dev->rx_handler, rx_handler);
2743
2744 return 0;
2745}
2746EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
2747
2748/**
2749 * netdev_rx_handler_unregister - unregister receive handler
2750 * @dev: device to unregister a handler from
2751 *
2752 * Unregister a receive handler from a device.
2753 *
2754 * The caller must hold the rtnl_mutex.
2755 */
2756void netdev_rx_handler_unregister(struct net_device *dev)
2757{
2758
2759 ASSERT_RTNL();
2760 rcu_assign_pointer(dev->rx_handler, NULL);
2761 rcu_assign_pointer(dev->rx_handler_data, NULL);
2762}
2763EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
2764
2741static inline void skb_bond_set_mac_by_master(struct sk_buff *skb, 2765static inline void skb_bond_set_mac_by_master(struct sk_buff *skb,
2742 struct net_device *master) 2766 struct net_device *master)
2743{ 2767{
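The pair above replaces the hard-wired bridge and macvlan hooks with one generic slot. A stacked driver binds like this (the mydev_* names are invented; the handler signature is the rx_handler_func_t called from __netif_receive_skb() above):

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static struct sk_buff *mydev_handle_frame(struct sk_buff *skb)
{
	/* ... redirect or consume skb ... */
	return NULL;		/* NULL tells the core it was consumed */
}

static int mydev_attach(struct net_device *port_dev)
{
	int err;

	rtnl_lock();
	err = netdev_rx_handler_register(port_dev, mydev_handle_frame, NULL);
	rtnl_unlock();
	return err;		/* -EBUSY if a handler is already bound */
}
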
@@ -2759,7 +2783,8 @@ int __skb_bond_should_drop(struct sk_buff *skb, struct net_device *master)
2759 if (master->priv_flags & IFF_MASTER_ARPMON) 2783 if (master->priv_flags & IFF_MASTER_ARPMON)
2760 dev->last_rx = jiffies; 2784 dev->last_rx = jiffies;
2761 2785
2762 if ((master->priv_flags & IFF_MASTER_ALB) && master->br_port) { 2786 if ((master->priv_flags & IFF_MASTER_ALB) &&
2787 (master->priv_flags & IFF_BRIDGE_PORT)) {
2763 /* Do address unmangle. The local destination address 2788 /* Do address unmangle. The local destination address
2764 * will be always the one master has. Provides the right 2789 * will be always the one master has. Provides the right
2765 * functionality in a bridge. 2790 * functionality in a bridge.
@@ -2790,6 +2815,7 @@ EXPORT_SYMBOL(__skb_bond_should_drop);
2790static int __netif_receive_skb(struct sk_buff *skb) 2815static int __netif_receive_skb(struct sk_buff *skb)
2791{ 2816{
2792 struct packet_type *ptype, *pt_prev; 2817 struct packet_type *ptype, *pt_prev;
2818 rx_handler_func_t *rx_handler;
2793 struct net_device *orig_dev; 2819 struct net_device *orig_dev;
2794 struct net_device *master; 2820 struct net_device *master;
2795 struct net_device *null_or_orig; 2821 struct net_device *null_or_orig;
@@ -2831,8 +2857,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
2831 skb->dev = master; 2857 skb->dev = master;
2832 } 2858 }
2833 2859
2834 __get_cpu_var(softnet_data).processed++; 2860 __this_cpu_inc(softnet_data.processed);
2835
2836 skb_reset_network_header(skb); 2861 skb_reset_network_header(skb);
2837 skb_reset_transport_header(skb); 2862 skb_reset_transport_header(skb);
2838 skb->mac_len = skb->network_header - skb->mac_header; 2863 skb->mac_len = skb->network_header - skb->mac_header;
@@ -2864,12 +2889,17 @@ static int __netif_receive_skb(struct sk_buff *skb)
2864ncls: 2889ncls:
2865#endif 2890#endif
2866 2891
2867 skb = handle_bridge(skb, &pt_prev, &ret, orig_dev); 2892 /* Handle special case of bridge or macvlan */
2868 if (!skb) 2893 rx_handler = rcu_dereference(skb->dev->rx_handler);
2869 goto out; 2894 if (rx_handler) {
2870 skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev); 2895 if (pt_prev) {
2871 if (!skb) 2896 ret = deliver_skb(skb, pt_prev, orig_dev);
2872 goto out; 2897 pt_prev = NULL;
2898 }
2899 skb = rx_handler(skb);
2900 if (!skb)
2901 goto out;
2902 }
2873 2903
2874 /* 2904 /*
2875 * Make sure frames received on VLAN interfaces stacked on 2905 * Make sure frames received on VLAN interfaces stacked on
@@ -2930,6 +2960,9 @@ int netif_receive_skb(struct sk_buff *skb)
2930 if (netdev_tstamp_prequeue) 2960 if (netdev_tstamp_prequeue)
2931 net_timestamp_check(skb); 2961 net_timestamp_check(skb);
2932 2962
2963 if (skb_defer_rx_timestamp(skb))
2964 return NET_RX_SUCCESS;
2965
2933#ifdef CONFIG_RPS 2966#ifdef CONFIG_RPS
2934 { 2967 {
2935 struct rps_dev_flow voidflow, *rflow = &voidflow; 2968 struct rps_dev_flow voidflow, *rflow = &voidflow;
@@ -3694,10 +3727,11 @@ void dev_seq_stop(struct seq_file *seq, void *v)
3694 3727
3695static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev) 3728static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
3696{ 3729{
3697 const struct net_device_stats *stats = dev_get_stats(dev); 3730 struct rtnl_link_stats64 temp;
3731 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
3698 3732
3699 seq_printf(seq, "%6s: %7lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu " 3733 seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
3700 "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n", 3734 "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
3701 dev->name, stats->rx_bytes, stats->rx_packets, 3735 dev->name, stats->rx_bytes, stats->rx_packets,
3702 stats->rx_errors, 3736 stats->rx_errors,
3703 stats->rx_dropped + stats->rx_missed_errors, 3737 stats->rx_dropped + stats->rx_missed_errors,
@@ -5246,20 +5280,22 @@ void netdev_run_todo(void)
5246/** 5280/**
5247 * dev_txq_stats_fold - fold tx_queues stats 5281 * dev_txq_stats_fold - fold tx_queues stats
5248 * @dev: device to get statistics from 5282 * @dev: device to get statistics from
5249 * @stats: struct net_device_stats to hold results 5283 * @stats: struct rtnl_link_stats64 to hold results
5250 */ 5284 */
5251void dev_txq_stats_fold(const struct net_device *dev, 5285void dev_txq_stats_fold(const struct net_device *dev,
5252 struct net_device_stats *stats) 5286 struct rtnl_link_stats64 *stats)
5253{ 5287{
5254 unsigned long tx_bytes = 0, tx_packets = 0, tx_dropped = 0; 5288 u64 tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
5255 unsigned int i; 5289 unsigned int i;
5256 struct netdev_queue *txq; 5290 struct netdev_queue *txq;
5257 5291
5258 for (i = 0; i < dev->num_tx_queues; i++) { 5292 for (i = 0; i < dev->num_tx_queues; i++) {
5259 txq = netdev_get_tx_queue(dev, i); 5293 txq = netdev_get_tx_queue(dev, i);
5294 spin_lock_bh(&txq->_xmit_lock);
5260 tx_bytes += txq->tx_bytes; 5295 tx_bytes += txq->tx_bytes;
5261 tx_packets += txq->tx_packets; 5296 tx_packets += txq->tx_packets;
5262 tx_dropped += txq->tx_dropped; 5297 tx_dropped += txq->tx_dropped;
5298 spin_unlock_bh(&txq->_xmit_lock);
5263 } 5299 }
5264 if (tx_bytes || tx_packets || tx_dropped) { 5300 if (tx_bytes || tx_packets || tx_dropped) {
5265 stats->tx_bytes = tx_bytes; 5301 stats->tx_bytes = tx_bytes;
@@ -5269,23 +5305,53 @@ void dev_txq_stats_fold(const struct net_device *dev,
5269} 5305}
5270EXPORT_SYMBOL(dev_txq_stats_fold); 5306EXPORT_SYMBOL(dev_txq_stats_fold);
5271 5307
5308/* Convert net_device_stats to rtnl_link_stats64. They have the same
5309 * fields in the same order, with only the type differing.
5310 */
5311static void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
5312 const struct net_device_stats *netdev_stats)
5313{
5314#if BITS_PER_LONG == 64
5315 BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
5316 memcpy(stats64, netdev_stats, sizeof(*stats64));
5317#else
5318 size_t i, n = sizeof(*stats64) / sizeof(u64);
5319 const unsigned long *src = (const unsigned long *)netdev_stats;
5320 u64 *dst = (u64 *)stats64;
5321
5322 BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
5323 sizeof(*stats64) / sizeof(u64));
5324 for (i = 0; i < n; i++)
5325 dst[i] = src[i];
5326#endif
5327}
5328
5272/** 5329/**
5273 * dev_get_stats - get network device statistics 5330 * dev_get_stats - get network device statistics
5274 * @dev: device to get statistics from 5331 * @dev: device to get statistics from
5332 * @storage: place to store stats
5275 * 5333 *
5276 * Get network statistics from device. The device driver may provide 5334 * Get network statistics from device. Return @storage.
5277 * its own method by setting dev->netdev_ops->get_stats; otherwise 5335 * The device driver may provide its own method by setting
5278 * the internal statistics structure is used. 5336 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
5337 * otherwise the internal statistics structure is used.
5279 */ 5338 */
5280const struct net_device_stats *dev_get_stats(struct net_device *dev) 5339struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
5340 struct rtnl_link_stats64 *storage)
5281{ 5341{
5282 const struct net_device_ops *ops = dev->netdev_ops; 5342 const struct net_device_ops *ops = dev->netdev_ops;
5283 5343
5284 if (ops->ndo_get_stats) 5344 if (ops->ndo_get_stats64) {
5285 return ops->ndo_get_stats(dev); 5345 memset(storage, 0, sizeof(*storage));
5286 5346 return ops->ndo_get_stats64(dev, storage);
5287 dev_txq_stats_fold(dev, &dev->stats); 5347 }
5288 return &dev->stats; 5348 if (ops->ndo_get_stats) {
5349 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
5350 return storage;
5351 }
5352 netdev_stats_to_stats64(storage, &dev->stats);
5353 dev_txq_stats_fold(dev, storage);
5354 return storage;
5289} 5355}
5290EXPORT_SYMBOL(dev_get_stats); 5356EXPORT_SYMBOL(dev_get_stats);
5291 5357
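Callers now pass in storage, so the returned statistics no longer alias device-private memory and are 64-bit even on 32-bit hosts. A minimal sketch of the new calling convention:

#include <linux/netdevice.h>

static u64 dev_rx_bytes(struct net_device *dev)
{
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

	return stats->rx_bytes;
}
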
@@ -5790,6 +5856,68 @@ char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
5790 return buffer; 5856 return buffer;
5791} 5857}
5792 5858
5859static int __netdev_printk(const char *level, const struct net_device *dev,
5860 struct va_format *vaf)
5861{
5862 int r;
5863
5864 if (dev && dev->dev.parent)
5865 r = dev_printk(level, dev->dev.parent, "%s: %pV",
5866 netdev_name(dev), vaf);
5867 else if (dev)
5868 r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
5869 else
5870 r = printk("%s(NULL net_device): %pV", level, vaf);
5871
5872 return r;
5873}
5874
5875int netdev_printk(const char *level, const struct net_device *dev,
5876 const char *format, ...)
5877{
5878 struct va_format vaf;
5879 va_list args;
5880 int r;
5881
5882 va_start(args, format);
5883
5884 vaf.fmt = format;
5885 vaf.va = &args;
5886
5887 r = __netdev_printk(level, dev, &vaf);
5888 va_end(args);
5889
5890 return r;
5891}
5892EXPORT_SYMBOL(netdev_printk);
5893
5894#define define_netdev_printk_level(func, level) \
5895int func(const struct net_device *dev, const char *fmt, ...) \
5896{ \
5897 int r; \
5898 struct va_format vaf; \
5899 va_list args; \
5900 \
5901 va_start(args, fmt); \
5902 \
5903 vaf.fmt = fmt; \
5904 vaf.va = &args; \
5905 \
5906 r = __netdev_printk(level, dev, &vaf); \
5907 va_end(args); \
5908 \
5909 return r; \
5910} \
5911EXPORT_SYMBOL(func);
5912
5913define_netdev_printk_level(netdev_emerg, KERN_EMERG);
5914define_netdev_printk_level(netdev_alert, KERN_ALERT);
5915define_netdev_printk_level(netdev_crit, KERN_CRIT);
5916define_netdev_printk_level(netdev_err, KERN_ERR);
5917define_netdev_printk_level(netdev_warn, KERN_WARNING);
5918define_netdev_printk_level(netdev_notice, KERN_NOTICE);
5919define_netdev_printk_level(netdev_info, KERN_INFO);
5920
5793static void __net_exit netdev_exit(struct net *net) 5921static void __net_exit netdev_exit(struct net *net)
5794{ 5922{
5795 kfree(net->dev_name_head); 5923 kfree(net->dev_name_head);
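The generated helpers are used like dev_err()/pr_err() but prefix the driver and device name automatically. A sketch of typical driver usage (report_link() and its arguments are invented):

#include <linux/netdevice.h>

static void report_link(struct net_device *dev, bool up, unsigned int mbps)
{
	if (up)
		netdev_info(dev, "link up, %u Mb/s\n", mbps);
	else
		netdev_warn(dev, "link down\n");
}
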
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index ad41529fb60f..36e603c78ce9 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -223,6 +223,11 @@ static int set_all_monitor_traces(int state)
223 223
224 spin_lock(&trace_state_lock); 224 spin_lock(&trace_state_lock);
225 225
226 if (state == trace_state) {
227 rc = -EAGAIN;
228 goto out_unlock;
229 }
230
226 switch (state) { 231 switch (state) {
227 case TRACE_ON: 232 case TRACE_ON:
228 rc |= register_trace_kfree_skb(trace_kfree_skb_hit, NULL); 233 rc |= register_trace_kfree_skb(trace_kfree_skb_hit, NULL);
@@ -251,11 +256,12 @@ static int set_all_monitor_traces(int state)
251 256
252 if (!rc) 257 if (!rc)
253 trace_state = state; 258 trace_state = state;
259 else
260 rc = -EINPROGRESS;
254 261
262out_unlock:
255 spin_unlock(&trace_state_lock); 263 spin_unlock(&trace_state_lock);
256 264
257 if (rc)
258 return -EINPROGRESS;
259 return rc; 265 return rc;
260} 266}
261 267
@@ -341,9 +347,9 @@ static struct notifier_block dropmon_net_notifier = {
341 347
342static int __init init_net_drop_monitor(void) 348static int __init init_net_drop_monitor(void)
343{ 349{
344 int cpu;
345 int rc, i, ret;
346 struct per_cpu_dm_data *data; 350 struct per_cpu_dm_data *data;
351 int cpu, rc;
352
347 printk(KERN_INFO "Initializing network drop monitor service\n"); 353
348 354
349 if (sizeof(void *) > 8) { 355 if (sizeof(void *) > 8) {
@@ -351,21 +357,12 @@ static int __init init_net_drop_monitor(void)
351 return -ENOSPC; 357 return -ENOSPC;
352 } 358 }
353 359
354 if (genl_register_family(&net_drop_monitor_family) < 0) { 360 rc = genl_register_family_with_ops(&net_drop_monitor_family,
361 dropmon_ops,
362 ARRAY_SIZE(dropmon_ops));
363 if (rc) {
355 printk(KERN_ERR "Could not create drop monitor netlink family\n"); 364 printk(KERN_ERR "Could not create drop monitor netlink family\n");
356 return -EFAULT; 365 return rc;
357 }
358
359 rc = -EFAULT;
360
361 for (i = 0; i < ARRAY_SIZE(dropmon_ops); i++) {
362 ret = genl_register_ops(&net_drop_monitor_family,
363 &dropmon_ops[i]);
364 if (ret) {
365 printk(KERN_CRIT "Failed to register operation %d\n",
366 dropmon_ops[i].cmd);
367 goto out_unreg;
368 }
369 } 366 }
370 367
371 rc = register_netdevice_notifier(&dropmon_net_notifier); 368 rc = register_netdevice_notifier(&dropmon_net_notifier);
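genl_register_family_with_ops() registers the family and its whole ops array in one call and unwinds on failure, which is what lets the per-op loop above disappear. A sketch of the pattern with invented names:

#include <linux/init.h>
#include <linux/kernel.h>
#include <net/genetlink.h>

static struct genl_family my_family = {
	.id = GENL_ID_GENERATE,
	.name = "my_family",
	.version = 1,
	.maxattr = 0,
};

static int my_doit(struct sk_buff *skb, struct genl_info *info)
{
	return 0;	/* real handlers go here */
}

static struct genl_ops my_ops[] = {
	{ .cmd = 1, .doit = my_doit },
};

static int __init my_init(void)
{
	return genl_register_family_with_ops(&my_family, my_ops,
					     ARRAY_SIZE(my_ops));
}
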
diff --git a/net/core/dst.c b/net/core/dst.c
index 9920722cc82b..6c41b1fac3db 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -197,7 +197,6 @@ static void ___dst_free(struct dst_entry *dst)
197 dst->input = dst->output = dst_discard; 197 dst->input = dst->output = dst_discard;
198 dst->obsolete = 2; 198 dst->obsolete = 2;
199} 199}
200EXPORT_SYMBOL(__dst_free);
201 200
202void __dst_free(struct dst_entry *dst) 201void __dst_free(struct dst_entry *dst)
203{ 202{
@@ -213,6 +212,7 @@ void __dst_free(struct dst_entry *dst)
213 } 212 }
214 spin_unlock_bh(&dst_garbage.lock); 213 spin_unlock_bh(&dst_garbage.lock);
215} 214}
215EXPORT_SYMBOL(__dst_free);
216 216
217struct dst_entry *dst_destroy(struct dst_entry * dst) 217struct dst_entry *dst_destroy(struct dst_entry * dst)
218{ 218{
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index a0f4964033d2..7a85367b3c2f 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -144,31 +144,13 @@ u32 ethtool_op_get_flags(struct net_device *dev)
144} 144}
145EXPORT_SYMBOL(ethtool_op_get_flags); 145EXPORT_SYMBOL(ethtool_op_get_flags);
146 146
147int ethtool_op_set_flags(struct net_device *dev, u32 data) 147int ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported)
148{ 148{
149 const struct ethtool_ops *ops = dev->ethtool_ops; 149 if (data & ~supported)
150 unsigned long features = dev->features; 150 return -EINVAL;
151
152 if (data & ETH_FLAG_LRO)
153 features |= NETIF_F_LRO;
154 else
155 features &= ~NETIF_F_LRO;
156
157 if (data & ETH_FLAG_NTUPLE) {
158 if (!ops->set_rx_ntuple)
159 return -EOPNOTSUPP;
160 features |= NETIF_F_NTUPLE;
161 } else {
162 /* safe to clear regardless */
163 features &= ~NETIF_F_NTUPLE;
164 }
165
166 if (data & ETH_FLAG_RXHASH)
167 features |= NETIF_F_RXHASH;
168 else
169 features &= ~NETIF_F_RXHASH;
170 151
171 dev->features = features; 152 dev->features = ((dev->features & ~flags_dup_features) |
153 (data & flags_dup_features));
172 return 0; 154 return 0;
173} 155}
174EXPORT_SYMBOL(ethtool_op_set_flags); 156EXPORT_SYMBOL(ethtool_op_set_flags);
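With the supported mask passed in, drivers no longer open-code each ETH_FLAG_* bit; unsupported requests fail with -EINVAL before any state changes. A hypothetical driver hook:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static int mydrv_set_flags(struct net_device *dev, u32 data)
{
	return ethtool_op_set_flags(dev, data,
				    ETH_FLAG_LRO | ETH_FLAG_RXHASH);
}
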
@@ -318,23 +300,33 @@ out:
318} 300}
319 301
320static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev, 302static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev,
321 void __user *useraddr) 303 u32 cmd, void __user *useraddr)
322{ 304{
323 struct ethtool_rxnfc cmd; 305 struct ethtool_rxnfc info;
306 size_t info_size = sizeof(info);
324 307
325 if (!dev->ethtool_ops->set_rxnfc) 308 if (!dev->ethtool_ops->set_rxnfc)
326 return -EOPNOTSUPP; 309 return -EOPNOTSUPP;
327 310
328 if (copy_from_user(&cmd, useraddr, sizeof(cmd))) 311 /* struct ethtool_rxnfc was originally defined for
312 * ETHTOOL_{G,S}RXFH with only the cmd, flow_type and data
313 * members. User-space might still be using that
314 * definition. */
315 if (cmd == ETHTOOL_SRXFH)
316 info_size = (offsetof(struct ethtool_rxnfc, data) +
317 sizeof(info.data));
318
319 if (copy_from_user(&info, useraddr, info_size))
329 return -EFAULT; 320 return -EFAULT;
330 321
331 return dev->ethtool_ops->set_rxnfc(dev, &cmd); 322 return dev->ethtool_ops->set_rxnfc(dev, &info);
332} 323}
333 324
334static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev, 325static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
335 void __user *useraddr) 326 u32 cmd, void __user *useraddr)
336{ 327{
337 struct ethtool_rxnfc info; 328 struct ethtool_rxnfc info;
329 size_t info_size = sizeof(info);
338 const struct ethtool_ops *ops = dev->ethtool_ops; 330 const struct ethtool_ops *ops = dev->ethtool_ops;
339 int ret; 331 int ret;
340 void *rule_buf = NULL; 332 void *rule_buf = NULL;
@@ -342,13 +334,22 @@ static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
342 if (!ops->get_rxnfc) 334 if (!ops->get_rxnfc)
343 return -EOPNOTSUPP; 335 return -EOPNOTSUPP;
344 336
345 if (copy_from_user(&info, useraddr, sizeof(info))) 337 /* struct ethtool_rxnfc was originally defined for
338 * ETHTOOL_{G,S}RXFH with only the cmd, flow_type and data
339 * members. User-space might still be using that
340 * definition. */
341 if (cmd == ETHTOOL_GRXFH)
342 info_size = (offsetof(struct ethtool_rxnfc, data) +
343 sizeof(info.data));
344
345 if (copy_from_user(&info, useraddr, info_size))
346 return -EFAULT; 346 return -EFAULT;
347 347
348 if (info.cmd == ETHTOOL_GRXCLSRLALL) { 348 if (info.cmd == ETHTOOL_GRXCLSRLALL) {
349 if (info.rule_cnt > 0) { 349 if (info.rule_cnt > 0) {
350 rule_buf = kmalloc(info.rule_cnt * sizeof(u32), 350 if (info.rule_cnt <= KMALLOC_MAX_SIZE / sizeof(u32))
351 GFP_USER); 351 rule_buf = kmalloc(info.rule_cnt * sizeof(u32),
352 GFP_USER);
352 if (!rule_buf) 353 if (!rule_buf)
353 return -ENOMEM; 354 return -ENOMEM;
354 } 355 }
@@ -359,7 +360,7 @@ static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
359 goto err_out; 360 goto err_out;
360 361
361 ret = -EFAULT; 362 ret = -EFAULT;
362 if (copy_to_user(useraddr, &info, sizeof(info))) 363 if (copy_to_user(useraddr, &info, info_size))
363 goto err_out; 364 goto err_out;
364 365
365 if (rule_buf) { 366 if (rule_buf) {
@@ -376,6 +377,80 @@ err_out:
376 return ret; 377 return ret;
377} 378}
378 379
380static noinline_for_stack int ethtool_get_rxfh_indir(struct net_device *dev,
381 void __user *useraddr)
382{
383 struct ethtool_rxfh_indir *indir;
384 u32 table_size;
385 size_t full_size;
386 int ret;
387
388 if (!dev->ethtool_ops->get_rxfh_indir)
389 return -EOPNOTSUPP;
390
391 if (copy_from_user(&table_size,
392 useraddr + offsetof(struct ethtool_rxfh_indir, size),
393 sizeof(table_size)))
394 return -EFAULT;
395
396 if (table_size >
397 (KMALLOC_MAX_SIZE - sizeof(*indir)) / sizeof(*indir->ring_index))
398 return -ENOMEM;
399 full_size = sizeof(*indir) + sizeof(*indir->ring_index) * table_size;
400 indir = kmalloc(full_size, GFP_USER);
401 if (!indir)
402 return -ENOMEM;
403
404 indir->cmd = ETHTOOL_GRXFHINDIR;
405 indir->size = table_size;
406 ret = dev->ethtool_ops->get_rxfh_indir(dev, indir);
407 if (ret)
408 goto out;
409
410 if (copy_to_user(useraddr, indir, full_size))
411 ret = -EFAULT;
412
413out:
414 kfree(indir);
415 return ret;
416}
417
418static noinline_for_stack int ethtool_set_rxfh_indir(struct net_device *dev,
419 void __user *useraddr)
420{
421 struct ethtool_rxfh_indir *indir;
422 u32 table_size;
423 size_t full_size;
424 int ret;
425
426 if (!dev->ethtool_ops->set_rxfh_indir)
427 return -EOPNOTSUPP;
428
429 if (copy_from_user(&table_size,
430 useraddr + offsetof(struct ethtool_rxfh_indir, size),
431 sizeof(table_size)))
432 return -EFAULT;
433
434 if (table_size >
435 (KMALLOC_MAX_SIZE - sizeof(*indir)) / sizeof(*indir->ring_index))
436 return -ENOMEM;
437 full_size = sizeof(*indir) + sizeof(*indir->ring_index) * table_size;
438 indir = kmalloc(full_size, GFP_USER);
439 if (!indir)
440 return -ENOMEM;
441
442 if (copy_from_user(indir, useraddr, full_size)) {
443 ret = -EFAULT;
444 goto out;
445 }
446
447 ret = dev->ethtool_ops->set_rxfh_indir(dev, indir);
448
449out:
450 kfree(indir);
451 return ret;
452}
453
379static void __rx_ntuple_filter_add(struct ethtool_rx_ntuple_list *list, 454static void __rx_ntuple_filter_add(struct ethtool_rx_ntuple_list *list,
380 struct ethtool_rx_ntuple_flow_spec *spec, 455 struct ethtool_rx_ntuple_flow_spec *spec,
381 struct ethtool_rx_ntuple_flow_spec_container *fsc) 456 struct ethtool_rx_ntuple_flow_spec_container *fsc)
@@ -1516,12 +1591,12 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
1516 case ETHTOOL_GRXCLSRLCNT: 1591 case ETHTOOL_GRXCLSRLCNT:
1517 case ETHTOOL_GRXCLSRULE: 1592 case ETHTOOL_GRXCLSRULE:
1518 case ETHTOOL_GRXCLSRLALL: 1593 case ETHTOOL_GRXCLSRLALL:
1519 rc = ethtool_get_rxnfc(dev, useraddr); 1594 rc = ethtool_get_rxnfc(dev, ethcmd, useraddr);
1520 break; 1595 break;
1521 case ETHTOOL_SRXFH: 1596 case ETHTOOL_SRXFH:
1522 case ETHTOOL_SRXCLSRLDEL: 1597 case ETHTOOL_SRXCLSRLDEL:
1523 case ETHTOOL_SRXCLSRLINS: 1598 case ETHTOOL_SRXCLSRLINS:
1524 rc = ethtool_set_rxnfc(dev, useraddr); 1599 rc = ethtool_set_rxnfc(dev, ethcmd, useraddr);
1525 break; 1600 break;
1526 case ETHTOOL_GGRO: 1601 case ETHTOOL_GGRO:
1527 rc = ethtool_get_gro(dev, useraddr); 1602 rc = ethtool_get_gro(dev, useraddr);
@@ -1544,6 +1619,12 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
1544 case ETHTOOL_GSSET_INFO: 1619 case ETHTOOL_GSSET_INFO:
1545 rc = ethtool_get_sset_info(dev, useraddr); 1620 rc = ethtool_get_sset_info(dev, useraddr);
1546 break; 1621 break;
1622 case ETHTOOL_GRXFHINDIR:
1623 rc = ethtool_get_rxfh_indir(dev, useraddr);
1624 break;
1625 case ETHTOOL_SRXFHINDIR:
1626 rc = ethtool_set_rxfh_indir(dev, useraddr);
1627 break;
1547 default: 1628 default:
1548 rc = -EOPNOTSUPP; 1629 rc = -EOPNOTSUPP;
1549 } 1630 }
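
A minimal userspace sketch (assumed, not part of the patch) of why the ETHTOOL_GRXFH size truncation above matters: a binary built against the original three-member layout of struct ethtool_rxnfc keeps working, because the kernel now copies only up to and including 'data' for that command. The same hunk also bounds info.rule_cnt by KMALLOC_MAX_SIZE / sizeof(u32) before the multiplication, so an oversized count fails with -ENOMEM instead of overflowing the allocation size. The struct name below is hypothetical; the ioctl constants come from the uapi headers.

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <linux/types.h>

/* Hypothetical pre-extension layout: cmd, flow_type and data only. */
struct ethtool_rxnfc_old {
	__u32 cmd;
	__u32 flow_type;
	__u64 data;
};

int main(void)
{
	struct ethtool_rxnfc_old info;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

	memset(&info, 0, sizeof(info));
	info.cmd = ETHTOOL_GRXFH;	/* kernel copies only these 16 bytes */
	info.flow_type = TCP_V4_FLOW;
	ifr.ifr_data = (void *)&info;

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("hash fields for TCP/IPv4: %#llx\n",
		       (unsigned long long)info.data);
	return 0;
}
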
diff --git a/net/core/filter.c b/net/core/filter.c
index da69fb728d32..52b051f82a01 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -128,87 +128,87 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int
128 fentry = &filter[pc]; 128 fentry = &filter[pc];
129 129
130 switch (fentry->code) { 130 switch (fentry->code) {
131 case BPF_ALU|BPF_ADD|BPF_X: 131 case BPF_S_ALU_ADD_X:
132 A += X; 132 A += X;
133 continue; 133 continue;
134 case BPF_ALU|BPF_ADD|BPF_K: 134 case BPF_S_ALU_ADD_K:
135 A += fentry->k; 135 A += fentry->k;
136 continue; 136 continue;
137 case BPF_ALU|BPF_SUB|BPF_X: 137 case BPF_S_ALU_SUB_X:
138 A -= X; 138 A -= X;
139 continue; 139 continue;
140 case BPF_ALU|BPF_SUB|BPF_K: 140 case BPF_S_ALU_SUB_K:
141 A -= fentry->k; 141 A -= fentry->k;
142 continue; 142 continue;
143 case BPF_ALU|BPF_MUL|BPF_X: 143 case BPF_S_ALU_MUL_X:
144 A *= X; 144 A *= X;
145 continue; 145 continue;
146 case BPF_ALU|BPF_MUL|BPF_K: 146 case BPF_S_ALU_MUL_K:
147 A *= fentry->k; 147 A *= fentry->k;
148 continue; 148 continue;
149 case BPF_ALU|BPF_DIV|BPF_X: 149 case BPF_S_ALU_DIV_X:
150 if (X == 0) 150 if (X == 0)
151 return 0; 151 return 0;
152 A /= X; 152 A /= X;
153 continue; 153 continue;
154 case BPF_ALU|BPF_DIV|BPF_K: 154 case BPF_S_ALU_DIV_K:
155 A /= fentry->k; 155 A /= fentry->k;
156 continue; 156 continue;
157 case BPF_ALU|BPF_AND|BPF_X: 157 case BPF_S_ALU_AND_X:
158 A &= X; 158 A &= X;
159 continue; 159 continue;
160 case BPF_ALU|BPF_AND|BPF_K: 160 case BPF_S_ALU_AND_K:
161 A &= fentry->k; 161 A &= fentry->k;
162 continue; 162 continue;
163 case BPF_ALU|BPF_OR|BPF_X: 163 case BPF_S_ALU_OR_X:
164 A |= X; 164 A |= X;
165 continue; 165 continue;
166 case BPF_ALU|BPF_OR|BPF_K: 166 case BPF_S_ALU_OR_K:
167 A |= fentry->k; 167 A |= fentry->k;
168 continue; 168 continue;
169 case BPF_ALU|BPF_LSH|BPF_X: 169 case BPF_S_ALU_LSH_X:
170 A <<= X; 170 A <<= X;
171 continue; 171 continue;
172 case BPF_ALU|BPF_LSH|BPF_K: 172 case BPF_S_ALU_LSH_K:
173 A <<= fentry->k; 173 A <<= fentry->k;
174 continue; 174 continue;
175 case BPF_ALU|BPF_RSH|BPF_X: 175 case BPF_S_ALU_RSH_X:
176 A >>= X; 176 A >>= X;
177 continue; 177 continue;
178 case BPF_ALU|BPF_RSH|BPF_K: 178 case BPF_S_ALU_RSH_K:
179 A >>= fentry->k; 179 A >>= fentry->k;
180 continue; 180 continue;
181 case BPF_ALU|BPF_NEG: 181 case BPF_S_ALU_NEG:
182 A = -A; 182 A = -A;
183 continue; 183 continue;
184 case BPF_JMP|BPF_JA: 184 case BPF_S_JMP_JA:
185 pc += fentry->k; 185 pc += fentry->k;
186 continue; 186 continue;
187 case BPF_JMP|BPF_JGT|BPF_K: 187 case BPF_S_JMP_JGT_K:
188 pc += (A > fentry->k) ? fentry->jt : fentry->jf; 188 pc += (A > fentry->k) ? fentry->jt : fentry->jf;
189 continue; 189 continue;
190 case BPF_JMP|BPF_JGE|BPF_K: 190 case BPF_S_JMP_JGE_K:
191 pc += (A >= fentry->k) ? fentry->jt : fentry->jf; 191 pc += (A >= fentry->k) ? fentry->jt : fentry->jf;
192 continue; 192 continue;
193 case BPF_JMP|BPF_JEQ|BPF_K: 193 case BPF_S_JMP_JEQ_K:
194 pc += (A == fentry->k) ? fentry->jt : fentry->jf; 194 pc += (A == fentry->k) ? fentry->jt : fentry->jf;
195 continue; 195 continue;
196 case BPF_JMP|BPF_JSET|BPF_K: 196 case BPF_S_JMP_JSET_K:
197 pc += (A & fentry->k) ? fentry->jt : fentry->jf; 197 pc += (A & fentry->k) ? fentry->jt : fentry->jf;
198 continue; 198 continue;
199 case BPF_JMP|BPF_JGT|BPF_X: 199 case BPF_S_JMP_JGT_X:
200 pc += (A > X) ? fentry->jt : fentry->jf; 200 pc += (A > X) ? fentry->jt : fentry->jf;
201 continue; 201 continue;
202 case BPF_JMP|BPF_JGE|BPF_X: 202 case BPF_S_JMP_JGE_X:
203 pc += (A >= X) ? fentry->jt : fentry->jf; 203 pc += (A >= X) ? fentry->jt : fentry->jf;
204 continue; 204 continue;
205 case BPF_JMP|BPF_JEQ|BPF_X: 205 case BPF_S_JMP_JEQ_X:
206 pc += (A == X) ? fentry->jt : fentry->jf; 206 pc += (A == X) ? fentry->jt : fentry->jf;
207 continue; 207 continue;
208 case BPF_JMP|BPF_JSET|BPF_X: 208 case BPF_S_JMP_JSET_X:
209 pc += (A & X) ? fentry->jt : fentry->jf; 209 pc += (A & X) ? fentry->jt : fentry->jf;
210 continue; 210 continue;
211 case BPF_LD|BPF_W|BPF_ABS: 211 case BPF_S_LD_W_ABS:
212 k = fentry->k; 212 k = fentry->k;
213load_w: 213load_w:
214 ptr = load_pointer(skb, k, 4, &tmp); 214 ptr = load_pointer(skb, k, 4, &tmp);
@@ -217,7 +217,7 @@ load_w:
217 continue; 217 continue;
218 } 218 }
219 break; 219 break;
220 case BPF_LD|BPF_H|BPF_ABS: 220 case BPF_S_LD_H_ABS:
221 k = fentry->k; 221 k = fentry->k;
222load_h: 222load_h:
223 ptr = load_pointer(skb, k, 2, &tmp); 223 ptr = load_pointer(skb, k, 2, &tmp);
@@ -226,7 +226,7 @@ load_h:
226 continue; 226 continue;
227 } 227 }
228 break; 228 break;
229 case BPF_LD|BPF_B|BPF_ABS: 229 case BPF_S_LD_B_ABS:
230 k = fentry->k; 230 k = fentry->k;
231load_b: 231load_b:
232 ptr = load_pointer(skb, k, 1, &tmp); 232 ptr = load_pointer(skb, k, 1, &tmp);
@@ -235,54 +235,54 @@ load_b:
235 continue; 235 continue;
236 } 236 }
237 break; 237 break;
238 case BPF_LD|BPF_W|BPF_LEN: 238 case BPF_S_LD_W_LEN:
239 A = skb->len; 239 A = skb->len;
240 continue; 240 continue;
241 case BPF_LDX|BPF_W|BPF_LEN: 241 case BPF_S_LDX_W_LEN:
242 X = skb->len; 242 X = skb->len;
243 continue; 243 continue;
244 case BPF_LD|BPF_W|BPF_IND: 244 case BPF_S_LD_W_IND:
245 k = X + fentry->k; 245 k = X + fentry->k;
246 goto load_w; 246 goto load_w;
247 case BPF_LD|BPF_H|BPF_IND: 247 case BPF_S_LD_H_IND:
248 k = X + fentry->k; 248 k = X + fentry->k;
249 goto load_h; 249 goto load_h;
250 case BPF_LD|BPF_B|BPF_IND: 250 case BPF_S_LD_B_IND:
251 k = X + fentry->k; 251 k = X + fentry->k;
252 goto load_b; 252 goto load_b;
253 case BPF_LDX|BPF_B|BPF_MSH: 253 case BPF_S_LDX_B_MSH:
254 ptr = load_pointer(skb, fentry->k, 1, &tmp); 254 ptr = load_pointer(skb, fentry->k, 1, &tmp);
255 if (ptr != NULL) { 255 if (ptr != NULL) {
256 X = (*(u8 *)ptr & 0xf) << 2; 256 X = (*(u8 *)ptr & 0xf) << 2;
257 continue; 257 continue;
258 } 258 }
259 return 0; 259 return 0;
260 case BPF_LD|BPF_IMM: 260 case BPF_S_LD_IMM:
261 A = fentry->k; 261 A = fentry->k;
262 continue; 262 continue;
263 case BPF_LDX|BPF_IMM: 263 case BPF_S_LDX_IMM:
264 X = fentry->k; 264 X = fentry->k;
265 continue; 265 continue;
266 case BPF_LD|BPF_MEM: 266 case BPF_S_LD_MEM:
267 A = mem[fentry->k]; 267 A = mem[fentry->k];
268 continue; 268 continue;
269 case BPF_LDX|BPF_MEM: 269 case BPF_S_LDX_MEM:
270 X = mem[fentry->k]; 270 X = mem[fentry->k];
271 continue; 271 continue;
272 case BPF_MISC|BPF_TAX: 272 case BPF_S_MISC_TAX:
273 X = A; 273 X = A;
274 continue; 274 continue;
275 case BPF_MISC|BPF_TXA: 275 case BPF_S_MISC_TXA:
276 A = X; 276 A = X;
277 continue; 277 continue;
278 case BPF_RET|BPF_K: 278 case BPF_S_RET_K:
279 return fentry->k; 279 return fentry->k;
280 case BPF_RET|BPF_A: 280 case BPF_S_RET_A:
281 return A; 281 return A;
282 case BPF_ST: 282 case BPF_S_ST:
283 mem[fentry->k] = A; 283 mem[fentry->k] = A;
284 continue; 284 continue;
285 case BPF_STX: 285 case BPF_S_STX:
286 mem[fentry->k] = X; 286 mem[fentry->k] = X;
287 continue; 287 continue;
288 default: 288 default:
@@ -390,53 +390,128 @@ int sk_chk_filter(struct sock_filter *filter, int flen)
390 /* Only allow valid instructions */ 390 /* Only allow valid instructions */
391 switch (ftest->code) { 391 switch (ftest->code) {
392 case BPF_ALU|BPF_ADD|BPF_K: 392 case BPF_ALU|BPF_ADD|BPF_K:
393 ftest->code = BPF_S_ALU_ADD_K;
394 break;
393 case BPF_ALU|BPF_ADD|BPF_X: 395 case BPF_ALU|BPF_ADD|BPF_X:
396 ftest->code = BPF_S_ALU_ADD_X;
397 break;
394 case BPF_ALU|BPF_SUB|BPF_K: 398 case BPF_ALU|BPF_SUB|BPF_K:
399 ftest->code = BPF_S_ALU_SUB_K;
400 break;
395 case BPF_ALU|BPF_SUB|BPF_X: 401 case BPF_ALU|BPF_SUB|BPF_X:
402 ftest->code = BPF_S_ALU_SUB_X;
403 break;
396 case BPF_ALU|BPF_MUL|BPF_K: 404 case BPF_ALU|BPF_MUL|BPF_K:
405 ftest->code = BPF_S_ALU_MUL_K;
406 break;
397 case BPF_ALU|BPF_MUL|BPF_X: 407 case BPF_ALU|BPF_MUL|BPF_X:
408 ftest->code = BPF_S_ALU_MUL_X;
409 break;
398 case BPF_ALU|BPF_DIV|BPF_X: 410 case BPF_ALU|BPF_DIV|BPF_X:
411 ftest->code = BPF_S_ALU_DIV_X;
412 break;
399 case BPF_ALU|BPF_AND|BPF_K: 413 case BPF_ALU|BPF_AND|BPF_K:
414 ftest->code = BPF_S_ALU_AND_K;
415 break;
400 case BPF_ALU|BPF_AND|BPF_X: 416 case BPF_ALU|BPF_AND|BPF_X:
417 ftest->code = BPF_S_ALU_AND_X;
418 break;
401 case BPF_ALU|BPF_OR|BPF_K: 419 case BPF_ALU|BPF_OR|BPF_K:
420 ftest->code = BPF_S_ALU_OR_K;
421 break;
402 case BPF_ALU|BPF_OR|BPF_X: 422 case BPF_ALU|BPF_OR|BPF_X:
423 ftest->code = BPF_S_ALU_OR_X;
424 break;
403 case BPF_ALU|BPF_LSH|BPF_K: 425 case BPF_ALU|BPF_LSH|BPF_K:
426 ftest->code = BPF_S_ALU_LSH_K;
427 break;
404 case BPF_ALU|BPF_LSH|BPF_X: 428 case BPF_ALU|BPF_LSH|BPF_X:
429 ftest->code = BPF_S_ALU_LSH_X;
430 break;
405 case BPF_ALU|BPF_RSH|BPF_K: 431 case BPF_ALU|BPF_RSH|BPF_K:
432 ftest->code = BPF_S_ALU_RSH_K;
433 break;
406 case BPF_ALU|BPF_RSH|BPF_X: 434 case BPF_ALU|BPF_RSH|BPF_X:
435 ftest->code = BPF_S_ALU_RSH_X;
436 break;
407 case BPF_ALU|BPF_NEG: 437 case BPF_ALU|BPF_NEG:
438 ftest->code = BPF_S_ALU_NEG;
439 break;
408 case BPF_LD|BPF_W|BPF_ABS: 440 case BPF_LD|BPF_W|BPF_ABS:
441 ftest->code = BPF_S_LD_W_ABS;
442 break;
409 case BPF_LD|BPF_H|BPF_ABS: 443 case BPF_LD|BPF_H|BPF_ABS:
444 ftest->code = BPF_S_LD_H_ABS;
445 break;
410 case BPF_LD|BPF_B|BPF_ABS: 446 case BPF_LD|BPF_B|BPF_ABS:
447 ftest->code = BPF_S_LD_B_ABS;
448 break;
411 case BPF_LD|BPF_W|BPF_LEN: 449 case BPF_LD|BPF_W|BPF_LEN:
450 ftest->code = BPF_S_LD_W_LEN;
451 break;
412 case BPF_LD|BPF_W|BPF_IND: 452 case BPF_LD|BPF_W|BPF_IND:
453 ftest->code = BPF_S_LD_W_IND;
454 break;
413 case BPF_LD|BPF_H|BPF_IND: 455 case BPF_LD|BPF_H|BPF_IND:
456 ftest->code = BPF_S_LD_H_IND;
457 break;
414 case BPF_LD|BPF_B|BPF_IND: 458 case BPF_LD|BPF_B|BPF_IND:
459 ftest->code = BPF_S_LD_B_IND;
460 break;
415 case BPF_LD|BPF_IMM: 461 case BPF_LD|BPF_IMM:
462 ftest->code = BPF_S_LD_IMM;
463 break;
416 case BPF_LDX|BPF_W|BPF_LEN: 464 case BPF_LDX|BPF_W|BPF_LEN:
465 ftest->code = BPF_S_LDX_W_LEN;
466 break;
417 case BPF_LDX|BPF_B|BPF_MSH: 467 case BPF_LDX|BPF_B|BPF_MSH:
468 ftest->code = BPF_S_LDX_B_MSH;
469 break;
418 case BPF_LDX|BPF_IMM: 470 case BPF_LDX|BPF_IMM:
471 ftest->code = BPF_S_LDX_IMM;
472 break;
419 case BPF_MISC|BPF_TAX: 473 case BPF_MISC|BPF_TAX:
474 ftest->code = BPF_S_MISC_TAX;
475 break;
420 case BPF_MISC|BPF_TXA: 476 case BPF_MISC|BPF_TXA:
477 ftest->code = BPF_S_MISC_TXA;
478 break;
421 case BPF_RET|BPF_K: 479 case BPF_RET|BPF_K:
480 ftest->code = BPF_S_RET_K;
481 break;
422 case BPF_RET|BPF_A: 482 case BPF_RET|BPF_A:
483 ftest->code = BPF_S_RET_A;
423 break; 484 break;
424 485
425 /* Some instructions need special checks */ 486 /* Some instructions need special checks */
426 487
427 case BPF_ALU|BPF_DIV|BPF_K:
428 /* check for division by zero */ 488 /* check for division by zero */
489 case BPF_ALU|BPF_DIV|BPF_K:
429 if (ftest->k == 0) 490 if (ftest->k == 0)
430 return -EINVAL; 491 return -EINVAL;
492 ftest->code = BPF_S_ALU_DIV_K;
431 break; 493 break;
432 494
495 /* check for invalid memory addresses */
433 case BPF_LD|BPF_MEM: 496 case BPF_LD|BPF_MEM:
497 if (ftest->k >= BPF_MEMWORDS)
498 return -EINVAL;
499 ftest->code = BPF_S_LD_MEM;
500 break;
434 case BPF_LDX|BPF_MEM: 501 case BPF_LDX|BPF_MEM:
502 if (ftest->k >= BPF_MEMWORDS)
503 return -EINVAL;
504 ftest->code = BPF_S_LDX_MEM;
505 break;
435 case BPF_ST: 506 case BPF_ST:
507 if (ftest->k >= BPF_MEMWORDS)
508 return -EINVAL;
509 ftest->code = BPF_S_ST;
510 break;
436 case BPF_STX: 511 case BPF_STX:
437 /* check for invalid memory addresses */
438 if (ftest->k >= BPF_MEMWORDS) 512 if (ftest->k >= BPF_MEMWORDS)
439 return -EINVAL; 513 return -EINVAL;
514 ftest->code = BPF_S_STX;
440 break; 515 break;
441 516
442 case BPF_JMP|BPF_JA: 517 case BPF_JMP|BPF_JA:
@@ -447,28 +522,63 @@ int sk_chk_filter(struct sock_filter *filter, int flen)
447 */ 522 */
448 if (ftest->k >= (unsigned)(flen-pc-1)) 523 if (ftest->k >= (unsigned)(flen-pc-1))
449 return -EINVAL; 524 return -EINVAL;
525 ftest->code = BPF_S_JMP_JA;
450 break; 526 break;
451 527
452 case BPF_JMP|BPF_JEQ|BPF_K: 528 case BPF_JMP|BPF_JEQ|BPF_K:
529 ftest->code = BPF_S_JMP_JEQ_K;
530 break;
453 case BPF_JMP|BPF_JEQ|BPF_X: 531 case BPF_JMP|BPF_JEQ|BPF_X:
532 ftest->code = BPF_S_JMP_JEQ_X;
533 break;
454 case BPF_JMP|BPF_JGE|BPF_K: 534 case BPF_JMP|BPF_JGE|BPF_K:
535 ftest->code = BPF_S_JMP_JGE_K;
536 break;
455 case BPF_JMP|BPF_JGE|BPF_X: 537 case BPF_JMP|BPF_JGE|BPF_X:
538 ftest->code = BPF_S_JMP_JGE_X;
539 break;
456 case BPF_JMP|BPF_JGT|BPF_K: 540 case BPF_JMP|BPF_JGT|BPF_K:
541 ftest->code = BPF_S_JMP_JGT_K;
542 break;
457 case BPF_JMP|BPF_JGT|BPF_X: 543 case BPF_JMP|BPF_JGT|BPF_X:
544 ftest->code = BPF_S_JMP_JGT_X;
545 break;
458 case BPF_JMP|BPF_JSET|BPF_K: 546 case BPF_JMP|BPF_JSET|BPF_K:
547 ftest->code = BPF_S_JMP_JSET_K;
548 break;
459 case BPF_JMP|BPF_JSET|BPF_X: 549 case BPF_JMP|BPF_JSET|BPF_X:
550 ftest->code = BPF_S_JMP_JSET_X;
551 break;
552
553 default:
554 return -EINVAL;
555 }
556
460 /* for conditionals both must be safe */ 557 /* for conditionals both must be safe */
558 switch (ftest->code) {
559 case BPF_S_JMP_JEQ_K:
560 case BPF_S_JMP_JEQ_X:
561 case BPF_S_JMP_JGE_K:
562 case BPF_S_JMP_JGE_X:
563 case BPF_S_JMP_JGT_K:
564 case BPF_S_JMP_JGT_X:
565 case BPF_S_JMP_JSET_X:
566 case BPF_S_JMP_JSET_K:
461 if (pc + ftest->jt + 1 >= flen || 567 if (pc + ftest->jt + 1 >= flen ||
462 pc + ftest->jf + 1 >= flen) 568 pc + ftest->jf + 1 >= flen)
463 return -EINVAL; 569 return -EINVAL;
464 break; 570 }
571 }
465 572
573 /* last instruction must be a RET code */
574 switch (filter[flen - 1].code) {
575 case BPF_S_RET_K:
576 case BPF_S_RET_A:
577 return 0;
578 break;
466 default: 579 default:
467 return -EINVAL; 580 return -EINVAL;
468 } 581 }
469 }
470
471 return (BPF_CLASS(filter[flen - 1].code) == BPF_RET) ? 0 : -EINVAL;
472} 582}
473EXPORT_SYMBOL(sk_chk_filter); 583EXPORT_SYMBOL(sk_chk_filter);
474 584
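The filter.c change is a decode-once scheme: sk_chk_filter() validates every classic BPF opcode a single time and rewrites it to a dense internal BPF_S_* value, so the hot loop in sk_run_filter() switches over small consecutive codes (compiler-friendly jump table) and an unknown opcode can never reach the interpreter; the final-instruction check likewise becomes a plain switch on the rewritten code. A toy standalone re-creation of the pattern, with invented RAW_*/VM_* names standing in for the real opcode sets:

#include <stdint.h>
#include <stdio.h>

/* Illustrative opcodes: RAW_* mimic the sparse on-the-wire encoding,
 * VM_* are dense internal codes like the kernel's BPF_S_* set. */
enum { RAW_ADD_K = 0x04, RAW_RET_A = 0x16 };
enum { VM_BAD = 0, VM_ADD_K, VM_RET_A };

struct insn {
	uint16_t code;
	uint32_t k;
};

/* Validate once; rewrite raw codes to dense internal ones. */
static int check(struct insn *prog, int len)
{
	for (int i = 0; i < len; i++) {
		switch (prog[i].code) {
		case RAW_ADD_K: prog[i].code = VM_ADD_K; break;
		case RAW_RET_A: prog[i].code = VM_RET_A; break;
		default: return -1;	/* unknown opcodes rejected here */
		}
	}
	/* last instruction must be a return, as in sk_chk_filter() */
	return prog[len - 1].code == VM_RET_A ? 0 : -1;
}

/* The hot loop only ever sees dense, pre-validated codes. */
static uint32_t run(const struct insn *prog)
{
	uint32_t A = 0;

	for (int pc = 0; ; pc++) {
		switch (prog[pc].code) {
		case VM_ADD_K: A += prog[pc].k; break;
		case VM_RET_A: return A;
		}
	}
}

int main(void)
{
	struct insn prog[] = {
		{ RAW_ADD_K, 2 }, { RAW_ADD_K, 3 }, { RAW_RET_A, 0 },
	};

	if (check(prog, 3) == 0)
		printf("result = %u\n", run(prog));	/* prints 5 */
	return 0;
}
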
diff --git a/net/core/flow.c b/net/core/flow.c
index 161900674009..f67dcbfe54ef 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -62,6 +62,7 @@ struct flow_cache {
62}; 62};
63 63
64atomic_t flow_cache_genid = ATOMIC_INIT(0); 64atomic_t flow_cache_genid = ATOMIC_INIT(0);
65EXPORT_SYMBOL(flow_cache_genid);
65static struct flow_cache flow_cache_global; 66static struct flow_cache flow_cache_global;
66static struct kmem_cache *flow_cachep; 67static struct kmem_cache *flow_cachep;
67 68
@@ -222,7 +223,7 @@ flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
222 unsigned int hash; 223 unsigned int hash;
223 224
224 local_bh_disable(); 225 local_bh_disable();
225 fcp = per_cpu_ptr(fc->percpu, smp_processor_id()); 226 fcp = this_cpu_ptr(fc->percpu);
226 227
227 fle = NULL; 228 fle = NULL;
228 flo = NULL; 229 flo = NULL;
@@ -291,6 +292,7 @@ ret_object:
291 local_bh_enable(); 292 local_bh_enable();
292 return flo; 293 return flo;
293} 294}
295EXPORT_SYMBOL(flow_cache_lookup);
294 296
295static void flow_cache_flush_tasklet(unsigned long data) 297static void flow_cache_flush_tasklet(unsigned long data)
296{ 298{
@@ -302,7 +304,7 @@ static void flow_cache_flush_tasklet(unsigned long data)
302 LIST_HEAD(gc_list); 304 LIST_HEAD(gc_list);
303 int i, deleted = 0; 305 int i, deleted = 0;
304 306
305 fcp = per_cpu_ptr(fc->percpu, smp_processor_id()); 307 fcp = this_cpu_ptr(fc->percpu);
306 for (i = 0; i < flow_cache_hash_size(fc); i++) { 308 for (i = 0; i < flow_cache_hash_size(fc); i++) {
307 hlist_for_each_entry_safe(fle, entry, tmp, 309 hlist_for_each_entry_safe(fle, entry, tmp,
308 &fcp->hash_table[i], u.hlist) { 310 &fcp->hash_table[i], u.hlist) {
@@ -424,6 +426,3 @@ static int __init flow_cache_init_global(void)
424} 426}
425 427
426module_init(flow_cache_init_global); 428module_init(flow_cache_init_global);
427
428EXPORT_SYMBOL(flow_cache_genid);
429EXPORT_SYMBOL(flow_cache_lookup);
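this_cpu_ptr(p) is semantically per_cpu_ptr(p, smp_processor_id()); the substitution is valid here because local_bh_disable() already pins the task to one CPU, and it lets the architecture compute the address in a single step (a segment-relative access on x86) instead of an explicit CPU-id lookup. A rough, runnable userspace analogy, assuming per-CPU data behaves like per-thread data:

#include <stdio.h>

static __thread int my_counter;	/* this_cpu_ptr-style: resolved directly */
static int counters[64];	/* per_cpu_ptr-style: indexed by an id */

int main(void)
{
	int my_id = 0;		/* stand-in for smp_processor_id() */

	counters[my_id]++;	/* explicit id lookup */
	my_counter++;		/* the TLS machinery finds "my slot" itself */
	printf("%d %d\n", counters[my_id], my_counter);
	return 0;
}
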
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 785e5276a300..9fbe7f7429b0 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -263,6 +263,7 @@ static void __gen_kill_estimator(struct rcu_head *head)
263 * 263 *
264 * Removes the rate estimator specified by &bstats and &rate_est. 264 * Removes the rate estimator specified by &bstats and &rate_est.
265 * 265 *
266 * Note: the caller must let an RCU grace period elapse before freeing stats_lock
266 */ 267 */
267void gen_kill_estimator(struct gnet_stats_basic_packed *bstats, 268void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
268 struct gnet_stats_rate_est *rate_est) 269 struct gnet_stats_rate_est *rate_est)
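
The added note means the estimator timer may still take stats_lock until an RCU grace period has elapsed after gen_kill_estimator() returns. A hedged kernel-context fragment (not standalone code; 'q' is an assumed structure embedding that lock) showing a teardown order consistent with the comment:

	gen_kill_estimator(&q->bstats, &q->rate_est);
	synchronize_rcu();	/* timer can no longer dereference q's lock */
	kfree(q);		/* only now may the memory holding the lock go away */
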
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
index 393b1d8618e2..0452eb27a272 100644
--- a/net/core/gen_stats.c
+++ b/net/core/gen_stats.c
@@ -73,6 +73,7 @@ gnet_stats_start_copy_compat(struct sk_buff *skb, int type, int tc_stats_type,
73 73
74 return 0; 74 return 0;
75} 75}
76EXPORT_SYMBOL(gnet_stats_start_copy_compat);
76 77
77/** 78/**
78 * gnet_stats_start_copy_compat - start dumping procedure in compatibility mode 79 * gnet_stats_start_copy_compat - start dumping procedure in compatibility mode
@@ -93,6 +94,7 @@ gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
93{ 94{
94 return gnet_stats_start_copy_compat(skb, type, 0, 0, lock, d); 95 return gnet_stats_start_copy_compat(skb, type, 0, 0, lock, d);
95} 96}
97EXPORT_SYMBOL(gnet_stats_start_copy);
96 98
97/** 99/**
98 * gnet_stats_copy_basic - copy basic statistics into statistic TLV 100 * gnet_stats_copy_basic - copy basic statistics into statistic TLV
@@ -123,6 +125,7 @@ gnet_stats_copy_basic(struct gnet_dump *d, struct gnet_stats_basic_packed *b)
123 } 125 }
124 return 0; 126 return 0;
125} 127}
128EXPORT_SYMBOL(gnet_stats_copy_basic);
126 129
127/** 130/**
128 * gnet_stats_copy_rate_est - copy rate estimator statistics into statistics TLV 131 * gnet_stats_copy_rate_est - copy rate estimator statistics into statistics TLV
@@ -154,6 +157,7 @@ gnet_stats_copy_rate_est(struct gnet_dump *d,
154 157
155 return 0; 158 return 0;
156} 159}
160EXPORT_SYMBOL(gnet_stats_copy_rate_est);
157 161
158/** 162/**
159 * gnet_stats_copy_queue - copy queue statistics into statistics TLV 163 * gnet_stats_copy_queue - copy queue statistics into statistics TLV
@@ -181,6 +185,7 @@ gnet_stats_copy_queue(struct gnet_dump *d, struct gnet_stats_queue *q)
181 185
182 return 0; 186 return 0;
183} 187}
188EXPORT_SYMBOL(gnet_stats_copy_queue);
184 189
185/** 190/**
186 * gnet_stats_copy_app - copy application specific statistics into statistics TLV 191 * gnet_stats_copy_app - copy application specific statistics into statistics TLV
@@ -208,6 +213,7 @@ gnet_stats_copy_app(struct gnet_dump *d, void *st, int len)
208 213
209 return 0; 214 return 0;
210} 215}
216EXPORT_SYMBOL(gnet_stats_copy_app);
211 217
212/** 218/**
213 * gnet_stats_finish_copy - finish dumping procedure 219 * gnet_stats_finish_copy - finish dumping procedure
@@ -241,12 +247,4 @@ gnet_stats_finish_copy(struct gnet_dump *d)
241 spin_unlock_bh(d->lock); 247 spin_unlock_bh(d->lock);
242 return 0; 248 return 0;
243} 249}
244
245
246EXPORT_SYMBOL(gnet_stats_start_copy);
247EXPORT_SYMBOL(gnet_stats_start_copy_compat);
248EXPORT_SYMBOL(gnet_stats_copy_basic);
249EXPORT_SYMBOL(gnet_stats_copy_rate_est);
250EXPORT_SYMBOL(gnet_stats_copy_queue);
251EXPORT_SYMBOL(gnet_stats_copy_app);
252EXPORT_SYMBOL(gnet_stats_finish_copy); 250EXPORT_SYMBOL(gnet_stats_finish_copy);
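This hunk, and the matching ones in iovec.c, link_watch.c, netevent.c, net-sysfs.c and netpoll.c below, are the same mechanical cleanup: each EXPORT_SYMBOL() moves from a collected block at the bottom of the file to the line directly after the definition it exports, the placement checkpatch.pl expects. In miniature (hypothetical function name):

int example_export(void)
{
	return 0;
}
EXPORT_SYMBOL(example_export);	/* export declared with its definition */
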
diff --git a/net/core/iovec.c b/net/core/iovec.c
index 1e7f4e91a935..1cd98df412df 100644
--- a/net/core/iovec.c
+++ b/net/core/iovec.c
@@ -95,6 +95,7 @@ int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len)
95 95
96 return 0; 96 return 0;
97} 97}
98EXPORT_SYMBOL(memcpy_toiovec);
98 99
99/* 100/*
100 * Copy kernel to iovec. Returns -EFAULT on error. 101 * Copy kernel to iovec. Returns -EFAULT on error.
@@ -120,6 +121,7 @@ int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata,
120 121
121 return 0; 122 return 0;
122} 123}
124EXPORT_SYMBOL(memcpy_toiovecend);
123 125
124/* 126/*
125 * Copy iovec to kernel. Returns -EFAULT on error. 127 * Copy iovec to kernel. Returns -EFAULT on error.
@@ -144,6 +146,7 @@ int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len)
144 146
145 return 0; 147 return 0;
146} 148}
149EXPORT_SYMBOL(memcpy_fromiovec);
147 150
148/* 151/*
149 * Copy iovec from kernel. Returns -EFAULT on error. 152 * Copy iovec from kernel. Returns -EFAULT on error.
@@ -172,6 +175,7 @@ int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
172 175
173 return 0; 176 return 0;
174} 177}
178EXPORT_SYMBOL(memcpy_fromiovecend);
175 179
176/* 180/*
177 * And now for the all-in-one: copy and checksum from a user iovec 181 * And now for the all-in-one: copy and checksum from a user iovec
@@ -256,9 +260,4 @@ out_fault:
256 err = -EFAULT; 260 err = -EFAULT;
257 goto out; 261 goto out;
258} 262}
259
260EXPORT_SYMBOL(csum_partial_copy_fromiovecend); 263EXPORT_SYMBOL(csum_partial_copy_fromiovecend);
261EXPORT_SYMBOL(memcpy_fromiovec);
262EXPORT_SYMBOL(memcpy_fromiovecend);
263EXPORT_SYMBOL(memcpy_toiovec);
264EXPORT_SYMBOL(memcpy_toiovecend);
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index bdbce2f5875b..01a1101b5936 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -243,5 +243,4 @@ void linkwatch_fire_event(struct net_device *dev)
243 243
244 linkwatch_schedule_work(urgent); 244 linkwatch_schedule_work(urgent);
245} 245}
246
247EXPORT_SYMBOL(linkwatch_fire_event); 246EXPORT_SYMBOL(linkwatch_fire_event);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 6ba1c0eece03..a4e0a7482c2b 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -949,7 +949,10 @@ static void neigh_update_hhs(struct neighbour *neigh)
949{ 949{
950 struct hh_cache *hh; 950 struct hh_cache *hh;
951 void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *) 951 void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
952 = neigh->dev->header_ops->cache_update; 952 = NULL;
953
954 if (neigh->dev->header_ops)
955 update = neigh->dev->header_ops->cache_update;
953 956
954 if (update) { 957 if (update) {
955 for (hh = neigh->hh; hh; hh = hh->hh_next) { 958 for (hh = neigh->hh; hh; hh = hh->hh_next) {
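The neighbour.c fix guards a dereference that assumed every device has header_ops; devices without a link-layer header (tunnels and other L3-only interfaces, for example) would oops in neigh_update_hhs(). The same guard pattern in a standalone sketch with stand-in types:

#include <stdio.h>

struct header_ops { void (*cache_update)(void); };
struct net_device { const struct header_ops *header_ops; };

static void update_example(struct net_device *dev)
{
	void (*update)(void) = NULL;

	if (dev->header_ops)	/* the guard the patch adds */
		update = dev->header_ops->cache_update;
	if (update)
		update();
	else
		puts("no cache_update; nothing to do");
}

int main(void)
{
	struct net_device dev = { .header_ops = NULL };	/* e.g. an L3-only device */
	update_example(&dev);	/* safe: no NULL dereference */
	return 0;
}
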
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 99e7052d7323..af4dfbadf2a0 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -29,6 +29,7 @@ static const char fmt_hex[] = "%#x\n";
29static const char fmt_long_hex[] = "%#lx\n"; 29static const char fmt_long_hex[] = "%#lx\n";
30static const char fmt_dec[] = "%d\n"; 30static const char fmt_dec[] = "%d\n";
31static const char fmt_ulong[] = "%lu\n"; 31static const char fmt_ulong[] = "%lu\n";
32static const char fmt_u64[] = "%llu\n";
32 33
33static inline int dev_isalive(const struct net_device *dev) 34static inline int dev_isalive(const struct net_device *dev)
34{ 35{
@@ -94,6 +95,7 @@ static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
94} 95}
95 96
96NETDEVICE_SHOW(dev_id, fmt_hex); 97NETDEVICE_SHOW(dev_id, fmt_hex);
98NETDEVICE_SHOW(addr_assign_type, fmt_dec);
97NETDEVICE_SHOW(addr_len, fmt_dec); 99NETDEVICE_SHOW(addr_len, fmt_dec);
98NETDEVICE_SHOW(iflink, fmt_dec); 100NETDEVICE_SHOW(iflink, fmt_dec);
99NETDEVICE_SHOW(ifindex, fmt_dec); 101NETDEVICE_SHOW(ifindex, fmt_dec);
@@ -294,6 +296,7 @@ static ssize_t show_ifalias(struct device *dev,
294} 296}
295 297
296static struct device_attribute net_class_attributes[] = { 298static struct device_attribute net_class_attributes[] = {
299 __ATTR(addr_assign_type, S_IRUGO, show_addr_assign_type, NULL),
297 __ATTR(addr_len, S_IRUGO, show_addr_len, NULL), 300 __ATTR(addr_len, S_IRUGO, show_addr_len, NULL),
298 __ATTR(dev_id, S_IRUGO, show_dev_id, NULL), 301 __ATTR(dev_id, S_IRUGO, show_dev_id, NULL),
299 __ATTR(ifalias, S_IRUGO | S_IWUSR, show_ifalias, store_ifalias), 302 __ATTR(ifalias, S_IRUGO | S_IWUSR, show_ifalias, store_ifalias),
@@ -324,14 +327,15 @@ static ssize_t netstat_show(const struct device *d,
324 struct net_device *dev = to_net_dev(d); 327 struct net_device *dev = to_net_dev(d);
325 ssize_t ret = -EINVAL; 328 ssize_t ret = -EINVAL;
326 329
327 WARN_ON(offset > sizeof(struct net_device_stats) || 330 WARN_ON(offset > sizeof(struct rtnl_link_stats64) ||
328 offset % sizeof(unsigned long) != 0); 331 offset % sizeof(u64) != 0);
329 332
330 read_lock(&dev_base_lock); 333 read_lock(&dev_base_lock);
331 if (dev_isalive(dev)) { 334 if (dev_isalive(dev)) {
332 const struct net_device_stats *stats = dev_get_stats(dev); 335 struct rtnl_link_stats64 temp;
333 ret = sprintf(buf, fmt_ulong, 336 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
334 *(unsigned long *)(((u8 *) stats) + offset)); 337
338 ret = sprintf(buf, fmt_u64, *(u64 *)(((u8 *) stats) + offset));
335 } 339 }
336 read_unlock(&dev_base_lock); 340 read_unlock(&dev_base_lock);
337 return ret; 341 return ret;
@@ -343,7 +347,7 @@ static ssize_t show_##name(struct device *d, \
343 struct device_attribute *attr, char *buf) \ 347 struct device_attribute *attr, char *buf) \
344{ \ 348{ \
345 return netstat_show(d, attr, buf, \ 349 return netstat_show(d, attr, buf, \
346 offsetof(struct net_device_stats, name)); \ 350 offsetof(struct rtnl_link_stats64, name)); \
347} \ 351} \
348static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL) 352static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
349 353
@@ -922,13 +926,12 @@ int netdev_class_create_file(struct class_attribute *class_attr)
922{ 926{
923 return class_create_file(&net_class, class_attr); 927 return class_create_file(&net_class, class_attr);
924} 928}
929EXPORT_SYMBOL(netdev_class_create_file);
925 930
926void netdev_class_remove_file(struct class_attribute *class_attr) 931void netdev_class_remove_file(struct class_attribute *class_attr)
927{ 932{
928 class_remove_file(&net_class, class_attr); 933 class_remove_file(&net_class, class_attr);
929} 934}
930
931EXPORT_SYMBOL(netdev_class_create_file);
932EXPORT_SYMBOL(netdev_class_remove_file); 935EXPORT_SYMBOL(netdev_class_remove_file);
933 936
934int netdev_kobject_init(void) 937int netdev_kobject_init(void)
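With dev_get_stats() now returning struct rtnl_link_stats64, every counter read by netstat_show() is a u64 located via offsetof(), so the sanity WARN_ON switches from sizeof(unsigned long) to sizeof(u64) alignment and the format string becomes "%llu". A standalone re-creation of the offset-based read (struct and values invented for illustration):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Stand-in for struct rtnl_link_stats64: every field is a u64. */
struct stats64 {
	uint64_t rx_packets;
	uint64_t tx_packets;
	uint64_t rx_bytes;
};

static uint64_t read_counter(const struct stats64 *stats, size_t offset)
{
	/* mirrors: *(u64 *)(((u8 *)stats) + offset) */
	return *(const uint64_t *)((const uint8_t *)stats + offset);
}

int main(void)
{
	struct stats64 s = { .rx_packets = 42, .tx_packets = 7, .rx_bytes = 4096 };

	printf("%llu\n", (unsigned long long)
	       read_counter(&s, offsetof(struct stats64, rx_bytes)));	/* 4096 */
	return 0;
}
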
diff --git a/net/core/netevent.c b/net/core/netevent.c
index 95f81de87502..865f0ceb81fb 100644
--- a/net/core/netevent.c
+++ b/net/core/netevent.c
@@ -35,6 +35,7 @@ int register_netevent_notifier(struct notifier_block *nb)
35 err = atomic_notifier_chain_register(&netevent_notif_chain, nb); 35 err = atomic_notifier_chain_register(&netevent_notif_chain, nb);
36 return err; 36 return err;
37} 37}
38EXPORT_SYMBOL_GPL(register_netevent_notifier);
38 39
39/** 40/**
40 * netevent_unregister_notifier - unregister a netevent notifier block 41 * netevent_unregister_notifier - unregister a netevent notifier block
@@ -50,6 +51,7 @@ int unregister_netevent_notifier(struct notifier_block *nb)
50{ 51{
51 return atomic_notifier_chain_unregister(&netevent_notif_chain, nb); 52 return atomic_notifier_chain_unregister(&netevent_notif_chain, nb);
52} 53}
54EXPORT_SYMBOL_GPL(unregister_netevent_notifier);
53 55
54/** 56/**
55 * call_netevent_notifiers - call all netevent notifier blocks 57 * call_netevent_notifiers - call all netevent notifier blocks
@@ -64,7 +66,4 @@ int call_netevent_notifiers(unsigned long val, void *v)
64{ 66{
65 return atomic_notifier_call_chain(&netevent_notif_chain, val, v); 67 return atomic_notifier_call_chain(&netevent_notif_chain, val, v);
66} 68}
67
68EXPORT_SYMBOL_GPL(register_netevent_notifier);
69EXPORT_SYMBOL_GPL(unregister_netevent_notifier);
70EXPORT_SYMBOL_GPL(call_netevent_notifiers); 69EXPORT_SYMBOL_GPL(call_netevent_notifiers);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 94825b109551..537e01afd81b 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -199,11 +199,13 @@ void netpoll_poll_dev(struct net_device *dev)
199 199
200 zap_completion_queue(); 200 zap_completion_queue();
201} 201}
202EXPORT_SYMBOL(netpoll_poll_dev);
202 203
203void netpoll_poll(struct netpoll *np) 204void netpoll_poll(struct netpoll *np)
204{ 205{
205 netpoll_poll_dev(np->dev); 206 netpoll_poll_dev(np->dev);
206} 207}
208EXPORT_SYMBOL(netpoll_poll);
207 209
208static void refill_skbs(void) 210static void refill_skbs(void)
209{ 211{
@@ -292,6 +294,7 @@ void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
292 unsigned long tries; 294 unsigned long tries;
293 struct net_device *dev = np->dev; 295 struct net_device *dev = np->dev;
294 const struct net_device_ops *ops = dev->netdev_ops; 296 const struct net_device_ops *ops = dev->netdev_ops;
297 /* It is up to the caller to keep npinfo alive. */
295 struct netpoll_info *npinfo = np->dev->npinfo; 298 struct netpoll_info *npinfo = np->dev->npinfo;
296 299
297 if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) { 300 if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
@@ -343,6 +346,7 @@ void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
343 schedule_delayed_work(&npinfo->tx_work,0); 346 schedule_delayed_work(&npinfo->tx_work,0);
344 } 347 }
345} 348}
349EXPORT_SYMBOL(netpoll_send_skb);
346 350
347void netpoll_send_udp(struct netpoll *np, const char *msg, int len) 351void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
348{ 352{
@@ -404,6 +408,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
404 408
405 netpoll_send_skb(np, skb); 409 netpoll_send_skb(np, skb);
406} 410}
411EXPORT_SYMBOL(netpoll_send_udp);
407 412
408static void arp_reply(struct sk_buff *skb) 413static void arp_reply(struct sk_buff *skb)
409{ 414{
@@ -630,6 +635,7 @@ void netpoll_print_options(struct netpoll *np)
630 printk(KERN_INFO "%s: remote ethernet address %pM\n", 635 printk(KERN_INFO "%s: remote ethernet address %pM\n",
631 np->name, np->remote_mac); 636 np->name, np->remote_mac);
632} 637}
638EXPORT_SYMBOL(netpoll_print_options);
633 639
634int netpoll_parse_options(struct netpoll *np, char *opt) 640int netpoll_parse_options(struct netpoll *np, char *opt)
635{ 641{
@@ -722,30 +728,29 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
722 np->name, cur); 728 np->name, cur);
723 return -1; 729 return -1;
724} 730}
731EXPORT_SYMBOL(netpoll_parse_options);
725 732
726int netpoll_setup(struct netpoll *np) 733int __netpoll_setup(struct netpoll *np)
727{ 734{
728 struct net_device *ndev = NULL; 735 struct net_device *ndev = np->dev;
729 struct in_device *in_dev;
730 struct netpoll_info *npinfo; 736 struct netpoll_info *npinfo;
731 struct netpoll *npe, *tmp; 737 const struct net_device_ops *ops;
732 unsigned long flags; 738 unsigned long flags;
733 int err; 739 int err;
734 740
735 if (np->dev_name) 741 if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
736 ndev = dev_get_by_name(&init_net, np->dev_name); 742 !ndev->netdev_ops->ndo_poll_controller) {
737 if (!ndev) { 743 printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
738 printk(KERN_ERR "%s: %s doesn't exist, aborting.\n",
739 np->name, np->dev_name); 744 np->name, np->dev_name);
740 return -ENODEV; 745 err = -ENOTSUPP;
746 goto out;
741 } 747 }
742 748
743 np->dev = ndev;
744 if (!ndev->npinfo) { 749 if (!ndev->npinfo) {
745 npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL); 750 npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
746 if (!npinfo) { 751 if (!npinfo) {
747 err = -ENOMEM; 752 err = -ENOMEM;
748 goto put; 753 goto out;
749 } 754 }
750 755
751 npinfo->rx_flags = 0; 756 npinfo->rx_flags = 0;
@@ -757,6 +762,13 @@ int netpoll_setup(struct netpoll *np)
757 INIT_DELAYED_WORK(&npinfo->tx_work, queue_process); 762 INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);
758 763
759 atomic_set(&npinfo->refcnt, 1); 764 atomic_set(&npinfo->refcnt, 1);
765
766 ops = np->dev->netdev_ops;
767 if (ops->ndo_netpoll_setup) {
768 err = ops->ndo_netpoll_setup(ndev, npinfo);
769 if (err)
770 goto free_npinfo;
771 }
760 } else { 772 } else {
761 npinfo = ndev->npinfo; 773 npinfo = ndev->npinfo;
762 atomic_inc(&npinfo->refcnt); 774 atomic_inc(&npinfo->refcnt);
@@ -764,12 +776,37 @@ int netpoll_setup(struct netpoll *np)
764 776
765 npinfo->netpoll = np; 777 npinfo->netpoll = np;
766 778
767 if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) || 779 if (np->rx_hook) {
768 !ndev->netdev_ops->ndo_poll_controller) { 780 spin_lock_irqsave(&npinfo->rx_lock, flags);
769 printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n", 781 npinfo->rx_flags |= NETPOLL_RX_ENABLED;
782 list_add_tail(&np->rx, &npinfo->rx_np);
783 spin_unlock_irqrestore(&npinfo->rx_lock, flags);
784 }
785
786 /* last thing to do is link it to the net device structure */
787 rcu_assign_pointer(ndev->npinfo, npinfo);
788
789 return 0;
790
791free_npinfo:
792 kfree(npinfo);
793out:
794 return err;
795}
796EXPORT_SYMBOL_GPL(__netpoll_setup);
797
798int netpoll_setup(struct netpoll *np)
799{
800 struct net_device *ndev = NULL;
801 struct in_device *in_dev;
802 int err;
803
804 if (np->dev_name)
805 ndev = dev_get_by_name(&init_net, np->dev_name);
806 if (!ndev) {
807 printk(KERN_ERR "%s: %s doesn't exist, aborting.\n",
770 np->name, np->dev_name); 808 np->name, np->dev_name);
771 err = -ENOTSUPP; 809 return -ENODEV;
772 goto release;
773 } 810 }
774 811
775 if (!netif_running(ndev)) { 812 if (!netif_running(ndev)) {
@@ -785,7 +822,7 @@ int netpoll_setup(struct netpoll *np)
785 if (err) { 822 if (err) {
786 printk(KERN_ERR "%s: failed to open %s\n", 823 printk(KERN_ERR "%s: failed to open %s\n",
787 np->name, ndev->name); 824 np->name, ndev->name);
788 goto release; 825 goto put;
789 } 826 }
790 827
791 atleast = jiffies + HZ/10; 828 atleast = jiffies + HZ/10;
@@ -822,7 +859,7 @@ int netpoll_setup(struct netpoll *np)
822 printk(KERN_ERR "%s: no IP address for %s, aborting\n", 859 printk(KERN_ERR "%s: no IP address for %s, aborting\n",
823 np->name, np->dev_name); 860 np->name, np->dev_name);
824 err = -EDESTADDRREQ; 861 err = -EDESTADDRREQ;
825 goto release; 862 goto put;
826 } 863 }
827 864
828 np->local_ip = in_dev->ifa_list->ifa_local; 865 np->local_ip = in_dev->ifa_list->ifa_local;
@@ -830,38 +867,25 @@ int netpoll_setup(struct netpoll *np)
830 printk(KERN_INFO "%s: local IP %pI4\n", np->name, &np->local_ip); 867 printk(KERN_INFO "%s: local IP %pI4\n", np->name, &np->local_ip);
831 } 868 }
832 869
833 if (np->rx_hook) { 870 np->dev = ndev;
834 spin_lock_irqsave(&npinfo->rx_lock, flags);
835 npinfo->rx_flags |= NETPOLL_RX_ENABLED;
836 list_add_tail(&np->rx, &npinfo->rx_np);
837 spin_unlock_irqrestore(&npinfo->rx_lock, flags);
838 }
839 871
840 /* fill up the skb queue */ 872 /* fill up the skb queue */
841 refill_skbs(); 873 refill_skbs();
842 874
843 /* last thing to do is link it to the net device structure */ 875 rtnl_lock();
844 ndev->npinfo = npinfo; 876 err = __netpoll_setup(np);
877 rtnl_unlock();
845 878
846 /* avoid racing with NAPI reading npinfo */ 879 if (err)
847 synchronize_rcu(); 880 goto put;
848 881
849 return 0; 882 return 0;
850 883
851 release:
852 if (!ndev->npinfo) {
853 spin_lock_irqsave(&npinfo->rx_lock, flags);
854 list_for_each_entry_safe(npe, tmp, &npinfo->rx_np, rx) {
855 npe->dev = NULL;
856 }
857 spin_unlock_irqrestore(&npinfo->rx_lock, flags);
858
859 kfree(npinfo);
860 }
861put: 884put:
862 dev_put(ndev); 885 dev_put(ndev);
863 return err; 886 return err;
864} 887}
888EXPORT_SYMBOL(netpoll_setup);
865 889
866static int __init netpoll_init(void) 890static int __init netpoll_init(void)
867{ 891{
@@ -870,49 +894,65 @@ static int __init netpoll_init(void)
870} 894}
871core_initcall(netpoll_init); 895core_initcall(netpoll_init);
872 896
873void netpoll_cleanup(struct netpoll *np) 897void __netpoll_cleanup(struct netpoll *np)
874{ 898{
875 struct netpoll_info *npinfo; 899 struct netpoll_info *npinfo;
876 unsigned long flags; 900 unsigned long flags;
877 901
878 if (np->dev) { 902 npinfo = np->dev->npinfo;
879 npinfo = np->dev->npinfo; 903 if (!npinfo)
880 if (npinfo) { 904 return;
881 if (!list_empty(&npinfo->rx_np)) {
882 spin_lock_irqsave(&npinfo->rx_lock, flags);
883 list_del(&np->rx);
884 if (list_empty(&npinfo->rx_np))
885 npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
886 spin_unlock_irqrestore(&npinfo->rx_lock, flags);
887 }
888 905
889 if (atomic_dec_and_test(&npinfo->refcnt)) { 906 if (!list_empty(&npinfo->rx_np)) {
890 const struct net_device_ops *ops; 907 spin_lock_irqsave(&npinfo->rx_lock, flags);
891 skb_queue_purge(&npinfo->arp_tx); 908 list_del(&np->rx);
892 skb_queue_purge(&npinfo->txq); 909 if (list_empty(&npinfo->rx_np))
893 cancel_rearming_delayed_work(&npinfo->tx_work); 910 npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
894 911 spin_unlock_irqrestore(&npinfo->rx_lock, flags);
895 /* clean after last, unfinished work */ 912 }
896 __skb_queue_purge(&npinfo->txq); 913
897 kfree(npinfo); 914 if (atomic_dec_and_test(&npinfo->refcnt)) {
898 ops = np->dev->netdev_ops; 915 const struct net_device_ops *ops;
899 if (ops->ndo_netpoll_cleanup) 916
900 ops->ndo_netpoll_cleanup(np->dev); 917 ops = np->dev->netdev_ops;
901 else 918 if (ops->ndo_netpoll_cleanup)
902 np->dev->npinfo = NULL; 919 ops->ndo_netpoll_cleanup(np->dev);
903 } 920
904 } 921 rcu_assign_pointer(np->dev->npinfo, NULL);
922
923 /* avoid racing with NAPI reading npinfo */
924 synchronize_rcu_bh();
925
926 skb_queue_purge(&npinfo->arp_tx);
927 skb_queue_purge(&npinfo->txq);
928 cancel_rearming_delayed_work(&npinfo->tx_work);
905 929
906 dev_put(np->dev); 930 /* clean after last, unfinished work */
931 __skb_queue_purge(&npinfo->txq);
932 kfree(npinfo);
907 } 933 }
934}
935EXPORT_SYMBOL_GPL(__netpoll_cleanup);
936
937void netpoll_cleanup(struct netpoll *np)
938{
939 if (!np->dev)
940 return;
908 941
942 rtnl_lock();
943 __netpoll_cleanup(np);
944 rtnl_unlock();
945
946 dev_put(np->dev);
909 np->dev = NULL; 947 np->dev = NULL;
910} 948}
949EXPORT_SYMBOL(netpoll_cleanup);
911 950
912int netpoll_trap(void) 951int netpoll_trap(void)
913{ 952{
914 return atomic_read(&trapped); 953 return atomic_read(&trapped);
915} 954}
955EXPORT_SYMBOL(netpoll_trap);
916 956
917void netpoll_set_trap(int trap) 957void netpoll_set_trap(int trap)
918{ 958{
@@ -921,14 +961,4 @@ void netpoll_set_trap(int trap)
921 else 961 else
922 atomic_dec(&trapped); 962 atomic_dec(&trapped);
923} 963}
924
925EXPORT_SYMBOL(netpoll_send_skb);
926EXPORT_SYMBOL(netpoll_set_trap); 964EXPORT_SYMBOL(netpoll_set_trap);
927EXPORT_SYMBOL(netpoll_trap);
928EXPORT_SYMBOL(netpoll_print_options);
929EXPORT_SYMBOL(netpoll_parse_options);
930EXPORT_SYMBOL(netpoll_setup);
931EXPORT_SYMBOL(netpoll_cleanup);
932EXPORT_SYMBOL(netpoll_send_udp);
933EXPORT_SYMBOL(netpoll_poll_dev);
934EXPORT_SYMBOL(netpoll_poll);
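The netpoll rework follows the common kernel locked/unlocked split: __netpoll_setup() and __netpoll_cleanup() assume the caller already holds the rtnl mutex, while netpoll_setup()/netpoll_cleanup() remain thin take-lock wrappers; that lets code already running under rtnl (bridge and bonding device handlers, for instance) reuse the core logic without deadlocking. A generic sketch of the pairing, with a pthread mutex standing in for rtnl:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;	/* stand-in for rtnl */

/* __variant: caller must already hold big_lock (kernel convention). */
static int __do_setup(const char *name)
{
	printf("setting up %s under the lock\n", name);
	return 0;
}

/* Plain variant: thin take-lock/call/release wrapper. */
static int do_setup(const char *name)
{
	int err;

	pthread_mutex_lock(&big_lock);
	err = __do_setup(name);
	pthread_mutex_unlock(&big_lock);
	return err;
}

int main(void)
{
	do_setup("eth0");		/* ordinary caller */

	pthread_mutex_lock(&big_lock);	/* caller already in a locked section... */
	__do_setup("bond0");		/* ...uses the __ variant directly */
	pthread_mutex_unlock(&big_lock);
	return 0;
}
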
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 1dacd7ba8dbb..10a1ea72010d 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -115,6 +115,9 @@
115 * command by Adit Ranadive <adit.262@gmail.com> 115 * command by Adit Ranadive <adit.262@gmail.com>
116 * 116 *
117 */ 117 */
118
119#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
120
118#include <linux/sys.h> 121#include <linux/sys.h>
119#include <linux/types.h> 122#include <linux/types.h>
120#include <linux/module.h> 123#include <linux/module.h>
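
The pr_fmt() define works because printk.h expands pr_err(fmt, ...) and friends to printk(LEVEL pr_fmt(fmt), ...), so every message in the file gains the module-name prefix at compile time and the hand-written "pktgen: " literals can be dropped, as the later hunks do. A userspace mimic of the mechanism:

#include <stdio.h>

#define KBUILD_MODNAME "pktgen"
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define pr_err(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	pr_err("ERROR: No thread\n");	/* prints "pktgen: ERROR: No thread" */
	return 0;
}
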
@@ -169,11 +172,13 @@
169#include <asm/dma.h> 172#include <asm/dma.h>
170#include <asm/div64.h> /* do_div */ 173#include <asm/div64.h> /* do_div */
171 174
172#define VERSION "2.73" 175#define VERSION "2.74"
173#define IP_NAME_SZ 32 176#define IP_NAME_SZ 32
174#define MAX_MPLS_LABELS 16 /* This is the max label stack depth */ 177#define MAX_MPLS_LABELS 16 /* This is the max label stack depth */
175#define MPLS_STACK_BOTTOM htonl(0x00000100) 178#define MPLS_STACK_BOTTOM htonl(0x00000100)
176 179
180#define func_enter() pr_debug("entering %s\n", __func__);
181
177/* Device flag bits */ 182/* Device flag bits */
178#define F_IPSRC_RND (1<<0) /* IP-Src Random */ 183#define F_IPSRC_RND (1<<0) /* IP-Src Random */
179#define F_IPDST_RND (1<<1) /* IP-Dst Random */ 184#define F_IPDST_RND (1<<1) /* IP-Dst Random */
@@ -424,7 +429,8 @@ static inline int ktime_lt(const ktime_t cmp1, const ktime_t cmp2)
424} 429}
425 430
426static const char version[] = 431static const char version[] =
427 "pktgen " VERSION ": Packet Generator for packet performance testing.\n"; 432 "Packet Generator for packet performance testing. "
433 "Version: " VERSION "\n";
428 434
429static int pktgen_remove_device(struct pktgen_thread *t, struct pktgen_dev *i); 435static int pktgen_remove_device(struct pktgen_thread *t, struct pktgen_dev *i);
430static int pktgen_add_device(struct pktgen_thread *t, const char *ifname); 436static int pktgen_add_device(struct pktgen_thread *t, const char *ifname);
@@ -495,7 +501,7 @@ static ssize_t pgctrl_write(struct file *file, const char __user *buf,
495 pktgen_reset_all_threads(); 501 pktgen_reset_all_threads();
496 502
497 else 503 else
498 printk(KERN_WARNING "pktgen: Unknown command: %s\n", data); 504 pr_warning("Unknown command: %s\n", data);
499 505
500 err = count; 506 err = count;
501 507
@@ -840,7 +846,7 @@ static ssize_t pktgen_if_write(struct file *file,
840 const char __user * user_buffer, size_t count, 846 const char __user * user_buffer, size_t count,
841 loff_t * offset) 847 loff_t * offset)
842{ 848{
843 struct seq_file *seq = (struct seq_file *)file->private_data; 849 struct seq_file *seq = file->private_data;
844 struct pktgen_dev *pkt_dev = seq->private; 850 struct pktgen_dev *pkt_dev = seq->private;
845 int i = 0, max, len; 851 int i = 0, max, len;
846 char name[16], valstr[32]; 852 char name[16], valstr[32];
@@ -852,14 +858,14 @@ static ssize_t pktgen_if_write(struct file *file,
852 pg_result = &(pkt_dev->result[0]); 858 pg_result = &(pkt_dev->result[0]);
853 859
854 if (count < 1) { 860 if (count < 1) {
855 printk(KERN_WARNING "pktgen: wrong command format\n"); 861 pr_warning("wrong command format\n");
856 return -EINVAL; 862 return -EINVAL;
857 } 863 }
858 864
859 max = count - i; 865 max = count - i;
860 tmp = count_trail_chars(&user_buffer[i], max); 866 tmp = count_trail_chars(&user_buffer[i], max);
861 if (tmp < 0) { 867 if (tmp < 0) {
862 printk(KERN_WARNING "pktgen: illegal format\n"); 868 pr_warning("illegal format\n");
863 return tmp; 869 return tmp;
864 } 870 }
865 i += tmp; 871 i += tmp;
@@ -980,6 +986,36 @@ static ssize_t pktgen_if_write(struct file *file,
980 (unsigned long long) pkt_dev->delay); 986 (unsigned long long) pkt_dev->delay);
981 return count; 987 return count;
982 } 988 }
989 if (!strcmp(name, "rate")) {
990 len = num_arg(&user_buffer[i], 10, &value);
991 if (len < 0)
992 return len;
993
994 i += len;
995 if (!value)
996 return len;
997 pkt_dev->delay = pkt_dev->min_pkt_size*8*NSEC_PER_USEC/value;
998 if (debug)
999 pr_info("Delay set at: %llu ns\n", pkt_dev->delay);
1000
1001 sprintf(pg_result, "OK: rate=%lu", value);
1002 return count;
1003 }
1004 if (!strcmp(name, "ratep")) {
1005 len = num_arg(&user_buffer[i], 10, &value);
1006 if (len < 0)
1007 return len;
1008
1009 i += len;
1010 if (!value)
1011 return len;
1012 pkt_dev->delay = NSEC_PER_SEC/value;
1013 if (debug)
1014 pr_info("Delay set at: %llu ns\n", pkt_dev->delay);
1015
1016 sprintf(pg_result, "OK: rate=%lu", value);
1017 return count;
1018 }
983 if (!strcmp(name, "udp_src_min")) { 1019 if (!strcmp(name, "udp_src_min")) {
984 len = num_arg(&user_buffer[i], 10, &value); 1020 len = num_arg(&user_buffer[i], 10, &value);
985 if (len < 0) 1021 if (len < 0)
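Both new commands reduce to an inter-packet delay in nanoseconds: "rate" takes a bit rate in Mb/s, so bits-per-packet * NSEC_PER_USEC / rate yields the packet time in ns directly, while "ratep" takes packets per second and uses NSEC_PER_SEC / value. A standalone check of the arithmetic (packet size and rates are example values):

#include <stdio.h>

#define NSEC_PER_USEC 1000ULL
#define NSEC_PER_SEC  1000000000ULL

int main(void)
{
	unsigned long long size = 60;		/* min_pkt_size in bytes */
	unsigned long long rate_mbps = 100;	/* "rate" argument */
	unsigned long long pps = 10000;		/* "ratep" argument */

	/* "rate": delay between packets for a bit rate in Mb/s -> 4800 ns */
	printf("rate  -> %llu ns\n", size * 8 * NSEC_PER_USEC / rate_mbps);
	/* "ratep": delay for a packet rate in packets/s -> 100000 ns */
	printf("ratep -> %llu ns\n", NSEC_PER_SEC / pps);
	return 0;
}
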
@@ -1398,18 +1434,12 @@ static ssize_t pktgen_if_write(struct file *file,
1398 i += len; 1434 i += len;
1399 1435
1400 for (*m = 0; *v && m < pkt_dev->dst_mac + 6; v++) { 1436 for (*m = 0; *v && m < pkt_dev->dst_mac + 6; v++) {
1401 if (*v >= '0' && *v <= '9') { 1437 int value;
1402 *m *= 16; 1438
1403 *m += *v - '0'; 1439 value = hex_to_bin(*v);
1404 } 1440 if (value >= 0)
1405 if (*v >= 'A' && *v <= 'F') { 1441 *m = *m * 16 + value;
1406 *m *= 16; 1442
1407 *m += *v - 'A' + 10;
1408 }
1409 if (*v >= 'a' && *v <= 'f') {
1410 *m *= 16;
1411 *m += *v - 'a' + 10;
1412 }
1413 if (*v == ':') { 1443 if (*v == ':') {
1414 m++; 1444 m++;
1415 *m = 0; 1445 *m = 0;
@@ -1440,18 +1470,12 @@ static ssize_t pktgen_if_write(struct file *file,
1440 i += len; 1470 i += len;
1441 1471
1442 for (*m = 0; *v && m < pkt_dev->src_mac + 6; v++) { 1472 for (*m = 0; *v && m < pkt_dev->src_mac + 6; v++) {
1443 if (*v >= '0' && *v <= '9') { 1473 int value;
1444 *m *= 16; 1474
1445 *m += *v - '0'; 1475 value = hex_to_bin(*v);
1446 } 1476 if (value >= 0)
1447 if (*v >= 'A' && *v <= 'F') { 1477 *m = *m * 16 + value;
1448 *m *= 16; 1478
1449 *m += *v - 'A' + 10;
1450 }
1451 if (*v >= 'a' && *v <= 'f') {
1452 *m *= 16;
1453 *m += *v - 'a' + 10;
1454 }
1455 if (*v == ':') { 1479 if (*v == ':') {
1456 m++; 1480 m++;
1457 *m = 0; 1481 *m = 0;
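Both MAC-parsing loops replace three open-coded character-range tests with hex_to_bin(), which returns the nibble value or -1 for non-hex characters. A userspace re-creation of the simplified loop (hex_to_bin() is a kernel helper, so a local equivalent is provided, plus a bounds guard the kernel loop leaves implicit):

#include <stdio.h>

static int hex_to_bin(char ch)	/* local stand-in for the kernel helper */
{
	if (ch >= '0' && ch <= '9') return ch - '0';
	if (ch >= 'a' && ch <= 'f') return ch - 'a' + 10;
	if (ch >= 'A' && ch <= 'F') return ch - 'A' + 10;
	return -1;
}

static void parse_mac(const char *v, unsigned char mac[6])
{
	unsigned char *m = mac;

	/* same shape as the pktgen loop above */
	for (*m = 0; *v && m < mac + 6; v++) {
		int value = hex_to_bin(*v);

		if (value >= 0)
			*m = *m * 16 + value;
		if (*v == ':') {
			m++;
			if (m < mac + 6)
				*m = 0;
		}
	}
}

int main(void)
{
	unsigned char mac[6];

	parse_mac("00:1B:21:3a:b0:ff", mac);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}
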
@@ -1740,7 +1764,7 @@ static ssize_t pktgen_thread_write(struct file *file,
1740 const char __user * user_buffer, 1764 const char __user * user_buffer,
1741 size_t count, loff_t * offset) 1765 size_t count, loff_t * offset)
1742{ 1766{
1743 struct seq_file *seq = (struct seq_file *)file->private_data; 1767 struct seq_file *seq = file->private_data;
1744 struct pktgen_thread *t = seq->private; 1768 struct pktgen_thread *t = seq->private;
1745 int i = 0, max, len, ret; 1769 int i = 0, max, len, ret;
1746 char name[40]; 1770 char name[40];
@@ -1781,7 +1805,7 @@ static ssize_t pktgen_thread_write(struct file *file,
1781 name, (unsigned long)count); 1805 name, (unsigned long)count);
1782 1806
1783 if (!t) { 1807 if (!t) {
1784 printk(KERN_ERR "pktgen: ERROR: No thread\n"); 1808 pr_err("ERROR: No thread\n");
1785 ret = -EINVAL; 1809 ret = -EINVAL;
1786 goto out; 1810 goto out;
1787 } 1811 }
@@ -1874,7 +1898,7 @@ static void pktgen_mark_device(const char *ifname)
1874 int i = 0; 1898 int i = 0;
1875 1899
1876 mutex_lock(&pktgen_thread_lock); 1900 mutex_lock(&pktgen_thread_lock);
1877 pr_debug("pktgen: pktgen_mark_device marking %s for removal\n", ifname); 1901 pr_debug("%s: marking %s for removal\n", __func__, ifname);
1878 1902
1879 while (1) { 1903 while (1) {
1880 1904
@@ -1883,15 +1907,14 @@ static void pktgen_mark_device(const char *ifname)
1883 break; /* success */ 1907 break; /* success */
1884 1908
1885 mutex_unlock(&pktgen_thread_lock); 1909 mutex_unlock(&pktgen_thread_lock);
1886 pr_debug("pktgen: pktgen_mark_device waiting for %s " 1910 pr_debug("%s: waiting for %s to disappear....\n",
1887 "to disappear....\n", ifname); 1911 __func__, ifname);
1888 schedule_timeout_interruptible(msecs_to_jiffies(msec_per_try)); 1912 schedule_timeout_interruptible(msecs_to_jiffies(msec_per_try));
1889 mutex_lock(&pktgen_thread_lock); 1913 mutex_lock(&pktgen_thread_lock);
1890 1914
1891 if (++i >= max_tries) { 1915 if (++i >= max_tries) {
1892 printk(KERN_ERR "pktgen_mark_device: timed out after " 1916 pr_err("%s: timed out after waiting %d msec for device %s to be removed\n",
1893 "waiting %d msec for device %s to be removed\n", 1917 __func__, msec_per_try * i, ifname);
1894 msec_per_try * i, ifname);
1895 break; 1918 break;
1896 } 1919 }
1897 1920
@@ -1918,8 +1941,8 @@ static void pktgen_change_name(struct net_device *dev)
1918 &pktgen_if_fops, 1941 &pktgen_if_fops,
1919 pkt_dev); 1942 pkt_dev);
1920 if (!pkt_dev->entry) 1943 if (!pkt_dev->entry)
1921 printk(KERN_ERR "pktgen: can't move proc " 1944 pr_err("can't move proc entry for '%s'\n",
1922 " entry for '%s'\n", dev->name); 1945 dev->name);
1923 break; 1946 break;
1924 } 1947 }
1925 } 1948 }
@@ -1983,15 +2006,15 @@ static int pktgen_setup_dev(struct pktgen_dev *pkt_dev, const char *ifname)
1983 2006
1984 odev = pktgen_dev_get_by_name(pkt_dev, ifname); 2007 odev = pktgen_dev_get_by_name(pkt_dev, ifname);
1985 if (!odev) { 2008 if (!odev) {
1986 printk(KERN_ERR "pktgen: no such netdevice: \"%s\"\n", ifname); 2009 pr_err("no such netdevice: \"%s\"\n", ifname);
1987 return -ENODEV; 2010 return -ENODEV;
1988 } 2011 }
1989 2012
1990 if (odev->type != ARPHRD_ETHER) { 2013 if (odev->type != ARPHRD_ETHER) {
1991 printk(KERN_ERR "pktgen: not an ethernet device: \"%s\"\n", ifname); 2014 pr_err("not an ethernet device: \"%s\"\n", ifname);
1992 err = -EINVAL; 2015 err = -EINVAL;
1993 } else if (!netif_running(odev)) { 2016 } else if (!netif_running(odev)) {
1994 printk(KERN_ERR "pktgen: device is down: \"%s\"\n", ifname); 2017 pr_err("device is down: \"%s\"\n", ifname);
1995 err = -ENETDOWN; 2018 err = -ENETDOWN;
1996 } else { 2019 } else {
1997 pkt_dev->odev = odev; 2020 pkt_dev->odev = odev;
@@ -2010,8 +2033,7 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
2010 int ntxq; 2033 int ntxq;
2011 2034
2012 if (!pkt_dev->odev) { 2035 if (!pkt_dev->odev) {
2013 printk(KERN_ERR "pktgen: ERROR: pkt_dev->odev == NULL in " 2036 pr_err("ERROR: pkt_dev->odev == NULL in setup_inject\n");
2014 "setup_inject.\n");
2015 sprintf(pkt_dev->result, 2037 sprintf(pkt_dev->result,
2016 "ERROR: pkt_dev->odev == NULL in setup_inject.\n"); 2038 "ERROR: pkt_dev->odev == NULL in setup_inject.\n");
2017 return; 2039 return;
@@ -2021,19 +2043,15 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
2021 ntxq = pkt_dev->odev->real_num_tx_queues; 2043 ntxq = pkt_dev->odev->real_num_tx_queues;
2022 2044
2023 if (ntxq <= pkt_dev->queue_map_min) { 2045 if (ntxq <= pkt_dev->queue_map_min) {
2024 printk(KERN_WARNING "pktgen: WARNING: Requested " 2046 pr_warning("WARNING: Requested queue_map_min (zero-based) (%d) exceeds valid range [0 - %d] for (%d) queues on %s, resetting\n",
2025 "queue_map_min (zero-based) (%d) exceeds valid range " 2047 pkt_dev->queue_map_min, (ntxq ?: 1) - 1, ntxq,
2026 "[0 - %d] for (%d) queues on %s, resetting\n", 2048 pkt_dev->odevname);
2027 pkt_dev->queue_map_min, (ntxq ?: 1) - 1, ntxq,
2028 pkt_dev->odevname);
2029 pkt_dev->queue_map_min = ntxq - 1; 2049 pkt_dev->queue_map_min = ntxq - 1;
2030 } 2050 }
2031 if (pkt_dev->queue_map_max >= ntxq) { 2051 if (pkt_dev->queue_map_max >= ntxq) {
2032 printk(KERN_WARNING "pktgen: WARNING: Requested " 2052 pr_warning("WARNING: Requested queue_map_max (zero-based) (%d) exceeds valid range [0 - %d] for (%d) queues on %s, resetting\n",
2033 "queue_map_max (zero-based) (%d) exceeds valid range " 2053 pkt_dev->queue_map_max, (ntxq ?: 1) - 1, ntxq,
2034 "[0 - %d] for (%d) queues on %s, resetting\n", 2054 pkt_dev->odevname);
2035 pkt_dev->queue_map_max, (ntxq ?: 1) - 1, ntxq,
2036 pkt_dev->odevname);
2037 pkt_dev->queue_map_max = ntxq - 1; 2055 pkt_dev->queue_map_max = ntxq - 1;
2038 } 2056 }
2039 2057
@@ -2093,8 +2111,7 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
2093 } 2111 }
2094 rcu_read_unlock(); 2112 rcu_read_unlock();
2095 if (err) 2113 if (err)
2096 printk(KERN_ERR "pktgen: ERROR: IPv6 link " 2114 pr_err("ERROR: IPv6 link address not available\n");
2097 "address not availble.\n");
2098 } 2115 }
2099#endif 2116#endif
2100 } else { 2117 } else {
@@ -2142,15 +2159,15 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
2142 hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); 2159 hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
2143 hrtimer_set_expires(&t.timer, spin_until); 2160 hrtimer_set_expires(&t.timer, spin_until);
2144 2161
2145 remaining = ktime_to_us(hrtimer_expires_remaining(&t.timer)); 2162 remaining = ktime_to_ns(hrtimer_expires_remaining(&t.timer));
2146 if (remaining <= 0) { 2163 if (remaining <= 0) {
2147 pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay); 2164 pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay);
2148 return; 2165 return;
2149 } 2166 }
2150 2167
2151 start_time = ktime_now(); 2168 start_time = ktime_now();
2152 if (remaining < 100) 2169 if (remaining < 100000)
2153 udelay(remaining); /* really small just spin */ 2170 ndelay(remaining); /* really small just spin */
2154 else { 2171 else {
2155 /* see do_nanosleep */ 2172 /* see do_nanosleep */
2156 hrtimer_init_sleeper(&t, current); 2173 hrtimer_init_sleeper(&t, current);
@@ -2528,8 +2545,8 @@ static int process_ipsec(struct pktgen_dev *pkt_dev,
2528 if (nhead > 0) { 2545 if (nhead > 0) {
2529 ret = pskb_expand_head(skb, nhead, 0, GFP_ATOMIC); 2546 ret = pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);
2530 if (ret < 0) { 2547 if (ret < 0) {
2531 printk(KERN_ERR "Error expanding " 2548 pr_err("Error expanding ipsec packet %d\n",
2532 "ipsec packet %d\n", ret); 2549 ret);
2533 goto err; 2550 goto err;
2534 } 2551 }
2535 } 2552 }
@@ -2538,8 +2555,7 @@ static int process_ipsec(struct pktgen_dev *pkt_dev,
2538 skb_pull(skb, ETH_HLEN); 2555 skb_pull(skb, ETH_HLEN);
2539 ret = pktgen_output_ipsec(skb, pkt_dev); 2556 ret = pktgen_output_ipsec(skb, pkt_dev);
2540 if (ret) { 2557 if (ret) {
2541 printk(KERN_ERR "Error creating ipsec " 2558 pr_err("Error creating ipsec packet %d\n", ret);
2542 "packet %d\n", ret);
2543 goto err; 2559 goto err;
2544 } 2560 }
2545 /* restore ll */ 2561 /* restore ll */
@@ -3015,8 +3031,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
3015 if (datalen < sizeof(struct pktgen_hdr)) { 3031 if (datalen < sizeof(struct pktgen_hdr)) {
3016 datalen = sizeof(struct pktgen_hdr); 3032 datalen = sizeof(struct pktgen_hdr);
3017 if (net_ratelimit()) 3033 if (net_ratelimit())
3018 printk(KERN_INFO "pktgen: increased datalen to %d\n", 3034 pr_info("increased datalen to %d\n", datalen);
3019 datalen);
3020 } 3035 }
3021 3036
3022 udph->source = htons(pkt_dev->cur_udp_src); 3037 udph->source = htons(pkt_dev->cur_udp_src);
@@ -3143,7 +3158,7 @@ static void pktgen_run(struct pktgen_thread *t)
3143 struct pktgen_dev *pkt_dev; 3158 struct pktgen_dev *pkt_dev;
3144 int started = 0; 3159 int started = 0;
3145 3160
3146 pr_debug("pktgen: entering pktgen_run. %p\n", t); 3161 func_enter();
3147 3162
3148 if_lock(t); 3163 if_lock(t);
3149 list_for_each_entry(pkt_dev, &t->if_list, list) { 3164 list_for_each_entry(pkt_dev, &t->if_list, list) {
@@ -3176,7 +3191,7 @@ static void pktgen_stop_all_threads_ifs(void)
3176{ 3191{
3177 struct pktgen_thread *t; 3192 struct pktgen_thread *t;
3178 3193
3179 pr_debug("pktgen: entering pktgen_stop_all_threads_ifs.\n"); 3194 func_enter();
3180 3195
3181 mutex_lock(&pktgen_thread_lock); 3196 mutex_lock(&pktgen_thread_lock);
3182 3197
@@ -3241,7 +3256,7 @@ static void pktgen_run_all_threads(void)
3241{ 3256{
3242 struct pktgen_thread *t; 3257 struct pktgen_thread *t;
3243 3258
3244 pr_debug("pktgen: entering pktgen_run_all_threads.\n"); 3259 func_enter();
3245 3260
3246 mutex_lock(&pktgen_thread_lock); 3261 mutex_lock(&pktgen_thread_lock);
3247 3262
@@ -3260,7 +3275,7 @@ static void pktgen_reset_all_threads(void)
3260{ 3275{
3261 struct pktgen_thread *t; 3276 struct pktgen_thread *t;
3262 3277
3263 pr_debug("pktgen: entering pktgen_reset_all_threads.\n"); 3278 func_enter();
3264 3279
3265 mutex_lock(&pktgen_thread_lock); 3280 mutex_lock(&pktgen_thread_lock);
3266 3281
@@ -3310,8 +3325,8 @@ static int pktgen_stop_device(struct pktgen_dev *pkt_dev)
3310 int nr_frags = pkt_dev->skb ? skb_shinfo(pkt_dev->skb)->nr_frags : -1; 3325 int nr_frags = pkt_dev->skb ? skb_shinfo(pkt_dev->skb)->nr_frags : -1;
3311 3326
3312 if (!pkt_dev->running) { 3327 if (!pkt_dev->running) {
3313 printk(KERN_WARNING "pktgen: interface: %s is already " 3328 pr_warning("interface: %s is already stopped\n",
3314 "stopped\n", pkt_dev->odevname); 3329 pkt_dev->odevname);
3315 return -EINVAL; 3330 return -EINVAL;
3316 } 3331 }
3317 3332
@@ -3347,7 +3362,7 @@ static void pktgen_stop(struct pktgen_thread *t)
3347{ 3362{
3348 struct pktgen_dev *pkt_dev; 3363 struct pktgen_dev *pkt_dev;
3349 3364
3350 pr_debug("pktgen: entering pktgen_stop\n"); 3365 func_enter();
3351 3366
3352 if_lock(t); 3367 if_lock(t);
3353 3368
@@ -3367,7 +3382,7 @@ static void pktgen_rem_one_if(struct pktgen_thread *t)
3367 struct list_head *q, *n; 3382 struct list_head *q, *n;
3368 struct pktgen_dev *cur; 3383 struct pktgen_dev *cur;
3369 3384
3370 pr_debug("pktgen: entering pktgen_rem_one_if\n"); 3385 func_enter();
3371 3386
3372 if_lock(t); 3387 if_lock(t);
3373 3388
@@ -3393,9 +3408,10 @@ static void pktgen_rem_all_ifs(struct pktgen_thread *t)
3393 struct list_head *q, *n; 3408 struct list_head *q, *n;
3394 struct pktgen_dev *cur; 3409 struct pktgen_dev *cur;
3395 3410
3411 func_enter();
3412
3396 /* Remove all devices, free mem */ 3413 /* Remove all devices, free mem */
3397 3414
3398 pr_debug("pktgen: entering pktgen_rem_all_ifs\n");
3399 if_lock(t); 3415 if_lock(t);
3400 3416
3401 list_for_each_safe(q, n, &t->if_list) { 3417 list_for_each_safe(q, n, &t->if_list) {
@@ -3477,8 +3493,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
3477 3493
3478 pkt_dev->skb = fill_packet(odev, pkt_dev); 3494 pkt_dev->skb = fill_packet(odev, pkt_dev);
3479 if (pkt_dev->skb == NULL) { 3495 if (pkt_dev->skb == NULL) {
3480 printk(KERN_ERR "pktgen: ERROR: couldn't " 3496 pr_err("ERROR: couldn't allocate skb in fill_packet\n");
3481 "allocate skb in fill_packet.\n");
3482 schedule(); 3497 schedule();
3483 pkt_dev->clone_count--; /* back out increment, OOM */ 3498 pkt_dev->clone_count--; /* back out increment, OOM */
3484 return; 3499 return;
@@ -3558,8 +3573,7 @@ static int pktgen_thread_worker(void *arg)
3558 init_waitqueue_head(&t->queue); 3573 init_waitqueue_head(&t->queue);
3559 complete(&t->start_done); 3574 complete(&t->start_done);
3560 3575
3561 pr_debug("pktgen: starting pktgen/%d: pid=%d\n", 3576 pr_debug("starting pktgen/%d: pid=%d\n", cpu, task_pid_nr(current));
3562 cpu, task_pid_nr(current));
3563 3577
3564 set_current_state(TASK_INTERRUPTIBLE); 3578 set_current_state(TASK_INTERRUPTIBLE);
3565 3579
@@ -3612,13 +3626,13 @@ static int pktgen_thread_worker(void *arg)
3612 set_current_state(TASK_INTERRUPTIBLE); 3626 set_current_state(TASK_INTERRUPTIBLE);
3613 } 3627 }
3614 3628
3615 pr_debug("pktgen: %s stopping all device\n", t->tsk->comm); 3629 pr_debug("%s stopping all device\n", t->tsk->comm);
3616 pktgen_stop(t); 3630 pktgen_stop(t);
3617 3631
3618 pr_debug("pktgen: %s removing all device\n", t->tsk->comm); 3632 pr_debug("%s removing all device\n", t->tsk->comm);
3619 pktgen_rem_all_ifs(t); 3633 pktgen_rem_all_ifs(t);
3620 3634
3621 pr_debug("pktgen: %s removing thread.\n", t->tsk->comm); 3635 pr_debug("%s removing thread\n", t->tsk->comm);
3622 pktgen_rem_thread(t); 3636 pktgen_rem_thread(t);
3623 3637
3624 return 0; 3638 return 0;
@@ -3642,7 +3656,7 @@ static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t,
3642 } 3656 }
3643 3657
3644 if_unlock(t); 3658 if_unlock(t);
3645 pr_debug("pktgen: find_dev(%s) returning %p\n", ifname, pkt_dev); 3659 pr_debug("find_dev(%s) returning %p\n", ifname, pkt_dev);
3646 return pkt_dev; 3660 return pkt_dev;
3647} 3661}
3648 3662
@@ -3658,8 +3672,7 @@ static int add_dev_to_thread(struct pktgen_thread *t,
3658 if_lock(t); 3672 if_lock(t);
3659 3673
3660 if (pkt_dev->pg_thread) { 3674 if (pkt_dev->pg_thread) {
3661 printk(KERN_ERR "pktgen: ERROR: already assigned " 3675 pr_err("ERROR: already assigned to a thread\n");
3662 "to a thread.\n");
3663 rv = -EBUSY; 3676 rv = -EBUSY;
3664 goto out; 3677 goto out;
3665 } 3678 }
@@ -3685,7 +3698,7 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
3685 3698
3686 pkt_dev = __pktgen_NN_threads(ifname, FIND); 3699 pkt_dev = __pktgen_NN_threads(ifname, FIND);
3687 if (pkt_dev) { 3700 if (pkt_dev) {
3688 printk(KERN_ERR "pktgen: ERROR: interface already used.\n"); 3701 pr_err("ERROR: interface already used\n");
3689 return -EBUSY; 3702 return -EBUSY;
3690 } 3703 }
3691 3704
@@ -3730,7 +3743,7 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
3730 pkt_dev->entry = proc_create_data(ifname, 0600, pg_proc_dir, 3743 pkt_dev->entry = proc_create_data(ifname, 0600, pg_proc_dir,
3731 &pktgen_if_fops, pkt_dev); 3744 &pktgen_if_fops, pkt_dev);
3732 if (!pkt_dev->entry) { 3745 if (!pkt_dev->entry) {
3733 printk(KERN_ERR "pktgen: cannot create %s/%s procfs entry.\n", 3746 pr_err("cannot create %s/%s procfs entry\n",
3734 PG_PROC_DIR, ifname); 3747 PG_PROC_DIR, ifname);
3735 err = -EINVAL; 3748 err = -EINVAL;
3736 goto out2; 3749 goto out2;
@@ -3761,8 +3774,7 @@ static int __init pktgen_create_thread(int cpu)
3761 t = kzalloc_node(sizeof(struct pktgen_thread), GFP_KERNEL, 3774 t = kzalloc_node(sizeof(struct pktgen_thread), GFP_KERNEL,
3762 cpu_to_node(cpu)); 3775 cpu_to_node(cpu));
3763 if (!t) { 3776 if (!t) {
3764 printk(KERN_ERR "pktgen: ERROR: out of memory, can't " 3777 pr_err("ERROR: out of memory, can't create new thread\n");
3765 "create new thread.\n");
3766 return -ENOMEM; 3778 return -ENOMEM;
3767 } 3779 }
3768 3780
@@ -3776,8 +3788,7 @@ static int __init pktgen_create_thread(int cpu)
3776 3788
3777 p = kthread_create(pktgen_thread_worker, t, "kpktgend_%d", cpu); 3789 p = kthread_create(pktgen_thread_worker, t, "kpktgend_%d", cpu);
3778 if (IS_ERR(p)) { 3790 if (IS_ERR(p)) {
3779 printk(KERN_ERR "pktgen: kernel_thread() failed " 3791 pr_err("kernel_thread() failed for cpu %d\n", t->cpu);
3780 "for cpu %d\n", t->cpu);
3781 list_del(&t->th_list); 3792 list_del(&t->th_list);
3782 kfree(t); 3793 kfree(t);
3783 return PTR_ERR(p); 3794 return PTR_ERR(p);
@@ -3788,7 +3799,7 @@ static int __init pktgen_create_thread(int cpu)
3788 pe = proc_create_data(t->tsk->comm, 0600, pg_proc_dir, 3799 pe = proc_create_data(t->tsk->comm, 0600, pg_proc_dir,
3789 &pktgen_thread_fops, t); 3800 &pktgen_thread_fops, t);
3790 if (!pe) { 3801 if (!pe) {
3791 printk(KERN_ERR "pktgen: cannot create %s/%s procfs entry.\n", 3802 pr_err("cannot create %s/%s procfs entry\n",
3792 PG_PROC_DIR, t->tsk->comm); 3803 PG_PROC_DIR, t->tsk->comm);
3793 kthread_stop(p); 3804 kthread_stop(p);
3794 list_del(&t->th_list); 3805 list_del(&t->th_list);
@@ -3822,11 +3833,10 @@ static int pktgen_remove_device(struct pktgen_thread *t,
3822 struct pktgen_dev *pkt_dev) 3833 struct pktgen_dev *pkt_dev)
3823{ 3834{
3824 3835
3825 pr_debug("pktgen: remove_device pkt_dev=%p\n", pkt_dev); 3836 pr_debug("remove_device pkt_dev=%p\n", pkt_dev);
3826 3837
3827 if (pkt_dev->running) { 3838 if (pkt_dev->running) {
3828 printk(KERN_WARNING "pktgen: WARNING: trying to remove a " 3839 pr_warning("WARNING: trying to remove a running interface, stopping it now\n");
3829 "running interface, stopping it now.\n");
3830 pktgen_stop_device(pkt_dev); 3840 pktgen_stop_device(pkt_dev);
3831 } 3841 }
3832 3842
@@ -3857,7 +3867,7 @@ static int __init pg_init(void)
3857 int cpu; 3867 int cpu;
3858 struct proc_dir_entry *pe; 3868 struct proc_dir_entry *pe;
3859 3869
3860 printk(KERN_INFO "%s", version); 3870 pr_info("%s", version);
3861 3871
3862 pg_proc_dir = proc_mkdir(PG_PROC_DIR, init_net.proc_net); 3872 pg_proc_dir = proc_mkdir(PG_PROC_DIR, init_net.proc_net);
3863 if (!pg_proc_dir) 3873 if (!pg_proc_dir)
@@ -3865,8 +3875,7 @@ static int __init pg_init(void)
3865 3875
3866 pe = proc_create(PGCTRL, 0600, pg_proc_dir, &pktgen_fops); 3876 pe = proc_create(PGCTRL, 0600, pg_proc_dir, &pktgen_fops);
3867 if (pe == NULL) { 3877 if (pe == NULL) {
3868 printk(KERN_ERR "pktgen: ERROR: cannot create %s " 3878 pr_err("ERROR: cannot create %s procfs entry\n", PGCTRL);
3869 "procfs entry.\n", PGCTRL);
3870 proc_net_remove(&init_net, PG_PROC_DIR); 3879 proc_net_remove(&init_net, PG_PROC_DIR);
3871 return -EINVAL; 3880 return -EINVAL;
3872 } 3881 }
@@ -3879,13 +3888,12 @@ static int __init pg_init(void)
3879 3888
3880 err = pktgen_create_thread(cpu); 3889 err = pktgen_create_thread(cpu);
3881 if (err) 3890 if (err)
3882 printk(KERN_WARNING "pktgen: WARNING: Cannot create " 3891 pr_warning("WARNING: Cannot create thread for cpu %d (%d)\n",
3883 "thread for cpu %d (%d)\n", cpu, err); 3892 cpu, err);
3884 } 3893 }
3885 3894
3886 if (list_empty(&pktgen_threads)) { 3895 if (list_empty(&pktgen_threads)) {
3887 printk(KERN_ERR "pktgen: ERROR: Initialization failed for " 3896 pr_err("ERROR: Initialization failed for all threads\n");
3888 "all threads\n");
3889 unregister_netdevice_notifier(&pktgen_notifier_block); 3897 unregister_netdevice_notifier(&pktgen_notifier_block);
3890 remove_proc_entry(PGCTRL, pg_proc_dir); 3898 remove_proc_entry(PGCTRL, pg_proc_dir);
3891 proc_net_remove(&init_net, PG_PROC_DIR); 3899 proc_net_remove(&init_net, PG_PROC_DIR);
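
The pktgen hunks above all follow one pattern: open-coded printk(KERN_ERR "pktgen: ...") calls become pr_err()/pr_warning()/pr_info(), and the repeated "entering <function>" debug lines collapse into a func_enter() macro. Both depend on the file defining pr_fmt() so the "pktgen: " prefix is added in one place. A minimal sketch of the idiom; the macro bodies here are the conventional ones, not quoted from this commit:

	/* must come before any include that pulls in the printk macros */
	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

	#include <linux/kernel.h>
	#include <linux/module.h>

	/* conventional helper for the "entering <func>" debug lines */
	#define func_enter() pr_debug("entering %s\n", __func__)

	static int example_add(void)
	{
		func_enter();		/* "pktgen: entering example_add" */
		pr_err("ERROR: interface already used\n");
		return -EBUSY;		/* message carried the prefix */
	}
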
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 1a2af24e9e3d..f78d821bd935 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -579,7 +579,7 @@ static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
579} 579}
580 580
581static void copy_rtnl_link_stats(struct rtnl_link_stats *a, 581static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
582 const struct net_device_stats *b) 582 const struct rtnl_link_stats64 *b)
583{ 583{
584 a->rx_packets = b->rx_packets; 584 a->rx_packets = b->rx_packets;
585 a->tx_packets = b->tx_packets; 585 a->tx_packets = b->tx_packets;
@@ -610,7 +610,7 @@ static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
610 a->tx_compressed = b->tx_compressed; 610 a->tx_compressed = b->tx_compressed;
611} 611}
612 612
613static void copy_rtnl_link_stats64(void *v, const struct net_device_stats *b) 613static void copy_rtnl_link_stats64(void *v, const struct rtnl_link_stats64 *b)
614{ 614{
615 struct rtnl_link_stats64 a; 615 struct rtnl_link_stats64 a;
616 616
@@ -686,7 +686,7 @@ static size_t rtnl_port_size(const struct net_device *dev)
686 return port_self_size; 686 return port_self_size;
687} 687}
688 688
689static inline size_t if_nlmsg_size(const struct net_device *dev) 689static noinline size_t if_nlmsg_size(const struct net_device *dev)
690{ 690{
691 return NLMSG_ALIGN(sizeof(struct ifinfomsg)) 691 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
692 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */ 692 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
@@ -791,7 +791,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
791{ 791{
792 struct ifinfomsg *ifm; 792 struct ifinfomsg *ifm;
793 struct nlmsghdr *nlh; 793 struct nlmsghdr *nlh;
794 const struct net_device_stats *stats; 794 struct rtnl_link_stats64 temp;
795 const struct rtnl_link_stats64 *stats;
795 struct nlattr *attr; 796 struct nlattr *attr;
796 797
797 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags); 798 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
@@ -847,7 +848,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
847 if (attr == NULL) 848 if (attr == NULL)
848 goto nla_put_failure; 849 goto nla_put_failure;
849 850
850 stats = dev_get_stats(dev); 851 stats = dev_get_stats(dev, &temp);
851 copy_rtnl_link_stats(nla_data(attr), stats); 852 copy_rtnl_link_stats(nla_data(attr), stats);
852 853
853 attr = nla_reserve(skb, IFLA_STATS64, 854 attr = nla_reserve(skb, IFLA_STATS64,
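
The rtnetlink changes track a new calling convention for dev_get_stats(): rather than returning a pointer into a device-owned struct net_device_stats, it now takes caller-provided struct rtnl_link_stats64 scratch space and returns a pointer to full 64-bit counters, which may or may not live in that scratch buffer. A hedged sketch of a caller under that convention:

	#include <linux/netdevice.h>

	static void report_stats(struct net_device *dev)
	{
		struct rtnl_link_stats64 temp;
		const struct rtnl_link_stats64 *stats;

		/* may return &temp or a driver-internal 64-bit block */
		stats = dev_get_stats(dev, &temp);
		pr_info("%s: rx %llu tx %llu\n", dev->name,
			(unsigned long long)stats->rx_packets,
			(unsigned long long)stats->tx_packets);
	}
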
diff --git a/net/core/scm.c b/net/core/scm.c
index b88f6f9d0b97..413cab89017d 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -130,6 +130,7 @@ void __scm_destroy(struct scm_cookie *scm)
130 } 130 }
131 } 131 }
132} 132}
133EXPORT_SYMBOL(__scm_destroy);
133 134
134int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p) 135int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p)
135{ 136{
@@ -170,6 +171,30 @@ int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p)
170 err = scm_check_creds(&p->creds); 171 err = scm_check_creds(&p->creds);
171 if (err) 172 if (err)
172 goto error; 173 goto error;
174
175 if (pid_vnr(p->pid) != p->creds.pid) {
176 struct pid *pid;
177 err = -ESRCH;
178 pid = find_get_pid(p->creds.pid);
179 if (!pid)
180 goto error;
181 put_pid(p->pid);
182 p->pid = pid;
183 }
184
185 if ((p->cred->euid != p->creds.uid) ||
186 (p->cred->egid != p->creds.gid)) {
187 struct cred *cred;
188 err = -ENOMEM;
189 cred = prepare_creds();
190 if (!cred)
191 goto error;
192
193 cred->uid = cred->euid = p->creds.uid;
194 cred->gid = cred->egid = p->creds.gid;
195 put_cred(p->cred);
196 p->cred = cred;
197 }
173 break; 198 break;
174 default: 199 default:
175 goto error; 200 goto error;
@@ -187,6 +212,7 @@ error:
187 scm_destroy(p); 212 scm_destroy(p);
188 return err; 213 return err;
189} 214}
215EXPORT_SYMBOL(__scm_send);
190 216
191int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data) 217int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
192{ 218{
@@ -225,6 +251,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
225out: 251out:
226 return err; 252 return err;
227} 253}
254EXPORT_SYMBOL(put_cmsg);
228 255
229void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm) 256void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
230{ 257{
@@ -294,6 +321,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
294 */ 321 */
295 __scm_destroy(scm); 322 __scm_destroy(scm);
296} 323}
324EXPORT_SYMBOL(scm_detach_fds);
297 325
298struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl) 326struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl)
299{ 327{
@@ -311,9 +339,4 @@ struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl)
311 } 339 }
312 return new_fpl; 340 return new_fpl;
313} 341}
314
315EXPORT_SYMBOL(__scm_destroy);
316EXPORT_SYMBOL(__scm_send);
317EXPORT_SYMBOL(put_cmsg);
318EXPORT_SYMBOL(scm_detach_fds);
319EXPORT_SYMBOL(scm_fp_dup); 342EXPORT_SYMBOL(scm_fp_dup);
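
The new __scm_send() logic stops treating SCM_CREDENTIALS as plain data: the claimed pid is resolved to a struct pid and mismatched uid/gid values get a freshly prepared struct cred, so later SO_PEERCRED readers see namespace-aware identities. The userspace side of the control message is unchanged; a minimal sender, using only the standard sockets API:

	#define _GNU_SOURCE		/* struct ucred */
	#include <string.h>
	#include <sys/socket.h>
	#include <sys/uio.h>
	#include <unistd.h>

	static ssize_t send_with_creds(int fd, const void *buf, size_t len)
	{
		struct ucred creds = {
			.pid = getpid(), .uid = getuid(), .gid = getgid(),
		};
		char cbuf[CMSG_SPACE(sizeof(creds))];
		struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
		struct msghdr msg = {
			.msg_iov = &iov, .msg_iovlen = 1,
			.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
		};
		struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);

		cm->cmsg_level = SOL_SOCKET;
		cm->cmsg_type = SCM_CREDENTIALS;  /* validated by __scm_send() */
		cm->cmsg_len = CMSG_LEN(sizeof(creds));
		memcpy(CMSG_DATA(cm), &creds, sizeof(creds));
		return sendmsg(fd, &msg, 0);
	}
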
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 9f07e749d7b1..3a2513f0d0c3 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -532,6 +532,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
532 new->ip_summed = old->ip_summed; 532 new->ip_summed = old->ip_summed;
533 skb_copy_queue_mapping(new, old); 533 skb_copy_queue_mapping(new, old);
534 new->priority = old->priority; 534 new->priority = old->priority;
535 new->deliver_no_wcard = old->deliver_no_wcard;
535#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE) 536#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
536 new->ipvs_property = old->ipvs_property; 537 new->ipvs_property = old->ipvs_property;
537#endif 538#endif
@@ -569,7 +570,6 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
569 C(len); 570 C(len);
570 C(data_len); 571 C(data_len);
571 C(mac_len); 572 C(mac_len);
572 C(rxhash);
573 n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len; 573 n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
574 n->cloned = 1; 574 n->cloned = 1;
575 n->nohdr = 0; 575 n->nohdr = 0;
@@ -817,7 +817,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
817 memcpy(data + nhead, skb->head, skb->tail - skb->head); 817 memcpy(data + nhead, skb->head, skb->tail - skb->head);
818#endif 818#endif
819 memcpy(data + size, skb_end_pointer(skb), 819 memcpy(data + size, skb_end_pointer(skb),
820 sizeof(struct skb_shared_info)); 820 offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));
821 821
822 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 822 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
823 get_page(skb_shinfo(skb)->frags[i].page); 823 get_page(skb_shinfo(skb)->frags[i].page);
@@ -843,7 +843,9 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
843 skb->network_header += off; 843 skb->network_header += off;
844 if (skb_mac_header_was_set(skb)) 844 if (skb_mac_header_was_set(skb))
845 skb->mac_header += off; 845 skb->mac_header += off;
846 skb->csum_start += nhead; 846 /* Only adjust this if it actually is csum_start rather than csum */
847 if (skb->ip_summed == CHECKSUM_PARTIAL)
848 skb->csum_start += nhead;
847 skb->cloned = 0; 849 skb->cloned = 0;
848 skb->hdr_len = 0; 850 skb->hdr_len = 0;
849 skb->nohdr = 0; 851 skb->nohdr = 0;
@@ -930,7 +932,8 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
930 copy_skb_header(n, skb); 932 copy_skb_header(n, skb);
931 933
932 off = newheadroom - oldheadroom; 934 off = newheadroom - oldheadroom;
933 n->csum_start += off; 935 if (n->ip_summed == CHECKSUM_PARTIAL)
936 n->csum_start += off;
934#ifdef NET_SKBUFF_DATA_USES_OFFSET 937#ifdef NET_SKBUFF_DATA_USES_OFFSET
935 n->transport_header += off; 938 n->transport_header += off;
936 n->network_header += off; 939 n->network_header += off;
@@ -2483,7 +2486,6 @@ unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
2483 skb_postpull_rcsum(skb, skb->data, len); 2486 skb_postpull_rcsum(skb, skb->data, len);
2484 return skb->data += len; 2487 return skb->data += len;
2485} 2488}
2486
2487EXPORT_SYMBOL_GPL(skb_pull_rcsum); 2489EXPORT_SYMBOL_GPL(skb_pull_rcsum);
2488 2490
2489/** 2491/**
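
Both hunks that touch csum_start encode the same invariant: skb->csum_start is only an offset into the packet when skb->ip_summed == CHECKSUM_PARTIAL; for other ip_summed values the same union member holds a computed checksum, which must not be shifted when the data moves. A small illustrative helper (hypothetical name, not part of the patch) that states the rule:

	#include <linux/skbuff.h>

	/* adjust csum_start after packet data moved by 'off' bytes,
	 * but only when it really is a position, not a checksum value */
	static inline void skb_csum_start_adjust(struct sk_buff *skb, int off)
	{
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			skb->csum_start += off;
	}
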
diff --git a/net/core/sock.c b/net/core/sock.c
index 2cf7f9f7e775..b05b9b6ddb87 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -110,6 +110,7 @@
110#include <linux/tcp.h> 110#include <linux/tcp.h>
111#include <linux/init.h> 111#include <linux/init.h>
112#include <linux/highmem.h> 112#include <linux/highmem.h>
113#include <linux/user_namespace.h>
113 114
114#include <asm/uaccess.h> 115#include <asm/uaccess.h>
115#include <asm/system.h> 116#include <asm/system.h>
@@ -156,7 +157,7 @@ static const char *const af_family_key_strings[AF_MAX+1] = {
156 "sk_lock-27" , "sk_lock-28" , "sk_lock-AF_CAN" , 157 "sk_lock-27" , "sk_lock-28" , "sk_lock-AF_CAN" ,
157 "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" , 158 "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" ,
158 "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" , 159 "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" ,
159 "sk_lock-AF_IEEE802154", 160 "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" ,
160 "sk_lock-AF_MAX" 161 "sk_lock-AF_MAX"
161}; 162};
162static const char *const af_family_slock_key_strings[AF_MAX+1] = { 163static const char *const af_family_slock_key_strings[AF_MAX+1] = {
@@ -172,7 +173,7 @@ static const char *const af_family_slock_key_strings[AF_MAX+1] = {
172 "slock-27" , "slock-28" , "slock-AF_CAN" , 173 "slock-27" , "slock-28" , "slock-AF_CAN" ,
173 "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" , 174 "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" ,
174 "slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" , 175 "slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" ,
175 "slock-AF_IEEE802154", 176 "slock-AF_IEEE802154", "slock-AF_CAIF" ,
176 "slock-AF_MAX" 177 "slock-AF_MAX"
177}; 178};
178static const char *const af_family_clock_key_strings[AF_MAX+1] = { 179static const char *const af_family_clock_key_strings[AF_MAX+1] = {
@@ -188,7 +189,7 @@ static const char *const af_family_clock_key_strings[AF_MAX+1] = {
188 "clock-27" , "clock-28" , "clock-AF_CAN" , 189 "clock-27" , "clock-28" , "clock-AF_CAN" ,
189 "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" , 190 "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" ,
190 "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" , 191 "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" ,
191 "clock-AF_IEEE802154", 192 "clock-AF_IEEE802154", "clock-AF_CAIF" ,
192 "clock-AF_MAX" 193 "clock-AF_MAX"
193}; 194};
194 195
@@ -749,6 +750,20 @@ set_rcvbuf:
749EXPORT_SYMBOL(sock_setsockopt); 750EXPORT_SYMBOL(sock_setsockopt);
750 751
751 752
753void cred_to_ucred(struct pid *pid, const struct cred *cred,
754 struct ucred *ucred)
755{
756 ucred->pid = pid_vnr(pid);
757 ucred->uid = ucred->gid = -1;
758 if (cred) {
759 struct user_namespace *current_ns = current_user_ns();
760
761 ucred->uid = user_ns_map_uid(current_ns, cred, cred->euid);
762 ucred->gid = user_ns_map_gid(current_ns, cred, cred->egid);
763 }
764}
765EXPORT_SYMBOL_GPL(cred_to_ucred);
766
752int sock_getsockopt(struct socket *sock, int level, int optname, 767int sock_getsockopt(struct socket *sock, int level, int optname,
753 char __user *optval, int __user *optlen) 768 char __user *optval, int __user *optlen)
754{ 769{
@@ -901,11 +916,15 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
901 break; 916 break;
902 917
903 case SO_PEERCRED: 918 case SO_PEERCRED:
904 if (len > sizeof(sk->sk_peercred)) 919 {
905 len = sizeof(sk->sk_peercred); 920 struct ucred peercred;
906 if (copy_to_user(optval, &sk->sk_peercred, len)) 921 if (len > sizeof(peercred))
922 len = sizeof(peercred);
923 cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
924 if (copy_to_user(optval, &peercred, len))
907 return -EFAULT; 925 return -EFAULT;
908 goto lenout; 926 goto lenout;
927 }
909 928
910 case SO_PEERNAME: 929 case SO_PEERNAME:
911 { 930 {
@@ -1119,6 +1138,9 @@ static void __sk_free(struct sock *sk)
1119 printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n", 1138 printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
1120 __func__, atomic_read(&sk->sk_omem_alloc)); 1139 __func__, atomic_read(&sk->sk_omem_alloc));
1121 1140
1141 if (sk->sk_peer_cred)
1142 put_cred(sk->sk_peer_cred);
1143 put_pid(sk->sk_peer_pid);
1122 put_net(sock_net(sk)); 1144 put_net(sock_net(sk));
1123 sk_prot_free(sk->sk_prot_creator, sk); 1145 sk_prot_free(sk->sk_prot_creator, sk);
1124} 1146}
@@ -1317,9 +1339,10 @@ EXPORT_SYMBOL(sock_wfree);
1317void sock_rfree(struct sk_buff *skb) 1339void sock_rfree(struct sk_buff *skb)
1318{ 1340{
1319 struct sock *sk = skb->sk; 1341 struct sock *sk = skb->sk;
1342 unsigned int len = skb->truesize;
1320 1343
1321 atomic_sub(skb->truesize, &sk->sk_rmem_alloc); 1344 atomic_sub(len, &sk->sk_rmem_alloc);
1322 sk_mem_uncharge(skb->sk, skb->truesize); 1345 sk_mem_uncharge(sk, len);
1323} 1346}
1324EXPORT_SYMBOL(sock_rfree); 1347EXPORT_SYMBOL(sock_rfree);
1325 1348
@@ -1954,9 +1977,8 @@ void sock_init_data(struct socket *sock, struct sock *sk)
1954 sk->sk_sndmsg_page = NULL; 1977 sk->sk_sndmsg_page = NULL;
1955 sk->sk_sndmsg_off = 0; 1978 sk->sk_sndmsg_off = 0;
1956 1979
1957 sk->sk_peercred.pid = 0; 1980 sk->sk_peer_pid = NULL;
1958 sk->sk_peercred.uid = -1; 1981 sk->sk_peer_cred = NULL;
1959 sk->sk_peercred.gid = -1;
1960 sk->sk_write_pending = 0; 1982 sk->sk_write_pending = 0;
1961 sk->sk_rcvlowat = 1; 1983 sk->sk_rcvlowat = 1;
1962 sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT; 1984 sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
@@ -2210,8 +2232,7 @@ static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
2210#ifdef CONFIG_NET_NS 2232#ifdef CONFIG_NET_NS
2211void sock_prot_inuse_add(struct net *net, struct proto *prot, int val) 2233void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2212{ 2234{
2213 int cpu = smp_processor_id(); 2235 __this_cpu_add(net->core.inuse->val[prot->inuse_idx], val);
2214 per_cpu_ptr(net->core.inuse, cpu)->val[prot->inuse_idx] += val;
2215} 2236}
2216EXPORT_SYMBOL_GPL(sock_prot_inuse_add); 2237EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2217 2238
@@ -2257,7 +2278,7 @@ static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);
2257 2278
2258void sock_prot_inuse_add(struct net *net, struct proto *prot, int val) 2279void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
2259{ 2280{
2260 __get_cpu_var(prot_inuse).val[prot->inuse_idx] += val; 2281 __this_cpu_add(prot_inuse.val[prot->inuse_idx], val);
2261} 2282}
2262EXPORT_SYMBOL_GPL(sock_prot_inuse_add); 2283EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
2263 2284
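
The two sock_prot_inuse_add() rewrites are the same micro-optimisation: replace an explicit this-cpu lookup plus read-modify-write with __this_cpu_add(), which an architecture can turn into a single percpu-addressed add. A hedged sketch of the idiom on a static per-cpu counter:

	#include <linux/percpu.h>

	static DEFINE_PER_CPU(unsigned long, hits);

	static void count_hit_old(void)
	{
		/* open-coded: locate this cpu's slot, then add to it */
		__get_cpu_var(hits) += 1;
	}

	static void count_hit_new(void)
	{
		/* one step; on x86 this can be a single %gs-relative add.
		 * Like __get_cpu_var(), it assumes preemption is disabled. */
		__this_cpu_add(hits, 1);
	}
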
diff --git a/net/core/stream.c b/net/core/stream.c
index cc196f42b8d8..d959e0f41528 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -43,7 +43,6 @@ void sk_stream_write_space(struct sock *sk)
43 rcu_read_unlock(); 43 rcu_read_unlock();
44 } 44 }
45} 45}
46
47EXPORT_SYMBOL(sk_stream_write_space); 46EXPORT_SYMBOL(sk_stream_write_space);
48 47
49/** 48/**
@@ -81,7 +80,6 @@ int sk_stream_wait_connect(struct sock *sk, long *timeo_p)
81 } while (!done); 80 } while (!done);
82 return 0; 81 return 0;
83} 82}
84
85EXPORT_SYMBOL(sk_stream_wait_connect); 83EXPORT_SYMBOL(sk_stream_wait_connect);
86 84
87/** 85/**
@@ -109,7 +107,6 @@ void sk_stream_wait_close(struct sock *sk, long timeout)
109 finish_wait(sk_sleep(sk), &wait); 107 finish_wait(sk_sleep(sk), &wait);
110 } 108 }
111} 109}
112
113EXPORT_SYMBOL(sk_stream_wait_close); 110EXPORT_SYMBOL(sk_stream_wait_close);
114 111
115/** 112/**
@@ -174,7 +171,6 @@ do_interrupted:
174 err = sock_intr_errno(*timeo_p); 171 err = sock_intr_errno(*timeo_p);
175 goto out; 172 goto out;
176} 173}
177
178EXPORT_SYMBOL(sk_stream_wait_memory); 174EXPORT_SYMBOL(sk_stream_wait_memory);
179 175
180int sk_stream_error(struct sock *sk, int flags, int err) 176int sk_stream_error(struct sock *sk, int flags, int err)
@@ -185,7 +181,6 @@ int sk_stream_error(struct sock *sk, int flags, int err)
185 send_sig(SIGPIPE, current, 0); 181 send_sig(SIGPIPE, current, 0);
186 return err; 182 return err;
187} 183}
188
189EXPORT_SYMBOL(sk_stream_error); 184EXPORT_SYMBOL(sk_stream_error);
190 185
191void sk_stream_kill_queues(struct sock *sk) 186void sk_stream_kill_queues(struct sock *sk)
@@ -210,5 +205,4 @@ void sk_stream_kill_queues(struct sock *sk)
210 * have gone away, only the net layer knows can touch it. 205 * have gone away, only the net layer knows can touch it.
211 */ 206 */
212} 207}
213
214EXPORT_SYMBOL(sk_stream_kill_queues); 208EXPORT_SYMBOL(sk_stream_kill_queues);
diff --git a/net/core/timestamping.c b/net/core/timestamping.c
new file mode 100644
index 000000000000..0ae6c22da85b
--- /dev/null
+++ b/net/core/timestamping.c
@@ -0,0 +1,126 @@
1/*
2 * PTP 1588 clock support - support for timestamping in PHY devices
3 *
4 * Copyright (C) 2010 OMICRON electronics GmbH
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20#include <linux/errqueue.h>
21#include <linux/phy.h>
22#include <linux/ptp_classify.h>
23#include <linux/skbuff.h>
24
25static struct sock_filter ptp_filter[] = {
26 PTP_FILTER
27};
28
29static unsigned int classify(struct sk_buff *skb)
30{
31 if (likely(skb->dev &&
32 skb->dev->phydev &&
33 skb->dev->phydev->drv))
34 return sk_run_filter(skb, ptp_filter, ARRAY_SIZE(ptp_filter));
35 else
36 return PTP_CLASS_NONE;
37}
38
39void skb_clone_tx_timestamp(struct sk_buff *skb)
40{
41 struct phy_device *phydev;
42 struct sk_buff *clone;
43 struct sock *sk = skb->sk;
44 unsigned int type;
45
46 if (!sk)
47 return;
48
49 type = classify(skb);
50
51 switch (type) {
52 case PTP_CLASS_V1_IPV4:
53 case PTP_CLASS_V1_IPV6:
54 case PTP_CLASS_V2_IPV4:
55 case PTP_CLASS_V2_IPV6:
56 case PTP_CLASS_V2_L2:
57 case PTP_CLASS_V2_VLAN:
58 phydev = skb->dev->phydev;
59 if (likely(phydev->drv->txtstamp)) {
60 clone = skb_clone(skb, GFP_ATOMIC);
61 if (!clone)
62 return;
63 clone->sk = sk;
64 phydev->drv->txtstamp(phydev, clone, type);
65 }
66 break;
67 default:
68 break;
69 }
70}
71
72void skb_complete_tx_timestamp(struct sk_buff *skb,
73 struct skb_shared_hwtstamps *hwtstamps)
74{
75 struct sock *sk = skb->sk;
76 struct sock_exterr_skb *serr;
77 int err;
78
79 if (!hwtstamps)
80 return;
81
82 *skb_hwtstamps(skb) = *hwtstamps;
83 serr = SKB_EXT_ERR(skb);
84 memset(serr, 0, sizeof(*serr));
85 serr->ee.ee_errno = ENOMSG;
86 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
87 skb->sk = NULL;
88 err = sock_queue_err_skb(sk, skb);
89 if (err)
90 kfree_skb(skb);
91}
92EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);
93
94bool skb_defer_rx_timestamp(struct sk_buff *skb)
95{
96 struct phy_device *phydev;
97 unsigned int type;
98
99 skb_push(skb, ETH_HLEN);
100
101 type = classify(skb);
102
103 skb_pull(skb, ETH_HLEN);
104
105 switch (type) {
106 case PTP_CLASS_V1_IPV4:
107 case PTP_CLASS_V1_IPV6:
108 case PTP_CLASS_V2_IPV4:
109 case PTP_CLASS_V2_IPV6:
110 case PTP_CLASS_V2_L2:
111 case PTP_CLASS_V2_VLAN:
112 phydev = skb->dev->phydev;
113 if (likely(phydev->drv->rxtstamp))
114 return phydev->drv->rxtstamp(phydev, skb, type);
115 break;
116 default:
117 break;
118 }
119
120 return false;
121}
122
123void __init skb_timestamping_init(void)
124{
125 BUG_ON(sk_chk_filter(ptp_filter, ARRAY_SIZE(ptp_filter)));
126}
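
timestamping.c only classifies PTP frames (via a BPF filter built from PTP_FILTER) and hands them to the PHY; the actual timestamping lives in the driver's txtstamp/rxtstamp callbacks. A sketch of how a PHY driver might wire these up; the driver name and bodies are hypothetical, only the callback shapes follow from the code above:

	#include <linux/phy.h>
	#include <linux/skbuff.h>

	/* receives each cloned PTP tx frame; the driver is expected to
	 * attach the hardware timestamp later and hand the clone back
	 * through skb_complete_tx_timestamp() */
	static void foo_txtstamp(struct phy_device *phydev,
				 struct sk_buff *skb, int type)
	{
		/* queue skb until the tx-timestamp interrupt fires ... */
	}

	/* return true to take ownership and deliver the frame once its
	 * rx timestamp is known; false to let delivery continue now */
	static bool foo_rxtstamp(struct phy_device *phydev,
				 struct sk_buff *skb, int type)
	{
		return false;
	}

	static struct phy_driver foo_phy_driver = {
		.name	  = "foo-ptp-phy",	/* illustrative */
		.txtstamp = foo_txtstamp,
		.rxtstamp = foo_rxtstamp,
		/* .match_phy_device, .config_init, ... as usual */
	};
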
diff --git a/net/core/utils.c b/net/core/utils.c
index 838250241d26..f41854470539 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -77,7 +77,6 @@ __be32 in_aton(const char *str)
77 } 77 }
78 return(htonl(l)); 78 return(htonl(l));
79} 79}
80
81EXPORT_SYMBOL(in_aton); 80EXPORT_SYMBOL(in_aton);
82 81
83#define IN6PTON_XDIGIT 0x00010000 82#define IN6PTON_XDIGIT 0x00010000
@@ -162,7 +161,6 @@ out:
162 *end = s; 161 *end = s;
163 return ret; 162 return ret;
164} 163}
165
166EXPORT_SYMBOL(in4_pton); 164EXPORT_SYMBOL(in4_pton);
167 165
168int in6_pton(const char *src, int srclen, 166int in6_pton(const char *src, int srclen,
@@ -280,7 +278,6 @@ out:
280 *end = s; 278 *end = s;
281 return ret; 279 return ret;
282} 280}
283
284EXPORT_SYMBOL(in6_pton); 281EXPORT_SYMBOL(in6_pton);
285 282
286void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb, 283void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c
index 01e4d39fa232..92a6fcb40d7d 100644
--- a/net/dccp/ackvec.c
+++ b/net/dccp/ackvec.c
@@ -82,7 +82,7 @@ int dccp_insert_option_ackvec(struct sock *sk, struct sk_buff *skb)
82 elapsed_time = delta / 10; 82 elapsed_time = delta / 10;
83 83
84 if (elapsed_time != 0 && 84 if (elapsed_time != 0 &&
85 dccp_insert_option_elapsed_time(sk, skb, elapsed_time)) 85 dccp_insert_option_elapsed_time(skb, elapsed_time))
86 return -1; 86 return -1;
87 87
88 avr = dccp_ackvec_record_new(); 88 avr = dccp_ackvec_record_new();
@@ -201,7 +201,7 @@ static inline int dccp_ackvec_set_buf_head_state(struct dccp_ackvec *av,
201 const unsigned int packets, 201 const unsigned int packets,
202 const unsigned char state) 202 const unsigned char state)
203{ 203{
204 unsigned int gap; 204 long gap;
205 long new_head; 205 long new_head;
206 206
207 if (av->av_vec_len + packets > DCCP_MAX_ACKVEC_LEN) 207 if (av->av_vec_len + packets > DCCP_MAX_ACKVEC_LEN)
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index d3235899c7e3..95f752986497 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -715,9 +715,9 @@ static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb)
715 x_recv = htonl(hc->rx_x_recv); 715 x_recv = htonl(hc->rx_x_recv);
716 pinv = htonl(hc->rx_pinv); 716 pinv = htonl(hc->rx_pinv);
717 717
718 if (dccp_insert_option(sk, skb, TFRC_OPT_LOSS_EVENT_RATE, 718 if (dccp_insert_option(skb, TFRC_OPT_LOSS_EVENT_RATE,
719 &pinv, sizeof(pinv)) || 719 &pinv, sizeof(pinv)) ||
720 dccp_insert_option(sk, skb, TFRC_OPT_RECEIVE_RATE, 720 dccp_insert_option(skb, TFRC_OPT_RECEIVE_RATE,
721 &x_recv, sizeof(x_recv))) 721 &x_recv, sizeof(x_recv)))
722 return -1; 722 return -1;
723 723
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index a10a61a1ded2..3ccef1b70fee 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -446,16 +446,12 @@ extern void dccp_feat_list_purge(struct list_head *fn_list);
446 446
447extern int dccp_insert_options(struct sock *sk, struct sk_buff *skb); 447extern int dccp_insert_options(struct sock *sk, struct sk_buff *skb);
448extern int dccp_insert_options_rsk(struct dccp_request_sock*, struct sk_buff*); 448extern int dccp_insert_options_rsk(struct dccp_request_sock*, struct sk_buff*);
449extern int dccp_insert_option_elapsed_time(struct sock *sk, 449extern int dccp_insert_option_elapsed_time(struct sk_buff *skb, u32 elapsed);
450 struct sk_buff *skb,
451 u32 elapsed_time);
452extern u32 dccp_timestamp(void); 450extern u32 dccp_timestamp(void);
453extern void dccp_timestamping_init(void); 451extern void dccp_timestamping_init(void);
454extern int dccp_insert_option_timestamp(struct sock *sk, 452extern int dccp_insert_option_timestamp(struct sk_buff *skb);
455 struct sk_buff *skb); 453extern int dccp_insert_option(struct sk_buff *skb, unsigned char option,
456extern int dccp_insert_option(struct sock *sk, struct sk_buff *skb, 454 const void *value, unsigned char len);
457 unsigned char option,
458 const void *value, unsigned char len);
459 455
460#ifdef CONFIG_SYSCTL 456#ifdef CONFIG_SYSCTL
461extern int dccp_sysctl_init(void); 457extern int dccp_sysctl_init(void);
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 6beb6a7d6fba..10c957a88f4f 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -430,7 +430,7 @@ static int dccp_rcv_request_sent_state_process(struct sock *sk,
430 if (dccp_parse_options(sk, NULL, skb)) 430 if (dccp_parse_options(sk, NULL, skb))
431 return 1; 431 return 1;
432 432
433 /* Obtain usec RTT sample from SYN exchange (used by CCID 3) */ 433 /* Obtain usec RTT sample from SYN exchange (used by TFRC). */
434 if (likely(dp->dccps_options_received.dccpor_timestamp_echo)) 434 if (likely(dp->dccps_options_received.dccpor_timestamp_echo))
435 dp->dccps_syn_rtt = dccp_sample_rtt(sk, 10 * (tstamp - 435 dp->dccps_syn_rtt = dccp_sample_rtt(sk, 10 * (tstamp -
436 dp->dccps_options_received.dccpor_timestamp_echo)); 436 dp->dccps_options_received.dccpor_timestamp_echo));
@@ -535,6 +535,8 @@ static int dccp_rcv_respond_partopen_state_process(struct sock *sk,
535 const struct dccp_hdr *dh, 535 const struct dccp_hdr *dh,
536 const unsigned len) 536 const unsigned len)
537{ 537{
538 struct dccp_sock *dp = dccp_sk(sk);
539 u32 sample = dp->dccps_options_received.dccpor_timestamp_echo;
538 int queued = 0; 540 int queued = 0;
539 541
540 switch (dh->dccph_type) { 542 switch (dh->dccph_type) {
@@ -559,7 +561,14 @@ static int dccp_rcv_respond_partopen_state_process(struct sock *sk,
559 if (sk->sk_state == DCCP_PARTOPEN) 561 if (sk->sk_state == DCCP_PARTOPEN)
560 inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK); 562 inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
561 563
562 dccp_sk(sk)->dccps_osr = DCCP_SKB_CB(skb)->dccpd_seq; 564 /* Obtain usec RTT sample from SYN exchange (used by TFRC). */
565 if (likely(sample)) {
566 long delta = dccp_timestamp() - sample;
567
568 dp->dccps_syn_rtt = dccp_sample_rtt(sk, 10 * delta);
569 }
570
571 dp->dccps_osr = DCCP_SKB_CB(skb)->dccpd_seq;
563 dccp_set_state(sk, DCCP_OPEN); 572 dccp_set_state(sk, DCCP_OPEN);
564 573
565 if (dh->dccph_type == DCCP_PKT_DATAACK || 574 if (dh->dccph_type == DCCP_PKT_DATAACK ||
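
The added block takes the same RTT measurement on the server side that the client already takes in REQUEST_SENT: dccp_timestamp() ticks every 10 microseconds, so the gap between "now" and the echoed timestamp is multiplied by 10 to give dccp_sample_rtt() a value in microseconds. A worked sketch with illustrative numbers:

	static void rtt_sample_example(void)
	{
		u32 sample = 41230;	/* echoed dccp_timestamp() value */
		u32 now = 41413;	/* dccp_timestamp() when the ack arrives */
		long delta = (long)(now - sample);	/* 183 ticks of 10 usec */
		long rtt_us = 10 * delta;  /* 1830 usec for dccp_sample_rtt() */
		(void)rtt_us;
	}
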
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index d9b11ef8694c..d4a166f0f391 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -105,7 +105,7 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
105 goto failure; 105 goto failure;
106 106
107 /* OK, now commit destination to socket. */ 107 /* OK, now commit destination to socket. */
108 sk_setup_caps(sk, &rt->u.dst); 108 sk_setup_caps(sk, &rt->dst);
109 109
110 dp->dccps_iss = secure_dccp_sequence_number(inet->inet_saddr, 110 dp->dccps_iss = secure_dccp_sequence_number(inet->inet_saddr,
111 inet->inet_daddr, 111 inet->inet_daddr,
@@ -475,7 +475,7 @@ static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk,
475 return NULL; 475 return NULL;
476 } 476 }
477 477
478 return &rt->u.dst; 478 return &rt->dst;
479} 479}
480 480
481static int dccp_v4_send_response(struct sock *sk, struct request_sock *req, 481static int dccp_v4_send_response(struct sock *sk, struct request_sock *req,
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 091698899594..6e3f32575df7 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -248,7 +248,7 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
248 struct ipv6_pinfo *np = inet6_sk(sk); 248 struct ipv6_pinfo *np = inet6_sk(sk);
249 struct sk_buff *skb; 249 struct sk_buff *skb;
250 struct ipv6_txoptions *opt = NULL; 250 struct ipv6_txoptions *opt = NULL;
251 struct in6_addr *final_p = NULL, final; 251 struct in6_addr *final_p, final;
252 struct flowi fl; 252 struct flowi fl;
253 int err = -1; 253 int err = -1;
254 struct dst_entry *dst; 254 struct dst_entry *dst;
@@ -265,13 +265,7 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
265 265
266 opt = np->opt; 266 opt = np->opt;
267 267
268 if (opt != NULL && opt->srcrt != NULL) { 268 final_p = fl6_update_dst(&fl, opt, &final);
269 const struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt;
270
271 ipv6_addr_copy(&final, &fl.fl6_dst);
272 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
273 final_p = &final;
274 }
275 269
276 err = ip6_dst_lookup(sk, &dst, &fl); 270 err = ip6_dst_lookup(sk, &dst, &fl);
277 if (err) 271 if (err)
@@ -545,19 +539,13 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
545 goto out_overflow; 539 goto out_overflow;
546 540
547 if (dst == NULL) { 541 if (dst == NULL) {
548 struct in6_addr *final_p = NULL, final; 542 struct in6_addr *final_p, final;
549 struct flowi fl; 543 struct flowi fl;
550 544
551 memset(&fl, 0, sizeof(fl)); 545 memset(&fl, 0, sizeof(fl));
552 fl.proto = IPPROTO_DCCP; 546 fl.proto = IPPROTO_DCCP;
553 ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr); 547 ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
554 if (opt != NULL && opt->srcrt != NULL) { 548 final_p = fl6_update_dst(&fl, opt, &final);
555 const struct rt0_hdr *rt0 = (struct rt0_hdr *)opt->srcrt;
556
557 ipv6_addr_copy(&final, &fl.fl6_dst);
558 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
559 final_p = &final;
560 }
561 ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr); 549 ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr);
562 fl.oif = sk->sk_bound_dev_if; 550 fl.oif = sk->sk_bound_dev_if;
563 fl.fl_ip_dport = inet_rsk(req)->rmt_port; 551 fl.fl_ip_dport = inet_rsk(req)->rmt_port;
@@ -885,7 +873,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
885 struct inet_sock *inet = inet_sk(sk); 873 struct inet_sock *inet = inet_sk(sk);
886 struct ipv6_pinfo *np = inet6_sk(sk); 874 struct ipv6_pinfo *np = inet6_sk(sk);
887 struct dccp_sock *dp = dccp_sk(sk); 875 struct dccp_sock *dp = dccp_sk(sk);
888 struct in6_addr *saddr = NULL, *final_p = NULL, final; 876 struct in6_addr *saddr = NULL, *final_p, final;
889 struct flowi fl; 877 struct flowi fl;
890 struct dst_entry *dst; 878 struct dst_entry *dst;
891 int addr_type; 879 int addr_type;
@@ -988,13 +976,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
988 fl.fl_ip_sport = inet->inet_sport; 976 fl.fl_ip_sport = inet->inet_sport;
989 security_sk_classify_flow(sk, &fl); 977 security_sk_classify_flow(sk, &fl);
990 978
991 if (np->opt != NULL && np->opt->srcrt != NULL) { 979 final_p = fl6_update_dst(&fl, np->opt, &final);
992 const struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
993
994 ipv6_addr_copy(&final, &fl.fl6_dst);
995 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
996 final_p = &final;
997 }
998 980
999 err = ip6_dst_lookup(sk, &dst, &fl); 981 err = ip6_dst_lookup(sk, &dst, &fl);
1000 if (err) 982 if (err)
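
Every block removed from the ipv6 hunks was the same copy-pasted source-routing dance; fl6_update_dst() centralises it. What the helper must do can be read off the old call sites; its real definition lives elsewhere in this series, so take this as an approximation:

	/* If the tx options carry a type-0 routing header, route to its
	 * first hop instead: save the real destination in *orig, point
	 * the flow at the routing-header address, and return orig so the
	 * caller can restore it after the dst lookup.  Else return NULL. */
	static struct in6_addr *fl6_update_dst(struct flowi *fl,
					       const struct ipv6_txoptions *opt,
					       struct in6_addr *orig)
	{
		if (!opt || !opt->srcrt)
			return NULL;

		ipv6_addr_copy(orig, &fl->fl6_dst);
		ipv6_addr_copy(&fl->fl6_dst,
			       ((struct rt0_hdr *)opt->srcrt)->addr);
		return orig;
	}
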
diff --git a/net/dccp/options.c b/net/dccp/options.c
index 07395f861d35..bfda087bd90d 100644
--- a/net/dccp/options.c
+++ b/net/dccp/options.c
@@ -299,9 +299,8 @@ static inline u8 dccp_ndp_len(const u64 ndp)
299 return likely(ndp <= USHRT_MAX) ? 2 : (ndp <= UINT_MAX ? 4 : 6); 299 return likely(ndp <= USHRT_MAX) ? 2 : (ndp <= UINT_MAX ? 4 : 6);
300} 300}
301 301
302int dccp_insert_option(struct sock *sk, struct sk_buff *skb, 302int dccp_insert_option(struct sk_buff *skb, const unsigned char option,
303 const unsigned char option, 303 const void *value, const unsigned char len)
304 const void *value, const unsigned char len)
305{ 304{
306 unsigned char *to; 305 unsigned char *to;
307 306
@@ -354,8 +353,7 @@ static inline int dccp_elapsed_time_len(const u32 elapsed_time)
354 return elapsed_time == 0 ? 0 : elapsed_time <= 0xFFFF ? 2 : 4; 353 return elapsed_time == 0 ? 0 : elapsed_time <= 0xFFFF ? 2 : 4;
355} 354}
356 355
357int dccp_insert_option_elapsed_time(struct sock *sk, struct sk_buff *skb, 356int dccp_insert_option_elapsed_time(struct sk_buff *skb, u32 elapsed_time)
358 u32 elapsed_time)
359{ 357{
360 const int elapsed_time_len = dccp_elapsed_time_len(elapsed_time); 358 const int elapsed_time_len = dccp_elapsed_time_len(elapsed_time);
361 const int len = 2 + elapsed_time_len; 359 const int len = 2 + elapsed_time_len;
@@ -386,13 +384,13 @@ int dccp_insert_option_elapsed_time(struct sock *sk, struct sk_buff *skb,
386 384
387EXPORT_SYMBOL_GPL(dccp_insert_option_elapsed_time); 385EXPORT_SYMBOL_GPL(dccp_insert_option_elapsed_time);
388 386
389int dccp_insert_option_timestamp(struct sock *sk, struct sk_buff *skb) 387int dccp_insert_option_timestamp(struct sk_buff *skb)
390{ 388{
391 __be32 now = htonl(dccp_timestamp()); 389 __be32 now = htonl(dccp_timestamp());
392 /* yes this will overflow but that is the point as we want a 390 /* yes this will overflow but that is the point as we want a
393 * 10 usec 32 bit timer which mean it wraps every 11.9 hours */ 391 * 10 usec 32 bit timer which mean it wraps every 11.9 hours */
394 392
395 return dccp_insert_option(sk, skb, DCCPO_TIMESTAMP, &now, sizeof(now)); 393 return dccp_insert_option(skb, DCCPO_TIMESTAMP, &now, sizeof(now));
396} 394}
397 395
398EXPORT_SYMBOL_GPL(dccp_insert_option_timestamp); 396EXPORT_SYMBOL_GPL(dccp_insert_option_timestamp);
@@ -531,9 +529,9 @@ int dccp_insert_options(struct sock *sk, struct sk_buff *skb)
531 if (DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_REQUEST) { 529 if (DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_REQUEST) {
532 /* 530 /*
533 * Obtain RTT sample from Request/Response exchange. 531 * Obtain RTT sample from Request/Response exchange.
534 * This is currently used in CCID 3 initialisation. 532 * This is currently used for TFRC initialisation.
535 */ 533 */
536 if (dccp_insert_option_timestamp(sk, skb)) 534 if (dccp_insert_option_timestamp(skb))
537 return -1; 535 return -1;
538 536
539 } else if (dp->dccps_hc_rx_ackvec != NULL && 537 } else if (dp->dccps_hc_rx_ackvec != NULL &&
@@ -564,6 +562,10 @@ int dccp_insert_options_rsk(struct dccp_request_sock *dreq, struct sk_buff *skb)
564 if (dccp_feat_insert_opts(NULL, dreq, skb)) 562 if (dccp_feat_insert_opts(NULL, dreq, skb))
565 return -1; 563 return -1;
566 564
565 /* Obtain RTT sample from Response/Ack exchange (used by TFRC). */
566 if (dccp_insert_option_timestamp(skb))
567 return -1;
568
567 if (dreq->dreq_timestamp_echo != 0 && 569 if (dreq->dreq_timestamp_echo != 0 &&
568 dccp_insert_option_timestamp_echo(NULL, dreq, skb)) 570 dccp_insert_option_timestamp_echo(NULL, dreq, skb))
569 return -1; 571 return -1;
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index b03ecf6b2bb0..096250d1323b 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -473,14 +473,9 @@ static int dccp_setsockopt_ccid(struct sock *sk, int type,
473 if (optlen < 1 || optlen > DCCP_FEAT_MAX_SP_VALS) 473 if (optlen < 1 || optlen > DCCP_FEAT_MAX_SP_VALS)
474 return -EINVAL; 474 return -EINVAL;
475 475
476 val = kmalloc(optlen, GFP_KERNEL); 476 val = memdup_user(optval, optlen);
477 if (val == NULL) 477 if (IS_ERR(val))
478 return -ENOMEM; 478 return PTR_ERR(val);
479
480 if (copy_from_user(val, optval, optlen)) {
481 kfree(val);
482 return -EFAULT;
483 }
484 479
485 lock_sock(sk); 480 lock_sock(sk);
486 if (type == DCCP_SOCKOPT_TX_CCID || type == DCCP_SOCKOPT_CCID) 481 if (type == DCCP_SOCKOPT_TX_CCID || type == DCCP_SOCKOPT_CCID)
@@ -1007,7 +1002,8 @@ EXPORT_SYMBOL_GPL(dccp_shutdown);
1007static inline int dccp_mib_init(void) 1002static inline int dccp_mib_init(void)
1008{ 1003{
1009 return snmp_mib_init((void __percpu **)dccp_statistics, 1004 return snmp_mib_init((void __percpu **)dccp_statistics,
1010 sizeof(struct dccp_mib)); 1005 sizeof(struct dccp_mib),
1006 __alignof__(struct dccp_mib));
1011} 1007}
1012 1008
1013static inline void dccp_mib_exit(void) 1009static inline void dccp_mib_exit(void)
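
The dccp_setsockopt_ccid() hunk is a mechanical conversion to memdup_user(), which folds the kmalloc / copy_from_user / kfree-on-failure triple into one call returning either the new buffer or an ERR_PTR. The general shape of the idiom, not specific to DCCP:

	#include <linux/err.h>
	#include <linux/slab.h>
	#include <linux/uaccess.h>

	static int set_blob(const void __user *uptr, size_t len)
	{
		void *buf = memdup_user(uptr, len); /* kmalloc + copy in one */

		if (IS_ERR(buf))
			return PTR_ERR(buf);	/* -EFAULT or -ENOMEM */

		/* ... consume buf ... */
		kfree(buf);
		return 0;
	}
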
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 812e6dff6067..6585ea6d1182 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -146,13 +146,13 @@ static __inline__ unsigned dn_hash(__le16 src, __le16 dst)
146 146
147static inline void dnrt_free(struct dn_route *rt) 147static inline void dnrt_free(struct dn_route *rt)
148{ 148{
149 call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free); 149 call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
150} 150}
151 151
152static inline void dnrt_drop(struct dn_route *rt) 152static inline void dnrt_drop(struct dn_route *rt)
153{ 153{
154 dst_release(&rt->u.dst); 154 dst_release(&rt->dst);
155 call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free); 155 call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
156} 156}
157 157
158static void dn_dst_check_expire(unsigned long dummy) 158static void dn_dst_check_expire(unsigned long dummy)
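
From here on the decnet changes are one mechanical substitution: rt->u.dst becomes rt->dst, reflecting removal of the single-member union that used to wrap the dst_entry embedded in the routing-cache structures. Roughly, with the layout reconstructed from usage and struct names suffixed for the sketch:

	/* before: dst_entry hidden inside a one-member union */
	struct dn_route_old {
		union {
			struct dst_entry dst;
		} u;
		/* ... DECnet-specific fields ... */
	};

	/* after: embedded directly; it must stay the first member so
	 * (struct dn_route *) <-> (struct dst_entry *) casts remain valid */
	struct dn_route_new {
		struct dst_entry dst;
		/* ... DECnet-specific fields ... */
	};
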
@@ -167,13 +167,13 @@ static void dn_dst_check_expire(unsigned long dummy)
167 167
168 spin_lock(&dn_rt_hash_table[i].lock); 168 spin_lock(&dn_rt_hash_table[i].lock);
169 while((rt=*rtp) != NULL) { 169 while((rt=*rtp) != NULL) {
170 if (atomic_read(&rt->u.dst.__refcnt) || 170 if (atomic_read(&rt->dst.__refcnt) ||
171 (now - rt->u.dst.lastuse) < expire) { 171 (now - rt->dst.lastuse) < expire) {
172 rtp = &rt->u.dst.dn_next; 172 rtp = &rt->dst.dn_next;
173 continue; 173 continue;
174 } 174 }
175 *rtp = rt->u.dst.dn_next; 175 *rtp = rt->dst.dn_next;
176 rt->u.dst.dn_next = NULL; 176 rt->dst.dn_next = NULL;
177 dnrt_free(rt); 177 dnrt_free(rt);
178 } 178 }
179 spin_unlock(&dn_rt_hash_table[i].lock); 179 spin_unlock(&dn_rt_hash_table[i].lock);
@@ -198,13 +198,13 @@ static int dn_dst_gc(struct dst_ops *ops)
198 rtp = &dn_rt_hash_table[i].chain; 198 rtp = &dn_rt_hash_table[i].chain;
199 199
200 while((rt=*rtp) != NULL) { 200 while((rt=*rtp) != NULL) {
201 if (atomic_read(&rt->u.dst.__refcnt) || 201 if (atomic_read(&rt->dst.__refcnt) ||
202 (now - rt->u.dst.lastuse) < expire) { 202 (now - rt->dst.lastuse) < expire) {
203 rtp = &rt->u.dst.dn_next; 203 rtp = &rt->dst.dn_next;
204 continue; 204 continue;
205 } 205 }
206 *rtp = rt->u.dst.dn_next; 206 *rtp = rt->dst.dn_next;
207 rt->u.dst.dn_next = NULL; 207 rt->dst.dn_next = NULL;
208 dnrt_drop(rt); 208 dnrt_drop(rt);
209 break; 209 break;
210 } 210 }
@@ -287,25 +287,25 @@ static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route *
287 while((rth = *rthp) != NULL) { 287 while((rth = *rthp) != NULL) {
288 if (compare_keys(&rth->fl, &rt->fl)) { 288 if (compare_keys(&rth->fl, &rt->fl)) {
289 /* Put it first */ 289 /* Put it first */
290 *rthp = rth->u.dst.dn_next; 290 *rthp = rth->dst.dn_next;
291 rcu_assign_pointer(rth->u.dst.dn_next, 291 rcu_assign_pointer(rth->dst.dn_next,
292 dn_rt_hash_table[hash].chain); 292 dn_rt_hash_table[hash].chain);
293 rcu_assign_pointer(dn_rt_hash_table[hash].chain, rth); 293 rcu_assign_pointer(dn_rt_hash_table[hash].chain, rth);
294 294
295 dst_use(&rth->u.dst, now); 295 dst_use(&rth->dst, now);
296 spin_unlock_bh(&dn_rt_hash_table[hash].lock); 296 spin_unlock_bh(&dn_rt_hash_table[hash].lock);
297 297
298 dnrt_drop(rt); 298 dnrt_drop(rt);
299 *rp = rth; 299 *rp = rth;
300 return 0; 300 return 0;
301 } 301 }
302 rthp = &rth->u.dst.dn_next; 302 rthp = &rth->dst.dn_next;
303 } 303 }
304 304
305 rcu_assign_pointer(rt->u.dst.dn_next, dn_rt_hash_table[hash].chain); 305 rcu_assign_pointer(rt->dst.dn_next, dn_rt_hash_table[hash].chain);
306 rcu_assign_pointer(dn_rt_hash_table[hash].chain, rt); 306 rcu_assign_pointer(dn_rt_hash_table[hash].chain, rt);
307 307
308 dst_use(&rt->u.dst, now); 308 dst_use(&rt->dst, now);
309 spin_unlock_bh(&dn_rt_hash_table[hash].lock); 309 spin_unlock_bh(&dn_rt_hash_table[hash].lock);
310 *rp = rt; 310 *rp = rt;
311 return 0; 311 return 0;
@@ -323,8 +323,8 @@ static void dn_run_flush(unsigned long dummy)
323 goto nothing_to_declare; 323 goto nothing_to_declare;
324 324
325 for(; rt; rt=next) { 325 for(; rt; rt=next) {
326 next = rt->u.dst.dn_next; 326 next = rt->dst.dn_next;
327 rt->u.dst.dn_next = NULL; 327 rt->dst.dn_next = NULL;
328 dst_free((struct dst_entry *)rt); 328 dst_free((struct dst_entry *)rt);
329 } 329 }
330 330
@@ -743,7 +743,7 @@ static int dn_forward(struct sk_buff *skb)
743 /* Ensure that we have enough space for headers */ 743 /* Ensure that we have enough space for headers */
744 rt = (struct dn_route *)skb_dst(skb); 744 rt = (struct dn_route *)skb_dst(skb);
745 header_len = dn_db->use_long ? 21 : 6; 745 header_len = dn_db->use_long ? 21 : 6;
746 if (skb_cow(skb, LL_RESERVED_SPACE(rt->u.dst.dev)+header_len)) 746 if (skb_cow(skb, LL_RESERVED_SPACE(rt->dst.dev)+header_len))
747 goto drop; 747 goto drop;
748 748
749 /* 749 /*
@@ -752,7 +752,7 @@ static int dn_forward(struct sk_buff *skb)
752 if (++cb->hops > 30) 752 if (++cb->hops > 30)
753 goto drop; 753 goto drop;
754 754
755 skb->dev = rt->u.dst.dev; 755 skb->dev = rt->dst.dev;
756 756
757 /* 757 /*
758 * If packet goes out same interface it came in on, then set 758 * If packet goes out same interface it came in on, then set
@@ -792,7 +792,7 @@ static int dn_rt_bug(struct sk_buff *skb)
792static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res) 792static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
793{ 793{
794 struct dn_fib_info *fi = res->fi; 794 struct dn_fib_info *fi = res->fi;
795 struct net_device *dev = rt->u.dst.dev; 795 struct net_device *dev = rt->dst.dev;
796 struct neighbour *n; 796 struct neighbour *n;
797 unsigned mss; 797 unsigned mss;
798 798
@@ -800,25 +800,25 @@ static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
800 if (DN_FIB_RES_GW(*res) && 800 if (DN_FIB_RES_GW(*res) &&
801 DN_FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK) 801 DN_FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
802 rt->rt_gateway = DN_FIB_RES_GW(*res); 802 rt->rt_gateway = DN_FIB_RES_GW(*res);
803 memcpy(rt->u.dst.metrics, fi->fib_metrics, 803 memcpy(rt->dst.metrics, fi->fib_metrics,
804 sizeof(rt->u.dst.metrics)); 804 sizeof(rt->dst.metrics));
805 } 805 }
806 rt->rt_type = res->type; 806 rt->rt_type = res->type;
807 807
808 if (dev != NULL && rt->u.dst.neighbour == NULL) { 808 if (dev != NULL && rt->dst.neighbour == NULL) {
809 n = __neigh_lookup_errno(&dn_neigh_table, &rt->rt_gateway, dev); 809 n = __neigh_lookup_errno(&dn_neigh_table, &rt->rt_gateway, dev);
810 if (IS_ERR(n)) 810 if (IS_ERR(n))
811 return PTR_ERR(n); 811 return PTR_ERR(n);
812 rt->u.dst.neighbour = n; 812 rt->dst.neighbour = n;
813 } 813 }
814 814
815 if (dst_metric(&rt->u.dst, RTAX_MTU) == 0 || 815 if (dst_metric(&rt->dst, RTAX_MTU) == 0 ||
816 dst_metric(&rt->u.dst, RTAX_MTU) > rt->u.dst.dev->mtu) 816 dst_metric(&rt->dst, RTAX_MTU) > rt->dst.dev->mtu)
817 rt->u.dst.metrics[RTAX_MTU-1] = rt->u.dst.dev->mtu; 817 rt->dst.metrics[RTAX_MTU-1] = rt->dst.dev->mtu;
818 mss = dn_mss_from_pmtu(dev, dst_mtu(&rt->u.dst)); 818 mss = dn_mss_from_pmtu(dev, dst_mtu(&rt->dst));
819 if (dst_metric(&rt->u.dst, RTAX_ADVMSS) == 0 || 819 if (dst_metric(&rt->dst, RTAX_ADVMSS) == 0 ||
820 dst_metric(&rt->u.dst, RTAX_ADVMSS) > mss) 820 dst_metric(&rt->dst, RTAX_ADVMSS) > mss)
821 rt->u.dst.metrics[RTAX_ADVMSS-1] = mss; 821 rt->dst.metrics[RTAX_ADVMSS-1] = mss;
822 return 0; 822 return 0;
823} 823}
824 824
@@ -1096,8 +1096,8 @@ make_route:
1096 if (rt == NULL) 1096 if (rt == NULL)
1097 goto e_nobufs; 1097 goto e_nobufs;
1098 1098
1099 atomic_set(&rt->u.dst.__refcnt, 1); 1099 atomic_set(&rt->dst.__refcnt, 1);
1100 rt->u.dst.flags = DST_HOST; 1100 rt->dst.flags = DST_HOST;
1101 1101
1102 rt->fl.fld_src = oldflp->fld_src; 1102 rt->fl.fld_src = oldflp->fld_src;
1103 rt->fl.fld_dst = oldflp->fld_dst; 1103 rt->fl.fld_dst = oldflp->fld_dst;
@@ -1113,17 +1113,17 @@ make_route:
1113 rt->rt_dst_map = fl.fld_dst; 1113 rt->rt_dst_map = fl.fld_dst;
1114 rt->rt_src_map = fl.fld_src; 1114 rt->rt_src_map = fl.fld_src;
1115 1115
1116 rt->u.dst.dev = dev_out; 1116 rt->dst.dev = dev_out;
1117 dev_hold(dev_out); 1117 dev_hold(dev_out);
1118 rt->u.dst.neighbour = neigh; 1118 rt->dst.neighbour = neigh;
1119 neigh = NULL; 1119 neigh = NULL;
1120 1120
1121 rt->u.dst.lastuse = jiffies; 1121 rt->dst.lastuse = jiffies;
1122 rt->u.dst.output = dn_output; 1122 rt->dst.output = dn_output;
1123 rt->u.dst.input = dn_rt_bug; 1123 rt->dst.input = dn_rt_bug;
1124 rt->rt_flags = flags; 1124 rt->rt_flags = flags;
1125 if (flags & RTCF_LOCAL) 1125 if (flags & RTCF_LOCAL)
1126 rt->u.dst.input = dn_nsp_rx; 1126 rt->dst.input = dn_nsp_rx;
1127 1127
1128 err = dn_rt_set_next_hop(rt, &res); 1128 err = dn_rt_set_next_hop(rt, &res);
1129 if (err) 1129 if (err)
@@ -1152,7 +1152,7 @@ e_nobufs:
1152 err = -ENOBUFS; 1152 err = -ENOBUFS;
1153 goto done; 1153 goto done;
1154e_neighbour: 1154e_neighbour:
1155 dst_free(&rt->u.dst); 1155 dst_free(&rt->dst);
1156 goto e_nobufs; 1156 goto e_nobufs;
1157} 1157}
1158 1158
@@ -1168,15 +1168,15 @@ static int __dn_route_output_key(struct dst_entry **pprt, const struct flowi *fl
1168 if (!(flags & MSG_TRYHARD)) { 1168 if (!(flags & MSG_TRYHARD)) {
1169 rcu_read_lock_bh(); 1169 rcu_read_lock_bh();
1170 for (rt = rcu_dereference_bh(dn_rt_hash_table[hash].chain); rt; 1170 for (rt = rcu_dereference_bh(dn_rt_hash_table[hash].chain); rt;
1171 rt = rcu_dereference_bh(rt->u.dst.dn_next)) { 1171 rt = rcu_dereference_bh(rt->dst.dn_next)) {
1172 if ((flp->fld_dst == rt->fl.fld_dst) && 1172 if ((flp->fld_dst == rt->fl.fld_dst) &&
1173 (flp->fld_src == rt->fl.fld_src) && 1173 (flp->fld_src == rt->fl.fld_src) &&
1174 (flp->mark == rt->fl.mark) && 1174 (flp->mark == rt->fl.mark) &&
1175 (rt->fl.iif == 0) && 1175 (rt->fl.iif == 0) &&
1176 (rt->fl.oif == flp->oif)) { 1176 (rt->fl.oif == flp->oif)) {
1177 dst_use(&rt->u.dst, jiffies); 1177 dst_use(&rt->dst, jiffies);
1178 rcu_read_unlock_bh(); 1178 rcu_read_unlock_bh();
1179 *pprt = &rt->u.dst; 1179 *pprt = &rt->dst;
1180 return 0; 1180 return 0;
1181 } 1181 }
1182 } 1182 }
@@ -1375,29 +1375,29 @@ make_route:
1375 rt->fl.iif = in_dev->ifindex; 1375 rt->fl.iif = in_dev->ifindex;
1376 rt->fl.mark = fl.mark; 1376 rt->fl.mark = fl.mark;
1377 1377
1378 rt->u.dst.flags = DST_HOST; 1378 rt->dst.flags = DST_HOST;
1379 rt->u.dst.neighbour = neigh; 1379 rt->dst.neighbour = neigh;
1380 rt->u.dst.dev = out_dev; 1380 rt->dst.dev = out_dev;
1381 rt->u.dst.lastuse = jiffies; 1381 rt->dst.lastuse = jiffies;
1382 rt->u.dst.output = dn_rt_bug; 1382 rt->dst.output = dn_rt_bug;
1383 switch(res.type) { 1383 switch(res.type) {
1384 case RTN_UNICAST: 1384 case RTN_UNICAST:
1385 rt->u.dst.input = dn_forward; 1385 rt->dst.input = dn_forward;
1386 break; 1386 break;
1387 case RTN_LOCAL: 1387 case RTN_LOCAL:
1388 rt->u.dst.output = dn_output; 1388 rt->dst.output = dn_output;
1389 rt->u.dst.input = dn_nsp_rx; 1389 rt->dst.input = dn_nsp_rx;
1390 rt->u.dst.dev = in_dev; 1390 rt->dst.dev = in_dev;
1391 flags |= RTCF_LOCAL; 1391 flags |= RTCF_LOCAL;
1392 break; 1392 break;
1393 default: 1393 default:
1394 case RTN_UNREACHABLE: 1394 case RTN_UNREACHABLE:
1395 case RTN_BLACKHOLE: 1395 case RTN_BLACKHOLE:
1396 rt->u.dst.input = dst_discard; 1396 rt->dst.input = dst_discard;
1397 } 1397 }
1398 rt->rt_flags = flags; 1398 rt->rt_flags = flags;
1399 if (rt->u.dst.dev) 1399 if (rt->dst.dev)
1400 dev_hold(rt->u.dst.dev); 1400 dev_hold(rt->dst.dev);
1401 1401
1402 err = dn_rt_set_next_hop(rt, &res); 1402 err = dn_rt_set_next_hop(rt, &res);
1403 if (err) 1403 if (err)
@@ -1405,7 +1405,7 @@ make_route:
1405 1405
1406 hash = dn_hash(rt->fl.fld_src, rt->fl.fld_dst); 1406 hash = dn_hash(rt->fl.fld_src, rt->fl.fld_dst);
1407 dn_insert_route(rt, hash, &rt); 1407 dn_insert_route(rt, hash, &rt);
1408 skb_dst_set(skb, &rt->u.dst); 1408 skb_dst_set(skb, &rt->dst);
1409 1409
1410done: 1410done:
1411 if (neigh) 1411 if (neigh)
@@ -1427,7 +1427,7 @@ e_nobufs:
1427 goto done; 1427 goto done;
1428 1428
1429e_neighbour: 1429e_neighbour:
1430 dst_free(&rt->u.dst); 1430 dst_free(&rt->dst);
1431 goto done; 1431 goto done;
1432} 1432}
1433 1433
@@ -1442,13 +1442,13 @@ static int dn_route_input(struct sk_buff *skb)
1442 1442
1443 rcu_read_lock(); 1443 rcu_read_lock();
1444 for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt != NULL; 1444 for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt != NULL;
1445 rt = rcu_dereference(rt->u.dst.dn_next)) { 1445 rt = rcu_dereference(rt->dst.dn_next)) {
1446 if ((rt->fl.fld_src == cb->src) && 1446 if ((rt->fl.fld_src == cb->src) &&
1447 (rt->fl.fld_dst == cb->dst) && 1447 (rt->fl.fld_dst == cb->dst) &&
1448 (rt->fl.oif == 0) && 1448 (rt->fl.oif == 0) &&
1449 (rt->fl.mark == skb->mark) && 1449 (rt->fl.mark == skb->mark) &&
1450 (rt->fl.iif == cb->iif)) { 1450 (rt->fl.iif == cb->iif)) {
1451 dst_use(&rt->u.dst, jiffies); 1451 dst_use(&rt->dst, jiffies);
1452 rcu_read_unlock(); 1452 rcu_read_unlock();
1453 skb_dst_set(skb, (struct dst_entry *)rt); 1453 skb_dst_set(skb, (struct dst_entry *)rt);
1454 return 0; 1454 return 0;
@@ -1487,8 +1487,8 @@ static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
1487 r->rtm_src_len = 16; 1487 r->rtm_src_len = 16;
1488 RTA_PUT(skb, RTA_SRC, 2, &rt->fl.fld_src); 1488 RTA_PUT(skb, RTA_SRC, 2, &rt->fl.fld_src);
1489 } 1489 }
1490 if (rt->u.dst.dev) 1490 if (rt->dst.dev)
1491 RTA_PUT(skb, RTA_OIF, sizeof(int), &rt->u.dst.dev->ifindex); 1491 RTA_PUT(skb, RTA_OIF, sizeof(int), &rt->dst.dev->ifindex);
1492 /* 1492 /*
1493 * Note to self - change this if input routes reverse direction when 1493 * Note to self - change this if input routes reverse direction when
1494 * they deal only with inputs and not with replies like they do 1494 * they deal only with inputs and not with replies like they do
@@ -1497,11 +1497,11 @@ static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
1497 RTA_PUT(skb, RTA_PREFSRC, 2, &rt->rt_local_src); 1497 RTA_PUT(skb, RTA_PREFSRC, 2, &rt->rt_local_src);
1498 if (rt->rt_daddr != rt->rt_gateway) 1498 if (rt->rt_daddr != rt->rt_gateway)
1499 RTA_PUT(skb, RTA_GATEWAY, 2, &rt->rt_gateway); 1499 RTA_PUT(skb, RTA_GATEWAY, 2, &rt->rt_gateway);
1500 if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0) 1500 if (rtnetlink_put_metrics(skb, rt->dst.metrics) < 0)
1501 goto rtattr_failure; 1501 goto rtattr_failure;
1502 expires = rt->u.dst.expires ? rt->u.dst.expires - jiffies : 0; 1502 expires = rt->dst.expires ? rt->dst.expires - jiffies : 0;
1503 if (rtnl_put_cacheinfo(skb, &rt->u.dst, 0, 0, 0, expires, 1503 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, 0, 0, expires,
1504 rt->u.dst.error) < 0) 1504 rt->dst.error) < 0)
1505 goto rtattr_failure; 1505 goto rtattr_failure;
1506 if (rt->fl.iif) 1506 if (rt->fl.iif)
1507 RTA_PUT(skb, RTA_IIF, sizeof(int), &rt->fl.iif); 1507 RTA_PUT(skb, RTA_IIF, sizeof(int), &rt->fl.iif);
@@ -1568,8 +1568,8 @@ static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void
1568 local_bh_enable(); 1568 local_bh_enable();
1569 memset(cb, 0, sizeof(struct dn_skb_cb)); 1569 memset(cb, 0, sizeof(struct dn_skb_cb));
1570 rt = (struct dn_route *)skb_dst(skb); 1570 rt = (struct dn_route *)skb_dst(skb);
1571 if (!err && -rt->u.dst.error) 1571 if (!err && -rt->dst.error)
1572 err = rt->u.dst.error; 1572 err = rt->dst.error;
1573 } else { 1573 } else {
1574 int oif = 0; 1574 int oif = 0;
1575 if (rta[RTA_OIF - 1]) 1575 if (rta[RTA_OIF - 1])
@@ -1583,7 +1583,7 @@ static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void
1583 skb->dev = NULL; 1583 skb->dev = NULL;
1584 if (err) 1584 if (err)
1585 goto out_free; 1585 goto out_free;
1586 skb_dst_set(skb, &rt->u.dst); 1586 skb_dst_set(skb, &rt->dst);
1587 if (rtm->rtm_flags & RTM_F_NOTIFY) 1587 if (rtm->rtm_flags & RTM_F_NOTIFY)
1588 rt->rt_flags |= RTCF_NOTIFY; 1588 rt->rt_flags |= RTCF_NOTIFY;
1589 1589
@@ -1632,10 +1632,10 @@ int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb)
1632 rcu_read_lock_bh(); 1632 rcu_read_lock_bh();
1633 for(rt = rcu_dereference_bh(dn_rt_hash_table[h].chain), idx = 0; 1633 for(rt = rcu_dereference_bh(dn_rt_hash_table[h].chain), idx = 0;
1634 rt; 1634 rt;
1635 rt = rcu_dereference_bh(rt->u.dst.dn_next), idx++) { 1635 rt = rcu_dereference_bh(rt->dst.dn_next), idx++) {
1636 if (idx < s_idx) 1636 if (idx < s_idx)
1637 continue; 1637 continue;
1638 skb_dst_set(skb, dst_clone(&rt->u.dst)); 1638 skb_dst_set(skb, dst_clone(&rt->dst));
1639 if (dn_rt_fill_info(skb, NETLINK_CB(cb->skb).pid, 1639 if (dn_rt_fill_info(skb, NETLINK_CB(cb->skb).pid,
1640 cb->nlh->nlmsg_seq, RTM_NEWROUTE, 1640 cb->nlh->nlmsg_seq, RTM_NEWROUTE,
1641 1, NLM_F_MULTI) <= 0) { 1641 1, NLM_F_MULTI) <= 0) {
@@ -1678,7 +1678,7 @@ static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_rou
1678{ 1678{
1679 struct dn_rt_cache_iter_state *s = seq->private; 1679 struct dn_rt_cache_iter_state *s = seq->private;
1680 1680
1681 rt = rt->u.dst.dn_next; 1681 rt = rt->dst.dn_next;
1682 while(!rt) { 1682 while(!rt) {
1683 rcu_read_unlock_bh(); 1683 rcu_read_unlock_bh();
1684 if (--s->bucket < 0) 1684 if (--s->bucket < 0)
@@ -1719,12 +1719,12 @@ static int dn_rt_cache_seq_show(struct seq_file *seq, void *v)
1719 char buf1[DN_ASCBUF_LEN], buf2[DN_ASCBUF_LEN]; 1719 char buf1[DN_ASCBUF_LEN], buf2[DN_ASCBUF_LEN];
1720 1720
1721 seq_printf(seq, "%-8s %-7s %-7s %04d %04d %04d\n", 1721 seq_printf(seq, "%-8s %-7s %-7s %04d %04d %04d\n",
1722 rt->u.dst.dev ? rt->u.dst.dev->name : "*", 1722 rt->dst.dev ? rt->dst.dev->name : "*",
1723 dn_addr2asc(le16_to_cpu(rt->rt_daddr), buf1), 1723 dn_addr2asc(le16_to_cpu(rt->rt_daddr), buf1),
1724 dn_addr2asc(le16_to_cpu(rt->rt_saddr), buf2), 1724 dn_addr2asc(le16_to_cpu(rt->rt_saddr), buf2),
1725 atomic_read(&rt->u.dst.__refcnt), 1725 atomic_read(&rt->dst.__refcnt),
1726 rt->u.dst.__use, 1726 rt->dst.__use,
1727 (int) dst_metric(&rt->u.dst, RTAX_RTT)); 1727 (int) dst_metric(&rt->dst, RTAX_RTT));
1728 return 0; 1728 return 0;
1729} 1729}
1730 1730
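The dn_route.c hunks above are one file's share of the tree-wide rename of rt->u.dst to rt->dst. A minimal sketch of the layout change this assumes (abridged, not the full definition): the dst_entry moves from a one-member union to being the first member proper, which is also what keeps the (struct dst_entry *)rt cast in dn_route_input() valid.

    /* Sketch only: abridged from the 2.6.36-era struct dn_route. */
    struct dn_route {
            struct dst_entry dst;   /* was: union { struct dst_entry dst; } u; */
            struct flowi fl;
            __le16 rt_saddr;
            __le16 rt_daddr;
            /* ... remaining fields elided ... */
    };

    /* With dst first, a dn_route pointer and its dst_entry alias the
     * same address, so skb_dst_set(skb, (struct dst_entry *)rt) holds. */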
diff --git a/net/dns_resolver/Kconfig b/net/dns_resolver/Kconfig
new file mode 100644
index 000000000000..50d49f7e0472
--- /dev/null
+++ b/net/dns_resolver/Kconfig
@@ -0,0 +1,27 @@
1#
2# Configuration for DNS Resolver
3#
4config DNS_RESOLVER
5 tristate "DNS Resolver support"
6 depends on NET && KEYS
7 help
8 Saying Y here will include support for the DNS Resolver key type
9 which can be used to make upcalls to perform DNS lookups in
10 userspace.
11
 12 The DNS Resolver is used to query a DNS server for information.
 13 Examples include resolving a UNC hostname element to an IP address
 14 for CIFS, or performing a DNS query for AFSDB records so that AFS
 15 can locate a cell's volume location database servers.
16
 17 The DNS Resolver is used by the CIFS and AFS modules, and will
 18 support SMB2 later. It is backed by the userspace upcall helper
 19 "/sbin/dns.resolver", configured via /etc/request-key.conf.
20
21 See <file:Documentation/networking/dns_resolver.txt> for further
22 information.
23
24 To compile this as a module, choose M here: the module will be called
25 dnsresolver.
26
27 If unsure, say N.
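For reference, the upcall wiring the help text describes takes the conventional form below; the request-key.conf line matches the one quoted in dns_query.c further down, while the keyctl invocation is only a hypothetical smoke test (keyutils syntax: type, description, callout info).

    # /etc/request-key.conf
    create  dns_resolver  *  *  /sbin/dns.resolver %k

    # illustrative test from a shell, once the module is loaded
    keyctl request2 dns_resolver www.example.com ''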
diff --git a/net/dns_resolver/Makefile b/net/dns_resolver/Makefile
new file mode 100644
index 000000000000..c0ef4e71dc49
--- /dev/null
+++ b/net/dns_resolver/Makefile
@@ -0,0 +1,7 @@
1#
2# Makefile for the Linux DNS Resolver.
3#
4
5obj-$(CONFIG_DNS_RESOLVER) += dns_resolver.o
6
7dns_resolver-objs := dns_key.o dns_query.o
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
new file mode 100644
index 000000000000..400a04d5c9a1
--- /dev/null
+++ b/net/dns_resolver/dns_key.c
@@ -0,0 +1,211 @@
1/* Key type used to cache DNS lookups made by the kernel
2 *
3 * See Documentation/networking/dns_resolver.txt
4 *
5 * Copyright (c) 2007 Igor Mammedov
6 * Author(s): Igor Mammedov (niallain@gmail.com)
7 * Steve French (sfrench@us.ibm.com)
8 * Wang Lei (wang840925@gmail.com)
9 * David Howells (dhowells@redhat.com)
10 *
11 * This library is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU Lesser General Public License as published
13 * by the Free Software Foundation; either version 2.1 of the License, or
14 * (at your option) any later version.
15 *
16 * This library is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
19 * the GNU Lesser General Public License for more details.
20 *
21 * You should have received a copy of the GNU Lesser General Public License
22 * along with this library; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 */
25#include <linux/module.h>
26#include <linux/moduleparam.h>
27#include <linux/slab.h>
28#include <linux/string.h>
29#include <linux/kernel.h>
30#include <linux/keyctl.h>
31#include <linux/err.h>
32#include <keys/dns_resolver-type.h>
33#include <keys/user-type.h>
34#include "internal.h"
35
36MODULE_DESCRIPTION("DNS Resolver");
37MODULE_AUTHOR("Wang Lei");
38MODULE_LICENSE("GPL");
39
40unsigned dns_resolver_debug;
41module_param_named(debug, dns_resolver_debug, uint, S_IWUSR | S_IRUGO);
42MODULE_PARM_DESC(debug, "DNS Resolver debugging mask");
43
44const struct cred *dns_resolver_cache;
45
46/*
47 * Instantiate a user defined key for dns_resolver.
48 *
 49 * The data must be a NUL-terminated string, with the NUL char counted
 50 * in datalen.
51 *
 52 * If the data contains '#' characters, then we take the clause after each
53 * one to be an option of the form 'key=value'. The actual data of interest is
54 * the string leading up to the first '#'. For instance:
55 *
56 * "ip1,ip2,...#foo=bar"
57 */
58static int
59dns_resolver_instantiate(struct key *key, const void *_data, size_t datalen)
60{
61 struct user_key_payload *upayload;
62 int ret;
63 size_t result_len = 0;
64 const char *data = _data, *opt;
65
66 kenter("%%%d,%s,'%s',%zu",
67 key->serial, key->description, data, datalen);
68
69 if (datalen <= 1 || !data || data[datalen - 1] != '\0')
70 return -EINVAL;
71 datalen--;
72
73 /* deal with any options embedded in the data */
74 opt = memchr(data, '#', datalen);
75 if (!opt) {
76 kdebug("no options currently supported");
77 return -EINVAL;
78 }
79
80 result_len = datalen;
81 ret = key_payload_reserve(key, result_len);
82 if (ret < 0)
83 return -EINVAL;
84
85 upayload = kmalloc(sizeof(*upayload) + result_len + 1, GFP_KERNEL);
86 if (!upayload) {
87 kleave(" = -ENOMEM");
88 return -ENOMEM;
89 }
90
91 upayload->datalen = result_len;
92 memcpy(upayload->data, data, result_len);
93 upayload->data[result_len] = '\0';
94 rcu_assign_pointer(key->payload.data, upayload);
95
96 kleave(" = 0");
97 return 0;
98}
99
100/*
101 * The description is of the form "[<type>:]<domain_name>"
102 *
103 * The domain name may be a simple name or an absolute domain name (which
 104 * should end with a period). The domain name is case-insensitive.
105 */
106static int
107dns_resolver_match(const struct key *key, const void *description)
108{
109 int slen, dlen, ret = 0;
110 const char *src = key->description, *dsp = description;
111
112 kenter("%s,%s", src, dsp);
113
114 if (!src || !dsp)
115 goto no_match;
116
117 if (strcasecmp(src, dsp) == 0)
118 goto matched;
119
120 slen = strlen(src);
121 dlen = strlen(dsp);
122 if (slen <= 0 || dlen <= 0)
123 goto no_match;
124 if (src[slen - 1] == '.')
125 slen--;
126 if (dsp[dlen - 1] == '.')
127 dlen--;
128 if (slen != dlen || strncasecmp(src, dsp, slen) != 0)
129 goto no_match;
130
131matched:
132 ret = 1;
133no_match:
134 kleave(" = %d", ret);
135 return ret;
136}
137
138struct key_type key_type_dns_resolver = {
139 .name = "dns_resolver",
140 .instantiate = dns_resolver_instantiate,
141 .match = dns_resolver_match,
142 .revoke = user_revoke,
143 .destroy = user_destroy,
144 .describe = user_describe,
145 .read = user_read,
146};
147
148static int __init init_dns_resolver(void)
149{
150 struct cred *cred;
151 struct key *keyring;
152 int ret;
153
154 printk(KERN_NOTICE "Registering the %s key type\n",
155 key_type_dns_resolver.name);
156
157 /* create an override credential set with a special thread keyring in
158 * which DNS requests are cached
159 *
160 * this is used to prevent malicious redirections from being installed
161 * with add_key().
162 */
163 cred = prepare_kernel_cred(NULL);
164 if (!cred)
165 return -ENOMEM;
166
167 keyring = key_alloc(&key_type_keyring, ".dns_resolver", 0, 0, cred,
168 (KEY_POS_ALL & ~KEY_POS_SETATTR) |
169 KEY_USR_VIEW | KEY_USR_READ,
170 KEY_ALLOC_NOT_IN_QUOTA);
171 if (IS_ERR(keyring)) {
172 ret = PTR_ERR(keyring);
173 goto failed_put_cred;
174 }
175
176 ret = key_instantiate_and_link(keyring, NULL, 0, NULL, NULL);
177 if (ret < 0)
178 goto failed_put_key;
179
180 ret = register_key_type(&key_type_dns_resolver);
181 if (ret < 0)
182 goto failed_put_key;
183
184 /* instruct request_key() to use this special keyring as a cache for
185 * the results it looks up */
186 cred->thread_keyring = keyring;
187 cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING;
188 dns_resolver_cache = cred;
189
190 kdebug("DNS resolver keyring: %d\n", key_serial(keyring));
191 return 0;
192
193failed_put_key:
194 key_put(keyring);
195failed_put_cred:
196 put_cred(cred);
197 return ret;
198}
199
200static void __exit exit_dns_resolver(void)
201{
202 key_revoke(dns_resolver_cache->thread_keyring);
203 unregister_key_type(&key_type_dns_resolver);
204 put_cred(dns_resolver_cache);
205 printk(KERN_NOTICE "Unregistered %s key type\n",
206 key_type_dns_resolver.name);
207}
208
209module_init(init_dns_resolver)
210module_exit(exit_dns_resolver)
211MODULE_LICENSE("GPL");
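To make the instantiate/match pair above concrete, here is a hedged sketch of the userspace half of the upcall. The helper name and argv handling are invented (a real helper would fetch the description with keyctl_describe()); keyctl_instantiate() and keyctl_negate() are the standard libkeyutils calls, and the payload keeps its trailing NUL in the length and carries a '#', as dns_resolver_instantiate() requires.

    /* Illustrative upcall helper in the style of /sbin/dns.resolver.
     * Assumes libkeyutils; link with -lkeyutils. Error paths trimmed. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <netdb.h>
    #include <netinet/in.h>
    #include <arpa/inet.h>
    #include <keyutils.h>

    int main(int argc, char *argv[])
    {
            key_serial_t key;
            struct addrinfo hints = { .ai_family = AF_INET }, *res, *p;
            char buf[1024], ip[INET_ADDRSTRLEN];
            size_t n = 0;

            if (argc < 3)
                    return 1;
            key = strtoul(argv[1], NULL, 10);  /* "%k" from request-key.conf */

            if (getaddrinfo(argv[2], NULL, &hints, &res) != 0) {
                    keyctl_negate(key, 60, 0);  /* cache the failure for 60s */
                    return 1;
            }
            for (p = res; p; p = p->ai_next) {
                    inet_ntop(AF_INET,
                              &((struct sockaddr_in *)p->ai_addr)->sin_addr,
                              ip, sizeof(ip));
                    n += snprintf(buf + n, sizeof(buf) - n, "%s%s",
                                  n ? "," : "", ip);
            }
            freeaddrinfo(res);
            n += snprintf(buf + n, sizeof(buf) - n, "#");  /* no options */

            /* plen must count the trailing NUL, per the instantiate check */
            return keyctl_instantiate(key, buf, n + 1, 0) < 0 ? 1 : 0;
    }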
diff --git a/net/dns_resolver/dns_query.c b/net/dns_resolver/dns_query.c
new file mode 100644
index 000000000000..03d5255f5cf2
--- /dev/null
+++ b/net/dns_resolver/dns_query.c
@@ -0,0 +1,160 @@
 1/* Upcall routine, designed to work as a key type, operating through
2 * /sbin/request-key to contact userspace when handling DNS queries.
3 *
4 * See Documentation/networking/dns_resolver.txt
5 *
6 * Copyright (c) 2007 Igor Mammedov
7 * Author(s): Igor Mammedov (niallain@gmail.com)
8 * Steve French (sfrench@us.ibm.com)
9 * Wang Lei (wang840925@gmail.com)
10 * David Howells (dhowells@redhat.com)
11 *
12 * The upcall wrapper used to make an arbitrary DNS query.
13 *
14 * This function requires the appropriate userspace tool dns.upcall to be
 15 * installed, and something like the following lines should be added to the
16 * /etc/request-key.conf file:
17 *
18 * create dns_resolver * * /sbin/dns.upcall %k
19 *
 20 * For example, to use this module to query an AFSDB RR:
21 *
22 * create dns_resolver afsdb:* * /sbin/dns.afsdb %k
23 *
24 * This library is free software; you can redistribute it and/or modify
25 * it under the terms of the GNU Lesser General Public License as published
26 * by the Free Software Foundation; either version 2.1 of the License, or
27 * (at your option) any later version.
28 *
29 * This library is distributed in the hope that it will be useful,
30 * but WITHOUT ANY WARRANTY; without even the implied warranty of
31 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
32 * the GNU Lesser General Public License for more details.
33 *
34 * You should have received a copy of the GNU Lesser General Public License
35 * along with this library; if not, write to the Free Software
36 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
37 */
38
39#include <linux/module.h>
40#include <linux/slab.h>
41#include <linux/dns_resolver.h>
42#include <linux/err.h>
43#include <keys/dns_resolver-type.h>
44#include <keys/user-type.h>
45
46#include "internal.h"
47
48/**
49 * dns_query - Query the DNS
50 * @type: Query type (or NULL for straight host->IP lookup)
51 * @name: Name to look up
52 * @namelen: Length of name
53 * @options: Request options (or NULL if no options)
54 * @_result: Where to place the returned data.
55 * @_expiry: Where to store the result expiry time (or NULL)
56 *
 57 * The data will be returned in the pointer at *_result, and the caller is
58 * responsible for freeing it.
59 *
60 * The description should be of the form "[<query_type>:]<domain_name>", and
61 * the options need to be appropriate for the query type requested. If no
62 * query_type is given, then the query is a straight hostname to IP address
63 * lookup.
64 *
65 * The DNS resolution lookup is performed by upcalling to userspace by way of
66 * requesting a key of type dns_resolver.
67 *
68 * Returns the size of the result on success, -ve error code otherwise.
69 */
70int dns_query(const char *type, const char *name, size_t namelen,
71 const char *options, char **_result, time_t *_expiry)
72{
73 struct key *rkey;
74 struct user_key_payload *upayload;
75 const struct cred *saved_cred;
76 size_t typelen, desclen;
77 char *desc, *cp;
78 int ret, len;
79
80 kenter("%s,%*.*s,%zu,%s",
81 type, (int)namelen, (int)namelen, name, namelen, options);
82
83 if (!name || namelen == 0 || !_result)
84 return -EINVAL;
85
86 /* construct the query key description as "[<type>:]<name>" */
87 typelen = 0;
88 desclen = 0;
89 if (type) {
90 typelen = strlen(type);
91 if (typelen < 1)
92 return -EINVAL;
93 desclen += typelen + 1;
94 }
95
96 if (!namelen)
97 namelen = strlen(name);
98 if (namelen < 3)
99 return -EINVAL;
100 desclen += namelen + 1;
101
102 desc = kmalloc(desclen, GFP_KERNEL);
103 if (!desc)
104 return -ENOMEM;
105
106 cp = desc;
107 if (type) {
108 memcpy(cp, type, typelen);
109 cp += typelen;
110 *cp++ = ':';
111 }
112 memcpy(cp, name, namelen);
113 cp += namelen;
114 *cp = '\0';
115
116 if (!options)
117 options = "";
118 kdebug("call request_key(,%s,%s)", desc, options);
119
120 /* make the upcall, using special credentials to prevent the use of
121 * add_key() to preinstall malicious redirections
122 */
123 saved_cred = override_creds(dns_resolver_cache);
124 rkey = request_key(&key_type_dns_resolver, desc, options);
125 revert_creds(saved_cred);
126 kfree(desc);
127 if (IS_ERR(rkey)) {
128 ret = PTR_ERR(rkey);
129 goto out;
130 }
131
132 down_read(&rkey->sem);
133 rkey->perm |= KEY_USR_VIEW;
134
135 ret = key_validate(rkey);
136 if (ret < 0)
137 goto put;
138
139 upayload = rcu_dereference_protected(rkey->payload.data,
140 lockdep_is_held(&rkey->sem));
141 len = upayload->datalen;
142
143 ret = -ENOMEM;
144 *_result = kmalloc(len + 1, GFP_KERNEL);
145 if (!*_result)
146 goto put;
147
148 memcpy(*_result, upayload->data, len + 1);
149 if (_expiry)
150 *_expiry = rkey->expiry;
151
152 ret = len;
153put:
154 up_read(&rkey->sem);
155 key_put(rkey);
156out:
157 kleave(" = %d", ret);
158 return ret;
159}
160EXPORT_SYMBOL(dns_query);
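On the kernel side, a minimal hedged sketch of a dns_query() caller, loosely modelled on the AFSDB example in the header comment; the function and its use of pr_info() are invented for illustration, while the signature and the ownership of *_result follow the kerneldoc above.

    /* Illustrative in-kernel caller; not part of the patch. */
    #include <linux/kernel.h>
    #include <linux/string.h>
    #include <linux/slab.h>
    #include <linux/dns_resolver.h>

    static int example_afsdb_lookup(const char *cell)
    {
            char *vllist;
            time_t expiry;
            int ret;

            /* "afsdb:<cell>" upcalls to /sbin/dns.afsdb per request-key.conf */
            ret = dns_query("afsdb", cell, strlen(cell), NULL, &vllist, &expiry);
            if (ret < 0)
                    return ret;             /* -ve errno from the upcall */

            pr_info("cell %s -> %s (expires %ld)\n",
                    cell, vllist, (long)expiry);
            kfree(vllist);                  /* caller owns *_result */
            return 0;
    }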
diff --git a/net/dns_resolver/internal.h b/net/dns_resolver/internal.h
new file mode 100644
index 000000000000..189ca9e9b785
--- /dev/null
+++ b/net/dns_resolver/internal.h
@@ -0,0 +1,44 @@
1/*
2 * Copyright (c) 2010 Wang Lei
3 * Author(s): Wang Lei (wang840925@gmail.com). All Rights Reserved.
4 *
 5 * Internal DNS Resolver stuff
6 *
7 * This library is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU Lesser General Public License as published
9 * by the Free Software Foundation; either version 2.1 of the License, or
10 * (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
15 * the GNU Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public License
18 * along with this library; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */
21
22#include <linux/compiler.h>
23#include <linux/kernel.h>
24#include <linux/sched.h>
25
26/*
27 * dns_key.c
28 */
29extern const struct cred *dns_resolver_cache;
30
31/*
32 * debug tracing
33 */
34extern unsigned dns_resolver_debug;
35
36#define kdebug(FMT, ...) \
37do { \
38 if (unlikely(dns_resolver_debug)) \
39 printk(KERN_DEBUG "[%-6.6s] "FMT"\n", \
40 current->comm, ##__VA_ARGS__); \
41} while (0)
42
43#define kenter(FMT, ...) kdebug("==> %s("FMT")", __func__, ##__VA_ARGS__)
44#define kleave(FMT, ...) kdebug("<== %s()"FMT"", __func__, ##__VA_ARGS__)
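Because dns_key.c exports dns_resolver_debug as a writable module parameter, the kenter/kleave/kdebug tracing above can be flipped at runtime; the sysfs path follows from module_param_named() and the module name, and the output lands in the kernel log at KERN_DEBUG.

    echo 1 > /sys/module/dns_resolver/parameters/debug    # then check dmesg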
diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
index c51b55400dc5..11201784d29a 100644
--- a/net/dsa/Kconfig
+++ b/net/dsa/Kconfig
@@ -1,7 +1,7 @@
1menuconfig NET_DSA 1menuconfig NET_DSA
2 bool "Distributed Switch Architecture support" 2 bool "Distributed Switch Architecture support"
3 default n 3 default n
4 depends on EXPERIMENTAL && !S390 4 depends on EXPERIMENTAL && NET_ETHERNET && !S390
5 select PHYLIB 5 select PHYLIB
6 ---help--- 6 ---help---
7 This allows you to use hardware switch chips that use 7 This allows you to use hardware switch chips that use
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 8fdca56bb08f..64ca2a6fa0d4 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -164,10 +164,9 @@ out:
164static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 164static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
165{ 165{
166 struct dsa_slave_priv *p = netdev_priv(dev); 166 struct dsa_slave_priv *p = netdev_priv(dev);
167 struct mii_ioctl_data *mii_data = if_mii(ifr);
168 167
169 if (p->phy != NULL) 168 if (p->phy != NULL)
170 return phy_mii_ioctl(p->phy, mii_data, cmd); 169 return phy_mii_ioctl(p->phy, ifr, cmd);
171 170
172 return -EOPNOTSUPP; 171 return -EOPNOTSUPP;
173} 172}
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index 2a5a8053e000..dc54bd0d083b 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -48,7 +48,7 @@
48 48
49static const struct proto_ops econet_ops; 49static const struct proto_ops econet_ops;
50static struct hlist_head econet_sklist; 50static struct hlist_head econet_sklist;
51static DEFINE_RWLOCK(econet_lock); 51static DEFINE_SPINLOCK(econet_lock);
52static DEFINE_MUTEX(econet_mutex); 52static DEFINE_MUTEX(econet_mutex);
53 53
54/* Since there are only 256 possible network numbers (or fewer, depends 54/* Since there are only 256 possible network numbers (or fewer, depends
@@ -98,16 +98,16 @@ struct ec_cb
98 98
99static void econet_remove_socket(struct hlist_head *list, struct sock *sk) 99static void econet_remove_socket(struct hlist_head *list, struct sock *sk)
100{ 100{
101 write_lock_bh(&econet_lock); 101 spin_lock_bh(&econet_lock);
102 sk_del_node_init(sk); 102 sk_del_node_init(sk);
103 write_unlock_bh(&econet_lock); 103 spin_unlock_bh(&econet_lock);
104} 104}
105 105
106static void econet_insert_socket(struct hlist_head *list, struct sock *sk) 106static void econet_insert_socket(struct hlist_head *list, struct sock *sk)
107{ 107{
108 write_lock_bh(&econet_lock); 108 spin_lock_bh(&econet_lock);
109 sk_add_node(sk, list); 109 sk_add_node(sk, list);
110 write_unlock_bh(&econet_lock); 110 spin_unlock_bh(&econet_lock);
111} 111}
112 112
113/* 113/*
@@ -782,15 +782,19 @@ static struct sock *ec_listening_socket(unsigned char port, unsigned char
782 struct sock *sk; 782 struct sock *sk;
783 struct hlist_node *node; 783 struct hlist_node *node;
784 784
785 spin_lock(&econet_lock);
785 sk_for_each(sk, node, &econet_sklist) { 786 sk_for_each(sk, node, &econet_sklist) {
786 struct econet_sock *opt = ec_sk(sk); 787 struct econet_sock *opt = ec_sk(sk);
787 if ((opt->port == port || opt->port == 0) && 788 if ((opt->port == port || opt->port == 0) &&
788 (opt->station == station || opt->station == 0) && 789 (opt->station == station || opt->station == 0) &&
789 (opt->net == net || opt->net == 0)) 790 (opt->net == net || opt->net == 0)) {
791 sock_hold(sk);
790 goto found; 792 goto found;
793 }
791 } 794 }
792 sk = NULL; 795 sk = NULL;
793found: 796found:
797 spin_unlock(&econet_lock);
794 return sk; 798 return sk;
795} 799}
796 800
@@ -852,7 +856,7 @@ static void aun_incoming(struct sk_buff *skb, struct aunhdr *ah, size_t len)
852{ 856{
853 struct iphdr *ip = ip_hdr(skb); 857 struct iphdr *ip = ip_hdr(skb);
854 unsigned char stn = ntohl(ip->saddr) & 0xff; 858 unsigned char stn = ntohl(ip->saddr) & 0xff;
855 struct sock *sk; 859 struct sock *sk = NULL;
856 struct sk_buff *newskb; 860 struct sk_buff *newskb;
857 struct ec_device *edev = skb->dev->ec_ptr; 861 struct ec_device *edev = skb->dev->ec_ptr;
858 862
@@ -882,10 +886,13 @@ static void aun_incoming(struct sk_buff *skb, struct aunhdr *ah, size_t len)
882 } 886 }
883 887
884 aun_send_response(ip->saddr, ah->handle, 3, 0); 888 aun_send_response(ip->saddr, ah->handle, 3, 0);
889 sock_put(sk);
885 return; 890 return;
886 891
887bad: 892bad:
888 aun_send_response(ip->saddr, ah->handle, 4, 0); 893 aun_send_response(ip->saddr, ah->handle, 4, 0);
894 if (sk)
895 sock_put(sk);
889} 896}
890 897
891/* 898/*
@@ -1050,7 +1057,7 @@ release:
1050static int econet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) 1057static int econet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
1051{ 1058{
1052 struct ec_framehdr *hdr; 1059 struct ec_framehdr *hdr;
1053 struct sock *sk; 1060 struct sock *sk = NULL;
1054 struct ec_device *edev = dev->ec_ptr; 1061 struct ec_device *edev = dev->ec_ptr;
1055 1062
1056 if (!net_eq(dev_net(dev), &init_net)) 1063 if (!net_eq(dev_net(dev), &init_net))
@@ -1085,10 +1092,12 @@ static int econet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet
1085 if (ec_queue_packet(sk, skb, edev->net, hdr->src_stn, hdr->cb, 1092 if (ec_queue_packet(sk, skb, edev->net, hdr->src_stn, hdr->cb,
1086 hdr->port)) 1093 hdr->port))
1087 goto drop; 1094 goto drop;
1088 1095 sock_put(sk);
1089 return NET_RX_SUCCESS; 1096 return NET_RX_SUCCESS;
1090 1097
1091drop: 1098drop:
1099 if (sk)
1100 sock_put(sk);
1092 kfree_skb(skb); 1101 kfree_skb(skb);
1093 return NET_RX_DROP; 1102 return NET_RX_DROP;
1094} 1103}
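Taken together, the econet hunks swap the reader-writer lock for a spinlock and, more importantly, make the listening-socket lookup return a referenced sock so it cannot vanish once the lock is dropped. The resulting pattern, condensed into one illustrative function:

    /* Sketch of the lookup-and-hold pattern the hunks introduce. */
    static struct sock *example_listening_socket(unsigned char port)
    {
            struct sock *sk;
            struct hlist_node *node;

            spin_lock(&econet_lock);
            sk_for_each(sk, node, &econet_sklist) {
                    if (ec_sk(sk)->port == port || ec_sk(sk)->port == 0) {
                            sock_hold(sk);  /* pin sk before the lock drops */
                            goto out;
                    }
            }
            sk = NULL;
    out:
            spin_unlock(&econet_lock);
            return sk;      /* caller must sock_put() when finished */
    }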
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 61ec0329316c..215c83986a9d 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -158,7 +158,6 @@ EXPORT_SYMBOL(eth_rebuild_header);
158__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev) 158__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
159{ 159{
160 struct ethhdr *eth; 160 struct ethhdr *eth;
161 unsigned char *rawp;
162 161
163 skb->dev = dev; 162 skb->dev = dev;
164 skb_reset_mac_header(skb); 163 skb_reset_mac_header(skb);
@@ -199,15 +198,13 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
199 if (ntohs(eth->h_proto) >= 1536) 198 if (ntohs(eth->h_proto) >= 1536)
200 return eth->h_proto; 199 return eth->h_proto;
201 200
202 rawp = skb->data;
203
204 /* 201 /*
205 * This is a magic hack to spot IPX packets. Older Novell breaks 202 * This is a magic hack to spot IPX packets. Older Novell breaks
206 * the protocol design and runs IPX over 802.3 without an 802.2 LLC 203 * the protocol design and runs IPX over 802.3 without an 802.2 LLC
207 * layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This 204 * layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
208 * won't work for fault tolerant netware but does for the rest. 205 * won't work for fault tolerant netware but does for the rest.
209 */ 206 */
210 if (*(unsigned short *)rawp == 0xFFFF) 207 if (skb->len >= 2 && *(unsigned short *)(skb->data) == 0xFFFF)
211 return htons(ETH_P_802_3); 208 return htons(ETH_P_802_3);
212 209
213 /* 210 /*
diff --git a/net/ethernet/pe2.c b/net/ethernet/pe2.c
index eb00796758c3..85d574addbc1 100644
--- a/net/ethernet/pe2.c
+++ b/net/ethernet/pe2.c
@@ -28,11 +28,10 @@ struct datalink_proto *make_EII_client(void)
28 28
29 return proto; 29 return proto;
30} 30}
31EXPORT_SYMBOL(make_EII_client);
31 32
32void destroy_EII_client(struct datalink_proto *dl) 33void destroy_EII_client(struct datalink_proto *dl)
33{ 34{
34 kfree(dl); 35 kfree(dl);
35} 36}
36
37EXPORT_SYMBOL(destroy_EII_client); 37EXPORT_SYMBOL(destroy_EII_client);
38EXPORT_SYMBOL(make_EII_client);
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 551ce564b035..6a1100c25a9f 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -355,6 +355,8 @@ lookup_protocol:
355 inet = inet_sk(sk); 355 inet = inet_sk(sk);
356 inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) != 0; 356 inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) != 0;
357 357
358 inet->nodefrag = 0;
359
358 if (SOCK_RAW == sock->type) { 360 if (SOCK_RAW == sock->type) {
359 inet->inet_num = protocol; 361 inet->inet_num = protocol;
360 if (IPPROTO_RAW == protocol) 362 if (IPPROTO_RAW == protocol)
@@ -725,28 +727,31 @@ int inet_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
725 sock_rps_record_flow(sk); 727 sock_rps_record_flow(sk);
726 728
727 /* We may need to bind the socket. */ 729 /* We may need to bind the socket. */
728 if (!inet_sk(sk)->inet_num && inet_autobind(sk)) 730 if (!inet_sk(sk)->inet_num && !sk->sk_prot->no_autobind &&
731 inet_autobind(sk))
729 return -EAGAIN; 732 return -EAGAIN;
730 733
731 return sk->sk_prot->sendmsg(iocb, sk, msg, size); 734 return sk->sk_prot->sendmsg(iocb, sk, msg, size);
732} 735}
733EXPORT_SYMBOL(inet_sendmsg); 736EXPORT_SYMBOL(inet_sendmsg);
734 737
735static ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset, 738ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
736 size_t size, int flags) 739 size_t size, int flags)
737{ 740{
738 struct sock *sk = sock->sk; 741 struct sock *sk = sock->sk;
739 742
740 sock_rps_record_flow(sk); 743 sock_rps_record_flow(sk);
741 744
742 /* We may need to bind the socket. */ 745 /* We may need to bind the socket. */
743 if (!inet_sk(sk)->inet_num && inet_autobind(sk)) 746 if (!inet_sk(sk)->inet_num && !sk->sk_prot->no_autobind &&
747 inet_autobind(sk))
744 return -EAGAIN; 748 return -EAGAIN;
745 749
746 if (sk->sk_prot->sendpage) 750 if (sk->sk_prot->sendpage)
747 return sk->sk_prot->sendpage(sk, page, offset, size, flags); 751 return sk->sk_prot->sendpage(sk, page, offset, size, flags);
748 return sock_no_sendpage(sock, page, offset, size, flags); 752 return sock_no_sendpage(sock, page, offset, size, flags);
749} 753}
754EXPORT_SYMBOL(inet_sendpage);
750 755
751int inet_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, 756int inet_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
752 size_t size, int flags) 757 size_t size, int flags)
@@ -892,10 +897,10 @@ const struct proto_ops inet_stream_ops = {
892 .shutdown = inet_shutdown, 897 .shutdown = inet_shutdown,
893 .setsockopt = sock_common_setsockopt, 898 .setsockopt = sock_common_setsockopt,
894 .getsockopt = sock_common_getsockopt, 899 .getsockopt = sock_common_getsockopt,
895 .sendmsg = tcp_sendmsg, 900 .sendmsg = inet_sendmsg,
896 .recvmsg = inet_recvmsg, 901 .recvmsg = inet_recvmsg,
897 .mmap = sock_no_mmap, 902 .mmap = sock_no_mmap,
898 .sendpage = tcp_sendpage, 903 .sendpage = inet_sendpage,
899 .splice_read = tcp_splice_read, 904 .splice_read = tcp_splice_read,
900#ifdef CONFIG_COMPAT 905#ifdef CONFIG_COMPAT
901 .compat_setsockopt = compat_sock_common_setsockopt, 906 .compat_setsockopt = compat_sock_common_setsockopt,
@@ -1100,7 +1105,7 @@ static int inet_sk_reselect_saddr(struct sock *sk)
1100 if (err) 1105 if (err)
1101 return err; 1106 return err;
1102 1107
1103 sk_setup_caps(sk, &rt->u.dst); 1108 sk_setup_caps(sk, &rt->dst);
1104 1109
1105 new_saddr = rt->rt_src; 1110 new_saddr = rt->rt_src;
1106 1111
@@ -1166,7 +1171,7 @@ int inet_sk_rebuild_header(struct sock *sk)
1166 err = ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0); 1171 err = ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0);
1167} 1172}
1168 if (!err) 1173 if (!err)
1169 sk_setup_caps(sk, &rt->u.dst); 1174 sk_setup_caps(sk, &rt->dst);
1170 else { 1175 else {
1171 /* Routing failed... */ 1176 /* Routing failed... */
1172 sk->sk_route_caps = 0; 1177 sk->sk_route_caps = 0;
@@ -1425,13 +1430,49 @@ unsigned long snmp_fold_field(void __percpu *mib[], int offt)
1425} 1430}
1426EXPORT_SYMBOL_GPL(snmp_fold_field); 1431EXPORT_SYMBOL_GPL(snmp_fold_field);
1427 1432
1428int snmp_mib_init(void __percpu *ptr[2], size_t mibsize) 1433#if BITS_PER_LONG==32
1434
1435u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp_offset)
1436{
1437 u64 res = 0;
1438 int cpu;
1439
1440 for_each_possible_cpu(cpu) {
1441 void *bhptr, *userptr;
1442 struct u64_stats_sync *syncp;
1443 u64 v_bh, v_user;
1444 unsigned int start;
1445
1446 /* first mib used by softirq context, we must use _bh() accessors */
1447 bhptr = per_cpu_ptr(SNMP_STAT_BHPTR(mib), cpu);
1448 syncp = (struct u64_stats_sync *)(bhptr + syncp_offset);
1449 do {
1450 start = u64_stats_fetch_begin_bh(syncp);
1451 v_bh = *(((u64 *) bhptr) + offt);
1452 } while (u64_stats_fetch_retry_bh(syncp, start));
1453
1454 /* second mib used in USER context */
1455 userptr = per_cpu_ptr(SNMP_STAT_USRPTR(mib), cpu);
1456 syncp = (struct u64_stats_sync *)(userptr + syncp_offset);
1457 do {
1458 start = u64_stats_fetch_begin(syncp);
1459 v_user = *(((u64 *) userptr) + offt);
1460 } while (u64_stats_fetch_retry(syncp, start));
1461
1462 res += v_bh + v_user;
1463 }
1464 return res;
1465}
1466EXPORT_SYMBOL_GPL(snmp_fold_field64);
1467#endif
1468
1469int snmp_mib_init(void __percpu *ptr[2], size_t mibsize, size_t align)
1429{ 1470{
1430 BUG_ON(ptr == NULL); 1471 BUG_ON(ptr == NULL);
1431 ptr[0] = __alloc_percpu(mibsize, __alignof__(unsigned long)); 1472 ptr[0] = __alloc_percpu(mibsize, align);
1432 if (!ptr[0]) 1473 if (!ptr[0])
1433 goto err0; 1474 goto err0;
1434 ptr[1] = __alloc_percpu(mibsize, __alignof__(unsigned long)); 1475 ptr[1] = __alloc_percpu(mibsize, align);
1435 if (!ptr[1]) 1476 if (!ptr[1])
1436 goto err1; 1477 goto err1;
1437 return 0; 1478 return 0;
@@ -1488,25 +1529,32 @@ static const struct net_protocol icmp_protocol = {
1488static __net_init int ipv4_mib_init_net(struct net *net) 1529static __net_init int ipv4_mib_init_net(struct net *net)
1489{ 1530{
1490 if (snmp_mib_init((void __percpu **)net->mib.tcp_statistics, 1531 if (snmp_mib_init((void __percpu **)net->mib.tcp_statistics,
1491 sizeof(struct tcp_mib)) < 0) 1532 sizeof(struct tcp_mib),
1533 __alignof__(struct tcp_mib)) < 0)
1492 goto err_tcp_mib; 1534 goto err_tcp_mib;
1493 if (snmp_mib_init((void __percpu **)net->mib.ip_statistics, 1535 if (snmp_mib_init((void __percpu **)net->mib.ip_statistics,
1494 sizeof(struct ipstats_mib)) < 0) 1536 sizeof(struct ipstats_mib),
1537 __alignof__(struct ipstats_mib)) < 0)
1495 goto err_ip_mib; 1538 goto err_ip_mib;
1496 if (snmp_mib_init((void __percpu **)net->mib.net_statistics, 1539 if (snmp_mib_init((void __percpu **)net->mib.net_statistics,
1497 sizeof(struct linux_mib)) < 0) 1540 sizeof(struct linux_mib),
1541 __alignof__(struct linux_mib)) < 0)
1498 goto err_net_mib; 1542 goto err_net_mib;
1499 if (snmp_mib_init((void __percpu **)net->mib.udp_statistics, 1543 if (snmp_mib_init((void __percpu **)net->mib.udp_statistics,
1500 sizeof(struct udp_mib)) < 0) 1544 sizeof(struct udp_mib),
1545 __alignof__(struct udp_mib)) < 0)
1501 goto err_udp_mib; 1546 goto err_udp_mib;
1502 if (snmp_mib_init((void __percpu **)net->mib.udplite_statistics, 1547 if (snmp_mib_init((void __percpu **)net->mib.udplite_statistics,
1503 sizeof(struct udp_mib)) < 0) 1548 sizeof(struct udp_mib),
1549 __alignof__(struct udp_mib)) < 0)
1504 goto err_udplite_mib; 1550 goto err_udplite_mib;
1505 if (snmp_mib_init((void __percpu **)net->mib.icmp_statistics, 1551 if (snmp_mib_init((void __percpu **)net->mib.icmp_statistics,
1506 sizeof(struct icmp_mib)) < 0) 1552 sizeof(struct icmp_mib),
1553 __alignof__(struct icmp_mib)) < 0)
1507 goto err_icmp_mib; 1554 goto err_icmp_mib;
1508 if (snmp_mib_init((void __percpu **)net->mib.icmpmsg_statistics, 1555 if (snmp_mib_init((void __percpu **)net->mib.icmpmsg_statistics,
1509 sizeof(struct icmpmsg_mib)) < 0) 1556 sizeof(struct icmpmsg_mib),
1557 __alignof__(struct icmpmsg_mib)) < 0)
1510 goto err_icmpmsg_mib; 1558 goto err_icmpmsg_mib;
1511 1559
1512 tcp_mib_init(net); 1560 tcp_mib_init(net);
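snmp_fold_field64() above is only the reader half of the u64_stats_sync scheme for 64-bit counters on 32-bit kernels; the writer half is not in this hunk, so here is a hedged sketch of it. The stats structure is hypothetical, the two helpers are the real <linux/u64_stats_sync.h> API, and on 64-bit both compile away.

    #include <linux/u64_stats_sync.h>

    struct example_mib {
            u64 in_octets;
            struct u64_stats_sync syncp;    /* seqcount on 32-bit SMP */
    };

    static void example_count(struct example_mib *mib, unsigned int len)
    {
            u64_stats_update_begin(&mib->syncp);    /* odd seq: readers retry */
            mib->in_octets += len;
            u64_stats_update_end(&mib->syncp);      /* even seq: value stable */
    }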
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index f094b75810db..96c1955b3e2f 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -116,6 +116,7 @@
116#if defined(CONFIG_ATM_CLIP) || defined(CONFIG_ATM_CLIP_MODULE) 116#if defined(CONFIG_ATM_CLIP) || defined(CONFIG_ATM_CLIP_MODULE)
117#include <net/atmclip.h> 117#include <net/atmclip.h>
118struct neigh_table *clip_tbl_hook; 118struct neigh_table *clip_tbl_hook;
119EXPORT_SYMBOL(clip_tbl_hook);
119#endif 120#endif
120 121
121#include <asm/system.h> 122#include <asm/system.h>
@@ -169,6 +170,7 @@ const struct neigh_ops arp_broken_ops = {
169 .hh_output = dev_queue_xmit, 170 .hh_output = dev_queue_xmit,
170 .queue_xmit = dev_queue_xmit, 171 .queue_xmit = dev_queue_xmit,
171}; 172};
173EXPORT_SYMBOL(arp_broken_ops);
172 174
173struct neigh_table arp_tbl = { 175struct neigh_table arp_tbl = {
174 .family = AF_INET, 176 .family = AF_INET,
@@ -198,6 +200,7 @@ struct neigh_table arp_tbl = {
198 .gc_thresh2 = 512, 200 .gc_thresh2 = 512,
199 .gc_thresh3 = 1024, 201 .gc_thresh3 = 1024,
200}; 202};
203EXPORT_SYMBOL(arp_tbl);
201 204
202int arp_mc_map(__be32 addr, u8 *haddr, struct net_device *dev, int dir) 205int arp_mc_map(__be32 addr, u8 *haddr, struct net_device *dev, int dir)
203{ 206{
@@ -333,11 +336,14 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
333 struct net_device *dev = neigh->dev; 336 struct net_device *dev = neigh->dev;
334 __be32 target = *(__be32*)neigh->primary_key; 337 __be32 target = *(__be32*)neigh->primary_key;
335 int probes = atomic_read(&neigh->probes); 338 int probes = atomic_read(&neigh->probes);
336 struct in_device *in_dev = in_dev_get(dev); 339 struct in_device *in_dev;
337 340
338 if (!in_dev) 341 rcu_read_lock();
342 in_dev = __in_dev_get_rcu(dev);
343 if (!in_dev) {
344 rcu_read_unlock();
339 return; 345 return;
340 346 }
341 switch (IN_DEV_ARP_ANNOUNCE(in_dev)) { 347 switch (IN_DEV_ARP_ANNOUNCE(in_dev)) {
342 default: 348 default:
343 case 0: /* By default announce any local IP */ 349 case 0: /* By default announce any local IP */
@@ -358,9 +364,8 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
358 case 2: /* Avoid secondary IPs, get a primary/preferred one */ 364 case 2: /* Avoid secondary IPs, get a primary/preferred one */
359 break; 365 break;
360 } 366 }
367 rcu_read_unlock();
361 368
362 if (in_dev)
363 in_dev_put(in_dev);
364 if (!saddr) 369 if (!saddr)
365 saddr = inet_select_addr(dev, target, RT_SCOPE_LINK); 370 saddr = inet_select_addr(dev, target, RT_SCOPE_LINK);
366 371
@@ -427,7 +432,7 @@ static int arp_filter(__be32 sip, __be32 tip, struct net_device *dev)
427 432
428 if (ip_route_output_key(net, &rt, &fl) < 0) 433 if (ip_route_output_key(net, &rt, &fl) < 0)
429 return 1; 434 return 1;
430 if (rt->u.dst.dev != dev) { 435 if (rt->dst.dev != dev) {
431 NET_INC_STATS_BH(net, LINUX_MIB_ARPFILTER); 436 NET_INC_STATS_BH(net, LINUX_MIB_ARPFILTER);
432 flag = 1; 437 flag = 1;
433 } 438 }
@@ -497,6 +502,7 @@ int arp_find(unsigned char *haddr, struct sk_buff *skb)
497 kfree_skb(skb); 502 kfree_skb(skb);
498 return 1; 503 return 1;
499} 504}
505EXPORT_SYMBOL(arp_find);
500 506
501/* END OF OBSOLETE FUNCTIONS */ 507/* END OF OBSOLETE FUNCTIONS */
502 508
@@ -532,7 +538,7 @@ static inline int arp_fwd_proxy(struct in_device *in_dev,
532 struct in_device *out_dev; 538 struct in_device *out_dev;
533 int imi, omi = -1; 539 int imi, omi = -1;
534 540
535 if (rt->u.dst.dev == dev) 541 if (rt->dst.dev == dev)
536 return 0; 542 return 0;
537 543
538 if (!IN_DEV_PROXY_ARP(in_dev)) 544 if (!IN_DEV_PROXY_ARP(in_dev))
@@ -545,10 +551,10 @@ static inline int arp_fwd_proxy(struct in_device *in_dev,
545 551
546 /* place to check for proxy_arp for routes */ 552 /* place to check for proxy_arp for routes */
547 553
548 if ((out_dev = in_dev_get(rt->u.dst.dev)) != NULL) { 554 out_dev = __in_dev_get_rcu(rt->dst.dev);
555 if (out_dev)
549 omi = IN_DEV_MEDIUM_ID(out_dev); 556 omi = IN_DEV_MEDIUM_ID(out_dev);
550 in_dev_put(out_dev); 557
551 }
552 return (omi != imi && omi != -1); 558 return (omi != imi && omi != -1);
553} 559}
554 560
@@ -576,7 +582,7 @@ static inline int arp_fwd_pvlan(struct in_device *in_dev,
576 __be32 sip, __be32 tip) 582 __be32 sip, __be32 tip)
577{ 583{
578 /* Private VLAN is only concerned about the same ethernet segment */ 584 /* Private VLAN is only concerned about the same ethernet segment */
579 if (rt->u.dst.dev != dev) 585 if (rt->dst.dev != dev)
580 return 0; 586 return 0;
581 587
582 /* Don't reply on self probes (often done by windowz boxes)*/ 588 /* Don't reply on self probes (often done by windowz boxes)*/
@@ -698,6 +704,7 @@ out:
698 kfree_skb(skb); 704 kfree_skb(skb);
699 return NULL; 705 return NULL;
700} 706}
707EXPORT_SYMBOL(arp_create);
701 708
702/* 709/*
703 * Send an arp packet. 710 * Send an arp packet.
@@ -707,6 +714,7 @@ void arp_xmit(struct sk_buff *skb)
707 /* Send it off, maybe filter it using firewalling first. */ 714 /* Send it off, maybe filter it using firewalling first. */
708 NF_HOOK(NFPROTO_ARP, NF_ARP_OUT, skb, NULL, skb->dev, dev_queue_xmit); 715 NF_HOOK(NFPROTO_ARP, NF_ARP_OUT, skb, NULL, skb->dev, dev_queue_xmit);
709} 716}
717EXPORT_SYMBOL(arp_xmit);
710 718
711/* 719/*
712 * Create and send an arp packet. 720 * Create and send an arp packet.
@@ -733,6 +741,7 @@ void arp_send(int type, int ptype, __be32 dest_ip,
733 741
734 arp_xmit(skb); 742 arp_xmit(skb);
735} 743}
744EXPORT_SYMBOL(arp_send);
736 745
737/* 746/*
738 * Process an arp request. 747 * Process an arp request.
@@ -741,7 +750,7 @@ void arp_send(int type, int ptype, __be32 dest_ip,
741static int arp_process(struct sk_buff *skb) 750static int arp_process(struct sk_buff *skb)
742{ 751{
743 struct net_device *dev = skb->dev; 752 struct net_device *dev = skb->dev;
744 struct in_device *in_dev = in_dev_get(dev); 753 struct in_device *in_dev = __in_dev_get_rcu(dev);
745 struct arphdr *arp; 754 struct arphdr *arp;
746 unsigned char *arp_ptr; 755 unsigned char *arp_ptr;
747 struct rtable *rt; 756 struct rtable *rt;
@@ -890,7 +899,6 @@ static int arp_process(struct sk_buff *skb)
890 arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha); 899 arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha);
891 } else { 900 } else {
892 pneigh_enqueue(&arp_tbl, in_dev->arp_parms, skb); 901 pneigh_enqueue(&arp_tbl, in_dev->arp_parms, skb);
893 in_dev_put(in_dev);
894 return 0; 902 return 0;
895 } 903 }
896 goto out; 904 goto out;
@@ -936,8 +944,6 @@ static int arp_process(struct sk_buff *skb)
936 } 944 }
937 945
938out: 946out:
939 if (in_dev)
940 in_dev_put(in_dev);
941 consume_skb(skb); 947 consume_skb(skb);
942 return 0; 948 return 0;
943} 949}
@@ -1045,7 +1051,7 @@ static int arp_req_set(struct net *net, struct arpreq *r,
1045 struct rtable * rt; 1051 struct rtable * rt;
1046 if ((err = ip_route_output_key(net, &rt, &fl)) != 0) 1052 if ((err = ip_route_output_key(net, &rt, &fl)) != 0)
1047 return err; 1053 return err;
1048 dev = rt->u.dst.dev; 1054 dev = rt->dst.dev;
1049 ip_rt_put(rt); 1055 ip_rt_put(rt);
1050 if (!dev) 1056 if (!dev)
1051 return -EINVAL; 1057 return -EINVAL;
@@ -1152,7 +1158,7 @@ static int arp_req_delete(struct net *net, struct arpreq *r,
1152 struct rtable * rt; 1158 struct rtable * rt;
1153 if ((err = ip_route_output_key(net, &rt, &fl)) != 0) 1159 if ((err = ip_route_output_key(net, &rt, &fl)) != 0)
1154 return err; 1160 return err;
1155 dev = rt->u.dst.dev; 1161 dev = rt->dst.dev;
1156 ip_rt_put(rt); 1162 ip_rt_put(rt);
1157 if (!dev) 1163 if (!dev)
1158 return -EINVAL; 1164 return -EINVAL;
@@ -1453,14 +1459,3 @@ static int __init arp_proc_init(void)
1453} 1459}
1454 1460
1455#endif /* CONFIG_PROC_FS */ 1461#endif /* CONFIG_PROC_FS */
1456
1457EXPORT_SYMBOL(arp_broken_ops);
1458EXPORT_SYMBOL(arp_find);
1459EXPORT_SYMBOL(arp_create);
1460EXPORT_SYMBOL(arp_xmit);
1461EXPORT_SYMBOL(arp_send);
1462EXPORT_SYMBOL(arp_tbl);
1463
1464#if defined(CONFIG_ATM_CLIP) || defined(CONFIG_ATM_CLIP_MODULE)
1465EXPORT_SYMBOL(clip_tbl_hook);
1466#endif
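The recurring conversion in arp_solicit(), arp_fwd_proxy() and arp_process() trades in_dev_get()/in_dev_put() reference counting for an RCU read-side lookup. The shape of the pattern, sketched with a hypothetical consumer (consume_in_dev() stands in for the real work):

    static void example_with_in_dev(struct net_device *dev)
    {
            struct in_device *in_dev;

            rcu_read_lock();
            in_dev = __in_dev_get_rcu(dev); /* no refcount taken */
            if (in_dev)
                    consume_in_dev(in_dev); /* must not sleep or stash in_dev */
            rcu_read_unlock();
    }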
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index fb2465811b48..f0550941df7b 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -69,9 +69,7 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
69 sk->sk_state = TCP_ESTABLISHED; 69 sk->sk_state = TCP_ESTABLISHED;
70 inet->inet_id = jiffies; 70 inet->inet_id = jiffies;
71 71
72 sk_dst_set(sk, &rt->u.dst); 72 sk_dst_set(sk, &rt->dst);
73 return(0); 73 return(0);
74} 74}
75
76EXPORT_SYMBOL(ip4_datagram_connect); 75EXPORT_SYMBOL(ip4_datagram_connect);
77
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 382bc768ed56..da14c49284f4 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1081,6 +1081,7 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
1081 } 1081 }
1082 ip_mc_up(in_dev); 1082 ip_mc_up(in_dev);
1083 /* fall through */ 1083 /* fall through */
1084 case NETDEV_NOTIFY_PEERS:
1084 case NETDEV_CHANGEADDR: 1085 case NETDEV_CHANGEADDR:
1085 /* Send gratuitous ARP to notify of link change */ 1086 /* Send gratuitous ARP to notify of link change */
1086 if (IN_DEV_ARP_NOTIFY(in_dev)) { 1087 if (IN_DEV_ARP_NOTIFY(in_dev)) {
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 4f0ed458c883..a43968918350 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -175,6 +175,7 @@ out:
175 fib_res_put(&res); 175 fib_res_put(&res);
176 return dev; 176 return dev;
177} 177}
178EXPORT_SYMBOL(ip_dev_find);
178 179
179/* 180/*
180 * Find address type as if only "dev" was present in the system. If 181 * Find address type as if only "dev" was present in the system. If
@@ -214,12 +215,14 @@ unsigned int inet_addr_type(struct net *net, __be32 addr)
214{ 215{
215 return __inet_dev_addr_type(net, NULL, addr); 216 return __inet_dev_addr_type(net, NULL, addr);
216} 217}
218EXPORT_SYMBOL(inet_addr_type);
217 219
218unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev, 220unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev,
219 __be32 addr) 221 __be32 addr)
220{ 222{
221 return __inet_dev_addr_type(net, dev, addr); 223 return __inet_dev_addr_type(net, dev, addr);
222} 224}
225EXPORT_SYMBOL(inet_dev_addr_type);
223 226
224/* Given (packet source, input interface) and optional (dst, oif, tos): 227/* Given (packet source, input interface) and optional (dst, oif, tos):
225 - (main) check, that source is valid i.e. not broadcast or our local 228 - (main) check, that source is valid i.e. not broadcast or our local
@@ -284,7 +287,7 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
284 if (no_addr) 287 if (no_addr)
285 goto last_resort; 288 goto last_resort;
286 if (rpf == 1) 289 if (rpf == 1)
287 goto e_inval; 290 goto e_rpf;
288 fl.oif = dev->ifindex; 291 fl.oif = dev->ifindex;
289 292
290 ret = 0; 293 ret = 0;
@@ -299,7 +302,7 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
299 302
300last_resort: 303last_resort:
301 if (rpf) 304 if (rpf)
302 goto e_inval; 305 goto e_rpf;
303 *spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE); 306 *spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
304 *itag = 0; 307 *itag = 0;
305 return 0; 308 return 0;
@@ -308,6 +311,8 @@ e_inval_res:
308 fib_res_put(&res); 311 fib_res_put(&res);
309e_inval: 312e_inval:
310 return -EINVAL; 313 return -EINVAL;
314e_rpf:
315 return -EXDEV;
311} 316}
312 317
313static inline __be32 sk_extract_addr(struct sockaddr *addr) 318static inline __be32 sk_extract_addr(struct sockaddr *addr)
@@ -1075,7 +1080,3 @@ void __init ip_fib_init(void)
1075 1080
1076 fib_hash_init(); 1081 fib_hash_init();
1077} 1082}
1078
1079EXPORT_SYMBOL(inet_addr_type);
1080EXPORT_SYMBOL(inet_dev_addr_type);
1081EXPORT_SYMBOL(ip_dev_find);
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index d65e9215bcd7..a0d847c7cba5 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -181,6 +181,7 @@ const struct icmp_err icmp_err_convert[] = {
181 .fatal = 1, 181 .fatal = 1,
182 }, 182 },
183}; 183};
184EXPORT_SYMBOL(icmp_err_convert);
184 185
185/* 186/*
186 * ICMP control array. This specifies what to do with each ICMP. 187 * ICMP control array. This specifies what to do with each ICMP.
@@ -267,11 +268,12 @@ int xrlim_allow(struct dst_entry *dst, int timeout)
267 dst->rate_tokens = token; 268 dst->rate_tokens = token;
268 return rc; 269 return rc;
269} 270}
271EXPORT_SYMBOL(xrlim_allow);
270 272
271static inline int icmpv4_xrlim_allow(struct net *net, struct rtable *rt, 273static inline int icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
272 int type, int code) 274 int type, int code)
273{ 275{
274 struct dst_entry *dst = &rt->u.dst; 276 struct dst_entry *dst = &rt->dst;
275 int rc = 1; 277 int rc = 1;
276 278
277 if (type > NR_ICMP_TYPES) 279 if (type > NR_ICMP_TYPES)
@@ -327,7 +329,7 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param,
327 struct sock *sk; 329 struct sock *sk;
328 struct sk_buff *skb; 330 struct sk_buff *skb;
329 331
330 sk = icmp_sk(dev_net((*rt)->u.dst.dev)); 332 sk = icmp_sk(dev_net((*rt)->dst.dev));
331 if (ip_append_data(sk, icmp_glue_bits, icmp_param, 333 if (ip_append_data(sk, icmp_glue_bits, icmp_param,
332 icmp_param->data_len+icmp_param->head_len, 334 icmp_param->data_len+icmp_param->head_len,
333 icmp_param->head_len, 335 icmp_param->head_len,
@@ -359,7 +361,7 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
359{ 361{
360 struct ipcm_cookie ipc; 362 struct ipcm_cookie ipc;
361 struct rtable *rt = skb_rtable(skb); 363 struct rtable *rt = skb_rtable(skb);
362 struct net *net = dev_net(rt->u.dst.dev); 364 struct net *net = dev_net(rt->dst.dev);
363 struct sock *sk; 365 struct sock *sk;
364 struct inet_sock *inet; 366 struct inet_sock *inet;
365 __be32 daddr; 367 __be32 daddr;
@@ -427,7 +429,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
427 429
428 if (!rt) 430 if (!rt)
429 goto out; 431 goto out;
430 net = dev_net(rt->u.dst.dev); 432 net = dev_net(rt->dst.dev);
431 433
432 /* 434 /*
433 * Find the original header. It is expected to be valid, of course. 435 * Find the original header. It is expected to be valid, of course.
@@ -596,9 +598,9 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
596 /* Ugh! */ 598 /* Ugh! */
597 orefdst = skb_in->_skb_refdst; /* save old refdst */ 599 orefdst = skb_in->_skb_refdst; /* save old refdst */
598 err = ip_route_input(skb_in, fl.fl4_dst, fl.fl4_src, 600 err = ip_route_input(skb_in, fl.fl4_dst, fl.fl4_src,
599 RT_TOS(tos), rt2->u.dst.dev); 601 RT_TOS(tos), rt2->dst.dev);
600 602
601 dst_release(&rt2->u.dst); 603 dst_release(&rt2->dst);
602 rt2 = skb_rtable(skb_in); 604 rt2 = skb_rtable(skb_in);
603 skb_in->_skb_refdst = orefdst; /* restore old refdst */ 605 skb_in->_skb_refdst = orefdst; /* restore old refdst */
604 } 606 }
@@ -610,7 +612,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
610 XFRM_LOOKUP_ICMP); 612 XFRM_LOOKUP_ICMP);
611 switch (err) { 613 switch (err) {
612 case 0: 614 case 0:
613 dst_release(&rt->u.dst); 615 dst_release(&rt->dst);
614 rt = rt2; 616 rt = rt2;
615 break; 617 break;
616 case -EPERM: 618 case -EPERM:
@@ -629,7 +631,7 @@ route_done:
629 631
630 /* RFC says return as much as we can without exceeding 576 bytes. */ 632 /* RFC says return as much as we can without exceeding 576 bytes. */
631 633
632 room = dst_mtu(&rt->u.dst); 634 room = dst_mtu(&rt->dst);
633 if (room > 576) 635 if (room > 576)
634 room = 576; 636 room = 576;
635 room -= sizeof(struct iphdr) + icmp_param.replyopts.optlen; 637 room -= sizeof(struct iphdr) + icmp_param.replyopts.optlen;
@@ -647,6 +649,7 @@ out_unlock:
647 icmp_xmit_unlock(sk); 649 icmp_xmit_unlock(sk);
648out:; 650out:;
649} 651}
652EXPORT_SYMBOL(icmp_send);
650 653
651 654
652/* 655/*
@@ -925,6 +928,7 @@ static void icmp_address(struct sk_buff *skb)
925/* 928/*
926 * RFC1812 (4.3.3.9). A router SHOULD listen all replies, and complain 929 * RFC1812 (4.3.3.9). A router SHOULD listen all replies, and complain
927 * loudly if an inconsistency is found. 930 * loudly if an inconsistency is found.
931 * called with rcu_read_lock()
928 */ 932 */
929 933
930static void icmp_address_reply(struct sk_buff *skb) 934static void icmp_address_reply(struct sk_buff *skb)
@@ -935,12 +939,12 @@ static void icmp_address_reply(struct sk_buff *skb)
935 struct in_ifaddr *ifa; 939 struct in_ifaddr *ifa;
936 940
937 if (skb->len < 4 || !(rt->rt_flags&RTCF_DIRECTSRC)) 941 if (skb->len < 4 || !(rt->rt_flags&RTCF_DIRECTSRC))
938 goto out; 942 return;
939 943
940 in_dev = in_dev_get(dev); 944 in_dev = __in_dev_get_rcu(dev);
941 if (!in_dev) 945 if (!in_dev)
942 goto out; 946 return;
943 rcu_read_lock(); 947
944 if (in_dev->ifa_list && 948 if (in_dev->ifa_list &&
945 IN_DEV_LOG_MARTIANS(in_dev) && 949 IN_DEV_LOG_MARTIANS(in_dev) &&
946 IN_DEV_FORWARD(in_dev)) { 950 IN_DEV_FORWARD(in_dev)) {
@@ -958,9 +962,6 @@ static void icmp_address_reply(struct sk_buff *skb)
958 mp, dev->name, &rt->rt_src); 962 mp, dev->name, &rt->rt_src);
959 } 963 }
960 } 964 }
961 rcu_read_unlock();
962 in_dev_put(in_dev);
963out:;
964} 965}
965 966
966static void icmp_discard(struct sk_buff *skb) 967static void icmp_discard(struct sk_buff *skb)
@@ -974,7 +975,7 @@ int icmp_rcv(struct sk_buff *skb)
974{ 975{
975 struct icmphdr *icmph; 976 struct icmphdr *icmph;
976 struct rtable *rt = skb_rtable(skb); 977 struct rtable *rt = skb_rtable(skb);
977 struct net *net = dev_net(rt->u.dst.dev); 978 struct net *net = dev_net(rt->dst.dev);
978 979
979 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { 980 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
980 struct sec_path *sp = skb_sec_path(skb); 981 struct sec_path *sp = skb_sec_path(skb);
@@ -1216,7 +1217,3 @@ int __init icmp_init(void)
1216{ 1217{
1217 return register_pernet_subsys(&icmp_sk_ops); 1218 return register_pernet_subsys(&icmp_sk_ops);
1218} 1219}
1219
1220EXPORT_SYMBOL(icmp_err_convert);
1221EXPORT_SYMBOL(icmp_send);
1222EXPORT_SYMBOL(xrlim_allow);
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 5fff865a4fa7..a1ad0e7180d2 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -312,7 +312,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
312 return NULL; 312 return NULL;
313 } 313 }
314 314
315 skb_dst_set(skb, &rt->u.dst); 315 skb_dst_set(skb, &rt->dst);
316 skb->dev = dev; 316 skb->dev = dev;
317 317
318 skb_reserve(skb, LL_RESERVED_SPACE(dev)); 318 skb_reserve(skb, LL_RESERVED_SPACE(dev));
@@ -330,7 +330,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
330 pip->saddr = rt->rt_src; 330 pip->saddr = rt->rt_src;
331 pip->protocol = IPPROTO_IGMP; 331 pip->protocol = IPPROTO_IGMP;
332 pip->tot_len = 0; /* filled in later */ 332 pip->tot_len = 0; /* filled in later */
333 ip_select_ident(pip, &rt->u.dst, NULL); 333 ip_select_ident(pip, &rt->dst, NULL);
334 ((u8*)&pip[1])[0] = IPOPT_RA; 334 ((u8*)&pip[1])[0] = IPOPT_RA;
335 ((u8*)&pip[1])[1] = 4; 335 ((u8*)&pip[1])[1] = 4;
336 ((u8*)&pip[1])[2] = 0; 336 ((u8*)&pip[1])[2] = 0;
@@ -660,7 +660,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
660 return -1; 660 return -1;
661 } 661 }
662 662
663 skb_dst_set(skb, &rt->u.dst); 663 skb_dst_set(skb, &rt->dst);
664 664
665 skb_reserve(skb, LL_RESERVED_SPACE(dev)); 665 skb_reserve(skb, LL_RESERVED_SPACE(dev));
666 666
@@ -676,7 +676,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
676 iph->daddr = dst; 676 iph->daddr = dst;
677 iph->saddr = rt->rt_src; 677 iph->saddr = rt->rt_src;
678 iph->protocol = IPPROTO_IGMP; 678 iph->protocol = IPPROTO_IGMP;
679 ip_select_ident(iph, &rt->u.dst, NULL); 679 ip_select_ident(iph, &rt->dst, NULL);
680 ((u8*)&iph[1])[0] = IPOPT_RA; 680 ((u8*)&iph[1])[0] = IPOPT_RA;
681 ((u8*)&iph[1])[1] = 4; 681 ((u8*)&iph[1])[1] = 4;
682 ((u8*)&iph[1])[2] = 0; 682 ((u8*)&iph[1])[2] = 0;
@@ -916,18 +916,19 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
916 read_unlock(&in_dev->mc_list_lock); 916 read_unlock(&in_dev->mc_list_lock);
917} 917}
918 918
919/* called in rcu_read_lock() section */
919int igmp_rcv(struct sk_buff *skb) 920int igmp_rcv(struct sk_buff *skb)
920{ 921{
921 /* This basically follows the spec line by line -- see RFC1112 */ 922 /* This basically follows the spec line by line -- see RFC1112 */
922 struct igmphdr *ih; 923 struct igmphdr *ih;
923 struct in_device *in_dev = in_dev_get(skb->dev); 924 struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
924 int len = skb->len; 925 int len = skb->len;
925 926
926 if (in_dev == NULL) 927 if (in_dev == NULL)
927 goto drop; 928 goto drop;
928 929
929 if (!pskb_may_pull(skb, sizeof(struct igmphdr))) 930 if (!pskb_may_pull(skb, sizeof(struct igmphdr)))
930 goto drop_ref; 931 goto drop;
931 932
932 switch (skb->ip_summed) { 933 switch (skb->ip_summed) {
933 case CHECKSUM_COMPLETE: 934 case CHECKSUM_COMPLETE:
@@ -937,7 +938,7 @@ int igmp_rcv(struct sk_buff *skb)
937 case CHECKSUM_NONE: 938 case CHECKSUM_NONE:
938 skb->csum = 0; 939 skb->csum = 0;
939 if (__skb_checksum_complete(skb)) 940 if (__skb_checksum_complete(skb))
940 goto drop_ref; 941 goto drop;
941 } 942 }
942 943
943 ih = igmp_hdr(skb); 944 ih = igmp_hdr(skb);
@@ -957,7 +958,6 @@ int igmp_rcv(struct sk_buff *skb)
957 break; 958 break;
958 case IGMP_PIM: 959 case IGMP_PIM:
959#ifdef CONFIG_IP_PIMSM_V1 960#ifdef CONFIG_IP_PIMSM_V1
960 in_dev_put(in_dev);
961 return pim_rcv_v1(skb); 961 return pim_rcv_v1(skb);
962#endif 962#endif
963 case IGMPV3_HOST_MEMBERSHIP_REPORT: 963 case IGMPV3_HOST_MEMBERSHIP_REPORT:
@@ -971,8 +971,6 @@ int igmp_rcv(struct sk_buff *skb)
971 break; 971 break;
972 } 972 }
973 973
974drop_ref:
975 in_dev_put(in_dev);
976drop: 974drop:
977 kfree_skb(skb); 975 kfree_skb(skb);
978 return 0; 976 return 0;
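The igmp_rcv() rework is the per-packet payoff of the receive path running inside an RCU read-side section: __in_dev_get_rcu() borrows the in_device instead of taking a reference, so the in_dev_put() calls and the dedicated drop_ref unwind label disappear. A kernel-style schematic of the two disciplines (not compilable in isolation):

/* before: the handler owned a reference */
void rcv_before(struct sk_buff *skb)
{
        struct in_device *in_dev = in_dev_get(skb->dev);        /* refcount++ */

        if (in_dev == NULL)
                goto drop;
        /* protocol work */
        in_dev_put(in_dev);             /* every exit path owes this drop */
drop:
        kfree_skb(skb);
}

/* after: the caller's rcu_read_lock() keeps in_dev alive */
void rcv_after(struct sk_buff *skb)
{
        struct in_device *in_dev = __in_dev_get_rcu(skb->dev);  /* borrowed */

        if (in_dev == NULL)
                goto drop;
        /* protocol work */
drop:
        kfree_skb(skb);                 /* no reference to release */
}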
@@ -1246,6 +1244,7 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
1246out: 1244out:
1247 return; 1245 return;
1248} 1246}
1247EXPORT_SYMBOL(ip_mc_inc_group);
1249 1248
1250/* 1249/*
1251 * Resend IGMP JOIN report; used for bonding. 1250 * Resend IGMP JOIN report; used for bonding.
@@ -1268,6 +1267,7 @@ void ip_mc_rejoin_group(struct ip_mc_list *im)
1268 igmp_ifc_event(in_dev); 1267 igmp_ifc_event(in_dev);
1269#endif 1268#endif
1270} 1269}
1270EXPORT_SYMBOL(ip_mc_rejoin_group);
1271 1271
1272/* 1272/*
1273 * A socket has left a multicast group on device dev 1273 * A socket has left a multicast group on device dev
@@ -1298,6 +1298,7 @@ void ip_mc_dec_group(struct in_device *in_dev, __be32 addr)
1298 } 1298 }
1299 } 1299 }
1300} 1300}
1301EXPORT_SYMBOL(ip_mc_dec_group);
1301 1302
1302/* Device changing type */ 1303/* Device changing type */
1303 1304
@@ -1427,7 +1428,7 @@ static struct in_device *ip_mc_find_dev(struct net *net, struct ip_mreqn *imr)
1427 } 1428 }
1428 1429
1429 if (!dev && !ip_route_output_key(net, &rt, &fl)) { 1430 if (!dev && !ip_route_output_key(net, &rt, &fl)) {
1430 dev = rt->u.dst.dev; 1431 dev = rt->dst.dev;
1431 ip_rt_put(rt); 1432 ip_rt_put(rt);
1432 } 1433 }
1433 if (dev) { 1434 if (dev) {
@@ -1646,8 +1647,7 @@ static int sf_setstate(struct ip_mc_list *pmc)
1646 if (dpsf->sf_inaddr == psf->sf_inaddr) 1647 if (dpsf->sf_inaddr == psf->sf_inaddr)
1647 break; 1648 break;
1648 if (!dpsf) { 1649 if (!dpsf) {
1649 dpsf = (struct ip_sf_list *) 1650 dpsf = kmalloc(sizeof(*dpsf), GFP_ATOMIC);
1650 kmalloc(sizeof(*dpsf), GFP_ATOMIC);
1651 if (!dpsf) 1651 if (!dpsf)
1652 continue; 1652 continue;
1653 *dpsf = *psf; 1653 *dpsf = *psf;
@@ -1807,6 +1807,7 @@ done:
1807 rtnl_unlock(); 1807 rtnl_unlock();
1808 return err; 1808 return err;
1809} 1809}
1810EXPORT_SYMBOL(ip_mc_join_group);
1810 1811
1811static void ip_sf_socklist_reclaim(struct rcu_head *rp) 1812static void ip_sf_socklist_reclaim(struct rcu_head *rp)
1812{ 1813{
@@ -2679,8 +2680,3 @@ int __init igmp_mc_proc_init(void)
2679 return register_pernet_subsys(&igmp_net_ops); 2680 return register_pernet_subsys(&igmp_net_ops);
2680} 2681}
2681#endif 2682#endif
2682
2683EXPORT_SYMBOL(ip_mc_dec_group);
2684EXPORT_SYMBOL(ip_mc_inc_group);
2685EXPORT_SYMBOL(ip_mc_join_group);
2686EXPORT_SYMBOL(ip_mc_rejoin_group);
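The EXPORT_SYMBOL() moves above (and the same pattern in the files that follow) apply the kernel style rule that an export annotates the definition it belongs to instead of being collected in a block at the bottom of the file, where it is easy to overlook when the function changes or moves. Illustrated with a hypothetical function:

int example_op(void)
{
        return 0;
}
EXPORT_SYMBOL(example_op);      /* the export sits with its definition */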
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 70eb3507c406..7174370b1195 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -84,7 +84,6 @@ int inet_csk_bind_conflict(const struct sock *sk,
84 } 84 }
85 return node != NULL; 85 return node != NULL;
86} 86}
87
88EXPORT_SYMBOL_GPL(inet_csk_bind_conflict); 87EXPORT_SYMBOL_GPL(inet_csk_bind_conflict);
89 88
90/* Obtain a reference to a local port for the given sock, 89/* Obtain a reference to a local port for the given sock,
@@ -212,7 +211,6 @@ fail:
212 local_bh_enable(); 211 local_bh_enable();
213 return ret; 212 return ret;
214} 213}
215
216EXPORT_SYMBOL_GPL(inet_csk_get_port); 214EXPORT_SYMBOL_GPL(inet_csk_get_port);
217 215
218/* 216/*
@@ -305,7 +303,6 @@ out_err:
305 *err = error; 303 *err = error;
306 goto out; 304 goto out;
307} 305}
308
309EXPORT_SYMBOL(inet_csk_accept); 306EXPORT_SYMBOL(inet_csk_accept);
310 307
311/* 308/*
@@ -327,7 +324,6 @@ void inet_csk_init_xmit_timers(struct sock *sk,
327 setup_timer(&sk->sk_timer, keepalive_handler, (unsigned long)sk); 324 setup_timer(&sk->sk_timer, keepalive_handler, (unsigned long)sk);
328 icsk->icsk_pending = icsk->icsk_ack.pending = 0; 325 icsk->icsk_pending = icsk->icsk_ack.pending = 0;
329} 326}
330
331EXPORT_SYMBOL(inet_csk_init_xmit_timers); 327EXPORT_SYMBOL(inet_csk_init_xmit_timers);
332 328
333void inet_csk_clear_xmit_timers(struct sock *sk) 329void inet_csk_clear_xmit_timers(struct sock *sk)
@@ -340,21 +336,18 @@ void inet_csk_clear_xmit_timers(struct sock *sk)
340 sk_stop_timer(sk, &icsk->icsk_delack_timer); 336 sk_stop_timer(sk, &icsk->icsk_delack_timer);
341 sk_stop_timer(sk, &sk->sk_timer); 337 sk_stop_timer(sk, &sk->sk_timer);
342} 338}
343
344EXPORT_SYMBOL(inet_csk_clear_xmit_timers); 339EXPORT_SYMBOL(inet_csk_clear_xmit_timers);
345 340
346void inet_csk_delete_keepalive_timer(struct sock *sk) 341void inet_csk_delete_keepalive_timer(struct sock *sk)
347{ 342{
348 sk_stop_timer(sk, &sk->sk_timer); 343 sk_stop_timer(sk, &sk->sk_timer);
349} 344}
350
351EXPORT_SYMBOL(inet_csk_delete_keepalive_timer); 345EXPORT_SYMBOL(inet_csk_delete_keepalive_timer);
352 346
353void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len) 347void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
354{ 348{
355 sk_reset_timer(sk, &sk->sk_timer, jiffies + len); 349 sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
356} 350}
357
358EXPORT_SYMBOL(inet_csk_reset_keepalive_timer); 351EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);
359 352
360struct dst_entry *inet_csk_route_req(struct sock *sk, 353struct dst_entry *inet_csk_route_req(struct sock *sk,
@@ -383,7 +376,7 @@ struct dst_entry *inet_csk_route_req(struct sock *sk,
383 goto no_route; 376 goto no_route;
384 if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) 377 if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
385 goto route_err; 378 goto route_err;
386 return &rt->u.dst; 379 return &rt->dst;
387 380
388route_err: 381route_err:
389 ip_rt_put(rt); 382 ip_rt_put(rt);
@@ -391,7 +384,6 @@ no_route:
391 IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES); 384 IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
392 return NULL; 385 return NULL;
393} 386}
394
395EXPORT_SYMBOL_GPL(inet_csk_route_req); 387EXPORT_SYMBOL_GPL(inet_csk_route_req);
396 388
397static inline u32 inet_synq_hash(const __be32 raddr, const __be16 rport, 389static inline u32 inet_synq_hash(const __be32 raddr, const __be16 rport,
@@ -433,7 +425,6 @@ struct request_sock *inet_csk_search_req(const struct sock *sk,
433 425
434 return req; 426 return req;
435} 427}
436
437EXPORT_SYMBOL_GPL(inet_csk_search_req); 428EXPORT_SYMBOL_GPL(inet_csk_search_req);
438 429
439void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req, 430void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
@@ -447,11 +438,11 @@ void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
447 reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout); 438 reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
448 inet_csk_reqsk_queue_added(sk, timeout); 439 inet_csk_reqsk_queue_added(sk, timeout);
449} 440}
441EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);
450 442
451/* Only thing we need from tcp.h */ 443/* Only thing we need from tcp.h */
452extern int sysctl_tcp_synack_retries; 444extern int sysctl_tcp_synack_retries;
453 445
454EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);
455 446
456/* Decide when to expire the request and when to resend SYN-ACK */ 447/* Decide when to expire the request and when to resend SYN-ACK */
457static inline void syn_ack_recalc(struct request_sock *req, const int thresh, 448static inline void syn_ack_recalc(struct request_sock *req, const int thresh,
@@ -569,7 +560,6 @@ void inet_csk_reqsk_queue_prune(struct sock *parent,
569 if (lopt->qlen) 560 if (lopt->qlen)
570 inet_csk_reset_keepalive_timer(parent, interval); 561 inet_csk_reset_keepalive_timer(parent, interval);
571} 562}
572
573EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_prune); 563EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_prune);
574 564
575struct sock *inet_csk_clone(struct sock *sk, const struct request_sock *req, 565struct sock *inet_csk_clone(struct sock *sk, const struct request_sock *req,
@@ -599,7 +589,6 @@ struct sock *inet_csk_clone(struct sock *sk, const struct request_sock *req,
599 } 589 }
600 return newsk; 590 return newsk;
601} 591}
602
603EXPORT_SYMBOL_GPL(inet_csk_clone); 592EXPORT_SYMBOL_GPL(inet_csk_clone);
604 593
605/* 594/*
@@ -630,7 +619,6 @@ void inet_csk_destroy_sock(struct sock *sk)
630 percpu_counter_dec(sk->sk_prot->orphan_count); 619 percpu_counter_dec(sk->sk_prot->orphan_count);
631 sock_put(sk); 620 sock_put(sk);
632} 621}
633
634EXPORT_SYMBOL(inet_csk_destroy_sock); 622EXPORT_SYMBOL(inet_csk_destroy_sock);
635 623
636int inet_csk_listen_start(struct sock *sk, const int nr_table_entries) 624int inet_csk_listen_start(struct sock *sk, const int nr_table_entries)
@@ -665,7 +653,6 @@ int inet_csk_listen_start(struct sock *sk, const int nr_table_entries)
665 __reqsk_queue_destroy(&icsk->icsk_accept_queue); 653 __reqsk_queue_destroy(&icsk->icsk_accept_queue);
666 return -EADDRINUSE; 654 return -EADDRINUSE;
667} 655}
668
669EXPORT_SYMBOL_GPL(inet_csk_listen_start); 656EXPORT_SYMBOL_GPL(inet_csk_listen_start);
670 657
671/* 658/*
@@ -720,7 +707,6 @@ void inet_csk_listen_stop(struct sock *sk)
720 } 707 }
721 WARN_ON(sk->sk_ack_backlog); 708 WARN_ON(sk->sk_ack_backlog);
722} 709}
723
724EXPORT_SYMBOL_GPL(inet_csk_listen_stop); 710EXPORT_SYMBOL_GPL(inet_csk_listen_stop);
725 711
726void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr) 712void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
@@ -732,7 +718,6 @@ void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
732 sin->sin_addr.s_addr = inet->inet_daddr; 718 sin->sin_addr.s_addr = inet->inet_daddr;
733 sin->sin_port = inet->inet_dport; 719 sin->sin_port = inet->inet_dport;
734} 720}
735
736EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr); 721EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr);
737 722
738#ifdef CONFIG_COMPAT 723#ifdef CONFIG_COMPAT
@@ -747,7 +732,6 @@ int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
747 return icsk->icsk_af_ops->getsockopt(sk, level, optname, 732 return icsk->icsk_af_ops->getsockopt(sk, level, optname,
748 optval, optlen); 733 optval, optlen);
749} 734}
750
751EXPORT_SYMBOL_GPL(inet_csk_compat_getsockopt); 735EXPORT_SYMBOL_GPL(inet_csk_compat_getsockopt);
752 736
753int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname, 737int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
@@ -761,6 +745,5 @@ int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
761 return icsk->icsk_af_ops->setsockopt(sk, level, optname, 745 return icsk->icsk_af_ops->setsockopt(sk, level, optname,
762 optval, optlen); 746 optval, optlen);
763} 747}
764
765EXPORT_SYMBOL_GPL(inet_csk_compat_setsockopt); 748EXPORT_SYMBOL_GPL(inet_csk_compat_setsockopt);
766#endif 749#endif
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index a2ca6aed763b..5ff2a51b6d0c 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -114,7 +114,6 @@ void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
114 fq->last_in |= INET_FRAG_COMPLETE; 114 fq->last_in |= INET_FRAG_COMPLETE;
115 } 115 }
116} 116}
117
118EXPORT_SYMBOL(inet_frag_kill); 117EXPORT_SYMBOL(inet_frag_kill);
119 118
120static inline void frag_kfree_skb(struct netns_frags *nf, struct inet_frags *f, 119static inline void frag_kfree_skb(struct netns_frags *nf, struct inet_frags *f,
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index d3e160a88219..fb7ad5a21ff3 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -99,7 +99,6 @@ void inet_put_port(struct sock *sk)
99 __inet_put_port(sk); 99 __inet_put_port(sk);
100 local_bh_enable(); 100 local_bh_enable();
101} 101}
102
103EXPORT_SYMBOL(inet_put_port); 102EXPORT_SYMBOL(inet_put_port);
104 103
105void __inet_inherit_port(struct sock *sk, struct sock *child) 104void __inet_inherit_port(struct sock *sk, struct sock *child)
@@ -116,7 +115,6 @@ void __inet_inherit_port(struct sock *sk, struct sock *child)
116 inet_csk(child)->icsk_bind_hash = tb; 115 inet_csk(child)->icsk_bind_hash = tb;
117 spin_unlock(&head->lock); 116 spin_unlock(&head->lock);
118} 117}
119
120EXPORT_SYMBOL_GPL(__inet_inherit_port); 118EXPORT_SYMBOL_GPL(__inet_inherit_port);
121 119
122static inline int compute_score(struct sock *sk, struct net *net, 120static inline int compute_score(struct sock *sk, struct net *net,
@@ -546,7 +544,6 @@ int inet_hash_connect(struct inet_timewait_death_row *death_row,
546 return __inet_hash_connect(death_row, sk, inet_sk_port_offset(sk), 544 return __inet_hash_connect(death_row, sk, inet_sk_port_offset(sk),
547 __inet_check_established, __inet_hash_nolisten); 545 __inet_check_established, __inet_hash_nolisten);
548} 546}
549
550EXPORT_SYMBOL_GPL(inet_hash_connect); 547EXPORT_SYMBOL_GPL(inet_hash_connect);
551 548
552void inet_hashinfo_init(struct inet_hashinfo *h) 549void inet_hashinfo_init(struct inet_hashinfo *h)
@@ -560,5 +557,4 @@ void inet_hashinfo_init(struct inet_hashinfo *h)
560 i + LISTENING_NULLS_BASE); 557 i + LISTENING_NULLS_BASE);
561 } 558 }
562} 559}
563
564EXPORT_SYMBOL_GPL(inet_hashinfo_init); 560EXPORT_SYMBOL_GPL(inet_hashinfo_init);
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index 6bcfe52a9c87..9ffa24b9a804 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -51,8 +51,8 @@
51 * lookups performed with disabled BHs. 51 * lookups performed with disabled BHs.
52 * 52 *
53 * Serialisation issues. 53 * Serialisation issues.
54 * 1. Nodes may appear in the tree only with the pool write lock held. 54 * 1. Nodes may appear in the tree only with the pool lock held.
55 * 2. Nodes may disappear from the tree only with the pool write lock held 55 * 2. Nodes may disappear from the tree only with the pool lock held
56 * AND reference count being 0. 56 * AND reference count being 0.
57 * 3. Nodes appear and disappear from the unused node list only under 57 * 3. Nodes appear and disappear from the unused node list only under
58 * "inet_peer_unused_lock". 58 * "inet_peer_unused_lock".
@@ -64,23 +64,31 @@
64 * usually under some other lock to prevent node disappearing 64 * usually under some other lock to prevent node disappearing
65 * dtime: unused node list lock 65 * dtime: unused node list lock
66 * v4daddr: unchangeable 66 * v4daddr: unchangeable
67 * ip_id_count: idlock 67 * ip_id_count: atomic value (no lock needed)
68 */ 68 */
69 69
70static struct kmem_cache *peer_cachep __read_mostly; 70static struct kmem_cache *peer_cachep __read_mostly;
71 71
72#define node_height(x) x->avl_height 72#define node_height(x) x->avl_height
73static struct inet_peer peer_fake_node = { 73
74 .avl_left = &peer_fake_node, 74#define peer_avl_empty ((struct inet_peer *)&peer_fake_node)
75 .avl_right = &peer_fake_node, 75static const struct inet_peer peer_fake_node = {
76 .avl_left = peer_avl_empty,
77 .avl_right = peer_avl_empty,
76 .avl_height = 0 78 .avl_height = 0
77}; 79};
78#define peer_avl_empty (&peer_fake_node) 80
79static struct inet_peer *peer_root = peer_avl_empty; 81static struct {
80static DEFINE_RWLOCK(peer_pool_lock); 82 struct inet_peer *root;
83 spinlock_t lock;
84 int total;
85} peers = {
86 .root = peer_avl_empty,
87 .lock = __SPIN_LOCK_UNLOCKED(peers.lock),
88 .total = 0,
89};
81#define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */ 90#define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */
82 91
83static int peer_total;
84/* Exported for sysctl_net_ipv4. */ 92/* Exported for sysctl_net_ipv4. */
85int inet_peer_threshold __read_mostly = 65536 + 128; /* start to throw entries more 93int inet_peer_threshold __read_mostly = 65536 + 128; /* start to throw entries more
86 * aggressively at this stage */ 94 * aggressively at this stage */
@@ -89,8 +97,13 @@ int inet_peer_maxttl __read_mostly = 10 * 60 * HZ; /* usual time to live: 10 min
89int inet_peer_gc_mintime __read_mostly = 10 * HZ; 97int inet_peer_gc_mintime __read_mostly = 10 * HZ;
90int inet_peer_gc_maxtime __read_mostly = 120 * HZ; 98int inet_peer_gc_maxtime __read_mostly = 120 * HZ;
91 99
92static LIST_HEAD(unused_peers); 100static struct {
93static DEFINE_SPINLOCK(inet_peer_unused_lock); 101 struct list_head list;
102 spinlock_t lock;
103} unused_peers = {
104 .list = LIST_HEAD_INIT(unused_peers.list),
105 .lock = __SPIN_LOCK_UNLOCKED(unused_peers.lock),
106};
94 107
95static void peer_check_expire(unsigned long dummy); 108static void peer_check_expire(unsigned long dummy);
96static DEFINE_TIMER(peer_periodic_timer, peer_check_expire, 0, 0); 109static DEFINE_TIMER(peer_periodic_timer, peer_check_expire, 0, 0);
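Folding the tree root, its lock and the entry counter into one static struct (and likewise for the unused list) is a locality and readability change: the three values are always touched together, so one initializer and one cache line beat three loose globals. The same idiom in self-contained userspace C, with illustrative names:

#include <pthread.h>

struct node;                            /* opaque tree node */

static struct {
        struct node *root;              /* protected by .lock */
        pthread_mutex_t lock;
        int total;                      /* number of entries */
} pool = {
        .root = NULL,
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .total = 0,
};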
@@ -116,7 +129,7 @@ void __init inet_initpeers(void)
116 129
117 peer_cachep = kmem_cache_create("inet_peer_cache", 130 peer_cachep = kmem_cache_create("inet_peer_cache",
118 sizeof(struct inet_peer), 131 sizeof(struct inet_peer),
119 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, 132 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
120 NULL); 133 NULL);
121 134
122 /* All the timers, started at system startup tend 135 /* All the timers, started at system startup tend
@@ -131,38 +144,69 @@ void __init inet_initpeers(void)
131/* Called with or without local BH being disabled. */ 144/* Called with or without local BH being disabled. */
132static void unlink_from_unused(struct inet_peer *p) 145static void unlink_from_unused(struct inet_peer *p)
133{ 146{
134 spin_lock_bh(&inet_peer_unused_lock); 147 if (!list_empty(&p->unused)) {
135 list_del_init(&p->unused); 148 spin_lock_bh(&unused_peers.lock);
136 spin_unlock_bh(&inet_peer_unused_lock); 149 list_del_init(&p->unused);
150 spin_unlock_bh(&unused_peers.lock);
151 }
137} 152}
138 153
139/* 154/*
140 * Called with local BH disabled and the pool lock held. 155 * Called with local BH disabled and the pool lock held.
141 * _stack is known to be NULL or not at compile time,
142 * so compiler will optimize the if (_stack) tests.
143 */ 156 */
144#define lookup(_daddr, _stack) \ 157#define lookup(_daddr, _stack) \
145({ \ 158({ \
146 struct inet_peer *u, **v; \ 159 struct inet_peer *u, **v; \
147 if (_stack != NULL) { \ 160 \
148 stackptr = _stack; \ 161 stackptr = _stack; \
149 *stackptr++ = &peer_root; \ 162 *stackptr++ = &peers.root; \
150 } \ 163 for (u = peers.root; u != peer_avl_empty; ) { \
151 for (u = peer_root; u != peer_avl_empty; ) { \
152 if (_daddr == u->v4daddr) \ 164 if (_daddr == u->v4daddr) \
153 break; \ 165 break; \
154 if ((__force __u32)_daddr < (__force __u32)u->v4daddr) \ 166 if ((__force __u32)_daddr < (__force __u32)u->v4daddr) \
155 v = &u->avl_left; \ 167 v = &u->avl_left; \
156 else \ 168 else \
157 v = &u->avl_right; \ 169 v = &u->avl_right; \
158 if (_stack != NULL) \ 170 *stackptr++ = v; \
159 *stackptr++ = v; \
160 u = *v; \ 171 u = *v; \
161 } \ 172 } \
162 u; \ 173 u; \
163}) 174})
164 175
165/* Called with local BH disabled and the pool write lock held. */ 176/*
177 * Called with rcu_read_lock_bh()
178 * Because we hold no lock against a writer, it's quite possible we fall
179 * into an endless loop.
180 * But every pointer we follow is guaranteed to be valid thanks to RCU.
181 * We exit from this function if the number of links exceeds PEER_MAXDEPTH.
182 */
183static struct inet_peer *lookup_rcu_bh(__be32 daddr)
184{
185 struct inet_peer *u = rcu_dereference_bh(peers.root);
186 int count = 0;
187
188 while (u != peer_avl_empty) {
189 if (daddr == u->v4daddr) {
190 /* Before taking a reference, check if this entry was
191 * deleted: unlink_from_pool() sets refcnt=-1 to make a
192 * distinction between an unused entry (refcnt=0) and
193 * a freed one.
194 */
195 if (unlikely(!atomic_add_unless(&u->refcnt, 1, -1)))
196 u = NULL;
197 return u;
198 }
199 if ((__force __u32)daddr < (__force __u32)u->v4daddr)
200 u = rcu_dereference_bh(u->avl_left);
201 else
202 u = rcu_dereference_bh(u->avl_right);
203 if (unlikely(++count == PEER_MAXDEPTH))
204 break;
205 }
206 return NULL;
207}
208
209/* Called with local BH disabled and the pool lock held. */
166#define lookup_rightempty(start) \ 210#define lookup_rightempty(start) \
167({ \ 211({ \
168 struct inet_peer *u, **v; \ 212 struct inet_peer *u, **v; \
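The delicate step in the new lockless lookup_rcu_bh() is taking a reference without racing the deleter: atomic_add_unless(&u->refcnt, 1, -1) increments the count unless it already holds the -1 "deleted" marker. A self-contained C11 analogue of just that step (the function name is illustrative, not kernel API):

#include <stdatomic.h>
#include <stdbool.h>

/* Increment *refcnt unless it holds the "deleted" marker -1.
 * Returns true when a reference was taken.
 */
static bool ref_get_unless_dead(atomic_int *refcnt)
{
        int old = atomic_load(refcnt);

        while (old != -1) {
                /* a failed CAS reloads old, so the loop re-tests it */
                if (atomic_compare_exchange_weak(refcnt, &old, old + 1))
                        return true;
        }
        return false;                   /* entry is already being freed */
}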
@@ -176,9 +220,10 @@ static void unlink_from_unused(struct inet_peer *p)
176 u; \ 220 u; \
177}) 221})
178 222
179/* Called with local BH disabled and the pool write lock held. 223/* Called with local BH disabled and the pool lock held.
180 * Variable names are the proof of operation correctness. 224 * Variable names are the proof of operation correctness.
181 * Look into mm/map_avl.c for more detail description of the ideas. */ 225 * Look into mm/map_avl.c for a more detailed description of the ideas.
226 */
182static void peer_avl_rebalance(struct inet_peer **stack[], 227static void peer_avl_rebalance(struct inet_peer **stack[],
183 struct inet_peer ***stackend) 228 struct inet_peer ***stackend)
184{ 229{
@@ -254,15 +299,21 @@ static void peer_avl_rebalance(struct inet_peer **stack[],
254 } 299 }
255} 300}
256 301
257/* Called with local BH disabled and the pool write lock held. */ 302/* Called with local BH disabled and the pool lock held. */
258#define link_to_pool(n) \ 303#define link_to_pool(n) \
259do { \ 304do { \
260 n->avl_height = 1; \ 305 n->avl_height = 1; \
261 n->avl_left = peer_avl_empty; \ 306 n->avl_left = peer_avl_empty; \
262 n->avl_right = peer_avl_empty; \ 307 n->avl_right = peer_avl_empty; \
308 smp_wmb(); /* lockless readers can catch us now */ \
263 **--stackptr = n; \ 309 **--stackptr = n; \
264 peer_avl_rebalance(stack, stackptr); \ 310 peer_avl_rebalance(stack, stackptr); \
265} while(0) 311} while (0)
312
313static void inetpeer_free_rcu(struct rcu_head *head)
314{
315 kmem_cache_free(peer_cachep, container_of(head, struct inet_peer, rcu));
316}
266 317
267/* May be called with local BH enabled. */ 318/* May be called with local BH enabled. */
268static void unlink_from_pool(struct inet_peer *p) 319static void unlink_from_pool(struct inet_peer *p)
@@ -271,13 +322,14 @@ static void unlink_from_pool(struct inet_peer *p)
271 322
272 do_free = 0; 323 do_free = 0;
273 324
274 write_lock_bh(&peer_pool_lock); 325 spin_lock_bh(&peers.lock);
275 /* Check the reference counter. It was artificially incremented by 1 326 /* Check the reference counter. It was artificially incremented by 1
276 * in cleanup() function to prevent sudden disappearing. If the 327 * in the cleanup() function to prevent it from suddenly disappearing. If we can
277 * reference count is still 1 then the node is referenced only as `p' 328 * atomically (because of lockless readers) take this last reference,
278 * here and from the pool. So under the exclusive pool lock it's safe 329 * it's safe to remove the node and free it later.
279 * to remove the node and free it later. */ 330 * We use refcnt=-1 to alert lockless readers this entry is deleted.
280 if (atomic_read(&p->refcnt) == 1) { 331 */
332 if (atomic_cmpxchg(&p->refcnt, 1, -1) == 1) {
281 struct inet_peer **stack[PEER_MAXDEPTH]; 333 struct inet_peer **stack[PEER_MAXDEPTH];
282 struct inet_peer ***stackptr, ***delp; 334 struct inet_peer ***stackptr, ***delp;
283 if (lookup(p->v4daddr, stack) != p) 335 if (lookup(p->v4daddr, stack) != p)
@@ -303,20 +355,21 @@ static void unlink_from_pool(struct inet_peer *p)
303 delp[1] = &t->avl_left; /* was &p->avl_left */ 355 delp[1] = &t->avl_left; /* was &p->avl_left */
304 } 356 }
305 peer_avl_rebalance(stack, stackptr); 357 peer_avl_rebalance(stack, stackptr);
306 peer_total--; 358 peers.total--;
307 do_free = 1; 359 do_free = 1;
308 } 360 }
309 write_unlock_bh(&peer_pool_lock); 361 spin_unlock_bh(&peers.lock);
310 362
311 if (do_free) 363 if (do_free)
312 kmem_cache_free(peer_cachep, p); 364 call_rcu_bh(&p->rcu, inetpeer_free_rcu);
313 else 365 else
314 /* The node is used again. Decrease the reference counter 366 /* The node is used again. Decrease the reference counter
315 * back. The loop "cleanup -> unlink_from_unused 367 * back. The loop "cleanup -> unlink_from_unused
316 * -> unlink_from_pool -> putpeer -> link_to_unused 368 * -> unlink_from_pool -> putpeer -> link_to_unused
317 * -> cleanup (for the same node)" 369 * -> cleanup (for the same node)"
318 * doesn't really exist because the entry will have a 370 * doesn't really exist because the entry will have a
319 * recent deletion time and will not be cleaned again soon. */ 371 * recent deletion time and will not be cleaned again soon.
372 */
320 inet_putpeer(p); 373 inet_putpeer(p);
321} 374}
322 375
@@ -326,16 +379,16 @@ static int cleanup_once(unsigned long ttl)
326 struct inet_peer *p = NULL; 379 struct inet_peer *p = NULL;
327 380
328 /* Remove the first entry from the list of unused nodes. */ 381 /* Remove the first entry from the list of unused nodes. */
329 spin_lock_bh(&inet_peer_unused_lock); 382 spin_lock_bh(&unused_peers.lock);
330 if (!list_empty(&unused_peers)) { 383 if (!list_empty(&unused_peers.list)) {
331 __u32 delta; 384 __u32 delta;
332 385
333 p = list_first_entry(&unused_peers, struct inet_peer, unused); 386 p = list_first_entry(&unused_peers.list, struct inet_peer, unused);
334 delta = (__u32)jiffies - p->dtime; 387 delta = (__u32)jiffies - p->dtime;
335 388
336 if (delta < ttl) { 389 if (delta < ttl) {
337 /* Do not prune fresh entries. */ 390 /* Do not prune fresh entries. */
338 spin_unlock_bh(&inet_peer_unused_lock); 391 spin_unlock_bh(&unused_peers.lock);
339 return -1; 392 return -1;
340 } 393 }
341 394
@@ -345,7 +398,7 @@ static int cleanup_once(unsigned long ttl)
345 * before unlink_from_pool() call. */ 398 * before unlink_from_pool() call. */
346 atomic_inc(&p->refcnt); 399 atomic_inc(&p->refcnt);
347 } 400 }
348 spin_unlock_bh(&inet_peer_unused_lock); 401 spin_unlock_bh(&unused_peers.lock);
349 402
350 if (p == NULL) 403 if (p == NULL)
351 /* It means that the total number of USED entries has 404 /* It means that the total number of USED entries has
@@ -360,62 +413,56 @@ static int cleanup_once(unsigned long ttl)
360/* Called with or without local BH being disabled. */ 413/* Called with or without local BH being disabled. */
361struct inet_peer *inet_getpeer(__be32 daddr, int create) 414struct inet_peer *inet_getpeer(__be32 daddr, int create)
362{ 415{
363 struct inet_peer *p, *n; 416 struct inet_peer *p;
364 struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr; 417 struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
365 418
366 /* Look up for the address quickly. */ 419 /* Look up the address quickly, locklessly.
367 read_lock_bh(&peer_pool_lock); 420 * Because of a concurrent writer, we might not find an existing entry.
368 p = lookup(daddr, NULL); 421 */
369 if (p != peer_avl_empty) 422 rcu_read_lock_bh();
370 atomic_inc(&p->refcnt); 423 p = lookup_rcu_bh(daddr);
371 read_unlock_bh(&peer_pool_lock); 424 rcu_read_unlock_bh();
425
426 if (p) {
427 /* The existing node has been found.
428 * Remove the entry from unused list if it was there.
429 */
430 unlink_from_unused(p);
431 return p;
432 }
372 433
434 /* Retry an exact lookup, taking the lock first.
435 * At least the nodes should now be hot in our cache.
436 */
437 spin_lock_bh(&peers.lock);
438 p = lookup(daddr, stack);
373 if (p != peer_avl_empty) { 439 if (p != peer_avl_empty) {
374 /* The existing node has been found. */ 440 atomic_inc(&p->refcnt);
441 spin_unlock_bh(&peers.lock);
375 /* Remove the entry from unused list if it was there. */ 442 /* Remove the entry from unused list if it was there. */
376 unlink_from_unused(p); 443 unlink_from_unused(p);
377 return p; 444 return p;
378 } 445 }
446 p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL;
447 if (p) {
448 p->v4daddr = daddr;
449 atomic_set(&p->refcnt, 1);
450 atomic_set(&p->rid, 0);
451 atomic_set(&p->ip_id_count, secure_ip_id(daddr));
452 p->tcp_ts_stamp = 0;
453 INIT_LIST_HEAD(&p->unused);
454
455
456 /* Link the node. */
457 link_to_pool(p);
458 peers.total++;
459 }
460 spin_unlock_bh(&peers.lock);
379 461
380 if (!create) 462 if (peers.total >= inet_peer_threshold)
381 return NULL;
382
383 /* Allocate the space outside the locked region. */
384 n = kmem_cache_alloc(peer_cachep, GFP_ATOMIC);
385 if (n == NULL)
386 return NULL;
387 n->v4daddr = daddr;
388 atomic_set(&n->refcnt, 1);
389 atomic_set(&n->rid, 0);
390 atomic_set(&n->ip_id_count, secure_ip_id(daddr));
391 n->tcp_ts_stamp = 0;
392
393 write_lock_bh(&peer_pool_lock);
394 /* Check if an entry has suddenly appeared. */
395 p = lookup(daddr, stack);
396 if (p != peer_avl_empty)
397 goto out_free;
398
399 /* Link the node. */
400 link_to_pool(n);
401 INIT_LIST_HEAD(&n->unused);
402 peer_total++;
403 write_unlock_bh(&peer_pool_lock);
404
405 if (peer_total >= inet_peer_threshold)
406 /* Remove one less-recently-used entry. */ 463 /* Remove one less-recently-used entry. */
407 cleanup_once(0); 464 cleanup_once(0);
408 465
409 return n;
410
411out_free:
412 /* The appropriate node is already in the pool. */
413 atomic_inc(&p->refcnt);
414 write_unlock_bh(&peer_pool_lock);
415 /* Remove the entry from unused list if it was there. */
416 unlink_from_unused(p);
417 /* Free preallocated the preallocated node. */
418 kmem_cache_free(peer_cachep, n);
419 return p; 466 return p;
420} 467}
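The rewritten inet_getpeer() is a double-checked lookup-or-insert: try the lockless lookup, and only on a miss take peers.lock, look again (another CPU may have inserted the entry in the window), and insert a fresh node while still holding the lock. A minimal userspace sketch of that control flow, using a plain list and mutex; a real lockless fast path additionally needs RCU plus the refcount trick above:

#include <pthread.h>
#include <stdlib.h>

struct peer {
        struct peer *next;
        unsigned int addr;
        int refcnt;                     /* atomic in the real code */
};

static struct peer *pool;
static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

static struct peer *pool_lookup(unsigned int addr)
{
        struct peer *p;

        for (p = pool; p; p = p->next)
                if (p->addr == addr)
                        return p;
        return NULL;
}

struct peer *get_or_create(unsigned int addr)
{
        struct peer *p;

        /* fast path; lockless (RCU) in the kernel, and allowed to
         * miss an entry that a concurrent writer is inserting
         */
        p = pool_lookup(addr);
        if (p) {
                p->refcnt++;            /* atomic_add_unless() upstream */
                return p;
        }

        pthread_mutex_lock(&pool_lock);
        p = pool_lookup(addr);          /* recheck: someone may have won */
        if (p) {
                p->refcnt++;
        } else {
                p = calloc(1, sizeof(*p));
                if (p) {
                        p->addr = addr;
                        p->refcnt = 1;
                        p->next = pool;
                        pool = p;       /* publish under the lock */
                }
        }
        pthread_mutex_unlock(&pool_lock);
        return p;
}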
421 468
@@ -425,12 +472,12 @@ static void peer_check_expire(unsigned long dummy)
425 unsigned long now = jiffies; 472 unsigned long now = jiffies;
426 int ttl; 473 int ttl;
427 474
428 if (peer_total >= inet_peer_threshold) 475 if (peers.total >= inet_peer_threshold)
429 ttl = inet_peer_minttl; 476 ttl = inet_peer_minttl;
430 else 477 else
431 ttl = inet_peer_maxttl 478 ttl = inet_peer_maxttl
432 - (inet_peer_maxttl - inet_peer_minttl) / HZ * 479 - (inet_peer_maxttl - inet_peer_minttl) / HZ *
433 peer_total / inet_peer_threshold * HZ; 480 peers.total / inet_peer_threshold * HZ;
434 while (!cleanup_once(ttl)) { 481 while (!cleanup_once(ttl)) {
435 if (jiffies != now) 482 if (jiffies != now)
436 break; 483 break;
@@ -439,22 +486,25 @@ static void peer_check_expire(unsigned long dummy)
439 /* Trigger the timer after inet_peer_gc_mintime .. inet_peer_gc_maxtime 486 /* Trigger the timer after inet_peer_gc_mintime .. inet_peer_gc_maxtime
440 * interval depending on the total number of entries (more entries, 487 * interval depending on the total number of entries (more entries,
441 * less interval). */ 488 * less interval). */
442 if (peer_total >= inet_peer_threshold) 489 if (peers.total >= inet_peer_threshold)
443 peer_periodic_timer.expires = jiffies + inet_peer_gc_mintime; 490 peer_periodic_timer.expires = jiffies + inet_peer_gc_mintime;
444 else 491 else
445 peer_periodic_timer.expires = jiffies 492 peer_periodic_timer.expires = jiffies
446 + inet_peer_gc_maxtime 493 + inet_peer_gc_maxtime
447 - (inet_peer_gc_maxtime - inet_peer_gc_mintime) / HZ * 494 - (inet_peer_gc_maxtime - inet_peer_gc_mintime) / HZ *
448 peer_total / inet_peer_threshold * HZ; 495 peers.total / inet_peer_threshold * HZ;
449 add_timer(&peer_periodic_timer); 496 add_timer(&peer_periodic_timer);
450} 497}
451 498
452void inet_putpeer(struct inet_peer *p) 499void inet_putpeer(struct inet_peer *p)
453{ 500{
454 spin_lock_bh(&inet_peer_unused_lock); 501 local_bh_disable();
455 if (atomic_dec_and_test(&p->refcnt)) { 502
456 list_add_tail(&p->unused, &unused_peers); 503 if (atomic_dec_and_lock(&p->refcnt, &unused_peers.lock)) {
504 list_add_tail(&p->unused, &unused_peers.list);
457 p->dtime = (__u32)jiffies; 505 p->dtime = (__u32)jiffies;
506 spin_unlock(&unused_peers.lock);
458 } 507 }
459 spin_unlock_bh(&inet_peer_unused_lock); 508
509 local_bh_enable();
460} 510}
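The new inet_putpeer() leans on atomic_dec_and_lock(): the unused-list lock is taken only when the decrement actually drops the count to zero, so the common put never touches it. A C11/pthreads analogue of that helper (illustrative, not kernel API):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

/* Decrement *cnt; if it reaches zero, return true with *lock held. */
static bool dec_and_lock(atomic_int *cnt, pthread_mutex_t *lock)
{
        int old = atomic_load(cnt);

        /* fast path: clearly not the last reference, skip the lock */
        while (old > 1) {
                if (atomic_compare_exchange_weak(cnt, &old, old - 1))
                        return false;
        }

        /* possibly the last reference: serialize with list users */
        pthread_mutex_lock(lock);
        if (atomic_fetch_sub(cnt, 1) == 1)
                return true;            /* count hit zero, lock held */
        pthread_mutex_unlock(lock);
        return false;
}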
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 56cdf68a074c..99461f09320f 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -87,16 +87,16 @@ int ip_forward(struct sk_buff *skb)
87 if (opt->is_strictroute && rt->rt_dst != rt->rt_gateway) 87 if (opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
88 goto sr_failed; 88 goto sr_failed;
89 89
90 if (unlikely(skb->len > dst_mtu(&rt->u.dst) && !skb_is_gso(skb) && 90 if (unlikely(skb->len > dst_mtu(&rt->dst) && !skb_is_gso(skb) &&
91 (ip_hdr(skb)->frag_off & htons(IP_DF))) && !skb->local_df) { 91 (ip_hdr(skb)->frag_off & htons(IP_DF))) && !skb->local_df) {
92 IP_INC_STATS(dev_net(rt->u.dst.dev), IPSTATS_MIB_FRAGFAILS); 92 IP_INC_STATS(dev_net(rt->dst.dev), IPSTATS_MIB_FRAGFAILS);
93 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, 93 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
94 htonl(dst_mtu(&rt->u.dst))); 94 htonl(dst_mtu(&rt->dst)));
95 goto drop; 95 goto drop;
96 } 96 }
97 97
98 /* We are about to mangle packet. Copy it! */ 98 /* We are about to mangle packet. Copy it! */
99 if (skb_cow(skb, LL_RESERVED_SPACE(rt->u.dst.dev)+rt->u.dst.header_len)) 99 if (skb_cow(skb, LL_RESERVED_SPACE(rt->dst.dev)+rt->dst.header_len))
100 goto drop; 100 goto drop;
101 iph = ip_hdr(skb); 101 iph = ip_hdr(skb);
102 102
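That test is the forwarding half of path MTU discovery (RFC 1191): an oversized, non-GSO packet with DF set cannot be fragmented, so it is dropped and the sender is told the usable MTU. The condition, spelled out schematically with the diff's own expressions:

if (skb->len > dst_mtu(&rt->dst) &&     /* does not fit the egress MTU */
    !skb_is_gso(skb) &&                 /* GSO resegments, so it is exempt */
    (ip_hdr(skb)->frag_off & htons(IP_DF)) &&   /* sender forbids fragments */
    !skb->local_df) {                   /* no local override */
        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
                  htonl(dst_mtu(&rt->dst)));    /* advertise the MTU */
        goto drop;
}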
@@ -113,7 +113,7 @@ int ip_forward(struct sk_buff *skb)
113 skb->priority = rt_tos2priority(iph->tos); 113 skb->priority = rt_tos2priority(iph->tos);
114 114
115 return NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, skb, skb->dev, 115 return NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, skb, skb->dev,
116 rt->u.dst.dev, ip_forward_finish); 116 rt->dst.dev, ip_forward_finish);
117 117
118sr_failed: 118sr_failed:
119 /* 119 /*
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 75347ea70ea0..b7c41654dde5 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -124,11 +124,8 @@ static int ip4_frag_match(struct inet_frag_queue *q, void *a)
124} 124}
125 125
126/* Memory Tracking Functions. */ 126/* Memory Tracking Functions. */
127static __inline__ void frag_kfree_skb(struct netns_frags *nf, 127static void frag_kfree_skb(struct netns_frags *nf, struct sk_buff *skb)
128 struct sk_buff *skb, int *work)
129{ 128{
130 if (work)
131 *work -= skb->truesize;
132 atomic_sub(skb->truesize, &nf->mem); 129 atomic_sub(skb->truesize, &nf->mem);
133 kfree_skb(skb); 130 kfree_skb(skb);
134} 131}
@@ -309,7 +306,7 @@ static int ip_frag_reinit(struct ipq *qp)
309 fp = qp->q.fragments; 306 fp = qp->q.fragments;
310 do { 307 do {
311 struct sk_buff *xp = fp->next; 308 struct sk_buff *xp = fp->next;
312 frag_kfree_skb(qp->q.net, fp, NULL); 309 frag_kfree_skb(qp->q.net, fp);
313 fp = xp; 310 fp = xp;
314 } while (fp); 311 } while (fp);
315 312
@@ -317,6 +314,7 @@ static int ip_frag_reinit(struct ipq *qp)
317 qp->q.len = 0; 314 qp->q.len = 0;
318 qp->q.meat = 0; 315 qp->q.meat = 0;
319 qp->q.fragments = NULL; 316 qp->q.fragments = NULL;
317 qp->q.fragments_tail = NULL;
320 qp->iif = 0; 318 qp->iif = 0;
321 319
322 return 0; 320 return 0;
@@ -389,6 +387,11 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
389 * in the chain of fragments so far. We must know where to put 387 * in the chain of fragments so far. We must know where to put
390 * this fragment, right? 388 * this fragment, right?
391 */ 389 */
390 prev = qp->q.fragments_tail;
391 if (!prev || FRAG_CB(prev)->offset < offset) {
392 next = NULL;
393 goto found;
394 }
392 prev = NULL; 395 prev = NULL;
393 for (next = qp->q.fragments; next != NULL; next = next->next) { 396 for (next = qp->q.fragments; next != NULL; next = next->next) {
394 if (FRAG_CB(next)->offset >= offset) 397 if (FRAG_CB(next)->offset >= offset)
@@ -396,6 +399,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
396 prev = next; 399 prev = next;
397 } 400 }
398 401
402found:
399 /* We found where to put this one. Check for overlap with 403 /* We found where to put this one. Check for overlap with
400 * preceding fragment, and, if needed, align things so that 404 * preceding fragment, and, if needed, align things so that
401 * any overlaps are eliminated. 405 * any overlaps are eliminated.
@@ -446,7 +450,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
446 qp->q.fragments = next; 450 qp->q.fragments = next;
447 451
448 qp->q.meat -= free_it->len; 452 qp->q.meat -= free_it->len;
449 frag_kfree_skb(qp->q.net, free_it, NULL); 453 frag_kfree_skb(qp->q.net, free_it);
450 } 454 }
451 } 455 }
452 456
@@ -454,6 +458,8 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
454 458
455 /* Insert this fragment in the chain of fragments. */ 459 /* Insert this fragment in the chain of fragments. */
456 skb->next = next; 460 skb->next = next;
461 if (!next)
462 qp->q.fragments_tail = skb;
457 if (prev) 463 if (prev)
458 prev->next = skb; 464 prev->next = skb;
459 else 465 else
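The fragments_tail pointer threaded through ip_fragment.c exploits the fact that fragments almost always arrive in order: when the newcomer belongs after the current tail, insertion is O(1) and the list walk is skipped. A simplified, self-contained sketch of the insertion logic (the real code's overlap trimming and sk_buff types are omitted):

struct frag {
        struct frag *next;
        unsigned int offset;
};

struct frag_queue {
        struct frag *head;
        struct frag *tail;
};

/* insert f, keeping the list sorted by offset */
static void frag_insert(struct frag_queue *q, struct frag *f)
{
        struct frag *prev, *next;

        if (!q->tail || q->tail->offset < f->offset) {
                /* common case: belongs after the current tail */
                prev = q->tail;
                next = NULL;
        } else {
                /* out of order: walk from the head to find the slot */
                prev = NULL;
                for (next = q->head; next; next = next->next) {
                        if (next->offset >= f->offset)
                                break;
                        prev = next;
                }
        }

        f->next = next;
        if (!next)
                q->tail = f;            /* new tail */
        if (prev)
                prev->next = f;
        else
                q->head = f;
}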
@@ -507,6 +513,8 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
507 goto out_nomem; 513 goto out_nomem;
508 514
509 fp->next = head->next; 515 fp->next = head->next;
516 if (!fp->next)
517 qp->q.fragments_tail = fp;
510 prev->next = fp; 518 prev->next = fp;
511 519
512 skb_morph(head, qp->q.fragments); 520 skb_morph(head, qp->q.fragments);
@@ -556,7 +564,6 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
556 564
557 skb_shinfo(head)->frag_list = head->next; 565 skb_shinfo(head)->frag_list = head->next;
558 skb_push(head, head->data - skb_network_header(head)); 566 skb_push(head, head->data - skb_network_header(head));
559 atomic_sub(head->truesize, &qp->q.net->mem);
560 567
561 for (fp=head->next; fp; fp = fp->next) { 568 for (fp=head->next; fp; fp = fp->next) {
562 head->data_len += fp->len; 569 head->data_len += fp->len;
@@ -566,8 +573,8 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
566 else if (head->ip_summed == CHECKSUM_COMPLETE) 573 else if (head->ip_summed == CHECKSUM_COMPLETE)
567 head->csum = csum_add(head->csum, fp->csum); 574 head->csum = csum_add(head->csum, fp->csum);
568 head->truesize += fp->truesize; 575 head->truesize += fp->truesize;
569 atomic_sub(fp->truesize, &qp->q.net->mem);
570 } 576 }
577 atomic_sub(head->truesize, &qp->q.net->mem);
571 578
572 head->next = NULL; 579 head->next = NULL;
573 head->dev = dev; 580 head->dev = dev;
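Moving the atomic_sub() below the loop in ip_frag_reasm() is a small but real win: head->truesize accumulates every fragment's truesize while the frag list is glued together, so a single subtraction of the final total replaces one atomic operation per fragment on the shared per-namespace memory counter. The resulting shape:

for (fp = head->next; fp; fp = fp->next) {
        head->data_len += fp->len;
        head->len += fp->len;
        /* checksum folding elided */
        head->truesize += fp->truesize;
}
atomic_sub(head->truesize, &qp->q.net->mem);    /* one atomic, not N+1 */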
@@ -578,6 +585,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
578 iph->tot_len = htons(len); 585 iph->tot_len = htons(len);
579 IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS); 586 IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
580 qp->q.fragments = NULL; 587 qp->q.fragments = NULL;
588 qp->q.fragments_tail = NULL;
581 return 0; 589 return 0;
582 590
583out_nomem: 591out_nomem:
@@ -624,6 +632,7 @@ int ip_defrag(struct sk_buff *skb, u32 user)
624 kfree_skb(skb); 632 kfree_skb(skb);
625 return -ENOMEM; 633 return -ENOMEM;
626} 634}
635EXPORT_SYMBOL(ip_defrag);
627 636
628#ifdef CONFIG_SYSCTL 637#ifdef CONFIG_SYSCTL
629static int zero; 638static int zero;
@@ -777,5 +786,3 @@ void __init ipfrag_init(void)
777 ip4_frags.secret_interval = 10 * 60 * HZ; 786 ip4_frags.secret_interval = 10 * 60 * HZ;
778 inet_frags_init(&ip4_frags); 787 inet_frags_init(&ip4_frags);
779} 788}
780
781EXPORT_SYMBOL(ip_defrag);
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 32618e11076d..945b20a5ad50 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -731,6 +731,8 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
731 tos = 0; 731 tos = 0;
732 if (skb->protocol == htons(ETH_P_IP)) 732 if (skb->protocol == htons(ETH_P_IP))
733 tos = old_iph->tos; 733 tos = old_iph->tos;
734 else if (skb->protocol == htons(ETH_P_IPV6))
735 tos = ipv6_get_dsfield((struct ipv6hdr *)old_iph);
734 } 736 }
735 737
736 { 738 {
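The new ETH_P_IPV6 branch lets the GRE tunnel propagate the inner DS field into the outer IPv4 TOS for IPv6 payloads as well. The IPv6 traffic class straddles the first two header bytes, which is what ipv6_get_dsfield() extracts; a rough self-contained equivalent:

/* The IPv6 header starts version:4, traffic class:8, flow label:20.
 * The DS field is the low nibble of byte 0 joined with the high
 * nibble of byte 1.
 */
static unsigned char ipv6_dsfield_sketch(const unsigned char *ipv6h)
{
        return (unsigned char)((ipv6h[0] << 4) | (ipv6h[1] >> 4));
}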
@@ -745,7 +747,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
745 goto tx_error; 747 goto tx_error;
746 } 748 }
747 } 749 }
748 tdev = rt->u.dst.dev; 750 tdev = rt->dst.dev;
749 751
750 if (tdev == dev) { 752 if (tdev == dev) {
751 ip_rt_put(rt); 753 ip_rt_put(rt);
@@ -755,7 +757,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
755 757
756 df = tiph->frag_off; 758 df = tiph->frag_off;
757 if (df) 759 if (df)
758 mtu = dst_mtu(&rt->u.dst) - dev->hard_header_len - tunnel->hlen; 760 mtu = dst_mtu(&rt->dst) - dev->hard_header_len - tunnel->hlen;
759 else 761 else
760 mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu; 762 mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
761 763
@@ -803,7 +805,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
803 tunnel->err_count = 0; 805 tunnel->err_count = 0;
804 } 806 }
805 807
806 max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + rt->u.dst.header_len; 808 max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + rt->dst.header_len;
807 809
808 if (skb_headroom(skb) < max_headroom || skb_shared(skb)|| 810 if (skb_headroom(skb) < max_headroom || skb_shared(skb)||
809 (skb_cloned(skb) && !skb_clone_writable(skb, 0))) { 811 (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
@@ -830,7 +832,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
830 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | 832 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
831 IPSKB_REROUTED); 833 IPSKB_REROUTED);
832 skb_dst_drop(skb); 834 skb_dst_drop(skb);
833 skb_dst_set(skb, &rt->u.dst); 835 skb_dst_set(skb, &rt->dst);
834 836
835 /* 837 /*
836 * Push down and install the IPIP header. 838 * Push down and install the IPIP header.
@@ -853,7 +855,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
853 iph->ttl = ((struct ipv6hdr *)old_iph)->hop_limit; 855 iph->ttl = ((struct ipv6hdr *)old_iph)->hop_limit;
854#endif 856#endif
855 else 857 else
856 iph->ttl = dst_metric(&rt->u.dst, RTAX_HOPLIMIT); 858 iph->ttl = dst_metric(&rt->dst, RTAX_HOPLIMIT);
857 } 859 }
858 860
859 ((__be16 *)(iph + 1))[0] = tunnel->parms.o_flags; 861 ((__be16 *)(iph + 1))[0] = tunnel->parms.o_flags;
@@ -915,7 +917,7 @@ static int ipgre_tunnel_bind_dev(struct net_device *dev)
915 .proto = IPPROTO_GRE }; 917 .proto = IPPROTO_GRE };
916 struct rtable *rt; 918 struct rtable *rt;
917 if (!ip_route_output_key(dev_net(dev), &rt, &fl)) { 919 if (!ip_route_output_key(dev_net(dev), &rt, &fl)) {
918 tdev = rt->u.dst.dev; 920 tdev = rt->dst.dev;
919 ip_rt_put(rt); 921 ip_rt_put(rt);
920 } 922 }
921 923
@@ -1174,7 +1176,7 @@ static int ipgre_open(struct net_device *dev)
1174 struct rtable *rt; 1176 struct rtable *rt;
1175 if (ip_route_output_key(dev_net(dev), &rt, &fl)) 1177 if (ip_route_output_key(dev_net(dev), &rt, &fl))
1176 return -EADDRNOTAVAIL; 1178 return -EADDRNOTAVAIL;
1177 dev = rt->u.dst.dev; 1179 dev = rt->dst.dev;
1178 ip_rt_put(rt); 1180 ip_rt_put(rt);
1179 if (__in_dev_get_rtnl(dev) == NULL) 1181 if (__in_dev_get_rtnl(dev) == NULL)
1180 return -EADDRNOTAVAIL; 1182 return -EADDRNOTAVAIL;
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index d930dc5e4d85..d859bcc26cb7 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -146,7 +146,7 @@
146#include <linux/netlink.h> 146#include <linux/netlink.h>
147 147
148/* 148/*
149 * Process Router Attention IP option 149 * Process the IP Router Alert option (RFC 2113)
150 */ 150 */
151int ip_call_ra_chain(struct sk_buff *skb) 151int ip_call_ra_chain(struct sk_buff *skb)
152{ 152{
@@ -155,8 +155,7 @@ int ip_call_ra_chain(struct sk_buff *skb)
155 struct sock *last = NULL; 155 struct sock *last = NULL;
156 struct net_device *dev = skb->dev; 156 struct net_device *dev = skb->dev;
157 157
158 read_lock(&ip_ra_lock); 158 for (ra = rcu_dereference(ip_ra_chain); ra; ra = rcu_dereference(ra->next)) {
159 for (ra = ip_ra_chain; ra; ra = ra->next) {
160 struct sock *sk = ra->sk; 159 struct sock *sk = ra->sk;
161 160
162 /* If socket is bound to an interface, only report 161 /* If socket is bound to an interface, only report
@@ -167,10 +166,8 @@ int ip_call_ra_chain(struct sk_buff *skb)
167 sk->sk_bound_dev_if == dev->ifindex) && 166 sk->sk_bound_dev_if == dev->ifindex) &&
168 net_eq(sock_net(sk), dev_net(dev))) { 167 net_eq(sock_net(sk), dev_net(dev))) {
169 if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) { 168 if (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) {
170 if (ip_defrag(skb, IP_DEFRAG_CALL_RA_CHAIN)) { 169 if (ip_defrag(skb, IP_DEFRAG_CALL_RA_CHAIN))
171 read_unlock(&ip_ra_lock);
172 return 1; 170 return 1;
173 }
174 } 171 }
175 if (last) { 172 if (last) {
176 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); 173 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
@@ -183,10 +180,8 @@ int ip_call_ra_chain(struct sk_buff *skb)
183 180
184 if (last) { 181 if (last) {
185 raw_rcv(last, skb); 182 raw_rcv(last, skb);
186 read_unlock(&ip_ra_lock);
187 return 1; 183 return 1;
188 } 184 }
189 read_unlock(&ip_ra_lock);
190 return 0; 185 return 0;
191} 186}
192 187
@@ -298,18 +293,16 @@ static inline int ip_rcv_options(struct sk_buff *skb)
298 } 293 }
299 294
300 if (unlikely(opt->srr)) { 295 if (unlikely(opt->srr)) {
301 struct in_device *in_dev = in_dev_get(dev); 296 struct in_device *in_dev = __in_dev_get_rcu(dev);
297
302 if (in_dev) { 298 if (in_dev) {
303 if (!IN_DEV_SOURCE_ROUTE(in_dev)) { 299 if (!IN_DEV_SOURCE_ROUTE(in_dev)) {
304 if (IN_DEV_LOG_MARTIANS(in_dev) && 300 if (IN_DEV_LOG_MARTIANS(in_dev) &&
305 net_ratelimit()) 301 net_ratelimit())
306 printk(KERN_INFO "source route option %pI4 -> %pI4\n", 302 printk(KERN_INFO "source route option %pI4 -> %pI4\n",
307 &iph->saddr, &iph->daddr); 303 &iph->saddr, &iph->daddr);
308 in_dev_put(in_dev);
309 goto drop; 304 goto drop;
310 } 305 }
311
312 in_dev_put(in_dev);
313 } 306 }
314 307
315 if (ip_options_rcv_srr(skb)) 308 if (ip_options_rcv_srr(skb))
@@ -340,13 +333,16 @@ static int ip_rcv_finish(struct sk_buff *skb)
340 else if (err == -ENETUNREACH) 333 else if (err == -ENETUNREACH)
341 IP_INC_STATS_BH(dev_net(skb->dev), 334 IP_INC_STATS_BH(dev_net(skb->dev),
342 IPSTATS_MIB_INNOROUTES); 335 IPSTATS_MIB_INNOROUTES);
336 else if (err == -EXDEV)
337 NET_INC_STATS_BH(dev_net(skb->dev),
338 LINUX_MIB_IPRPFILTER);
343 goto drop; 339 goto drop;
344 } 340 }
345 } 341 }
346 342
347#ifdef CONFIG_NET_CLS_ROUTE 343#ifdef CONFIG_NET_CLS_ROUTE
348 if (unlikely(skb_dst(skb)->tclassid)) { 344 if (unlikely(skb_dst(skb)->tclassid)) {
349 struct ip_rt_acct *st = per_cpu_ptr(ip_rt_acct, smp_processor_id()); 345 struct ip_rt_acct *st = this_cpu_ptr(ip_rt_acct);
350 u32 idx = skb_dst(skb)->tclassid; 346 u32 idx = skb_dst(skb)->tclassid;
351 st[idx&0xFF].o_packets++; 347 st[idx&0xFF].o_packets++;
352 st[idx&0xFF].o_bytes += skb->len; 348 st[idx&0xFF].o_bytes += skb->len;
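The per_cpu_ptr(ptr, smp_processor_id()) to this_cpu_ptr(ptr) substitution is behaviour preserving; the shorter form also lets architectures with a per-CPU segment register resolve the address in one instruction instead of first computing the CPU id:

struct ip_rt_acct *st;

st = per_cpu_ptr(ip_rt_acct, smp_processor_id());       /* before */
st = this_cpu_ptr(ip_rt_acct);                          /* after, same slot */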
@@ -360,10 +356,10 @@ static int ip_rcv_finish(struct sk_buff *skb)
360 356
361 rt = skb_rtable(skb); 357 rt = skb_rtable(skb);
362 if (rt->rt_type == RTN_MULTICAST) { 358 if (rt->rt_type == RTN_MULTICAST) {
363 IP_UPD_PO_STATS_BH(dev_net(rt->u.dst.dev), IPSTATS_MIB_INMCAST, 359 IP_UPD_PO_STATS_BH(dev_net(rt->dst.dev), IPSTATS_MIB_INMCAST,
364 skb->len); 360 skb->len);
365 } else if (rt->rt_type == RTN_BROADCAST) 361 } else if (rt->rt_type == RTN_BROADCAST)
366 IP_UPD_PO_STATS_BH(dev_net(rt->u.dst.dev), IPSTATS_MIB_INBCAST, 362 IP_UPD_PO_STATS_BH(dev_net(rt->dst.dev), IPSTATS_MIB_INBCAST,
367 skb->len); 363 skb->len);
368 364
369 return dst_input(skb); 365 return dst_input(skb);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 9a4a6c96cb0d..04b69896df5f 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -89,6 +89,7 @@ __inline__ void ip_send_check(struct iphdr *iph)
89 iph->check = 0; 89 iph->check = 0;
90 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); 90 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
91} 91}
92EXPORT_SYMBOL(ip_send_check);
92 93
93int __ip_local_out(struct sk_buff *skb) 94int __ip_local_out(struct sk_buff *skb)
94{ 95{
@@ -151,15 +152,15 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
151 iph->version = 4; 152 iph->version = 4;
152 iph->ihl = 5; 153 iph->ihl = 5;
153 iph->tos = inet->tos; 154 iph->tos = inet->tos;
154 if (ip_dont_fragment(sk, &rt->u.dst)) 155 if (ip_dont_fragment(sk, &rt->dst))
155 iph->frag_off = htons(IP_DF); 156 iph->frag_off = htons(IP_DF);
156 else 157 else
157 iph->frag_off = 0; 158 iph->frag_off = 0;
158 iph->ttl = ip_select_ttl(inet, &rt->u.dst); 159 iph->ttl = ip_select_ttl(inet, &rt->dst);
159 iph->daddr = rt->rt_dst; 160 iph->daddr = rt->rt_dst;
160 iph->saddr = rt->rt_src; 161 iph->saddr = rt->rt_src;
161 iph->protocol = sk->sk_protocol; 162 iph->protocol = sk->sk_protocol;
162 ip_select_ident(iph, &rt->u.dst, sk); 163 ip_select_ident(iph, &rt->dst, sk);
163 164
164 if (opt && opt->optlen) { 165 if (opt && opt->optlen) {
165 iph->ihl += opt->optlen>>2; 166 iph->ihl += opt->optlen>>2;
@@ -172,7 +173,6 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
172 /* Send it out. */ 173 /* Send it out. */
173 return ip_local_out(skb); 174 return ip_local_out(skb);
174} 175}
175
176EXPORT_SYMBOL_GPL(ip_build_and_send_pkt); 176EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
177 177
178static inline int ip_finish_output2(struct sk_buff *skb) 178static inline int ip_finish_output2(struct sk_buff *skb)
@@ -240,7 +240,7 @@ int ip_mc_output(struct sk_buff *skb)
240{ 240{
241 struct sock *sk = skb->sk; 241 struct sock *sk = skb->sk;
242 struct rtable *rt = skb_rtable(skb); 242 struct rtable *rt = skb_rtable(skb);
243 struct net_device *dev = rt->u.dst.dev; 243 struct net_device *dev = rt->dst.dev;
244 244
245 /* 245 /*
246 * If the indicated interface is up and running, send the packet. 246 * If the indicated interface is up and running, send the packet.
@@ -359,9 +359,9 @@ int ip_queue_xmit(struct sk_buff *skb)
359 if (ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0)) 359 if (ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0))
360 goto no_route; 360 goto no_route;
361 } 361 }
362 sk_setup_caps(sk, &rt->u.dst); 362 sk_setup_caps(sk, &rt->dst);
363 } 363 }
364 skb_dst_set_noref(skb, &rt->u.dst); 364 skb_dst_set_noref(skb, &rt->dst);
365 365
366packet_routed: 366packet_routed:
367 if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) 367 if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
@@ -372,11 +372,11 @@ packet_routed:
372 skb_reset_network_header(skb); 372 skb_reset_network_header(skb);
373 iph = ip_hdr(skb); 373 iph = ip_hdr(skb);
374 *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff)); 374 *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
375 if (ip_dont_fragment(sk, &rt->u.dst) && !skb->local_df) 375 if (ip_dont_fragment(sk, &rt->dst) && !skb->local_df)
376 iph->frag_off = htons(IP_DF); 376 iph->frag_off = htons(IP_DF);
377 else 377 else
378 iph->frag_off = 0; 378 iph->frag_off = 0;
379 iph->ttl = ip_select_ttl(inet, &rt->u.dst); 379 iph->ttl = ip_select_ttl(inet, &rt->dst);
380 iph->protocol = sk->sk_protocol; 380 iph->protocol = sk->sk_protocol;
381 iph->saddr = rt->rt_src; 381 iph->saddr = rt->rt_src;
382 iph->daddr = rt->rt_dst; 382 iph->daddr = rt->rt_dst;
@@ -387,7 +387,7 @@ packet_routed:
387 ip_options_build(skb, opt, inet->inet_daddr, rt, 0); 387 ip_options_build(skb, opt, inet->inet_daddr, rt, 0);
388 } 388 }
389 389
390 ip_select_ident_more(iph, &rt->u.dst, sk, 390 ip_select_ident_more(iph, &rt->dst, sk,
391 (skb_shinfo(skb)->gso_segs ?: 1) - 1); 391 (skb_shinfo(skb)->gso_segs ?: 1) - 1);
392 392
393 skb->priority = sk->sk_priority; 393 skb->priority = sk->sk_priority;
@@ -403,6 +403,7 @@ no_route:
403 kfree_skb(skb); 403 kfree_skb(skb);
404 return -EHOSTUNREACH; 404 return -EHOSTUNREACH;
405} 405}
406EXPORT_SYMBOL(ip_queue_xmit);
406 407
407 408
408static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from) 409static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
@@ -411,7 +412,7 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
411 to->priority = from->priority; 412 to->priority = from->priority;
412 to->protocol = from->protocol; 413 to->protocol = from->protocol;
413 skb_dst_drop(to); 414 skb_dst_drop(to);
414 skb_dst_set(to, dst_clone(skb_dst(from))); 415 skb_dst_copy(to, from);
415 to->dev = from->dev; 416 to->dev = from->dev;
416 to->mark = from->mark; 417 to->mark = from->mark;
417 418
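skb_dst_set(to, dst_clone(skb_dst(from))) would unconditionally take a dst reference, which is no longer safe now that ip_queue_xmit() can attach a non-refcounted dst via skb_dst_set_noref(). skb_dst_copy() instead propagates the pointer together with its noref flag and takes a reference only when the source owned one; roughly, assuming the helper follows the noref convention introduced alongside skb_dst_set_noref():

/* sketch of skb_dst_copy(new, old) */
new->_skb_refdst = old->_skb_refdst;            /* pointer plus NOREF bit */
if (!(new->_skb_refdst & SKB_DST_NOREF))
        dst_clone(skb_dst(new));                /* refcounted: take our own */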
@@ -442,17 +443,16 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
442int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) 443int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
443{ 444{
444 struct iphdr *iph; 445 struct iphdr *iph;
445 int raw = 0;
446 int ptr; 446 int ptr;
447 struct net_device *dev; 447 struct net_device *dev;
448 struct sk_buff *skb2; 448 struct sk_buff *skb2;
449 unsigned int mtu, hlen, left, len, ll_rs, pad; 449 unsigned int mtu, hlen, left, len, ll_rs;
450 int offset; 450 int offset;
451 __be16 not_last_frag; 451 __be16 not_last_frag;
452 struct rtable *rt = skb_rtable(skb); 452 struct rtable *rt = skb_rtable(skb);
453 int err = 0; 453 int err = 0;
454 454
455 dev = rt->u.dst.dev; 455 dev = rt->dst.dev;
456 456
457 /* 457 /*
458 * Point into the IP datagram header. 458 * Point into the IP datagram header.
@@ -473,7 +473,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
473 */ 473 */
474 474
475 hlen = iph->ihl * 4; 475 hlen = iph->ihl * 4;
476 mtu = dst_mtu(&rt->u.dst) - hlen; /* Size of data space */ 476 mtu = dst_mtu(&rt->dst) - hlen; /* Size of data space */
477#ifdef CONFIG_BRIDGE_NETFILTER 477#ifdef CONFIG_BRIDGE_NETFILTER
478 if (skb->nf_bridge) 478 if (skb->nf_bridge)
479 mtu -= nf_bridge_mtu_reduction(skb); 479 mtu -= nf_bridge_mtu_reduction(skb);
@@ -580,14 +580,12 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
580 580
581slow_path: 581slow_path:
582 left = skb->len - hlen; /* Space per frame */ 582 left = skb->len - hlen; /* Space per frame */
583 ptr = raw + hlen; /* Where to start from */ 583 ptr = hlen; /* Where to start from */
584 584
585 /* for bridged IP traffic encapsulated inside f.e. a vlan header, 585 /* for bridged IP traffic encapsulated inside f.e. a vlan header,
586 * we need to make room for the encapsulating header 586 * we need to make room for the encapsulating header
587 */ 587 */
588 pad = nf_bridge_pad(skb); 588 ll_rs = LL_RESERVED_SPACE_EXTRA(rt->dst.dev, nf_bridge_pad(skb));
589 ll_rs = LL_RESERVED_SPACE_EXTRA(rt->u.dst.dev, pad);
590 mtu -= pad;
591 589
592 /* 590 /*
593 * Fragment the datagram. 591 * Fragment the datagram.
@@ -697,7 +695,6 @@ fail:
697 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS); 695 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
698 return err; 696 return err;
699} 697}
700
701EXPORT_SYMBOL(ip_fragment); 698EXPORT_SYMBOL(ip_fragment);
702 699
703int 700int
@@ -716,6 +713,7 @@ ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk
716 } 713 }
717 return 0; 714 return 0;
718} 715}
716EXPORT_SYMBOL(ip_generic_getfrag);
719 717
720static inline __wsum 718static inline __wsum
721csum_page(struct page *page, int offset, int copy) 719csum_page(struct page *page, int offset, int copy)
@@ -833,13 +831,13 @@ int ip_append_data(struct sock *sk,
833 */ 831 */
834 *rtp = NULL; 832 *rtp = NULL;
835 inet->cork.fragsize = mtu = inet->pmtudisc == IP_PMTUDISC_PROBE ? 833 inet->cork.fragsize = mtu = inet->pmtudisc == IP_PMTUDISC_PROBE ?
836 rt->u.dst.dev->mtu : 834 rt->dst.dev->mtu :
837 dst_mtu(rt->u.dst.path); 835 dst_mtu(rt->dst.path);
838 inet->cork.dst = &rt->u.dst; 836 inet->cork.dst = &rt->dst;
839 inet->cork.length = 0; 837 inet->cork.length = 0;
840 sk->sk_sndmsg_page = NULL; 838 sk->sk_sndmsg_page = NULL;
841 sk->sk_sndmsg_off = 0; 839 sk->sk_sndmsg_off = 0;
842 if ((exthdrlen = rt->u.dst.header_len) != 0) { 840 if ((exthdrlen = rt->dst.header_len) != 0) {
843 length += exthdrlen; 841 length += exthdrlen;
844 transhdrlen += exthdrlen; 842 transhdrlen += exthdrlen;
845 } 843 }
@@ -852,7 +850,7 @@ int ip_append_data(struct sock *sk,
852 exthdrlen = 0; 850 exthdrlen = 0;
853 mtu = inet->cork.fragsize; 851 mtu = inet->cork.fragsize;
854 } 852 }
855 hh_len = LL_RESERVED_SPACE(rt->u.dst.dev); 853 hh_len = LL_RESERVED_SPACE(rt->dst.dev);
856 854
857 fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0); 855 fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
858 maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen; 856 maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
@@ -869,14 +867,16 @@ int ip_append_data(struct sock *sk,
869 */ 867 */
870 if (transhdrlen && 868 if (transhdrlen &&
871 length + fragheaderlen <= mtu && 869 length + fragheaderlen <= mtu &&
872 rt->u.dst.dev->features & NETIF_F_V4_CSUM && 870 rt->dst.dev->features & NETIF_F_V4_CSUM &&
873 !exthdrlen) 871 !exthdrlen)
874 csummode = CHECKSUM_PARTIAL; 872 csummode = CHECKSUM_PARTIAL;
875 873
874 skb = skb_peek_tail(&sk->sk_write_queue);
875
876 inet->cork.length += length; 876 inet->cork.length += length;
877 if (((length> mtu) || !skb_queue_empty(&sk->sk_write_queue)) && 877 if (((length > mtu) || (skb && skb_is_gso(skb))) &&
878 (sk->sk_protocol == IPPROTO_UDP) && 878 (sk->sk_protocol == IPPROTO_UDP) &&
879 (rt->u.dst.dev->features & NETIF_F_UFO)) { 879 (rt->dst.dev->features & NETIF_F_UFO)) {
880 err = ip_ufo_append_data(sk, getfrag, from, length, hh_len, 880 err = ip_ufo_append_data(sk, getfrag, from, length, hh_len,
881 fragheaderlen, transhdrlen, mtu, 881 fragheaderlen, transhdrlen, mtu,
882 flags); 882 flags);
@@ -892,7 +892,7 @@ int ip_append_data(struct sock *sk,
892 * adding appropriate IP header. 892 * adding appropriate IP header.
893 */ 893 */
894 894
895 if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) 895 if (!skb)
896 goto alloc_new_skb; 896 goto alloc_new_skb;
897 897
898 while (length > 0) { 898 while (length > 0) {
@@ -924,7 +924,7 @@ alloc_new_skb:
924 fraglen = datalen + fragheaderlen; 924 fraglen = datalen + fragheaderlen;
925 925
926 if ((flags & MSG_MORE) && 926 if ((flags & MSG_MORE) &&
927 !(rt->u.dst.dev->features&NETIF_F_SG)) 927 !(rt->dst.dev->features&NETIF_F_SG))
928 alloclen = mtu; 928 alloclen = mtu;
929 else 929 else
930 alloclen = datalen + fragheaderlen; 930 alloclen = datalen + fragheaderlen;
@@ -935,7 +935,7 @@ alloc_new_skb:
935 * the last. 935 * the last.
936 */ 936 */
937 if (datalen == length + fraggap) 937 if (datalen == length + fraggap)
938 alloclen += rt->u.dst.trailer_len; 938 alloclen += rt->dst.trailer_len;
939 939
940 if (transhdrlen) { 940 if (transhdrlen) {
941 skb = sock_alloc_send_skb(sk, 941 skb = sock_alloc_send_skb(sk,
@@ -1008,7 +1008,7 @@ alloc_new_skb:
1008 if (copy > length) 1008 if (copy > length)
1009 copy = length; 1009 copy = length;
1010 1010
1011 if (!(rt->u.dst.dev->features&NETIF_F_SG)) { 1011 if (!(rt->dst.dev->features&NETIF_F_SG)) {
1012 unsigned int off; 1012 unsigned int off;
1013 1013
1014 off = skb->len; 1014 off = skb->len;
@@ -1103,10 +1103,10 @@ ssize_t ip_append_page(struct sock *sk, struct page *page,
1103 if (inet->cork.flags & IPCORK_OPT) 1103 if (inet->cork.flags & IPCORK_OPT)
1104 opt = inet->cork.opt; 1104 opt = inet->cork.opt;
1105 1105
1106 if (!(rt->u.dst.dev->features&NETIF_F_SG)) 1106 if (!(rt->dst.dev->features&NETIF_F_SG))
1107 return -EOPNOTSUPP; 1107 return -EOPNOTSUPP;
1108 1108
1109 hh_len = LL_RESERVED_SPACE(rt->u.dst.dev); 1109 hh_len = LL_RESERVED_SPACE(rt->dst.dev);
1110 mtu = inet->cork.fragsize; 1110 mtu = inet->cork.fragsize;
1111 1111
1112 fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0); 1112 fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
@@ -1121,8 +1121,9 @@ ssize_t ip_append_page(struct sock *sk, struct page *page,
1121 return -EINVAL; 1121 return -EINVAL;
1122 1122
1123 inet->cork.length += size; 1123 inet->cork.length += size;
1124 if ((sk->sk_protocol == IPPROTO_UDP) && 1124 if ((size + skb->len > mtu) &&
1125 (rt->u.dst.dev->features & NETIF_F_UFO)) { 1125 (sk->sk_protocol == IPPROTO_UDP) &&
1126 (rt->dst.dev->features & NETIF_F_UFO)) {
1126 skb_shinfo(skb)->gso_size = mtu - fragheaderlen; 1127 skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
1127 skb_shinfo(skb)->gso_type = SKB_GSO_UDP; 1128 skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
1128 } 1129 }
@@ -1274,8 +1275,8 @@ int ip_push_pending_frames(struct sock *sk)
1274 * If local_df is set too, we still allow to fragment this frame 1275 * If local_df is set too, we still allow to fragment this frame
1275 * locally. */ 1276 * locally. */
1276 if (inet->pmtudisc >= IP_PMTUDISC_DO || 1277 if (inet->pmtudisc >= IP_PMTUDISC_DO ||
1277 (skb->len <= dst_mtu(&rt->u.dst) && 1278 (skb->len <= dst_mtu(&rt->dst) &&
1278 ip_dont_fragment(sk, &rt->u.dst))) 1279 ip_dont_fragment(sk, &rt->dst)))
1279 df = htons(IP_DF); 1280 df = htons(IP_DF);
1280 1281
1281 if (inet->cork.flags & IPCORK_OPT) 1282 if (inet->cork.flags & IPCORK_OPT)
@@ -1284,7 +1285,7 @@ int ip_push_pending_frames(struct sock *sk)
1284 if (rt->rt_type == RTN_MULTICAST) 1285 if (rt->rt_type == RTN_MULTICAST)
1285 ttl = inet->mc_ttl; 1286 ttl = inet->mc_ttl;
1286 else 1287 else
1287 ttl = ip_select_ttl(inet, &rt->u.dst); 1288 ttl = ip_select_ttl(inet, &rt->dst);
1288 1289
1289 iph = (struct iphdr *)skb->data; 1290 iph = (struct iphdr *)skb->data;
1290 iph->version = 4; 1291 iph->version = 4;
@@ -1295,7 +1296,7 @@ int ip_push_pending_frames(struct sock *sk)
1295 } 1296 }
1296 iph->tos = inet->tos; 1297 iph->tos = inet->tos;
1297 iph->frag_off = df; 1298 iph->frag_off = df;
1298 ip_select_ident(iph, &rt->u.dst, sk); 1299 ip_select_ident(iph, &rt->dst, sk);
1299 iph->ttl = ttl; 1300 iph->ttl = ttl;
1300 iph->protocol = sk->sk_protocol; 1301 iph->protocol = sk->sk_protocol;
1301 iph->saddr = rt->rt_src; 1302 iph->saddr = rt->rt_src;
@@ -1308,7 +1309,7 @@ int ip_push_pending_frames(struct sock *sk)
1308 * on dst refcount 1309 * on dst refcount
1309 */ 1310 */
1310 inet->cork.dst = NULL; 1311 inet->cork.dst = NULL;
1311 skb_dst_set(skb, &rt->u.dst); 1312 skb_dst_set(skb, &rt->dst);
1312 1313
1313 if (iph->protocol == IPPROTO_ICMP) 1314 if (iph->protocol == IPPROTO_ICMP)
1314 icmp_out_count(net, ((struct icmphdr *) 1315 icmp_out_count(net, ((struct icmphdr *)
@@ -1445,7 +1446,3 @@ void __init ip_init(void)
1445 igmp_mc_proc_init(); 1446 igmp_mc_proc_init();
1446#endif 1447#endif
1447} 1448}
1448
1449EXPORT_SYMBOL(ip_generic_getfrag);
1450EXPORT_SYMBOL(ip_queue_xmit);
1451EXPORT_SYMBOL(ip_send_check);
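
The slow path above sizes fragments so that every fragment but the last carries a multiple of 8 data bytes, since the IP fragment-offset field counts 8-byte units. A standalone sketch of that sizing arithmetic, with an assumed 1500-byte MTU and an option-less IPv4 header:

/* Sketch of the fragment sizing used above: round the data part of
 * each fragment down to a multiple of 8 bytes past the IP header.
 * The MTU and header length here are illustrative, not from the patch. */
#include <stdio.h>

int main(void)
{
    unsigned int mtu = 1500;          /* link MTU (assumed) */
    unsigned int fragheaderlen = 20;  /* IPv4 header without options */
    unsigned int maxfraglen;

    /* same expression as ip_append_data() above */
    maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

    printf("mtu=%u hdr=%u -> maxfraglen=%u (data %u, 8-byte aligned)\n",
           mtu, fragheaderlen, maxfraglen, maxfraglen - fragheaderlen);
    return 0;   /* prints: mtu=1500 hdr=20 -> maxfraglen=1500 (data 1480 ...) */
}
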
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index ce231780a2b1..6c40a8c46e79 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -239,7 +239,16 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc)
239 sent to multicast group to reach destination designated router. 239 sent to multicast group to reach destination designated router.
240 */ 240 */
241struct ip_ra_chain *ip_ra_chain; 241struct ip_ra_chain *ip_ra_chain;
242DEFINE_RWLOCK(ip_ra_lock); 242static DEFINE_SPINLOCK(ip_ra_lock);
243
244
245static void ip_ra_destroy_rcu(struct rcu_head *head)
246{
247 struct ip_ra_chain *ra = container_of(head, struct ip_ra_chain, rcu);
248
249 sock_put(ra->saved_sk);
250 kfree(ra);
251}
243 252
244int ip_ra_control(struct sock *sk, unsigned char on, 253int ip_ra_control(struct sock *sk, unsigned char on,
245 void (*destructor)(struct sock *)) 254 void (*destructor)(struct sock *))
@@ -251,35 +260,42 @@ int ip_ra_control(struct sock *sk, unsigned char on,
251 260
252 new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL; 261 new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;
253 262
254 write_lock_bh(&ip_ra_lock); 263 spin_lock_bh(&ip_ra_lock);
255 for (rap = &ip_ra_chain; (ra = *rap) != NULL; rap = &ra->next) { 264 for (rap = &ip_ra_chain; (ra = *rap) != NULL; rap = &ra->next) {
256 if (ra->sk == sk) { 265 if (ra->sk == sk) {
257 if (on) { 266 if (on) {
258 write_unlock_bh(&ip_ra_lock); 267 spin_unlock_bh(&ip_ra_lock);
259 kfree(new_ra); 268 kfree(new_ra);
260 return -EADDRINUSE; 269 return -EADDRINUSE;
261 } 270 }
 262 *rap = ra->next; 271 /* don't let ip_call_ra_chain() use sk again */
263 write_unlock_bh(&ip_ra_lock); 272 ra->sk = NULL;
273 rcu_assign_pointer(*rap, ra->next);
274 spin_unlock_bh(&ip_ra_lock);
264 275
265 if (ra->destructor) 276 if (ra->destructor)
266 ra->destructor(sk); 277 ra->destructor(sk);
267 sock_put(sk); 278 /*
268 kfree(ra); 279 * Delay sock_put(sk) and kfree(ra) after one rcu grace
 280 * period. This guarantees ip_call_ra_chain() doesn't need
281 * to mess with socket refcounts.
282 */
283 ra->saved_sk = sk;
284 call_rcu(&ra->rcu, ip_ra_destroy_rcu);
269 return 0; 285 return 0;
270 } 286 }
271 } 287 }
272 if (new_ra == NULL) { 288 if (new_ra == NULL) {
273 write_unlock_bh(&ip_ra_lock); 289 spin_unlock_bh(&ip_ra_lock);
274 return -ENOBUFS; 290 return -ENOBUFS;
275 } 291 }
276 new_ra->sk = sk; 292 new_ra->sk = sk;
277 new_ra->destructor = destructor; 293 new_ra->destructor = destructor;
278 294
279 new_ra->next = ra; 295 new_ra->next = ra;
280 *rap = new_ra; 296 rcu_assign_pointer(*rap, new_ra);
281 sock_hold(sk); 297 sock_hold(sk);
282 write_unlock_bh(&ip_ra_lock); 298 spin_unlock_bh(&ip_ra_lock);
283 299
284 return 0; 300 return 0;
285} 301}
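
The ip_ra_control() conversion above replaces the reader/writer lock with a spinlock for writers plus RCU for readers: an entry is unlinked with rcu_assign_pointer(), its sk pointer is cleared, and the sock_put()/kfree() pair is deferred past a grace period so ip_call_ra_chain() can traverse the chain without locks or refcounting. A minimal userspace sketch of that unpublish-then-retire discipline, assuming the liburcu library (build with -lurcu); the list and names are ours, and where the patch uses call_rcu() this sketch blocks in synchronize_rcu():

/* Unpublish a node, wait out all readers, then free it. */
#include <urcu.h>       /* rcu_read_lock, rcu_assign_pointer, ... */
#include <stdio.h>
#include <stdlib.h>

struct node { int val; struct node *next; };
static struct node *chain;          /* RCU-protected singly linked list */

static void reader(void)
{
    struct node *n;

    rcu_read_lock();                /* lockless read side */
    for (n = rcu_dereference(chain); n; n = rcu_dereference(n->next))
        printf("saw %d\n", n->val);
    rcu_read_unlock();
}

static void unlink_first(void)      /* writer side, normally under a lock */
{
    struct node *victim = chain;

    if (!victim)
        return;
    rcu_assign_pointer(chain, victim->next);  /* unpublish */
    synchronize_rcu();              /* the patch defers via call_rcu() */
    free(victim);                   /* no reader can still see it */
}

int main(void)
{
    rcu_register_thread();
    rcu_assign_pointer(chain, calloc(1, sizeof(struct node)));
    reader();
    unlink_first();
    rcu_unregister_thread();
    return 0;
}
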
@@ -449,7 +465,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
449 (1<<IP_MTU_DISCOVER) | (1<<IP_RECVERR) | 465 (1<<IP_MTU_DISCOVER) | (1<<IP_RECVERR) |
450 (1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) | 466 (1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) |
451 (1<<IP_PASSSEC) | (1<<IP_TRANSPARENT) | 467 (1<<IP_PASSSEC) | (1<<IP_TRANSPARENT) |
452 (1<<IP_MINTTL))) || 468 (1<<IP_MINTTL) | (1<<IP_NODEFRAG))) ||
453 optname == IP_MULTICAST_TTL || 469 optname == IP_MULTICAST_TTL ||
454 optname == IP_MULTICAST_ALL || 470 optname == IP_MULTICAST_ALL ||
455 optname == IP_MULTICAST_LOOP || 471 optname == IP_MULTICAST_LOOP ||
@@ -572,6 +588,13 @@ static int do_ip_setsockopt(struct sock *sk, int level,
572 } 588 }
573 inet->hdrincl = val ? 1 : 0; 589 inet->hdrincl = val ? 1 : 0;
574 break; 590 break;
591 case IP_NODEFRAG:
592 if (sk->sk_type != SOCK_RAW) {
593 err = -ENOPROTOOPT;
594 break;
595 }
596 inet->nodefrag = val ? 1 : 0;
597 break;
575 case IP_MTU_DISCOVER: 598 case IP_MTU_DISCOVER:
576 if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_PROBE) 599 if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_PROBE)
577 goto e_inval; 600 goto e_inval;
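
A minimal userspace sketch of the new socket option (error handling abbreviated): IP_NODEFRAG is only honoured on SOCK_RAW sockets, as the check above enforces, and the constant ships with 2.6.36+ kernel headers, hence the fallback define. Its effect lives in the nf_defrag_ipv4 hook further down in this patch, which now skips conntrack defragmentation for such sockets.

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#ifndef IP_NODEFRAG
#define IP_NODEFRAG 22              /* value from linux/in.h */
#endif

int main(void)
{
    int one = 1;
    int fd = socket(AF_INET, SOCK_RAW, IPPROTO_UDP); /* needs CAP_NET_RAW */

    if (fd < 0) {
        perror("socket");
        return 1;
    }
    if (setsockopt(fd, IPPROTO_IP, IP_NODEFRAG, &one, sizeof(one)) < 0) {
        perror("setsockopt");       /* -ENOPROTOOPT on non-raw sockets */
        return 1;
    }
    puts("conntrack defragmentation bypassed for this socket");
    return 0;
}
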
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index b9d84e800cf4..3a6e1ec5e9ae 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -665,6 +665,13 @@ ic_dhcp_init_options(u8 *options)
665 memcpy(e, ic_req_params, sizeof(ic_req_params)); 665 memcpy(e, ic_req_params, sizeof(ic_req_params));
666 e += sizeof(ic_req_params); 666 e += sizeof(ic_req_params);
667 667
668 if (ic_host_name_set) {
669 *e++ = 12; /* host-name */
670 len = strlen(utsname()->nodename);
671 *e++ = len;
672 memcpy(e, utsname()->nodename, len);
673 e += len;
674 }
668 if (*vendor_class_identifier) { 675 if (*vendor_class_identifier) {
669 printk(KERN_INFO "DHCP: sending class identifier \"%s\"\n", 676 printk(KERN_INFO "DHCP: sending class identifier \"%s\"\n",
670 vendor_class_identifier); 677 vendor_class_identifier);
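
The added block emits DHCP option 12 (host-name) in the usual tag/length/value layout, taking the name from utsname(). A standalone sketch of just that encoding; the buffer and name are illustrative:

#include <stdio.h>
#include <string.h>

/* Append a DHCP host-name option: tag 12, one length byte, raw name. */
static unsigned char *put_host_name(unsigned char *e, const char *name)
{
    size_t len = strlen(name);      /* must fit in one byte (<= 255) */

    *e++ = 12;                      /* option tag: host-name */
    *e++ = (unsigned char)len;
    memcpy(e, name, len);
    return e + len;
}

int main(void)
{
    unsigned char opts[64];
    unsigned char *end = put_host_name(opts, "testbox");

    printf("encoded %d bytes: tag=%u len=%u\n",
           (int)(end - opts), opts[0], opts[1]);
    return 0;
}
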
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 7fd636711037..ec036731a70b 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -435,7 +435,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
435 goto tx_error_icmp; 435 goto tx_error_icmp;
436 } 436 }
437 } 437 }
438 tdev = rt->u.dst.dev; 438 tdev = rt->dst.dev;
439 439
440 if (tdev == dev) { 440 if (tdev == dev) {
441 ip_rt_put(rt); 441 ip_rt_put(rt);
@@ -446,7 +446,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
446 df |= old_iph->frag_off & htons(IP_DF); 446 df |= old_iph->frag_off & htons(IP_DF);
447 447
448 if (df) { 448 if (df) {
449 mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr); 449 mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr);
450 450
451 if (mtu < 68) { 451 if (mtu < 68) {
452 stats->collisions++; 452 stats->collisions++;
@@ -503,7 +503,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
503 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | 503 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
504 IPSKB_REROUTED); 504 IPSKB_REROUTED);
505 skb_dst_drop(skb); 505 skb_dst_drop(skb);
506 skb_dst_set(skb, &rt->u.dst); 506 skb_dst_set(skb, &rt->dst);
507 507
508 /* 508 /*
509 * Push down and install the IPIP header. 509 * Push down and install the IPIP header.
@@ -552,7 +552,7 @@ static void ipip_tunnel_bind_dev(struct net_device *dev)
552 .proto = IPPROTO_IPIP }; 552 .proto = IPPROTO_IPIP };
553 struct rtable *rt; 553 struct rtable *rt;
554 if (!ip_route_output_key(dev_net(dev), &rt, &fl)) { 554 if (!ip_route_output_key(dev_net(dev), &rt, &fl)) {
555 tdev = rt->u.dst.dev; 555 tdev = rt->dst.dev;
556 ip_rt_put(rt); 556 ip_rt_put(rt);
557 } 557 }
558 dev->flags |= IFF_POINTOPOINT; 558 dev->flags |= IFF_POINTOPOINT;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 757f25eb9b4b..179fcab866fc 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -442,8 +442,10 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
442 int err; 442 int err;
443 443
444 err = ipmr_fib_lookup(net, &fl, &mrt); 444 err = ipmr_fib_lookup(net, &fl, &mrt);
445 if (err < 0) 445 if (err < 0) {
446 kfree_skb(skb);
446 return err; 447 return err;
448 }
447 449
448 read_lock(&mrt_lock); 450 read_lock(&mrt_lock);
449 dev->stats.tx_bytes += skb->len; 451 dev->stats.tx_bytes += skb->len;
@@ -1553,9 +1555,9 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
1553 goto out_free; 1555 goto out_free;
1554 } 1556 }
1555 1557
1556 dev = rt->u.dst.dev; 1558 dev = rt->dst.dev;
1557 1559
1558 if (skb->len+encap > dst_mtu(&rt->u.dst) && (ntohs(iph->frag_off) & IP_DF)) { 1560 if (skb->len+encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) {
1559 /* Do not fragment multicasts. Alas, IPv4 does not 1561 /* Do not fragment multicasts. Alas, IPv4 does not
1560 allow to send ICMP, so that packets will disappear 1562 allow to send ICMP, so that packets will disappear
1561 to blackhole. 1563 to blackhole.
@@ -1566,7 +1568,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
1566 goto out_free; 1568 goto out_free;
1567 } 1569 }
1568 1570
1569 encap += LL_RESERVED_SPACE(dev) + rt->u.dst.header_len; 1571 encap += LL_RESERVED_SPACE(dev) + rt->dst.header_len;
1570 1572
1571 if (skb_cow(skb, encap)) { 1573 if (skb_cow(skb, encap)) {
1572 ip_rt_put(rt); 1574 ip_rt_put(rt);
@@ -1577,7 +1579,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
1577 vif->bytes_out += skb->len; 1579 vif->bytes_out += skb->len;
1578 1580
1579 skb_dst_drop(skb); 1581 skb_dst_drop(skb);
1580 skb_dst_set(skb, &rt->u.dst); 1582 skb_dst_set(skb, &rt->dst);
1581 ip_decrease_ttl(ip_hdr(skb)); 1583 ip_decrease_ttl(ip_hdr(skb));
1582 1584
1583 /* FIXME: forward and output firewalls used to be called here. 1585 /* FIXME: forward and output firewalls used to be called here.
@@ -1728,8 +1730,10 @@ int ip_mr_input(struct sk_buff *skb)
1728 goto dont_forward; 1730 goto dont_forward;
1729 1731
1730 err = ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt); 1732 err = ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt);
1731 if (err < 0) 1733 if (err < 0) {
1734 kfree_skb(skb);
1732 return err; 1735 return err;
1736 }
1733 1737
1734 if (!local) { 1738 if (!local) {
1735 if (IPCB(skb)->opt.router_alert) { 1739 if (IPCB(skb)->opt.router_alert) {
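
Both ipmr hunks fix the same leak: on ipmr_fib_lookup() failure the function returned without consuming the skb it already owned. A toy model of the ownership rule the added kfree_skb() calls restore (names ours; the kernel release primitive is kfree_skb()):

#include <stdlib.h>

struct pkt { char *data; };

static void pkt_free(struct pkt *p) { free(p->data); free(p); }

static int route_lookup(void) { return -1; }    /* stand-in: always fails */

/* Takes ownership of p: every return path must release it. */
static int xmit(struct pkt *p)
{
    if (route_lookup() < 0) {
        pkt_free(p);                /* the fix: consume on the error path */
        return -1;
    }
    /* ... would transmit here, then release on the success path ... */
    pkt_free(p);
    return 0;
}

int main(void)
{
    struct pkt *p = calloc(1, sizeof(*p));

    p->data = malloc(64);
    xmit(p);                        /* no leak on either path */
    return 0;
}
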
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index 07de855e2175..d88a46c54fd1 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -43,7 +43,7 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
43 43
44 /* Drop old route. */ 44 /* Drop old route. */
45 skb_dst_drop(skb); 45 skb_dst_drop(skb);
46 skb_dst_set(skb, &rt->u.dst); 46 skb_dst_set(skb, &rt->dst);
47 } else { 47 } else {
48 /* non-local src, find valid iif to satisfy 48 /* non-local src, find valid iif to satisfy
49 * rp-filter when calling ip_route_input. */ 49 * rp-filter when calling ip_route_input. */
@@ -53,11 +53,11 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
53 53
54 orefdst = skb->_skb_refdst; 54 orefdst = skb->_skb_refdst;
55 if (ip_route_input(skb, iph->daddr, iph->saddr, 55 if (ip_route_input(skb, iph->daddr, iph->saddr,
56 RT_TOS(iph->tos), rt->u.dst.dev) != 0) { 56 RT_TOS(iph->tos), rt->dst.dev) != 0) {
57 dst_release(&rt->u.dst); 57 dst_release(&rt->dst);
58 return -1; 58 return -1;
59 } 59 }
60 dst_release(&rt->u.dst); 60 dst_release(&rt->dst);
61 refdst_drop(orefdst); 61 refdst_drop(orefdst);
62 } 62 }
63 63
@@ -212,9 +212,7 @@ static __sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook,
212 skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr, protocol, 212 skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr, protocol,
213 skb->len - dataoff, 0); 213 skb->len - dataoff, 0);
214 skb->ip_summed = CHECKSUM_NONE; 214 skb->ip_summed = CHECKSUM_NONE;
215 csum = __skb_checksum_complete_head(skb, dataoff + len); 215 return __skb_checksum_complete_head(skb, dataoff + len);
216 if (!csum)
217 skb->ip_summed = CHECKSUM_UNNECESSARY;
218 } 216 }
219 return csum; 217 return csum;
220} 218}
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 1ac01b128621..6bccba31d132 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -283,16 +283,13 @@ unsigned int arpt_do_table(struct sk_buff *skb,
283 arp = arp_hdr(skb); 283 arp = arp_hdr(skb);
284 do { 284 do {
285 const struct arpt_entry_target *t; 285 const struct arpt_entry_target *t;
286 int hdr_len;
287 286
288 if (!arp_packet_match(arp, skb->dev, indev, outdev, &e->arp)) { 287 if (!arp_packet_match(arp, skb->dev, indev, outdev, &e->arp)) {
289 e = arpt_next_entry(e); 288 e = arpt_next_entry(e);
290 continue; 289 continue;
291 } 290 }
292 291
293 hdr_len = sizeof(*arp) + (2 * sizeof(struct in_addr)) + 292 ADD_COUNTER(e->counters, arp_hdr_len(skb->dev), 1);
294 (2 * skb->dev->addr_len);
295 ADD_COUNTER(e->counters, hdr_len, 1);
296 293
297 t = arpt_get_target_c(e); 294 t = arpt_get_target_c(e);
298 295
@@ -713,7 +710,7 @@ static void get_counters(const struct xt_table_info *t,
713 struct arpt_entry *iter; 710 struct arpt_entry *iter;
714 unsigned int cpu; 711 unsigned int cpu;
715 unsigned int i; 712 unsigned int i;
716 unsigned int curcpu; 713 unsigned int curcpu = get_cpu();
717 714
718 /* Instead of clearing (by a previous call to memset()) 715 /* Instead of clearing (by a previous call to memset())
719 * the counters and using adds, we set the counters 716 * the counters and using adds, we set the counters
@@ -723,14 +720,16 @@ static void get_counters(const struct xt_table_info *t,
723 * if new softirq were to run and call ipt_do_table 720 * if new softirq were to run and call ipt_do_table
724 */ 721 */
725 local_bh_disable(); 722 local_bh_disable();
726 curcpu = smp_processor_id();
727
728 i = 0; 723 i = 0;
729 xt_entry_foreach(iter, t->entries[curcpu], t->size) { 724 xt_entry_foreach(iter, t->entries[curcpu], t->size) {
730 SET_COUNTER(counters[i], iter->counters.bcnt, 725 SET_COUNTER(counters[i], iter->counters.bcnt,
731 iter->counters.pcnt); 726 iter->counters.pcnt);
732 ++i; 727 ++i;
733 } 728 }
729 local_bh_enable();
 730 /* Processing counters from other cpus, we can leave bottom halves enabled,
731 * (preemption is disabled)
732 */
734 733
735 for_each_possible_cpu(cpu) { 734 for_each_possible_cpu(cpu) {
736 if (cpu == curcpu) 735 if (cpu == curcpu)
@@ -744,7 +743,7 @@ static void get_counters(const struct xt_table_info *t,
744 } 743 }
745 xt_info_wrunlock(cpu); 744 xt_info_wrunlock(cpu);
746 } 745 }
747 local_bh_enable(); 746 put_cpu();
748} 747}
749 748
750static struct xt_counters *alloc_counters(const struct xt_table *table) 749static struct xt_counters *alloc_counters(const struct xt_table *table)
@@ -758,7 +757,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
758 * about). 757 * about).
759 */ 758 */
760 countersize = sizeof(struct xt_counters) * private->number; 759 countersize = sizeof(struct xt_counters) * private->number;
761 counters = vmalloc_node(countersize, numa_node_id()); 760 counters = vmalloc(countersize);
762 761
763 if (counters == NULL) 762 if (counters == NULL)
764 return ERR_PTR(-ENOMEM); 763 return ERR_PTR(-ENOMEM);
@@ -1005,8 +1004,7 @@ static int __do_replace(struct net *net, const char *name,
1005 struct arpt_entry *iter; 1004 struct arpt_entry *iter;
1006 1005
1007 ret = 0; 1006 ret = 0;
1008 counters = vmalloc_node(num_counters * sizeof(struct xt_counters), 1007 counters = vmalloc(num_counters * sizeof(struct xt_counters));
1009 numa_node_id());
1010 if (!counters) { 1008 if (!counters) {
1011 ret = -ENOMEM; 1009 ret = -ENOMEM;
1012 goto out; 1010 goto out;
@@ -1159,7 +1157,7 @@ static int do_add_counters(struct net *net, const void __user *user,
1159 if (len != size + num_counters * sizeof(struct xt_counters)) 1157 if (len != size + num_counters * sizeof(struct xt_counters))
1160 return -EINVAL; 1158 return -EINVAL;
1161 1159
1162 paddc = vmalloc_node(len - size, numa_node_id()); 1160 paddc = vmalloc(len - size);
1163 if (!paddc) 1161 if (!paddc)
1164 return -ENOMEM; 1162 return -ENOMEM;
1165 1163
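
get_counters() now pins the running CPU with get_cpu()/put_cpu() and keeps bottom halves disabled only while snapshotting the local CPU's entries; the other CPUs' entries are then accumulated with bottom halves re-enabled. A userspace model of that two-phase SET-then-ADD aggregation (per-CPU layout and numbers are illustrative; concurrency is elided):

#include <stdio.h>

#define NCPU 4
#define NENT 3

/* per-cpu byte counters: one row per cpu, one column per rule entry */
static unsigned long bcnt[NCPU][NENT] = {
    { 100, 200, 300 }, { 10, 20, 30 }, { 1, 2, 3 }, { 0, 5, 0 },
};

int main(void)
{
    unsigned long total[NENT];
    int curcpu = 0, cpu, i;

    for (i = 0; i < NENT; i++)          /* phase 1: SET from curcpu */
        total[i] = bcnt[curcpu][i];

    for (cpu = 0; cpu < NCPU; cpu++) {  /* phase 2: ADD the others */
        if (cpu == curcpu)
            continue;
        for (i = 0; i < NENT; i++)
            total[i] += bcnt[cpu][i];
    }
    for (i = 0; i < NENT; i++)
        printf("entry %d: %lu bytes\n", i, total[i]);
    return 0;                           /* 111, 227, 333 */
}
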
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index a4e5fc5df4bf..d2c1311cb28d 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -42,7 +42,7 @@ typedef int (*ipq_cmpfn)(struct nf_queue_entry *, unsigned long);
42 42
43static unsigned char copy_mode __read_mostly = IPQ_COPY_NONE; 43static unsigned char copy_mode __read_mostly = IPQ_COPY_NONE;
44static unsigned int queue_maxlen __read_mostly = IPQ_QMAX_DEFAULT; 44static unsigned int queue_maxlen __read_mostly = IPQ_QMAX_DEFAULT;
45static DEFINE_RWLOCK(queue_lock); 45static DEFINE_SPINLOCK(queue_lock);
46static int peer_pid __read_mostly; 46static int peer_pid __read_mostly;
47static unsigned int copy_range __read_mostly; 47static unsigned int copy_range __read_mostly;
48static unsigned int queue_total; 48static unsigned int queue_total;
@@ -72,10 +72,10 @@ __ipq_set_mode(unsigned char mode, unsigned int range)
72 break; 72 break;
73 73
74 case IPQ_COPY_PACKET: 74 case IPQ_COPY_PACKET:
75 copy_mode = mode; 75 if (range > 0xFFFF)
76 range = 0xFFFF;
76 copy_range = range; 77 copy_range = range;
77 if (copy_range > 0xFFFF) 78 copy_mode = mode;
78 copy_range = 0xFFFF;
79 break; 79 break;
80 80
81 default: 81 default:
@@ -101,7 +101,7 @@ ipq_find_dequeue_entry(unsigned long id)
101{ 101{
102 struct nf_queue_entry *entry = NULL, *i; 102 struct nf_queue_entry *entry = NULL, *i;
103 103
104 write_lock_bh(&queue_lock); 104 spin_lock_bh(&queue_lock);
105 105
106 list_for_each_entry(i, &queue_list, list) { 106 list_for_each_entry(i, &queue_list, list) {
107 if ((unsigned long)i == id) { 107 if ((unsigned long)i == id) {
@@ -115,7 +115,7 @@ ipq_find_dequeue_entry(unsigned long id)
115 queue_total--; 115 queue_total--;
116 } 116 }
117 117
118 write_unlock_bh(&queue_lock); 118 spin_unlock_bh(&queue_lock);
119 return entry; 119 return entry;
120} 120}
121 121
@@ -136,9 +136,9 @@ __ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
136static void 136static void
137ipq_flush(ipq_cmpfn cmpfn, unsigned long data) 137ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
138{ 138{
139 write_lock_bh(&queue_lock); 139 spin_lock_bh(&queue_lock);
140 __ipq_flush(cmpfn, data); 140 __ipq_flush(cmpfn, data);
141 write_unlock_bh(&queue_lock); 141 spin_unlock_bh(&queue_lock);
142} 142}
143 143
144static struct sk_buff * 144static struct sk_buff *
@@ -152,9 +152,7 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
152 struct nlmsghdr *nlh; 152 struct nlmsghdr *nlh;
153 struct timeval tv; 153 struct timeval tv;
154 154
155 read_lock_bh(&queue_lock); 155 switch (ACCESS_ONCE(copy_mode)) {
156
157 switch (copy_mode) {
158 case IPQ_COPY_META: 156 case IPQ_COPY_META:
159 case IPQ_COPY_NONE: 157 case IPQ_COPY_NONE:
160 size = NLMSG_SPACE(sizeof(*pmsg)); 158 size = NLMSG_SPACE(sizeof(*pmsg));
@@ -162,26 +160,21 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
162 160
163 case IPQ_COPY_PACKET: 161 case IPQ_COPY_PACKET:
164 if (entry->skb->ip_summed == CHECKSUM_PARTIAL && 162 if (entry->skb->ip_summed == CHECKSUM_PARTIAL &&
165 (*errp = skb_checksum_help(entry->skb))) { 163 (*errp = skb_checksum_help(entry->skb)))
166 read_unlock_bh(&queue_lock);
167 return NULL; 164 return NULL;
168 } 165
169 if (copy_range == 0 || copy_range > entry->skb->len) 166 data_len = ACCESS_ONCE(copy_range);
167 if (data_len == 0 || data_len > entry->skb->len)
170 data_len = entry->skb->len; 168 data_len = entry->skb->len;
171 else
172 data_len = copy_range;
173 169
174 size = NLMSG_SPACE(sizeof(*pmsg) + data_len); 170 size = NLMSG_SPACE(sizeof(*pmsg) + data_len);
175 break; 171 break;
176 172
177 default: 173 default:
178 *errp = -EINVAL; 174 *errp = -EINVAL;
179 read_unlock_bh(&queue_lock);
180 return NULL; 175 return NULL;
181 } 176 }
182 177
183 read_unlock_bh(&queue_lock);
184
185 skb = alloc_skb(size, GFP_ATOMIC); 178 skb = alloc_skb(size, GFP_ATOMIC);
186 if (!skb) 179 if (!skb)
187 goto nlmsg_failure; 180 goto nlmsg_failure;
@@ -242,7 +235,7 @@ ipq_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
242 if (nskb == NULL) 235 if (nskb == NULL)
243 return status; 236 return status;
244 237
245 write_lock_bh(&queue_lock); 238 spin_lock_bh(&queue_lock);
246 239
247 if (!peer_pid) 240 if (!peer_pid)
248 goto err_out_free_nskb; 241 goto err_out_free_nskb;
@@ -266,14 +259,14 @@ ipq_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
266 259
267 __ipq_enqueue_entry(entry); 260 __ipq_enqueue_entry(entry);
268 261
269 write_unlock_bh(&queue_lock); 262 spin_unlock_bh(&queue_lock);
270 return status; 263 return status;
271 264
272err_out_free_nskb: 265err_out_free_nskb:
273 kfree_skb(nskb); 266 kfree_skb(nskb);
274 267
275err_out_unlock: 268err_out_unlock:
276 write_unlock_bh(&queue_lock); 269 spin_unlock_bh(&queue_lock);
277 return status; 270 return status;
278} 271}
279 272
@@ -342,9 +335,9 @@ ipq_set_mode(unsigned char mode, unsigned int range)
342{ 335{
343 int status; 336 int status;
344 337
345 write_lock_bh(&queue_lock); 338 spin_lock_bh(&queue_lock);
346 status = __ipq_set_mode(mode, range); 339 status = __ipq_set_mode(mode, range);
347 write_unlock_bh(&queue_lock); 340 spin_unlock_bh(&queue_lock);
348 return status; 341 return status;
349} 342}
350 343
@@ -440,11 +433,11 @@ __ipq_rcv_skb(struct sk_buff *skb)
440 if (security_netlink_recv(skb, CAP_NET_ADMIN)) 433 if (security_netlink_recv(skb, CAP_NET_ADMIN))
441 RCV_SKB_FAIL(-EPERM); 434 RCV_SKB_FAIL(-EPERM);
442 435
443 write_lock_bh(&queue_lock); 436 spin_lock_bh(&queue_lock);
444 437
445 if (peer_pid) { 438 if (peer_pid) {
446 if (peer_pid != pid) { 439 if (peer_pid != pid) {
447 write_unlock_bh(&queue_lock); 440 spin_unlock_bh(&queue_lock);
448 RCV_SKB_FAIL(-EBUSY); 441 RCV_SKB_FAIL(-EBUSY);
449 } 442 }
450 } else { 443 } else {
@@ -452,7 +445,7 @@ __ipq_rcv_skb(struct sk_buff *skb)
452 peer_pid = pid; 445 peer_pid = pid;
453 } 446 }
454 447
455 write_unlock_bh(&queue_lock); 448 spin_unlock_bh(&queue_lock);
456 449
457 status = ipq_receive_peer(NLMSG_DATA(nlh), type, 450 status = ipq_receive_peer(NLMSG_DATA(nlh), type,
458 nlmsglen - NLMSG_LENGTH(0)); 451 nlmsglen - NLMSG_LENGTH(0));
@@ -497,10 +490,10 @@ ipq_rcv_nl_event(struct notifier_block *this,
497 struct netlink_notify *n = ptr; 490 struct netlink_notify *n = ptr;
498 491
499 if (event == NETLINK_URELEASE && n->protocol == NETLINK_FIREWALL) { 492 if (event == NETLINK_URELEASE && n->protocol == NETLINK_FIREWALL) {
500 write_lock_bh(&queue_lock); 493 spin_lock_bh(&queue_lock);
501 if ((net_eq(n->net, &init_net)) && (n->pid == peer_pid)) 494 if ((net_eq(n->net, &init_net)) && (n->pid == peer_pid))
502 __ipq_reset(); 495 __ipq_reset();
503 write_unlock_bh(&queue_lock); 496 spin_unlock_bh(&queue_lock);
504 } 497 }
505 return NOTIFY_DONE; 498 return NOTIFY_DONE;
506} 499}
@@ -527,7 +520,7 @@ static ctl_table ipq_table[] = {
527#ifdef CONFIG_PROC_FS 520#ifdef CONFIG_PROC_FS
528static int ip_queue_show(struct seq_file *m, void *v) 521static int ip_queue_show(struct seq_file *m, void *v)
529{ 522{
530 read_lock_bh(&queue_lock); 523 spin_lock_bh(&queue_lock);
531 524
532 seq_printf(m, 525 seq_printf(m,
533 "Peer PID : %d\n" 526 "Peer PID : %d\n"
@@ -545,7 +538,7 @@ static int ip_queue_show(struct seq_file *m, void *v)
545 queue_dropped, 538 queue_dropped,
546 queue_user_dropped); 539 queue_user_dropped);
547 540
548 read_unlock_bh(&queue_lock); 541 spin_unlock_bh(&queue_lock);
549 return 0; 542 return 0;
550} 543}
551 544
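
With queue_lock gone from the read side, ipq_build_packet_message() samples copy_mode and copy_range with ACCESS_ONCE(), and __ipq_set_mode() now clamps and stores copy_range before flipping copy_mode, so a lockless reader can never pair IPQ_COPY_PACKET with an unclamped range. A sketch of that publish order in C11 atomics; the explicit release/acquire ordering is deliberately stronger than the kernel's ACCESS_ONCE() and is an illustration, not the original code:

#include <stdatomic.h>
#include <stdio.h>

enum { COPY_NONE, COPY_META, COPY_PACKET };

static _Atomic int copy_mode = COPY_NONE;
static atomic_uint copy_range;

static void set_mode_packet(unsigned int range)   /* writer, under lock */
{
    if (range > 0xFFFF)
        range = 0xFFFF;                           /* clamp first... */
    atomic_store_explicit(&copy_range, range, memory_order_release);
    atomic_store_explicit(&copy_mode, COPY_PACKET,
                          memory_order_release);  /* ...publish last */
}

static unsigned int bytes_to_copy(unsigned int skb_len)  /* lockless */
{
    unsigned int n;

    if (atomic_load_explicit(&copy_mode, memory_order_acquire)
        != COPY_PACKET)
        return 0;
    n = atomic_load_explicit(&copy_range, memory_order_acquire);
    return (n == 0 || n > skb_len) ? skb_len : n;
}

int main(void)
{
    set_mode_packet(1u << 20);      /* oversized request gets clamped */
    printf("copy %u bytes of a 1400-byte packet\n", bytes_to_copy(1400));
    return 0;                       /* prints 1400 */
}
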
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 4b6c5ca610fc..c439721b165a 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -364,7 +364,7 @@ ipt_do_table(struct sk_buff *skb,
364 goto no_match; 364 goto no_match;
365 } 365 }
366 366
367 ADD_COUNTER(e->counters, ntohs(ip->tot_len), 1); 367 ADD_COUNTER(e->counters, skb->len, 1);
368 368
369 t = ipt_get_target(e); 369 t = ipt_get_target(e);
370 IP_NF_ASSERT(t->u.kernel.target); 370 IP_NF_ASSERT(t->u.kernel.target);
@@ -884,7 +884,7 @@ get_counters(const struct xt_table_info *t,
884 struct ipt_entry *iter; 884 struct ipt_entry *iter;
885 unsigned int cpu; 885 unsigned int cpu;
886 unsigned int i; 886 unsigned int i;
887 unsigned int curcpu; 887 unsigned int curcpu = get_cpu();
888 888
889 /* Instead of clearing (by a previous call to memset()) 889 /* Instead of clearing (by a previous call to memset())
890 * the counters and using adds, we set the counters 890 * the counters and using adds, we set the counters
@@ -894,14 +894,16 @@ get_counters(const struct xt_table_info *t,
894 * if new softirq were to run and call ipt_do_table 894 * if new softirq were to run and call ipt_do_table
895 */ 895 */
896 local_bh_disable(); 896 local_bh_disable();
897 curcpu = smp_processor_id();
898
899 i = 0; 897 i = 0;
900 xt_entry_foreach(iter, t->entries[curcpu], t->size) { 898 xt_entry_foreach(iter, t->entries[curcpu], t->size) {
901 SET_COUNTER(counters[i], iter->counters.bcnt, 899 SET_COUNTER(counters[i], iter->counters.bcnt,
902 iter->counters.pcnt); 900 iter->counters.pcnt);
903 ++i; 901 ++i;
904 } 902 }
903 local_bh_enable();
 904 /* Processing counters from other cpus, we can leave bottom halves enabled,
905 * (preemption is disabled)
906 */
905 907
906 for_each_possible_cpu(cpu) { 908 for_each_possible_cpu(cpu) {
907 if (cpu == curcpu) 909 if (cpu == curcpu)
@@ -915,7 +917,7 @@ get_counters(const struct xt_table_info *t,
915 } 917 }
916 xt_info_wrunlock(cpu); 918 xt_info_wrunlock(cpu);
917 } 919 }
918 local_bh_enable(); 920 put_cpu();
919} 921}
920 922
921static struct xt_counters *alloc_counters(const struct xt_table *table) 923static struct xt_counters *alloc_counters(const struct xt_table *table)
@@ -928,7 +930,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
928 (other than comefrom, which userspace doesn't care 930 (other than comefrom, which userspace doesn't care
929 about). */ 931 about). */
930 countersize = sizeof(struct xt_counters) * private->number; 932 countersize = sizeof(struct xt_counters) * private->number;
931 counters = vmalloc_node(countersize, numa_node_id()); 933 counters = vmalloc(countersize);
932 934
933 if (counters == NULL) 935 if (counters == NULL)
934 return ERR_PTR(-ENOMEM); 936 return ERR_PTR(-ENOMEM);
@@ -1352,7 +1354,7 @@ do_add_counters(struct net *net, const void __user *user,
1352 if (len != size + num_counters * sizeof(struct xt_counters)) 1354 if (len != size + num_counters * sizeof(struct xt_counters))
1353 return -EINVAL; 1355 return -EINVAL;
1354 1356
1355 paddc = vmalloc_node(len - size, numa_node_id()); 1357 paddc = vmalloc(len - size);
1356 if (!paddc) 1358 if (!paddc)
1357 return -ENOMEM; 1359 return -ENOMEM;
1358 1360
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index f91c94b9a790..3a43cf36db87 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -53,12 +53,13 @@ struct clusterip_config {
53#endif 53#endif
54 enum clusterip_hashmode hash_mode; /* which hashing mode */ 54 enum clusterip_hashmode hash_mode; /* which hashing mode */
55 u_int32_t hash_initval; /* hash initialization */ 55 u_int32_t hash_initval; /* hash initialization */
56 struct rcu_head rcu;
56}; 57};
57 58
58static LIST_HEAD(clusterip_configs); 59static LIST_HEAD(clusterip_configs);
59 60
60/* clusterip_lock protects the clusterip_configs list */ 61/* clusterip_lock protects the clusterip_configs list */
61static DEFINE_RWLOCK(clusterip_lock); 62static DEFINE_SPINLOCK(clusterip_lock);
62 63
63#ifdef CONFIG_PROC_FS 64#ifdef CONFIG_PROC_FS
64static const struct file_operations clusterip_proc_fops; 65static const struct file_operations clusterip_proc_fops;
@@ -71,11 +72,17 @@ clusterip_config_get(struct clusterip_config *c)
71 atomic_inc(&c->refcount); 72 atomic_inc(&c->refcount);
72} 73}
73 74
75
76static void clusterip_config_rcu_free(struct rcu_head *head)
77{
78 kfree(container_of(head, struct clusterip_config, rcu));
79}
80
74static inline void 81static inline void
75clusterip_config_put(struct clusterip_config *c) 82clusterip_config_put(struct clusterip_config *c)
76{ 83{
77 if (atomic_dec_and_test(&c->refcount)) 84 if (atomic_dec_and_test(&c->refcount))
78 kfree(c); 85 call_rcu_bh(&c->rcu, clusterip_config_rcu_free);
79} 86}
80 87
81/* decrease the count of entries using/referencing this config. If last 88/* decrease the count of entries using/referencing this config. If last
@@ -84,10 +91,11 @@ clusterip_config_put(struct clusterip_config *c)
84static inline void 91static inline void
85clusterip_config_entry_put(struct clusterip_config *c) 92clusterip_config_entry_put(struct clusterip_config *c)
86{ 93{
87 write_lock_bh(&clusterip_lock); 94 local_bh_disable();
88 if (atomic_dec_and_test(&c->entries)) { 95 if (atomic_dec_and_lock(&c->entries, &clusterip_lock)) {
89 list_del(&c->list); 96 list_del_rcu(&c->list);
90 write_unlock_bh(&clusterip_lock); 97 spin_unlock(&clusterip_lock);
98 local_bh_enable();
91 99
92 dev_mc_del(c->dev, c->clustermac); 100 dev_mc_del(c->dev, c->clustermac);
93 dev_put(c->dev); 101 dev_put(c->dev);
@@ -100,7 +108,7 @@ clusterip_config_entry_put(struct clusterip_config *c)
100#endif 108#endif
101 return; 109 return;
102 } 110 }
103 write_unlock_bh(&clusterip_lock); 111 local_bh_enable();
104} 112}
105 113
106static struct clusterip_config * 114static struct clusterip_config *
@@ -108,7 +116,7 @@ __clusterip_config_find(__be32 clusterip)
108{ 116{
109 struct clusterip_config *c; 117 struct clusterip_config *c;
110 118
111 list_for_each_entry(c, &clusterip_configs, list) { 119 list_for_each_entry_rcu(c, &clusterip_configs, list) {
112 if (c->clusterip == clusterip) 120 if (c->clusterip == clusterip)
113 return c; 121 return c;
114 } 122 }
@@ -121,16 +129,15 @@ clusterip_config_find_get(__be32 clusterip, int entry)
121{ 129{
122 struct clusterip_config *c; 130 struct clusterip_config *c;
123 131
124 read_lock_bh(&clusterip_lock); 132 rcu_read_lock_bh();
125 c = __clusterip_config_find(clusterip); 133 c = __clusterip_config_find(clusterip);
126 if (!c) { 134 if (c) {
127 read_unlock_bh(&clusterip_lock); 135 if (unlikely(!atomic_inc_not_zero(&c->refcount)))
128 return NULL; 136 c = NULL;
137 else if (entry)
138 atomic_inc(&c->entries);
129 } 139 }
130 atomic_inc(&c->refcount); 140 rcu_read_unlock_bh();
131 if (entry)
132 atomic_inc(&c->entries);
133 read_unlock_bh(&clusterip_lock);
134 141
135 return c; 142 return c;
136} 143}
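
clusterip_config_find_get() now runs under rcu_read_lock_bh() and can race with the final clusterip_config_put(), so it takes its reference with atomic_inc_not_zero(): a config whose refcount has already hit zero (its free pending via call_rcu_bh) must not be resurrected. The pattern, sketched with C11 atomics standing in for the kernel's atomic_t:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Take a reference only if the object is still alive (count > 0). */
static bool get_ref_not_zero(atomic_uint *refcount)
{
    unsigned int v = atomic_load(refcount);

    while (v != 0) {
        /* retry until we bump v -> v+1 or observe zero */
        if (atomic_compare_exchange_weak(refcount, &v, v + 1))
            return true;
    }
    return false;       /* object already dying: caller must not use it */
}

int main(void)
{
    atomic_uint live = 1, dying = 0;

    printf("live:  %s\n", get_ref_not_zero(&live)  ? "got ref" : "miss");
    printf("dying: %s\n", get_ref_not_zero(&dying) ? "got ref" : "miss");
    return 0;
}
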
@@ -181,9 +188,9 @@ clusterip_config_init(const struct ipt_clusterip_tgt_info *i, __be32 ip,
181 } 188 }
182#endif 189#endif
183 190
184 write_lock_bh(&clusterip_lock); 191 spin_lock_bh(&clusterip_lock);
185 list_add(&c->list, &clusterip_configs); 192 list_add_rcu(&c->list, &clusterip_configs);
186 write_unlock_bh(&clusterip_lock); 193 spin_unlock_bh(&clusterip_lock);
187 194
188 return c; 195 return c;
189} 196}
@@ -462,7 +469,7 @@ struct arp_payload {
462 __be32 src_ip; 469 __be32 src_ip;
463 u_int8_t dst_hw[ETH_ALEN]; 470 u_int8_t dst_hw[ETH_ALEN];
464 __be32 dst_ip; 471 __be32 dst_ip;
465} __attribute__ ((packed)); 472} __packed;
466 473
467#ifdef DEBUG 474#ifdef DEBUG
468static void arp_print(struct arp_payload *payload) 475static void arp_print(struct arp_payload *payload)
@@ -733,6 +740,9 @@ static void __exit clusterip_tg_exit(void)
733#endif 740#endif
734 nf_unregister_hook(&cip_arp_ops); 741 nf_unregister_hook(&cip_arp_ops);
735 xt_unregister_target(&clusterip_tg_reg); 742 xt_unregister_target(&clusterip_tg_reg);
743
744 /* Wait for completion of call_rcu_bh()'s (clusterip_config_rcu_free) */
745 rcu_barrier_bh();
736} 746}
737 747
738module_init(clusterip_tg_init); 748module_init(clusterip_tg_init);
diff --git a/net/ipv4/netfilter/ipt_LOG.c b/net/ipv4/netfilter/ipt_LOG.c
index 5234f4f3499a..915fc17d7ce2 100644
--- a/net/ipv4/netfilter/ipt_LOG.c
+++ b/net/ipv4/netfilter/ipt_LOG.c
@@ -13,6 +13,7 @@
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/spinlock.h> 14#include <linux/spinlock.h>
15#include <linux/skbuff.h> 15#include <linux/skbuff.h>
16#include <linux/if_arp.h>
16#include <linux/ip.h> 17#include <linux/ip.h>
17#include <net/icmp.h> 18#include <net/icmp.h>
18#include <net/udp.h> 19#include <net/udp.h>
@@ -363,6 +364,42 @@ static void dump_packet(const struct nf_loginfo *info,
363 /* maxlen = 230+ 91 + 230 + 252 = 803 */ 364 /* maxlen = 230+ 91 + 230 + 252 = 803 */
364} 365}
365 366
367static void dump_mac_header(const struct nf_loginfo *info,
368 const struct sk_buff *skb)
369{
370 struct net_device *dev = skb->dev;
371 unsigned int logflags = 0;
372
373 if (info->type == NF_LOG_TYPE_LOG)
374 logflags = info->u.log.logflags;
375
376 if (!(logflags & IPT_LOG_MACDECODE))
377 goto fallback;
378
379 switch (dev->type) {
380 case ARPHRD_ETHER:
381 printk("MACSRC=%pM MACDST=%pM MACPROTO=%04x ",
382 eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
383 ntohs(eth_hdr(skb)->h_proto));
384 return;
385 default:
386 break;
387 }
388
389fallback:
390 printk("MAC=");
391 if (dev->hard_header_len &&
392 skb->mac_header != skb->network_header) {
393 const unsigned char *p = skb_mac_header(skb);
394 unsigned int i;
395
396 printk("%02x", *p++);
397 for (i = 1; i < dev->hard_header_len; i++, p++)
398 printk(":%02x", *p);
399 }
400 printk(" ");
401}
402
366static struct nf_loginfo default_loginfo = { 403static struct nf_loginfo default_loginfo = {
367 .type = NF_LOG_TYPE_LOG, 404 .type = NF_LOG_TYPE_LOG,
368 .u = { 405 .u = {
@@ -404,20 +441,9 @@ ipt_log_packet(u_int8_t pf,
404 } 441 }
405#endif 442#endif
406 443
407 if (in && !out) { 444 /* MAC logging for input path only. */
408 /* MAC logging for input chain only. */ 445 if (in && !out)
409 printk("MAC="); 446 dump_mac_header(loginfo, skb);
410 if (skb->dev && skb->dev->hard_header_len &&
411 skb->mac_header != skb->network_header) {
412 int i;
413 const unsigned char *p = skb_mac_header(skb);
414 for (i = 0; i < skb->dev->hard_header_len; i++,p++)
415 printk("%02x%c", *p,
416 i==skb->dev->hard_header_len - 1
417 ? ' ':':');
418 } else
419 printk(" ");
420 }
421 447
422 dump_packet(loginfo, skb, 0); 448 dump_packet(loginfo, skb, 0);
423 printk("\n"); 449 printk("\n");
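
The new dump_mac_header() decodes Ethernet frames through the kernel's %pM format extension (a colon-separated MAC address) when IPT_LOG_MACDECODE is set, and keeps the old raw hex dump as the fallback. Userspace printf has no %pM, so this sketch spells both renderings out by hand over made-up header bytes:

#include <stdio.h>

int main(void)
{
    unsigned char hdr[14] = {               /* dst(6) src(6) ethertype(2) */
        0x00,0x11,0x22,0x33,0x44,0x55,
        0x66,0x77,0x88,0x99,0xaa,0xbb, 0x08,0x00
    };
    unsigned char *dst = hdr, *src = hdr + 6;
    unsigned int proto = (hdr[12] << 8) | hdr[13];
    unsigned int i;

    /* decoded form: what %pM yields for each address in the kernel */
    printf("MACSRC=%02x:%02x:%02x:%02x:%02x:%02x "
           "MACDST=%02x:%02x:%02x:%02x:%02x:%02x MACPROTO=%04x\n",
           src[0], src[1], src[2], src[3], src[4], src[5],
           dst[0], dst[1], dst[2], dst[3], dst[4], dst[5], proto);

    printf("MAC=");                         /* raw fallback, as before */
    printf("%02x", hdr[0]);
    for (i = 1; i < sizeof(hdr); i++)
        printf(":%02x", hdr[i]);
    printf("\n");
    return 0;
}
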
diff --git a/net/ipv4/netfilter/ipt_NETMAP.c b/net/ipv4/netfilter/ipt_NETMAP.c
index f43867d1697f..6cdb298f1035 100644
--- a/net/ipv4/netfilter/ipt_NETMAP.c
+++ b/net/ipv4/netfilter/ipt_NETMAP.c
@@ -48,7 +48,8 @@ netmap_tg(struct sk_buff *skb, const struct xt_action_param *par)
48 48
49 NF_CT_ASSERT(par->hooknum == NF_INET_PRE_ROUTING || 49 NF_CT_ASSERT(par->hooknum == NF_INET_PRE_ROUTING ||
50 par->hooknum == NF_INET_POST_ROUTING || 50 par->hooknum == NF_INET_POST_ROUTING ||
51 par->hooknum == NF_INET_LOCAL_OUT); 51 par->hooknum == NF_INET_LOCAL_OUT ||
52 par->hooknum == NF_INET_LOCAL_IN);
52 ct = nf_ct_get(skb, &ctinfo); 53 ct = nf_ct_get(skb, &ctinfo);
53 54
54 netmask = ~(mr->range[0].min_ip ^ mr->range[0].max_ip); 55 netmask = ~(mr->range[0].min_ip ^ mr->range[0].max_ip);
@@ -77,7 +78,8 @@ static struct xt_target netmap_tg_reg __read_mostly = {
77 .table = "nat", 78 .table = "nat",
78 .hooks = (1 << NF_INET_PRE_ROUTING) | 79 .hooks = (1 << NF_INET_PRE_ROUTING) |
79 (1 << NF_INET_POST_ROUTING) | 80 (1 << NF_INET_POST_ROUTING) |
80 (1 << NF_INET_LOCAL_OUT), 81 (1 << NF_INET_LOCAL_OUT) |
82 (1 << NF_INET_LOCAL_IN),
81 .checkentry = netmap_tg_check, 83 .checkentry = netmap_tg_check,
82 .me = THIS_MODULE 84 .me = THIS_MODULE
83}; 85};
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c
index f5f4a888e4ec..b254dafaf429 100644
--- a/net/ipv4/netfilter/ipt_REJECT.c
+++ b/net/ipv4/netfilter/ipt_REJECT.c
@@ -95,10 +95,11 @@ static void send_reset(struct sk_buff *oldskb, int hook)
95 } 95 }
96 96
97 tcph->rst = 1; 97 tcph->rst = 1;
98 tcph->check = tcp_v4_check(sizeof(struct tcphdr), 98 tcph->check = ~tcp_v4_check(sizeof(struct tcphdr), niph->saddr,
99 niph->saddr, niph->daddr, 99 niph->daddr, 0);
100 csum_partial(tcph, 100 nskb->ip_summed = CHECKSUM_PARTIAL;
101 sizeof(struct tcphdr), 0)); 101 nskb->csum_start = (unsigned char *)tcph - nskb->head;
102 nskb->csum_offset = offsetof(struct tcphdr, check);
102 103
103 addr_type = RTN_UNSPEC; 104 addr_type = RTN_UNSPEC;
104 if (hook != NF_INET_FORWARD 105 if (hook != NF_INET_FORWARD
@@ -109,13 +110,12 @@ static void send_reset(struct sk_buff *oldskb, int hook)
109 addr_type = RTN_LOCAL; 110 addr_type = RTN_LOCAL;
110 111
111 /* ip_route_me_harder expects skb->dst to be set */ 112 /* ip_route_me_harder expects skb->dst to be set */
112 skb_dst_set(nskb, dst_clone(skb_dst(oldskb))); 113 skb_dst_set_noref(nskb, skb_dst(oldskb));
113 114
114 if (ip_route_me_harder(nskb, addr_type)) 115 if (ip_route_me_harder(nskb, addr_type))
115 goto free_nskb; 116 goto free_nskb;
116 117
117 niph->ttl = dst_metric(skb_dst(nskb), RTAX_HOPLIMIT); 118 niph->ttl = dst_metric(skb_dst(nskb), RTAX_HOPLIMIT);
118 nskb->ip_summed = CHECKSUM_NONE;
119 119
120 /* "Never happens" */ 120 /* "Never happens" */
121 if (nskb->len > dst_mtu(skb_dst(nskb))) 121 if (nskb->len > dst_mtu(skb_dst(nskb)))
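
send_reset() now emits the RST with CHECKSUM_PARTIAL: software seeds tcph->check with ~tcp_v4_check(len, saddr, daddr, 0) — the folded but not yet complemented pseudo-header sum — and points csum_start/csum_offset at the field; the NIC (or skb_checksum_help()) later sums from csum_start onward and stores the complemented fold. A standalone check of that arithmetic with made-up addresses:

#include <stdint.h>
#include <stdio.h>

static uint32_t add16(uint32_t acc, const uint8_t *p, size_t len)
{
    while (len > 1) { acc += (p[0] << 8) | p[1]; p += 2; len -= 2; }
    if (len)
        acc += p[0] << 8;
    return acc;
}

static uint16_t fold(uint32_t s)            /* end-around-carry fold */
{
    while (s >> 16)
        s = (s & 0xffff) + (s >> 16);
    return (uint16_t)s;
}

int main(void)
{
    uint8_t pseudo[12] = { 192,0,2,1, 198,51,100,7, 0,6, 0,20 };
    uint8_t tcp[20] = { 0 };                /* bare RST; check at 16..17 */

    /* conventional all-software checksum, for comparison */
    uint16_t ref = (uint16_t)~fold(add16(add16(0, pseudo, 12), tcp, 20));

    /* CHECKSUM_PARTIAL: seed the field with the uncomplemented
     * pseudo-header sum, i.e. ~tcp_v4_check(20, saddr, daddr, 0) */
    uint16_t seed = fold(add16(0, pseudo, 12));
    tcp[16] = seed >> 8;
    tcp[17] = seed & 0xff;

    /* what the NIC does: sum from csum_start, complement the fold */
    uint16_t hw = (uint16_t)~fold(add16(0, tcp, 20));

    printf("ref=0x%04x hw=0x%04x (%s)\n", ref, hw,
           ref == hw ? "match" : "MISMATCH");
    return 0;
}
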
diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
index cb763ae9ed90..eab8de32f200 100644
--- a/net/ipv4/netfilter/nf_defrag_ipv4.c
+++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
@@ -66,6 +66,11 @@ static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
66 const struct net_device *out, 66 const struct net_device *out,
67 int (*okfn)(struct sk_buff *)) 67 int (*okfn)(struct sk_buff *))
68{ 68{
69 struct inet_sock *inet = inet_sk(skb->sk);
70
71 if (inet && inet->nodefrag)
72 return NF_ACCEPT;
73
69#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 74#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
70#if !defined(CONFIG_NF_NAT) && !defined(CONFIG_NF_NAT_MODULE) 75#if !defined(CONFIG_NF_NAT) && !defined(CONFIG_NF_NAT_MODULE)
71 /* Previously seen (loopback)? Ignore. Do this before 76 /* Previously seen (loopback)? Ignore. Do this before
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index 4f8bddb760c9..8c8632d9b93c 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -261,14 +261,9 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
261 rcu_read_lock(); 261 rcu_read_lock();
262 proto = __nf_nat_proto_find(orig_tuple->dst.protonum); 262 proto = __nf_nat_proto_find(orig_tuple->dst.protonum);
263 263
264 /* Change protocol info to have some randomization */
265 if (range->flags & IP_NAT_RANGE_PROTO_RANDOM) {
266 proto->unique_tuple(tuple, range, maniptype, ct);
267 goto out;
268 }
269
270 /* Only bother mapping if it's not already in range and unique */ 264 /* Only bother mapping if it's not already in range and unique */
271 if ((!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) || 265 if (!(range->flags & IP_NAT_RANGE_PROTO_RANDOM) &&
266 (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) ||
272 proto->in_range(tuple, maniptype, &range->min, &range->max)) && 267 proto->in_range(tuple, maniptype, &range->min, &range->max)) &&
273 !nf_nat_used_tuple(tuple, ct)) 268 !nf_nat_used_tuple(tuple, ct))
274 goto out; 269 goto out;
@@ -440,7 +435,7 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct,
440 if (!skb_make_writable(skb, hdrlen + sizeof(*inside))) 435 if (!skb_make_writable(skb, hdrlen + sizeof(*inside)))
441 return 0; 436 return 0;
442 437
443 inside = (void *)skb->data + ip_hdrlen(skb); 438 inside = (void *)skb->data + hdrlen;
444 439
445 /* We're actually going to mangle it beyond trivial checksum 440 /* We're actually going to mangle it beyond trivial checksum
446 adjustment, so make sure the current checksum is correct. */ 441 adjustment, so make sure the current checksum is correct. */
@@ -470,12 +465,10 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct,
470 /* rcu_read_lock()ed by nf_hook_slow */ 465 /* rcu_read_lock()ed by nf_hook_slow */
471 l4proto = __nf_ct_l4proto_find(PF_INET, inside->ip.protocol); 466 l4proto = __nf_ct_l4proto_find(PF_INET, inside->ip.protocol);
472 467
473 if (!nf_ct_get_tuple(skb, 468 if (!nf_ct_get_tuple(skb, hdrlen + sizeof(struct icmphdr),
474 ip_hdrlen(skb) + sizeof(struct icmphdr), 469 (hdrlen +
475 (ip_hdrlen(skb) +
476 sizeof(struct icmphdr) + inside->ip.ihl * 4), 470 sizeof(struct icmphdr) + inside->ip.ihl * 4),
477 (u_int16_t)AF_INET, 471 (u_int16_t)AF_INET, inside->ip.protocol,
478 inside->ip.protocol,
479 &inner, l3proto, l4proto)) 472 &inner, l3proto, l4proto))
480 return 0; 473 return 0;
481 474
@@ -484,15 +477,13 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct,
484 pass all hooks (locally-generated ICMP). Consider incoming 477 pass all hooks (locally-generated ICMP). Consider incoming
485 packet: PREROUTING (DST manip), routing produces ICMP, goes 478 packet: PREROUTING (DST manip), routing produces ICMP, goes
486 through POSTROUTING (which must correct the DST manip). */ 479 through POSTROUTING (which must correct the DST manip). */
487 if (!manip_pkt(inside->ip.protocol, skb, 480 if (!manip_pkt(inside->ip.protocol, skb, hdrlen + sizeof(inside->icmp),
488 ip_hdrlen(skb) + sizeof(inside->icmp), 481 &ct->tuplehash[!dir].tuple, !manip))
489 &ct->tuplehash[!dir].tuple,
490 !manip))
491 return 0; 482 return 0;
492 483
493 if (skb->ip_summed != CHECKSUM_PARTIAL) { 484 if (skb->ip_summed != CHECKSUM_PARTIAL) {
494 /* Reloading "inside" here since manip_pkt inner. */ 485 /* Reloading "inside" here since manip_pkt inner. */
495 inside = (void *)skb->data + ip_hdrlen(skb); 486 inside = (void *)skb->data + hdrlen;
496 inside->icmp.checksum = 0; 487 inside->icmp.checksum = 0;
497 inside->icmp.checksum = 488 inside->icmp.checksum =
498 csum_fold(skb_checksum(skb, hdrlen, 489 csum_fold(skb_checksum(skb, hdrlen,
@@ -742,7 +733,7 @@ static int __init nf_nat_init(void)
742 spin_unlock_bh(&nf_nat_lock); 733 spin_unlock_bh(&nf_nat_lock);
743 734
744 /* Initialize fake conntrack so that NAT will skip it */ 735 /* Initialize fake conntrack so that NAT will skip it */
745 nf_conntrack_untracked.status |= IPS_NAT_DONE_MASK; 736 nf_ct_untracked_status_or(IPS_NAT_DONE_MASK);
746 737
747 l3proto = nf_ct_l3proto_find_get((u_int16_t)AF_INET); 738 l3proto = nf_ct_l3proto_find_get((u_int16_t)AF_INET);
748 739
diff --git a/net/ipv4/netfilter/nf_nat_proto_common.c b/net/ipv4/netfilter/nf_nat_proto_common.c
index 6c4f11f51446..3e61faf23a9a 100644
--- a/net/ipv4/netfilter/nf_nat_proto_common.c
+++ b/net/ipv4/netfilter/nf_nat_proto_common.c
@@ -34,7 +34,7 @@ bool nf_nat_proto_in_range(const struct nf_conntrack_tuple *tuple,
34} 34}
35EXPORT_SYMBOL_GPL(nf_nat_proto_in_range); 35EXPORT_SYMBOL_GPL(nf_nat_proto_in_range);
36 36
37bool nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple, 37void nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple,
38 const struct nf_nat_range *range, 38 const struct nf_nat_range *range,
39 enum nf_nat_manip_type maniptype, 39 enum nf_nat_manip_type maniptype,
40 const struct nf_conn *ct, 40 const struct nf_conn *ct,
@@ -53,7 +53,7 @@ bool nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple,
53 if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) { 53 if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) {
54 /* If it's dst rewrite, can't change port */ 54 /* If it's dst rewrite, can't change port */
55 if (maniptype == IP_NAT_MANIP_DST) 55 if (maniptype == IP_NAT_MANIP_DST)
56 return false; 56 return;
57 57
58 if (ntohs(*portptr) < 1024) { 58 if (ntohs(*portptr) < 1024) {
59 /* Loose convention: >> 512 is credential passing */ 59 /* Loose convention: >> 512 is credential passing */
@@ -81,15 +81,15 @@ bool nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple,
81 else 81 else
82 off = *rover; 82 off = *rover;
83 83
84 for (i = 0; i < range_size; i++, off++) { 84 for (i = 0; ; ++off) {
85 *portptr = htons(min + off % range_size); 85 *portptr = htons(min + off % range_size);
86 if (nf_nat_used_tuple(tuple, ct)) 86 if (++i != range_size && nf_nat_used_tuple(tuple, ct))
87 continue; 87 continue;
88 if (!(range->flags & IP_NAT_RANGE_PROTO_RANDOM)) 88 if (!(range->flags & IP_NAT_RANGE_PROTO_RANDOM))
89 *rover = off; 89 *rover = off;
90 return true; 90 return;
91 } 91 }
92 return false; 92 return;
93} 93}
94EXPORT_SYMBOL_GPL(nf_nat_proto_unique_tuple); 94EXPORT_SYMBOL_GPL(nf_nat_proto_unique_tuple);
95 95
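
The rewritten loop in nf_nat_proto_unique_tuple() (mirrored in the GRE and ICMP variants below) is why the return type can drop to void: every pass writes a candidate into the tuple, and the last pass is accepted even if nf_nat_used_tuple() still reports a clash, so some mapping is always produced. A runnable simulation of that rover walk; the used() predicate and range values are illustrative:

#include <stdbool.h>
#include <stdio.h>

static bool used(unsigned int port)
{
    return port != 1030;            /* pretend only 1030 is free */
}

int main(void)
{
    unsigned int min = 1024, range_size = 8;
    unsigned int off = 0;           /* the kernel seeds this from *rover */
    unsigned int port = 0, i;

    for (i = 0; ; ++off) {
        port = min + off % range_size;
        /* same shape as the patch: the last slot is taken as-is */
        if (++i != range_size && used(port))
            continue;
        break;
    }
    printf("picked port %u after %u probe(s)\n", port, i);
    return 0;                       /* picked port 1030 after 7 probe(s) */
}
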
diff --git a/net/ipv4/netfilter/nf_nat_proto_dccp.c b/net/ipv4/netfilter/nf_nat_proto_dccp.c
index 22485ce306d4..570faf2667b2 100644
--- a/net/ipv4/netfilter/nf_nat_proto_dccp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_dccp.c
@@ -22,14 +22,14 @@
22 22
23static u_int16_t dccp_port_rover; 23static u_int16_t dccp_port_rover;
24 24
25static bool 25static void
26dccp_unique_tuple(struct nf_conntrack_tuple *tuple, 26dccp_unique_tuple(struct nf_conntrack_tuple *tuple,
27 const struct nf_nat_range *range, 27 const struct nf_nat_range *range,
28 enum nf_nat_manip_type maniptype, 28 enum nf_nat_manip_type maniptype,
29 const struct nf_conn *ct) 29 const struct nf_conn *ct)
30{ 30{
31 return nf_nat_proto_unique_tuple(tuple, range, maniptype, ct, 31 nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
32 &dccp_port_rover); 32 &dccp_port_rover);
33} 33}
34 34
35static bool 35static bool
diff --git a/net/ipv4/netfilter/nf_nat_proto_gre.c b/net/ipv4/netfilter/nf_nat_proto_gre.c
index d7e89201351e..bc8d83a31c73 100644
--- a/net/ipv4/netfilter/nf_nat_proto_gre.c
+++ b/net/ipv4/netfilter/nf_nat_proto_gre.c
@@ -37,7 +37,7 @@ MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
37MODULE_DESCRIPTION("Netfilter NAT protocol helper module for GRE"); 37MODULE_DESCRIPTION("Netfilter NAT protocol helper module for GRE");
38 38
39/* generate unique tuple ... */ 39/* generate unique tuple ... */
40static bool 40static void
41gre_unique_tuple(struct nf_conntrack_tuple *tuple, 41gre_unique_tuple(struct nf_conntrack_tuple *tuple,
42 const struct nf_nat_range *range, 42 const struct nf_nat_range *range,
43 enum nf_nat_manip_type maniptype, 43 enum nf_nat_manip_type maniptype,
@@ -50,7 +50,7 @@ gre_unique_tuple(struct nf_conntrack_tuple *tuple,
50 /* If there is no master conntrack we are not PPTP, 50 /* If there is no master conntrack we are not PPTP,
51 do not change tuples */ 51 do not change tuples */
52 if (!ct->master) 52 if (!ct->master)
53 return false; 53 return;
54 54
55 if (maniptype == IP_NAT_MANIP_SRC) 55 if (maniptype == IP_NAT_MANIP_SRC)
56 keyptr = &tuple->src.u.gre.key; 56 keyptr = &tuple->src.u.gre.key;
@@ -68,14 +68,14 @@ gre_unique_tuple(struct nf_conntrack_tuple *tuple,
68 68
69 pr_debug("min = %u, range_size = %u\n", min, range_size); 69 pr_debug("min = %u, range_size = %u\n", min, range_size);
70 70
71 for (i = 0; i < range_size; i++, key++) { 71 for (i = 0; ; ++key) {
72 *keyptr = htons(min + key % range_size); 72 *keyptr = htons(min + key % range_size);
73 if (!nf_nat_used_tuple(tuple, ct)) 73 if (++i == range_size || !nf_nat_used_tuple(tuple, ct))
74 return true; 74 return;
75 } 75 }
76 76
77 pr_debug("%p: no NAT mapping\n", ct); 77 pr_debug("%p: no NAT mapping\n", ct);
78 return false; 78 return;
79} 79}
80 80
81/* manipulate a GRE packet according to maniptype */ 81/* manipulate a GRE packet according to maniptype */
diff --git a/net/ipv4/netfilter/nf_nat_proto_icmp.c b/net/ipv4/netfilter/nf_nat_proto_icmp.c
index 19a8b0b07d8e..5744c3ec847c 100644
--- a/net/ipv4/netfilter/nf_nat_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_icmp.c
@@ -27,7 +27,7 @@ icmp_in_range(const struct nf_conntrack_tuple *tuple,
27 ntohs(tuple->src.u.icmp.id) <= ntohs(max->icmp.id); 27 ntohs(tuple->src.u.icmp.id) <= ntohs(max->icmp.id);
28} 28}
29 29
30static bool 30static void
31icmp_unique_tuple(struct nf_conntrack_tuple *tuple, 31icmp_unique_tuple(struct nf_conntrack_tuple *tuple,
32 const struct nf_nat_range *range, 32 const struct nf_nat_range *range,
33 enum nf_nat_manip_type maniptype, 33 enum nf_nat_manip_type maniptype,
@@ -42,13 +42,13 @@ icmp_unique_tuple(struct nf_conntrack_tuple *tuple,
42 if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)) 42 if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED))
43 range_size = 0xFFFF; 43 range_size = 0xFFFF;
44 44
45 for (i = 0; i < range_size; i++, id++) { 45 for (i = 0; ; ++id) {
46 tuple->src.u.icmp.id = htons(ntohs(range->min.icmp.id) + 46 tuple->src.u.icmp.id = htons(ntohs(range->min.icmp.id) +
47 (id % range_size)); 47 (id % range_size));
48 if (!nf_nat_used_tuple(tuple, ct)) 48 if (++i == range_size || !nf_nat_used_tuple(tuple, ct))
49 return true; 49 return;
50 } 50 }
51 return false; 51 return;
52} 52}
53 53
54static bool 54static bool
diff --git a/net/ipv4/netfilter/nf_nat_proto_sctp.c b/net/ipv4/netfilter/nf_nat_proto_sctp.c
index 3fc598eeeb1a..756331d42661 100644
--- a/net/ipv4/netfilter/nf_nat_proto_sctp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_sctp.c
@@ -16,14 +16,14 @@
16 16
17static u_int16_t nf_sctp_port_rover; 17static u_int16_t nf_sctp_port_rover;
18 18
19static bool 19static void
20sctp_unique_tuple(struct nf_conntrack_tuple *tuple, 20sctp_unique_tuple(struct nf_conntrack_tuple *tuple,
21 const struct nf_nat_range *range, 21 const struct nf_nat_range *range,
22 enum nf_nat_manip_type maniptype, 22 enum nf_nat_manip_type maniptype,
23 const struct nf_conn *ct) 23 const struct nf_conn *ct)
24{ 24{
25 return nf_nat_proto_unique_tuple(tuple, range, maniptype, ct, 25 nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
26 &nf_sctp_port_rover); 26 &nf_sctp_port_rover);
27} 27}
28 28
29static bool 29static bool
diff --git a/net/ipv4/netfilter/nf_nat_proto_tcp.c b/net/ipv4/netfilter/nf_nat_proto_tcp.c
index 399e2cfa263b..aa460a595d5d 100644
--- a/net/ipv4/netfilter/nf_nat_proto_tcp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_tcp.c
@@ -20,14 +20,13 @@
20 20
21static u_int16_t tcp_port_rover; 21static u_int16_t tcp_port_rover;
22 22
23static bool 23static void
24tcp_unique_tuple(struct nf_conntrack_tuple *tuple, 24tcp_unique_tuple(struct nf_conntrack_tuple *tuple,
25 const struct nf_nat_range *range, 25 const struct nf_nat_range *range,
26 enum nf_nat_manip_type maniptype, 26 enum nf_nat_manip_type maniptype,
27 const struct nf_conn *ct) 27 const struct nf_conn *ct)
28{ 28{
29 return nf_nat_proto_unique_tuple(tuple, range, maniptype, ct, 29 nf_nat_proto_unique_tuple(tuple, range, maniptype, ct, &tcp_port_rover);
30 &tcp_port_rover);
31} 30}
32 31
33static bool 32static bool
diff --git a/net/ipv4/netfilter/nf_nat_proto_udp.c b/net/ipv4/netfilter/nf_nat_proto_udp.c
index 9e61c79492e4..dfe65c7e2925 100644
--- a/net/ipv4/netfilter/nf_nat_proto_udp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_udp.c
@@ -19,14 +19,13 @@
19 19
20static u_int16_t udp_port_rover; 20static u_int16_t udp_port_rover;
21 21
22static bool 22static void
23udp_unique_tuple(struct nf_conntrack_tuple *tuple, 23udp_unique_tuple(struct nf_conntrack_tuple *tuple,
24 const struct nf_nat_range *range, 24 const struct nf_nat_range *range,
25 enum nf_nat_manip_type maniptype, 25 enum nf_nat_manip_type maniptype,
26 const struct nf_conn *ct) 26 const struct nf_conn *ct)
27{ 27{
28 return nf_nat_proto_unique_tuple(tuple, range, maniptype, ct, 28 nf_nat_proto_unique_tuple(tuple, range, maniptype, ct, &udp_port_rover);
29 &udp_port_rover);
30} 29}
31 30
32static bool 31static bool
diff --git a/net/ipv4/netfilter/nf_nat_proto_udplite.c b/net/ipv4/netfilter/nf_nat_proto_udplite.c
index 440a229bbd87..3cc8c8af39ef 100644
--- a/net/ipv4/netfilter/nf_nat_proto_udplite.c
+++ b/net/ipv4/netfilter/nf_nat_proto_udplite.c
@@ -18,14 +18,14 @@
 
 static u_int16_t udplite_port_rover;
 
-static bool
+static void
 udplite_unique_tuple(struct nf_conntrack_tuple *tuple,
 		     const struct nf_nat_range *range,
 		     enum nf_nat_manip_type maniptype,
 		     const struct nf_conn *ct)
 {
-	return nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
-					 &udplite_port_rover);
+	nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
				  &udplite_port_rover);
 }
 
 static bool
diff --git a/net/ipv4/netfilter/nf_nat_proto_unknown.c b/net/ipv4/netfilter/nf_nat_proto_unknown.c
index 14381c62acea..a50f2bc1c732 100644
--- a/net/ipv4/netfilter/nf_nat_proto_unknown.c
+++ b/net/ipv4/netfilter/nf_nat_proto_unknown.c
@@ -26,14 +26,14 @@ static bool unknown_in_range(const struct nf_conntrack_tuple *tuple,
 	return true;
 }
 
-static bool unknown_unique_tuple(struct nf_conntrack_tuple *tuple,
+static void unknown_unique_tuple(struct nf_conntrack_tuple *tuple,
 				 const struct nf_nat_range *range,
 				 enum nf_nat_manip_type maniptype,
 				 const struct nf_conn *ct)
 {
 	/* Sorry: we can't help you; if it's not unique, we can't frob
 	   anything. */
-	return false;
+	return;
 }
 
 static bool
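The five hunks above apply one and the same conversion: each per-protocol unique_tuple() callback used to return bool, but the caller never acted on the value, so the callbacks now return void and simply delegate to the shared helper. A minimal sketch of the resulting callback shape; the example_* names are hypothetical stand-ins, not part of the patch:

/* Hypothetical protocol callback after the bool -> void conversion:
 * pick a unique port via the shared helper, nothing to report back. */
static u_int16_t example_port_rover;

static void
example_unique_tuple(struct nf_conntrack_tuple *tuple,
		     const struct nf_nat_range *range,
		     enum nf_nat_manip_type maniptype,
		     const struct nf_conn *ct)
{
	nf_nat_proto_unique_tuple(tuple, range, maniptype, ct,
				  &example_port_rover);
}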
diff --git a/net/ipv4/netfilter/nf_nat_rule.c b/net/ipv4/netfilter/nf_nat_rule.c
index 98ed78281aee..ebbd319f62f5 100644
--- a/net/ipv4/netfilter/nf_nat_rule.c
+++ b/net/ipv4/netfilter/nf_nat_rule.c
@@ -28,7 +28,8 @@
 
 #define NAT_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | \
 			 (1 << NF_INET_POST_ROUTING) | \
-			 (1 << NF_INET_LOCAL_OUT))
+			 (1 << NF_INET_LOCAL_OUT) | \
+			 (1 << NF_INET_LOCAL_IN))
 
 static const struct xt_table nat_table = {
 	.name		= "nat",
@@ -45,7 +46,8 @@ ipt_snat_target(struct sk_buff *skb, const struct xt_action_param *par)
 	enum ip_conntrack_info ctinfo;
 	const struct nf_nat_multi_range_compat *mr = par->targinfo;
 
-	NF_CT_ASSERT(par->hooknum == NF_INET_POST_ROUTING);
+	NF_CT_ASSERT(par->hooknum == NF_INET_POST_ROUTING ||
+		     par->hooknum == NF_INET_LOCAL_IN);
 
 	ct = nf_ct_get(skb, &ctinfo);
 
@@ -99,7 +101,7 @@ static int ipt_dnat_checkentry(const struct xt_tgchk_param *par)
 	return 0;
 }
 
-unsigned int
+static unsigned int
 alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
 {
 	/* Force range to this IP; let proto decide mapping for
@@ -141,7 +143,7 @@ static struct xt_target ipt_snat_reg __read_mostly = {
 	.target		= ipt_snat_target,
 	.targetsize	= sizeof(struct nf_nat_multi_range_compat),
 	.table		= "nat",
-	.hooks		= 1 << NF_INET_POST_ROUTING,
+	.hooks		= (1 << NF_INET_POST_ROUTING) | (1 << NF_INET_LOCAL_IN),
 	.checkentry	= ipt_snat_checkentry,
 	.family		= AF_INET,
 };
diff --git a/net/ipv4/netfilter/nf_nat_standalone.c b/net/ipv4/netfilter/nf_nat_standalone.c
index beb25819c9c9..95481fee8bdb 100644
--- a/net/ipv4/netfilter/nf_nat_standalone.c
+++ b/net/ipv4/netfilter/nf_nat_standalone.c
@@ -98,7 +98,7 @@ nf_nat_fn(unsigned int hooknum,
 		return NF_ACCEPT;
 
 	/* Don't try to NAT if this packet is not conntracked */
-	if (ct == &nf_conntrack_untracked)
+	if (nf_ct_is_untracked(ct))
 		return NF_ACCEPT;
 
 	nat = nfct_nat(ct);
@@ -131,13 +131,7 @@ nf_nat_fn(unsigned int hooknum,
 	if (!nf_nat_initialized(ct, maniptype)) {
 		unsigned int ret;
 
-		if (hooknum == NF_INET_LOCAL_IN)
-			/* LOCAL_IN hook doesn't have a chain! */
-			ret = alloc_null_binding(ct, hooknum);
-		else
-			ret = nf_nat_rule_find(skb, hooknum, in, out,
-					       ct);
-
+		ret = nf_nat_rule_find(skb, hooknum, in, out, ct);
 		if (ret != NF_ACCEPT)
 			return ret;
 	} else
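This file pairs with the nf_nat_rule.c diff above: now that the nat table registers a real chain at NF_INET_LOCAL_IN, nf_nat_fn() can drop the alloc_null_binding() special case and resolve every valid hook through nf_nat_rule_find(), which is also why that helper could become static. In practice — an assumption about intended usage, not something the patch states — this makes source NAT expressible on the input path:

/* Assumed usage once a nat/INPUT chain exists (hypothetical address):
 *
 *	iptables -t nat -A INPUT -j SNAT --to-source 192.0.2.1
 *
 * and the widened assertion in ipt_snat_target() admits the hook: */
NF_CT_ASSERT(par->hooknum == NF_INET_POST_ROUTING ||
	     par->hooknum == NF_INET_LOCAL_IN);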
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 3dc9914c1dce..4ae1f203f7cb 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -252,6 +252,7 @@ static const struct snmp_mib snmp4_net_list[] = {
252 SNMP_MIB_ITEM("TCPBacklogDrop", LINUX_MIB_TCPBACKLOGDROP), 252 SNMP_MIB_ITEM("TCPBacklogDrop", LINUX_MIB_TCPBACKLOGDROP),
253 SNMP_MIB_ITEM("TCPMinTTLDrop", LINUX_MIB_TCPMINTTLDROP), 253 SNMP_MIB_ITEM("TCPMinTTLDrop", LINUX_MIB_TCPMINTTLDROP),
254 SNMP_MIB_ITEM("TCPDeferAcceptDrop", LINUX_MIB_TCPDEFERACCEPTDROP), 254 SNMP_MIB_ITEM("TCPDeferAcceptDrop", LINUX_MIB_TCPDEFERACCEPTDROP),
255 SNMP_MIB_ITEM("IPReversePathFilter", LINUX_MIB_IPRPFILTER),
255 SNMP_MIB_SENTINEL 256 SNMP_MIB_SENTINEL
256}; 257};
257 258
@@ -342,10 +343,12 @@ static int snmp_seq_show(struct seq_file *seq, void *v)
342 IPV4_DEVCONF_ALL(net, FORWARDING) ? 1 : 2, 343 IPV4_DEVCONF_ALL(net, FORWARDING) ? 1 : 2,
343 sysctl_ip_default_ttl); 344 sysctl_ip_default_ttl);
344 345
346 BUILD_BUG_ON(offsetof(struct ipstats_mib, mibs) != 0);
345 for (i = 0; snmp4_ipstats_list[i].name != NULL; i++) 347 for (i = 0; snmp4_ipstats_list[i].name != NULL; i++)
346 seq_printf(seq, " %lu", 348 seq_printf(seq, " %llu",
347 snmp_fold_field((void __percpu **)net->mib.ip_statistics, 349 snmp_fold_field64((void __percpu **)net->mib.ip_statistics,
348 snmp4_ipstats_list[i].entry)); 350 snmp4_ipstats_list[i].entry,
351 offsetof(struct ipstats_mib, syncp)));
349 352
350 icmp_put(seq); /* RFC 2011 compatibility */ 353 icmp_put(seq); /* RFC 2011 compatibility */
351 icmpmsg_put(seq); 354 icmpmsg_put(seq);
@@ -431,9 +434,10 @@ static int netstat_seq_show(struct seq_file *seq, void *v)
431 434
432 seq_puts(seq, "\nIpExt:"); 435 seq_puts(seq, "\nIpExt:");
433 for (i = 0; snmp4_ipextstats_list[i].name != NULL; i++) 436 for (i = 0; snmp4_ipextstats_list[i].name != NULL; i++)
434 seq_printf(seq, " %lu", 437 seq_printf(seq, " %llu",
435 snmp_fold_field((void __percpu **)net->mib.ip_statistics, 438 snmp_fold_field64((void __percpu **)net->mib.ip_statistics,
436 snmp4_ipextstats_list[i].entry)); 439 snmp4_ipextstats_list[i].entry,
440 offsetof(struct ipstats_mib, syncp)));
437 441
438 seq_putc(seq, '\n'); 442 seq_putc(seq, '\n');
439 return 0; 443 return 0;
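The switch from snmp_fold_field()/%lu to snmp_fold_field64()/%llu matters on 32-bit SMP, where a 64-bit counter cannot be read atomically; the extra offsetof(struct ipstats_mib, syncp) argument tells the folding code where each per-cpu block keeps its u64_stats_sync sequence counter, so a torn read can be detected and retried (and the BUILD_BUG_ON pins the counters at offset 0 so the offsets line up). A simplified sketch of that read side — fold_field64_sketch is a hypothetical helper, and real snmp mibs fold two per-cpu arrays rather than one:

#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

static u64 fold_field64_sketch(void __percpu *mib, int offt, size_t syncp_off)
{
	u64 sum = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		void *p = per_cpu_ptr(mib, cpu);
		struct u64_stats_sync *syncp = p + syncp_off;
		unsigned int start;
		u64 v;

		do {			/* reread if the writer was mid-update */
			start = u64_stats_fetch_begin(syncp);
			v = *(u64 *)(p + offt);
		} while (u64_stats_fetch_retry(syncp, start));
		sum += v;
	}
	return sum;
}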
diff --git a/net/ipv4/protocol.c b/net/ipv4/protocol.c
index 542f22fc98b3..f2d297351405 100644
--- a/net/ipv4/protocol.c
+++ b/net/ipv4/protocol.c
@@ -52,6 +52,7 @@ int inet_add_protocol(const struct net_protocol *prot, unsigned char protocol)
 
 	return ret;
 }
+EXPORT_SYMBOL(inet_add_protocol);
 
 /*
  *	Remove a protocol from the hash tables.
@@ -76,6 +77,4 @@ int inet_del_protocol(const struct net_protocol *prot, unsigned char protocol)
 
 	return ret;
 }
-
-EXPORT_SYMBOL(inet_add_protocol);
 EXPORT_SYMBOL(inet_del_protocol);
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 2c7a1639388a..009a7b2aa1ef 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -314,7 +314,7 @@ int raw_rcv(struct sock *sk, struct sk_buff *skb)
 }
 
 static int raw_send_hdrinc(struct sock *sk, void *from, size_t length,
-			   struct rtable *rt,
+			   struct rtable **rtp,
 			   unsigned int flags)
 {
 	struct inet_sock *inet = inet_sk(sk);
@@ -323,25 +323,27 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length,
 	struct sk_buff *skb;
 	unsigned int iphlen;
 	int err;
+	struct rtable *rt = *rtp;
 
-	if (length > rt->u.dst.dev->mtu) {
+	if (length > rt->dst.dev->mtu) {
 		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport,
-			       rt->u.dst.dev->mtu);
+			       rt->dst.dev->mtu);
 		return -EMSGSIZE;
 	}
 	if (flags&MSG_PROBE)
 		goto out;
 
 	skb = sock_alloc_send_skb(sk,
-				  length + LL_ALLOCATED_SPACE(rt->u.dst.dev) + 15,
+				  length + LL_ALLOCATED_SPACE(rt->dst.dev) + 15,
 				  flags & MSG_DONTWAIT, &err);
 	if (skb == NULL)
 		goto error;
-	skb_reserve(skb, LL_RESERVED_SPACE(rt->u.dst.dev));
+	skb_reserve(skb, LL_RESERVED_SPACE(rt->dst.dev));
 
 	skb->priority = sk->sk_priority;
 	skb->mark = sk->sk_mark;
-	skb_dst_set(skb, dst_clone(&rt->u.dst));
+	skb_dst_set(skb, &rt->dst);
+	*rtp = NULL;
 
 	skb_reset_network_header(skb);
 	iph = ip_hdr(skb);
@@ -373,7 +375,7 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length,
 		iph->check   = 0;
 		iph->tot_len = htons(length);
 		if (!iph->id)
-			ip_select_ident(iph, &rt->u.dst, NULL);
+			ip_select_ident(iph, &rt->dst, NULL);
 
 		iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
 	}
@@ -382,7 +384,7 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length,
 		      skb_transport_header(skb))->type);
 
 	err = NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL,
-		      rt->u.dst.dev, dst_output);
+		      rt->dst.dev, dst_output);
 	if (err > 0)
 		err = net_xmit_errno(err);
 	if (err)
@@ -576,7 +578,7 @@ back_from_confirm:
 
 	if (inet->hdrincl)
 		err = raw_send_hdrinc(sk, msg->msg_iov, len,
-					rt, msg->msg_flags);
+					&rt, msg->msg_flags);
 
 	else {
 		if (!ipc.addr)
@@ -604,7 +606,7 @@ out:
 	return len;
 
 do_confirm:
-	dst_confirm(&rt->u.dst);
+	dst_confirm(&rt->dst);
 	if (!(msg->msg_flags & MSG_PROBE) || len)
 		goto back_from_confirm;
 	err = 0;
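raw_send_hdrinc() now takes struct rtable **rtp instead of struct rtable *: on the transmit path it attaches the caller's route reference to the skb with skb_dst_set() and clears *rtp, transferring ownership instead of paying for a dst_clone()/release round-trip of atomic operations per packet. A generic sketch of this reference-stealing idiom, with invented names:

/* Illustrative only: hand a counted reference to a consumer rather
 * than cloning it.  After hand_off(), *refp is NULL, the consumer
 * owns the reference, and the caller's usual release is skipped. */
struct object;
void consumer_take(struct object *obj);	/* consumes one reference */

static void hand_off(struct object **refp)
{
	consumer_take(*refp);
	*refp = NULL;
}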
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 560acc677ce4..3f56b6e6c6aa 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -253,8 +253,7 @@ static unsigned rt_hash_mask __read_mostly;
 static unsigned int		rt_hash_log  __read_mostly;
 
 static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
-#define RT_CACHE_STAT_INC(field) \
-	(__raw_get_cpu_var(rt_cache_stat).field++)
+#define RT_CACHE_STAT_INC(field) __this_cpu_inc(rt_cache_stat.field)
 
 static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
 				   int genid)
@@ -287,10 +286,10 @@ static struct rtable *rt_cache_get_first(struct seq_file *seq)
 		rcu_read_lock_bh();
 		r = rcu_dereference_bh(rt_hash_table[st->bucket].chain);
 		while (r) {
-			if (dev_net(r->u.dst.dev) == seq_file_net(seq) &&
+			if (dev_net(r->dst.dev) == seq_file_net(seq) &&
 			    r->rt_genid == st->genid)
 				return r;
-			r = rcu_dereference_bh(r->u.dst.rt_next);
+			r = rcu_dereference_bh(r->dst.rt_next);
 		}
 		rcu_read_unlock_bh();
 	}
@@ -302,7 +301,7 @@ static struct rtable *__rt_cache_get_next(struct seq_file *seq,
 {
 	struct rt_cache_iter_state *st = seq->private;
 
-	r = r->u.dst.rt_next;
+	r = r->dst.rt_next;
 	while (!r) {
 		rcu_read_unlock_bh();
 		do {
@@ -320,7 +319,7 @@ static struct rtable *rt_cache_get_next(struct seq_file *seq,
 {
 	struct rt_cache_iter_state *st = seq->private;
 	while ((r = __rt_cache_get_next(seq, r)) != NULL) {
-		if (dev_net(r->u.dst.dev) != seq_file_net(seq))
+		if (dev_net(r->dst.dev) != seq_file_net(seq))
 			continue;
 		if (r->rt_genid == st->genid)
 			break;
@@ -378,19 +377,19 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
 
 	seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t"
 		      "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
-		r->u.dst.dev ? r->u.dst.dev->name : "*",
+		r->dst.dev ? r->dst.dev->name : "*",
 		(__force u32)r->rt_dst,
 		(__force u32)r->rt_gateway,
-		r->rt_flags, atomic_read(&r->u.dst.__refcnt),
-		r->u.dst.__use, 0, (__force u32)r->rt_src,
-		(dst_metric(&r->u.dst, RTAX_ADVMSS) ?
-		 (int)dst_metric(&r->u.dst, RTAX_ADVMSS) + 40 : 0),
-		dst_metric(&r->u.dst, RTAX_WINDOW),
-		(int)((dst_metric(&r->u.dst, RTAX_RTT) >> 3) +
-		      dst_metric(&r->u.dst, RTAX_RTTVAR)),
+		r->rt_flags, atomic_read(&r->dst.__refcnt),
+		r->dst.__use, 0, (__force u32)r->rt_src,
+		(dst_metric(&r->dst, RTAX_ADVMSS) ?
+		 (int)dst_metric(&r->dst, RTAX_ADVMSS) + 40 : 0),
+		dst_metric(&r->dst, RTAX_WINDOW),
+		(int)((dst_metric(&r->dst, RTAX_RTT) >> 3) +
+		      dst_metric(&r->dst, RTAX_RTTVAR)),
 		r->fl.fl4_tos,
-		r->u.dst.hh ? atomic_read(&r->u.dst.hh->hh_refcnt) : -1,
-		r->u.dst.hh ? (r->u.dst.hh->hh_output ==
+		r->dst.hh ? atomic_read(&r->dst.hh->hh_refcnt) : -1,
+		r->dst.hh ? (r->dst.hh->hh_output ==
 			       dev_queue_xmit) : 0,
 		r->rt_spec_dst, &len);
 
@@ -609,13 +608,13 @@ static inline int ip_rt_proc_init(void)
 
 static inline void rt_free(struct rtable *rt)
 {
-	call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
+	call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
 }
 
 static inline void rt_drop(struct rtable *rt)
 {
 	ip_rt_put(rt);
-	call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
+	call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
 }
 
 static inline int rt_fast_clean(struct rtable *rth)
@@ -623,13 +622,13 @@ static inline int rt_fast_clean(struct rtable *rth)
 	/* Kill broadcast/multicast entries very aggresively, if they
 	   collide in hash table with more useful entries */
 	return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
-		rth->fl.iif && rth->u.dst.rt_next;
+		rth->fl.iif && rth->dst.rt_next;
 }
 
 static inline int rt_valuable(struct rtable *rth)
 {
 	return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) ||
-		rth->u.dst.expires;
+		rth->dst.expires;
 }
 
 static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long tmo2)
@@ -637,15 +636,15 @@ static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long t
 	unsigned long age;
 	int ret = 0;
 
-	if (atomic_read(&rth->u.dst.__refcnt))
+	if (atomic_read(&rth->dst.__refcnt))
 		goto out;
 
 	ret = 1;
-	if (rth->u.dst.expires &&
-	    time_after_eq(jiffies, rth->u.dst.expires))
+	if (rth->dst.expires &&
+	    time_after_eq(jiffies, rth->dst.expires))
 		goto out;
 
-	age = jiffies - rth->u.dst.lastuse;
+	age = jiffies - rth->dst.lastuse;
 	ret = 0;
 	if ((age <= tmo1 && !rt_fast_clean(rth)) ||
 	    (age <= tmo2 && rt_valuable(rth)))
@@ -661,7 +660,7 @@ out: return ret;
  */
 static inline u32 rt_score(struct rtable *rt)
 {
-	u32 score = jiffies - rt->u.dst.lastuse;
+	u32 score = jiffies - rt->dst.lastuse;
 
 	score = ~score & ~(3<<30);
 
@@ -701,12 +700,12 @@ static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
 
 static inline int compare_netns(struct rtable *rt1, struct rtable *rt2)
 {
-	return net_eq(dev_net(rt1->u.dst.dev), dev_net(rt2->u.dst.dev));
+	return net_eq(dev_net(rt1->dst.dev), dev_net(rt2->dst.dev));
 }
 
 static inline int rt_is_expired(struct rtable *rth)
 {
-	return rth->rt_genid != rt_genid(dev_net(rth->u.dst.dev));
+	return rth->rt_genid != rt_genid(dev_net(rth->dst.dev));
 }
 
 /*
@@ -735,7 +734,7 @@ static void rt_do_flush(int process_context)
 			rth = rt_hash_table[i].chain;
 
 			/* defer releasing the head of the list after spin_unlock */
-			for (tail = rth; tail; tail = tail->u.dst.rt_next)
+			for (tail = rth; tail; tail = tail->dst.rt_next)
 				if (!rt_is_expired(tail))
 					break;
 			if (rth != tail)
@@ -744,9 +743,9 @@ static void rt_do_flush(int process_context)
 			/* call rt_free on entries after the tail requiring flush */
 			prev = &rt_hash_table[i].chain;
 			for (p = *prev; p; p = next) {
-				next = p->u.dst.rt_next;
+				next = p->dst.rt_next;
 				if (!rt_is_expired(p)) {
-					prev = &p->u.dst.rt_next;
+					prev = &p->dst.rt_next;
 				} else {
 					*prev = next;
 					rt_free(p);
@@ -761,7 +760,7 @@ static void rt_do_flush(int process_context)
 		spin_unlock_bh(rt_hash_lock_addr(i));
 
 		for (; rth != tail; rth = next) {
-			next = rth->u.dst.rt_next;
+			next = rth->dst.rt_next;
 			rt_free(rth);
 		}
 	}
@@ -792,7 +791,7 @@ static int has_noalias(const struct rtable *head, const struct rtable *rth)
 	while (aux != rth) {
 		if (compare_hash_inputs(&aux->fl, &rth->fl))
 			return 0;
-		aux = aux->u.dst.rt_next;
+		aux = aux->dst.rt_next;
 	}
 	return ONE;
 }
@@ -832,18 +831,18 @@ static void rt_check_expire(void)
 			length = 0;
 			spin_lock_bh(rt_hash_lock_addr(i));
 			while ((rth = *rthp) != NULL) {
-				prefetch(rth->u.dst.rt_next);
+				prefetch(rth->dst.rt_next);
 				if (rt_is_expired(rth)) {
-					*rthp = rth->u.dst.rt_next;
+					*rthp = rth->dst.rt_next;
 					rt_free(rth);
 					continue;
 				}
-				if (rth->u.dst.expires) {
+				if (rth->dst.expires) {
 					/* Entry is expired even if it is in use */
-					if (time_before_eq(jiffies, rth->u.dst.expires)) {
+					if (time_before_eq(jiffies, rth->dst.expires)) {
 nofree:
 						tmo >>= 1;
-						rthp = &rth->u.dst.rt_next;
+						rthp = &rth->dst.rt_next;
 						/*
 						 * We only count entries on
 						 * a chain with equal hash inputs once
@@ -859,7 +858,7 @@ nofree:
 					goto nofree;
 
 				/* Cleanup aged off entries. */
-				*rthp = rth->u.dst.rt_next;
+				*rthp = rth->dst.rt_next;
 				rt_free(rth);
 			}
 			spin_unlock_bh(rt_hash_lock_addr(i));
@@ -1000,10 +999,10 @@ static int rt_garbage_collect(struct dst_ops *ops)
 			if (!rt_is_expired(rth) &&
 				!rt_may_expire(rth, tmo, expire)) {
 				tmo >>= 1;
-				rthp = &rth->u.dst.rt_next;
+				rthp = &rth->dst.rt_next;
 				continue;
 			}
-			*rthp = rth->u.dst.rt_next;
+			*rthp = rth->dst.rt_next;
 			rt_free(rth);
 			goal--;
 		}
@@ -1069,7 +1068,7 @@ static int slow_chain_length(const struct rtable *head)
 
 	while (rth) {
 		length += has_noalias(head, rth);
-		rth = rth->u.dst.rt_next;
+		rth = rth->dst.rt_next;
 	}
 	return length >> FRACT_BITS;
 }
@@ -1091,7 +1090,7 @@ restart:
 	candp = NULL;
 	now = jiffies;
 
-	if (!rt_caching(dev_net(rt->u.dst.dev))) {
+	if (!rt_caching(dev_net(rt->dst.dev))) {
 		/*
 		 * If we're not caching, just tell the caller we
 		 * were successful and don't touch the route.  The
@@ -1109,7 +1108,7 @@ restart:
 		 */
 
 		if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) {
-			int err = arp_bind_neighbour(&rt->u.dst);
+			int err = arp_bind_neighbour(&rt->dst);
 			if (err) {
 				if (net_ratelimit())
 					printk(KERN_WARNING
@@ -1128,19 +1127,19 @@ restart:
 	spin_lock_bh(rt_hash_lock_addr(hash));
 	while ((rth = *rthp) != NULL) {
 		if (rt_is_expired(rth)) {
-			*rthp = rth->u.dst.rt_next;
+			*rthp = rth->dst.rt_next;
 			rt_free(rth);
 			continue;
 		}
 		if (compare_keys(&rth->fl, &rt->fl) && compare_netns(rth, rt)) {
 			/* Put it first */
-			*rthp = rth->u.dst.rt_next;
+			*rthp = rth->dst.rt_next;
 			/*
 			 * Since lookup is lockfree, the deletion
 			 * must be visible to another weakly ordered CPU before
 			 * the insertion at the start of the hash chain.
 			 */
-			rcu_assign_pointer(rth->u.dst.rt_next,
+			rcu_assign_pointer(rth->dst.rt_next,
 					   rt_hash_table[hash].chain);
 			/*
 			 * Since lookup is lockfree, the update writes
@@ -1148,18 +1147,18 @@ restart:
 			 */
 			rcu_assign_pointer(rt_hash_table[hash].chain, rth);
 
-			dst_use(&rth->u.dst, now);
+			dst_use(&rth->dst, now);
 			spin_unlock_bh(rt_hash_lock_addr(hash));
 
 			rt_drop(rt);
 			if (rp)
 				*rp = rth;
 			else
-				skb_dst_set(skb, &rth->u.dst);
+				skb_dst_set(skb, &rth->dst);
 			return 0;
 		}
 
-		if (!atomic_read(&rth->u.dst.__refcnt)) {
+		if (!atomic_read(&rth->dst.__refcnt)) {
 			u32 score = rt_score(rth);
 
 			if (score <= min_score) {
@@ -1171,7 +1170,7 @@ restart:
 
 		chain_length++;
 
-		rthp = &rth->u.dst.rt_next;
+		rthp = &rth->dst.rt_next;
 	}
 
 	if (cand) {
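The move-to-front path above is careful about ordering because readers walk the chain without the lock: the entry is unlinked first, its rt_next is set with rcu_assign_pointer() so that write is ordered before the entry becomes reachable again, and only then is the head updated. The same discipline on a generic RCU-protected singly linked list, as a hedged sketch:

#include <linux/rcupdate.h>

struct node {
	struct node *next;
};

/* Writers serialize externally; readers traverse under RCU only,
 * so every intermediate state must be safe for them to observe. */
static void move_to_front(struct node **slot, struct node **head)
{
	struct node *n = *slot;

	*slot = n->next;			/* unlink: readers now skip n */
	rcu_assign_pointer(n->next, *head);	/* ordered before publication */
	rcu_assign_pointer(*head, n);		/* publish n at the head */
}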
@@ -1182,17 +1181,17 @@ restart:
 		 * only 2 entries per bucket. We will see.
 		 */
 		if (chain_length > ip_rt_gc_elasticity) {
-			*candp = cand->u.dst.rt_next;
+			*candp = cand->dst.rt_next;
 			rt_free(cand);
 		}
 	} else {
 		if (chain_length > rt_chain_length_max &&
 		    slow_chain_length(rt_hash_table[hash].chain) > rt_chain_length_max) {
-			struct net *net = dev_net(rt->u.dst.dev);
+			struct net *net = dev_net(rt->dst.dev);
 			int num = ++net->ipv4.current_rt_cache_rebuild_count;
 			if (!rt_caching(net)) {
 				printk(KERN_WARNING "%s: %d rebuilds is over limit, route caching disabled\n",
-					rt->u.dst.dev->name, num);
+					rt->dst.dev->name, num);
 			}
 			rt_emergency_hash_rebuild(net);
 			spin_unlock_bh(rt_hash_lock_addr(hash));
@@ -1207,7 +1206,7 @@ restart:
 	   route or unicast forwarding path.
 	 */
 	if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) {
-		int err = arp_bind_neighbour(&rt->u.dst);
+		int err = arp_bind_neighbour(&rt->dst);
 		if (err) {
 			spin_unlock_bh(rt_hash_lock_addr(hash));
 
@@ -1238,14 +1237,14 @@ restart:
 		}
 	}
 
-	rt->u.dst.rt_next = rt_hash_table[hash].chain;
+	rt->dst.rt_next = rt_hash_table[hash].chain;
 
 #if RT_CACHE_DEBUG >= 2
-	if (rt->u.dst.rt_next) {
+	if (rt->dst.rt_next) {
 		struct rtable *trt;
 		printk(KERN_DEBUG "rt_cache @%02x: %pI4",
 		       hash, &rt->rt_dst);
-		for (trt = rt->u.dst.rt_next; trt; trt = trt->u.dst.rt_next)
+		for (trt = rt->dst.rt_next; trt; trt = trt->dst.rt_next)
 			printk(" . %pI4", &trt->rt_dst);
 		printk("\n");
 	}
@@ -1263,7 +1262,7 @@ skip_hashing:
 	if (rp)
 		*rp = rt;
 	else
-		skb_dst_set(skb, &rt->u.dst);
+		skb_dst_set(skb, &rt->dst);
 	return 0;
 }
 
@@ -1325,6 +1324,7 @@ void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
 
 	ip_select_fb_ident(iph);
 }
+EXPORT_SYMBOL(__ip_select_ident);
 
 static void rt_del(unsigned hash, struct rtable *rt)
 {
@@ -1335,20 +1335,21 @@ static void rt_del(unsigned hash, struct rtable *rt)
 	ip_rt_put(rt);
 	while ((aux = *rthp) != NULL) {
 		if (aux == rt || rt_is_expired(aux)) {
-			*rthp = aux->u.dst.rt_next;
+			*rthp = aux->dst.rt_next;
 			rt_free(aux);
 			continue;
 		}
-		rthp = &aux->u.dst.rt_next;
+		rthp = &aux->dst.rt_next;
 	}
 	spin_unlock_bh(rt_hash_lock_addr(hash));
 }
 
+/* called in rcu_read_lock() section */
 void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
 		    __be32 saddr, struct net_device *dev)
 {
 	int i, k;
-	struct in_device *in_dev = in_dev_get(dev);
+	struct in_device *in_dev = __in_dev_get_rcu(dev);
 	struct rtable *rth, **rthp;
 	__be32 skeys[2] = { saddr, 0 };
 	int ikeys[2] = { dev->ifindex, 0 };
@@ -1384,7 +1385,6 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
 
 			rthp=&rt_hash_table[hash].chain;
 
-			rcu_read_lock();
 			while ((rth = rcu_dereference(*rthp)) != NULL) {
 				struct rtable *rt;
 
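These ip_rt_redirect() hunks change the function's calling convention rather than its logic: it is now documented to run inside the caller's rcu_read_lock() section, so the in_device is looked up with __in_dev_get_rcu() as a borrowed pointer and every in_dev_put() and local rcu_read_lock()/rcu_read_unlock() pair disappears. A hedged sketch of the convention (redirect_sketch is an invented name):

#include <linux/rcupdate.h>
#include <linux/inetdevice.h>

/* called in rcu_read_lock() section */
static void redirect_sketch(struct net_device *dev)
{
	/* borrowed pointer, valid only within the caller's RCU section;
	 * no reference is taken, so there is nothing to put on exit */
	struct in_device *in_dev = __in_dev_get_rcu(dev);

	if (!in_dev)
		return;
	/* ... use in_dev without sleeping ... */
}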
@@ -1393,44 +1393,42 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
 				    rth->fl.oif != ikeys[k] ||
 				    rth->fl.iif != 0 ||
 				    rt_is_expired(rth) ||
-				    !net_eq(dev_net(rth->u.dst.dev), net)) {
-					rthp = &rth->u.dst.rt_next;
+				    !net_eq(dev_net(rth->dst.dev), net)) {
+					rthp = &rth->dst.rt_next;
 					continue;
 				}
 
 				if (rth->rt_dst != daddr ||
 				    rth->rt_src != saddr ||
-				    rth->u.dst.error ||
+				    rth->dst.error ||
 				    rth->rt_gateway != old_gw ||
-				    rth->u.dst.dev != dev)
+				    rth->dst.dev != dev)
 					break;
 
-				dst_hold(&rth->u.dst);
-				rcu_read_unlock();
+				dst_hold(&rth->dst);
 
 				rt = dst_alloc(&ipv4_dst_ops);
 				if (rt == NULL) {
 					ip_rt_put(rth);
-					in_dev_put(in_dev);
 					return;
 				}
 
 				/* Copy all the information. */
 				*rt = *rth;
-				rt->u.dst.__use = 1;
-				atomic_set(&rt->u.dst.__refcnt, 1);
-				rt->u.dst.child = NULL;
-				if (rt->u.dst.dev)
-					dev_hold(rt->u.dst.dev);
+				rt->dst.__use = 1;
+				atomic_set(&rt->dst.__refcnt, 1);
+				rt->dst.child = NULL;
+				if (rt->dst.dev)
+					dev_hold(rt->dst.dev);
 				if (rt->idev)
 					in_dev_hold(rt->idev);
-				rt->u.dst.obsolete = -1;
-				rt->u.dst.lastuse = jiffies;
-				rt->u.dst.path = &rt->u.dst;
-				rt->u.dst.neighbour = NULL;
-				rt->u.dst.hh = NULL;
+				rt->dst.obsolete = -1;
+				rt->dst.lastuse = jiffies;
+				rt->dst.path = &rt->dst;
+				rt->dst.neighbour = NULL;
+				rt->dst.hh = NULL;
 #ifdef CONFIG_XFRM
-				rt->u.dst.xfrm = NULL;
+				rt->dst.xfrm = NULL;
 #endif
 				rt->rt_genid = rt_genid(net);
 				rt->rt_flags |= RTCF_REDIRECTED;
@@ -1439,23 +1437,23 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
 				rt->rt_gateway = new_gw;
 
 				/* Redirect received -> path was valid */
-				dst_confirm(&rth->u.dst);
+				dst_confirm(&rth->dst);
 
 				if (rt->peer)
 					atomic_inc(&rt->peer->refcnt);
 
-				if (arp_bind_neighbour(&rt->u.dst) ||
-				    !(rt->u.dst.neighbour->nud_state &
+				if (arp_bind_neighbour(&rt->dst) ||
+				    !(rt->dst.neighbour->nud_state &
 					    NUD_VALID)) {
-					if (rt->u.dst.neighbour)
-						neigh_event_send(rt->u.dst.neighbour, NULL);
+					if (rt->dst.neighbour)
+						neigh_event_send(rt->dst.neighbour, NULL);
 					ip_rt_put(rth);
 					rt_drop(rt);
 					goto do_next;
 				}
 
-				netevent.old = &rth->u.dst;
-				netevent.new = &rt->u.dst;
+				netevent.old = &rth->dst;
+				netevent.new = &rt->dst;
 				call_netevent_notifiers(NETEVENT_REDIRECT,
 							&netevent);
 
@@ -1464,12 +1462,10 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
 				ip_rt_put(rt);
 				goto do_next;
 			}
-			rcu_read_unlock();
 		do_next:
 			;
 		}
 	}
-	in_dev_put(in_dev);
 	return;
 
 reject_redirect:
@@ -1480,7 +1476,7 @@ reject_redirect:
 			     &old_gw, dev->name, &new_gw,
 			     &saddr, &daddr);
 #endif
-	in_dev_put(in_dev);
+	;
 }
 
 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
@@ -1493,8 +1489,8 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
 			ip_rt_put(rt);
 			ret = NULL;
 		} else if ((rt->rt_flags & RTCF_REDIRECTED) ||
-			   (rt->u.dst.expires &&
-			    time_after_eq(jiffies, rt->u.dst.expires))) {
+			   (rt->dst.expires &&
+			    time_after_eq(jiffies, rt->dst.expires))) {
 			unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src,
 						rt->fl.oif,
 						rt_genid(dev_net(dst->dev)));
@@ -1532,7 +1528,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
 	int log_martians;
 
 	rcu_read_lock();
-	in_dev = __in_dev_get_rcu(rt->u.dst.dev);
+	in_dev = __in_dev_get_rcu(rt->dst.dev);
 	if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
 		rcu_read_unlock();
 		return;
@@ -1543,30 +1539,30 @@ void ip_rt_send_redirect(struct sk_buff *skb)
 	/* No redirected packets during ip_rt_redirect_silence;
 	 * reset the algorithm.
 	 */
-	if (time_after(jiffies, rt->u.dst.rate_last + ip_rt_redirect_silence))
-		rt->u.dst.rate_tokens = 0;
+	if (time_after(jiffies, rt->dst.rate_last + ip_rt_redirect_silence))
+		rt->dst.rate_tokens = 0;
 
 	/* Too many ignored redirects; do not send anything
-	 * set u.dst.rate_last to the last seen redirected packet.
+	 * set dst.rate_last to the last seen redirected packet.
 	 */
-	if (rt->u.dst.rate_tokens >= ip_rt_redirect_number) {
-		rt->u.dst.rate_last = jiffies;
+	if (rt->dst.rate_tokens >= ip_rt_redirect_number) {
+		rt->dst.rate_last = jiffies;
 		return;
 	}
 
 	/* Check for load limit; set rate_last to the latest sent
 	 * redirect.
 	 */
-	if (rt->u.dst.rate_tokens == 0 ||
+	if (rt->dst.rate_tokens == 0 ||
 	    time_after(jiffies,
-		       (rt->u.dst.rate_last +
-			(ip_rt_redirect_load << rt->u.dst.rate_tokens)))) {
+		       (rt->dst.rate_last +
+			(ip_rt_redirect_load << rt->dst.rate_tokens)))) {
 		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
-		rt->u.dst.rate_last = jiffies;
-		++rt->u.dst.rate_tokens;
+		rt->dst.rate_last = jiffies;
+		++rt->dst.rate_tokens;
 #ifdef CONFIG_IP_ROUTE_VERBOSE
 		if (log_martians &&
-		    rt->u.dst.rate_tokens == ip_rt_redirect_number &&
+		    rt->dst.rate_tokens == ip_rt_redirect_number &&
 		    net_ratelimit())
 			printk(KERN_WARNING "host %pI4/if%d ignores redirects for %pI4 to %pI4.\n",
 				&rt->rt_src, rt->rt_iif,
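Aside from the rename, the logic above is a small token-bucket limiter: rate_tokens counts redirects the peer has apparently ignored, rate_last stamps the most recent one, and each further redirect is delayed by an exponentially growing interval (ip_rt_redirect_load << rate_tokens), with a full reset after ip_rt_redirect_silence of quiet. The timing test in isolation, as a sketch with invented struct and helper names:

#include <linux/jiffies.h>

struct limiter {
	unsigned long rate_last;	/* jiffies when the last event was sent */
	unsigned long rate_tokens;	/* consecutive events the peer ignored */
};

/* Another event may be sent if none is pending or the exponential
 * backoff interval since the last one has elapsed. */
static bool may_send(const struct limiter *l, unsigned long load)
{
	return l->rate_tokens == 0 ||
	       time_after(jiffies, l->rate_last + (load << l->rate_tokens));
}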
@@ -1581,7 +1577,7 @@ static int ip_error(struct sk_buff *skb)
 	unsigned long now;
 	int code;
 
-	switch (rt->u.dst.error) {
+	switch (rt->dst.error) {
 	case EINVAL:
 	default:
 		goto out;
@@ -1590,7 +1586,7 @@ static int ip_error(struct sk_buff *skb)
 		break;
 	case ENETUNREACH:
 		code = ICMP_NET_UNREACH;
-		IP_INC_STATS_BH(dev_net(rt->u.dst.dev),
+		IP_INC_STATS_BH(dev_net(rt->dst.dev),
 				IPSTATS_MIB_INNOROUTES);
 		break;
 	case EACCES:
@@ -1599,12 +1595,12 @@ static int ip_error(struct sk_buff *skb)
 	}
 
 	now = jiffies;
-	rt->u.dst.rate_tokens += now - rt->u.dst.rate_last;
-	if (rt->u.dst.rate_tokens > ip_rt_error_burst)
-		rt->u.dst.rate_tokens = ip_rt_error_burst;
-	rt->u.dst.rate_last = now;
-	if (rt->u.dst.rate_tokens >= ip_rt_error_cost) {
-		rt->u.dst.rate_tokens -= ip_rt_error_cost;
+	rt->dst.rate_tokens += now - rt->dst.rate_last;
+	if (rt->dst.rate_tokens > ip_rt_error_burst)
+		rt->dst.rate_tokens = ip_rt_error_burst;
+	rt->dst.rate_last = now;
+	if (rt->dst.rate_tokens >= ip_rt_error_cost) {
+		rt->dst.rate_tokens -= ip_rt_error_cost;
 		icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
 	}
 
@@ -1649,7 +1645,7 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
 
 	rcu_read_lock();
 	for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
-	     rth = rcu_dereference(rth->u.dst.rt_next)) {
+	     rth = rcu_dereference(rth->dst.rt_next)) {
 		unsigned short mtu = new_mtu;
 
 		if (rth->fl.fl4_dst != daddr ||
@@ -1658,8 +1654,8 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
 		    rth->rt_src != iph->saddr ||
 		    rth->fl.oif != ikeys[k] ||
 		    rth->fl.iif != 0 ||
-		    dst_metric_locked(&rth->u.dst, RTAX_MTU) ||
-		    !net_eq(dev_net(rth->u.dst.dev), net) ||
+		    dst_metric_locked(&rth->dst, RTAX_MTU) ||
+		    !net_eq(dev_net(rth->dst.dev), net) ||
 		    rt_is_expired(rth))
 			continue;
@@ -1667,22 +1663,22 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
 
 			/* BSD 4.2 compatibility hack :-( */
 			if (mtu == 0 &&
-			    old_mtu >= dst_mtu(&rth->u.dst) &&
+			    old_mtu >= dst_mtu(&rth->dst) &&
 			    old_mtu >= 68 + (iph->ihl << 2))
 				old_mtu -= iph->ihl << 2;
 
 			mtu = guess_mtu(old_mtu);
 		}
-		if (mtu <= dst_mtu(&rth->u.dst)) {
-			if (mtu < dst_mtu(&rth->u.dst)) {
-				dst_confirm(&rth->u.dst);
+		if (mtu <= dst_mtu(&rth->dst)) {
+			if (mtu < dst_mtu(&rth->dst)) {
+				dst_confirm(&rth->dst);
 				if (mtu < ip_rt_min_pmtu) {
 					mtu = ip_rt_min_pmtu;
-					rth->u.dst.metrics[RTAX_LOCK-1] |=
+					rth->dst.metrics[RTAX_LOCK-1] |=
 						(1 << RTAX_MTU);
 				}
-				rth->u.dst.metrics[RTAX_MTU-1] = mtu;
-				dst_set_expires(&rth->u.dst,
+				rth->dst.metrics[RTAX_MTU-1] = mtu;
+				dst_set_expires(&rth->dst,
 						ip_rt_mtu_expires);
 			}
 			est_mtu = mtu;
@@ -1755,7 +1751,7 @@ static void ipv4_link_failure(struct sk_buff *skb)
 
 	rt = skb_rtable(skb);
 	if (rt)
-		dst_set_expires(&rt->u.dst, 0);
+		dst_set_expires(&rt->dst, 0);
 }
 
 static int ip_rt_bug(struct sk_buff *skb)
@@ -1783,11 +1779,11 @@ void ip_rt_get_source(u8 *addr, struct rtable *rt)
 
 	if (rt->fl.iif == 0)
 		src = rt->rt_src;
-	else if (fib_lookup(dev_net(rt->u.dst.dev), &rt->fl, &res) == 0) {
+	else if (fib_lookup(dev_net(rt->dst.dev), &rt->fl, &res) == 0) {
 		src = FIB_RES_PREFSRC(res);
 		fib_res_put(&res);
 	} else
-		src = inet_select_addr(rt->u.dst.dev, rt->rt_gateway,
+		src = inet_select_addr(rt->dst.dev, rt->rt_gateway,
 					RT_SCOPE_UNIVERSE);
 	memcpy(addr, &src, 4);
 }
@@ -1795,10 +1791,10 @@ void ip_rt_get_source(u8 *addr, struct rtable *rt)
 #ifdef CONFIG_NET_CLS_ROUTE
 static void set_class_tag(struct rtable *rt, u32 tag)
 {
-	if (!(rt->u.dst.tclassid & 0xFFFF))
-		rt->u.dst.tclassid |= tag & 0xFFFF;
-	if (!(rt->u.dst.tclassid & 0xFFFF0000))
-		rt->u.dst.tclassid |= tag & 0xFFFF0000;
+	if (!(rt->dst.tclassid & 0xFFFF))
+		rt->dst.tclassid |= tag & 0xFFFF;
+	if (!(rt->dst.tclassid & 0xFFFF0000))
+		rt->dst.tclassid |= tag & 0xFFFF0000;
 }
 #endif
 
@@ -1810,30 +1806,30 @@ static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
 		if (FIB_RES_GW(*res) &&
 		    FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
 			rt->rt_gateway = FIB_RES_GW(*res);
-		memcpy(rt->u.dst.metrics, fi->fib_metrics,
-		       sizeof(rt->u.dst.metrics));
+		memcpy(rt->dst.metrics, fi->fib_metrics,
+		       sizeof(rt->dst.metrics));
 		if (fi->fib_mtu == 0) {
-			rt->u.dst.metrics[RTAX_MTU-1] = rt->u.dst.dev->mtu;
-			if (dst_metric_locked(&rt->u.dst, RTAX_MTU) &&
+			rt->dst.metrics[RTAX_MTU-1] = rt->dst.dev->mtu;
+			if (dst_metric_locked(&rt->dst, RTAX_MTU) &&
 			    rt->rt_gateway != rt->rt_dst &&
-			    rt->u.dst.dev->mtu > 576)
-				rt->u.dst.metrics[RTAX_MTU-1] = 576;
+			    rt->dst.dev->mtu > 576)
+				rt->dst.metrics[RTAX_MTU-1] = 576;
 		}
 #ifdef CONFIG_NET_CLS_ROUTE
-		rt->u.dst.tclassid = FIB_RES_NH(*res).nh_tclassid;
+		rt->dst.tclassid = FIB_RES_NH(*res).nh_tclassid;
 #endif
 	} else
-		rt->u.dst.metrics[RTAX_MTU-1]= rt->u.dst.dev->mtu;
+		rt->dst.metrics[RTAX_MTU-1]= rt->dst.dev->mtu;
 
-	if (dst_metric(&rt->u.dst, RTAX_HOPLIMIT) == 0)
-		rt->u.dst.metrics[RTAX_HOPLIMIT-1] = sysctl_ip_default_ttl;
-	if (dst_mtu(&rt->u.dst) > IP_MAX_MTU)
-		rt->u.dst.metrics[RTAX_MTU-1] = IP_MAX_MTU;
-	if (dst_metric(&rt->u.dst, RTAX_ADVMSS) == 0)
-		rt->u.dst.metrics[RTAX_ADVMSS-1] = max_t(unsigned int, rt->u.dst.dev->mtu - 40,
+	if (dst_metric(&rt->dst, RTAX_HOPLIMIT) == 0)
+		rt->dst.metrics[RTAX_HOPLIMIT-1] = sysctl_ip_default_ttl;
+	if (dst_mtu(&rt->dst) > IP_MAX_MTU)
+		rt->dst.metrics[RTAX_MTU-1] = IP_MAX_MTU;
+	if (dst_metric(&rt->dst, RTAX_ADVMSS) == 0)
+		rt->dst.metrics[RTAX_ADVMSS-1] = max_t(unsigned int, rt->dst.dev->mtu - 40,
 				       ip_rt_min_advmss);
-	if (dst_metric(&rt->u.dst, RTAX_ADVMSS) > 65535 - 40)
-		rt->u.dst.metrics[RTAX_ADVMSS-1] = 65535 - 40;
+	if (dst_metric(&rt->dst, RTAX_ADVMSS) > 65535 - 40)
+		rt->dst.metrics[RTAX_ADVMSS-1] = 65535 - 40;
 
 #ifdef CONFIG_NET_CLS_ROUTE
 #ifdef CONFIG_IP_MULTIPLE_TABLES
@@ -1844,14 +1840,16 @@ static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
 	rt->rt_type = res->type;
 }
 
+/* called in rcu_read_lock() section */
 static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 				u8 tos, struct net_device *dev, int our)
 {
-	unsigned hash;
+	unsigned int hash;
 	struct rtable *rth;
 	__be32 spec_dst;
-	struct in_device *in_dev = in_dev_get(dev);
+	struct in_device *in_dev = __in_dev_get_rcu(dev);
 	u32 itag = 0;
+	int err;
 
 	/* Primary sanity checks. */
 
@@ -1866,21 +1864,23 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 		if (!ipv4_is_local_multicast(daddr))
 			goto e_inval;
 		spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
-	} else if (fib_validate_source(saddr, 0, tos, 0,
-					dev, &spec_dst, &itag, 0) < 0)
-		goto e_inval;
-
+	} else {
+		err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst,
+					  &itag, 0);
+		if (err < 0)
+			goto e_err;
+	}
 	rth = dst_alloc(&ipv4_dst_ops);
 	if (!rth)
 		goto e_nobufs;
 
-	rth->u.dst.output = ip_rt_bug;
-	rth->u.dst.obsolete = -1;
+	rth->dst.output = ip_rt_bug;
+	rth->dst.obsolete = -1;
 
-	atomic_set(&rth->u.dst.__refcnt, 1);
-	rth->u.dst.flags= DST_HOST;
+	atomic_set(&rth->dst.__refcnt, 1);
+	rth->dst.flags= DST_HOST;
 	if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
-		rth->u.dst.flags |= DST_NOPOLICY;
+		rth->dst.flags |= DST_NOPOLICY;
 	rth->fl.fl4_dst	= daddr;
 	rth->rt_dst	= daddr;
 	rth->fl.fl4_tos	= tos;
@@ -1888,13 +1888,13 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 	rth->fl.fl4_src	= saddr;
 	rth->rt_src	= saddr;
 #ifdef CONFIG_NET_CLS_ROUTE
-	rth->u.dst.tclassid = itag;
+	rth->dst.tclassid = itag;
 #endif
 	rth->rt_iif	=
 	rth->fl.iif	= dev->ifindex;
-	rth->u.dst.dev	= init_net.loopback_dev;
-	dev_hold(rth->u.dst.dev);
-	rth->idev	= in_dev_get(rth->u.dst.dev);
+	rth->dst.dev	= init_net.loopback_dev;
+	dev_hold(rth->dst.dev);
+	rth->idev	= in_dev_get(rth->dst.dev);
 	rth->fl.oif	= 0;
 	rth->rt_gateway	= daddr;
 	rth->rt_spec_dst= spec_dst;
@@ -1902,27 +1902,25 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 	rth->rt_flags = RTCF_MULTICAST;
 	rth->rt_type = RTN_MULTICAST;
 	if (our) {
-		rth->u.dst.input= ip_local_deliver;
+		rth->dst.input= ip_local_deliver;
 		rth->rt_flags |= RTCF_LOCAL;
 	}
 
 #ifdef CONFIG_IP_MROUTE
 	if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
-		rth->u.dst.input = ip_mr_input;
+		rth->dst.input = ip_mr_input;
 #endif
 	RT_CACHE_STAT_INC(in_slow_mc);
 
-	in_dev_put(in_dev);
 	hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev)));
 	return rt_intern_hash(hash, rth, NULL, skb, dev->ifindex);
 
 e_nobufs:
-	in_dev_put(in_dev);
 	return -ENOBUFS;
-
 e_inval:
-	in_dev_put(in_dev);
 	return -EINVAL;
+e_err:
+	return err;
 }
 
 
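Two things happen to ip_route_input_mc() here: the in_dev_put() calls vanish because the device is now a borrowed RCU pointer (see the new header comment), and fib_validate_source() failures are no longer flattened to -EINVAL — the new e_err label propagates the helper's own error code. The error-path shape in isolation, as an illustrative sketch with invented names:

/* Illustrative only: keep a helper's errno instead of overwriting
 * it with a generic one on the shared error path. */
static int validate_sketch(int cond, int helper_err)
{
	int err;

	if (!cond)
		goto e_inval;	/* a purely local failure: -EINVAL */
	err = helper_err;
	if (err < 0)
		goto e_err;	/* propagate what the helper reported */
	return 0;

e_inval:
	return -EINVAL;
e_err:
	return err;
}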
@@ -1956,22 +1954,22 @@ static void ip_handle_martian_source(struct net_device *dev,
 #endif
 }
 
+/* called in rcu_read_lock() section */
 static int __mkroute_input(struct sk_buff *skb,
 			   struct fib_result *res,
 			   struct in_device *in_dev,
 			   __be32 daddr, __be32 saddr, u32 tos,
 			   struct rtable **result)
 {
-
 	struct rtable *rth;
 	int err;
 	struct in_device *out_dev;
-	unsigned flags = 0;
+	unsigned int flags = 0;
 	__be32 spec_dst;
 	u32 itag;
 
 	/* get a working reference to the output device */
-	out_dev = in_dev_get(FIB_RES_DEV(*res));
+	out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
 	if (out_dev == NULL) {
 		if (net_ratelimit())
 			printk(KERN_CRIT "Bug in ip_route_input" \
@@ -1986,7 +1984,6 @@ static int __mkroute_input(struct sk_buff *skb,
 		ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
 					 saddr);
 
-		err = -EINVAL;
 		goto cleanup;
 	}
 
@@ -2020,12 +2017,12 @@ static int __mkroute_input(struct sk_buff *skb,
 		goto cleanup;
 	}
 
-	atomic_set(&rth->u.dst.__refcnt, 1);
-	rth->u.dst.flags= DST_HOST;
+	atomic_set(&rth->dst.__refcnt, 1);
+	rth->dst.flags= DST_HOST;
 	if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
-		rth->u.dst.flags |= DST_NOPOLICY;
+		rth->dst.flags |= DST_NOPOLICY;
 	if (IN_DEV_CONF_GET(out_dev, NOXFRM))
-		rth->u.dst.flags |= DST_NOXFRM;
+		rth->dst.flags |= DST_NOXFRM;
 	rth->fl.fl4_dst	= daddr;
 	rth->rt_dst	= daddr;
 	rth->fl.fl4_tos	= tos;
@@ -2035,16 +2032,16 @@ static int __mkroute_input(struct sk_buff *skb,
 	rth->rt_gateway	= daddr;
 	rth->rt_iif 	=
 		rth->fl.iif	= in_dev->dev->ifindex;
-	rth->u.dst.dev	= (out_dev)->dev;
-	dev_hold(rth->u.dst.dev);
-	rth->idev	= in_dev_get(rth->u.dst.dev);
+	rth->dst.dev	= (out_dev)->dev;
+	dev_hold(rth->dst.dev);
+	rth->idev	= in_dev_get(rth->dst.dev);
 	rth->fl.oif 	= 0;
 	rth->rt_spec_dst= spec_dst;
 
-	rth->u.dst.obsolete = -1;
-	rth->u.dst.input = ip_forward;
-	rth->u.dst.output = ip_output;
-	rth->rt_genid = rt_genid(dev_net(rth->u.dst.dev));
+	rth->dst.obsolete = -1;
+	rth->dst.input = ip_forward;
+	rth->dst.output = ip_output;
+	rth->rt_genid = rt_genid(dev_net(rth->dst.dev));
 
 	rt_set_nexthop(rth, res, itag);
 
@@ -2053,8 +2050,6 @@ static int __mkroute_input(struct sk_buff *skb,
 	*result = rth;
 	err = 0;
  cleanup:
-	/* release the working reference to the output device */
-	in_dev_put(out_dev);
 	return err;
 }
 
@@ -2080,7 +2075,7 @@ static int ip_mkroute_input(struct sk_buff *skb,
 
 	/* put it into the cache */
 	hash = rt_hash(daddr, saddr, fl->iif,
-		       rt_genid(dev_net(rth->u.dst.dev)));
+		       rt_genid(dev_net(rth->dst.dev)));
 	return rt_intern_hash(hash, rth, NULL, skb, fl->iif);
 }
 
@@ -2098,7 +2093,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 			       u8 tos, struct net_device *dev)
 {
 	struct fib_result res;
-	struct in_device *in_dev = in_dev_get(dev);
+	struct in_device *in_dev = __in_dev_get_rcu(dev);
 	struct flowi fl = { .nl_u = { .ip4_u =
 			    { .daddr = daddr,
 			      .saddr = saddr,
@@ -2158,13 +2153,12 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 		goto brd_input;
 
 	if (res.type == RTN_LOCAL) {
-		int result;
-		result = fib_validate_source(saddr, daddr, tos,
+		err = fib_validate_source(saddr, daddr, tos,
 					     net->loopback_dev->ifindex,
 					     dev, &spec_dst, &itag, skb->mark);
-		if (result < 0)
-			goto martian_source;
-		if (result)
+		if (err < 0)
+			goto martian_source_keep_err;
+		if (err)
 			flags |= RTCF_DIRECTSRC;
 		spec_dst = daddr;
 		goto local_input;
@@ -2177,7 +2171,6 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 
 	err = ip_mkroute_input(skb, &res, &fl, in_dev, daddr, saddr, tos);
 done:
-	in_dev_put(in_dev);
 	if (free_res)
 		fib_res_put(&res);
 out:	return err;
@@ -2192,7 +2185,7 @@ brd_input:
2192 err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst, 2185 err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst,
2193 &itag, skb->mark); 2186 &itag, skb->mark);
2194 if (err < 0) 2187 if (err < 0)
2195 goto martian_source; 2188 goto martian_source_keep_err;
2196 if (err) 2189 if (err)
2197 flags |= RTCF_DIRECTSRC; 2190 flags |= RTCF_DIRECTSRC;
2198 } 2191 }
@@ -2205,14 +2198,14 @@ local_input:
2205 if (!rth) 2198 if (!rth)
2206 goto e_nobufs; 2199 goto e_nobufs;
2207 2200
2208 rth->u.dst.output= ip_rt_bug; 2201 rth->dst.output= ip_rt_bug;
2209 rth->u.dst.obsolete = -1; 2202 rth->dst.obsolete = -1;
2210 rth->rt_genid = rt_genid(net); 2203 rth->rt_genid = rt_genid(net);
2211 2204
2212 atomic_set(&rth->u.dst.__refcnt, 1); 2205 atomic_set(&rth->dst.__refcnt, 1);
2213 rth->u.dst.flags= DST_HOST; 2206 rth->dst.flags= DST_HOST;
2214 if (IN_DEV_CONF_GET(in_dev, NOPOLICY)) 2207 if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
2215 rth->u.dst.flags |= DST_NOPOLICY; 2208 rth->dst.flags |= DST_NOPOLICY;
2216 rth->fl.fl4_dst = daddr; 2209 rth->fl.fl4_dst = daddr;
2217 rth->rt_dst = daddr; 2210 rth->rt_dst = daddr;
2218 rth->fl.fl4_tos = tos; 2211 rth->fl.fl4_tos = tos;
@@ -2220,20 +2213,20 @@ local_input:
2220 rth->fl.fl4_src = saddr; 2213 rth->fl.fl4_src = saddr;
2221 rth->rt_src = saddr; 2214 rth->rt_src = saddr;
2222#ifdef CONFIG_NET_CLS_ROUTE 2215#ifdef CONFIG_NET_CLS_ROUTE
2223 rth->u.dst.tclassid = itag; 2216 rth->dst.tclassid = itag;
2224#endif 2217#endif
2225 rth->rt_iif = 2218 rth->rt_iif =
2226 rth->fl.iif = dev->ifindex; 2219 rth->fl.iif = dev->ifindex;
2227 rth->u.dst.dev = net->loopback_dev; 2220 rth->dst.dev = net->loopback_dev;
2228 dev_hold(rth->u.dst.dev); 2221 dev_hold(rth->dst.dev);
2229 rth->idev = in_dev_get(rth->u.dst.dev); 2222 rth->idev = in_dev_get(rth->dst.dev);
2230 rth->rt_gateway = daddr; 2223 rth->rt_gateway = daddr;
2231 rth->rt_spec_dst= spec_dst; 2224 rth->rt_spec_dst= spec_dst;
2232 rth->u.dst.input= ip_local_deliver; 2225 rth->dst.input= ip_local_deliver;
2233 rth->rt_flags = flags|RTCF_LOCAL; 2226 rth->rt_flags = flags|RTCF_LOCAL;
2234 if (res.type == RTN_UNREACHABLE) { 2227 if (res.type == RTN_UNREACHABLE) {
2235 rth->u.dst.input= ip_error; 2228 rth->dst.input= ip_error;
2236 rth->u.dst.error= -err; 2229 rth->dst.error= -err;
2237 rth->rt_flags &= ~RTCF_LOCAL; 2230 rth->rt_flags &= ~RTCF_LOCAL;
2238 } 2231 }
2239 rth->rt_type = res.type; 2232 rth->rt_type = res.type;
@@ -2273,8 +2266,10 @@ e_nobufs:
2273 goto done; 2266 goto done;
2274 2267
2275martian_source: 2268martian_source:
2269 err = -EINVAL;
2270martian_source_keep_err:
2276 ip_handle_martian_source(dev, in_dev, skb, daddr, saddr); 2271 ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
2277 goto e_inval; 2272 goto done;
2278} 2273}
2279 2274
2280int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr, 2275int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
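The reworked martian_source handling above replaces the old unconditional goto e_inval with a pair of labels, so a negative return from fib_validate_source() survives to the caller while the other paths still default to -EINVAL. A minimal userspace sketch of the two-label pattern, with hypothetical helpers (do_validate, is_martian, log_martian) standing in for the kernel routines:

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-ins for fib_validate_source() and friends. */
static int do_validate(int src) { return src < 0 ? -EACCES : 0; }
static int is_martian(int src) { return src == 13; }
static void log_martian(int src) { fprintf(stderr, "martian source %d\n", src); }

static int lookup(int src)
{
	int err = do_validate(src);

	if (err < 0)
		goto martian_source_keep_err;	/* keep the validator's code */
	if (is_martian(src))
		goto martian_source;		/* generic failure */
	return 0;

martian_source:
	err = -EINVAL;				/* default error */
martian_source_keep_err:
	log_martian(src);			/* shared reporting path */
	return err;
}

int main(void)
{
	printf("%d %d %d\n", lookup(1), lookup(13), lookup(-5));
	return 0;
}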
@@ -2284,32 +2279,34 @@ int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2284 unsigned hash; 2279 unsigned hash;
2285 int iif = dev->ifindex; 2280 int iif = dev->ifindex;
2286 struct net *net; 2281 struct net *net;
2282 int res;
2287 2283
2288 net = dev_net(dev); 2284 net = dev_net(dev);
2289 2285
2286 rcu_read_lock();
2287
2290 if (!rt_caching(net)) 2288 if (!rt_caching(net))
2291 goto skip_cache; 2289 goto skip_cache;
2292 2290
2293 tos &= IPTOS_RT_MASK; 2291 tos &= IPTOS_RT_MASK;
2294 hash = rt_hash(daddr, saddr, iif, rt_genid(net)); 2292 hash = rt_hash(daddr, saddr, iif, rt_genid(net));
2295 2293
2296 rcu_read_lock();
2297 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth; 2294 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
2298 rth = rcu_dereference(rth->u.dst.rt_next)) { 2295 rth = rcu_dereference(rth->dst.rt_next)) {
2299 if ((((__force u32)rth->fl.fl4_dst ^ (__force u32)daddr) | 2296 if ((((__force u32)rth->fl.fl4_dst ^ (__force u32)daddr) |
2300 ((__force u32)rth->fl.fl4_src ^ (__force u32)saddr) | 2297 ((__force u32)rth->fl.fl4_src ^ (__force u32)saddr) |
2301 (rth->fl.iif ^ iif) | 2298 (rth->fl.iif ^ iif) |
2302 rth->fl.oif | 2299 rth->fl.oif |
2303 (rth->fl.fl4_tos ^ tos)) == 0 && 2300 (rth->fl.fl4_tos ^ tos)) == 0 &&
2304 rth->fl.mark == skb->mark && 2301 rth->fl.mark == skb->mark &&
2305 net_eq(dev_net(rth->u.dst.dev), net) && 2302 net_eq(dev_net(rth->dst.dev), net) &&
2306 !rt_is_expired(rth)) { 2303 !rt_is_expired(rth)) {
2307 if (noref) { 2304 if (noref) {
2308 dst_use_noref(&rth->u.dst, jiffies); 2305 dst_use_noref(&rth->dst, jiffies);
2309 skb_dst_set_noref(skb, &rth->u.dst); 2306 skb_dst_set_noref(skb, &rth->dst);
2310 } else { 2307 } else {
2311 dst_use(&rth->u.dst, jiffies); 2308 dst_use(&rth->dst, jiffies);
2312 skb_dst_set(skb, &rth->u.dst); 2309 skb_dst_set(skb, &rth->dst);
2313 } 2310 }
2314 RT_CACHE_STAT_INC(in_hit); 2311 RT_CACHE_STAT_INC(in_hit);
2315 rcu_read_unlock(); 2312 rcu_read_unlock();
@@ -2317,7 +2314,6 @@ int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2317 } 2314 }
2318 RT_CACHE_STAT_INC(in_hlist_search); 2315 RT_CACHE_STAT_INC(in_hlist_search);
2319 } 2316 }
2320 rcu_read_unlock();
2321 2317
2322skip_cache: 2318skip_cache:
2323 /* Multicast recognition logic is moved from route cache to here. 2319 /* Multicast recognition logic is moved from route cache to here.
@@ -2332,12 +2328,11 @@ skip_cache:
2332 route cache entry is created eventually. 2328 route cache entry is created eventually.
2333 */ 2329 */
2334 if (ipv4_is_multicast(daddr)) { 2330 if (ipv4_is_multicast(daddr)) {
2335 struct in_device *in_dev; 2331 struct in_device *in_dev = __in_dev_get_rcu(dev);
2336 2332
2337 rcu_read_lock(); 2333 if (in_dev) {
2338 if ((in_dev = __in_dev_get_rcu(dev)) != NULL) {
2339 int our = ip_check_mc(in_dev, daddr, saddr, 2334 int our = ip_check_mc(in_dev, daddr, saddr,
2340 ip_hdr(skb)->protocol); 2335 ip_hdr(skb)->protocol);
2341 if (our 2336 if (our
2342#ifdef CONFIG_IP_MROUTE 2337#ifdef CONFIG_IP_MROUTE
2343 || 2338 ||
@@ -2345,15 +2340,18 @@ skip_cache:
2345 IN_DEV_MFORWARD(in_dev)) 2340 IN_DEV_MFORWARD(in_dev))
2346#endif 2341#endif
2347 ) { 2342 ) {
2343 int res = ip_route_input_mc(skb, daddr, saddr,
2344 tos, dev, our);
2348 rcu_read_unlock(); 2345 rcu_read_unlock();
2349 return ip_route_input_mc(skb, daddr, saddr, 2346 return res;
2350 tos, dev, our);
2351 } 2347 }
2352 } 2348 }
2353 rcu_read_unlock(); 2349 rcu_read_unlock();
2354 return -EINVAL; 2350 return -EINVAL;
2355 } 2351 }
2356 return ip_route_input_slow(skb, daddr, saddr, tos, dev); 2352 res = ip_route_input_slow(skb, daddr, saddr, tos, dev);
2353 rcu_read_unlock();
2354 return res;
2357} 2355}
2358EXPORT_SYMBOL(ip_route_input_common); 2356EXPORT_SYMBOL(ip_route_input_common);
2359 2357
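Taken together, the ip_route_input_common() hunks replace several short RCU sections and the in_dev_get()/in_dev_put() refcount pair with a single rcu_read_lock() held across the whole lookup; every exit path, fast or slow, now drops that one lock, and __in_dev_get_rcu() becomes safe throughout. A compilable userspace model of the control-flow change, with no-op stand-ins for the RCU primitives:

#include <stdio.h>

/* No-op stand-ins; in-kernel these come from <linux/rcupdate.h>. */
static void rcu_read_lock(void) { }
static void rcu_read_unlock(void) { }

static int cache_lookup(int key) { return key == 1; }	/* hypothetical */
static int slow_path(int key) { return key * 10; }	/* hypothetical */

static int route_input(int key)
{
	int res;

	rcu_read_lock();		/* one section spans the whole lookup */

	if (cache_lookup(key)) {
		rcu_read_unlock();	/* fast path: unlock on its way out */
		return 0;
	}

	res = slow_path(key);		/* slow path runs under the same lock,
					 * so it may use __in_dev_get_rcu()
					 * instead of in_dev_get()/put() */
	rcu_read_unlock();
	return res;
}

int main(void)
{
	printf("%d %d\n", route_input(1), route_input(2));
	return 0;
}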
@@ -2415,12 +2413,12 @@ static int __mkroute_output(struct rtable **result,
2415 goto cleanup; 2413 goto cleanup;
2416 } 2414 }
2417 2415
2418 atomic_set(&rth->u.dst.__refcnt, 1); 2416 atomic_set(&rth->dst.__refcnt, 1);
2419 rth->u.dst.flags= DST_HOST; 2417 rth->dst.flags= DST_HOST;
2420 if (IN_DEV_CONF_GET(in_dev, NOXFRM)) 2418 if (IN_DEV_CONF_GET(in_dev, NOXFRM))
2421 rth->u.dst.flags |= DST_NOXFRM; 2419 rth->dst.flags |= DST_NOXFRM;
2422 if (IN_DEV_CONF_GET(in_dev, NOPOLICY)) 2420 if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
2423 rth->u.dst.flags |= DST_NOPOLICY; 2421 rth->dst.flags |= DST_NOPOLICY;
2424 2422
2425 rth->fl.fl4_dst = oldflp->fl4_dst; 2423 rth->fl.fl4_dst = oldflp->fl4_dst;
2426 rth->fl.fl4_tos = tos; 2424 rth->fl.fl4_tos = tos;
@@ -2432,35 +2430,35 @@ static int __mkroute_output(struct rtable **result,
2432 rth->rt_iif = oldflp->oif ? : dev_out->ifindex; 2430 rth->rt_iif = oldflp->oif ? : dev_out->ifindex;
2433 /* get references to the devices that are to be held by the routing 2431 /* get references to the devices that are to be held by the routing
2434 cache entry */ 2432 cache entry */
2435 rth->u.dst.dev = dev_out; 2433 rth->dst.dev = dev_out;
2436 dev_hold(dev_out); 2434 dev_hold(dev_out);
2437 rth->idev = in_dev_get(dev_out); 2435 rth->idev = in_dev_get(dev_out);
2438 rth->rt_gateway = fl->fl4_dst; 2436 rth->rt_gateway = fl->fl4_dst;
2439 rth->rt_spec_dst= fl->fl4_src; 2437 rth->rt_spec_dst= fl->fl4_src;
2440 2438
2441 rth->u.dst.output=ip_output; 2439 rth->dst.output=ip_output;
2442 rth->u.dst.obsolete = -1; 2440 rth->dst.obsolete = -1;
2443 rth->rt_genid = rt_genid(dev_net(dev_out)); 2441 rth->rt_genid = rt_genid(dev_net(dev_out));
2444 2442
2445 RT_CACHE_STAT_INC(out_slow_tot); 2443 RT_CACHE_STAT_INC(out_slow_tot);
2446 2444
2447 if (flags & RTCF_LOCAL) { 2445 if (flags & RTCF_LOCAL) {
2448 rth->u.dst.input = ip_local_deliver; 2446 rth->dst.input = ip_local_deliver;
2449 rth->rt_spec_dst = fl->fl4_dst; 2447 rth->rt_spec_dst = fl->fl4_dst;
2450 } 2448 }
2451 if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) { 2449 if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
2452 rth->rt_spec_dst = fl->fl4_src; 2450 rth->rt_spec_dst = fl->fl4_src;
2453 if (flags & RTCF_LOCAL && 2451 if (flags & RTCF_LOCAL &&
2454 !(dev_out->flags & IFF_LOOPBACK)) { 2452 !(dev_out->flags & IFF_LOOPBACK)) {
2455 rth->u.dst.output = ip_mc_output; 2453 rth->dst.output = ip_mc_output;
2456 RT_CACHE_STAT_INC(out_slow_mc); 2454 RT_CACHE_STAT_INC(out_slow_mc);
2457 } 2455 }
2458#ifdef CONFIG_IP_MROUTE 2456#ifdef CONFIG_IP_MROUTE
2459 if (res->type == RTN_MULTICAST) { 2457 if (res->type == RTN_MULTICAST) {
2460 if (IN_DEV_MFORWARD(in_dev) && 2458 if (IN_DEV_MFORWARD(in_dev) &&
2461 !ipv4_is_local_multicast(oldflp->fl4_dst)) { 2459 !ipv4_is_local_multicast(oldflp->fl4_dst)) {
2462 rth->u.dst.input = ip_mr_input; 2460 rth->dst.input = ip_mr_input;
2463 rth->u.dst.output = ip_mc_output; 2461 rth->dst.output = ip_mc_output;
2464 } 2462 }
2465 } 2463 }
2466#endif 2464#endif
@@ -2715,7 +2713,7 @@ int __ip_route_output_key(struct net *net, struct rtable **rp,
2715 2713
2716 rcu_read_lock_bh(); 2714 rcu_read_lock_bh();
2717 for (rth = rcu_dereference_bh(rt_hash_table[hash].chain); rth; 2715 for (rth = rcu_dereference_bh(rt_hash_table[hash].chain); rth;
2718 rth = rcu_dereference_bh(rth->u.dst.rt_next)) { 2716 rth = rcu_dereference_bh(rth->dst.rt_next)) {
2719 if (rth->fl.fl4_dst == flp->fl4_dst && 2717 if (rth->fl.fl4_dst == flp->fl4_dst &&
2720 rth->fl.fl4_src == flp->fl4_src && 2718 rth->fl.fl4_src == flp->fl4_src &&
2721 rth->fl.iif == 0 && 2719 rth->fl.iif == 0 &&
@@ -2723,9 +2721,9 @@ int __ip_route_output_key(struct net *net, struct rtable **rp,
2723 rth->fl.mark == flp->mark && 2721 rth->fl.mark == flp->mark &&
2724 !((rth->fl.fl4_tos ^ flp->fl4_tos) & 2722 !((rth->fl.fl4_tos ^ flp->fl4_tos) &
2725 (IPTOS_RT_MASK | RTO_ONLINK)) && 2723 (IPTOS_RT_MASK | RTO_ONLINK)) &&
2726 net_eq(dev_net(rth->u.dst.dev), net) && 2724 net_eq(dev_net(rth->dst.dev), net) &&
2727 !rt_is_expired(rth)) { 2725 !rt_is_expired(rth)) {
2728 dst_use(&rth->u.dst, jiffies); 2726 dst_use(&rth->dst, jiffies);
2729 RT_CACHE_STAT_INC(out_hit); 2727 RT_CACHE_STAT_INC(out_hit);
2730 rcu_read_unlock_bh(); 2728 rcu_read_unlock_bh();
2731 *rp = rth; 2729 *rp = rth;
@@ -2738,7 +2736,6 @@ int __ip_route_output_key(struct net *net, struct rtable **rp,
2738slow_output: 2736slow_output:
2739 return ip_route_output_slow(net, rp, flp); 2737 return ip_route_output_slow(net, rp, flp);
2740} 2738}
2741
2742EXPORT_SYMBOL_GPL(__ip_route_output_key); 2739EXPORT_SYMBOL_GPL(__ip_route_output_key);
2743 2740
2744static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu) 2741static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
@@ -2762,15 +2759,15 @@ static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi
2762 dst_alloc(&ipv4_dst_blackhole_ops); 2759 dst_alloc(&ipv4_dst_blackhole_ops);
2763 2760
2764 if (rt) { 2761 if (rt) {
2765 struct dst_entry *new = &rt->u.dst; 2762 struct dst_entry *new = &rt->dst;
2766 2763
2767 atomic_set(&new->__refcnt, 1); 2764 atomic_set(&new->__refcnt, 1);
2768 new->__use = 1; 2765 new->__use = 1;
2769 new->input = dst_discard; 2766 new->input = dst_discard;
2770 new->output = dst_discard; 2767 new->output = dst_discard;
2771 memcpy(new->metrics, ort->u.dst.metrics, RTAX_MAX*sizeof(u32)); 2768 memcpy(new->metrics, ort->dst.metrics, RTAX_MAX*sizeof(u32));
2772 2769
2773 new->dev = ort->u.dst.dev; 2770 new->dev = ort->dst.dev;
2774 if (new->dev) 2771 if (new->dev)
2775 dev_hold(new->dev); 2772 dev_hold(new->dev);
2776 2773
@@ -2794,7 +2791,7 @@ static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi
2794 dst_free(new); 2791 dst_free(new);
2795 } 2792 }
2796 2793
2797 dst_release(&(*rp)->u.dst); 2794 dst_release(&(*rp)->dst);
2798 *rp = rt; 2795 *rp = rt;
2799 return (rt ? 0 : -ENOMEM); 2796 return (rt ? 0 : -ENOMEM);
2800} 2797}
@@ -2822,13 +2819,13 @@ int ip_route_output_flow(struct net *net, struct rtable **rp, struct flowi *flp,
2822 2819
2823 return 0; 2820 return 0;
2824} 2821}
2825
2826EXPORT_SYMBOL_GPL(ip_route_output_flow); 2822EXPORT_SYMBOL_GPL(ip_route_output_flow);
2827 2823
2828int ip_route_output_key(struct net *net, struct rtable **rp, struct flowi *flp) 2824int ip_route_output_key(struct net *net, struct rtable **rp, struct flowi *flp)
2829{ 2825{
2830 return ip_route_output_flow(net, rp, flp, NULL, 0); 2826 return ip_route_output_flow(net, rp, flp, NULL, 0);
2831} 2827}
2828EXPORT_SYMBOL(ip_route_output_key);
2832 2829
2833static int rt_fill_info(struct net *net, 2830static int rt_fill_info(struct net *net,
2834 struct sk_buff *skb, u32 pid, u32 seq, int event, 2831 struct sk_buff *skb, u32 pid, u32 seq, int event,
@@ -2864,11 +2861,11 @@ static int rt_fill_info(struct net *net,
2864 r->rtm_src_len = 32; 2861 r->rtm_src_len = 32;
2865 NLA_PUT_BE32(skb, RTA_SRC, rt->fl.fl4_src); 2862 NLA_PUT_BE32(skb, RTA_SRC, rt->fl.fl4_src);
2866 } 2863 }
2867 if (rt->u.dst.dev) 2864 if (rt->dst.dev)
2868 NLA_PUT_U32(skb, RTA_OIF, rt->u.dst.dev->ifindex); 2865 NLA_PUT_U32(skb, RTA_OIF, rt->dst.dev->ifindex);
2869#ifdef CONFIG_NET_CLS_ROUTE 2866#ifdef CONFIG_NET_CLS_ROUTE
2870 if (rt->u.dst.tclassid) 2867 if (rt->dst.tclassid)
2871 NLA_PUT_U32(skb, RTA_FLOW, rt->u.dst.tclassid); 2868 NLA_PUT_U32(skb, RTA_FLOW, rt->dst.tclassid);
2872#endif 2869#endif
2873 if (rt->fl.iif) 2870 if (rt->fl.iif)
2874 NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst); 2871 NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst);
@@ -2878,12 +2875,16 @@ static int rt_fill_info(struct net *net,
2878 if (rt->rt_dst != rt->rt_gateway) 2875 if (rt->rt_dst != rt->rt_gateway)
2879 NLA_PUT_BE32(skb, RTA_GATEWAY, rt->rt_gateway); 2876 NLA_PUT_BE32(skb, RTA_GATEWAY, rt->rt_gateway);
2880 2877
2881 if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0) 2878 if (rtnetlink_put_metrics(skb, rt->dst.metrics) < 0)
2882 goto nla_put_failure; 2879 goto nla_put_failure;
2883 2880
2884 error = rt->u.dst.error; 2881 if (rt->fl.mark)
2885 expires = rt->u.dst.expires ? rt->u.dst.expires - jiffies : 0; 2882 NLA_PUT_BE32(skb, RTA_MARK, rt->fl.mark);
2883
2884 error = rt->dst.error;
2885 expires = rt->dst.expires ? rt->dst.expires - jiffies : 0;
2886 if (rt->peer) { 2886 if (rt->peer) {
2887 inet_peer_refcheck(rt->peer);
2887 id = atomic_read(&rt->peer->ip_id_count) & 0xffff; 2888 id = atomic_read(&rt->peer->ip_id_count) & 0xffff;
2888 if (rt->peer->tcp_ts_stamp) { 2889 if (rt->peer->tcp_ts_stamp) {
2889 ts = rt->peer->tcp_ts; 2890 ts = rt->peer->tcp_ts;
@@ -2914,7 +2915,7 @@ static int rt_fill_info(struct net *net,
2914 NLA_PUT_U32(skb, RTA_IIF, rt->fl.iif); 2915 NLA_PUT_U32(skb, RTA_IIF, rt->fl.iif);
2915 } 2916 }
2916 2917
2917 if (rtnl_put_cacheinfo(skb, &rt->u.dst, id, ts, tsage, 2918 if (rtnl_put_cacheinfo(skb, &rt->dst, id, ts, tsage,
2918 expires, error) < 0) 2919 expires, error) < 0)
2919 goto nla_put_failure; 2920 goto nla_put_failure;
2920 2921
@@ -2935,6 +2936,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
2935 __be32 src = 0; 2936 __be32 src = 0;
2936 u32 iif; 2937 u32 iif;
2937 int err; 2938 int err;
2939 int mark;
2938 struct sk_buff *skb; 2940 struct sk_buff *skb;
2939 2941
2940 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy); 2942 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
@@ -2962,6 +2964,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
2962 src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0; 2964 src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0;
2963 dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0; 2965 dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
2964 iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0; 2966 iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
2967 mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
2965 2968
2966 if (iif) { 2969 if (iif) {
2967 struct net_device *dev; 2970 struct net_device *dev;
@@ -2974,13 +2977,14 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
2974 2977
2975 skb->protocol = htons(ETH_P_IP); 2978 skb->protocol = htons(ETH_P_IP);
2976 skb->dev = dev; 2979 skb->dev = dev;
2980 skb->mark = mark;
2977 local_bh_disable(); 2981 local_bh_disable();
2978 err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev); 2982 err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
2979 local_bh_enable(); 2983 local_bh_enable();
2980 2984
2981 rt = skb_rtable(skb); 2985 rt = skb_rtable(skb);
2982 if (err == 0 && rt->u.dst.error) 2986 if (err == 0 && rt->dst.error)
2983 err = -rt->u.dst.error; 2987 err = -rt->dst.error;
2984 } else { 2988 } else {
2985 struct flowi fl = { 2989 struct flowi fl = {
2986 .nl_u = { 2990 .nl_u = {
@@ -2991,6 +2995,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
2991 }, 2995 },
2992 }, 2996 },
2993 .oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0, 2997 .oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0,
2998 .mark = mark,
2994 }; 2999 };
2995 err = ip_route_output_key(net, &rt, &fl); 3000 err = ip_route_output_key(net, &rt, &fl);
2996 } 3001 }
@@ -2998,7 +3003,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
2998 if (err) 3003 if (err)
2999 goto errout_free; 3004 goto errout_free;
3000 3005
3001 skb_dst_set(skb, &rt->u.dst); 3006 skb_dst_set(skb, &rt->dst);
3002 if (rtm->rtm_flags & RTM_F_NOTIFY) 3007 if (rtm->rtm_flags & RTM_F_NOTIFY)
3003 rt->rt_flags |= RTCF_NOTIFY; 3008 rt->rt_flags |= RTCF_NOTIFY;
3004 3009
@@ -3034,12 +3039,12 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
3034 continue; 3039 continue;
3035 rcu_read_lock_bh(); 3040 rcu_read_lock_bh();
3036 for (rt = rcu_dereference_bh(rt_hash_table[h].chain), idx = 0; rt; 3041 for (rt = rcu_dereference_bh(rt_hash_table[h].chain), idx = 0; rt;
3037 rt = rcu_dereference_bh(rt->u.dst.rt_next), idx++) { 3042 rt = rcu_dereference_bh(rt->dst.rt_next), idx++) {
3038 if (!net_eq(dev_net(rt->u.dst.dev), net) || idx < s_idx) 3043 if (!net_eq(dev_net(rt->dst.dev), net) || idx < s_idx)
3039 continue; 3044 continue;
3040 if (rt_is_expired(rt)) 3045 if (rt_is_expired(rt))
3041 continue; 3046 continue;
3042 skb_dst_set_noref(skb, &rt->u.dst); 3047 skb_dst_set_noref(skb, &rt->dst);
3043 if (rt_fill_info(net, skb, NETLINK_CB(cb->skb).pid, 3048 if (rt_fill_info(net, skb, NETLINK_CB(cb->skb).pid,
3044 cb->nlh->nlmsg_seq, RTM_NEWROUTE, 3049 cb->nlh->nlmsg_seq, RTM_NEWROUTE,
3045 1, NLM_F_MULTI) <= 0) { 3050 1, NLM_F_MULTI) <= 0) {
@@ -3365,6 +3370,3 @@ void __init ip_static_sysctl_init(void)
3365 register_sysctl_paths(ipv4_path, ipv4_skeleton); 3370 register_sysctl_paths(ipv4_path, ipv4_skeleton);
3366} 3371}
3367#endif 3372#endif
3368
3369EXPORT_SYMBOL(__ip_select_ident);
3370EXPORT_SYMBOL(ip_route_output_key);
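Most of the churn in this file is the mechanical rename of rt->u.dst to rt->dst: struct rtable's embedded dst_entry no longer lives inside a union named u but sits directly as the first member. The layout property the routing code relies on, sketched with simplified stand-in types (the real structs carry many more fields):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins; field names are illustrative only. */
struct dst_entry { int refcnt; };

struct rtable {
	struct dst_entry dst;	/* first member: &rt->dst == (void *)rt */
	unsigned int rt_flags;
};

int main(void)
{
	struct rtable rt = { { 1 }, 0 };

	/* The identity that keeps dst_entry/rtable casts safe: */
	assert((void *)&rt == (void *)&rt.dst);
	assert(offsetof(struct rtable, dst) == 0);
	printf("dst offset: %zu\n", offsetof(struct rtable, dst));
	return 0;
}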
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 9f6b22206c52..650cace2180d 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -18,8 +18,8 @@
18#include <net/tcp.h> 18#include <net/tcp.h>
19#include <net/route.h> 19#include <net/route.h>
20 20
21/* Timestamps: lowest 9 bits store TCP options */ 21/* Timestamps: lowest bits store TCP options */
22#define TSBITS 9 22#define TSBITS 6
23#define TSMASK (((__u32)1 << TSBITS) - 1) 23#define TSMASK (((__u32)1 << TSBITS) - 1)
24 24
25extern int sysctl_tcp_syncookies; 25extern int sysctl_tcp_syncookies;
@@ -58,7 +58,7 @@ static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
58 58
59/* 59/*
60 * when syncookies are in effect and tcp timestamps are enabled we encode 60 * when syncookies are in effect and tcp timestamps are enabled we encode
61 * tcp options in the lowest 9 bits of the timestamp value that will be 61 * tcp options in the lower bits of the timestamp value that will be
62 * sent in the syn-ack. 62 * sent in the syn-ack.
63 * Since subsequent timestamps use the normal tcp_time_stamp value, we 63 * Since subsequent timestamps use the normal tcp_time_stamp value, we
64 * must make sure that the resulting initial timestamp is <= tcp_time_stamp. 64 * must make sure that the resulting initial timestamp is <= tcp_time_stamp.
@@ -70,11 +70,10 @@ __u32 cookie_init_timestamp(struct request_sock *req)
70 u32 options = 0; 70 u32 options = 0;
71 71
72 ireq = inet_rsk(req); 72 ireq = inet_rsk(req);
73 if (ireq->wscale_ok) { 73
74 options = ireq->snd_wscale; 74 options = ireq->wscale_ok ? ireq->snd_wscale : 0xf;
75 options |= ireq->rcv_wscale << 4; 75 options |= ireq->sack_ok << 4;
76 } 76 options |= ireq->ecn_ok << 5;
77 options |= ireq->sack_ok << 8;
78 77
79 ts = ts_now & ~TSMASK; 78 ts = ts_now & ~TSMASK;
80 ts |= options; 79 ts |= options;
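With TSBITS cut from 9 to 6, the encoder now packs the peer's send window scale (0xf meaning "no window scaling") into bits 0-3 and the SACK and ECN flags into bits 4 and 5 of the initial timestamp. A userspace sketch of the layout; the final adjustment that keeps the result <= ts_now is simplified here to subtracting one 2^TSBITS step, which leaves the option bits intact:

#include <stdint.h>
#include <stdio.h>

#define TSBITS 6
#define TSMASK (((uint32_t)1 << TSBITS) - 1)

static uint32_t encode_ts(uint32_t ts_now, int wscale_ok, unsigned snd_wscale,
			  int sack_ok, int ecn_ok)
{
	uint32_t options, ts;

	options = wscale_ok ? (snd_wscale & 0xf) : 0xf;	/* 0xf = no wscale */
	options |= (uint32_t)(sack_ok & 1) << 4;
	options |= (uint32_t)(ecn_ok & 1) << 5;

	ts = (ts_now & ~TSMASK) | options;
	if (ts > ts_now)		/* keep initial timestamp <= ts_now */
		ts -= (uint32_t)1 << TSBITS;	/* low bits stay intact */
	return ts;
}

int main(void)
{
	uint32_t ts = encode_ts(1000000, 1, 7, 1, 0);

	printf("ts=%u wscale=%u sack=%u ecn=%u\n", ts,
	       ts & 0xf, (ts >> 4) & 1, (ts >> 5) & 1);
	return 0;
}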
@@ -138,23 +137,23 @@ static __u32 check_tcp_syn_cookie(__u32 cookie, __be32 saddr, __be32 daddr,
138} 137}
139 138
140/* 139/*
141 * This table has to be sorted and terminated with (__u16)-1. 140 * MSS Values are taken from the 2009 paper
142 * XXX generate a better table. 141 * 'Measuring TCP Maximum Segment Size' by S. Alcock and R. Nelson:
143 * Unresolved Issues: HIPPI with a 64k MSS is not well supported. 142 * - values 1440 to 1460 accounted for 80% of observed mss values
143 * - values outside the 536-1460 range are rare (<0.2%).
144 *
145 * Table must be sorted.
144 */ 146 */
145static __u16 const msstab[] = { 147static __u16 const msstab[] = {
146 64 - 1, 148 64,
147 256 - 1, 149 512,
148 512 - 1, 150 536,
149 536 - 1, 151 1024,
150 1024 - 1, 152 1440,
151 1440 - 1, 153 1460,
152 1460 - 1, 154 4312,
153 4312 - 1, 155 8960,
154 (__u16)-1
155}; 156};
156/* The number doesn't include the -1 terminator */
157#define NUM_MSS (ARRAY_SIZE(msstab) - 1)
158 157
159/* 158/*
160 * Generate a syncookie. mssp points to the mss, which is returned 159 * Generate a syncookie. mssp points to the mss, which is returned
@@ -169,10 +168,10 @@ __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
169 168
170 tcp_synq_overflow(sk); 169 tcp_synq_overflow(sk);
171 170
172 /* XXX sort msstab[] by probability? Binary search? */ 171 for (mssind = ARRAY_SIZE(msstab) - 1; mssind ; mssind--)
173 for (mssind = 0; mss > msstab[mssind + 1]; mssind++) 172 if (mss >= msstab[mssind])
174 ; 173 break;
175 *mssp = msstab[mssind] + 1; 174 *mssp = msstab[mssind];
176 175
177 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT); 176 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
178 177
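The new msstab drops both the "value - 1" bias and the (__u16)-1 terminator: entries are now plain MSS values sorted ascending, chosen from the measurement data cited above, and the lookup scans down from the largest entry to find the biggest value not exceeding the peer's advertised MSS (anything below 64 falls through to index 0). A standalone model of table and lookup:

#include <stdio.h>
#include <stdint.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Same values as the new msstab: real MSS values, sorted ascending. */
static const uint16_t msstab[] = {
	64, 512, 536, 1024, 1440, 1460, 4312, 8960,
};

/* Largest table entry <= the advertised MSS; the 3-bit index is what
 * actually gets folded into the cookie. */
static unsigned mss_to_index(unsigned mss)
{
	unsigned mssind;

	for (mssind = ARRAY_SIZE(msstab) - 1; mssind; mssind--)
		if (mss >= msstab[mssind])
			break;
	return mssind;
}

int main(void)
{
	unsigned probes[] = { 40, 536, 1452, 1460, 9000 };

	for (unsigned i = 0; i < ARRAY_SIZE(probes); i++) {
		unsigned idx = mss_to_index(probes[i]);
		printf("mss %4u -> index %u (encodes %u)\n",
		       probes[i], idx, msstab[idx]);
	}
	return 0;
}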
@@ -202,7 +201,7 @@ static inline int cookie_check(struct sk_buff *skb, __u32 cookie)
202 jiffies / (HZ * 60), 201 jiffies / (HZ * 60),
203 COUNTER_TRIES); 202 COUNTER_TRIES);
204 203
205 return mssind < NUM_MSS ? msstab[mssind] + 1 : 0; 204 return mssind < ARRAY_SIZE(msstab) ? msstab[mssind] : 0;
206} 205}
207 206
208static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb, 207static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
@@ -227,26 +226,38 @@ static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
227 * additional tcp options in the timestamp. 226 * additional tcp options in the timestamp.
228 * This extracts these options from the timestamp echo. 227 * This extracts these options from the timestamp echo.
229 * 228 *
230 * The lowest 4 bits are for snd_wscale 229 * The lowest 4 bits store snd_wscale.
231 * The next 4 lsb are for rcv_wscale 230 * The next 2 bits indicate SACK and ECN support.
232 * The next lsb is for sack_ok 231 *
232 * return false if we decode an option that should not be present.
233 */ 233 */
234void cookie_check_timestamp(struct tcp_options_received *tcp_opt) 234bool cookie_check_timestamp(struct tcp_options_received *tcp_opt, bool *ecn_ok)
235{ 235{
236 /* echoed timestamp, 9 lowest bits contain options */ 236 /* echoed timestamp, lowest bits contain options */
237 u32 options = tcp_opt->rcv_tsecr & TSMASK; 237 u32 options = tcp_opt->rcv_tsecr & TSMASK;
238 238
239 tcp_opt->snd_wscale = options & 0xf; 239 if (!tcp_opt->saw_tstamp) {
240 options >>= 4; 240 tcp_clear_options(tcp_opt);
241 tcp_opt->rcv_wscale = options & 0xf; 241 return true;
242 }
243
244 if (!sysctl_tcp_timestamps)
245 return false;
242 246
243 tcp_opt->sack_ok = (options >> 4) & 0x1; 247 tcp_opt->sack_ok = (options >> 4) & 0x1;
248 *ecn_ok = (options >> 5) & 1;
249 if (*ecn_ok && !sysctl_tcp_ecn)
250 return false;
251
252 if (tcp_opt->sack_ok && !sysctl_tcp_sack)
253 return false;
244 254
245 if (tcp_opt->sack_ok) 255 if ((options & 0xf) == 0xf)
246 tcp_sack_reset(tcp_opt); 256 return true; /* no window scaling */
247 257
248 if (tcp_opt->snd_wscale || tcp_opt->rcv_wscale) 258 tcp_opt->wscale_ok = 1;
249 tcp_opt->wscale_ok = 1; 259 tcp_opt->snd_wscale = options & 0xf;
260 return sysctl_tcp_window_scaling != 0;
250} 261}
251EXPORT_SYMBOL(cookie_check_timestamp); 262EXPORT_SYMBOL(cookie_check_timestamp);
252 263
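cookie_check_timestamp() now returns bool and validates the echoed options against the local sysctls, so a cookie claiming SACK, ECN, or window scaling is rejected when the corresponding feature is disabled. A userspace model of the decode path; the sysctl variables are stand-ins for the kernel's, and the no-timestamp case simply returns true instead of clearing the whole option block:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TSBITS 6
#define TSMASK (((uint32_t)1 << TSBITS) - 1)

/* Hypothetical stand-ins for the sysctls the real code consults. */
static int sysctl_tcp_timestamps = 1;
static int sysctl_tcp_sack = 1;
static int sysctl_tcp_ecn = 0;
static int sysctl_tcp_window_scaling = 1;

struct opts {
	bool saw_tstamp;
	uint32_t rcv_tsecr;
	bool sack_ok, wscale_ok;
	unsigned snd_wscale;
};

static bool check_timestamp(struct opts *o, bool *ecn_ok)
{
	uint32_t options = o->rcv_tsecr & TSMASK;

	if (!o->saw_tstamp)
		return true;		/* no timestamp, nothing encoded */
	if (!sysctl_tcp_timestamps)
		return false;

	o->sack_ok = (options >> 4) & 1;
	*ecn_ok = (options >> 5) & 1;
	if (*ecn_ok && !sysctl_tcp_ecn)
		return false;
	if (o->sack_ok && !sysctl_tcp_sack)
		return false;

	if ((options & 0xf) == 0xf)
		return true;		/* 0xf means no window scaling */
	o->wscale_ok = true;
	o->snd_wscale = options & 0xf;
	return sysctl_tcp_window_scaling != 0;
}

int main(void)
{
	bool ecn_ok = false;
	struct opts o = { .saw_tstamp = true, .rcv_tsecr = 23 };
	bool ok = check_timestamp(&o, &ecn_ok);

	printf("ok=%d sack=%d wscale_ok=%d wscale=%u ecn=%d\n",
	       ok, o.sack_ok, o.wscale_ok, o.snd_wscale, ecn_ok);
	return 0;
}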
@@ -265,8 +276,9 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
265 int mss; 276 int mss;
266 struct rtable *rt; 277 struct rtable *rt;
267 __u8 rcv_wscale; 278 __u8 rcv_wscale;
279 bool ecn_ok;
268 280
269 if (!sysctl_tcp_syncookies || !th->ack) 281 if (!sysctl_tcp_syncookies || !th->ack || th->rst)
270 goto out; 282 goto out;
271 283
272 if (tcp_synq_no_recent_overflow(sk) || 284 if (tcp_synq_no_recent_overflow(sk) ||
@@ -281,8 +293,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
281 memset(&tcp_opt, 0, sizeof(tcp_opt)); 293 memset(&tcp_opt, 0, sizeof(tcp_opt));
282 tcp_parse_options(skb, &tcp_opt, &hash_location, 0); 294 tcp_parse_options(skb, &tcp_opt, &hash_location, 0);
283 295
284 if (tcp_opt.saw_tstamp) 296 if (!cookie_check_timestamp(&tcp_opt, &ecn_ok))
285 cookie_check_timestamp(&tcp_opt); 297 goto out;
286 298
287 ret = NULL; 299 ret = NULL;
288 req = inet_reqsk_alloc(&tcp_request_sock_ops); /* for safety */ 300 req = inet_reqsk_alloc(&tcp_request_sock_ops); /* for safety */
@@ -298,9 +310,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
298 ireq->rmt_port = th->source; 310 ireq->rmt_port = th->source;
299 ireq->loc_addr = ip_hdr(skb)->daddr; 311 ireq->loc_addr = ip_hdr(skb)->daddr;
300 ireq->rmt_addr = ip_hdr(skb)->saddr; 312 ireq->rmt_addr = ip_hdr(skb)->saddr;
301 ireq->ecn_ok = 0; 313 ireq->ecn_ok = ecn_ok;
302 ireq->snd_wscale = tcp_opt.snd_wscale; 314 ireq->snd_wscale = tcp_opt.snd_wscale;
303 ireq->rcv_wscale = tcp_opt.rcv_wscale;
304 ireq->sack_ok = tcp_opt.sack_ok; 315 ireq->sack_ok = tcp_opt.sack_ok;
305 ireq->wscale_ok = tcp_opt.wscale_ok; 316 ireq->wscale_ok = tcp_opt.wscale_ok;
306 ireq->tstamp_ok = tcp_opt.saw_tstamp; 317 ireq->tstamp_ok = tcp_opt.saw_tstamp;
@@ -354,15 +365,15 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
354 } 365 }
355 366
356 /* Try to redo what tcp_v4_send_synack did. */ 367 /* Try to redo what tcp_v4_send_synack did. */
357 req->window_clamp = tp->window_clamp ? :dst_metric(&rt->u.dst, RTAX_WINDOW); 368 req->window_clamp = tp->window_clamp ? :dst_metric(&rt->dst, RTAX_WINDOW);
358 369
359 tcp_select_initial_window(tcp_full_space(sk), req->mss, 370 tcp_select_initial_window(tcp_full_space(sk), req->mss,
360 &req->rcv_wnd, &req->window_clamp, 371 &req->rcv_wnd, &req->window_clamp,
361 ireq->wscale_ok, &rcv_wscale, 372 ireq->wscale_ok, &rcv_wscale,
362 dst_metric(&rt->u.dst, RTAX_INITRWND)); 373 dst_metric(&rt->dst, RTAX_INITRWND));
363 374
364 ireq->rcv_wscale = rcv_wscale; 375 ireq->rcv_wscale = rcv_wscale;
365 376
366 ret = get_cookie_sock(sk, skb, req, &rt->u.dst); 377 ret = get_cookie_sock(sk, skb, req, &rt->dst);
367out: return ret; 378out: return ret;
368} 379}
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 6596b4feeddc..176e11aaea77 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -315,7 +315,6 @@ struct tcp_splice_state {
315 * is strict, actions are advisory and have some latency. 315 * is strict, actions are advisory and have some latency.
316 */ 316 */
317int tcp_memory_pressure __read_mostly; 317int tcp_memory_pressure __read_mostly;
318
319EXPORT_SYMBOL(tcp_memory_pressure); 318EXPORT_SYMBOL(tcp_memory_pressure);
320 319
321void tcp_enter_memory_pressure(struct sock *sk) 320void tcp_enter_memory_pressure(struct sock *sk)
@@ -325,7 +324,6 @@ void tcp_enter_memory_pressure(struct sock *sk)
325 tcp_memory_pressure = 1; 324 tcp_memory_pressure = 1;
326 } 325 }
327} 326}
328
329EXPORT_SYMBOL(tcp_enter_memory_pressure); 327EXPORT_SYMBOL(tcp_enter_memory_pressure);
330 328
331/* Convert seconds to retransmits based on initial and max timeout */ 329/* Convert seconds to retransmits based on initial and max timeout */
@@ -460,6 +458,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
460 } 458 }
461 return mask; 459 return mask;
462} 460}
461EXPORT_SYMBOL(tcp_poll);
463 462
464int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg) 463int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
465{ 464{
@@ -508,10 +507,11 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
508 507
509 return put_user(answ, (int __user *)arg); 508 return put_user(answ, (int __user *)arg);
510} 509}
510EXPORT_SYMBOL(tcp_ioctl);
511 511
512static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb) 512static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
513{ 513{
514 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH; 514 TCP_SKB_CB(skb)->flags |= TCPHDR_PSH;
515 tp->pushed_seq = tp->write_seq; 515 tp->pushed_seq = tp->write_seq;
516} 516}
517 517
@@ -527,7 +527,7 @@ static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
527 527
528 skb->csum = 0; 528 skb->csum = 0;
529 tcb->seq = tcb->end_seq = tp->write_seq; 529 tcb->seq = tcb->end_seq = tp->write_seq;
530 tcb->flags = TCPCB_FLAG_ACK; 530 tcb->flags = TCPHDR_ACK;
531 tcb->sacked = 0; 531 tcb->sacked = 0;
532 skb_header_release(skb); 532 skb_header_release(skb);
533 tcp_add_write_queue_tail(sk, skb); 533 tcp_add_write_queue_tail(sk, skb);
@@ -608,6 +608,7 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
608 ssize_t spliced; 608 ssize_t spliced;
609 int ret; 609 int ret;
610 610
611 sock_rps_record_flow(sk);
611 /* 612 /*
612 * We can't seek on a socket input 613 * We can't seek on a socket input
613 */ 614 */
@@ -675,6 +676,7 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
675 676
676 return ret; 677 return ret;
677} 678}
679EXPORT_SYMBOL(tcp_splice_read);
678 680
679struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp) 681struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
680{ 682{
@@ -815,7 +817,7 @@ new_segment:
815 skb_shinfo(skb)->gso_segs = 0; 817 skb_shinfo(skb)->gso_segs = 0;
816 818
817 if (!copied) 819 if (!copied)
818 TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH; 820 TCP_SKB_CB(skb)->flags &= ~TCPHDR_PSH;
819 821
820 copied += copy; 822 copied += copy;
821 poffset += copy; 823 poffset += copy;
@@ -856,15 +858,15 @@ out_err:
856 return sk_stream_error(sk, flags, err); 858 return sk_stream_error(sk, flags, err);
857} 859}
858 860
859ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset, 861int tcp_sendpage(struct sock *sk, struct page *page, int offset,
860 size_t size, int flags) 862 size_t size, int flags)
861{ 863{
862 ssize_t res; 864 ssize_t res;
863 struct sock *sk = sock->sk;
864 865
865 if (!(sk->sk_route_caps & NETIF_F_SG) || 866 if (!(sk->sk_route_caps & NETIF_F_SG) ||
866 !(sk->sk_route_caps & NETIF_F_ALL_CSUM)) 867 !(sk->sk_route_caps & NETIF_F_ALL_CSUM))
867 return sock_no_sendpage(sock, page, offset, size, flags); 868 return sock_no_sendpage(sk->sk_socket, page, offset, size,
869 flags);
868 870
869 lock_sock(sk); 871 lock_sock(sk);
870 TCP_CHECK_TIMER(sk); 872 TCP_CHECK_TIMER(sk);
@@ -873,6 +875,7 @@ ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
873 release_sock(sk); 875 release_sock(sk);
874 return res; 876 return res;
875} 877}
878EXPORT_SYMBOL(tcp_sendpage);
876 879
877#define TCP_PAGE(sk) (sk->sk_sndmsg_page) 880#define TCP_PAGE(sk) (sk->sk_sndmsg_page)
878#define TCP_OFF(sk) (sk->sk_sndmsg_off) 881#define TCP_OFF(sk) (sk->sk_sndmsg_off)
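tcp_sendpage() (and tcp_sendmsg() just below) moves from the socket-level signature to a sock-level one, so in-kernel callers holding only a struct sock can invoke it directly; the sock_no_sendpage() fallback reaches the wrapper through sk->sk_socket when it must. A toy model of the two shapes, with made-up field names:

#include <stdio.h>

struct socket;
struct sock { struct socket *sk_socket; int sg_capable; };
struct socket { struct sock *sk; };

/* Old shape: the op took the struct socket wrapper and dug out sk. */
static int old_sendpage(struct socket *sock, int len)
{
	struct sock *sk = sock->sk;

	return sk->sg_capable ? len : -1;
}

/* New shape: the op takes struct sock directly; the no-SG fallback
 * would go through sk->sk_socket, as the kernel code does. */
static int new_sendpage(struct sock *sk, int len)
{
	if (!sk->sg_capable)
		return -1;	/* kernel: sock_no_sendpage(sk->sk_socket, ...) */
	return len;
}

int main(void)
{
	struct socket so;
	struct sock sk = { &so, 1 };

	so.sk = &sk;
	printf("%d %d\n", old_sendpage(&so, 100), new_sendpage(&sk, 100));
	return 0;
}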
@@ -897,10 +900,9 @@ static inline int select_size(struct sock *sk, int sg)
897 return tmp; 900 return tmp;
898} 901}
899 902
900int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, 903int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
901 size_t size) 904 size_t size)
902{ 905{
903 struct sock *sk = sock->sk;
904 struct iovec *iov; 906 struct iovec *iov;
905 struct tcp_sock *tp = tcp_sk(sk); 907 struct tcp_sock *tp = tcp_sk(sk);
906 struct sk_buff *skb; 908 struct sk_buff *skb;
@@ -1061,7 +1063,7 @@ new_segment:
1061 } 1063 }
1062 1064
1063 if (!copied) 1065 if (!copied)
1064 TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH; 1066 TCP_SKB_CB(skb)->flags &= ~TCPHDR_PSH;
1065 1067
1066 tp->write_seq += copy; 1068 tp->write_seq += copy;
1067 TCP_SKB_CB(skb)->end_seq += copy; 1069 TCP_SKB_CB(skb)->end_seq += copy;
@@ -1121,6 +1123,7 @@ out_err:
1121 release_sock(sk); 1123 release_sock(sk);
1122 return err; 1124 return err;
1123} 1125}
1126EXPORT_SYMBOL(tcp_sendmsg);
1124 1127
1125/* 1128/*
1126 * Handle reading urgent data. BSD has very simple semantics for 1129 * Handle reading urgent data. BSD has very simple semantics for
@@ -1380,6 +1383,7 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
1380 tcp_cleanup_rbuf(sk, copied); 1383 tcp_cleanup_rbuf(sk, copied);
1381 return copied; 1384 return copied;
1382} 1385}
1386EXPORT_SYMBOL(tcp_read_sock);
1383 1387
1384/* 1388/*
1385 * This routine copies from a sock struct into the user buffer. 1389 * This routine copies from a sock struct into the user buffer.
@@ -1774,6 +1778,7 @@ recv_urg:
1774 err = tcp_recv_urg(sk, msg, len, flags); 1778 err = tcp_recv_urg(sk, msg, len, flags);
1775 goto out; 1779 goto out;
1776} 1780}
1781EXPORT_SYMBOL(tcp_recvmsg);
1777 1782
1778void tcp_set_state(struct sock *sk, int state) 1783void tcp_set_state(struct sock *sk, int state)
1779{ 1784{
@@ -1866,6 +1871,7 @@ void tcp_shutdown(struct sock *sk, int how)
1866 tcp_send_fin(sk); 1871 tcp_send_fin(sk);
1867 } 1872 }
1868} 1873}
1874EXPORT_SYMBOL(tcp_shutdown);
1869 1875
1870void tcp_close(struct sock *sk, long timeout) 1876void tcp_close(struct sock *sk, long timeout)
1871{ 1877{
@@ -1898,6 +1904,10 @@ void tcp_close(struct sock *sk, long timeout)
1898 1904
1899 sk_mem_reclaim(sk); 1905 sk_mem_reclaim(sk);
1900 1906
1907 /* If socket has been already reset (e.g. in tcp_reset()) - kill it. */
1908 if (sk->sk_state == TCP_CLOSE)
1909 goto adjudge_to_death;
1910
1901 /* As outlined in RFC 2525, section 2.17, we send a RST here because 1911 /* As outlined in RFC 2525, section 2.17, we send a RST here because
1902 * data was lost. To witness the awful effects of the old behavior of 1912 * data was lost. To witness the awful effects of the old behavior of
1903 * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk 1913 * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk
@@ -2025,6 +2035,7 @@ out:
2025 local_bh_enable(); 2035 local_bh_enable();
2026 sock_put(sk); 2036 sock_put(sk);
2027} 2037}
2038EXPORT_SYMBOL(tcp_close);
2028 2039
2029/* These states need RST on ABORT according to RFC793 */ 2040/* These states need RST on ABORT according to RFC793 */
2030 2041
@@ -2098,6 +2109,7 @@ int tcp_disconnect(struct sock *sk, int flags)
2098 sk->sk_error_report(sk); 2109 sk->sk_error_report(sk);
2099 return err; 2110 return err;
2100} 2111}
2112EXPORT_SYMBOL(tcp_disconnect);
2101 2113
2102/* 2114/*
2103 * Socket option code for TCP. 2115 * Socket option code for TCP.
@@ -2175,6 +2187,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
2175 GFP_KERNEL); 2187 GFP_KERNEL);
2176 if (cvp == NULL) 2188 if (cvp == NULL)
2177 return -ENOMEM; 2189 return -ENOMEM;
2190
2191 kref_init(&cvp->kref);
2178 } 2192 }
2179 lock_sock(sk); 2193 lock_sock(sk);
2180 tp->rx_opt.cookie_in_always = 2194 tp->rx_opt.cookie_in_always =
@@ -2189,12 +2203,11 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
2189 */ 2203 */
2190 kref_put(&tp->cookie_values->kref, 2204 kref_put(&tp->cookie_values->kref,
2191 tcp_cookie_values_release); 2205 tcp_cookie_values_release);
2192 kref_init(&cvp->kref);
2193 tp->cookie_values = cvp;
2194 } else { 2206 } else {
2195 cvp = tp->cookie_values; 2207 cvp = tp->cookie_values;
2196 } 2208 }
2197 } 2209 }
2210
2198 if (cvp != NULL) { 2211 if (cvp != NULL) {
2199 cvp->cookie_desired = ctd.tcpct_cookie_desired; 2212 cvp->cookie_desired = ctd.tcpct_cookie_desired;
2200 2213
@@ -2208,6 +2221,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
2208 cvp->s_data_desired = ctd.tcpct_s_data_desired; 2221 cvp->s_data_desired = ctd.tcpct_s_data_desired;
2209 cvp->s_data_constant = 0; /* false */ 2222 cvp->s_data_constant = 0; /* false */
2210 } 2223 }
2224
2225 tp->cookie_values = cvp;
2211 } 2226 }
2212 release_sock(sk); 2227 release_sock(sk);
2213 return err; 2228 return err;
@@ -2396,6 +2411,7 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
2396 optval, optlen); 2411 optval, optlen);
2397 return do_tcp_setsockopt(sk, level, optname, optval, optlen); 2412 return do_tcp_setsockopt(sk, level, optname, optval, optlen);
2398} 2413}
2414EXPORT_SYMBOL(tcp_setsockopt);
2399 2415
2400#ifdef CONFIG_COMPAT 2416#ifdef CONFIG_COMPAT
2401int compat_tcp_setsockopt(struct sock *sk, int level, int optname, 2417int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
@@ -2406,7 +2422,6 @@ int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
2406 optval, optlen); 2422 optval, optlen);
2407 return do_tcp_setsockopt(sk, level, optname, optval, optlen); 2423 return do_tcp_setsockopt(sk, level, optname, optval, optlen);
2408} 2424}
2409
2410EXPORT_SYMBOL(compat_tcp_setsockopt); 2425EXPORT_SYMBOL(compat_tcp_setsockopt);
2411#endif 2426#endif
2412 2427
@@ -2472,7 +2487,6 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
2472 2487
2473 info->tcpi_total_retrans = tp->total_retrans; 2488 info->tcpi_total_retrans = tp->total_retrans;
2474} 2489}
2475
2476EXPORT_SYMBOL_GPL(tcp_get_info); 2490EXPORT_SYMBOL_GPL(tcp_get_info);
2477 2491
2478static int do_tcp_getsockopt(struct sock *sk, int level, 2492static int do_tcp_getsockopt(struct sock *sk, int level,
@@ -2590,6 +2604,12 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
2590 return -EFAULT; 2604 return -EFAULT;
2591 return 0; 2605 return 0;
2592 } 2606 }
2607 case TCP_THIN_LINEAR_TIMEOUTS:
2608 val = tp->thin_lto;
2609 break;
2610 case TCP_THIN_DUPACK:
2611 val = tp->thin_dupack;
2612 break;
2593 default: 2613 default:
2594 return -ENOPROTOOPT; 2614 return -ENOPROTOOPT;
2595 } 2615 }
@@ -2611,6 +2631,7 @@ int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
2611 optval, optlen); 2631 optval, optlen);
2612 return do_tcp_getsockopt(sk, level, optname, optval, optlen); 2632 return do_tcp_getsockopt(sk, level, optname, optval, optlen);
2613} 2633}
2634EXPORT_SYMBOL(tcp_getsockopt);
2614 2635
2615#ifdef CONFIG_COMPAT 2636#ifdef CONFIG_COMPAT
2616int compat_tcp_getsockopt(struct sock *sk, int level, int optname, 2637int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
@@ -2621,7 +2642,6 @@ int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
2621 optval, optlen); 2642 optval, optlen);
2622 return do_tcp_getsockopt(sk, level, optname, optval, optlen); 2643 return do_tcp_getsockopt(sk, level, optname, optval, optlen);
2623} 2644}
2624
2625EXPORT_SYMBOL(compat_tcp_getsockopt); 2645EXPORT_SYMBOL(compat_tcp_getsockopt);
2626#endif 2646#endif
2627 2647
@@ -2858,7 +2878,6 @@ void tcp_free_md5sig_pool(void)
2858 if (pool) 2878 if (pool)
2859 __tcp_free_md5sig_pool(pool); 2879 __tcp_free_md5sig_pool(pool);
2860} 2880}
2861
2862EXPORT_SYMBOL(tcp_free_md5sig_pool); 2881EXPORT_SYMBOL(tcp_free_md5sig_pool);
2863 2882
2864static struct tcp_md5sig_pool * __percpu * 2883static struct tcp_md5sig_pool * __percpu *
@@ -2934,7 +2953,6 @@ retry:
2934 } 2953 }
2935 return pool; 2954 return pool;
2936} 2955}
2937
2938EXPORT_SYMBOL(tcp_alloc_md5sig_pool); 2956EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
2939 2957
2940 2958
@@ -2958,7 +2976,7 @@ struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
2958 spin_unlock(&tcp_md5sig_pool_lock); 2976 spin_unlock(&tcp_md5sig_pool_lock);
2959 2977
2960 if (p) 2978 if (p)
2961 return *per_cpu_ptr(p, smp_processor_id()); 2979 return *this_cpu_ptr(p);
2962 2980
2963 local_bh_enable(); 2981 local_bh_enable();
2964 return NULL; 2982 return NULL;
@@ -2986,7 +3004,6 @@ int tcp_md5_hash_header(struct tcp_md5sig_pool *hp,
2986 th->check = old_checksum; 3004 th->check = old_checksum;
2987 return err; 3005 return err;
2988} 3006}
2989
2990EXPORT_SYMBOL(tcp_md5_hash_header); 3007EXPORT_SYMBOL(tcp_md5_hash_header);
2991 3008
2992int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp, 3009int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
@@ -2999,6 +3016,7 @@ int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
2999 const unsigned head_data_len = skb_headlen(skb) > header_len ? 3016 const unsigned head_data_len = skb_headlen(skb) > header_len ?
3000 skb_headlen(skb) - header_len : 0; 3017 skb_headlen(skb) - header_len : 0;
3001 const struct skb_shared_info *shi = skb_shinfo(skb); 3018 const struct skb_shared_info *shi = skb_shinfo(skb);
3019 struct sk_buff *frag_iter;
3002 3020
3003 sg_init_table(&sg, 1); 3021 sg_init_table(&sg, 1);
3004 3022
@@ -3013,9 +3031,12 @@ int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
3013 return 1; 3031 return 1;
3014 } 3032 }
3015 3033
3034 skb_walk_frags(skb, frag_iter)
3035 if (tcp_md5_hash_skb_data(hp, frag_iter, 0))
3036 return 1;
3037
3016 return 0; 3038 return 0;
3017} 3039}
3018
3019EXPORT_SYMBOL(tcp_md5_hash_skb_data); 3040EXPORT_SYMBOL(tcp_md5_hash_skb_data);
3020 3041
3021int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, struct tcp_md5sig_key *key) 3042int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, struct tcp_md5sig_key *key)
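The tcp_md5_hash_skb_data() hunk closes a real hole: an skb may chain further skbs on its frag_list, and their payload previously never reached the MD5 hash. The added skb_walk_frags() loop recurses into each chained fragment. A userspace model with a toy buffer type, hashing replaced by a print:

#include <stdio.h>

struct buf {
	const char *data;
	struct buf *next;	/* sibling in a frag list */
	struct buf *frag_list;	/* head of chained sub-buffers */
};

static int hash_buf(const struct buf *b, unsigned header_len)
{
	const struct buf *it;

	printf("hash '%s' from offset %u\n", b->data, header_len);

	/* the added loop: recurse into every chained fragment */
	for (it = b->frag_list; it; it = it->next)
		if (hash_buf(it, 0))
			return 1;
	return 0;
}

int main(void)
{
	struct buf f2 = { "frag2", NULL, NULL };
	struct buf f1 = { "frag1", &f2, NULL };
	struct buf head = { "head", NULL, &f1 };

	return hash_buf(&head, 4);
}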
@@ -3025,7 +3046,6 @@ int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, struct tcp_md5sig_key *key)
3025 sg_init_one(&sg, key->key, key->keylen); 3046 sg_init_one(&sg, key->key, key->keylen);
3026 return crypto_hash_update(&hp->md5_desc, &sg, key->keylen); 3047 return crypto_hash_update(&hp->md5_desc, &sg, key->keylen);
3027} 3048}
3028
3029EXPORT_SYMBOL(tcp_md5_hash_key); 3049EXPORT_SYMBOL(tcp_md5_hash_key);
3030 3050
3031#endif 3051#endif
@@ -3297,16 +3317,3 @@ void __init tcp_init(void)
3297 tcp_secret_retiring = &tcp_secret_two; 3317 tcp_secret_retiring = &tcp_secret_two;
3298 tcp_secret_secondary = &tcp_secret_two; 3318 tcp_secret_secondary = &tcp_secret_two;
3299} 3319}
3300
3301EXPORT_SYMBOL(tcp_close);
3302EXPORT_SYMBOL(tcp_disconnect);
3303EXPORT_SYMBOL(tcp_getsockopt);
3304EXPORT_SYMBOL(tcp_ioctl);
3305EXPORT_SYMBOL(tcp_poll);
3306EXPORT_SYMBOL(tcp_read_sock);
3307EXPORT_SYMBOL(tcp_recvmsg);
3308EXPORT_SYMBOL(tcp_sendmsg);
3309EXPORT_SYMBOL(tcp_splice_read);
3310EXPORT_SYMBOL(tcp_sendpage);
3311EXPORT_SYMBOL(tcp_setsockopt);
3312EXPORT_SYMBOL(tcp_shutdown);
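The deleted trailer above, like the scattered blank-line removals through this file, serves one cleanup: every EXPORT_SYMBOL() now sits directly under the function it exports rather than in a list at the bottom of the file. The resulting shape, shown with a hypothetical function and a userspace stub so the fragment compiles on its own:

/* Userspace stub for the kernel macro (GNU C __typeof__). */
#define EXPORT_SYMBOL(sym) extern __typeof__(sym) sym

int tcp_frob_example(void)		/* hypothetical function */
{
	return 0;
}
EXPORT_SYMBOL(tcp_frob_example);	/* export right after the definition */

int main(void)
{
	return tcp_frob_example();
}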
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 548d575e6cc6..3c426cb318e7 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -78,10 +78,13 @@ int sysctl_tcp_window_scaling __read_mostly = 1;
78int sysctl_tcp_sack __read_mostly = 1; 78int sysctl_tcp_sack __read_mostly = 1;
79int sysctl_tcp_fack __read_mostly = 1; 79int sysctl_tcp_fack __read_mostly = 1;
80int sysctl_tcp_reordering __read_mostly = TCP_FASTRETRANS_THRESH; 80int sysctl_tcp_reordering __read_mostly = TCP_FASTRETRANS_THRESH;
81EXPORT_SYMBOL(sysctl_tcp_reordering);
81int sysctl_tcp_ecn __read_mostly = 2; 82int sysctl_tcp_ecn __read_mostly = 2;
83EXPORT_SYMBOL(sysctl_tcp_ecn);
82int sysctl_tcp_dsack __read_mostly = 1; 84int sysctl_tcp_dsack __read_mostly = 1;
83int sysctl_tcp_app_win __read_mostly = 31; 85int sysctl_tcp_app_win __read_mostly = 31;
84int sysctl_tcp_adv_win_scale __read_mostly = 2; 86int sysctl_tcp_adv_win_scale __read_mostly = 2;
87EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
85 88
86int sysctl_tcp_stdurg __read_mostly; 89int sysctl_tcp_stdurg __read_mostly;
87int sysctl_tcp_rfc1337 __read_mostly; 90int sysctl_tcp_rfc1337 __read_mostly;
@@ -419,6 +422,7 @@ void tcp_initialize_rcv_mss(struct sock *sk)
419 422
420 inet_csk(sk)->icsk_ack.rcv_mss = hint; 423 inet_csk(sk)->icsk_ack.rcv_mss = hint;
421} 424}
425EXPORT_SYMBOL(tcp_initialize_rcv_mss);
422 426
423/* Receiver "autotuning" code. 427/* Receiver "autotuning" code.
424 * 428 *
@@ -2938,6 +2942,7 @@ void tcp_simple_retransmit(struct sock *sk)
2938 } 2942 }
2939 tcp_xmit_retransmit_queue(sk); 2943 tcp_xmit_retransmit_queue(sk);
2940} 2944}
2945EXPORT_SYMBOL(tcp_simple_retransmit);
2941 2946
2942/* Process an event, which can update packets-in-flight not trivially. 2947/* Process an event, which can update packets-in-flight not trivially.
2943 * Main goal of this function is to calculate new estimate for left_out, 2948 * Main goal of this function is to calculate new estimate for left_out,
@@ -3286,7 +3291,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
3286 * connection startup slow start one packet too 3291 * connection startup slow start one packet too
3287 * quickly. This is severely frowned upon behavior. 3292 * quickly. This is severely frowned upon behavior.
3288 */ 3293 */
3289 if (!(scb->flags & TCPCB_FLAG_SYN)) { 3294 if (!(scb->flags & TCPHDR_SYN)) {
3290 flag |= FLAG_DATA_ACKED; 3295 flag |= FLAG_DATA_ACKED;
3291 } else { 3296 } else {
3292 flag |= FLAG_SYN_ACKED; 3297 flag |= FLAG_SYN_ACKED;
@@ -3858,6 +3863,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
3858 } 3863 }
3859 } 3864 }
3860} 3865}
3866EXPORT_SYMBOL(tcp_parse_options);
3861 3867
3862static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, struct tcphdr *th) 3868static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, struct tcphdr *th)
3863{ 3869{
@@ -3931,6 +3937,7 @@ u8 *tcp_parse_md5sig_option(struct tcphdr *th)
3931 } 3937 }
3932 return NULL; 3938 return NULL;
3933} 3939}
3940EXPORT_SYMBOL(tcp_parse_md5sig_option);
3934#endif 3941#endif
3935 3942
3936static inline void tcp_store_ts_recent(struct tcp_sock *tp) 3943static inline void tcp_store_ts_recent(struct tcp_sock *tp)
@@ -5432,6 +5439,7 @@ discard:
5432 __kfree_skb(skb); 5439 __kfree_skb(skb);
5433 return 0; 5440 return 0;
5434} 5441}
5442EXPORT_SYMBOL(tcp_rcv_established);
5435 5443
5436static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, 5444static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
5437 struct tcphdr *th, unsigned len) 5445 struct tcphdr *th, unsigned len)
@@ -5931,14 +5939,4 @@ discard:
5931 } 5939 }
5932 return 0; 5940 return 0;
5933} 5941}
5934
5935EXPORT_SYMBOL(sysctl_tcp_ecn);
5936EXPORT_SYMBOL(sysctl_tcp_reordering);
5937EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
5938EXPORT_SYMBOL(tcp_parse_options);
5939#ifdef CONFIG_TCP_MD5SIG
5940EXPORT_SYMBOL(tcp_parse_md5sig_option);
5941#endif
5942EXPORT_SYMBOL(tcp_rcv_established);
5943EXPORT_SYMBOL(tcp_rcv_state_process); 5942EXPORT_SYMBOL(tcp_rcv_state_process);
5944EXPORT_SYMBOL(tcp_initialize_rcv_mss);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index fe193e53af44..020766292bb0 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -84,6 +84,7 @@
84 84
85int sysctl_tcp_tw_reuse __read_mostly; 85int sysctl_tcp_tw_reuse __read_mostly;
86int sysctl_tcp_low_latency __read_mostly; 86int sysctl_tcp_low_latency __read_mostly;
87EXPORT_SYMBOL(sysctl_tcp_low_latency);
87 88
88 89
89#ifdef CONFIG_TCP_MD5SIG 90#ifdef CONFIG_TCP_MD5SIG
@@ -100,6 +101,7 @@ struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
100#endif 101#endif
101 102
102struct inet_hashinfo tcp_hashinfo; 103struct inet_hashinfo tcp_hashinfo;
104EXPORT_SYMBOL(tcp_hashinfo);
103 105
104static inline __u32 tcp_v4_init_sequence(struct sk_buff *skb) 106static inline __u32 tcp_v4_init_sequence(struct sk_buff *skb)
105{ 107{
@@ -139,7 +141,6 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
139 141
140 return 0; 142 return 0;
141} 143}
142
143EXPORT_SYMBOL_GPL(tcp_twsk_unique); 144EXPORT_SYMBOL_GPL(tcp_twsk_unique);
144 145
145/* This will initiate an outgoing connection. */ 146/* This will initiate an outgoing connection. */
@@ -204,10 +205,12 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
204 * TIME-WAIT * and initialize rx_opt.ts_recent from it, 205 * TIME-WAIT * and initialize rx_opt.ts_recent from it,
205 * when trying new connection. 206 * when trying new connection.
206 */ 207 */
207 if (peer != NULL && 208 if (peer) {
208 (u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) { 209 inet_peer_refcheck(peer);
209 tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp; 210 if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
210 tp->rx_opt.ts_recent = peer->tcp_ts; 211 tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
212 tp->rx_opt.ts_recent = peer->tcp_ts;
213 }
211 } 214 }
212 } 215 }
213 216
@@ -237,7 +240,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
237 240
238 /* OK, now commit destination to socket. */ 241 /* OK, now commit destination to socket. */
239 sk->sk_gso_type = SKB_GSO_TCPV4; 242 sk->sk_gso_type = SKB_GSO_TCPV4;
240 sk_setup_caps(sk, &rt->u.dst); 243 sk_setup_caps(sk, &rt->dst);
241 244
242 if (!tp->write_seq) 245 if (!tp->write_seq)
243 tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr, 246 tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
@@ -265,6 +268,7 @@ failure:
265 inet->inet_dport = 0; 268 inet->inet_dport = 0;
266 return err; 269 return err;
267} 270}
271EXPORT_SYMBOL(tcp_v4_connect);
268 272
269/* 273/*
270 * This routine does path mtu discovery as defined in RFC1191. 274 * This routine does path mtu discovery as defined in RFC1191.
@@ -543,6 +547,7 @@ void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
543 547
544 __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr); 548 __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
545} 549}
550EXPORT_SYMBOL(tcp_v4_send_check);
546 551
547int tcp_v4_gso_send_check(struct sk_buff *skb) 552int tcp_v4_gso_send_check(struct sk_buff *skb)
548{ 553{
@@ -793,19 +798,20 @@ static void tcp_v4_reqsk_destructor(struct request_sock *req)
793 kfree(inet_rsk(req)->opt); 798 kfree(inet_rsk(req)->opt);
794} 799}
795 800
796#ifdef CONFIG_SYN_COOKIES 801static void syn_flood_warning(const struct sk_buff *skb)
797static void syn_flood_warning(struct sk_buff *skb)
798{ 802{
799 static unsigned long warntime; 803 const char *msg;
800 804
801 if (time_after(jiffies, (warntime + HZ * 60))) { 805#ifdef CONFIG_SYN_COOKIES
802 warntime = jiffies; 806 if (sysctl_tcp_syncookies)
803 printk(KERN_INFO 807 msg = "Sending cookies";
804 "possible SYN flooding on port %d. Sending cookies.\n", 808 else
805 ntohs(tcp_hdr(skb)->dest));
806 }
807}
808#endif 809#endif
810 msg = "Dropping request";
811
812 pr_info("TCP: Possible SYN flooding on port %d. %s.\n",
813 ntohs(tcp_hdr(skb)->dest), msg);
814}
809 815
810/* 816/*
811 * Save and compile IPv4 options into the request_sock if needed. 817 * Save and compile IPv4 options into the request_sock if needed.
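The rewritten syn_flood_warning() loses its private once-a-minute timer and its CONFIG_SYN_COOKIES-only compilation: the message now reflects whether cookies will actually be sent, and rate limiting moves to the caller via net_ratelimit() (see the conn_request hunk below). A userspace model with a one-second stand-in for the rate limiter:

#include <stdio.h>
#include <time.h>

/* Crude stand-in for net_ratelimit(): at most one pass per second. */
static int net_ratelimit(void)
{
	static time_t last;
	time_t now = time(NULL);

	if (now == last)
		return 0;
	last = now;
	return 1;
}

static int sysctl_tcp_syncookies = 1;	/* hypothetical knob */

static void syn_flood_warning(int port)
{
	const char *msg = sysctl_tcp_syncookies ? "Sending cookies"
						: "Dropping request";

	printf("TCP: Possible SYN flooding on port %d. %s.\n", port, msg);
}

int main(void)
{
	for (int i = 0; i < 3; i++)
		if (net_ratelimit())	/* limited at the call site */
			syn_flood_warning(80);
	return 0;
}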
@@ -857,7 +863,6 @@ struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
857{ 863{
858 return tcp_v4_md5_do_lookup(sk, inet_sk(addr_sk)->inet_daddr); 864 return tcp_v4_md5_do_lookup(sk, inet_sk(addr_sk)->inet_daddr);
859} 865}
860
861EXPORT_SYMBOL(tcp_v4_md5_lookup); 866EXPORT_SYMBOL(tcp_v4_md5_lookup);
862 867
863static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk, 868static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
@@ -924,7 +929,6 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
924 } 929 }
925 return 0; 930 return 0;
926} 931}
927
928EXPORT_SYMBOL(tcp_v4_md5_do_add); 932EXPORT_SYMBOL(tcp_v4_md5_do_add);
929 933
930static int tcp_v4_md5_add_func(struct sock *sk, struct sock *addr_sk, 934static int tcp_v4_md5_add_func(struct sock *sk, struct sock *addr_sk,
@@ -962,7 +966,6 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
962 } 966 }
963 return -ENOENT; 967 return -ENOENT;
964} 968}
965
966EXPORT_SYMBOL(tcp_v4_md5_do_del); 969EXPORT_SYMBOL(tcp_v4_md5_do_del);
967 970
968static void tcp_v4_clear_md5_list(struct sock *sk) 971static void tcp_v4_clear_md5_list(struct sock *sk)
@@ -1135,7 +1138,6 @@ clear_hash_noput:
1135 memset(md5_hash, 0, 16); 1138 memset(md5_hash, 0, 16);
1136 return 1; 1139 return 1;
1137} 1140}
1138
1139EXPORT_SYMBOL(tcp_v4_md5_hash_skb); 1141EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1140 1142
1141static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb) 1143static int tcp_v4_inbound_md5_hash(struct sock *sk, struct sk_buff *skb)
@@ -1243,6 +1245,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1243 * evidently real one. 1245 * evidently real one.
1244 */ 1246 */
1245 if (inet_csk_reqsk_queue_is_full(sk) && !isn) { 1247 if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1248 if (net_ratelimit())
1249 syn_flood_warning(skb);
1246#ifdef CONFIG_SYN_COOKIES 1250#ifdef CONFIG_SYN_COOKIES
1247 if (sysctl_tcp_syncookies) { 1251 if (sysctl_tcp_syncookies) {
1248 want_cookie = 1; 1252 want_cookie = 1;
@@ -1323,15 +1327,12 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1323 if (security_inet_conn_request(sk, skb, req)) 1327 if (security_inet_conn_request(sk, skb, req))
1324 goto drop_and_free; 1328 goto drop_and_free;
1325 1329
1326 if (!want_cookie) 1330 if (!want_cookie || tmp_opt.tstamp_ok)
1327 TCP_ECN_create_request(req, tcp_hdr(skb)); 1331 TCP_ECN_create_request(req, tcp_hdr(skb));
1328 1332
1329 if (want_cookie) { 1333 if (want_cookie) {
1330#ifdef CONFIG_SYN_COOKIES
1331 syn_flood_warning(skb);
1332 req->cookie_ts = tmp_opt.tstamp_ok;
1333#endif
1334 isn = cookie_v4_init_sequence(sk, skb, &req->mss); 1334 isn = cookie_v4_init_sequence(sk, skb, &req->mss);
1335 req->cookie_ts = tmp_opt.tstamp_ok;
1335 } else if (!isn) { 1336 } else if (!isn) {
1336 struct inet_peer *peer = NULL; 1337 struct inet_peer *peer = NULL;
1337 1338
@@ -1349,6 +1350,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1349 (dst = inet_csk_route_req(sk, req)) != NULL && 1350 (dst = inet_csk_route_req(sk, req)) != NULL &&
1350 (peer = rt_get_peer((struct rtable *)dst)) != NULL && 1351 (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
1351 peer->v4daddr == saddr) { 1352 peer->v4daddr == saddr) {
1353 inet_peer_refcheck(peer);
1352 if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL && 1354 if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
1353 (s32)(peer->tcp_ts - req->ts_recent) > 1355 (s32)(peer->tcp_ts - req->ts_recent) >
1354 TCP_PAWS_WINDOW) { 1356 TCP_PAWS_WINDOW) {
@@ -1393,6 +1395,7 @@ drop_and_free:
1393drop: 1395drop:
1394 return 0; 1396 return 0;
1395} 1397}
1398EXPORT_SYMBOL(tcp_v4_conn_request);
1396 1399
1397 1400
1398/* 1401/*
@@ -1478,6 +1481,7 @@ exit:
1478 dst_release(dst); 1481 dst_release(dst);
1479 return NULL; 1482 return NULL;
1480} 1483}
1484EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1481 1485
1482static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb) 1486static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1483{ 1487{
@@ -1504,7 +1508,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1504 } 1508 }
1505 1509
1506#ifdef CONFIG_SYN_COOKIES 1510#ifdef CONFIG_SYN_COOKIES
1507 if (!th->rst && !th->syn && th->ack) 1511 if (!th->syn)
1508 sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt)); 1512 sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
1509#endif 1513#endif
1510 return sk; 1514 return sk;
@@ -1607,6 +1611,7 @@ csum_err:
1607 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS); 1611 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1608 goto discard; 1612 goto discard;
1609} 1613}
1614EXPORT_SYMBOL(tcp_v4_do_rcv);
1610 1615
1611/* 1616/*
1612 * From tcp_input.c 1617 * From tcp_input.c
@@ -1793,6 +1798,7 @@ int tcp_v4_remember_stamp(struct sock *sk)
1793 1798
1794 return 0; 1799 return 0;
1795} 1800}
1801EXPORT_SYMBOL(tcp_v4_remember_stamp);
1796 1802
1797int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw) 1803int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw)
1798{ 1804{
@@ -1832,6 +1838,7 @@ const struct inet_connection_sock_af_ops ipv4_specific = {
1832 .compat_getsockopt = compat_ip_getsockopt, 1838 .compat_getsockopt = compat_ip_getsockopt,
1833#endif 1839#endif
1834}; 1840};
1841EXPORT_SYMBOL(ipv4_specific);
1835 1842
1836#ifdef CONFIG_TCP_MD5SIG 1843#ifdef CONFIG_TCP_MD5SIG
1837static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = { 1844static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
@@ -1960,7 +1967,6 @@ void tcp_v4_destroy_sock(struct sock *sk)
1960 1967
1961 percpu_counter_dec(&tcp_sockets_allocated); 1968 percpu_counter_dec(&tcp_sockets_allocated);
1962} 1969}
1963
1964EXPORT_SYMBOL(tcp_v4_destroy_sock); 1970EXPORT_SYMBOL(tcp_v4_destroy_sock);
1965 1971
1966#ifdef CONFIG_PROC_FS 1972#ifdef CONFIG_PROC_FS
@@ -1978,6 +1984,11 @@ static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
1978 hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL; 1984 hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
1979} 1985}
1980 1986
1987/*
1988 * Get next listener socket following cur. If cur is NULL, get the first socket
1989 * starting from the bucket given in st->bucket; when st->bucket is zero the
1990 * very first socket in the hash table is returned.
1991 */
1981static void *listening_get_next(struct seq_file *seq, void *cur) 1992static void *listening_get_next(struct seq_file *seq, void *cur)
1982{ 1993{
1983 struct inet_connection_sock *icsk; 1994 struct inet_connection_sock *icsk;
@@ -1988,14 +1999,15 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
1988 struct net *net = seq_file_net(seq); 1999 struct net *net = seq_file_net(seq);
1989 2000
1990 if (!sk) { 2001 if (!sk) {
1991 st->bucket = 0; 2002 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1992 ilb = &tcp_hashinfo.listening_hash[0];
1993 spin_lock_bh(&ilb->lock); 2003 spin_lock_bh(&ilb->lock);
1994 sk = sk_nulls_head(&ilb->head); 2004 sk = sk_nulls_head(&ilb->head);
2005 st->offset = 0;
1995 goto get_sk; 2006 goto get_sk;
1996 } 2007 }
1997 ilb = &tcp_hashinfo.listening_hash[st->bucket]; 2008 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1998 ++st->num; 2009 ++st->num;
2010 ++st->offset;
1999 2011
2000 if (st->state == TCP_SEQ_STATE_OPENREQ) { 2012 if (st->state == TCP_SEQ_STATE_OPENREQ) {
2001 struct request_sock *req = cur; 2013 struct request_sock *req = cur;
@@ -2010,6 +2022,7 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
2010 } 2022 }
2011 req = req->dl_next; 2023 req = req->dl_next;
2012 } 2024 }
2025 st->offset = 0;
2013 if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries) 2026 if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
2014 break; 2027 break;
2015get_req: 2028get_req:
@@ -2045,6 +2058,7 @@ start_req:
2045 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock); 2058 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2046 } 2059 }
2047 spin_unlock_bh(&ilb->lock); 2060 spin_unlock_bh(&ilb->lock);
2061 st->offset = 0;
2048 if (++st->bucket < INET_LHTABLE_SIZE) { 2062 if (++st->bucket < INET_LHTABLE_SIZE) {
2049 ilb = &tcp_hashinfo.listening_hash[st->bucket]; 2063 ilb = &tcp_hashinfo.listening_hash[st->bucket];
2050 spin_lock_bh(&ilb->lock); 2064 spin_lock_bh(&ilb->lock);
@@ -2058,7 +2072,12 @@ out:
2058 2072
2059static void *listening_get_idx(struct seq_file *seq, loff_t *pos) 2073static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2060{ 2074{
2061 void *rc = listening_get_next(seq, NULL); 2075 struct tcp_iter_state *st = seq->private;
2076 void *rc;
2077
2078 st->bucket = 0;
2079 st->offset = 0;
2080 rc = listening_get_next(seq, NULL);
2062 2081
2063 while (rc && *pos) { 2082 while (rc && *pos) {
2064 rc = listening_get_next(seq, rc); 2083 rc = listening_get_next(seq, rc);
@@ -2073,13 +2092,18 @@ static inline int empty_bucket(struct tcp_iter_state *st)
2073 hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain); 2092 hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
2074} 2093}
2075 2094
2095/*
2096 * Get the first established socket starting from the bucket given in st->bucket.
2097 * If st->bucket is zero, the very first socket in the hash is returned.
2098 */
2076static void *established_get_first(struct seq_file *seq) 2099static void *established_get_first(struct seq_file *seq)
2077{ 2100{
2078 struct tcp_iter_state *st = seq->private; 2101 struct tcp_iter_state *st = seq->private;
2079 struct net *net = seq_file_net(seq); 2102 struct net *net = seq_file_net(seq);
2080 void *rc = NULL; 2103 void *rc = NULL;
2081 2104
2082 for (st->bucket = 0; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) { 2105 st->offset = 0;
2106 for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
2083 struct sock *sk; 2107 struct sock *sk;
2084 struct hlist_nulls_node *node; 2108 struct hlist_nulls_node *node;
2085 struct inet_timewait_sock *tw; 2109 struct inet_timewait_sock *tw;
@@ -2124,6 +2148,7 @@ static void *established_get_next(struct seq_file *seq, void *cur)
2124 struct net *net = seq_file_net(seq); 2148 struct net *net = seq_file_net(seq);
2125 2149
2126 ++st->num; 2150 ++st->num;
2151 ++st->offset;
2127 2152
2128 if (st->state == TCP_SEQ_STATE_TIME_WAIT) { 2153 if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
2129 tw = cur; 2154 tw = cur;
@@ -2140,6 +2165,7 @@ get_tw:
2140 st->state = TCP_SEQ_STATE_ESTABLISHED; 2165 st->state = TCP_SEQ_STATE_ESTABLISHED;
2141 2166
2142 /* Look for next non empty bucket */ 2167 /* Look for next non empty bucket */
2168 st->offset = 0;
2143 while (++st->bucket <= tcp_hashinfo.ehash_mask && 2169 while (++st->bucket <= tcp_hashinfo.ehash_mask &&
2144 empty_bucket(st)) 2170 empty_bucket(st))
2145 ; 2171 ;
@@ -2167,7 +2193,11 @@ out:
2167 2193
2168static void *established_get_idx(struct seq_file *seq, loff_t pos) 2194static void *established_get_idx(struct seq_file *seq, loff_t pos)
2169{ 2195{
2170 void *rc = established_get_first(seq); 2196 struct tcp_iter_state *st = seq->private;
2197 void *rc;
2198
2199 st->bucket = 0;
2200 rc = established_get_first(seq);
2171 2201
2172 while (rc && pos) { 2202 while (rc && pos) {
2173 rc = established_get_next(seq, rc); 2203 rc = established_get_next(seq, rc);
@@ -2192,24 +2222,72 @@ static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2192 return rc; 2222 return rc;
2193} 2223}
2194 2224
2225static void *tcp_seek_last_pos(struct seq_file *seq)
2226{
2227 struct tcp_iter_state *st = seq->private;
2228 int offset = st->offset;
2229 int orig_num = st->num;
2230 void *rc = NULL;
2231
2232 switch (st->state) {
2233 case TCP_SEQ_STATE_OPENREQ:
2234 case TCP_SEQ_STATE_LISTENING:
2235 if (st->bucket >= INET_LHTABLE_SIZE)
2236 break;
2237 st->state = TCP_SEQ_STATE_LISTENING;
2238 rc = listening_get_next(seq, NULL);
2239 while (offset-- && rc)
2240 rc = listening_get_next(seq, rc);
2241 if (rc)
2242 break;
2243 st->bucket = 0;
2244 /* Fallthrough */
2245 case TCP_SEQ_STATE_ESTABLISHED:
2246 case TCP_SEQ_STATE_TIME_WAIT:
2247 st->state = TCP_SEQ_STATE_ESTABLISHED;
2248 if (st->bucket > tcp_hashinfo.ehash_mask)
2249 break;
2250 rc = established_get_first(seq);
2251 while (offset-- && rc)
2252 rc = established_get_next(seq, rc);
2253 }
2254
2255 st->num = orig_num;
2256
2257 return rc;
2258}
2259
2195static void *tcp_seq_start(struct seq_file *seq, loff_t *pos) 2260static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2196{ 2261{
2197 struct tcp_iter_state *st = seq->private; 2262 struct tcp_iter_state *st = seq->private;
2263 void *rc;
2264
2265 if (*pos && *pos == st->last_pos) {
2266 rc = tcp_seek_last_pos(seq);
2267 if (rc)
2268 goto out;
2269 }
2270
2198 st->state = TCP_SEQ_STATE_LISTENING; 2271 st->state = TCP_SEQ_STATE_LISTENING;
2199 st->num = 0; 2272 st->num = 0;
2200 return *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; 2273 st->bucket = 0;
2274 st->offset = 0;
2275 rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2276
2277out:
2278 st->last_pos = *pos;
2279 return rc;
2201} 2280}
2202 2281
2203static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos) 2282static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2204{ 2283{
2284 struct tcp_iter_state *st = seq->private;
2205 void *rc = NULL; 2285 void *rc = NULL;
2206 struct tcp_iter_state *st;
2207 2286
2208 if (v == SEQ_START_TOKEN) { 2287 if (v == SEQ_START_TOKEN) {
2209 rc = tcp_get_idx(seq, 0); 2288 rc = tcp_get_idx(seq, 0);
2210 goto out; 2289 goto out;
2211 } 2290 }
2212 st = seq->private;
2213 2291
2214 switch (st->state) { 2292 switch (st->state) {
2215 case TCP_SEQ_STATE_OPENREQ: 2293 case TCP_SEQ_STATE_OPENREQ:
@@ -2217,6 +2295,8 @@ static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2217 rc = listening_get_next(seq, v); 2295 rc = listening_get_next(seq, v);
2218 if (!rc) { 2296 if (!rc) {
2219 st->state = TCP_SEQ_STATE_ESTABLISHED; 2297 st->state = TCP_SEQ_STATE_ESTABLISHED;
2298 st->bucket = 0;
2299 st->offset = 0;
2220 rc = established_get_first(seq); 2300 rc = established_get_first(seq);
2221 } 2301 }
2222 break; 2302 break;
@@ -2227,6 +2307,7 @@ static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2227 } 2307 }
2228out: 2308out:
2229 ++*pos; 2309 ++*pos;
2310 st->last_pos = *pos;
2230 return rc; 2311 return rc;
2231} 2312}
2232 2313
@@ -2265,6 +2346,7 @@ static int tcp_seq_open(struct inode *inode, struct file *file)
2265 2346
2266 s = ((struct seq_file *)file->private_data)->private; 2347 s = ((struct seq_file *)file->private_data)->private;
2267 s->family = afinfo->family; 2348 s->family = afinfo->family;
2349 s->last_pos = 0;
2268 return 0; 2350 return 0;
2269} 2351}
2270 2352
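The st->bucket/st->offset/st->last_pos bookkeeping added in the hunks above fixes a long-standing /proc/net/tcp inefficiency: previously every read() restarted the hash walk from bucket zero and skipped *pos entries to find its place, so dumping a large table cost O(n) per read and O(n^2) overall. With the cursor cached, tcp_seq_start() calls tcp_seek_last_pos() and resumes from the saved bucket and offset whenever *pos matches the stored last_pos. A self-contained userspace sketch of the same resume-cursor idea (names and data are illustrative, not the kernel's):

    #include <stdio.h>

    /* Toy resume cursor in the spirit of tcp_iter_state: remember where the
     * previous read stopped (bucket/offset/last_pos) so the next read can
     * continue in O(1) instead of re-walking the table from bucket 0.
     */
    struct iter {
            int bucket, offset;
            long last_pos;          /* number of elements already consumed */
    };

    #define NBUCKETS 4
    #define NPER     3
    static int table[NBUCKETS][NPER] = {
            { 1, 2, 3 }, { 4, 5, 6 }, { 7, 8, 9 }, { 10, 11, 12 }
    };

    static int *iter_next(struct iter *it)
    {
            if (++it->offset >= NPER) {
                    it->offset = 0;
                    if (++it->bucket >= NBUCKETS)
                            return NULL;
            }
            return &table[it->bucket][it->offset];
    }

    static int *iter_start(struct iter *it, long pos)
    {
            int *p = &table[0][0];

            if (pos && pos == it->last_pos)          /* cheap resume */
                    return &table[it->bucket][it->offset];
            it->bucket = it->offset = 0;             /* full rewind */
            while (p && pos--)
                    p = iter_next(it);
            return p;
    }

    int main(void)
    {
            struct iter it = { 0, 0, 0 };
            long pos = 0;
            int *p;

            /* first "read": consume five elements */
            for (p = iter_start(&it, pos); p && pos < 5; p = iter_next(&it)) {
                    printf("%d ", *p);
                    it.last_pos = ++pos;
            }
            /* second "read" resumes at element six without rewinding */
            for (p = iter_start(&it, it.last_pos); p; p = iter_next(&it)) {
                    printf("%d ", *p);
                    it.last_pos = ++pos;
            }
            printf("\n");
            return 0;
    }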
@@ -2288,11 +2370,13 @@ int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2288 rc = -ENOMEM; 2370 rc = -ENOMEM;
2289 return rc; 2371 return rc;
2290} 2372}
2373EXPORT_SYMBOL(tcp_proc_register);
2291 2374
2292void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo) 2375void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2293{ 2376{
2294 proc_net_remove(net, afinfo->name); 2377 proc_net_remove(net, afinfo->name);
2295} 2378}
2379EXPORT_SYMBOL(tcp_proc_unregister);
2296 2380
2297static void get_openreq4(struct sock *sk, struct request_sock *req, 2381static void get_openreq4(struct sock *sk, struct request_sock *req,
2298 struct seq_file *f, int i, int uid, int *len) 2382 struct seq_file *f, int i, int uid, int *len)
@@ -2516,6 +2600,8 @@ struct proto tcp_prot = {
2516 .setsockopt = tcp_setsockopt, 2600 .setsockopt = tcp_setsockopt,
2517 .getsockopt = tcp_getsockopt, 2601 .getsockopt = tcp_getsockopt,
2518 .recvmsg = tcp_recvmsg, 2602 .recvmsg = tcp_recvmsg,
2603 .sendmsg = tcp_sendmsg,
2604 .sendpage = tcp_sendpage,
2519 .backlog_rcv = tcp_v4_do_rcv, 2605 .backlog_rcv = tcp_v4_do_rcv,
2520 .hash = inet_hash, 2606 .hash = inet_hash,
2521 .unhash = inet_unhash, 2607 .unhash = inet_unhash,
@@ -2534,11 +2620,13 @@ struct proto tcp_prot = {
2534 .twsk_prot = &tcp_timewait_sock_ops, 2620 .twsk_prot = &tcp_timewait_sock_ops,
2535 .rsk_prot = &tcp_request_sock_ops, 2621 .rsk_prot = &tcp_request_sock_ops,
2536 .h.hashinfo = &tcp_hashinfo, 2622 .h.hashinfo = &tcp_hashinfo,
2623 .no_autobind = true,
2537#ifdef CONFIG_COMPAT 2624#ifdef CONFIG_COMPAT
2538 .compat_setsockopt = compat_tcp_setsockopt, 2625 .compat_setsockopt = compat_tcp_setsockopt,
2539 .compat_getsockopt = compat_tcp_getsockopt, 2626 .compat_getsockopt = compat_tcp_getsockopt,
2540#endif 2627#endif
2541}; 2628};
2629EXPORT_SYMBOL(tcp_prot);
2542 2630
2543 2631
2544static int __net_init tcp_sk_init(struct net *net) 2632static int __net_init tcp_sk_init(struct net *net)
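tcp_prot now supplies .sendmsg/.sendpage itself and sets .no_autobind = true; together with the inet6_stream_ops change later in this diff (tcp_sendmsg/tcp_sendpage replaced by inet_sendmsg/inet_sendpage), sends go through the generic inet wrappers, which are presumed to consult no_autobind instead of TCP having to bypass the wrappers to avoid autobinding a port on send. A sketch of the assumed wrapper logic (simplified; the exact checks may differ):

    int inet_sendmsg(struct kiocb *iocb, struct socket *sock,
                     struct msghdr *msg, size_t size)
    {
            struct sock *sk = sock->sk;

            /* autobind a local port unless the protocol opted out;
             * tcp_prot sets .no_autobind = true, so TCP skips this.
             */
            if (!inet_sk(sk)->inet_num && !sk->sk_prot->no_autobind &&
                inet_autobind(sk))
                    return -EAGAIN;

            return sk->sk_prot->sendmsg(iocb, sk, msg, size);
    }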
@@ -2569,20 +2657,3 @@ void __init tcp_v4_init(void)
2569 if (register_pernet_subsys(&tcp_sk_ops)) 2657 if (register_pernet_subsys(&tcp_sk_ops))
2570 panic("Failed to create the TCP control socket.\n"); 2658 panic("Failed to create the TCP control socket.\n");
2571} 2659}
2572
2573EXPORT_SYMBOL(ipv4_specific);
2574EXPORT_SYMBOL(tcp_hashinfo);
2575EXPORT_SYMBOL(tcp_prot);
2576EXPORT_SYMBOL(tcp_v4_conn_request);
2577EXPORT_SYMBOL(tcp_v4_connect);
2578EXPORT_SYMBOL(tcp_v4_do_rcv);
2579EXPORT_SYMBOL(tcp_v4_remember_stamp);
2580EXPORT_SYMBOL(tcp_v4_send_check);
2581EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
2582
2583#ifdef CONFIG_PROC_FS
2584EXPORT_SYMBOL(tcp_proc_register);
2585EXPORT_SYMBOL(tcp_proc_unregister);
2586#endif
2587EXPORT_SYMBOL(sysctl_tcp_low_latency);
2588
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 794c2e122a41..f25b56cb85cb 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -47,7 +47,6 @@ struct inet_timewait_death_row tcp_death_row = {
47 .twcal_timer = TIMER_INITIALIZER(inet_twdr_twcal_tick, 0, 47 .twcal_timer = TIMER_INITIALIZER(inet_twdr_twcal_tick, 0,
48 (unsigned long)&tcp_death_row), 48 (unsigned long)&tcp_death_row),
49}; 49};
50
51EXPORT_SYMBOL_GPL(tcp_death_row); 50EXPORT_SYMBOL_GPL(tcp_death_row);
52 51
53static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win) 52static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
@@ -262,6 +261,7 @@ kill:
262 inet_twsk_put(tw); 261 inet_twsk_put(tw);
263 return TCP_TW_SUCCESS; 262 return TCP_TW_SUCCESS;
264} 263}
264EXPORT_SYMBOL(tcp_timewait_state_process);
265 265
266/* 266/*
267 * Move a socket to time-wait or dead fin-wait-2 state. 267 * Move a socket to time-wait or dead fin-wait-2 state.
@@ -362,7 +362,6 @@ void tcp_twsk_destructor(struct sock *sk)
362 tcp_free_md5sig_pool(); 362 tcp_free_md5sig_pool();
363#endif 363#endif
364} 364}
365
366EXPORT_SYMBOL_GPL(tcp_twsk_destructor); 365EXPORT_SYMBOL_GPL(tcp_twsk_destructor);
367 366
368static inline void TCP_ECN_openreq_child(struct tcp_sock *tp, 367static inline void TCP_ECN_openreq_child(struct tcp_sock *tp,
@@ -510,6 +509,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
510 } 509 }
511 return newsk; 510 return newsk;
512} 511}
512EXPORT_SYMBOL(tcp_create_openreq_child);
513 513
514/* 514/*
515 * Process an incoming packet for SYN_RECV sockets represented 515 * Process an incoming packet for SYN_RECV sockets represented
@@ -706,6 +706,7 @@ embryonic_reset:
706 inet_csk_reqsk_queue_drop(sk, req, prev); 706 inet_csk_reqsk_queue_drop(sk, req, prev);
707 return NULL; 707 return NULL;
708} 708}
709EXPORT_SYMBOL(tcp_check_req);
709 710
710/* 711/*
711 * Queue segment on the new socket if the new socket is active, 712 * Queue segment on the new socket if the new socket is active,
@@ -737,8 +738,4 @@ int tcp_child_process(struct sock *parent, struct sock *child,
737 sock_put(child); 738 sock_put(child);
738 return ret; 739 return ret;
739} 740}
740
741EXPORT_SYMBOL(tcp_check_req);
742EXPORT_SYMBOL(tcp_child_process); 741EXPORT_SYMBOL(tcp_child_process);
743EXPORT_SYMBOL(tcp_create_openreq_child);
744EXPORT_SYMBOL(tcp_timewait_state_process);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index b4ed957f201a..de3bd8458588 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -247,6 +247,7 @@ void tcp_select_initial_window(int __space, __u32 mss,
247 /* Set the clamp no higher than max representable value */ 247 /* Set the clamp no higher than max representable value */
248 (*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp); 248 (*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
249} 249}
250EXPORT_SYMBOL(tcp_select_initial_window);
250 251
251/* Choose a new window to advertise, update state in tcp_sock for the 252
252 * socket, and return result with RFC1323 scaling applied. The return 253 * socket, and return result with RFC1323 scaling applied. The return
@@ -294,9 +295,9 @@ static u16 tcp_select_window(struct sock *sk)
294/* Packet ECN state for a SYN-ACK */ 295/* Packet ECN state for a SYN-ACK */
295static inline void TCP_ECN_send_synack(struct tcp_sock *tp, struct sk_buff *skb) 296static inline void TCP_ECN_send_synack(struct tcp_sock *tp, struct sk_buff *skb)
296{ 297{
297 TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_CWR; 298 TCP_SKB_CB(skb)->flags &= ~TCPHDR_CWR;
298 if (!(tp->ecn_flags & TCP_ECN_OK)) 299 if (!(tp->ecn_flags & TCP_ECN_OK))
299 TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_ECE; 300 TCP_SKB_CB(skb)->flags &= ~TCPHDR_ECE;
300} 301}
301 302
302/* Packet ECN state for a SYN. */ 303/* Packet ECN state for a SYN. */
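From here on, tcp_output.c mechanically renames the skb control-block flag constants from TCPCB_FLAG_* to TCPHDR_*. The values are assumed unchanged by the rename: one bit per flag, laid out exactly as in byte 13 of the on-wire TCP header, which is what lets tcp_transmit_skb() above OR tcb->flags straight into the header word. The renamed constants are assumed to be defined as:

    #define TCPHDR_FIN 0x01
    #define TCPHDR_SYN 0x02
    #define TCPHDR_RST 0x04
    #define TCPHDR_PSH 0x08
    #define TCPHDR_ACK 0x10
    #define TCPHDR_URG 0x20
    #define TCPHDR_ECE 0x40
    #define TCPHDR_CWR 0x80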
@@ -306,7 +307,7 @@ static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
306 307
307 tp->ecn_flags = 0; 308 tp->ecn_flags = 0;
308 if (sysctl_tcp_ecn == 1) { 309 if (sysctl_tcp_ecn == 1) {
309 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ECE | TCPCB_FLAG_CWR; 310 TCP_SKB_CB(skb)->flags |= TCPHDR_ECE | TCPHDR_CWR;
310 tp->ecn_flags = TCP_ECN_OK; 311 tp->ecn_flags = TCP_ECN_OK;
311 } 312 }
312} 313}
@@ -361,7 +362,7 @@ static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
361 skb_shinfo(skb)->gso_type = 0; 362 skb_shinfo(skb)->gso_type = 0;
362 363
363 TCP_SKB_CB(skb)->seq = seq; 364 TCP_SKB_CB(skb)->seq = seq;
364 if (flags & (TCPCB_FLAG_SYN | TCPCB_FLAG_FIN)) 365 if (flags & (TCPHDR_SYN | TCPHDR_FIN))
365 seq++; 366 seq++;
366 TCP_SKB_CB(skb)->end_seq = seq; 367 TCP_SKB_CB(skb)->end_seq = seq;
367} 368}
@@ -820,7 +821,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
820 tcb = TCP_SKB_CB(skb); 821 tcb = TCP_SKB_CB(skb);
821 memset(&opts, 0, sizeof(opts)); 822 memset(&opts, 0, sizeof(opts));
822 823
823 if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) 824 if (unlikely(tcb->flags & TCPHDR_SYN))
824 tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5); 825 tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
825 else 826 else
826 tcp_options_size = tcp_established_options(sk, skb, &opts, 827 tcp_options_size = tcp_established_options(sk, skb, &opts,
@@ -843,7 +844,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
843 *(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) | 844 *(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) |
844 tcb->flags); 845 tcb->flags);
845 846
846 if (unlikely(tcb->flags & TCPCB_FLAG_SYN)) { 847 if (unlikely(tcb->flags & TCPHDR_SYN)) {
847 /* RFC1323: The window in SYN & SYN/ACK segments 848 /* RFC1323: The window in SYN & SYN/ACK segments
848 * is never scaled. 849 * is never scaled.
849 */ 850 */
@@ -866,7 +867,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
866 } 867 }
867 868
868 tcp_options_write((__be32 *)(th + 1), tp, &opts); 869 tcp_options_write((__be32 *)(th + 1), tp, &opts);
869 if (likely((tcb->flags & TCPCB_FLAG_SYN) == 0)) 870 if (likely((tcb->flags & TCPHDR_SYN) == 0))
870 TCP_ECN_send(sk, skb, tcp_header_size); 871 TCP_ECN_send(sk, skb, tcp_header_size);
871 872
872#ifdef CONFIG_TCP_MD5SIG 873#ifdef CONFIG_TCP_MD5SIG
@@ -880,7 +881,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
880 881
881 icsk->icsk_af_ops->send_check(sk, skb); 882 icsk->icsk_af_ops->send_check(sk, skb);
882 883
883 if (likely(tcb->flags & TCPCB_FLAG_ACK)) 884 if (likely(tcb->flags & TCPHDR_ACK))
884 tcp_event_ack_sent(sk, tcp_skb_pcount(skb)); 885 tcp_event_ack_sent(sk, tcp_skb_pcount(skb));
885 886
886 if (skb->len != tcp_header_size) 887 if (skb->len != tcp_header_size)
@@ -1023,7 +1024,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
1023 1024
1024 /* PSH and FIN should only be set in the second packet. */ 1025 /* PSH and FIN should only be set in the second packet. */
1025 flags = TCP_SKB_CB(skb)->flags; 1026 flags = TCP_SKB_CB(skb)->flags;
1026 TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN | TCPCB_FLAG_PSH); 1027 TCP_SKB_CB(skb)->flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
1027 TCP_SKB_CB(buff)->flags = flags; 1028 TCP_SKB_CB(buff)->flags = flags;
1028 TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked; 1029 TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
1029 1030
@@ -1189,6 +1190,7 @@ void tcp_mtup_init(struct sock *sk)
1189 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss); 1190 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss);
1190 icsk->icsk_mtup.probe_size = 0; 1191 icsk->icsk_mtup.probe_size = 0;
1191} 1192}
1193EXPORT_SYMBOL(tcp_mtup_init);
1192 1194
1193/* This function synchronizes snd mss to current pmtu/exthdr set. 1195
1194 1196
@@ -1232,6 +1234,7 @@ unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
1232 1234
1233 return mss_now; 1235 return mss_now;
1234} 1236}
1237EXPORT_SYMBOL(tcp_sync_mss);
1235 1238
1236/* Compute the current effective MSS, taking SACKs and IP options, 1239/* Compute the current effective MSS, taking SACKs and IP options,
1237 * and even PMTU discovery events into account. 1240 * and even PMTU discovery events into account.
@@ -1328,8 +1331,7 @@ static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp,
1328 u32 in_flight, cwnd; 1331 u32 in_flight, cwnd;
1329 1332
1330 /* Don't be strict about the congestion window for the final FIN. */ 1333 /* Don't be strict about the congestion window for the final FIN. */
1331 if ((TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) && 1334 if ((TCP_SKB_CB(skb)->flags & TCPHDR_FIN) && tcp_skb_pcount(skb) == 1)
1332 tcp_skb_pcount(skb) == 1)
1333 return 1; 1335 return 1;
1334 1336
1335 in_flight = tcp_packets_in_flight(tp); 1337 in_flight = tcp_packets_in_flight(tp);
@@ -1398,7 +1400,7 @@ static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb,
1398 * Nagle can be ignored during F-RTO too (see RFC4138). 1400 * Nagle can be ignored during F-RTO too (see RFC4138).
1399 */ 1401 */
1400 if (tcp_urg_mode(tp) || (tp->frto_counter == 2) || 1402 if (tcp_urg_mode(tp) || (tp->frto_counter == 2) ||
1401 (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)) 1403 (TCP_SKB_CB(skb)->flags & TCPHDR_FIN))
1402 return 1; 1404 return 1;
1403 1405
1404 if (!tcp_nagle_check(tp, skb, cur_mss, nonagle)) 1406 if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
@@ -1461,7 +1463,7 @@ int tcp_may_send_now(struct sock *sk)
1461 * packet has never been sent out before (and thus is not cloned). 1463 * packet has never been sent out before (and thus is not cloned).
1462 */ 1464 */
1463static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, 1465static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
1464 unsigned int mss_now) 1466 unsigned int mss_now, gfp_t gfp)
1465{ 1467{
1466 struct sk_buff *buff; 1468 struct sk_buff *buff;
1467 int nlen = skb->len - len; 1469 int nlen = skb->len - len;
@@ -1471,7 +1473,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
1471 if (skb->len != skb->data_len) 1473 if (skb->len != skb->data_len)
1472 return tcp_fragment(sk, skb, len, mss_now); 1474 return tcp_fragment(sk, skb, len, mss_now);
1473 1475
1474 buff = sk_stream_alloc_skb(sk, 0, GFP_ATOMIC); 1476 buff = sk_stream_alloc_skb(sk, 0, gfp);
1475 if (unlikely(buff == NULL)) 1477 if (unlikely(buff == NULL))
1476 return -ENOMEM; 1478 return -ENOMEM;
1477 1479
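tso_fragment() gains a gfp_t parameter in place of its hard-coded GFP_ATOMIC, and the tcp_write_xmit() hunk below passes its own gfp argument through, so process-context callers can use a sleeping allocation while softirq callers keep GFP_ATOMIC. A sketch of the pattern of threading the caller's allocation context down (signatures simplified and hypothetical):

    /* The helper takes the caller's allocation context rather than
     * assuming it always runs in atomic context.
     */
    static int frag_helper(struct sock *sk, struct sk_buff *skb,
                           unsigned int len, unsigned int mss, gfp_t gfp)
    {
            struct sk_buff *buff = sk_stream_alloc_skb(sk, 0, gfp);

            if (buff == NULL)
                    return -ENOMEM;
            /* split skb at len into skb + buff, as tso_fragment() does */
            return 0;
    }

    /* softirq caller:         frag_helper(sk, skb, limit, mss, GFP_ATOMIC);
     * process-context caller: frag_helper(sk, skb, limit, mss, sk->sk_allocation);
     */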
@@ -1487,7 +1489,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
1487 1489
1488 /* PSH and FIN should only be set in the second packet. */ 1490 /* PSH and FIN should only be set in the second packet. */
1489 flags = TCP_SKB_CB(skb)->flags; 1491 flags = TCP_SKB_CB(skb)->flags;
1490 TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN | TCPCB_FLAG_PSH); 1492 TCP_SKB_CB(skb)->flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
1491 TCP_SKB_CB(buff)->flags = flags; 1493 TCP_SKB_CB(buff)->flags = flags;
1492 1494
1493 /* This packet was never sent out yet, so no SACK bits. */ 1495 /* This packet was never sent out yet, so no SACK bits. */
@@ -1518,7 +1520,7 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
1518 const struct inet_connection_sock *icsk = inet_csk(sk); 1520 const struct inet_connection_sock *icsk = inet_csk(sk);
1519 u32 send_win, cong_win, limit, in_flight; 1521 u32 send_win, cong_win, limit, in_flight;
1520 1522
1521 if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) 1523 if (TCP_SKB_CB(skb)->flags & TCPHDR_FIN)
1522 goto send_now; 1524 goto send_now;
1523 1525
1524 if (icsk->icsk_ca_state != TCP_CA_Open) 1526 if (icsk->icsk_ca_state != TCP_CA_Open)
@@ -1644,7 +1646,7 @@ static int tcp_mtu_probe(struct sock *sk)
1644 1646
1645 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq; 1647 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
1646 TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size; 1648 TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
1647 TCP_SKB_CB(nskb)->flags = TCPCB_FLAG_ACK; 1649 TCP_SKB_CB(nskb)->flags = TCPHDR_ACK;
1648 TCP_SKB_CB(nskb)->sacked = 0; 1650 TCP_SKB_CB(nskb)->sacked = 0;
1649 nskb->csum = 0; 1651 nskb->csum = 0;
1650 nskb->ip_summed = skb->ip_summed; 1652 nskb->ip_summed = skb->ip_summed;
@@ -1669,7 +1671,7 @@ static int tcp_mtu_probe(struct sock *sk)
1669 sk_wmem_free_skb(sk, skb); 1671 sk_wmem_free_skb(sk, skb);
1670 } else { 1672 } else {
1671 TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags & 1673 TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags &
1672 ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH); 1674 ~(TCPHDR_FIN|TCPHDR_PSH);
1673 if (!skb_shinfo(skb)->nr_frags) { 1675 if (!skb_shinfo(skb)->nr_frags) {
1674 skb_pull(skb, copy); 1676 skb_pull(skb, copy);
1675 if (skb->ip_summed != CHECKSUM_PARTIAL) 1677 if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -1769,7 +1771,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
1769 cwnd_quota); 1771 cwnd_quota);
1770 1772
1771 if (skb->len > limit && 1773 if (skb->len > limit &&
1772 unlikely(tso_fragment(sk, skb, limit, mss_now))) 1774 unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
1773 break; 1775 break;
1774 1776
1775 TCP_SKB_CB(skb)->when = tcp_time_stamp; 1777 TCP_SKB_CB(skb)->when = tcp_time_stamp;
@@ -2020,7 +2022,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
2020 2022
2021 if (!sysctl_tcp_retrans_collapse) 2023 if (!sysctl_tcp_retrans_collapse)
2022 return; 2024 return;
2023 if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN) 2025 if (TCP_SKB_CB(skb)->flags & TCPHDR_SYN)
2024 return; 2026 return;
2025 2027
2026 tcp_for_write_queue_from_safe(skb, tmp, sk) { 2028 tcp_for_write_queue_from_safe(skb, tmp, sk) {
@@ -2112,7 +2114,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
2112 * since it is cheap to do so and saves bytes on the network. 2114 * since it is cheap to do so and saves bytes on the network.
2113 */ 2115 */
2114 if (skb->len > 0 && 2116 if (skb->len > 0 &&
2115 (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) && 2117 (TCP_SKB_CB(skb)->flags & TCPHDR_FIN) &&
2116 tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) { 2118 tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
2117 if (!pskb_trim(skb, 0)) { 2119 if (!pskb_trim(skb, 0)) {
2118 /* Reuse, even though it does some unnecessary work */ 2120 /* Reuse, even though it does some unnecessary work */
@@ -2208,6 +2210,9 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
2208 int mib_idx; 2210 int mib_idx;
2209 int fwd_rexmitting = 0; 2211 int fwd_rexmitting = 0;
2210 2212
2213 if (!tp->packets_out)
2214 return;
2215
2211 if (!tp->lost_out) 2216 if (!tp->lost_out)
2212 tp->retransmit_high = tp->snd_una; 2217 tp->retransmit_high = tp->snd_una;
2213 2218
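The two lines added at the top of tcp_xmit_retransmit_queue() are a defensive guard: with no packets in flight the write queue can be empty, and the function otherwise proceeds to take tcp_write_queue_head(sk) (or the retransmit hint) as its starting skb, which would then be NULL and oops on the first dereference. The shape of the fix, roughly:

    if (!tp->packets_out)   /* nothing outstanding: queue may be empty */
            return;

    /* only now is it safe to pick a starting skb from the queue */
    skb = tp->retransmit_skb_hint ?: tcp_write_queue_head(sk);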
@@ -2301,7 +2306,7 @@ void tcp_send_fin(struct sock *sk)
2301 mss_now = tcp_current_mss(sk); 2306 mss_now = tcp_current_mss(sk);
2302 2307
2303 if (tcp_send_head(sk) != NULL) { 2308 if (tcp_send_head(sk) != NULL) {
2304 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_FIN; 2309 TCP_SKB_CB(skb)->flags |= TCPHDR_FIN;
2305 TCP_SKB_CB(skb)->end_seq++; 2310 TCP_SKB_CB(skb)->end_seq++;
2306 tp->write_seq++; 2311 tp->write_seq++;
2307 } else { 2312 } else {
@@ -2318,7 +2323,7 @@ void tcp_send_fin(struct sock *sk)
2318 skb_reserve(skb, MAX_TCP_HEADER); 2323 skb_reserve(skb, MAX_TCP_HEADER);
2319 /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */ 2324 /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
2320 tcp_init_nondata_skb(skb, tp->write_seq, 2325 tcp_init_nondata_skb(skb, tp->write_seq,
2321 TCPCB_FLAG_ACK | TCPCB_FLAG_FIN); 2326 TCPHDR_ACK | TCPHDR_FIN);
2322 tcp_queue_skb(sk, skb); 2327 tcp_queue_skb(sk, skb);
2323 } 2328 }
2324 __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF); 2329 __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
@@ -2343,7 +2348,7 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
2343 /* Reserve space for headers and prepare control bits. */ 2348 /* Reserve space for headers and prepare control bits. */
2344 skb_reserve(skb, MAX_TCP_HEADER); 2349 skb_reserve(skb, MAX_TCP_HEADER);
2345 tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk), 2350 tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
2346 TCPCB_FLAG_ACK | TCPCB_FLAG_RST); 2351 TCPHDR_ACK | TCPHDR_RST);
2347 /* Send it off. */ 2352 /* Send it off. */
2348 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2353 TCP_SKB_CB(skb)->when = tcp_time_stamp;
2349 if (tcp_transmit_skb(sk, skb, 0, priority)) 2354 if (tcp_transmit_skb(sk, skb, 0, priority))
@@ -2363,11 +2368,11 @@ int tcp_send_synack(struct sock *sk)
2363 struct sk_buff *skb; 2368 struct sk_buff *skb;
2364 2369
2365 skb = tcp_write_queue_head(sk); 2370 skb = tcp_write_queue_head(sk);
2366 if (skb == NULL || !(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN)) { 2371 if (skb == NULL || !(TCP_SKB_CB(skb)->flags & TCPHDR_SYN)) {
2367 printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n"); 2372 printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n");
2368 return -EFAULT; 2373 return -EFAULT;
2369 } 2374 }
2370 if (!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_ACK)) { 2375 if (!(TCP_SKB_CB(skb)->flags & TCPHDR_ACK)) {
2371 if (skb_cloned(skb)) { 2376 if (skb_cloned(skb)) {
2372 struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC); 2377 struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
2373 if (nskb == NULL) 2378 if (nskb == NULL)
@@ -2381,7 +2386,7 @@ int tcp_send_synack(struct sock *sk)
2381 skb = nskb; 2386 skb = nskb;
2382 } 2387 }
2383 2388
2384 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ACK; 2389 TCP_SKB_CB(skb)->flags |= TCPHDR_ACK;
2385 TCP_ECN_send_synack(tcp_sk(sk), skb); 2390 TCP_ECN_send_synack(tcp_sk(sk), skb);
2386 } 2391 }
2387 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2392 TCP_SKB_CB(skb)->when = tcp_time_stamp;
@@ -2460,7 +2465,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2460 * not even correctly set) 2465 * not even correctly set)
2461 */ 2466 */
2462 tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn, 2467 tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn,
2463 TCPCB_FLAG_SYN | TCPCB_FLAG_ACK); 2468 TCPHDR_SYN | TCPHDR_ACK);
2464 2469
2465 if (OPTION_COOKIE_EXTENSION & opts.options) { 2470 if (OPTION_COOKIE_EXTENSION & opts.options) {
2466 if (s_data_desired) { 2471 if (s_data_desired) {
@@ -2515,6 +2520,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2515 2520
2516 return skb; 2521 return skb;
2517} 2522}
2523EXPORT_SYMBOL(tcp_make_synack);
2518 2524
2519/* Do all connect socket setups that can be done AF independent. */ 2525/* Do all connect socket setups that can be done AF independent. */
2520static void tcp_connect_init(struct sock *sk) 2526static void tcp_connect_init(struct sock *sk)
@@ -2592,7 +2598,7 @@ int tcp_connect(struct sock *sk)
2592 skb_reserve(buff, MAX_TCP_HEADER); 2598 skb_reserve(buff, MAX_TCP_HEADER);
2593 2599
2594 tp->snd_nxt = tp->write_seq; 2600 tp->snd_nxt = tp->write_seq;
2595 tcp_init_nondata_skb(buff, tp->write_seq++, TCPCB_FLAG_SYN); 2601 tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
2596 TCP_ECN_send_syn(sk, buff); 2602 TCP_ECN_send_syn(sk, buff);
2597 2603
2598 /* Send it off. */ 2604 /* Send it off. */
@@ -2617,6 +2623,7 @@ int tcp_connect(struct sock *sk)
2617 inet_csk(sk)->icsk_rto, TCP_RTO_MAX); 2623 inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
2618 return 0; 2624 return 0;
2619} 2625}
2626EXPORT_SYMBOL(tcp_connect);
2620 2627
2621/* Send out a delayed ack, the caller does the policy checking 2628/* Send out a delayed ack, the caller does the policy checking
2622 * to see if we should even be here. See tcp_input.c:tcp_ack_snd_check() 2629 * to see if we should even be here. See tcp_input.c:tcp_ack_snd_check()
@@ -2698,7 +2705,7 @@ void tcp_send_ack(struct sock *sk)
2698 2705
2699 /* Reserve space for headers and prepare control bits. */ 2706 /* Reserve space for headers and prepare control bits. */
2700 skb_reserve(buff, MAX_TCP_HEADER); 2707 skb_reserve(buff, MAX_TCP_HEADER);
2701 tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPCB_FLAG_ACK); 2708 tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);
2702 2709
2703 /* Send it off, this clears delayed acks for us. */ 2710 /* Send it off, this clears delayed acks for us. */
2704 TCP_SKB_CB(buff)->when = tcp_time_stamp; 2711 TCP_SKB_CB(buff)->when = tcp_time_stamp;
@@ -2732,7 +2739,7 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
2732 * end to send an ack. Don't queue or clone SKB, just 2739 * end to send an ack. Don't queue or clone SKB, just
2733 * send it. 2740 * send it.
2734 */ 2741 */
2735 tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPCB_FLAG_ACK); 2742 tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
2736 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2743 TCP_SKB_CB(skb)->when = tcp_time_stamp;
2737 return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC); 2744 return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
2738} 2745}
@@ -2762,13 +2769,13 @@ int tcp_write_wakeup(struct sock *sk)
2762 if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq || 2769 if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
2763 skb->len > mss) { 2770 skb->len > mss) {
2764 seg_size = min(seg_size, mss); 2771 seg_size = min(seg_size, mss);
2765 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH; 2772 TCP_SKB_CB(skb)->flags |= TCPHDR_PSH;
2766 if (tcp_fragment(sk, skb, seg_size, mss)) 2773 if (tcp_fragment(sk, skb, seg_size, mss))
2767 return -1; 2774 return -1;
2768 } else if (!tcp_skb_pcount(skb)) 2775 } else if (!tcp_skb_pcount(skb))
2769 tcp_set_skb_tso_segs(sk, skb, mss); 2776 tcp_set_skb_tso_segs(sk, skb, mss);
2770 2777
2771 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH; 2778 TCP_SKB_CB(skb)->flags |= TCPHDR_PSH;
2772 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2779 TCP_SKB_CB(skb)->when = tcp_time_stamp;
2773 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); 2780 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
2774 if (!err) 2781 if (!err)
@@ -2821,10 +2828,3 @@ void tcp_send_probe0(struct sock *sk)
2821 TCP_RTO_MAX); 2828 TCP_RTO_MAX);
2822 } 2829 }
2823} 2830}
2824
2825EXPORT_SYMBOL(tcp_select_initial_window);
2826EXPORT_SYMBOL(tcp_connect);
2827EXPORT_SYMBOL(tcp_make_synack);
2828EXPORT_SYMBOL(tcp_simple_retransmit);
2829EXPORT_SYMBOL(tcp_sync_mss);
2830EXPORT_SYMBOL(tcp_mtup_init);
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 440a5c6004f6..808bb920c9f5 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -41,7 +41,6 @@ void tcp_init_xmit_timers(struct sock *sk)
41 inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer, 41 inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
42 &tcp_keepalive_timer); 42 &tcp_keepalive_timer);
43} 43}
44
45EXPORT_SYMBOL(tcp_init_xmit_timers); 44EXPORT_SYMBOL(tcp_init_xmit_timers);
46 45
47static void tcp_write_err(struct sock *sk) 46static void tcp_write_err(struct sock *sk)
diff --git a/net/ipv4/tunnel4.c b/net/ipv4/tunnel4.c
index 3b3813cc80b9..59186ca7808a 100644
--- a/net/ipv4/tunnel4.c
+++ b/net/ipv4/tunnel4.c
@@ -48,7 +48,6 @@ err:
48 48
49 return ret; 49 return ret;
50} 50}
51
52EXPORT_SYMBOL(xfrm4_tunnel_register); 51EXPORT_SYMBOL(xfrm4_tunnel_register);
53 52
54int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family) 53int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family)
@@ -72,7 +71,6 @@ int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family)
72 71
73 return ret; 72 return ret;
74} 73}
75
76EXPORT_SYMBOL(xfrm4_tunnel_deregister); 74EXPORT_SYMBOL(xfrm4_tunnel_deregister);
77 75
78static int tunnel4_rcv(struct sk_buff *skb) 76static int tunnel4_rcv(struct sk_buff *skb)
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index eec4ff456e33..32e0bef60d0a 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -914,7 +914,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
914 !sock_flag(sk, SOCK_BROADCAST)) 914 !sock_flag(sk, SOCK_BROADCAST))
915 goto out; 915 goto out;
916 if (connected) 916 if (connected)
917 sk_dst_set(sk, dst_clone(&rt->u.dst)); 917 sk_dst_set(sk, dst_clone(&rt->dst));
918 } 918 }
919 919
920 if (msg->msg_flags&MSG_CONFIRM) 920 if (msg->msg_flags&MSG_CONFIRM)
@@ -978,7 +978,7 @@ out:
978 return err; 978 return err;
979 979
980do_confirm: 980do_confirm:
981 dst_confirm(&rt->u.dst); 981 dst_confirm(&rt->dst);
982 if (!(msg->msg_flags&MSG_PROBE) || len) 982 if (!(msg->msg_flags&MSG_PROBE) || len)
983 goto back_from_confirm; 983 goto back_from_confirm;
984 err = 0; 984 err = 0;
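The s/rt->u.dst/rt->dst/ substitutions here, and in every IPv6 file below, track a layout cleanup of the route types: struct rtable (and struct rt6_info) are assumed to now embed their struct dst_entry directly as the first member, named dst, rather than wrapping it in a single-member union u. Assumed shape:

    struct rtable {
            struct dst_entry dst;  /* was: union { struct dst_entry dst; } u; */
            /* route-specific fields follow */
    };

Keeping dst as the first member preserves the usual casts between a generic struct dst_entry * and its containing route, so call sites change only textually.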
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
index 6610bf76369f..ab76aa928fa9 100644
--- a/net/ipv4/udplite.c
+++ b/net/ipv4/udplite.c
@@ -58,6 +58,7 @@ struct proto udplite_prot = {
58 .compat_getsockopt = compat_udp_getsockopt, 58 .compat_getsockopt = compat_udp_getsockopt,
59#endif 59#endif
60}; 60};
61EXPORT_SYMBOL(udplite_prot);
61 62
62static struct inet_protosw udplite4_protosw = { 63static struct inet_protosw udplite4_protosw = {
63 .type = SOCK_DGRAM, 64 .type = SOCK_DGRAM,
@@ -127,5 +128,3 @@ out_unregister_proto:
127out_register_err: 128out_register_err:
128 printk(KERN_CRIT "%s: Cannot add UDP-Lite protocol.\n", __func__); 129 printk(KERN_CRIT "%s: Cannot add UDP-Lite protocol.\n", __func__);
129} 130}
130
131EXPORT_SYMBOL(udplite_prot);
diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c
index ad8fbb871aa0..06814b6216dc 100644
--- a/net/ipv4/xfrm4_input.c
+++ b/net/ipv4/xfrm4_input.c
@@ -163,5 +163,4 @@ int xfrm4_rcv(struct sk_buff *skb)
163{ 163{
164 return xfrm4_rcv_spi(skb, ip_hdr(skb)->protocol, 0); 164 return xfrm4_rcv_spi(skb, ip_hdr(skb)->protocol, 0);
165} 165}
166
167EXPORT_SYMBOL(xfrm4_rcv); 166EXPORT_SYMBOL(xfrm4_rcv);
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 1705476670ef..869078d4eeb9 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -37,7 +37,7 @@ static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos,
37 fl.fl4_src = saddr->a4; 37 fl.fl4_src = saddr->a4;
38 38
39 err = __ip_route_output_key(net, &rt, &fl); 39 err = __ip_route_output_key(net, &rt, &fl);
40 dst = &rt->u.dst; 40 dst = &rt->dst;
41 if (err) 41 if (err)
42 dst = ERR_PTR(err); 42 dst = ERR_PTR(err);
43 return dst; 43 return dst;
@@ -108,6 +108,8 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
108 u8 *xprth = skb_network_header(skb) + iph->ihl * 4; 108 u8 *xprth = skb_network_header(skb) + iph->ihl * 4;
109 109
110 memset(fl, 0, sizeof(struct flowi)); 110 memset(fl, 0, sizeof(struct flowi));
111 fl->mark = skb->mark;
112
111 if (!(iph->frag_off & htons(IP_MF | IP_OFFSET))) { 113 if (!(iph->frag_off & htons(IP_MF | IP_OFFSET))) {
112 switch (iph->protocol) { 114 switch (iph->protocol) {
113 case IPPROTO_UDP: 115 case IPPROTO_UDP:
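_decode_session4() now seeds the flow key with skb->mark before the per-protocol fields are parsed, so xfrm policy lookups for forwarded and inbound packets can match on the netfilter mark just as locally generated flows do. The touched lines, with the intent spelled out:

    memset(fl, 0, sizeof(struct flowi));
    fl->mark = skb->mark;   /* let IPsec policies keyed on the mark match */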
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index e1a698df5706..ab70a3fbcafa 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -121,8 +121,6 @@ static inline void addrconf_sysctl_unregister(struct inet6_dev *idev)
121static int __ipv6_regen_rndid(struct inet6_dev *idev); 121static int __ipv6_regen_rndid(struct inet6_dev *idev);
122static int __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr); 122static int __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr);
123static void ipv6_regen_rndid(unsigned long data); 123static void ipv6_regen_rndid(unsigned long data);
124
125static int desync_factor = MAX_DESYNC_FACTOR * HZ;
126#endif 124#endif
127 125
128static int ipv6_generate_eui64(u8 *eui, struct net_device *dev); 126static int ipv6_generate_eui64(u8 *eui, struct net_device *dev);
@@ -284,13 +282,16 @@ static void addrconf_mod_timer(struct inet6_ifaddr *ifp,
284static int snmp6_alloc_dev(struct inet6_dev *idev) 282static int snmp6_alloc_dev(struct inet6_dev *idev)
285{ 283{
286 if (snmp_mib_init((void __percpu **)idev->stats.ipv6, 284 if (snmp_mib_init((void __percpu **)idev->stats.ipv6,
287 sizeof(struct ipstats_mib)) < 0) 285 sizeof(struct ipstats_mib),
286 __alignof__(struct ipstats_mib)) < 0)
288 goto err_ip; 287 goto err_ip;
289 if (snmp_mib_init((void __percpu **)idev->stats.icmpv6, 288 if (snmp_mib_init((void __percpu **)idev->stats.icmpv6,
290 sizeof(struct icmpv6_mib)) < 0) 289 sizeof(struct icmpv6_mib),
290 __alignof__(struct icmpv6_mib)) < 0)
291 goto err_icmp; 291 goto err_icmp;
292 if (snmp_mib_init((void __percpu **)idev->stats.icmpv6msg, 292 if (snmp_mib_init((void __percpu **)idev->stats.icmpv6msg,
293 sizeof(struct icmpv6msg_mib)) < 0) 293 sizeof(struct icmpv6msg_mib),
294 __alignof__(struct icmpv6msg_mib)) < 0)
294 goto err_icmpmsg; 295 goto err_icmpmsg;
295 296
296 return 0; 297 return 0;
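Every snmp_mib_init() call in this diff gains a third argument: the natural alignment of the MIB structure, obtained with __alignof__. Presumably the per-cpu MIB allocation must now honour that alignment so the 64-bit counters (and, on 32-bit SMP, the accompanying seqcount) land correctly aligned. The updated helper is assumed to look roughly like this (simplified to a single per-cpu block):

    static int snmp_mib_init(void __percpu **ptr, size_t mibsize, size_t align)
    {
            ptr[0] = __alloc_percpu(mibsize, align);
            if (!ptr[0])
                    return -ENOMEM;
            return 0;
    }

    /* caller, as in the hunk above */
    snmp_mib_init((void __percpu **)idev->stats.ipv6,
                  sizeof(struct ipstats_mib),
                  __alignof__(struct ipstats_mib));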
@@ -557,7 +558,7 @@ void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp)
557 pr_warning("Freeing alive inet6 address %p\n", ifp); 558 pr_warning("Freeing alive inet6 address %p\n", ifp);
558 return; 559 return;
559 } 560 }
560 dst_release(&ifp->rt->u.dst); 561 dst_release(&ifp->rt->dst);
561 562
562 call_rcu(&ifp->rcu, inet6_ifa_finish_destroy_rcu); 563 call_rcu(&ifp->rcu, inet6_ifa_finish_destroy_rcu);
563} 564}
@@ -823,7 +824,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
823 rt->rt6i_flags |= RTF_EXPIRES; 824 rt->rt6i_flags |= RTF_EXPIRES;
824 } 825 }
825 } 826 }
826 dst_release(&rt->u.dst); 827 dst_release(&rt->dst);
827 } 828 }
828 829
829out: 830out:
@@ -890,7 +891,8 @@ retry:
890 idev->cnf.temp_valid_lft); 891 idev->cnf.temp_valid_lft);
891 tmp_prefered_lft = min_t(__u32, 892 tmp_prefered_lft = min_t(__u32,
892 ifp->prefered_lft, 893 ifp->prefered_lft,
893 idev->cnf.temp_prefered_lft - desync_factor / HZ); 894 idev->cnf.temp_prefered_lft -
895 idev->cnf.max_desync_factor);
894 tmp_plen = ifp->prefix_len; 896 tmp_plen = ifp->prefix_len;
895 max_addresses = idev->cnf.max_addresses; 897 max_addresses = idev->cnf.max_addresses;
896 tmp_cstamp = ifp->cstamp; 898 tmp_cstamp = ifp->cstamp;
@@ -1650,7 +1652,8 @@ static void ipv6_regen_rndid(unsigned long data)
1650 1652
1651 expires = jiffies + 1653 expires = jiffies +
1652 idev->cnf.temp_prefered_lft * HZ - 1654 idev->cnf.temp_prefered_lft * HZ -
1653 idev->cnf.regen_max_retry * idev->cnf.dad_transmits * idev->nd_parms->retrans_time - desync_factor; 1655 idev->cnf.regen_max_retry * idev->cnf.dad_transmits * idev->nd_parms->retrans_time -
1656 idev->cnf.max_desync_factor * HZ;
1654 if (time_before(expires, jiffies)) { 1657 if (time_before(expires, jiffies)) {
1655 printk(KERN_WARNING 1658 printk(KERN_WARNING
1656 "ipv6_regen_rndid(): too short regeneration interval; timer disabled for %s.\n", 1659 "ipv6_regen_rndid(): too short regeneration interval; timer disabled for %s.\n",
@@ -1760,7 +1763,10 @@ static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
1760 1763
1761 idev = ipv6_find_idev(dev); 1764 idev = ipv6_find_idev(dev);
1762 if (!idev) 1765 if (!idev)
1763 return NULL; 1766 return ERR_PTR(-ENOBUFS);
1767
1768 if (idev->cnf.disable_ipv6)
1769 return ERR_PTR(-EACCES);
1764 1770
1765 /* Add default multicast route */ 1771 /* Add default multicast route */
1766 addrconf_add_mroute(dev); 1772 addrconf_add_mroute(dev);
@@ -1863,7 +1869,7 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len)
1863 dev, expires, flags); 1869 dev, expires, flags);
1864 } 1870 }
1865 if (rt) 1871 if (rt)
1866 dst_release(&rt->u.dst); 1872 dst_release(&rt->dst);
1867 } 1873 }
1868 1874
1869 /* Try to figure out our local address for this prefix */ 1875 /* Try to figure out our local address for this prefix */
@@ -2129,8 +2135,9 @@ static int inet6_addr_add(struct net *net, int ifindex, struct in6_addr *pfx,
2129 if (!dev) 2135 if (!dev)
2130 return -ENODEV; 2136 return -ENODEV;
2131 2137
2132 if ((idev = addrconf_add_dev(dev)) == NULL) 2138 idev = addrconf_add_dev(dev);
2133 return -ENOBUFS; 2139 if (IS_ERR(idev))
2140 return PTR_ERR(idev);
2134 2141
2135 scope = ipv6_addr_scope(pfx); 2142 scope = ipv6_addr_scope(pfx);
2136 2143
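addrconf_add_dev() stops returning NULL on failure and instead encodes the errno in the pointer, letting inet6_addr_add() distinguish allocation failure (-ENOBUFS) from IPv6 being administratively disabled on the device (-EACCES). The ERR_PTR()/IS_ERR()/PTR_ERR() trio is the standard kernel idiom; a self-contained userspace illustration:

    #include <stdio.h>

    /* Userspace re-implementation of the kernel's ERR_PTR idiom: errno
     * values are small negatives, so (void *)-errno lands in a pointer
     * range no valid object ever occupies.
     */
    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    static void *get_dev(int disabled)
    {
            if (disabled)
                    return ERR_PTR(-13);   /* -EACCES */
            return ERR_PTR(-105);          /* -ENOBUFS (simulated) */
    }

    int main(void)
    {
            void *idev = get_dev(1);

            if (IS_ERR(idev))
                    printf("failed: errno %ld\n", -PTR_ERR(idev));
            return 0;
    }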
@@ -2377,7 +2384,7 @@ static void addrconf_dev_config(struct net_device *dev)
2377 } 2384 }
2378 2385
2379 idev = addrconf_add_dev(dev); 2386 idev = addrconf_add_dev(dev);
2380 if (idev == NULL) 2387 if (IS_ERR(idev))
2381 return; 2388 return;
2382 2389
2383 memset(&addr, 0, sizeof(struct in6_addr)); 2390 memset(&addr, 0, sizeof(struct in6_addr));
@@ -2468,7 +2475,7 @@ static void addrconf_ip6_tnl_config(struct net_device *dev)
2468 ASSERT_RTNL(); 2475 ASSERT_RTNL();
2469 2476
2470 idev = addrconf_add_dev(dev); 2477 idev = addrconf_add_dev(dev);
2471 if (!idev) { 2478 if (IS_ERR(idev)) {
2472 printk(KERN_DEBUG "init ip6-ip6: add_dev failed\n"); 2479 printk(KERN_DEBUG "init ip6-ip6: add_dev failed\n");
2473 return; 2480 return;
2474 } 2481 }
@@ -3492,8 +3499,12 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
3492 preferred -= tval; 3499 preferred -= tval;
3493 else 3500 else
3494 preferred = 0; 3501 preferred = 0;
3495 if (valid != INFINITY_LIFE_TIME) 3502 if (valid != INFINITY_LIFE_TIME) {
3496 valid -= tval; 3503 if (valid > tval)
3504 valid -= tval;
3505 else
3506 valid = 0;
3507 }
3497 } 3508 }
3498 } else { 3509 } else {
3499 preferred = INFINITY_LIFE_TIME; 3510 preferred = INFINITY_LIFE_TIME;
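The valid-lifetime fix mirrors the preferred-lifetime branch just above it: valid is an unsigned 32-bit count of seconds, so an unguarded valid -= tval with tval > valid wraps around and reports a near-infinite remaining lifetime instead of an expired one. Clamping to zero gives the intended saturating subtraction; a short demonstration:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t valid = 5, tval = 8;

            printf("wrapped: %u\n", valid - tval);                    /* 4294967293 */
            printf("clamped: %u\n", valid > tval ? valid - tval : 0); /* 0 */
            return 0;
    }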
@@ -3855,12 +3866,28 @@ static inline void __snmp6_fill_stats(u64 *stats, void __percpu **mib,
3855 memset(&stats[items], 0, pad); 3866 memset(&stats[items], 0, pad);
3856} 3867}
3857 3868
3869static inline void __snmp6_fill_stats64(u64 *stats, void __percpu **mib,
3870 int items, int bytes, size_t syncpoff)
3871{
3872 int i;
3873 int pad = bytes - sizeof(u64) * items;
3874 BUG_ON(pad < 0);
3875
3876 /* Use put_unaligned() because stats may not be aligned for u64. */
3877 put_unaligned(items, &stats[0]);
3878 for (i = 1; i < items; i++)
3879 put_unaligned(snmp_fold_field64(mib, i, syncpoff), &stats[i]);
3880
3881 memset(&stats[items], 0, pad);
3882}
3883
3858static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype, 3884static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
3859 int bytes) 3885 int bytes)
3860{ 3886{
3861 switch (attrtype) { 3887 switch (attrtype) {
3862 case IFLA_INET6_STATS: 3888 case IFLA_INET6_STATS:
3863 __snmp6_fill_stats(stats, (void __percpu **)idev->stats.ipv6, IPSTATS_MIB_MAX, bytes); 3889 __snmp6_fill_stats64(stats, (void __percpu **)idev->stats.ipv6,
3890 IPSTATS_MIB_MAX, bytes, offsetof(struct ipstats_mib, syncp));
3864 break; 3891 break;
3865 case IFLA_INET6_ICMP6STATS: 3892 case IFLA_INET6_ICMP6STATS:
3866 __snmp6_fill_stats(stats, (void __percpu **)idev->stats.icmpv6, ICMP6_MIB_MAX, bytes); 3893 __snmp6_fill_stats(stats, (void __percpu **)idev->stats.icmpv6, ICMP6_MIB_MAX, bytes);
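__snmp6_fill_stats64() is the 64-bit-safe sibling of __snmp6_fill_stats(): it folds the per-cpu counters with snmp_fold_field64(), passing offsetof(struct ipstats_mib, syncp) so that on 32-bit kernels each per-cpu sample can be taken under the writer's u64_stats seqcount, and it uses put_unaligned() because the destination buffer carries no 8-byte alignment guarantee. A hedged sketch of what such a fold is assumed to look like:

    /* Sketch (assumed, following the u64_stats_sync pattern): sum one
     * counter across CPUs, retrying a CPU's sample if its writer was
     * mid-update.  syncpoff is the offsetof() the caller passes in.
     */
    static u64 fold_field64(void __percpu *mib[], int offt, size_t syncpoff)
    {
            u64 res = 0;
            int cpu;

            for_each_possible_cpu(cpu) {
                    void *bhptr = per_cpu_ptr(mib[0], cpu);
                    struct u64_stats_sync *syncp = bhptr + syncpoff;
                    unsigned int start;
                    u64 v;

                    do {
                            start = u64_stats_fetch_begin(syncp);
                            v = *(u64 *)(bhptr + offt);
                    } while (u64_stats_fetch_retry(syncp, start));
                    res += v;
            }
            return res;
    }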
@@ -4093,11 +4120,11 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
4093 if (ifp->idev->cnf.forwarding) 4120 if (ifp->idev->cnf.forwarding)
4094 addrconf_leave_anycast(ifp); 4121 addrconf_leave_anycast(ifp);
4095 addrconf_leave_solict(ifp->idev, &ifp->addr); 4122 addrconf_leave_solict(ifp->idev, &ifp->addr);
4096 dst_hold(&ifp->rt->u.dst); 4123 dst_hold(&ifp->rt->dst);
4097 4124
4098 if (ifp->state == INET6_IFADDR_STATE_DEAD && 4125 if (ifp->state == INET6_IFADDR_STATE_DEAD &&
4099 ip6_del_rt(ifp->rt)) 4126 ip6_del_rt(ifp->rt))
4100 dst_free(&ifp->rt->u.dst); 4127 dst_free(&ifp->rt->dst);
4101 break; 4128 break;
4102 } 4129 }
4103} 4130}
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
index 8c4348cb1950..f0e774cea386 100644
--- a/net/ipv6/addrlabel.c
+++ b/net/ipv6/addrlabel.c
@@ -53,11 +53,7 @@ static struct ip6addrlbl_table
53static inline 53static inline
54struct net *ip6addrlbl_net(const struct ip6addrlbl_entry *lbl) 54struct net *ip6addrlbl_net(const struct ip6addrlbl_entry *lbl)
55{ 55{
56#ifdef CONFIG_NET_NS 56 return read_pnet(&lbl->lbl_net);
57 return lbl->lbl_net;
58#else
59 return &init_net;
60#endif
61} 57}
62 58
63/* 59/*
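The addrlabel hunk replaces an open-coded #ifdef CONFIG_NET_NS ladder with read_pnet(), which hides the same conditional behind one helper. The helper is assumed to be defined roughly as:

    static inline struct net *read_pnet(struct net * const *pnet)
    {
    #ifdef CONFIG_NET_NS
            return *pnet;           /* a namespace pointer is really stored */
    #else
            return &init_net;       /* single-namespace build */
    #endif
    }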
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index e733942dafe1..56b9bf2516f4 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -522,10 +522,10 @@ const struct proto_ops inet6_stream_ops = {
522 .shutdown = inet_shutdown, /* ok */ 522 .shutdown = inet_shutdown, /* ok */
523 .setsockopt = sock_common_setsockopt, /* ok */ 523 .setsockopt = sock_common_setsockopt, /* ok */
524 .getsockopt = sock_common_getsockopt, /* ok */ 524 .getsockopt = sock_common_getsockopt, /* ok */
525 .sendmsg = tcp_sendmsg, /* ok */ 525 .sendmsg = inet_sendmsg, /* ok */
526 .recvmsg = sock_common_recvmsg, /* ok */ 526 .recvmsg = inet_recvmsg, /* ok */
527 .mmap = sock_no_mmap, 527 .mmap = sock_no_mmap,
528 .sendpage = tcp_sendpage, 528 .sendpage = inet_sendpage,
529 .splice_read = tcp_splice_read, 529 .splice_read = tcp_splice_read,
530#ifdef CONFIG_COMPAT 530#ifdef CONFIG_COMPAT
531 .compat_setsockopt = compat_sock_common_setsockopt, 531 .compat_setsockopt = compat_sock_common_setsockopt,
@@ -549,7 +549,7 @@ const struct proto_ops inet6_dgram_ops = {
549 .setsockopt = sock_common_setsockopt, /* ok */ 549 .setsockopt = sock_common_setsockopt, /* ok */
550 .getsockopt = sock_common_getsockopt, /* ok */ 550 .getsockopt = sock_common_getsockopt, /* ok */
551 .sendmsg = inet_sendmsg, /* ok */ 551 .sendmsg = inet_sendmsg, /* ok */
552 .recvmsg = sock_common_recvmsg, /* ok */ 552 .recvmsg = inet_recvmsg, /* ok */
553 .mmap = sock_no_mmap, 553 .mmap = sock_no_mmap,
554 .sendpage = sock_no_sendpage, 554 .sendpage = sock_no_sendpage,
555#ifdef CONFIG_COMPAT 555#ifdef CONFIG_COMPAT
@@ -651,7 +651,7 @@ int inet6_sk_rebuild_header(struct sock *sk)
651 651
652 if (dst == NULL) { 652 if (dst == NULL) {
653 struct inet_sock *inet = inet_sk(sk); 653 struct inet_sock *inet = inet_sk(sk);
654 struct in6_addr *final_p = NULL, final; 654 struct in6_addr *final_p, final;
655 struct flowi fl; 655 struct flowi fl;
656 656
657 memset(&fl, 0, sizeof(fl)); 657 memset(&fl, 0, sizeof(fl));
@@ -665,12 +665,7 @@ int inet6_sk_rebuild_header(struct sock *sk)
665 fl.fl_ip_sport = inet->inet_sport; 665 fl.fl_ip_sport = inet->inet_sport;
666 security_sk_classify_flow(sk, &fl); 666 security_sk_classify_flow(sk, &fl);
667 667
668 if (np->opt && np->opt->srcrt) { 668 final_p = fl6_update_dst(&fl, np->opt, &final);
669 struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt;
670 ipv6_addr_copy(&final, &fl.fl6_dst);
671 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
672 final_p = &final;
673 }
674 669
675 err = ip6_dst_lookup(sk, &dst, &fl); 670 err = ip6_dst_lookup(sk, &dst, &fl);
676 if (err) { 671 if (err) {
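The block removed above (copy fl6_dst aside, substitute the first hop of a type-0 routing header, remember the real destination in final_p) was duplicated across several IPv6 paths; fl6_update_dst() factors it out, and the same substitution recurs in datagram.c at the end of this section. The helper's assumed shape:

    static inline struct in6_addr *fl6_update_dst(struct flowi *fl,
                                                  const struct ipv6_txoptions *opt,
                                                  struct in6_addr *final)
    {
            if (!opt || !opt->srcrt)
                    return NULL;    /* no source routing: caller gets NULL */

            ipv6_addr_copy(final, &fl->fl6_dst);
            ipv6_addr_copy(&fl->fl6_dst, ((struct rt0_hdr *)opt->srcrt)->addr);
            return final;
    }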
@@ -976,19 +971,24 @@ static void ipv6_packet_cleanup(void)
976static int __net_init ipv6_init_mibs(struct net *net) 971static int __net_init ipv6_init_mibs(struct net *net)
977{ 972{
978 if (snmp_mib_init((void __percpu **)net->mib.udp_stats_in6, 973 if (snmp_mib_init((void __percpu **)net->mib.udp_stats_in6,
979 sizeof (struct udp_mib)) < 0) 974 sizeof(struct udp_mib),
975 __alignof__(struct udp_mib)) < 0)
980 return -ENOMEM; 976 return -ENOMEM;
981 if (snmp_mib_init((void __percpu **)net->mib.udplite_stats_in6, 977 if (snmp_mib_init((void __percpu **)net->mib.udplite_stats_in6,
982 sizeof (struct udp_mib)) < 0) 978 sizeof(struct udp_mib),
979 __alignof__(struct udp_mib)) < 0)
983 goto err_udplite_mib; 980 goto err_udplite_mib;
984 if (snmp_mib_init((void __percpu **)net->mib.ipv6_statistics, 981 if (snmp_mib_init((void __percpu **)net->mib.ipv6_statistics,
985 sizeof(struct ipstats_mib)) < 0) 982 sizeof(struct ipstats_mib),
983 __alignof__(struct ipstats_mib)) < 0)
986 goto err_ip_mib; 984 goto err_ip_mib;
987 if (snmp_mib_init((void __percpu **)net->mib.icmpv6_statistics, 985 if (snmp_mib_init((void __percpu **)net->mib.icmpv6_statistics,
988 sizeof(struct icmpv6_mib)) < 0) 986 sizeof(struct icmpv6_mib),
987 __alignof__(struct icmpv6_mib)) < 0)
989 goto err_icmp_mib; 988 goto err_icmp_mib;
990 if (snmp_mib_init((void __percpu **)net->mib.icmpv6msg_statistics, 989 if (snmp_mib_init((void __percpu **)net->mib.icmpv6msg_statistics,
991 sizeof(struct icmpv6msg_mib)) < 0) 990 sizeof(struct icmpv6msg_mib),
991 __alignof__(struct icmpv6msg_mib)) < 0)
992 goto err_icmpmsg_mib; 992 goto err_icmpmsg_mib;
993 return 0; 993 return 0;
994 994
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index b5b07054508a..0e5e943446f0 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -77,41 +77,40 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, struct in6_addr *addr)
77 pac->acl_next = NULL; 77 pac->acl_next = NULL;
78 ipv6_addr_copy(&pac->acl_addr, addr); 78 ipv6_addr_copy(&pac->acl_addr, addr);
79 79
80 rcu_read_lock();
80 if (ifindex == 0) { 81 if (ifindex == 0) {
81 struct rt6_info *rt; 82 struct rt6_info *rt;
82 83
83 rt = rt6_lookup(net, addr, NULL, 0, 0); 84 rt = rt6_lookup(net, addr, NULL, 0, 0);
84 if (rt) { 85 if (rt) {
85 dev = rt->rt6i_dev; 86 dev = rt->rt6i_dev;
86 dev_hold(dev); 87 dst_release(&rt->dst);
87 dst_release(&rt->u.dst);
88 } else if (ishost) { 88 } else if (ishost) {
89 err = -EADDRNOTAVAIL; 89 err = -EADDRNOTAVAIL;
90 goto out_free_pac; 90 goto error;
91 } else { 91 } else {
92 /* router, no matching interface: just pick one */ 92 /* router, no matching interface: just pick one */
93 93 dev = dev_get_by_flags_rcu(net, IFF_UP,
94 dev = dev_get_by_flags(net, IFF_UP, IFF_UP|IFF_LOOPBACK); 94 IFF_UP | IFF_LOOPBACK);
95 } 95 }
96 } else 96 } else
97 dev = dev_get_by_index(net, ifindex); 97 dev = dev_get_by_index_rcu(net, ifindex);
98 98
99 if (dev == NULL) { 99 if (dev == NULL) {
100 err = -ENODEV; 100 err = -ENODEV;
101 goto out_free_pac; 101 goto error;
102 } 102 }
103 103
104 idev = in6_dev_get(dev); 104 idev = __in6_dev_get(dev);
105 if (!idev) { 105 if (!idev) {
106 if (ifindex) 106 if (ifindex)
107 err = -ENODEV; 107 err = -ENODEV;
108 else 108 else
109 err = -EADDRNOTAVAIL; 109 err = -EADDRNOTAVAIL;
110 goto out_dev_put; 110 goto error;
111 } 111 }
112 /* reset ishost, now that we have a specific device */ 112 /* reset ishost, now that we have a specific device */
113 ishost = !idev->cnf.forwarding; 113 ishost = !idev->cnf.forwarding;
114 in6_dev_put(idev);
115 114
116 pac->acl_ifindex = dev->ifindex; 115 pac->acl_ifindex = dev->ifindex;
117 116
@@ -124,26 +123,22 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, struct in6_addr *addr)
124 if (ishost) 123 if (ishost)
125 err = -EADDRNOTAVAIL; 124 err = -EADDRNOTAVAIL;
126 if (err) 125 if (err)
127 goto out_dev_put; 126 goto error;
128 } 127 }
129 128
130 err = ipv6_dev_ac_inc(dev, addr); 129 err = ipv6_dev_ac_inc(dev, addr);
131 if (err) 130 if (!err) {
132 goto out_dev_put; 131 write_lock_bh(&ipv6_sk_ac_lock);
133 132 pac->acl_next = np->ipv6_ac_list;
134 write_lock_bh(&ipv6_sk_ac_lock); 133 np->ipv6_ac_list = pac;
135 pac->acl_next = np->ipv6_ac_list; 134 write_unlock_bh(&ipv6_sk_ac_lock);
136 np->ipv6_ac_list = pac; 135 pac = NULL;
137 write_unlock_bh(&ipv6_sk_ac_lock); 136 }
138
139 dev_put(dev);
140
141 return 0;
142 137
143out_dev_put: 138error:
144 dev_put(dev); 139 rcu_read_unlock();
145out_free_pac: 140 if (pac)
146 sock_kfree_s(sk, pac, sizeof(*pac)); 141 sock_kfree_s(sk, pac, sizeof(*pac));
147 return err; 142 return err;
148} 143}
149 144
@@ -176,11 +171,12 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, struct in6_addr *addr)
176 171
177 write_unlock_bh(&ipv6_sk_ac_lock); 172 write_unlock_bh(&ipv6_sk_ac_lock);
178 173
179 dev = dev_get_by_index(net, pac->acl_ifindex); 174 rcu_read_lock();
180 if (dev) { 175 dev = dev_get_by_index_rcu(net, pac->acl_ifindex);
176 if (dev)
181 ipv6_dev_ac_dec(dev, &pac->acl_addr); 177 ipv6_dev_ac_dec(dev, &pac->acl_addr);
182 dev_put(dev); 178 rcu_read_unlock();
183 } 179
184 sock_kfree_s(sk, pac, sizeof(*pac)); 180 sock_kfree_s(sk, pac, sizeof(*pac));
185 return 0; 181 return 0;
186} 182}
@@ -199,13 +195,12 @@ void ipv6_sock_ac_close(struct sock *sk)
199 write_unlock_bh(&ipv6_sk_ac_lock); 195 write_unlock_bh(&ipv6_sk_ac_lock);
200 196
201 prev_index = 0; 197 prev_index = 0;
198 rcu_read_lock();
202 while (pac) { 199 while (pac) {
203 struct ipv6_ac_socklist *next = pac->acl_next; 200 struct ipv6_ac_socklist *next = pac->acl_next;
204 201
205 if (pac->acl_ifindex != prev_index) { 202 if (pac->acl_ifindex != prev_index) {
206 if (dev) 203 dev = dev_get_by_index_rcu(net, pac->acl_ifindex);
207 dev_put(dev);
208 dev = dev_get_by_index(net, pac->acl_ifindex);
209 prev_index = pac->acl_ifindex; 204 prev_index = pac->acl_ifindex;
210 } 205 }
211 if (dev) 206 if (dev)
@@ -213,8 +208,7 @@ void ipv6_sock_ac_close(struct sock *sk)
213 sock_kfree_s(sk, pac, sizeof(*pac)); 208 sock_kfree_s(sk, pac, sizeof(*pac));
214 pac = next; 209 pac = next;
215 } 210 }
216 if (dev) 211 rcu_read_unlock();
217 dev_put(dev);
218} 212}
219 213
220#if 0 214#if 0
@@ -250,7 +244,7 @@ static void aca_put(struct ifacaddr6 *ac)
250{ 244{
251 if (atomic_dec_and_test(&ac->aca_refcnt)) { 245 if (atomic_dec_and_test(&ac->aca_refcnt)) {
252 in6_dev_put(ac->aca_idev); 246 in6_dev_put(ac->aca_idev);
253 dst_release(&ac->aca_rt->u.dst); 247 dst_release(&ac->aca_rt->dst);
254 kfree(ac); 248 kfree(ac);
255 } 249 }
256} 250}
@@ -356,40 +350,39 @@ int __ipv6_dev_ac_dec(struct inet6_dev *idev, struct in6_addr *addr)
356 write_unlock_bh(&idev->lock); 350 write_unlock_bh(&idev->lock);
357 addrconf_leave_solict(idev, &aca->aca_addr); 351 addrconf_leave_solict(idev, &aca->aca_addr);
358 352
359 dst_hold(&aca->aca_rt->u.dst); 353 dst_hold(&aca->aca_rt->dst);
360 ip6_del_rt(aca->aca_rt); 354 ip6_del_rt(aca->aca_rt);
361 355
362 aca_put(aca); 356 aca_put(aca);
363 return 0; 357 return 0;
364} 358}
365 359
360/* called with rcu_read_lock() */
366static int ipv6_dev_ac_dec(struct net_device *dev, struct in6_addr *addr) 361static int ipv6_dev_ac_dec(struct net_device *dev, struct in6_addr *addr)
367{ 362{
368 int ret; 363 struct inet6_dev *idev = __in6_dev_get(dev);
369 struct inet6_dev *idev = in6_dev_get(dev); 364
370 if (idev == NULL) 365 if (idev == NULL)
371 return -ENODEV; 366 return -ENODEV;
372 ret = __ipv6_dev_ac_dec(idev, addr); 367 return __ipv6_dev_ac_dec(idev, addr);
373 in6_dev_put(idev);
374 return ret;
375} 368}
376 369
377/* 370/*
378 * check if the interface has this anycast address 371 * check if the interface has this anycast address
372 * called with rcu_read_lock()
379 */ 373 */
380static int ipv6_chk_acast_dev(struct net_device *dev, struct in6_addr *addr) 374static int ipv6_chk_acast_dev(struct net_device *dev, struct in6_addr *addr)
381{ 375{
382 struct inet6_dev *idev; 376 struct inet6_dev *idev;
383 struct ifacaddr6 *aca; 377 struct ifacaddr6 *aca;
384 378
385 idev = in6_dev_get(dev); 379 idev = __in6_dev_get(dev);
386 if (idev) { 380 if (idev) {
387 read_lock_bh(&idev->lock); 381 read_lock_bh(&idev->lock);
388 for (aca = idev->ac_list; aca; aca = aca->aca_next) 382 for (aca = idev->ac_list; aca; aca = aca->aca_next)
389 if (ipv6_addr_equal(&aca->aca_addr, addr)) 383 if (ipv6_addr_equal(&aca->aca_addr, addr))
390 break; 384 break;
391 read_unlock_bh(&idev->lock); 385 read_unlock_bh(&idev->lock);
392 in6_dev_put(idev);
393 return aca != NULL; 386 return aca != NULL;
394 } 387 }
395 return 0; 388 return 0;
@@ -403,14 +396,15 @@ int ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
403{ 396{
404 int found = 0; 397 int found = 0;
405 398
406 if (dev)
407 return ipv6_chk_acast_dev(dev, addr);
408 rcu_read_lock(); 399 rcu_read_lock();
409 for_each_netdev_rcu(net, dev) 400 if (dev)
410 if (ipv6_chk_acast_dev(dev, addr)) { 401 found = ipv6_chk_acast_dev(dev, addr);
411 found = 1; 402 else
412 break; 403 for_each_netdev_rcu(net, dev)
413 } 404 if (ipv6_chk_acast_dev(dev, addr)) {
405 found = 1;
406 break;
407 }
414 rcu_read_unlock(); 408 rcu_read_unlock();
415 return found; 409 return found;
416} 410}
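
The anycast hunks above all apply one conversion: a reference-counted lookup
(dev_get_by_index()/dev_put(), in6_dev_get()/in6_dev_put()) becomes a lookup
inside an rcu_read_lock() section using the _rcu/__ variants, with the error
paths collapsed onto a single label. A minimal sketch of the resulting idiom
(example_lookup is a hypothetical name; the APIs are the ones used in the
diff):

#include <linux/netdevice.h>
#include <net/addrconf.h>

static int example_lookup(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct inet6_dev *idev;
	int err = 0;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);	/* no refcount taken */
	if (!dev) {
		err = -ENODEV;
		goto out;
	}
	idev = __in6_dev_get(dev);	/* pointer valid only under RCU */
	if (!idev) {
		err = -EADDRNOTAVAIL;
		goto out;
	}
	/* ... use dev/idev; no dev_put()/in6_dev_put() on exit ... */
out:
	rcu_read_unlock();
	return err;
}
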
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 712684687c9a..7d929a22cbc2 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -38,10 +38,11 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
38 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr; 38 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
39 struct inet_sock *inet = inet_sk(sk); 39 struct inet_sock *inet = inet_sk(sk);
40 struct ipv6_pinfo *np = inet6_sk(sk); 40 struct ipv6_pinfo *np = inet6_sk(sk);
41 struct in6_addr *daddr, *final_p = NULL, final; 41 struct in6_addr *daddr, *final_p, final;
42 struct dst_entry *dst; 42 struct dst_entry *dst;
43 struct flowi fl; 43 struct flowi fl;
44 struct ip6_flowlabel *flowlabel = NULL; 44 struct ip6_flowlabel *flowlabel = NULL;
45 struct ipv6_txoptions *opt;
45 int addr_type; 46 int addr_type;
46 int err; 47 int err;
47 48
@@ -155,19 +156,8 @@ ipv4_connected:
155 156
156 security_sk_classify_flow(sk, &fl); 157 security_sk_classify_flow(sk, &fl);
157 158
158 if (flowlabel) { 159 opt = flowlabel ? flowlabel->opt : np->opt;
159 if (flowlabel->opt && flowlabel->opt->srcrt) { 160 final_p = fl6_update_dst(&fl, opt, &final);
160 struct rt0_hdr *rt0 = (struct rt0_hdr *) flowlabel->opt->srcrt;
161 ipv6_addr_copy(&final, &fl.fl6_dst);
162 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
163 final_p = &final;
164 }
165 } else if (np->opt && np->opt->srcrt) {
166 struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
167 ipv6_addr_copy(&final, &fl.fl6_dst);
168 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
169 final_p = &final;
170 }
171 161
172 err = ip6_dst_lookup(sk, &dst, &fl); 162 err = ip6_dst_lookup(sk, &dst, &fl);
173 if (err) 163 if (err)
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 8a659f92d17a..262f105d23b9 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -312,6 +312,7 @@ static int ipv6_destopt_rcv(struct sk_buff *skb)
312 Routing header. 312 Routing header.
313 ********************************/ 313 ********************************/
314 314
315/* called with rcu_read_lock() */
315static int ipv6_rthdr_rcv(struct sk_buff *skb) 316static int ipv6_rthdr_rcv(struct sk_buff *skb)
316{ 317{
317 struct inet6_skb_parm *opt = IP6CB(skb); 318 struct inet6_skb_parm *opt = IP6CB(skb);
@@ -324,12 +325,9 @@ static int ipv6_rthdr_rcv(struct sk_buff *skb)
324 struct net *net = dev_net(skb->dev); 325 struct net *net = dev_net(skb->dev);
325 int accept_source_route = net->ipv6.devconf_all->accept_source_route; 326 int accept_source_route = net->ipv6.devconf_all->accept_source_route;
326 327
327 idev = in6_dev_get(skb->dev); 328 idev = __in6_dev_get(skb->dev);
328 if (idev) { 329 if (idev && accept_source_route > idev->cnf.accept_source_route)
329 if (accept_source_route > idev->cnf.accept_source_route) 330 accept_source_route = idev->cnf.accept_source_route;
330 accept_source_route = idev->cnf.accept_source_route;
331 in6_dev_put(idev);
332 }
333 331
334 if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) || 332 if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
335 !pskb_may_pull(skb, (skb_transport_offset(skb) + 333 !pskb_may_pull(skb, (skb_transport_offset(skb) +
@@ -874,3 +872,27 @@ struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
874 return opt; 872 return opt;
875} 873}
876 874
875/**
876 * fl6_update_dst - update flowi destination address with info given
877 * by srcrt option, if any.
878 *
879 * @fl: flowi for which fl6_dst is to be updated
880 * @opt: struct ipv6_txoptions in which to look for srcrt opt
881 * @orig: copy of original fl6_dst address if modified
882 *
 883 * Returns NULL if there are no txoptions or no srcrt; otherwise returns
 884 * orig, with the initial value of fl->fl6_dst stored in it
885 */
886struct in6_addr *fl6_update_dst(struct flowi *fl,
887 const struct ipv6_txoptions *opt,
888 struct in6_addr *orig)
889{
890 if (!opt || !opt->srcrt)
891 return NULL;
892
893 ipv6_addr_copy(orig, &fl->fl6_dst);
894 ipv6_addr_copy(&fl->fl6_dst, ((struct rt0_hdr *)opt->srcrt)->addr);
895 return orig;
896}
897
898EXPORT_SYMBOL_GPL(fl6_update_dst);
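
fl6_update_dst() replaces the open-coded srcrt handling deleted from
datagram.c above (and from inet6_connection_sock.c below); a converted call
site reduces to one option selection plus one call. A sketch of that shape
(example_fold_srcrt is a hypothetical wrapper; fl and flowlabel are assumed
to be set up as in the datagram.c hunk):

#include <net/ipv6.h>

static struct in6_addr *example_fold_srcrt(struct sock *sk,
					   struct ip6_flowlabel *flowlabel,
					   struct flowi *fl,
					   struct in6_addr *final)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt = flowlabel ? flowlabel->opt : np->opt;

	/* On return, fl->fl6_dst holds the first srcrt hop (if any) and
	 * *final keeps the original destination for the final lookup. */
	return fl6_update_dst(fl, opt, final);
}
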
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index 8e44f8f9c188..b1108ede18e1 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -43,8 +43,8 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi *fl,
43 if (arg.result) 43 if (arg.result)
44 return arg.result; 44 return arg.result;
45 45
46 dst_hold(&net->ipv6.ip6_null_entry->u.dst); 46 dst_hold(&net->ipv6.ip6_null_entry->dst);
47 return &net->ipv6.ip6_null_entry->u.dst; 47 return &net->ipv6.ip6_null_entry->dst;
48} 48}
49 49
50static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp, 50static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
@@ -86,7 +86,7 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
86 struct in6_addr saddr; 86 struct in6_addr saddr;
87 87
88 if (ipv6_dev_get_saddr(net, 88 if (ipv6_dev_get_saddr(net,
89 ip6_dst_idev(&rt->u.dst)->dev, 89 ip6_dst_idev(&rt->dst)->dev,
90 &flp->fl6_dst, 90 &flp->fl6_dst,
91 rt6_flags2srcprefs(flags), 91 rt6_flags2srcprefs(flags),
92 &saddr)) 92 &saddr))
@@ -99,12 +99,12 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
99 goto out; 99 goto out;
100 } 100 }
101again: 101again:
102 dst_release(&rt->u.dst); 102 dst_release(&rt->dst);
103 rt = NULL; 103 rt = NULL;
104 goto out; 104 goto out;
105 105
106discard_pkt: 106discard_pkt:
107 dst_hold(&rt->u.dst); 107 dst_hold(&rt->dst);
108out: 108out:
109 arg->result = rt; 109 arg->result = rt;
110 return rt == NULL ? -EAGAIN : 0; 110 return rt == NULL ? -EAGAIN : 0;
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 0c5e3c3b7fd5..8a1628023bd1 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -185,7 +185,7 @@ int inet6_csk_xmit(struct sk_buff *skb)
185 struct ipv6_pinfo *np = inet6_sk(sk); 185 struct ipv6_pinfo *np = inet6_sk(sk);
186 struct flowi fl; 186 struct flowi fl;
187 struct dst_entry *dst; 187 struct dst_entry *dst;
188 struct in6_addr *final_p = NULL, final; 188 struct in6_addr *final_p, final;
189 189
190 memset(&fl, 0, sizeof(fl)); 190 memset(&fl, 0, sizeof(fl));
191 fl.proto = sk->sk_protocol; 191 fl.proto = sk->sk_protocol;
@@ -199,12 +199,7 @@ int inet6_csk_xmit(struct sk_buff *skb)
199 fl.fl_ip_dport = inet->inet_dport; 199 fl.fl_ip_dport = inet->inet_dport;
200 security_sk_classify_flow(sk, &fl); 200 security_sk_classify_flow(sk, &fl);
201 201
202 if (np->opt && np->opt->srcrt) { 202 final_p = fl6_update_dst(&fl, np->opt, &final);
203 struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
204 ipv6_addr_copy(&final, &fl.fl6_dst);
205 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
206 final_p = &final;
207 }
208 203
209 dst = __inet6_csk_dst_check(sk, np->dst_cookie); 204 dst = __inet6_csk_dst_check(sk, np->dst_cookie);
210 205
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 92a122b7795d..b6a585909d35 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -165,7 +165,7 @@ static __inline__ void node_free(struct fib6_node * fn)
165static __inline__ void rt6_release(struct rt6_info *rt) 165static __inline__ void rt6_release(struct rt6_info *rt)
166{ 166{
167 if (atomic_dec_and_test(&rt->rt6i_ref)) 167 if (atomic_dec_and_test(&rt->rt6i_ref))
168 dst_free(&rt->u.dst); 168 dst_free(&rt->dst);
169} 169}
170 170
171static void fib6_link_table(struct net *net, struct fib6_table *tb) 171static void fib6_link_table(struct net *net, struct fib6_table *tb)
@@ -278,7 +278,7 @@ static int fib6_dump_node(struct fib6_walker_t *w)
278 int res; 278 int res;
279 struct rt6_info *rt; 279 struct rt6_info *rt;
280 280
281 for (rt = w->leaf; rt; rt = rt->u.dst.rt6_next) { 281 for (rt = w->leaf; rt; rt = rt->dst.rt6_next) {
282 res = rt6_dump_route(rt, w->args); 282 res = rt6_dump_route(rt, w->args);
283 if (res < 0) { 283 if (res < 0) {
284 /* Frame is full, suspend walking */ 284 /* Frame is full, suspend walking */
@@ -619,7 +619,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
619 619
620 ins = &fn->leaf; 620 ins = &fn->leaf;
621 621
622 for (iter = fn->leaf; iter; iter=iter->u.dst.rt6_next) { 622 for (iter = fn->leaf; iter; iter=iter->dst.rt6_next) {
623 /* 623 /*
624 * Search for duplicates 624 * Search for duplicates
625 */ 625 */
@@ -647,7 +647,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
647 if (iter->rt6i_metric > rt->rt6i_metric) 647 if (iter->rt6i_metric > rt->rt6i_metric)
648 break; 648 break;
649 649
650 ins = &iter->u.dst.rt6_next; 650 ins = &iter->dst.rt6_next;
651 } 651 }
652 652
653 /* Reset round-robin state, if necessary */ 653 /* Reset round-robin state, if necessary */
@@ -658,7 +658,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
658 * insert node 658 * insert node
659 */ 659 */
660 660
661 rt->u.dst.rt6_next = iter; 661 rt->dst.rt6_next = iter;
662 *ins = rt; 662 *ins = rt;
663 rt->rt6i_node = fn; 663 rt->rt6i_node = fn;
664 atomic_inc(&rt->rt6i_ref); 664 atomic_inc(&rt->rt6i_ref);
@@ -799,7 +799,7 @@ out:
799 atomic_inc(&pn->leaf->rt6i_ref); 799 atomic_inc(&pn->leaf->rt6i_ref);
800 } 800 }
801#endif 801#endif
802 dst_free(&rt->u.dst); 802 dst_free(&rt->dst);
803 } 803 }
804 return err; 804 return err;
805 805
@@ -810,7 +810,7 @@ out:
810st_failure: 810st_failure:
811 if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT))) 811 if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT)))
812 fib6_repair_tree(info->nl_net, fn); 812 fib6_repair_tree(info->nl_net, fn);
813 dst_free(&rt->u.dst); 813 dst_free(&rt->dst);
814 return err; 814 return err;
815#endif 815#endif
816} 816}
@@ -1108,7 +1108,7 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
1108 RT6_TRACE("fib6_del_route\n"); 1108 RT6_TRACE("fib6_del_route\n");
1109 1109
1110 /* Unlink it */ 1110 /* Unlink it */
1111 *rtp = rt->u.dst.rt6_next; 1111 *rtp = rt->dst.rt6_next;
1112 rt->rt6i_node = NULL; 1112 rt->rt6i_node = NULL;
1113 net->ipv6.rt6_stats->fib_rt_entries--; 1113 net->ipv6.rt6_stats->fib_rt_entries--;
1114 net->ipv6.rt6_stats->fib_discarded_routes++; 1114 net->ipv6.rt6_stats->fib_discarded_routes++;
@@ -1122,14 +1122,14 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
1122 FOR_WALKERS(w) { 1122 FOR_WALKERS(w) {
1123 if (w->state == FWS_C && w->leaf == rt) { 1123 if (w->state == FWS_C && w->leaf == rt) {
1124 RT6_TRACE("walker %p adjusted by delroute\n", w); 1124 RT6_TRACE("walker %p adjusted by delroute\n", w);
1125 w->leaf = rt->u.dst.rt6_next; 1125 w->leaf = rt->dst.rt6_next;
1126 if (w->leaf == NULL) 1126 if (w->leaf == NULL)
1127 w->state = FWS_U; 1127 w->state = FWS_U;
1128 } 1128 }
1129 } 1129 }
1130 read_unlock(&fib6_walker_lock); 1130 read_unlock(&fib6_walker_lock);
1131 1131
1132 rt->u.dst.rt6_next = NULL; 1132 rt->dst.rt6_next = NULL;
1133 1133
1134 /* If it was last route, expunge its radix tree node */ 1134 /* If it was last route, expunge its radix tree node */
1135 if (fn->leaf == NULL) { 1135 if (fn->leaf == NULL) {
@@ -1168,7 +1168,7 @@ int fib6_del(struct rt6_info *rt, struct nl_info *info)
1168 struct rt6_info **rtp; 1168 struct rt6_info **rtp;
1169 1169
1170#if RT6_DEBUG >= 2 1170#if RT6_DEBUG >= 2
1171 if (rt->u.dst.obsolete>0) { 1171 if (rt->dst.obsolete>0) {
1172 WARN_ON(fn != NULL); 1172 WARN_ON(fn != NULL);
1173 return -ENOENT; 1173 return -ENOENT;
1174 } 1174 }
@@ -1195,7 +1195,7 @@ int fib6_del(struct rt6_info *rt, struct nl_info *info)
1195 * Walk the leaf entries looking for ourself 1195 * Walk the leaf entries looking for ourself
1196 */ 1196 */
1197 1197
1198 for (rtp = &fn->leaf; *rtp; rtp = &(*rtp)->u.dst.rt6_next) { 1198 for (rtp = &fn->leaf; *rtp; rtp = &(*rtp)->dst.rt6_next) {
1199 if (*rtp == rt) { 1199 if (*rtp == rt) {
1200 fib6_del_route(fn, rtp, info); 1200 fib6_del_route(fn, rtp, info);
1201 return 0; 1201 return 0;
@@ -1334,7 +1334,7 @@ static int fib6_clean_node(struct fib6_walker_t *w)
1334 .nl_net = c->net, 1334 .nl_net = c->net,
1335 }; 1335 };
1336 1336
1337 for (rt = w->leaf; rt; rt = rt->u.dst.rt6_next) { 1337 for (rt = w->leaf; rt; rt = rt->dst.rt6_next) {
1338 res = c->func(rt, c->arg); 1338 res = c->func(rt, c->arg);
1339 if (res < 0) { 1339 if (res < 0) {
1340 w->leaf = rt; 1340 w->leaf = rt;
@@ -1448,8 +1448,8 @@ static int fib6_age(struct rt6_info *rt, void *arg)
1448 } 1448 }
1449 gc_args.more++; 1449 gc_args.more++;
1450 } else if (rt->rt6i_flags & RTF_CACHE) { 1450 } else if (rt->rt6i_flags & RTF_CACHE) {
1451 if (atomic_read(&rt->u.dst.__refcnt) == 0 && 1451 if (atomic_read(&rt->dst.__refcnt) == 0 &&
1452 time_after_eq(now, rt->u.dst.lastuse + gc_args.timeout)) { 1452 time_after_eq(now, rt->dst.lastuse + gc_args.timeout)) {
1453 RT6_TRACE("aging clone %p\n", rt); 1453 RT6_TRACE("aging clone %p\n", rt);
1454 return -1; 1454 return -1;
1455 } else if ((rt->rt6i_flags & RTF_GATEWAY) && 1455 } else if ((rt->rt6i_flags & RTF_GATEWAY) &&
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 89425af0684c..d40b330c0ee6 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -698,7 +698,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
698 ipv6_hdr(skb)->payload_len = htons(first_len - 698 ipv6_hdr(skb)->payload_len = htons(first_len -
699 sizeof(struct ipv6hdr)); 699 sizeof(struct ipv6hdr));
700 700
701 dst_hold(&rt->u.dst); 701 dst_hold(&rt->dst);
702 702
703 for (;;) { 703 for (;;) {
704 /* Prepare header of the next frame, 704 /* Prepare header of the next frame,
@@ -726,7 +726,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
726 726
727 err = output(skb); 727 err = output(skb);
728 if(!err) 728 if(!err)
729 IP6_INC_STATS(net, ip6_dst_idev(&rt->u.dst), 729 IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
730 IPSTATS_MIB_FRAGCREATES); 730 IPSTATS_MIB_FRAGCREATES);
731 731
732 if (err || !frag) 732 if (err || !frag)
@@ -740,9 +740,9 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
740 kfree(tmp_hdr); 740 kfree(tmp_hdr);
741 741
742 if (err == 0) { 742 if (err == 0) {
743 IP6_INC_STATS(net, ip6_dst_idev(&rt->u.dst), 743 IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
744 IPSTATS_MIB_FRAGOKS); 744 IPSTATS_MIB_FRAGOKS);
745 dst_release(&rt->u.dst); 745 dst_release(&rt->dst);
746 return 0; 746 return 0;
747 } 747 }
748 748
@@ -752,9 +752,9 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
752 frag = skb; 752 frag = skb;
753 } 753 }
754 754
755 IP6_INC_STATS(net, ip6_dst_idev(&rt->u.dst), 755 IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
756 IPSTATS_MIB_FRAGFAILS); 756 IPSTATS_MIB_FRAGFAILS);
757 dst_release(&rt->u.dst); 757 dst_release(&rt->dst);
758 return err; 758 return err;
759 } 759 }
760 760
@@ -785,7 +785,7 @@ slow_path:
785 * Allocate buffer. 785 * Allocate buffer.
786 */ 786 */
787 787
788 if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_ALLOCATED_SPACE(rt->u.dst.dev), GFP_ATOMIC)) == NULL) { 788 if ((frag = alloc_skb(len+hlen+sizeof(struct frag_hdr)+LL_ALLOCATED_SPACE(rt->dst.dev), GFP_ATOMIC)) == NULL) {
789 NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n"); 789 NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
790 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), 790 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
791 IPSTATS_MIB_FRAGFAILS); 791 IPSTATS_MIB_FRAGFAILS);
@@ -798,7 +798,7 @@ slow_path:
798 */ 798 */
799 799
800 ip6_copy_metadata(frag, skb); 800 ip6_copy_metadata(frag, skb);
801 skb_reserve(frag, LL_RESERVED_SPACE(rt->u.dst.dev)); 801 skb_reserve(frag, LL_RESERVED_SPACE(rt->dst.dev));
802 skb_put(frag, len + hlen + sizeof(struct frag_hdr)); 802 skb_put(frag, len + hlen + sizeof(struct frag_hdr));
803 skb_reset_network_header(frag); 803 skb_reset_network_header(frag);
804 fh = (struct frag_hdr *)(skb_network_header(frag) + hlen); 804 fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
@@ -1156,24 +1156,24 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1156 1156
1157 /* need source address above miyazawa*/ 1157 /* need source address above miyazawa*/
1158 } 1158 }
1159 dst_hold(&rt->u.dst); 1159 dst_hold(&rt->dst);
1160 inet->cork.dst = &rt->u.dst; 1160 inet->cork.dst = &rt->dst;
1161 inet->cork.fl = *fl; 1161 inet->cork.fl = *fl;
1162 np->cork.hop_limit = hlimit; 1162 np->cork.hop_limit = hlimit;
1163 np->cork.tclass = tclass; 1163 np->cork.tclass = tclass;
1164 mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ? 1164 mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
1165 rt->u.dst.dev->mtu : dst_mtu(rt->u.dst.path); 1165 rt->dst.dev->mtu : dst_mtu(rt->dst.path);
1166 if (np->frag_size < mtu) { 1166 if (np->frag_size < mtu) {
1167 if (np->frag_size) 1167 if (np->frag_size)
1168 mtu = np->frag_size; 1168 mtu = np->frag_size;
1169 } 1169 }
1170 inet->cork.fragsize = mtu; 1170 inet->cork.fragsize = mtu;
1171 if (dst_allfrag(rt->u.dst.path)) 1171 if (dst_allfrag(rt->dst.path))
1172 inet->cork.flags |= IPCORK_ALLFRAG; 1172 inet->cork.flags |= IPCORK_ALLFRAG;
1173 inet->cork.length = 0; 1173 inet->cork.length = 0;
1174 sk->sk_sndmsg_page = NULL; 1174 sk->sk_sndmsg_page = NULL;
1175 sk->sk_sndmsg_off = 0; 1175 sk->sk_sndmsg_off = 0;
1176 exthdrlen = rt->u.dst.header_len + (opt ? opt->opt_flen : 0) - 1176 exthdrlen = rt->dst.header_len + (opt ? opt->opt_flen : 0) -
1177 rt->rt6i_nfheader_len; 1177 rt->rt6i_nfheader_len;
1178 length += exthdrlen; 1178 length += exthdrlen;
1179 transhdrlen += exthdrlen; 1179 transhdrlen += exthdrlen;
@@ -1186,7 +1186,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1186 mtu = inet->cork.fragsize; 1186 mtu = inet->cork.fragsize;
1187 } 1187 }
1188 1188
1189 hh_len = LL_RESERVED_SPACE(rt->u.dst.dev); 1189 hh_len = LL_RESERVED_SPACE(rt->dst.dev);
1190 1190
1191 fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len + 1191 fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
1192 (opt ? opt->opt_nflen : 0); 1192 (opt ? opt->opt_nflen : 0);
@@ -1224,7 +1224,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1224 } 1224 }
1225 1225
1226 if (proto == IPPROTO_UDP && 1226 if (proto == IPPROTO_UDP &&
1227 (rt->u.dst.dev->features & NETIF_F_UFO)) { 1227 (rt->dst.dev->features & NETIF_F_UFO)) {
1228 1228
1229 err = ip6_ufo_append_data(sk, getfrag, from, length, 1229 err = ip6_ufo_append_data(sk, getfrag, from, length,
1230 hh_len, fragheaderlen, 1230 hh_len, fragheaderlen,
@@ -1270,7 +1270,7 @@ alloc_new_skb:
1270 1270
1271 fraglen = datalen + fragheaderlen; 1271 fraglen = datalen + fragheaderlen;
1272 if ((flags & MSG_MORE) && 1272 if ((flags & MSG_MORE) &&
1273 !(rt->u.dst.dev->features&NETIF_F_SG)) 1273 !(rt->dst.dev->features&NETIF_F_SG))
1274 alloclen = mtu; 1274 alloclen = mtu;
1275 else 1275 else
1276 alloclen = datalen + fragheaderlen; 1276 alloclen = datalen + fragheaderlen;
@@ -1281,7 +1281,7 @@ alloc_new_skb:
1281 * because we have no idea if we're the last one. 1281 * because we have no idea if we're the last one.
1282 */ 1282 */
1283 if (datalen == length + fraggap) 1283 if (datalen == length + fraggap)
1284 alloclen += rt->u.dst.trailer_len; 1284 alloclen += rt->dst.trailer_len;
1285 1285
1286 /* 1286 /*
1287 * We just reserve space for fragment header. 1287 * We just reserve space for fragment header.
@@ -1358,7 +1358,7 @@ alloc_new_skb:
1358 if (copy > length) 1358 if (copy > length)
1359 copy = length; 1359 copy = length;
1360 1360
1361 if (!(rt->u.dst.dev->features&NETIF_F_SG)) { 1361 if (!(rt->dst.dev->features&NETIF_F_SG)) {
1362 unsigned int off; 1362 unsigned int off;
1363 1363
1364 off = skb->len; 1364 off = skb->len;
@@ -1503,7 +1503,7 @@ int ip6_push_pending_frames(struct sock *sk)
1503 skb->priority = sk->sk_priority; 1503 skb->priority = sk->sk_priority;
1504 skb->mark = sk->sk_mark; 1504 skb->mark = sk->sk_mark;
1505 1505
1506 skb_dst_set(skb, dst_clone(&rt->u.dst)); 1506 skb_dst_set(skb, dst_clone(&rt->dst));
1507 IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len); 1507 IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
1508 if (proto == IPPROTO_ICMPV6) { 1508 if (proto == IPPROTO_ICMPV6) {
1509 struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb)); 1509 struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 8f39893d8081..0fd027f3f47e 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -552,7 +552,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
552 if (ip_route_output_key(dev_net(skb->dev), &rt, &fl)) 552 if (ip_route_output_key(dev_net(skb->dev), &rt, &fl))
553 goto out; 553 goto out;
554 554
555 skb2->dev = rt->u.dst.dev; 555 skb2->dev = rt->dst.dev;
556 556
557 /* route "incoming" packet */ 557 /* route "incoming" packet */
558 if (rt->rt_flags & RTCF_LOCAL) { 558 if (rt->rt_flags & RTCF_LOCAL) {
@@ -562,7 +562,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
562 fl.fl4_src = eiph->saddr; 562 fl.fl4_src = eiph->saddr;
563 fl.fl4_tos = eiph->tos; 563 fl.fl4_tos = eiph->tos;
564 if (ip_route_output_key(dev_net(skb->dev), &rt, &fl) || 564 if (ip_route_output_key(dev_net(skb->dev), &rt, &fl) ||
565 rt->u.dst.dev->type != ARPHRD_TUNNEL) { 565 rt->dst.dev->type != ARPHRD_TUNNEL) {
566 ip_rt_put(rt); 566 ip_rt_put(rt);
567 goto out; 567 goto out;
568 } 568 }
@@ -626,7 +626,7 @@ ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
626 icmpv6_send(skb2, rel_type, rel_code, rel_info); 626 icmpv6_send(skb2, rel_type, rel_code, rel_info);
627 627
628 if (rt) 628 if (rt)
629 dst_release(&rt->u.dst); 629 dst_release(&rt->dst);
630 630
631 kfree_skb(skb2); 631 kfree_skb(skb2);
632 } 632 }
@@ -1135,7 +1135,7 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
1135 if (dev->mtu < IPV6_MIN_MTU) 1135 if (dev->mtu < IPV6_MIN_MTU)
1136 dev->mtu = IPV6_MIN_MTU; 1136 dev->mtu = IPV6_MIN_MTU;
1137 } 1137 }
1138 dst_release(&rt->u.dst); 1138 dst_release(&rt->dst);
1139 } 1139 }
1140} 1140}
1141 1141
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index bd43f0152c21..a7f66bc8f0b0 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -55,8 +55,6 @@
55 55
56#include <asm/uaccess.h> 56#include <asm/uaccess.h>
57 57
58DEFINE_SNMP_STAT(struct ipstats_mib, ipv6_statistics) __read_mostly;
59
60struct ip6_ra_chain *ip6_ra_chain; 58struct ip6_ra_chain *ip6_ra_chain;
61DEFINE_RWLOCK(ip6_ra_lock); 59DEFINE_RWLOCK(ip6_ra_lock);
62 60
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index ab1622d7d409..d1444b95ad7e 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -152,18 +152,19 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
152 mc_lst->next = NULL; 152 mc_lst->next = NULL;
153 ipv6_addr_copy(&mc_lst->addr, addr); 153 ipv6_addr_copy(&mc_lst->addr, addr);
154 154
155 rcu_read_lock();
155 if (ifindex == 0) { 156 if (ifindex == 0) {
156 struct rt6_info *rt; 157 struct rt6_info *rt;
157 rt = rt6_lookup(net, addr, NULL, 0, 0); 158 rt = rt6_lookup(net, addr, NULL, 0, 0);
158 if (rt) { 159 if (rt) {
159 dev = rt->rt6i_dev; 160 dev = rt->rt6i_dev;
160 dev_hold(dev); 161 dst_release(&rt->dst);
161 dst_release(&rt->u.dst);
162 } 162 }
163 } else 163 } else
164 dev = dev_get_by_index(net, ifindex); 164 dev = dev_get_by_index_rcu(net, ifindex);
165 165
166 if (dev == NULL) { 166 if (dev == NULL) {
167 rcu_read_unlock();
167 sock_kfree_s(sk, mc_lst, sizeof(*mc_lst)); 168 sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
168 return -ENODEV; 169 return -ENODEV;
169 } 170 }
@@ -180,8 +181,8 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
180 err = ipv6_dev_mc_inc(dev, addr); 181 err = ipv6_dev_mc_inc(dev, addr);
181 182
182 if (err) { 183 if (err) {
184 rcu_read_unlock();
183 sock_kfree_s(sk, mc_lst, sizeof(*mc_lst)); 185 sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
184 dev_put(dev);
185 return err; 186 return err;
186 } 187 }
187 188
@@ -190,7 +191,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
190 np->ipv6_mc_list = mc_lst; 191 np->ipv6_mc_list = mc_lst;
191 write_unlock_bh(&ipv6_sk_mc_lock); 192 write_unlock_bh(&ipv6_sk_mc_lock);
192 193
193 dev_put(dev); 194 rcu_read_unlock();
194 195
195 return 0; 196 return 0;
196} 197}
@@ -213,18 +214,17 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
213 *lnk = mc_lst->next; 214 *lnk = mc_lst->next;
214 write_unlock_bh(&ipv6_sk_mc_lock); 215 write_unlock_bh(&ipv6_sk_mc_lock);
215 216
216 dev = dev_get_by_index(net, mc_lst->ifindex); 217 rcu_read_lock();
218 dev = dev_get_by_index_rcu(net, mc_lst->ifindex);
217 if (dev != NULL) { 219 if (dev != NULL) {
218 struct inet6_dev *idev = in6_dev_get(dev); 220 struct inet6_dev *idev = __in6_dev_get(dev);
219 221
220 (void) ip6_mc_leave_src(sk, mc_lst, idev); 222 (void) ip6_mc_leave_src(sk, mc_lst, idev);
221 if (idev) { 223 if (idev)
222 __ipv6_dev_mc_dec(idev, &mc_lst->addr); 224 __ipv6_dev_mc_dec(idev, &mc_lst->addr);
223 in6_dev_put(idev);
224 }
225 dev_put(dev);
226 } else 225 } else
227 (void) ip6_mc_leave_src(sk, mc_lst, NULL); 226 (void) ip6_mc_leave_src(sk, mc_lst, NULL);
227 rcu_read_unlock();
228 sock_kfree_s(sk, mc_lst, sizeof(*mc_lst)); 228 sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
229 return 0; 229 return 0;
230 } 230 }
@@ -234,43 +234,36 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
234 return -EADDRNOTAVAIL; 234 return -EADDRNOTAVAIL;
235} 235}
236 236
237static struct inet6_dev *ip6_mc_find_dev(struct net *net, 237/* called with rcu_read_lock() */
238 struct in6_addr *group, 238static struct inet6_dev *ip6_mc_find_dev_rcu(struct net *net,
239 int ifindex) 239 struct in6_addr *group,
240 int ifindex)
240{ 241{
241 struct net_device *dev = NULL; 242 struct net_device *dev = NULL;
242 struct inet6_dev *idev = NULL; 243 struct inet6_dev *idev = NULL;
243 244
244 if (ifindex == 0) { 245 if (ifindex == 0) {
245 struct rt6_info *rt; 246 struct rt6_info *rt = rt6_lookup(net, group, NULL, 0, 0);
246 247
247 rt = rt6_lookup(net, group, NULL, 0, 0);
248 if (rt) { 248 if (rt) {
249 dev = rt->rt6i_dev; 249 dev = rt->rt6i_dev;
250 dev_hold(dev); 250 dev_hold(dev);
251 dst_release(&rt->u.dst); 251 dst_release(&rt->dst);
252 } 252 }
253 } else 253 } else
254 dev = dev_get_by_index(net, ifindex); 254 dev = dev_get_by_index_rcu(net, ifindex);
255 255
256 if (!dev) 256 if (!dev)
257 goto nodev; 257 return NULL;
258 idev = in6_dev_get(dev); 258 idev = __in6_dev_get(dev);
259 if (!idev) 259 if (!idev)
260 goto release; 260 return NULL;
261 read_lock_bh(&idev->lock); 261 read_lock_bh(&idev->lock);
262 if (idev->dead) 262 if (idev->dead) {
263 goto unlock_release; 263 read_unlock_bh(&idev->lock);
264 264 return NULL;
265 }
265 return idev; 266 return idev;
266
267unlock_release:
268 read_unlock_bh(&idev->lock);
269 in6_dev_put(idev);
270release:
271 dev_put(dev);
272nodev:
273 return NULL;
274} 267}
275 268
276void ipv6_sock_mc_close(struct sock *sk) 269void ipv6_sock_mc_close(struct sock *sk)
@@ -286,19 +279,17 @@ void ipv6_sock_mc_close(struct sock *sk)
286 np->ipv6_mc_list = mc_lst->next; 279 np->ipv6_mc_list = mc_lst->next;
287 write_unlock_bh(&ipv6_sk_mc_lock); 280 write_unlock_bh(&ipv6_sk_mc_lock);
288 281
289 dev = dev_get_by_index(net, mc_lst->ifindex); 282 rcu_read_lock();
283 dev = dev_get_by_index_rcu(net, mc_lst->ifindex);
290 if (dev) { 284 if (dev) {
291 struct inet6_dev *idev = in6_dev_get(dev); 285 struct inet6_dev *idev = __in6_dev_get(dev);
292 286
293 (void) ip6_mc_leave_src(sk, mc_lst, idev); 287 (void) ip6_mc_leave_src(sk, mc_lst, idev);
294 if (idev) { 288 if (idev)
295 __ipv6_dev_mc_dec(idev, &mc_lst->addr); 289 __ipv6_dev_mc_dec(idev, &mc_lst->addr);
296 in6_dev_put(idev);
297 }
298 dev_put(dev);
299 } else 290 } else
300 (void) ip6_mc_leave_src(sk, mc_lst, NULL); 291 (void) ip6_mc_leave_src(sk, mc_lst, NULL);
301 292 rcu_read_unlock();
302 sock_kfree_s(sk, mc_lst, sizeof(*mc_lst)); 293 sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
303 294
304 write_lock_bh(&ipv6_sk_mc_lock); 295 write_lock_bh(&ipv6_sk_mc_lock);
@@ -327,14 +318,17 @@ int ip6_mc_source(int add, int omode, struct sock *sk,
327 if (!ipv6_addr_is_multicast(group)) 318 if (!ipv6_addr_is_multicast(group))
328 return -EINVAL; 319 return -EINVAL;
329 320
330 idev = ip6_mc_find_dev(net, group, pgsr->gsr_interface); 321 rcu_read_lock();
331 if (!idev) 322 idev = ip6_mc_find_dev_rcu(net, group, pgsr->gsr_interface);
323 if (!idev) {
324 rcu_read_unlock();
332 return -ENODEV; 325 return -ENODEV;
326 }
333 dev = idev->dev; 327 dev = idev->dev;
334 328
335 err = -EADDRNOTAVAIL; 329 err = -EADDRNOTAVAIL;
336 330
337 read_lock_bh(&ipv6_sk_mc_lock); 331 read_lock(&ipv6_sk_mc_lock);
338 for (pmc=inet6->ipv6_mc_list; pmc; pmc=pmc->next) { 332 for (pmc=inet6->ipv6_mc_list; pmc; pmc=pmc->next) {
339 if (pgsr->gsr_interface && pmc->ifindex != pgsr->gsr_interface) 333 if (pgsr->gsr_interface && pmc->ifindex != pgsr->gsr_interface)
340 continue; 334 continue;
@@ -358,7 +352,7 @@ int ip6_mc_source(int add, int omode, struct sock *sk,
358 pmc->sfmode = omode; 352 pmc->sfmode = omode;
359 } 353 }
360 354
361 write_lock_bh(&pmc->sflock); 355 write_lock(&pmc->sflock);
362 pmclocked = 1; 356 pmclocked = 1;
363 357
364 psl = pmc->sflist; 358 psl = pmc->sflist;
@@ -433,11 +427,10 @@ int ip6_mc_source(int add, int omode, struct sock *sk,
433 ip6_mc_add_src(idev, group, omode, 1, source, 1); 427 ip6_mc_add_src(idev, group, omode, 1, source, 1);
434done: 428done:
435 if (pmclocked) 429 if (pmclocked)
436 write_unlock_bh(&pmc->sflock); 430 write_unlock(&pmc->sflock);
437 read_unlock_bh(&ipv6_sk_mc_lock); 431 read_unlock(&ipv6_sk_mc_lock);
438 read_unlock_bh(&idev->lock); 432 read_unlock_bh(&idev->lock);
439 in6_dev_put(idev); 433 rcu_read_unlock();
440 dev_put(dev);
441 if (leavegroup) 434 if (leavegroup)
442 return ipv6_sock_mc_drop(sk, pgsr->gsr_interface, group); 435 return ipv6_sock_mc_drop(sk, pgsr->gsr_interface, group);
443 return err; 436 return err;
@@ -463,14 +456,17 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf)
463 gsf->gf_fmode != MCAST_EXCLUDE) 456 gsf->gf_fmode != MCAST_EXCLUDE)
464 return -EINVAL; 457 return -EINVAL;
465 458
466 idev = ip6_mc_find_dev(net, group, gsf->gf_interface); 459 rcu_read_lock();
460 idev = ip6_mc_find_dev_rcu(net, group, gsf->gf_interface);
467 461
468 if (!idev) 462 if (!idev) {
463 rcu_read_unlock();
469 return -ENODEV; 464 return -ENODEV;
465 }
470 dev = idev->dev; 466 dev = idev->dev;
471 467
472 err = 0; 468 err = 0;
473 read_lock_bh(&ipv6_sk_mc_lock); 469 read_lock(&ipv6_sk_mc_lock);
474 470
475 if (gsf->gf_fmode == MCAST_INCLUDE && gsf->gf_numsrc == 0) { 471 if (gsf->gf_fmode == MCAST_INCLUDE && gsf->gf_numsrc == 0) {
476 leavegroup = 1; 472 leavegroup = 1;
@@ -512,7 +508,7 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf)
512 (void) ip6_mc_add_src(idev, group, gsf->gf_fmode, 0, NULL, 0); 508 (void) ip6_mc_add_src(idev, group, gsf->gf_fmode, 0, NULL, 0);
513 } 509 }
514 510
515 write_lock_bh(&pmc->sflock); 511 write_lock(&pmc->sflock);
516 psl = pmc->sflist; 512 psl = pmc->sflist;
517 if (psl) { 513 if (psl) {
518 (void) ip6_mc_del_src(idev, group, pmc->sfmode, 514 (void) ip6_mc_del_src(idev, group, pmc->sfmode,
@@ -522,13 +518,12 @@ int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf)
522 (void) ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0); 518 (void) ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
523 pmc->sflist = newpsl; 519 pmc->sflist = newpsl;
524 pmc->sfmode = gsf->gf_fmode; 520 pmc->sfmode = gsf->gf_fmode;
525 write_unlock_bh(&pmc->sflock); 521 write_unlock(&pmc->sflock);
526 err = 0; 522 err = 0;
527done: 523done:
528 read_unlock_bh(&ipv6_sk_mc_lock); 524 read_unlock(&ipv6_sk_mc_lock);
529 read_unlock_bh(&idev->lock); 525 read_unlock_bh(&idev->lock);
530 in6_dev_put(idev); 526 rcu_read_unlock();
531 dev_put(dev);
532 if (leavegroup) 527 if (leavegroup)
533 err = ipv6_sock_mc_drop(sk, gsf->gf_interface, group); 528 err = ipv6_sock_mc_drop(sk, gsf->gf_interface, group);
534 return err; 529 return err;
@@ -551,11 +546,13 @@ int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
551 if (!ipv6_addr_is_multicast(group)) 546 if (!ipv6_addr_is_multicast(group))
552 return -EINVAL; 547 return -EINVAL;
553 548
554 idev = ip6_mc_find_dev(net, group, gsf->gf_interface); 549 rcu_read_lock();
550 idev = ip6_mc_find_dev_rcu(net, group, gsf->gf_interface);
555 551
556 if (!idev) 552 if (!idev) {
553 rcu_read_unlock();
557 return -ENODEV; 554 return -ENODEV;
558 555 }
559 dev = idev->dev; 556 dev = idev->dev;
560 557
561 err = -EADDRNOTAVAIL; 558 err = -EADDRNOTAVAIL;
@@ -577,8 +574,7 @@ int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
577 psl = pmc->sflist; 574 psl = pmc->sflist;
578 count = psl ? psl->sl_count : 0; 575 count = psl ? psl->sl_count : 0;
579 read_unlock_bh(&idev->lock); 576 read_unlock_bh(&idev->lock);
580 in6_dev_put(idev); 577 rcu_read_unlock();
581 dev_put(dev);
582 578
583 copycount = count < gsf->gf_numsrc ? count : gsf->gf_numsrc; 579 copycount = count < gsf->gf_numsrc ? count : gsf->gf_numsrc;
584 gsf->gf_numsrc = count; 580 gsf->gf_numsrc = count;
@@ -604,8 +600,7 @@ int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
604 return 0; 600 return 0;
605done: 601done:
606 read_unlock_bh(&idev->lock); 602 read_unlock_bh(&idev->lock);
607 in6_dev_put(idev); 603 rcu_read_unlock();
608 dev_put(dev);
609 return err; 604 return err;
610} 605}
611 606
@@ -822,6 +817,7 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
822 struct ifmcaddr6 *mc; 817 struct ifmcaddr6 *mc;
823 struct inet6_dev *idev; 818 struct inet6_dev *idev;
824 819
820 /* we need to take a reference on idev */
825 idev = in6_dev_get(dev); 821 idev = in6_dev_get(dev);
826 822
827 if (idev == NULL) 823 if (idev == NULL)
@@ -860,7 +856,7 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
860 setup_timer(&mc->mca_timer, igmp6_timer_handler, (unsigned long)mc); 856 setup_timer(&mc->mca_timer, igmp6_timer_handler, (unsigned long)mc);
861 857
862 ipv6_addr_copy(&mc->mca_addr, addr); 858 ipv6_addr_copy(&mc->mca_addr, addr);
863 mc->idev = idev; 859 mc->idev = idev; /* (reference taken) */
864 mc->mca_users = 1; 860 mc->mca_users = 1;
865 /* mca_stamp should be updated upon changes */ 861 /* mca_stamp should be updated upon changes */
866 mc->mca_cstamp = mc->mca_tstamp = jiffies; 862 mc->mca_cstamp = mc->mca_tstamp = jiffies;
@@ -915,16 +911,18 @@ int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr)
915 911
916int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr) 912int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr)
917{ 913{
918 struct inet6_dev *idev = in6_dev_get(dev); 914 struct inet6_dev *idev;
919 int err; 915 int err;
920 916
921 if (!idev) 917 rcu_read_lock();
922 return -ENODEV;
923
924 err = __ipv6_dev_mc_dec(idev, addr);
925 918
926 in6_dev_put(idev); 919 idev = __in6_dev_get(dev);
920 if (!idev)
921 err = -ENODEV;
922 else
923 err = __ipv6_dev_mc_dec(idev, addr);
927 924
925 rcu_read_unlock();
928 return err; 926 return err;
929} 927}
930 928
@@ -965,7 +963,8 @@ int ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
965 struct ifmcaddr6 *mc; 963 struct ifmcaddr6 *mc;
966 int rv = 0; 964 int rv = 0;
967 965
968 idev = in6_dev_get(dev); 966 rcu_read_lock();
967 idev = __in6_dev_get(dev);
969 if (idev) { 968 if (idev) {
970 read_lock_bh(&idev->lock); 969 read_lock_bh(&idev->lock);
971 for (mc = idev->mc_list; mc; mc=mc->next) { 970 for (mc = idev->mc_list; mc; mc=mc->next) {
@@ -992,8 +991,8 @@ int ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
992 rv = 1; /* don't filter unspecified source */ 991 rv = 1; /* don't filter unspecified source */
993 } 992 }
994 read_unlock_bh(&idev->lock); 993 read_unlock_bh(&idev->lock);
995 in6_dev_put(idev);
996 } 994 }
995 rcu_read_unlock();
997 return rv; 996 return rv;
998} 997}
999 998
@@ -1104,6 +1103,7 @@ static int mld_marksources(struct ifmcaddr6 *pmc, int nsrcs,
1104 return 1; 1103 return 1;
1105} 1104}
1106 1105
1106/* called with rcu_read_lock() */
1107int igmp6_event_query(struct sk_buff *skb) 1107int igmp6_event_query(struct sk_buff *skb)
1108{ 1108{
1109 struct mld2_query *mlh2 = NULL; 1109 struct mld2_query *mlh2 = NULL;
@@ -1127,7 +1127,7 @@ int igmp6_event_query(struct sk_buff *skb)
1127 if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) 1127 if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL))
1128 return -EINVAL; 1128 return -EINVAL;
1129 1129
1130 idev = in6_dev_get(skb->dev); 1130 idev = __in6_dev_get(skb->dev);
1131 1131
1132 if (idev == NULL) 1132 if (idev == NULL)
1133 return 0; 1133 return 0;
@@ -1137,10 +1137,8 @@ int igmp6_event_query(struct sk_buff *skb)
1137 group_type = ipv6_addr_type(group); 1137 group_type = ipv6_addr_type(group);
1138 1138
1139 if (group_type != IPV6_ADDR_ANY && 1139 if (group_type != IPV6_ADDR_ANY &&
1140 !(group_type&IPV6_ADDR_MULTICAST)) { 1140 !(group_type&IPV6_ADDR_MULTICAST))
1141 in6_dev_put(idev);
1142 return -EINVAL; 1141 return -EINVAL;
1143 }
1144 1142
1145 if (len == 24) { 1143 if (len == 24) {
1146 int switchback; 1144 int switchback;
@@ -1161,10 +1159,9 @@ int igmp6_event_query(struct sk_buff *skb)
1161 } else if (len >= 28) { 1159 } else if (len >= 28) {
1162 int srcs_offset = sizeof(struct mld2_query) - 1160 int srcs_offset = sizeof(struct mld2_query) -
1163 sizeof(struct icmp6hdr); 1161 sizeof(struct icmp6hdr);
1164 if (!pskb_may_pull(skb, srcs_offset)) { 1162 if (!pskb_may_pull(skb, srcs_offset))
1165 in6_dev_put(idev);
1166 return -EINVAL; 1163 return -EINVAL;
1167 } 1164
1168 mlh2 = (struct mld2_query *)skb_transport_header(skb); 1165 mlh2 = (struct mld2_query *)skb_transport_header(skb);
1169 max_delay = (MLDV2_MRC(ntohs(mlh2->mld2q_mrc))*HZ)/1000; 1166 max_delay = (MLDV2_MRC(ntohs(mlh2->mld2q_mrc))*HZ)/1000;
1170 if (!max_delay) 1167 if (!max_delay)
@@ -1173,28 +1170,23 @@ int igmp6_event_query(struct sk_buff *skb)
1173 if (mlh2->mld2q_qrv) 1170 if (mlh2->mld2q_qrv)
1174 idev->mc_qrv = mlh2->mld2q_qrv; 1171 idev->mc_qrv = mlh2->mld2q_qrv;
1175 if (group_type == IPV6_ADDR_ANY) { /* general query */ 1172 if (group_type == IPV6_ADDR_ANY) { /* general query */
1176 if (mlh2->mld2q_nsrcs) { 1173 if (mlh2->mld2q_nsrcs)
1177 in6_dev_put(idev);
1178 return -EINVAL; /* no sources allowed */ 1174 return -EINVAL; /* no sources allowed */
1179 } 1175
1180 mld_gq_start_timer(idev); 1176 mld_gq_start_timer(idev);
1181 in6_dev_put(idev);
1182 return 0; 1177 return 0;
1183 } 1178 }
1184 /* mark sources to include, if group & source-specific */ 1179 /* mark sources to include, if group & source-specific */
1185 if (mlh2->mld2q_nsrcs != 0) { 1180 if (mlh2->mld2q_nsrcs != 0) {
1186 if (!pskb_may_pull(skb, srcs_offset + 1181 if (!pskb_may_pull(skb, srcs_offset +
1187 ntohs(mlh2->mld2q_nsrcs) * sizeof(struct in6_addr))) { 1182 ntohs(mlh2->mld2q_nsrcs) * sizeof(struct in6_addr)))
1188 in6_dev_put(idev);
1189 return -EINVAL; 1183 return -EINVAL;
1190 } 1184
1191 mlh2 = (struct mld2_query *)skb_transport_header(skb); 1185 mlh2 = (struct mld2_query *)skb_transport_header(skb);
1192 mark = 1; 1186 mark = 1;
1193 } 1187 }
1194 } else { 1188 } else
1195 in6_dev_put(idev);
1196 return -EINVAL; 1189 return -EINVAL;
1197 }
1198 1190
1199 read_lock_bh(&idev->lock); 1191 read_lock_bh(&idev->lock);
1200 if (group_type == IPV6_ADDR_ANY) { 1192 if (group_type == IPV6_ADDR_ANY) {
@@ -1227,12 +1219,11 @@ int igmp6_event_query(struct sk_buff *skb)
1227 } 1219 }
1228 } 1220 }
1229 read_unlock_bh(&idev->lock); 1221 read_unlock_bh(&idev->lock);
1230 in6_dev_put(idev);
1231 1222
1232 return 0; 1223 return 0;
1233} 1224}
1234 1225
1235 1226/* called with rcu_read_lock() */
1236int igmp6_event_report(struct sk_buff *skb) 1227int igmp6_event_report(struct sk_buff *skb)
1237{ 1228{
1238 struct ifmcaddr6 *ma; 1229 struct ifmcaddr6 *ma;
@@ -1260,7 +1251,7 @@ int igmp6_event_report(struct sk_buff *skb)
1260 !(addr_type&IPV6_ADDR_LINKLOCAL)) 1251 !(addr_type&IPV6_ADDR_LINKLOCAL))
1261 return -EINVAL; 1252 return -EINVAL;
1262 1253
1263 idev = in6_dev_get(skb->dev); 1254 idev = __in6_dev_get(skb->dev);
1264 if (idev == NULL) 1255 if (idev == NULL)
1265 return -ENODEV; 1256 return -ENODEV;
1266 1257
@@ -1280,7 +1271,6 @@ int igmp6_event_report(struct sk_buff *skb)
1280 } 1271 }
1281 } 1272 }
1282 read_unlock_bh(&idev->lock); 1273 read_unlock_bh(&idev->lock);
1283 in6_dev_put(idev);
1284 return 0; 1274 return 0;
1285} 1275}
1286 1276
@@ -1396,12 +1386,14 @@ static void mld_sendpack(struct sk_buff *skb)
1396 struct mld2_report *pmr = 1386 struct mld2_report *pmr =
1397 (struct mld2_report *)skb_transport_header(skb); 1387 (struct mld2_report *)skb_transport_header(skb);
1398 int payload_len, mldlen; 1388 int payload_len, mldlen;
1399 struct inet6_dev *idev = in6_dev_get(skb->dev); 1389 struct inet6_dev *idev;
1400 struct net *net = dev_net(skb->dev); 1390 struct net *net = dev_net(skb->dev);
1401 int err; 1391 int err;
1402 struct flowi fl; 1392 struct flowi fl;
1403 struct dst_entry *dst; 1393 struct dst_entry *dst;
1404 1394
1395 rcu_read_lock();
1396 idev = __in6_dev_get(skb->dev);
1405 IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len); 1397 IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
1406 1398
1407 payload_len = (skb->tail - skb->network_header) - sizeof(*pip6); 1399 payload_len = (skb->tail - skb->network_header) - sizeof(*pip6);
@@ -1441,8 +1433,7 @@ out:
1441 } else 1433 } else
1442 IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_OUTDISCARDS); 1434 IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_OUTDISCARDS);
1443 1435
1444 if (likely(idev != NULL)) 1436 rcu_read_unlock();
1445 in6_dev_put(idev);
1446 return; 1437 return;
1447 1438
1448err_out: 1439err_out:
@@ -1779,7 +1770,8 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
1779 IPPROTO_ICMPV6, 1770 IPPROTO_ICMPV6,
1780 csum_partial(hdr, len, 0)); 1771 csum_partial(hdr, len, 0));
1781 1772
1782 idev = in6_dev_get(skb->dev); 1773 rcu_read_lock();
1774 idev = __in6_dev_get(skb->dev);
1783 1775
1784 dst = icmp6_dst_alloc(skb->dev, NULL, &ipv6_hdr(skb)->daddr); 1776 dst = icmp6_dst_alloc(skb->dev, NULL, &ipv6_hdr(skb)->daddr);
1785 if (!dst) { 1777 if (!dst) {
@@ -1806,8 +1798,7 @@ out:
1806 } else 1798 } else
1807 IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS); 1799 IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
1808 1800
1809 if (likely(idev != NULL)) 1801 rcu_read_unlock();
1810 in6_dev_put(idev);
1811 return; 1802 return;
1812 1803
1813err_out: 1804err_out:
@@ -1998,8 +1989,7 @@ static int sf_setstate(struct ifmcaddr6 *pmc)
1998 &psf->sf_addr)) 1989 &psf->sf_addr))
1999 break; 1990 break;
2000 if (!dpsf) { 1991 if (!dpsf) {
2001 dpsf = (struct ip6_sf_list *) 1992 dpsf = kmalloc(sizeof(*dpsf), GFP_ATOMIC);
2002 kmalloc(sizeof(*dpsf), GFP_ATOMIC);
2003 if (!dpsf) 1993 if (!dpsf)
2004 continue; 1994 continue;
2005 *dpsf = *psf; 1995 *dpsf = *psf;
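
The mcast.c conversion keeps exactly one real reference: ipv6_dev_mc_inc()
still calls in6_dev_get() because the result is stored in mc->idev and
outlives any RCU section, while every transient lookup moves to
__in6_dev_get() under rcu_read_lock(). A sketch of that rule of thumb
(example_has_mc is a hypothetical helper; the accessors are the ones used
above):

#include <net/addrconf.h>

static bool example_has_mc(struct net_device *dev)
{
	struct inet6_dev *idev;
	bool ret = false;

	rcu_read_lock();
	idev = __in6_dev_get(dev);	/* borrowed, no refcount */
	if (idev) {
		read_lock_bh(&idev->lock);
		ret = idev->mc_list != NULL;
		read_unlock_bh(&idev->lock);
	}
	rcu_read_unlock();
	return ret;
}

/* By contrast, a pointer that is stored (mc->idev above) takes a real
 * reference with in6_dev_get() and drops it later with in6_dev_put(). */
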
diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c
index 2794b6002836..d6e9599d0705 100644
--- a/net/ipv6/mip6.c
+++ b/net/ipv6/mip6.c
@@ -347,11 +347,12 @@ static const struct xfrm_type mip6_destopt_type =
347 347
348static int mip6_rthdr_input(struct xfrm_state *x, struct sk_buff *skb) 348static int mip6_rthdr_input(struct xfrm_state *x, struct sk_buff *skb)
349{ 349{
350 struct ipv6hdr *iph = ipv6_hdr(skb);
350 struct rt2_hdr *rt2 = (struct rt2_hdr *)skb->data; 351 struct rt2_hdr *rt2 = (struct rt2_hdr *)skb->data;
351 int err = rt2->rt_hdr.nexthdr; 352 int err = rt2->rt_hdr.nexthdr;
352 353
353 spin_lock(&x->lock); 354 spin_lock(&x->lock);
354 if (!ipv6_addr_equal(&rt2->addr, (struct in6_addr *)x->coaddr) && 355 if (!ipv6_addr_equal(&iph->daddr, (struct in6_addr *)x->coaddr) &&
355 !ipv6_addr_any((struct in6_addr *)x->coaddr)) 356 !ipv6_addr_any((struct in6_addr *)x->coaddr))
356 err = -ENOENT; 357 err = -ENOENT;
357 spin_unlock(&x->lock); 358 spin_unlock(&x->lock);
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 0abdc242ddb7..58841c4ae947 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -586,6 +586,7 @@ static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
586 src_addr = solicited_addr; 586 src_addr = solicited_addr;
587 if (ifp->flags & IFA_F_OPTIMISTIC) 587 if (ifp->flags & IFA_F_OPTIMISTIC)
588 override = 0; 588 override = 0;
589 inc_opt |= ifp->idev->cnf.force_tllao;
589 in6_ifa_put(ifp); 590 in6_ifa_put(ifp);
590 } else { 591 } else {
591 if (ipv6_dev_get_saddr(dev_net(dev), dev, daddr, 592 if (ipv6_dev_get_saddr(dev_net(dev), dev, daddr,
@@ -599,7 +600,6 @@ static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
599 icmp6h.icmp6_solicited = solicited; 600 icmp6h.icmp6_solicited = solicited;
600 icmp6h.icmp6_override = override; 601 icmp6h.icmp6_override = override;
601 602
602 inc_opt |= ifp->idev->cnf.force_tllao;
603 __ndisc_send(dev, neigh, daddr, src_addr, 603 __ndisc_send(dev, neigh, daddr, src_addr,
604 &icmp6h, solicited_addr, 604 &icmp6h, solicited_addr,
605 inc_opt ? ND_OPT_TARGET_LL_ADDR : 0); 605 inc_opt ? ND_OPT_TARGET_LL_ADDR : 0);
@@ -1229,7 +1229,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
1229 ND_PRINTK0(KERN_ERR 1229 ND_PRINTK0(KERN_ERR
1230 "ICMPv6 RA: %s() got default router without neighbour.\n", 1230 "ICMPv6 RA: %s() got default router without neighbour.\n",
1231 __func__); 1231 __func__);
1232 dst_release(&rt->u.dst); 1232 dst_release(&rt->dst);
1233 in6_dev_put(in6_dev); 1233 in6_dev_put(in6_dev);
1234 return; 1234 return;
1235 } 1235 }
@@ -1244,7 +1244,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
1244 if (ra_msg->icmph.icmp6_hop_limit) { 1244 if (ra_msg->icmph.icmp6_hop_limit) {
1245 in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit; 1245 in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
1246 if (rt) 1246 if (rt)
1247 rt->u.dst.metrics[RTAX_HOPLIMIT-1] = ra_msg->icmph.icmp6_hop_limit; 1247 rt->dst.metrics[RTAX_HOPLIMIT-1] = ra_msg->icmph.icmp6_hop_limit;
1248 } 1248 }
1249 1249
1250skip_defrtr: 1250skip_defrtr:
@@ -1363,7 +1363,7 @@ skip_linkparms:
1363 in6_dev->cnf.mtu6 = mtu; 1363 in6_dev->cnf.mtu6 = mtu;
1364 1364
1365 if (rt) 1365 if (rt)
1366 rt->u.dst.metrics[RTAX_MTU-1] = mtu; 1366 rt->dst.metrics[RTAX_MTU-1] = mtu;
1367 1367
1368 rt6_mtu_change(skb->dev, mtu); 1368 rt6_mtu_change(skb->dev, mtu);
1369 } 1369 }
@@ -1384,7 +1384,7 @@ skip_linkparms:
1384 } 1384 }
1385out: 1385out:
1386 if (rt) 1386 if (rt)
1387 dst_release(&rt->u.dst); 1387 dst_release(&rt->dst);
1388 else if (neigh) 1388 else if (neigh)
1389 neigh_release(neigh); 1389 neigh_release(neigh);
1390 in6_dev_put(in6_dev); 1390 in6_dev_put(in6_dev);
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index a74951c039b6..7155b2451d7c 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -151,9 +151,7 @@ static __sum16 nf_ip6_checksum_partial(struct sk_buff *skb, unsigned int hook,
151 protocol, 151 protocol,
152 csum_sub(0, hsum))); 152 csum_sub(0, hsum)));
153 skb->ip_summed = CHECKSUM_NONE; 153 skb->ip_summed = CHECKSUM_NONE;
154 csum = __skb_checksum_complete_head(skb, dataoff + len); 154 return __skb_checksum_complete_head(skb, dataoff + len);
155 if (!csum)
156 skb->ip_summed = CHECKSUM_UNNECESSARY;
157 } 155 }
158 return csum; 156 return csum;
159}; 157};
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
index 8c201743d96d..413ab0754e1f 100644
--- a/net/ipv6/netfilter/ip6_queue.c
+++ b/net/ipv6/netfilter/ip6_queue.c
@@ -43,7 +43,7 @@ typedef int (*ipq_cmpfn)(struct nf_queue_entry *, unsigned long);
43 43
44static unsigned char copy_mode __read_mostly = IPQ_COPY_NONE; 44static unsigned char copy_mode __read_mostly = IPQ_COPY_NONE;
45static unsigned int queue_maxlen __read_mostly = IPQ_QMAX_DEFAULT; 45static unsigned int queue_maxlen __read_mostly = IPQ_QMAX_DEFAULT;
46static DEFINE_RWLOCK(queue_lock); 46static DEFINE_SPINLOCK(queue_lock);
47static int peer_pid __read_mostly; 47static int peer_pid __read_mostly;
48static unsigned int copy_range __read_mostly; 48static unsigned int copy_range __read_mostly;
49static unsigned int queue_total; 49static unsigned int queue_total;
@@ -73,10 +73,10 @@ __ipq_set_mode(unsigned char mode, unsigned int range)
73 break; 73 break;
74 74
75 case IPQ_COPY_PACKET: 75 case IPQ_COPY_PACKET:
76 copy_mode = mode; 76 if (range > 0xFFFF)
77 range = 0xFFFF;
77 copy_range = range; 78 copy_range = range;
78 if (copy_range > 0xFFFF) 79 copy_mode = mode;
79 copy_range = 0xFFFF;
80 break; 80 break;
81 81
82 default: 82 default:
@@ -102,7 +102,7 @@ ipq_find_dequeue_entry(unsigned long id)
102{ 102{
103 struct nf_queue_entry *entry = NULL, *i; 103 struct nf_queue_entry *entry = NULL, *i;
104 104
105 write_lock_bh(&queue_lock); 105 spin_lock_bh(&queue_lock);
106 106
107 list_for_each_entry(i, &queue_list, list) { 107 list_for_each_entry(i, &queue_list, list) {
108 if ((unsigned long)i == id) { 108 if ((unsigned long)i == id) {
@@ -116,7 +116,7 @@ ipq_find_dequeue_entry(unsigned long id)
116 queue_total--; 116 queue_total--;
117 } 117 }
118 118
119 write_unlock_bh(&queue_lock); 119 spin_unlock_bh(&queue_lock);
120 return entry; 120 return entry;
121} 121}
122 122
@@ -137,9 +137,9 @@ __ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
137static void 137static void
138ipq_flush(ipq_cmpfn cmpfn, unsigned long data) 138ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
139{ 139{
140 write_lock_bh(&queue_lock); 140 spin_lock_bh(&queue_lock);
141 __ipq_flush(cmpfn, data); 141 __ipq_flush(cmpfn, data);
142 write_unlock_bh(&queue_lock); 142 spin_unlock_bh(&queue_lock);
143} 143}
144 144
145static struct sk_buff * 145static struct sk_buff *
@@ -153,9 +153,7 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
153 struct nlmsghdr *nlh; 153 struct nlmsghdr *nlh;
154 struct timeval tv; 154 struct timeval tv;
155 155
156 read_lock_bh(&queue_lock); 156 switch (ACCESS_ONCE(copy_mode)) {
157
158 switch (copy_mode) {
159 case IPQ_COPY_META: 157 case IPQ_COPY_META:
160 case IPQ_COPY_NONE: 158 case IPQ_COPY_NONE:
161 size = NLMSG_SPACE(sizeof(*pmsg)); 159 size = NLMSG_SPACE(sizeof(*pmsg));
@@ -163,26 +161,21 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
163 161
164 case IPQ_COPY_PACKET: 162 case IPQ_COPY_PACKET:
165 if (entry->skb->ip_summed == CHECKSUM_PARTIAL && 163 if (entry->skb->ip_summed == CHECKSUM_PARTIAL &&
166 (*errp = skb_checksum_help(entry->skb))) { 164 (*errp = skb_checksum_help(entry->skb)))
167 read_unlock_bh(&queue_lock);
168 return NULL; 165 return NULL;
169 } 166
170 if (copy_range == 0 || copy_range > entry->skb->len) 167 data_len = ACCESS_ONCE(copy_range);
168 if (data_len == 0 || data_len > entry->skb->len)
171 data_len = entry->skb->len; 169 data_len = entry->skb->len;
172 else
173 data_len = copy_range;
174 170
175 size = NLMSG_SPACE(sizeof(*pmsg) + data_len); 171 size = NLMSG_SPACE(sizeof(*pmsg) + data_len);
176 break; 172 break;
177 173
178 default: 174 default:
179 *errp = -EINVAL; 175 *errp = -EINVAL;
180 read_unlock_bh(&queue_lock);
181 return NULL; 176 return NULL;
182 } 177 }
183 178
184 read_unlock_bh(&queue_lock);
185
186 skb = alloc_skb(size, GFP_ATOMIC); 179 skb = alloc_skb(size, GFP_ATOMIC);
187 if (!skb) 180 if (!skb)
188 goto nlmsg_failure; 181 goto nlmsg_failure;
@@ -242,7 +235,7 @@ ipq_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
242 if (nskb == NULL) 235 if (nskb == NULL)
243 return status; 236 return status;
244 237
245 write_lock_bh(&queue_lock); 238 spin_lock_bh(&queue_lock);
246 239
247 if (!peer_pid) 240 if (!peer_pid)
248 goto err_out_free_nskb; 241 goto err_out_free_nskb;
@@ -266,14 +259,14 @@ ipq_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
266 259
267 __ipq_enqueue_entry(entry); 260 __ipq_enqueue_entry(entry);
268 261
269 write_unlock_bh(&queue_lock); 262 spin_unlock_bh(&queue_lock);
270 return status; 263 return status;
271 264
272err_out_free_nskb: 265err_out_free_nskb:
273 kfree_skb(nskb); 266 kfree_skb(nskb);
274 267
275err_out_unlock: 268err_out_unlock:
276 write_unlock_bh(&queue_lock); 269 spin_unlock_bh(&queue_lock);
277 return status; 270 return status;
278} 271}
279 272
@@ -342,9 +335,9 @@ ipq_set_mode(unsigned char mode, unsigned int range)
342{ 335{
343 int status; 336 int status;
344 337
345 write_lock_bh(&queue_lock); 338 spin_lock_bh(&queue_lock);
346 status = __ipq_set_mode(mode, range); 339 status = __ipq_set_mode(mode, range);
347 write_unlock_bh(&queue_lock); 340 spin_unlock_bh(&queue_lock);
348 return status; 341 return status;
349} 342}
350 343
@@ -441,11 +434,11 @@ __ipq_rcv_skb(struct sk_buff *skb)
441 if (security_netlink_recv(skb, CAP_NET_ADMIN)) 434 if (security_netlink_recv(skb, CAP_NET_ADMIN))
442 RCV_SKB_FAIL(-EPERM); 435 RCV_SKB_FAIL(-EPERM);
443 436
444 write_lock_bh(&queue_lock); 437 spin_lock_bh(&queue_lock);
445 438
446 if (peer_pid) { 439 if (peer_pid) {
447 if (peer_pid != pid) { 440 if (peer_pid != pid) {
448 write_unlock_bh(&queue_lock); 441 spin_unlock_bh(&queue_lock);
449 RCV_SKB_FAIL(-EBUSY); 442 RCV_SKB_FAIL(-EBUSY);
450 } 443 }
451 } else { 444 } else {
@@ -453,7 +446,7 @@ __ipq_rcv_skb(struct sk_buff *skb)
453 peer_pid = pid; 446 peer_pid = pid;
454 } 447 }
455 448
456 write_unlock_bh(&queue_lock); 449 spin_unlock_bh(&queue_lock);
457 450
458 status = ipq_receive_peer(NLMSG_DATA(nlh), type, 451 status = ipq_receive_peer(NLMSG_DATA(nlh), type,
459 nlmsglen - NLMSG_LENGTH(0)); 452 nlmsglen - NLMSG_LENGTH(0));
@@ -498,10 +491,10 @@ ipq_rcv_nl_event(struct notifier_block *this,
498 struct netlink_notify *n = ptr; 491 struct netlink_notify *n = ptr;
499 492
500 if (event == NETLINK_URELEASE && n->protocol == NETLINK_IP6_FW) { 493 if (event == NETLINK_URELEASE && n->protocol == NETLINK_IP6_FW) {
501 write_lock_bh(&queue_lock); 494 spin_lock_bh(&queue_lock);
502 if ((net_eq(n->net, &init_net)) && (n->pid == peer_pid)) 495 if ((net_eq(n->net, &init_net)) && (n->pid == peer_pid))
503 __ipq_reset(); 496 __ipq_reset();
504 write_unlock_bh(&queue_lock); 497 spin_unlock_bh(&queue_lock);
505 } 498 }
506 return NOTIFY_DONE; 499 return NOTIFY_DONE;
507} 500}
@@ -528,7 +521,7 @@ static ctl_table ipq_table[] = {
528#ifdef CONFIG_PROC_FS 521#ifdef CONFIG_PROC_FS
529static int ip6_queue_show(struct seq_file *m, void *v) 522static int ip6_queue_show(struct seq_file *m, void *v)
530{ 523{
531 read_lock_bh(&queue_lock); 524 spin_lock_bh(&queue_lock);
532 525
533 seq_printf(m, 526 seq_printf(m,
534 "Peer PID : %d\n" 527 "Peer PID : %d\n"
@@ -546,7 +539,7 @@ static int ip6_queue_show(struct seq_file *m, void *v)
546 queue_dropped, 539 queue_dropped,
547 queue_user_dropped); 540 queue_user_dropped);
548 541
549 read_unlock_bh(&queue_lock); 542 spin_unlock_bh(&queue_lock);
550 return 0; 543 return 0;
551} 544}
552 545
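
The ip6_queue changes above are a mechanical lock conversion: the only remaining read-side user of queue_lock is the /proc show function, so the reader/writer split buys nothing, and a plain spinlock is cheaper than an rwlock on the writer-dominated enqueue and control paths. Every acquisition converts the same way (kernel-style sketch, not a standalone program):

    static DEFINE_SPINLOCK(queue_lock);     /* was DEFINE_RWLOCK(queue_lock) */

    spin_lock_bh(&queue_lock);              /* replaces read_lock_bh()/write_lock_bh() */
    /* ... critical section ... */
    spin_unlock_bh(&queue_lock);
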
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 9d2d68f0e605..5359ef4daac5 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -387,9 +387,7 @@ ip6t_do_table(struct sk_buff *skb,
387 goto no_match; 387 goto no_match;
388 } 388 }
389 389
390 ADD_COUNTER(e->counters, 390 ADD_COUNTER(e->counters, skb->len, 1);
391 ntohs(ipv6_hdr(skb)->payload_len) +
392 sizeof(struct ipv6hdr), 1);
393 391
394 t = ip6t_get_target_c(e); 392 t = ip6t_get_target_c(e);
395 IP_NF_ASSERT(t->u.kernel.target); 393 IP_NF_ASSERT(t->u.kernel.target);
@@ -899,7 +897,7 @@ get_counters(const struct xt_table_info *t,
899 struct ip6t_entry *iter; 897 struct ip6t_entry *iter;
900 unsigned int cpu; 898 unsigned int cpu;
901 unsigned int i; 899 unsigned int i;
902 unsigned int curcpu; 900 unsigned int curcpu = get_cpu();
903 901
904 /* Instead of clearing (by a previous call to memset()) 902 /* Instead of clearing (by a previous call to memset())
905 * the counters and using adds, we set the counters 903 * the counters and using adds, we set the counters
@@ -909,14 +907,16 @@ get_counters(const struct xt_table_info *t,
909 * if new softirq were to run and call ipt_do_table 907 * if new softirq were to run and call ipt_do_table
910 */ 908 */
911 local_bh_disable(); 909 local_bh_disable();
912 curcpu = smp_processor_id();
913
914 i = 0; 910 i = 0;
915 xt_entry_foreach(iter, t->entries[curcpu], t->size) { 911 xt_entry_foreach(iter, t->entries[curcpu], t->size) {
916 SET_COUNTER(counters[i], iter->counters.bcnt, 912 SET_COUNTER(counters[i], iter->counters.bcnt,
917 iter->counters.pcnt); 913 iter->counters.pcnt);
918 ++i; 914 ++i;
919 } 915 }
916 local_bh_enable();
917 /* Processing counters from other cpus, we can let bottom half enabled,
918 * (preemption is disabled)
919 */
920 920
921 for_each_possible_cpu(cpu) { 921 for_each_possible_cpu(cpu) {
922 if (cpu == curcpu) 922 if (cpu == curcpu)
@@ -930,7 +930,7 @@ get_counters(const struct xt_table_info *t,
930 } 930 }
931 xt_info_wrunlock(cpu); 931 xt_info_wrunlock(cpu);
932 } 932 }
933 local_bh_enable(); 933 put_cpu();
934} 934}
935 935
936static struct xt_counters *alloc_counters(const struct xt_table *table) 936static struct xt_counters *alloc_counters(const struct xt_table *table)
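
In get_counters() above, the snapshot of the local CPU's counters still runs with bottom halves disabled, because local softirq updaters cannot be excluded any other way; the remote CPUs, however, are serialized through xt_info_wrlock(), so bottom halves can stay enabled for that (much longer) part of the walk. get_cpu()/put_cpu() pin the task so curcpu remains valid throughout. Schematically:

    unsigned int cpu, curcpu = get_cpu();   /* disables preemption */

    local_bh_disable();
    /* SET_COUNTER() snapshot of this CPU's entries:
     * local softirqs are the only racing updaters here */
    local_bh_enable();

    for_each_possible_cpu(cpu) {
            if (cpu == curcpu)
                    continue;
            xt_info_wrlock(cpu);            /* excludes that CPU's updaters */
            /* ADD_COUNTER() that CPU's entries */
            xt_info_wrunlock(cpu);
    }
    put_cpu();
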
@@ -943,7 +943,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
943 (other than comefrom, which userspace doesn't care 943 (other than comefrom, which userspace doesn't care
944 about). */ 944 about). */
945 countersize = sizeof(struct xt_counters) * private->number; 945 countersize = sizeof(struct xt_counters) * private->number;
946 counters = vmalloc_node(countersize, numa_node_id()); 946 counters = vmalloc(countersize);
947 947
948 if (counters == NULL) 948 if (counters == NULL)
949 return ERR_PTR(-ENOMEM); 949 return ERR_PTR(-ENOMEM);
@@ -1213,8 +1213,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1213 struct ip6t_entry *iter; 1213 struct ip6t_entry *iter;
1214 1214
1215 ret = 0; 1215 ret = 0;
1216 counters = vmalloc_node(num_counters * sizeof(struct xt_counters), 1216 counters = vmalloc(num_counters * sizeof(struct xt_counters));
1217 numa_node_id());
1218 if (!counters) { 1217 if (!counters) {
1219 ret = -ENOMEM; 1218 ret = -ENOMEM;
1220 goto out; 1219 goto out;
@@ -1368,7 +1367,7 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
1368 if (len != size + num_counters * sizeof(struct xt_counters)) 1367 if (len != size + num_counters * sizeof(struct xt_counters))
1369 return -EINVAL; 1368 return -EINVAL;
1370 1369
1371 paddc = vmalloc_node(len - size, numa_node_id()); 1370 paddc = vmalloc(len - size);
1372 if (!paddc) 1371 if (!paddc)
1373 return -ENOMEM; 1372 return -ENOMEM;
1374 1373
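
Two independent simplifications close out the ip6_tables.c changes. First, per-rule byte accounting switches to skb->len, which at the netfilter hooks already equals the IPv6 header plus payload (the skb has been trimmed to the packet length by then), saving a header load and an ntohs() per packet:

    ADD_COUNTER(e->counters, skb->len, 1);
    /* equivalent to the removed:
     * ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr) */

Second, the counter scratch buffers drop vmalloc_node() for plain vmalloc(): numa_node_id() in a preemptible context picks an arbitrary node anyway, and these short-lived buffers are not performance-critical, so node-local placement offers no benefit.
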
diff --git a/net/ipv6/netfilter/ip6t_LOG.c b/net/ipv6/netfilter/ip6t_LOG.c
index af4ee11f2066..0a07ae7b933f 100644
--- a/net/ipv6/netfilter/ip6t_LOG.c
+++ b/net/ipv6/netfilter/ip6t_LOG.c
@@ -373,6 +373,56 @@ static void dump_packet(const struct nf_loginfo *info,
373 printk("MARK=0x%x ", skb->mark); 373 printk("MARK=0x%x ", skb->mark);
374} 374}
375 375
376static void dump_mac_header(const struct nf_loginfo *info,
377 const struct sk_buff *skb)
378{
379 struct net_device *dev = skb->dev;
380 unsigned int logflags = 0;
381
382 if (info->type == NF_LOG_TYPE_LOG)
383 logflags = info->u.log.logflags;
384
385 if (!(logflags & IP6T_LOG_MACDECODE))
386 goto fallback;
387
388 switch (dev->type) {
389 case ARPHRD_ETHER:
390 printk("MACSRC=%pM MACDST=%pM MACPROTO=%04x ",
391 eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
392 ntohs(eth_hdr(skb)->h_proto));
393 return;
394 default:
395 break;
396 }
397
398fallback:
399 printk("MAC=");
400 if (dev->hard_header_len &&
401 skb->mac_header != skb->network_header) {
402 const unsigned char *p = skb_mac_header(skb);
403 unsigned int len = dev->hard_header_len;
404 unsigned int i;
405
406 if (dev->type == ARPHRD_SIT &&
407 (p -= ETH_HLEN) < skb->head)
408 p = NULL;
409
410 if (p != NULL) {
411 printk("%02x", *p++);
412 for (i = 1; i < len; i++)
413 printk(":%02x", p[i]);
414 }
415 printk(" ");
416
417 if (dev->type == ARPHRD_SIT) {
418 const struct iphdr *iph =
419 (struct iphdr *)skb_mac_header(skb);
420 printk("TUNNEL=%pI4->%pI4 ", &iph->saddr, &iph->daddr);
421 }
422 } else
423 printk(" ");
424}
425
376static struct nf_loginfo default_loginfo = { 426static struct nf_loginfo default_loginfo = {
377 .type = NF_LOG_TYPE_LOG, 427 .type = NF_LOG_TYPE_LOG,
378 .u = { 428 .u = {
@@ -400,35 +450,10 @@ ip6t_log_packet(u_int8_t pf,
400 prefix, 450 prefix,
401 in ? in->name : "", 451 in ? in->name : "",
402 out ? out->name : ""); 452 out ? out->name : "");
403 if (in && !out) {
404 unsigned int len;
405 /* MAC logging for input chain only. */
406 printk("MAC=");
407 if (skb->dev && (len = skb->dev->hard_header_len) &&
408 skb->mac_header != skb->network_header) {
409 const unsigned char *p = skb_mac_header(skb);
410 int i;
411
412 if (skb->dev->type == ARPHRD_SIT &&
413 (p -= ETH_HLEN) < skb->head)
414 p = NULL;
415
416 if (p != NULL) {
417 for (i = 0; i < len; i++)
418 printk("%02x%s", p[i],
419 i == len - 1 ? "" : ":");
420 }
421 printk(" ");
422 453
423 if (skb->dev->type == ARPHRD_SIT) { 454 /* MAC logging for input path only. */
424 const struct iphdr *iph = 455 if (in && !out)
425 (struct iphdr *)skb_mac_header(skb); 456 dump_mac_header(loginfo, skb);
426 printk("TUNNEL=%pI4->%pI4 ",
427 &iph->saddr, &iph->daddr);
428 }
429 } else
430 printk(" ");
431 }
432 457
433 dump_packet(loginfo, skb, skb_network_offset(skb), 1); 458 dump_packet(loginfo, skb, skb_network_offset(skb), 1);
434 printk("\n"); 459 printk("\n");
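
The ip6t_LOG.c change factors the input-path MAC dump into dump_mac_header() and adds an IP6T_LOG_MACDECODE mode that prints a decoded Ethernet header instead of the raw hex dump:

    /* MACDECODE on an Ethernet device: */
    printk("MACSRC=%pM MACDST=%pM MACPROTO=%04x ",
           eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
           ntohs(eth_hdr(skb)->h_proto));
    /* e.g. MACSRC=00:16:3e:aa:bb:cc MACDST=00:16:3e:dd:ee:ff MACPROTO=86dd */

Worth noting: the rewritten fallback loop prints *p++ and then p[i] starting at i = 1, so after the pointer advance it emits bytes 0, 2, 3, ... of the header rather than 0, 1, 2, ... — a subtle off-by-one that the original per-byte loop did not have.
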
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index 47d227713758..2933396e0281 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -97,9 +97,11 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
97 fl.fl_ip_dport = otcph.source; 97 fl.fl_ip_dport = otcph.source;
98 security_skb_classify_flow(oldskb, &fl); 98 security_skb_classify_flow(oldskb, &fl);
99 dst = ip6_route_output(net, NULL, &fl); 99 dst = ip6_route_output(net, NULL, &fl);
100 if (dst == NULL) 100 if (dst == NULL || dst->error) {
101 dst_release(dst);
101 return; 102 return;
102 if (dst->error || xfrm_lookup(net, &dst, &fl, NULL, 0)) 103 }
104 if (xfrm_lookup(net, &dst, &fl, NULL, 0))
103 return; 105 return;
104 106
105 hh_len = (dst->dev->hard_header_len + 15)&~15; 107 hh_len = (dst->dev->hard_header_len + 15)&~15;
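
The ip6t_REJECT.c fix above closes a dst leak: ip6_route_output() returns a held dst_entry even when it resolves to an error route, so bailing out on dst->error without releasing it leaked a reference. The corrected pattern releases before returning (dst_release() tolerates NULL):

    dst = ip6_route_output(net, NULL, &fl);
    if (dst == NULL || dst->error) {
            dst_release(dst);       /* no-op on NULL */
            return;
    }
    if (xfrm_lookup(net, &dst, &fl, NULL, 0))
            return;
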
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index 9be81776415e..1df3c8b6bf47 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -208,7 +208,7 @@ icmpv6_error(struct net *net, struct nf_conn *tmpl,
208 type = icmp6h->icmp6_type - 130; 208 type = icmp6h->icmp6_type - 130;
209 if (type >= 0 && type < sizeof(noct_valid_new) && 209 if (type >= 0 && type < sizeof(noct_valid_new) &&
210 noct_valid_new[type]) { 210 noct_valid_new[type]) {
211 skb->nfct = &nf_conntrack_untracked.ct_general; 211 skb->nfct = &nf_ct_untracked_get()->ct_general;
212 skb->nfctinfo = IP_CT_NEW; 212 skb->nfctinfo = IP_CT_NEW;
213 nf_conntrack_get(skb->nfct); 213 nf_conntrack_get(skb->nfct);
214 return NF_ACCEPT; 214 return NF_ACCEPT;
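
The icmpv6_error() change tracks an API move: the single global nf_conntrack_untracked became a per-cpu object (its refcount used to ping-pong between CPUs for every untracked packet), and callers now reach it through nf_ct_untracked_get(). A sketch of the accessor, assuming the per-cpu layout introduced alongside this change:

    static inline struct nf_conn *nf_ct_untracked_get(void)
    {
            return &__raw_get_cpu_var(nf_conntrack_untracked);
    }
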
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 6fb890187de0..13ef5bc05cf5 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -114,10 +114,8 @@ static void nf_skb_free(struct sk_buff *skb)
114} 114}
115 115
116/* Memory Tracking Functions. */ 116/* Memory Tracking Functions. */
117static inline void frag_kfree_skb(struct sk_buff *skb, unsigned int *work) 117static void frag_kfree_skb(struct sk_buff *skb)
118{ 118{
119 if (work)
120 *work -= skb->truesize;
121 atomic_sub(skb->truesize, &nf_init_frags.mem); 119 atomic_sub(skb->truesize, &nf_init_frags.mem);
122 nf_skb_free(skb); 120 nf_skb_free(skb);
123 kfree_skb(skb); 121 kfree_skb(skb);
@@ -201,7 +199,7 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
201 int offset, end; 199 int offset, end;
202 200
203 if (fq->q.last_in & INET_FRAG_COMPLETE) { 201 if (fq->q.last_in & INET_FRAG_COMPLETE) {
204 pr_debug("Allready completed\n"); 202 pr_debug("Already completed\n");
205 goto err; 203 goto err;
206 } 204 }
207 205
@@ -271,6 +269,11 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
271 * in the chain of fragments so far. We must know where to put 269 * in the chain of fragments so far. We must know where to put
272 * this fragment, right? 270 * this fragment, right?
273 */ 271 */
272 prev = fq->q.fragments_tail;
273 if (!prev || NFCT_FRAG6_CB(prev)->offset < offset) {
274 next = NULL;
275 goto found;
276 }
274 prev = NULL; 277 prev = NULL;
275 for (next = fq->q.fragments; next != NULL; next = next->next) { 278 for (next = fq->q.fragments; next != NULL; next = next->next) {
276 if (NFCT_FRAG6_CB(next)->offset >= offset) 279 if (NFCT_FRAG6_CB(next)->offset >= offset)
@@ -278,6 +281,7 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
278 prev = next; 281 prev = next;
279 } 282 }
280 283
284found:
281 /* We found where to put this one. Check for overlap with 285 /* We found where to put this one. Check for overlap with
282 * preceding fragment, and, if needed, align things so that 286 * preceding fragment, and, if needed, align things so that
283 * any overlaps are eliminated. 287 * any overlaps are eliminated.
@@ -335,7 +339,7 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
335 fq->q.fragments = next; 339 fq->q.fragments = next;
336 340
337 fq->q.meat -= free_it->len; 341 fq->q.meat -= free_it->len;
338 frag_kfree_skb(free_it, NULL); 342 frag_kfree_skb(free_it);
339 } 343 }
340 } 344 }
341 345
@@ -343,6 +347,8 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
343 347
344 /* Insert this fragment in the chain of fragments. */ 348 /* Insert this fragment in the chain of fragments. */
345 skb->next = next; 349 skb->next = next;
350 if (!next)
351 fq->q.fragments_tail = skb;
346 if (prev) 352 if (prev)
347 prev->next = skb; 353 prev->next = skb;
348 else 354 else
@@ -442,7 +448,6 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
442 skb_shinfo(head)->frag_list = head->next; 448 skb_shinfo(head)->frag_list = head->next;
443 skb_reset_transport_header(head); 449 skb_reset_transport_header(head);
444 skb_push(head, head->data - skb_network_header(head)); 450 skb_push(head, head->data - skb_network_header(head));
445 atomic_sub(head->truesize, &nf_init_frags.mem);
446 451
447 for (fp=head->next; fp; fp = fp->next) { 452 for (fp=head->next; fp; fp = fp->next) {
448 head->data_len += fp->len; 453 head->data_len += fp->len;
@@ -452,8 +457,8 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
452 else if (head->ip_summed == CHECKSUM_COMPLETE) 457 else if (head->ip_summed == CHECKSUM_COMPLETE)
453 head->csum = csum_add(head->csum, fp->csum); 458 head->csum = csum_add(head->csum, fp->csum);
454 head->truesize += fp->truesize; 459 head->truesize += fp->truesize;
455 atomic_sub(fp->truesize, &nf_init_frags.mem);
456 } 460 }
461 atomic_sub(head->truesize, &nf_init_frags.mem);
457 462
458 head->next = NULL; 463 head->next = NULL;
459 head->dev = dev; 464 head->dev = dev;
@@ -467,6 +472,7 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
467 head->csum); 472 head->csum);
468 473
469 fq->q.fragments = NULL; 474 fq->q.fragments = NULL;
475 fq->q.fragments_tail = NULL;
470 476
471 /* all original skbs are linked into the NFCT_FRAG6_CB(head).orig */ 477 /* all original skbs are linked into the NFCT_FRAG6_CB(head).orig */
472 fp = skb_shinfo(head)->frag_list; 478 fp = skb_shinfo(head)->frag_list;
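
The nf_conntrack_reasm.c insertion change is the interesting one: fragments almost always arrive in order, yet the old code walked the whole offset-sorted list for every fragment, which is quadratic for large packets. Caching a tail pointer makes in-order arrival O(1) and keeps the linear scan only as the reordering fallback. A self-contained sketch of the list discipline (illustrative names, not the kernel's):

    #include <stddef.h>

    struct frag {
            int offset;
            struct frag *next;
    };

    struct frag_queue {
            struct frag *fragments;         /* head, sorted by offset */
            struct frag *fragments_tail;    /* new: O(1) append point */
    };

    static void frag_insert(struct frag_queue *q, struct frag *f)
    {
            struct frag *prev, *next;

            prev = q->fragments_tail;
            if (!prev || prev->offset < f->offset) {
                    next = NULL;            /* fast path: append at tail */
                    goto found;
            }
            prev = NULL;                    /* reordered: fall back to scan */
            for (next = q->fragments; next; next = next->next) {
                    if (next->offset >= f->offset)
                            break;
                    prev = next;
            }
    found:
            f->next = next;
            if (!next)
                    q->fragments_tail = f;  /* keep the cached tail coherent */
            if (prev)
                    prev->next = f;
            else
                    q->fragments = f;
    }

The same pattern, with the matching fragments_tail resets after reassembly, appears again in net/ipv6/reassembly.c below.
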
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index 566798d69f37..d082eaeefa25 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -174,17 +174,28 @@ static void snmp6_seq_show_item(struct seq_file *seq, void __percpu **mib,
174 const struct snmp_mib *itemlist) 174 const struct snmp_mib *itemlist)
175{ 175{
176 int i; 176 int i;
177 for (i=0; itemlist[i].name; i++) 177
178 for (i = 0; itemlist[i].name; i++)
178 seq_printf(seq, "%-32s\t%lu\n", itemlist[i].name, 179 seq_printf(seq, "%-32s\t%lu\n", itemlist[i].name,
179 snmp_fold_field(mib, itemlist[i].entry)); 180 snmp_fold_field(mib, itemlist[i].entry));
180} 181}
181 182
183static void snmp6_seq_show_item64(struct seq_file *seq, void __percpu **mib,
184 const struct snmp_mib *itemlist, size_t syncpoff)
185{
186 int i;
187
188 for (i = 0; itemlist[i].name; i++)
189 seq_printf(seq, "%-32s\t%llu\n", itemlist[i].name,
190 snmp_fold_field64(mib, itemlist[i].entry, syncpoff));
191}
192
182static int snmp6_seq_show(struct seq_file *seq, void *v) 193static int snmp6_seq_show(struct seq_file *seq, void *v)
183{ 194{
184 struct net *net = (struct net *)seq->private; 195 struct net *net = (struct net *)seq->private;
185 196
186 snmp6_seq_show_item(seq, (void __percpu **)net->mib.ipv6_statistics, 197 snmp6_seq_show_item64(seq, (void __percpu **)net->mib.ipv6_statistics,
187 snmp6_ipstats_list); 198 snmp6_ipstats_list, offsetof(struct ipstats_mib, syncp));
188 snmp6_seq_show_item(seq, (void __percpu **)net->mib.icmpv6_statistics, 199 snmp6_seq_show_item(seq, (void __percpu **)net->mib.icmpv6_statistics,
189 snmp6_icmp6_list); 200 snmp6_icmp6_list);
190 snmp6_seq_show_icmpv6msg(seq, 201 snmp6_seq_show_icmpv6msg(seq,
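
The proc.c change follows the IPv6 stats moving to 64-bit counters: on 32-bit hosts a u64 cannot be read atomically, so each per-cpu ipstats_mib carries a u64_stats_sync seqcount, and the new snmp6_seq_show_item64() passes that field's offset (syncpoff) down so the fold can retry torn reads. A loose sketch of what the per-cpu fold does (schematic only; field access simplified):

    u64 res = 0;

    for_each_possible_cpu(cpu) {
            void *ptr = per_cpu_ptr(mib[0], cpu);
            struct u64_stats_sync *syncp = ptr + syncpoff;
            unsigned int start;
            u64 v;

            do {
                    start = u64_stats_fetch_begin(syncp);
                    v = *(u64 *)(ptr + offt);       /* offt: counter offset */
            } while (u64_stats_fetch_retry(syncp, start));
            res += v;
    }
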
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 4a4dcbe4f8b2..e677937a07fc 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -602,31 +602,33 @@ out:
602} 602}
603 603
604static int rawv6_send_hdrinc(struct sock *sk, void *from, int length, 604static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
605 struct flowi *fl, struct rt6_info *rt, 605 struct flowi *fl, struct dst_entry **dstp,
606 unsigned int flags) 606 unsigned int flags)
607{ 607{
608 struct ipv6_pinfo *np = inet6_sk(sk); 608 struct ipv6_pinfo *np = inet6_sk(sk);
609 struct ipv6hdr *iph; 609 struct ipv6hdr *iph;
610 struct sk_buff *skb; 610 struct sk_buff *skb;
611 int err; 611 int err;
612 struct rt6_info *rt = (struct rt6_info *)*dstp;
612 613
613 if (length > rt->u.dst.dev->mtu) { 614 if (length > rt->dst.dev->mtu) {
614 ipv6_local_error(sk, EMSGSIZE, fl, rt->u.dst.dev->mtu); 615 ipv6_local_error(sk, EMSGSIZE, fl, rt->dst.dev->mtu);
615 return -EMSGSIZE; 616 return -EMSGSIZE;
616 } 617 }
617 if (flags&MSG_PROBE) 618 if (flags&MSG_PROBE)
618 goto out; 619 goto out;
619 620
620 skb = sock_alloc_send_skb(sk, 621 skb = sock_alloc_send_skb(sk,
621 length + LL_ALLOCATED_SPACE(rt->u.dst.dev) + 15, 622 length + LL_ALLOCATED_SPACE(rt->dst.dev) + 15,
622 flags & MSG_DONTWAIT, &err); 623 flags & MSG_DONTWAIT, &err);
623 if (skb == NULL) 624 if (skb == NULL)
624 goto error; 625 goto error;
625 skb_reserve(skb, LL_RESERVED_SPACE(rt->u.dst.dev)); 626 skb_reserve(skb, LL_RESERVED_SPACE(rt->dst.dev));
626 627
627 skb->priority = sk->sk_priority; 628 skb->priority = sk->sk_priority;
628 skb->mark = sk->sk_mark; 629 skb->mark = sk->sk_mark;
629 skb_dst_set(skb, dst_clone(&rt->u.dst)); 630 skb_dst_set(skb, &rt->dst);
631 *dstp = NULL;
630 632
631 skb_put(skb, length); 633 skb_put(skb, length);
632 skb_reset_network_header(skb); 634 skb_reset_network_header(skb);
@@ -641,7 +643,7 @@ static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
641 643
642 IP6_UPD_PO_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len); 644 IP6_UPD_PO_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
643 err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL, 645 err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
644 rt->u.dst.dev, dst_output); 646 rt->dst.dev, dst_output);
645 if (err > 0) 647 if (err > 0)
646 err = net_xmit_errno(err); 648 err = net_xmit_errno(err);
647 if (err) 649 if (err)
@@ -725,7 +727,7 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
725{ 727{
726 struct ipv6_txoptions opt_space; 728 struct ipv6_txoptions opt_space;
727 struct sockaddr_in6 * sin6 = (struct sockaddr_in6 *) msg->msg_name; 729 struct sockaddr_in6 * sin6 = (struct sockaddr_in6 *) msg->msg_name;
728 struct in6_addr *daddr, *final_p = NULL, final; 730 struct in6_addr *daddr, *final_p, final;
729 struct inet_sock *inet = inet_sk(sk); 731 struct inet_sock *inet = inet_sk(sk);
730 struct ipv6_pinfo *np = inet6_sk(sk); 732 struct ipv6_pinfo *np = inet6_sk(sk);
731 struct raw6_sock *rp = raw6_sk(sk); 733 struct raw6_sock *rp = raw6_sk(sk);
@@ -847,13 +849,7 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
847 if (ipv6_addr_any(&fl.fl6_src) && !ipv6_addr_any(&np->saddr)) 849 if (ipv6_addr_any(&fl.fl6_src) && !ipv6_addr_any(&np->saddr))
848 ipv6_addr_copy(&fl.fl6_src, &np->saddr); 850 ipv6_addr_copy(&fl.fl6_src, &np->saddr);
849 851
850 /* merge ip6_build_xmit from ip6_output */ 852 final_p = fl6_update_dst(&fl, opt, &final);
851 if (opt && opt->srcrt) {
852 struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
853 ipv6_addr_copy(&final, &fl.fl6_dst);
854 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
855 final_p = &final;
856 }
857 853
858 if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst)) 854 if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
859 fl.oif = np->mcast_oif; 855 fl.oif = np->mcast_oif;
@@ -892,9 +888,9 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
892 goto do_confirm; 888 goto do_confirm;
893 889
894back_from_confirm: 890back_from_confirm:
895 if (inet->hdrincl) { 891 if (inet->hdrincl)
896 err = rawv6_send_hdrinc(sk, msg->msg_iov, len, &fl, (struct rt6_info*)dst, msg->msg_flags); 892 err = rawv6_send_hdrinc(sk, msg->msg_iov, len, &fl, &dst, msg->msg_flags);
897 } else { 893 else {
898 lock_sock(sk); 894 lock_sock(sk);
899 err = ip6_append_data(sk, ip_generic_getfrag, msg->msg_iov, 895 err = ip6_append_data(sk, ip_generic_getfrag, msg->msg_iov,
900 len, 0, hlimit, tclass, opt, &fl, (struct rt6_info*)dst, 896 len, 0, hlimit, tclass, opt, &fl, (struct rt6_info*)dst,
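
Two things happen in the raw.c hunks. The open-coded routing-header handling collapses into the shared fl6_update_dst() helper, and rawv6_send_hdrinc() now takes struct dst_entry ** and consumes the caller's reference outright: skb_dst_set() steals the reference (note the dropped dst_clone()), and *dstp is cleared so the caller's common release path cannot drop it a second time. The ownership-transfer pattern in brief:

    skb_dst_set(skb, &rt->dst);     /* skb now owns the reference */
    *dstp = NULL;                   /* caller must not release it again */
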
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 6d4292ff5854..545c4141b755 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -150,11 +150,8 @@ int ip6_frag_match(struct inet_frag_queue *q, void *a)
150EXPORT_SYMBOL(ip6_frag_match); 150EXPORT_SYMBOL(ip6_frag_match);
151 151
152/* Memory Tracking Functions. */ 152/* Memory Tracking Functions. */
153static inline void frag_kfree_skb(struct netns_frags *nf, 153static void frag_kfree_skb(struct netns_frags *nf, struct sk_buff *skb)
154 struct sk_buff *skb, int *work)
155{ 154{
156 if (work)
157 *work -= skb->truesize;
158 atomic_sub(skb->truesize, &nf->mem); 155 atomic_sub(skb->truesize, &nf->mem);
159 kfree_skb(skb); 156 kfree_skb(skb);
160} 157}
@@ -336,6 +333,11 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
336 * in the chain of fragments so far. We must know where to put 333 * in the chain of fragments so far. We must know where to put
337 * this fragment, right? 334 * this fragment, right?
338 */ 335 */
336 prev = fq->q.fragments_tail;
337 if (!prev || FRAG6_CB(prev)->offset < offset) {
338 next = NULL;
339 goto found;
340 }
339 prev = NULL; 341 prev = NULL;
340 for(next = fq->q.fragments; next != NULL; next = next->next) { 342 for(next = fq->q.fragments; next != NULL; next = next->next) {
341 if (FRAG6_CB(next)->offset >= offset) 343 if (FRAG6_CB(next)->offset >= offset)
@@ -343,6 +345,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
343 prev = next; 345 prev = next;
344 } 346 }
345 347
348found:
346 /* We found where to put this one. Check for overlap with 349 /* We found where to put this one. Check for overlap with
347 * preceding fragment, and, if needed, align things so that 350 * preceding fragment, and, if needed, align things so that
348 * any overlaps are eliminated. 351 * any overlaps are eliminated.
@@ -392,7 +395,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
392 fq->q.fragments = next; 395 fq->q.fragments = next;
393 396
394 fq->q.meat -= free_it->len; 397 fq->q.meat -= free_it->len;
395 frag_kfree_skb(fq->q.net, free_it, NULL); 398 frag_kfree_skb(fq->q.net, free_it);
396 } 399 }
397 } 400 }
398 401
@@ -400,6 +403,8 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
400 403
401 /* Insert this fragment in the chain of fragments. */ 404 /* Insert this fragment in the chain of fragments. */
402 skb->next = next; 405 skb->next = next;
406 if (!next)
407 fq->q.fragments_tail = skb;
403 if (prev) 408 if (prev)
404 prev->next = skb; 409 prev->next = skb;
405 else 410 else
@@ -466,6 +471,8 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
466 goto out_oom; 471 goto out_oom;
467 472
468 fp->next = head->next; 473 fp->next = head->next;
474 if (!fp->next)
475 fq->q.fragments_tail = fp;
469 prev->next = fp; 476 prev->next = fp;
470 477
471 skb_morph(head, fq->q.fragments); 478 skb_morph(head, fq->q.fragments);
@@ -524,7 +531,6 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
524 skb_shinfo(head)->frag_list = head->next; 531 skb_shinfo(head)->frag_list = head->next;
525 skb_reset_transport_header(head); 532 skb_reset_transport_header(head);
526 skb_push(head, head->data - skb_network_header(head)); 533 skb_push(head, head->data - skb_network_header(head));
527 atomic_sub(head->truesize, &fq->q.net->mem);
528 534
529 for (fp=head->next; fp; fp = fp->next) { 535 for (fp=head->next; fp; fp = fp->next) {
530 head->data_len += fp->len; 536 head->data_len += fp->len;
@@ -534,8 +540,8 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
534 else if (head->ip_summed == CHECKSUM_COMPLETE) 540 else if (head->ip_summed == CHECKSUM_COMPLETE)
535 head->csum = csum_add(head->csum, fp->csum); 541 head->csum = csum_add(head->csum, fp->csum);
536 head->truesize += fp->truesize; 542 head->truesize += fp->truesize;
537 atomic_sub(fp->truesize, &fq->q.net->mem);
538 } 543 }
544 atomic_sub(head->truesize, &fq->q.net->mem);
539 545
540 head->next = NULL; 546 head->next = NULL;
541 head->dev = dev; 547 head->dev = dev;
@@ -553,6 +559,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
553 IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS); 559 IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
554 rcu_read_unlock(); 560 rcu_read_unlock();
555 fq->q.fragments = NULL; 561 fq->q.fragments = NULL;
562 fq->q.fragments_tail = NULL;
556 return 1; 563 return 1;
557 564
558out_oversize: 565out_oversize:
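
In both reassembly copies above, the per-fragment memory accounting is batched: the loop already accumulates every fragment's truesize into head->truesize, so a single atomic_sub() of the final total after the loop replaces one atomic operation per fragment plus the separate subtraction for the head, cutting atomic traffic on the shared frag-memory counter:

    for (fp = head->next; fp; fp = fp->next) {
            /* ... */
            head->truesize += fp->truesize;
    }
    atomic_sub(head->truesize, &fq->q.net->mem);    /* one atomic op total */
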
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 252d76199c41..8f2d0400cf8a 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -126,16 +126,14 @@ static struct dst_ops ip6_dst_blackhole_ops = {
126}; 126};
127 127
128static struct rt6_info ip6_null_entry_template = { 128static struct rt6_info ip6_null_entry_template = {
129 .u = { 129 .dst = {
130 .dst = { 130 .__refcnt = ATOMIC_INIT(1),
131 .__refcnt = ATOMIC_INIT(1), 131 .__use = 1,
132 .__use = 1, 132 .obsolete = -1,
133 .obsolete = -1, 133 .error = -ENETUNREACH,
134 .error = -ENETUNREACH, 134 .metrics = { [RTAX_HOPLIMIT - 1] = 255, },
135 .metrics = { [RTAX_HOPLIMIT - 1] = 255, }, 135 .input = ip6_pkt_discard,
136 .input = ip6_pkt_discard, 136 .output = ip6_pkt_discard_out,
137 .output = ip6_pkt_discard_out,
138 }
139 }, 137 },
140 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP), 138 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
141 .rt6i_protocol = RTPROT_KERNEL, 139 .rt6i_protocol = RTPROT_KERNEL,
@@ -149,16 +147,14 @@ static int ip6_pkt_prohibit(struct sk_buff *skb);
149static int ip6_pkt_prohibit_out(struct sk_buff *skb); 147static int ip6_pkt_prohibit_out(struct sk_buff *skb);
150 148
151static struct rt6_info ip6_prohibit_entry_template = { 149static struct rt6_info ip6_prohibit_entry_template = {
152 .u = { 150 .dst = {
153 .dst = { 151 .__refcnt = ATOMIC_INIT(1),
154 .__refcnt = ATOMIC_INIT(1), 152 .__use = 1,
155 .__use = 1, 153 .obsolete = -1,
156 .obsolete = -1, 154 .error = -EACCES,
157 .error = -EACCES, 155 .metrics = { [RTAX_HOPLIMIT - 1] = 255, },
158 .metrics = { [RTAX_HOPLIMIT - 1] = 255, }, 156 .input = ip6_pkt_prohibit,
159 .input = ip6_pkt_prohibit, 157 .output = ip6_pkt_prohibit_out,
160 .output = ip6_pkt_prohibit_out,
161 }
162 }, 158 },
163 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP), 159 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
164 .rt6i_protocol = RTPROT_KERNEL, 160 .rt6i_protocol = RTPROT_KERNEL,
@@ -167,16 +163,14 @@ static struct rt6_info ip6_prohibit_entry_template = {
167}; 163};
168 164
169static struct rt6_info ip6_blk_hole_entry_template = { 165static struct rt6_info ip6_blk_hole_entry_template = {
170 .u = { 166 .dst = {
171 .dst = { 167 .__refcnt = ATOMIC_INIT(1),
172 .__refcnt = ATOMIC_INIT(1), 168 .__use = 1,
173 .__use = 1, 169 .obsolete = -1,
174 .obsolete = -1, 170 .error = -EINVAL,
175 .error = -EINVAL, 171 .metrics = { [RTAX_HOPLIMIT - 1] = 255, },
176 .metrics = { [RTAX_HOPLIMIT - 1] = 255, }, 172 .input = dst_discard,
177 .input = dst_discard, 173 .output = dst_discard,
178 .output = dst_discard,
179 }
180 }, 174 },
181 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP), 175 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
182 .rt6i_protocol = RTPROT_KERNEL, 176 .rt6i_protocol = RTPROT_KERNEL,
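
The sweeping rt->u.dst → rt->dst rename that dominates the route.c diff is the fallout of removing a degenerate union: struct rt6_info wrapped its embedded dst_entry in a single-member union, which bought nothing and forced the extra .u step at every use site. Schematically:

    /* before: a one-member union around the embedded dst */
    struct rt6_info {
            union {
                    struct dst_entry dst;
            } u;
            /* ... */
    };

    /* after: embedded directly; it must remain the first member so
     * (struct dst_entry *) <-> (struct rt6_info *) casts keep working */
    struct rt6_info {
            struct dst_entry dst;
            /* ... */
    };

Most of the remaining hunks in this file are that substitution applied mechanically.
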
@@ -249,7 +243,7 @@ static inline struct rt6_info *rt6_device_match(struct net *net,
249 if (!oif && ipv6_addr_any(saddr)) 243 if (!oif && ipv6_addr_any(saddr))
250 goto out; 244 goto out;
251 245
252 for (sprt = rt; sprt; sprt = sprt->u.dst.rt6_next) { 246 for (sprt = rt; sprt; sprt = sprt->dst.rt6_next) {
253 struct net_device *dev = sprt->rt6i_dev; 247 struct net_device *dev = sprt->rt6i_dev;
254 248
255 if (oif) { 249 if (oif) {
@@ -407,10 +401,10 @@ static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
407 401
408 match = NULL; 402 match = NULL;
409 for (rt = rr_head; rt && rt->rt6i_metric == metric; 403 for (rt = rr_head; rt && rt->rt6i_metric == metric;
410 rt = rt->u.dst.rt6_next) 404 rt = rt->dst.rt6_next)
411 match = find_match(rt, oif, strict, &mpri, match); 405 match = find_match(rt, oif, strict, &mpri, match);
412 for (rt = fn->leaf; rt && rt != rr_head && rt->rt6i_metric == metric; 406 for (rt = fn->leaf; rt && rt != rr_head && rt->rt6i_metric == metric;
413 rt = rt->u.dst.rt6_next) 407 rt = rt->dst.rt6_next)
414 match = find_match(rt, oif, strict, &mpri, match); 408 match = find_match(rt, oif, strict, &mpri, match);
415 409
416 return match; 410 return match;
@@ -432,7 +426,7 @@ static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
432 426
433 if (!match && 427 if (!match &&
434 (strict & RT6_LOOKUP_F_REACHABLE)) { 428 (strict & RT6_LOOKUP_F_REACHABLE)) {
435 struct rt6_info *next = rt0->u.dst.rt6_next; 429 struct rt6_info *next = rt0->dst.rt6_next;
436 430
437 /* no entries matched; do round-robin */ 431 /* no entries matched; do round-robin */
438 if (!next || next->rt6i_metric != rt0->rt6i_metric) 432 if (!next || next->rt6i_metric != rt0->rt6i_metric)
@@ -517,7 +511,7 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
517 rt->rt6i_expires = jiffies + HZ * lifetime; 511 rt->rt6i_expires = jiffies + HZ * lifetime;
518 rt->rt6i_flags |= RTF_EXPIRES; 512 rt->rt6i_flags |= RTF_EXPIRES;
519 } 513 }
520 dst_release(&rt->u.dst); 514 dst_release(&rt->dst);
521 } 515 }
522 return 0; 516 return 0;
523} 517}
@@ -555,7 +549,7 @@ restart:
555 rt = rt6_device_match(net, rt, &fl->fl6_src, fl->oif, flags); 549 rt = rt6_device_match(net, rt, &fl->fl6_src, fl->oif, flags);
556 BACKTRACK(net, &fl->fl6_src); 550 BACKTRACK(net, &fl->fl6_src);
557out: 551out:
558 dst_use(&rt->u.dst, jiffies); 552 dst_use(&rt->dst, jiffies);
559 read_unlock_bh(&table->tb6_lock); 553 read_unlock_bh(&table->tb6_lock);
560 return rt; 554 return rt;
561 555
@@ -643,7 +637,7 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, struct in6_addr *dad
643 ipv6_addr_copy(&rt->rt6i_dst.addr, daddr); 637 ipv6_addr_copy(&rt->rt6i_dst.addr, daddr);
644 rt->rt6i_dst.plen = 128; 638 rt->rt6i_dst.plen = 128;
645 rt->rt6i_flags |= RTF_CACHE; 639 rt->rt6i_flags |= RTF_CACHE;
646 rt->u.dst.flags |= DST_HOST; 640 rt->dst.flags |= DST_HOST;
647 641
648#ifdef CONFIG_IPV6_SUBTREES 642#ifdef CONFIG_IPV6_SUBTREES
649 if (rt->rt6i_src.plen && saddr) { 643 if (rt->rt6i_src.plen && saddr) {
@@ -677,7 +671,7 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, struct in6_addr *dad
677 if (net_ratelimit()) 671 if (net_ratelimit())
678 printk(KERN_WARNING 672 printk(KERN_WARNING
679 "Neighbour table overflow.\n"); 673 "Neighbour table overflow.\n");
680 dst_free(&rt->u.dst); 674 dst_free(&rt->dst);
681 return NULL; 675 return NULL;
682 } 676 }
683 rt->rt6i_nexthop = neigh; 677 rt->rt6i_nexthop = neigh;
@@ -694,7 +688,7 @@ static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort, struct in6_addr *d
694 ipv6_addr_copy(&rt->rt6i_dst.addr, daddr); 688 ipv6_addr_copy(&rt->rt6i_dst.addr, daddr);
695 rt->rt6i_dst.plen = 128; 689 rt->rt6i_dst.plen = 128;
696 rt->rt6i_flags |= RTF_CACHE; 690 rt->rt6i_flags |= RTF_CACHE;
697 rt->u.dst.flags |= DST_HOST; 691 rt->dst.flags |= DST_HOST;
698 rt->rt6i_nexthop = neigh_clone(ort->rt6i_nexthop); 692 rt->rt6i_nexthop = neigh_clone(ort->rt6i_nexthop);
699 } 693 }
700 return rt; 694 return rt;
@@ -726,7 +720,7 @@ restart:
726 rt->rt6i_flags & RTF_CACHE) 720 rt->rt6i_flags & RTF_CACHE)
727 goto out; 721 goto out;
728 722
729 dst_hold(&rt->u.dst); 723 dst_hold(&rt->dst);
730 read_unlock_bh(&table->tb6_lock); 724 read_unlock_bh(&table->tb6_lock);
731 725
732 if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP)) 726 if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP))
@@ -739,10 +733,10 @@ restart:
739#endif 733#endif
740 } 734 }
741 735
742 dst_release(&rt->u.dst); 736 dst_release(&rt->dst);
743 rt = nrt ? : net->ipv6.ip6_null_entry; 737 rt = nrt ? : net->ipv6.ip6_null_entry;
744 738
745 dst_hold(&rt->u.dst); 739 dst_hold(&rt->dst);
746 if (nrt) { 740 if (nrt) {
747 err = ip6_ins_rt(nrt); 741 err = ip6_ins_rt(nrt);
748 if (!err) 742 if (!err)
@@ -756,7 +750,7 @@ restart:
756 * Race condition! In the gap, when table->tb6_lock was 750 * Race condition! In the gap, when table->tb6_lock was
757 * released someone could insert this route. Relookup. 751 * released someone could insert this route. Relookup.
758 */ 752 */
759 dst_release(&rt->u.dst); 753 dst_release(&rt->dst);
760 goto relookup; 754 goto relookup;
761 755
762out: 756out:
@@ -764,11 +758,11 @@ out:
764 reachable = 0; 758 reachable = 0;
765 goto restart_2; 759 goto restart_2;
766 } 760 }
767 dst_hold(&rt->u.dst); 761 dst_hold(&rt->dst);
768 read_unlock_bh(&table->tb6_lock); 762 read_unlock_bh(&table->tb6_lock);
769out2: 763out2:
770 rt->u.dst.lastuse = jiffies; 764 rt->dst.lastuse = jiffies;
771 rt->u.dst.__use++; 765 rt->dst.__use++;
772 766
773 return rt; 767 return rt;
774} 768}
@@ -835,15 +829,15 @@ int ip6_dst_blackhole(struct sock *sk, struct dst_entry **dstp, struct flowi *fl
835 struct dst_entry *new = NULL; 829 struct dst_entry *new = NULL;
836 830
837 if (rt) { 831 if (rt) {
838 new = &rt->u.dst; 832 new = &rt->dst;
839 833
840 atomic_set(&new->__refcnt, 1); 834 atomic_set(&new->__refcnt, 1);
841 new->__use = 1; 835 new->__use = 1;
842 new->input = dst_discard; 836 new->input = dst_discard;
843 new->output = dst_discard; 837 new->output = dst_discard;
844 838
845 memcpy(new->metrics, ort->u.dst.metrics, RTAX_MAX*sizeof(u32)); 839 memcpy(new->metrics, ort->dst.metrics, RTAX_MAX*sizeof(u32));
846 new->dev = ort->u.dst.dev; 840 new->dev = ort->dst.dev;
847 if (new->dev) 841 if (new->dev)
848 dev_hold(new->dev); 842 dev_hold(new->dev);
849 rt->rt6i_idev = ort->rt6i_idev; 843 rt->rt6i_idev = ort->rt6i_idev;
@@ -912,7 +906,7 @@ static void ip6_link_failure(struct sk_buff *skb)
912 rt = (struct rt6_info *) skb_dst(skb); 906 rt = (struct rt6_info *) skb_dst(skb);
913 if (rt) { 907 if (rt) {
914 if (rt->rt6i_flags&RTF_CACHE) { 908 if (rt->rt6i_flags&RTF_CACHE) {
915 dst_set_expires(&rt->u.dst, 0); 909 dst_set_expires(&rt->dst, 0);
916 rt->rt6i_flags |= RTF_EXPIRES; 910 rt->rt6i_flags |= RTF_EXPIRES;
917 } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) 911 } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT))
918 rt->rt6i_node->fn_sernum = -1; 912 rt->rt6i_node->fn_sernum = -1;
@@ -986,14 +980,14 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
986 rt->rt6i_dev = dev; 980 rt->rt6i_dev = dev;
987 rt->rt6i_idev = idev; 981 rt->rt6i_idev = idev;
988 rt->rt6i_nexthop = neigh; 982 rt->rt6i_nexthop = neigh;
989 atomic_set(&rt->u.dst.__refcnt, 1); 983 atomic_set(&rt->dst.__refcnt, 1);
990 rt->u.dst.metrics[RTAX_HOPLIMIT-1] = 255; 984 rt->dst.metrics[RTAX_HOPLIMIT-1] = 255;
991 rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev); 985 rt->dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev);
992 rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->u.dst)); 986 rt->dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->dst));
993 rt->u.dst.output = ip6_output; 987 rt->dst.output = ip6_output;
994 988
995#if 0 /* there's no chance to use these for ndisc */ 989#if 0 /* there's no chance to use these for ndisc */
996 rt->u.dst.flags = ipv6_addr_type(addr) & IPV6_ADDR_UNICAST 990 rt->dst.flags = ipv6_addr_type(addr) & IPV6_ADDR_UNICAST
997 ? DST_HOST 991 ? DST_HOST
998 : 0; 992 : 0;
999 ipv6_addr_copy(&rt->rt6i_dst.addr, addr); 993 ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
@@ -1001,14 +995,14 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
1001#endif 995#endif
1002 996
1003 spin_lock_bh(&icmp6_dst_lock); 997 spin_lock_bh(&icmp6_dst_lock);
1004 rt->u.dst.next = icmp6_dst_gc_list; 998 rt->dst.next = icmp6_dst_gc_list;
1005 icmp6_dst_gc_list = &rt->u.dst; 999 icmp6_dst_gc_list = &rt->dst;
1006 spin_unlock_bh(&icmp6_dst_lock); 1000 spin_unlock_bh(&icmp6_dst_lock);
1007 1001
1008 fib6_force_start_gc(net); 1002 fib6_force_start_gc(net);
1009 1003
1010out: 1004out:
1011 return &rt->u.dst; 1005 return &rt->dst;
1012} 1006}
1013 1007
1014int icmp6_dst_gc(void) 1008int icmp6_dst_gc(void)
@@ -1090,11 +1084,11 @@ static int ipv6_get_mtu(struct net_device *dev)
1090 int mtu = IPV6_MIN_MTU; 1084 int mtu = IPV6_MIN_MTU;
1091 struct inet6_dev *idev; 1085 struct inet6_dev *idev;
1092 1086
1093 idev = in6_dev_get(dev); 1087 rcu_read_lock();
1094 if (idev) { 1088 idev = __in6_dev_get(dev);
1089 if (idev)
1095 mtu = idev->cnf.mtu6; 1090 mtu = idev->cnf.mtu6;
1096 in6_dev_put(idev); 1091 rcu_read_unlock();
1097 }
1098 return mtu; 1092 return mtu;
1099} 1093}
1100 1094
@@ -1103,12 +1097,15 @@ int ip6_dst_hoplimit(struct dst_entry *dst)
1103 int hoplimit = dst_metric(dst, RTAX_HOPLIMIT); 1097 int hoplimit = dst_metric(dst, RTAX_HOPLIMIT);
1104 if (hoplimit < 0) { 1098 if (hoplimit < 0) {
1105 struct net_device *dev = dst->dev; 1099 struct net_device *dev = dst->dev;
1106 struct inet6_dev *idev = in6_dev_get(dev); 1100 struct inet6_dev *idev;
1107 if (idev) { 1101
1102 rcu_read_lock();
1103 idev = __in6_dev_get(dev);
1104 if (idev)
1108 hoplimit = idev->cnf.hop_limit; 1105 hoplimit = idev->cnf.hop_limit;
1109 in6_dev_put(idev); 1106 else
1110 } else
1111 hoplimit = dev_net(dev)->ipv6.devconf_all->hop_limit; 1107 hoplimit = dev_net(dev)->ipv6.devconf_all->hop_limit;
1108 rcu_read_unlock();
1112 } 1109 }
1113 return hoplimit; 1110 return hoplimit;
1114} 1111}
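
ipv6_get_mtu() and ip6_dst_hoplimit() above switch from in6_dev_get() to __in6_dev_get() under rcu_read_lock(): inet6_dev is RCU-protected, so these read-only lookups can skip the refcount inc/dec pair entirely on a hot path. The pattern:

    rcu_read_lock();
    idev = __in6_dev_get(dev);      /* no reference taken; valid under RCU */
    if (idev)
            mtu = idev->cnf.mtu6;
    rcu_read_unlock();
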
@@ -1159,7 +1156,7 @@ int ip6_route_add(struct fib6_config *cfg)
1159 goto out; 1156 goto out;
1160 } 1157 }
1161 1158
1162 rt->u.dst.obsolete = -1; 1159 rt->dst.obsolete = -1;
1163 rt->rt6i_expires = (cfg->fc_flags & RTF_EXPIRES) ? 1160 rt->rt6i_expires = (cfg->fc_flags & RTF_EXPIRES) ?
1164 jiffies + clock_t_to_jiffies(cfg->fc_expires) : 1161 jiffies + clock_t_to_jiffies(cfg->fc_expires) :
1165 0; 1162 0;
@@ -1171,16 +1168,16 @@ int ip6_route_add(struct fib6_config *cfg)
1171 addr_type = ipv6_addr_type(&cfg->fc_dst); 1168 addr_type = ipv6_addr_type(&cfg->fc_dst);
1172 1169
1173 if (addr_type & IPV6_ADDR_MULTICAST) 1170 if (addr_type & IPV6_ADDR_MULTICAST)
1174 rt->u.dst.input = ip6_mc_input; 1171 rt->dst.input = ip6_mc_input;
1175 else 1172 else
1176 rt->u.dst.input = ip6_forward; 1173 rt->dst.input = ip6_forward;
1177 1174
1178 rt->u.dst.output = ip6_output; 1175 rt->dst.output = ip6_output;
1179 1176
1180 ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len); 1177 ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
1181 rt->rt6i_dst.plen = cfg->fc_dst_len; 1178 rt->rt6i_dst.plen = cfg->fc_dst_len;
1182 if (rt->rt6i_dst.plen == 128) 1179 if (rt->rt6i_dst.plen == 128)
1183 rt->u.dst.flags = DST_HOST; 1180 rt->dst.flags = DST_HOST;
1184 1181
1185#ifdef CONFIG_IPV6_SUBTREES 1182#ifdef CONFIG_IPV6_SUBTREES
1186 ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len); 1183 ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
@@ -1208,9 +1205,9 @@ int ip6_route_add(struct fib6_config *cfg)
1208 goto out; 1205 goto out;
1209 } 1206 }
1210 } 1207 }
1211 rt->u.dst.output = ip6_pkt_discard_out; 1208 rt->dst.output = ip6_pkt_discard_out;
1212 rt->u.dst.input = ip6_pkt_discard; 1209 rt->dst.input = ip6_pkt_discard;
1213 rt->u.dst.error = -ENETUNREACH; 1210 rt->dst.error = -ENETUNREACH;
1214 rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP; 1211 rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
1215 goto install_route; 1212 goto install_route;
1216 } 1213 }
@@ -1244,7 +1241,7 @@ int ip6_route_add(struct fib6_config *cfg)
1244 goto out; 1241 goto out;
1245 if (dev) { 1242 if (dev) {
1246 if (dev != grt->rt6i_dev) { 1243 if (dev != grt->rt6i_dev) {
1247 dst_release(&grt->u.dst); 1244 dst_release(&grt->dst);
1248 goto out; 1245 goto out;
1249 } 1246 }
1250 } else { 1247 } else {
@@ -1255,7 +1252,7 @@ int ip6_route_add(struct fib6_config *cfg)
1255 } 1252 }
1256 if (!(grt->rt6i_flags&RTF_GATEWAY)) 1253 if (!(grt->rt6i_flags&RTF_GATEWAY))
1257 err = 0; 1254 err = 0;
1258 dst_release(&grt->u.dst); 1255 dst_release(&grt->dst);
1259 1256
1260 if (err) 1257 if (err)
1261 goto out; 1258 goto out;
@@ -1294,18 +1291,18 @@ install_route:
1294 goto out; 1291 goto out;
1295 } 1292 }
1296 1293
1297 rt->u.dst.metrics[type - 1] = nla_get_u32(nla); 1294 rt->dst.metrics[type - 1] = nla_get_u32(nla);
1298 } 1295 }
1299 } 1296 }
1300 } 1297 }
1301 1298
1302 if (dst_metric(&rt->u.dst, RTAX_HOPLIMIT) == 0) 1299 if (dst_metric(&rt->dst, RTAX_HOPLIMIT) == 0)
1303 rt->u.dst.metrics[RTAX_HOPLIMIT-1] = -1; 1300 rt->dst.metrics[RTAX_HOPLIMIT-1] = -1;
1304 if (!dst_mtu(&rt->u.dst)) 1301 if (!dst_mtu(&rt->dst))
1305 rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(dev); 1302 rt->dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(dev);
1306 if (!dst_metric(&rt->u.dst, RTAX_ADVMSS)) 1303 if (!dst_metric(&rt->dst, RTAX_ADVMSS))
1307 rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->u.dst)); 1304 rt->dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->dst));
1308 rt->u.dst.dev = dev; 1305 rt->dst.dev = dev;
1309 rt->rt6i_idev = idev; 1306 rt->rt6i_idev = idev;
1310 rt->rt6i_table = table; 1307 rt->rt6i_table = table;
1311 1308
@@ -1319,7 +1316,7 @@ out:
1319 if (idev) 1316 if (idev)
1320 in6_dev_put(idev); 1317 in6_dev_put(idev);
1321 if (rt) 1318 if (rt)
1322 dst_free(&rt->u.dst); 1319 dst_free(&rt->dst);
1323 return err; 1320 return err;
1324} 1321}
1325 1322
@@ -1336,7 +1333,7 @@ static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
1336 write_lock_bh(&table->tb6_lock); 1333 write_lock_bh(&table->tb6_lock);
1337 1334
1338 err = fib6_del(rt, info); 1335 err = fib6_del(rt, info);
1339 dst_release(&rt->u.dst); 1336 dst_release(&rt->dst);
1340 1337
1341 write_unlock_bh(&table->tb6_lock); 1338 write_unlock_bh(&table->tb6_lock);
1342 1339
@@ -1369,7 +1366,7 @@ static int ip6_route_del(struct fib6_config *cfg)
1369 &cfg->fc_src, cfg->fc_src_len); 1366 &cfg->fc_src, cfg->fc_src_len);
1370 1367
1371 if (fn) { 1368 if (fn) {
1372 for (rt = fn->leaf; rt; rt = rt->u.dst.rt6_next) { 1369 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
1373 if (cfg->fc_ifindex && 1370 if (cfg->fc_ifindex &&
1374 (rt->rt6i_dev == NULL || 1371 (rt->rt6i_dev == NULL ||
1375 rt->rt6i_dev->ifindex != cfg->fc_ifindex)) 1372 rt->rt6i_dev->ifindex != cfg->fc_ifindex))
@@ -1379,7 +1376,7 @@ static int ip6_route_del(struct fib6_config *cfg)
1379 continue; 1376 continue;
1380 if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric) 1377 if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)
1381 continue; 1378 continue;
1382 dst_hold(&rt->u.dst); 1379 dst_hold(&rt->dst);
1383 read_unlock_bh(&table->tb6_lock); 1380 read_unlock_bh(&table->tb6_lock);
1384 1381
1385 return __ip6_del_rt(rt, &cfg->fc_nlinfo); 1382 return __ip6_del_rt(rt, &cfg->fc_nlinfo);
@@ -1421,7 +1418,7 @@ static struct rt6_info *__ip6_route_redirect(struct net *net,
1421 read_lock_bh(&table->tb6_lock); 1418 read_lock_bh(&table->tb6_lock);
1422 fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src); 1419 fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src);
1423restart: 1420restart:
1424 for (rt = fn->leaf; rt; rt = rt->u.dst.rt6_next) { 1421 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
1425 /* 1422 /*
1426 * Current route is on-link; redirect is always invalid. 1423 * Current route is on-link; redirect is always invalid.
1427 * 1424 *
@@ -1445,7 +1442,7 @@ restart:
1445 rt = net->ipv6.ip6_null_entry; 1442 rt = net->ipv6.ip6_null_entry;
1446 BACKTRACK(net, &fl->fl6_src); 1443 BACKTRACK(net, &fl->fl6_src);
1447out: 1444out:
1448 dst_hold(&rt->u.dst); 1445 dst_hold(&rt->dst);
1449 1446
1450 read_unlock_bh(&table->tb6_lock); 1447 read_unlock_bh(&table->tb6_lock);
1451 1448
@@ -1513,10 +1510,10 @@ void rt6_redirect(struct in6_addr *dest, struct in6_addr *src,
1513 * Look, redirects are sent only in response to data packets, 1510 * Look, redirects are sent only in response to data packets,
1514 * so that this nexthop apparently is reachable. --ANK 1511 * so that this nexthop apparently is reachable. --ANK
1515 */ 1512 */
1516 dst_confirm(&rt->u.dst); 1513 dst_confirm(&rt->dst);
1517 1514
1518 /* Duplicate redirect: silently ignore. */ 1515 /* Duplicate redirect: silently ignore. */
1519 if (neigh == rt->u.dst.neighbour) 1516 if (neigh == rt->dst.neighbour)
1520 goto out; 1517 goto out;
1521 1518
1522 nrt = ip6_rt_copy(rt); 1519 nrt = ip6_rt_copy(rt);
@@ -1529,20 +1526,20 @@ void rt6_redirect(struct in6_addr *dest, struct in6_addr *src,
1529 1526
1530 ipv6_addr_copy(&nrt->rt6i_dst.addr, dest); 1527 ipv6_addr_copy(&nrt->rt6i_dst.addr, dest);
1531 nrt->rt6i_dst.plen = 128; 1528 nrt->rt6i_dst.plen = 128;
1532 nrt->u.dst.flags |= DST_HOST; 1529 nrt->dst.flags |= DST_HOST;
1533 1530
1534 ipv6_addr_copy(&nrt->rt6i_gateway, (struct in6_addr*)neigh->primary_key); 1531 ipv6_addr_copy(&nrt->rt6i_gateway, (struct in6_addr*)neigh->primary_key);
1535 nrt->rt6i_nexthop = neigh_clone(neigh); 1532 nrt->rt6i_nexthop = neigh_clone(neigh);
1536 /* Reset pmtu, it may be better */ 1533 /* Reset pmtu, it may be better */
1537 nrt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(neigh->dev); 1534 nrt->dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(neigh->dev);
1538 nrt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dev_net(neigh->dev), 1535 nrt->dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dev_net(neigh->dev),
1539 dst_mtu(&nrt->u.dst)); 1536 dst_mtu(&nrt->dst));
1540 1537
1541 if (ip6_ins_rt(nrt)) 1538 if (ip6_ins_rt(nrt))
1542 goto out; 1539 goto out;
1543 1540
1544 netevent.old = &rt->u.dst; 1541 netevent.old = &rt->dst;
1545 netevent.new = &nrt->u.dst; 1542 netevent.new = &nrt->dst;
1546 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent); 1543 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
1547 1544
1548 if (rt->rt6i_flags&RTF_CACHE) { 1545 if (rt->rt6i_flags&RTF_CACHE) {
@@ -1551,7 +1548,7 @@ void rt6_redirect(struct in6_addr *dest, struct in6_addr *src,
1551 } 1548 }
1552 1549
1553out: 1550out:
1554 dst_release(&rt->u.dst); 1551 dst_release(&rt->dst);
1555} 1552}
1556 1553
1557/* 1554/*
@@ -1570,7 +1567,7 @@ void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
1570 if (rt == NULL) 1567 if (rt == NULL)
1571 return; 1568 return;
1572 1569
1573 if (pmtu >= dst_mtu(&rt->u.dst)) 1570 if (pmtu >= dst_mtu(&rt->dst))
1574 goto out; 1571 goto out;
1575 1572
1576 if (pmtu < IPV6_MIN_MTU) { 1573 if (pmtu < IPV6_MIN_MTU) {
@@ -1588,7 +1585,7 @@ void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
1588 They are sent only in response to data packets, 1585 They are sent only in response to data packets,
1589 so that this nexthop apparently is reachable. --ANK 1586 so that this nexthop apparently is reachable. --ANK
1590 */ 1587 */
1591 dst_confirm(&rt->u.dst); 1588 dst_confirm(&rt->dst);
1592 1589
1593 /* Host route. If it is static, it would be better 1590 /* Host route. If it is static, it would be better
1594 not to override it, but add new one, so that 1591 not to override it, but add new one, so that
@@ -1596,10 +1593,10 @@ void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
1596 would return automatically. 1593 would return automatically.
1597 */ 1594 */
1598 if (rt->rt6i_flags & RTF_CACHE) { 1595 if (rt->rt6i_flags & RTF_CACHE) {
1599 rt->u.dst.metrics[RTAX_MTU-1] = pmtu; 1596 rt->dst.metrics[RTAX_MTU-1] = pmtu;
1600 if (allfrag) 1597 if (allfrag)
1601 rt->u.dst.metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG; 1598 rt->dst.metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG;
1602 dst_set_expires(&rt->u.dst, net->ipv6.sysctl.ip6_rt_mtu_expires); 1599 dst_set_expires(&rt->dst, net->ipv6.sysctl.ip6_rt_mtu_expires);
1603 rt->rt6i_flags |= RTF_MODIFIED|RTF_EXPIRES; 1600 rt->rt6i_flags |= RTF_MODIFIED|RTF_EXPIRES;
1604 goto out; 1601 goto out;
1605 } 1602 }
@@ -1615,9 +1612,9 @@ void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
1615 nrt = rt6_alloc_clone(rt, daddr); 1612 nrt = rt6_alloc_clone(rt, daddr);
1616 1613
1617 if (nrt) { 1614 if (nrt) {
1618 nrt->u.dst.metrics[RTAX_MTU-1] = pmtu; 1615 nrt->dst.metrics[RTAX_MTU-1] = pmtu;
1619 if (allfrag) 1616 if (allfrag)
1620 nrt->u.dst.metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG; 1617 nrt->dst.metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG;
1621 1618
1622 /* According to RFC 1981, detecting PMTU increase shouldn't be 1619 /* According to RFC 1981, detecting PMTU increase shouldn't be
1623 * happened within 5 mins, the recommended timer is 10 mins. 1620 * happened within 5 mins, the recommended timer is 10 mins.
@@ -1625,13 +1622,13 @@ void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr,
1625 * which is 10 mins. After 10 mins the decreased pmtu is expired 1622 * which is 10 mins. After 10 mins the decreased pmtu is expired
1626 * and detecting PMTU increase will be automatically happened. 1623 * and detecting PMTU increase will be automatically happened.
1627 */ 1624 */
1628 dst_set_expires(&nrt->u.dst, net->ipv6.sysctl.ip6_rt_mtu_expires); 1625 dst_set_expires(&nrt->dst, net->ipv6.sysctl.ip6_rt_mtu_expires);
1629 nrt->rt6i_flags |= RTF_DYNAMIC|RTF_EXPIRES; 1626 nrt->rt6i_flags |= RTF_DYNAMIC|RTF_EXPIRES;
1630 1627
1631 ip6_ins_rt(nrt); 1628 ip6_ins_rt(nrt);
1632 } 1629 }
1633out: 1630out:
1634 dst_release(&rt->u.dst); 1631 dst_release(&rt->dst);
1635} 1632}
1636 1633
1637/* 1634/*
@@ -1644,18 +1641,18 @@ static struct rt6_info * ip6_rt_copy(struct rt6_info *ort)
1644 struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops); 1641 struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops);
1645 1642
1646 if (rt) { 1643 if (rt) {
1647 rt->u.dst.input = ort->u.dst.input; 1644 rt->dst.input = ort->dst.input;
1648 rt->u.dst.output = ort->u.dst.output; 1645 rt->dst.output = ort->dst.output;
1649 1646
1650 memcpy(rt->u.dst.metrics, ort->u.dst.metrics, RTAX_MAX*sizeof(u32)); 1647 memcpy(rt->dst.metrics, ort->dst.metrics, RTAX_MAX*sizeof(u32));
1651 rt->u.dst.error = ort->u.dst.error; 1648 rt->dst.error = ort->dst.error;
1652 rt->u.dst.dev = ort->u.dst.dev; 1649 rt->dst.dev = ort->dst.dev;
1653 if (rt->u.dst.dev) 1650 if (rt->dst.dev)
1654 dev_hold(rt->u.dst.dev); 1651 dev_hold(rt->dst.dev);
1655 rt->rt6i_idev = ort->rt6i_idev; 1652 rt->rt6i_idev = ort->rt6i_idev;
1656 if (rt->rt6i_idev) 1653 if (rt->rt6i_idev)
1657 in6_dev_hold(rt->rt6i_idev); 1654 in6_dev_hold(rt->rt6i_idev);
1658 rt->u.dst.lastuse = jiffies; 1655 rt->dst.lastuse = jiffies;
1659 rt->rt6i_expires = 0; 1656 rt->rt6i_expires = 0;
1660 1657
1661 ipv6_addr_copy(&rt->rt6i_gateway, &ort->rt6i_gateway); 1658 ipv6_addr_copy(&rt->rt6i_gateway, &ort->rt6i_gateway);
@@ -1689,14 +1686,14 @@ static struct rt6_info *rt6_get_route_info(struct net *net,
1689 if (!fn) 1686 if (!fn)
1690 goto out; 1687 goto out;
1691 1688
1692 for (rt = fn->leaf; rt; rt = rt->u.dst.rt6_next) { 1689 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
1693 if (rt->rt6i_dev->ifindex != ifindex) 1690 if (rt->rt6i_dev->ifindex != ifindex)
1694 continue; 1691 continue;
1695 if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY)) 1692 if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
1696 continue; 1693 continue;
1697 if (!ipv6_addr_equal(&rt->rt6i_gateway, gwaddr)) 1694 if (!ipv6_addr_equal(&rt->rt6i_gateway, gwaddr))
1698 continue; 1695 continue;
1699 dst_hold(&rt->u.dst); 1696 dst_hold(&rt->dst);
1700 break; 1697 break;
1701 } 1698 }
1702out: 1699out:
@@ -1744,14 +1741,14 @@ struct rt6_info *rt6_get_dflt_router(struct in6_addr *addr, struct net_device *d
1744 return NULL; 1741 return NULL;
1745 1742
1746 write_lock_bh(&table->tb6_lock); 1743 write_lock_bh(&table->tb6_lock);
1747 for (rt = table->tb6_root.leaf; rt; rt=rt->u.dst.rt6_next) { 1744 for (rt = table->tb6_root.leaf; rt; rt=rt->dst.rt6_next) {
1748 if (dev == rt->rt6i_dev && 1745 if (dev == rt->rt6i_dev &&
1749 ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) && 1746 ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
1750 ipv6_addr_equal(&rt->rt6i_gateway, addr)) 1747 ipv6_addr_equal(&rt->rt6i_gateway, addr))
1751 break; 1748 break;
1752 } 1749 }
1753 if (rt) 1750 if (rt)
1754 dst_hold(&rt->u.dst); 1751 dst_hold(&rt->dst);
1755 write_unlock_bh(&table->tb6_lock); 1752 write_unlock_bh(&table->tb6_lock);
1756 return rt; 1753 return rt;
1757} 1754}
@@ -1790,9 +1787,9 @@ void rt6_purge_dflt_routers(struct net *net)
1790 1787
1791restart: 1788restart:
1792 read_lock_bh(&table->tb6_lock); 1789 read_lock_bh(&table->tb6_lock);
1793 for (rt = table->tb6_root.leaf; rt; rt = rt->u.dst.rt6_next) { 1790 for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
1794 if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) { 1791 if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) {
1795 dst_hold(&rt->u.dst); 1792 dst_hold(&rt->dst);
1796 read_unlock_bh(&table->tb6_lock); 1793 read_unlock_bh(&table->tb6_lock);
1797 ip6_del_rt(rt); 1794 ip6_del_rt(rt);
1798 goto restart; 1795 goto restart;
@@ -1930,15 +1927,15 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
1930 dev_hold(net->loopback_dev); 1927 dev_hold(net->loopback_dev);
1931 in6_dev_hold(idev); 1928 in6_dev_hold(idev);
1932 1929
1933 rt->u.dst.flags = DST_HOST; 1930 rt->dst.flags = DST_HOST;
1934 rt->u.dst.input = ip6_input; 1931 rt->dst.input = ip6_input;
1935 rt->u.dst.output = ip6_output; 1932 rt->dst.output = ip6_output;
1936 rt->rt6i_dev = net->loopback_dev; 1933 rt->rt6i_dev = net->loopback_dev;
1937 rt->rt6i_idev = idev; 1934 rt->rt6i_idev = idev;
1938 rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev); 1935 rt->dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev);
1939 rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->u.dst)); 1936 rt->dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->dst));
1940 rt->u.dst.metrics[RTAX_HOPLIMIT-1] = -1; 1937 rt->dst.metrics[RTAX_HOPLIMIT-1] = -1;
1941 rt->u.dst.obsolete = -1; 1938 rt->dst.obsolete = -1;
1942 1939
1943 rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP; 1940 rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
1944 if (anycast) 1941 if (anycast)
@@ -1947,7 +1944,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
1947 rt->rt6i_flags |= RTF_LOCAL; 1944 rt->rt6i_flags |= RTF_LOCAL;
1948 neigh = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway); 1945 neigh = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway);
1949 if (IS_ERR(neigh)) { 1946 if (IS_ERR(neigh)) {
1950 dst_free(&rt->u.dst); 1947 dst_free(&rt->dst);
1951 1948
1952 /* We are casting this because that is the return 1949 /* We are casting this because that is the return
1953 * value type. But an errno encoded pointer is the 1950 * value type. But an errno encoded pointer is the
@@ -1962,7 +1959,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
1962 rt->rt6i_dst.plen = 128; 1959 rt->rt6i_dst.plen = 128;
1963 rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL); 1960 rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL);
1964 1961
1965 atomic_set(&rt->u.dst.__refcnt, 1); 1962 atomic_set(&rt->dst.__refcnt, 1);
1966 1963
1967 return rt; 1964 return rt;
1968} 1965}
@@ -2033,12 +2030,12 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
2033 PMTU discouvery. 2030 PMTU discouvery.
2034 */ 2031 */
2035 if (rt->rt6i_dev == arg->dev && 2032 if (rt->rt6i_dev == arg->dev &&
2036 !dst_metric_locked(&rt->u.dst, RTAX_MTU) && 2033 !dst_metric_locked(&rt->dst, RTAX_MTU) &&
2037 (dst_mtu(&rt->u.dst) >= arg->mtu || 2034 (dst_mtu(&rt->dst) >= arg->mtu ||
2038 (dst_mtu(&rt->u.dst) < arg->mtu && 2035 (dst_mtu(&rt->dst) < arg->mtu &&
2039 dst_mtu(&rt->u.dst) == idev->cnf.mtu6))) { 2036 dst_mtu(&rt->dst) == idev->cnf.mtu6))) {
2040 rt->u.dst.metrics[RTAX_MTU-1] = arg->mtu; 2037 rt->dst.metrics[RTAX_MTU-1] = arg->mtu;
2041 rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, arg->mtu); 2038 rt->dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, arg->mtu);
2042 } 2039 }
2043 return 0; 2040 return 0;
2044} 2041}
@@ -2252,20 +2249,20 @@ static int rt6_fill_node(struct net *net,
2252#endif 2249#endif
2253 NLA_PUT_U32(skb, RTA_IIF, iif); 2250 NLA_PUT_U32(skb, RTA_IIF, iif);
2254 } else if (dst) { 2251 } else if (dst) {
2255 struct inet6_dev *idev = ip6_dst_idev(&rt->u.dst); 2252 struct inet6_dev *idev = ip6_dst_idev(&rt->dst);
2256 struct in6_addr saddr_buf; 2253 struct in6_addr saddr_buf;
2257 if (ipv6_dev_get_saddr(net, idev ? idev->dev : NULL, 2254 if (ipv6_dev_get_saddr(net, idev ? idev->dev : NULL,
2258 dst, 0, &saddr_buf) == 0) 2255 dst, 0, &saddr_buf) == 0)
2259 NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf); 2256 NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
2260 } 2257 }
2261 2258
2262 if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0) 2259 if (rtnetlink_put_metrics(skb, rt->dst.metrics) < 0)
2263 goto nla_put_failure; 2260 goto nla_put_failure;
2264 2261
2265 if (rt->u.dst.neighbour) 2262 if (rt->dst.neighbour)
2266 NLA_PUT(skb, RTA_GATEWAY, 16, &rt->u.dst.neighbour->primary_key); 2263 NLA_PUT(skb, RTA_GATEWAY, 16, &rt->dst.neighbour->primary_key);
2267 2264
2268 if (rt->u.dst.dev) 2265 if (rt->dst.dev)
2269 NLA_PUT_U32(skb, RTA_OIF, rt->rt6i_dev->ifindex); 2266 NLA_PUT_U32(skb, RTA_OIF, rt->rt6i_dev->ifindex);
2270 2267
2271 NLA_PUT_U32(skb, RTA_PRIORITY, rt->rt6i_metric); 2268 NLA_PUT_U32(skb, RTA_PRIORITY, rt->rt6i_metric);
@@ -2277,8 +2274,8 @@ static int rt6_fill_node(struct net *net,
2277 else 2274 else
2278 expires = INT_MAX; 2275 expires = INT_MAX;
2279 2276
2280 if (rtnl_put_cacheinfo(skb, &rt->u.dst, 0, 0, 0, 2277 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, 0, 0,
2281 expires, rt->u.dst.error) < 0) 2278 expires, rt->dst.error) < 0)
2282 goto nla_put_failure; 2279 goto nla_put_failure;
2283 2280
2284 return nlmsg_end(skb, nlh); 2281 return nlmsg_end(skb, nlh);
@@ -2364,7 +2361,7 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
2364 skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr)); 2361 skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr));
2365 2362
2366 rt = (struct rt6_info*) ip6_route_output(net, NULL, &fl); 2363 rt = (struct rt6_info*) ip6_route_output(net, NULL, &fl);
2367 skb_dst_set(skb, &rt->u.dst); 2364 skb_dst_set(skb, &rt->dst);
2368 2365
2369 err = rt6_fill_node(net, skb, rt, &fl.fl6_dst, &fl.fl6_src, iif, 2366 err = rt6_fill_node(net, skb, rt, &fl.fl6_dst, &fl.fl6_src, iif,
2370 RTM_NEWROUTE, NETLINK_CB(in_skb).pid, 2367 RTM_NEWROUTE, NETLINK_CB(in_skb).pid,
@@ -2416,12 +2413,12 @@ static int ip6_route_dev_notify(struct notifier_block *this,
2416 struct net *net = dev_net(dev); 2413 struct net *net = dev_net(dev);
2417 2414
2418 if (event == NETDEV_REGISTER && (dev->flags & IFF_LOOPBACK)) { 2415 if (event == NETDEV_REGISTER && (dev->flags & IFF_LOOPBACK)) {
2419 net->ipv6.ip6_null_entry->u.dst.dev = dev; 2416 net->ipv6.ip6_null_entry->dst.dev = dev;
2420 net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev); 2417 net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
2421#ifdef CONFIG_IPV6_MULTIPLE_TABLES 2418#ifdef CONFIG_IPV6_MULTIPLE_TABLES
2422 net->ipv6.ip6_prohibit_entry->u.dst.dev = dev; 2419 net->ipv6.ip6_prohibit_entry->dst.dev = dev;
2423 net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev); 2420 net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
2424 net->ipv6.ip6_blk_hole_entry->u.dst.dev = dev; 2421 net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
2425 net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev); 2422 net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
2426#endif 2423#endif
2427 } 2424 }
@@ -2464,8 +2461,8 @@ static int rt6_info_route(struct rt6_info *rt, void *p_arg)
2464 seq_puts(m, "00000000000000000000000000000000"); 2461 seq_puts(m, "00000000000000000000000000000000");
2465 } 2462 }
2466 seq_printf(m, " %08x %08x %08x %08x %8s\n", 2463 seq_printf(m, " %08x %08x %08x %08x %8s\n",
2467 rt->rt6i_metric, atomic_read(&rt->u.dst.__refcnt), 2464 rt->rt6i_metric, atomic_read(&rt->dst.__refcnt),
2468 rt->u.dst.__use, rt->rt6i_flags, 2465 rt->dst.__use, rt->rt6i_flags,
2469 rt->rt6i_dev ? rt->rt6i_dev->name : ""); 2466 rt->rt6i_dev ? rt->rt6i_dev->name : "");
2470 return 0; 2467 return 0;
2471} 2468}
@@ -2646,9 +2643,9 @@ static int __net_init ip6_route_net_init(struct net *net)
2646 GFP_KERNEL); 2643 GFP_KERNEL);
2647 if (!net->ipv6.ip6_null_entry) 2644 if (!net->ipv6.ip6_null_entry)
2648 goto out_ip6_dst_ops; 2645 goto out_ip6_dst_ops;
2649 net->ipv6.ip6_null_entry->u.dst.path = 2646 net->ipv6.ip6_null_entry->dst.path =
2650 (struct dst_entry *)net->ipv6.ip6_null_entry; 2647 (struct dst_entry *)net->ipv6.ip6_null_entry;
2651 net->ipv6.ip6_null_entry->u.dst.ops = &net->ipv6.ip6_dst_ops; 2648 net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
2652 2649
2653#ifdef CONFIG_IPV6_MULTIPLE_TABLES 2650#ifdef CONFIG_IPV6_MULTIPLE_TABLES
2654 net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template, 2651 net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
@@ -2656,18 +2653,18 @@ static int __net_init ip6_route_net_init(struct net *net)
2656 GFP_KERNEL); 2653 GFP_KERNEL);
2657 if (!net->ipv6.ip6_prohibit_entry) 2654 if (!net->ipv6.ip6_prohibit_entry)
2658 goto out_ip6_null_entry; 2655 goto out_ip6_null_entry;
2659 net->ipv6.ip6_prohibit_entry->u.dst.path = 2656 net->ipv6.ip6_prohibit_entry->dst.path =
2660 (struct dst_entry *)net->ipv6.ip6_prohibit_entry; 2657 (struct dst_entry *)net->ipv6.ip6_prohibit_entry;
2661 net->ipv6.ip6_prohibit_entry->u.dst.ops = &net->ipv6.ip6_dst_ops; 2658 net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
2662 2659
2663 net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template, 2660 net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
2664 sizeof(*net->ipv6.ip6_blk_hole_entry), 2661 sizeof(*net->ipv6.ip6_blk_hole_entry),
2665 GFP_KERNEL); 2662 GFP_KERNEL);
2666 if (!net->ipv6.ip6_blk_hole_entry) 2663 if (!net->ipv6.ip6_blk_hole_entry)
2667 goto out_ip6_prohibit_entry; 2664 goto out_ip6_prohibit_entry;
2668 net->ipv6.ip6_blk_hole_entry->u.dst.path = 2665 net->ipv6.ip6_blk_hole_entry->dst.path =
2669 (struct dst_entry *)net->ipv6.ip6_blk_hole_entry; 2666 (struct dst_entry *)net->ipv6.ip6_blk_hole_entry;
2670 net->ipv6.ip6_blk_hole_entry->u.dst.ops = &net->ipv6.ip6_dst_ops; 2667 net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
2671#endif 2668#endif
2672 2669
2673 net->ipv6.sysctl.flush_delay = 0; 2670 net->ipv6.sysctl.flush_delay = 0;
@@ -2742,12 +2739,12 @@ int __init ip6_route_init(void)
2742 /* Registering of the loopback is done before this portion of code, 2739 /* Registering of the loopback is done before this portion of code,
2743 * the loopback reference in rt6_info will not be taken, do it 2740 * the loopback reference in rt6_info will not be taken, do it
2744 * manually for init_net */ 2741 * manually for init_net */
2745 init_net.ipv6.ip6_null_entry->u.dst.dev = init_net.loopback_dev; 2742 init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
2746 init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev); 2743 init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
2747 #ifdef CONFIG_IPV6_MULTIPLE_TABLES 2744 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
2748 init_net.ipv6.ip6_prohibit_entry->u.dst.dev = init_net.loopback_dev; 2745 init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
2749 init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev); 2746 init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
2750 init_net.ipv6.ip6_blk_hole_entry->u.dst.dev = init_net.loopback_dev; 2747 init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
2751 init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev); 2748 init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
2752 #endif 2749 #endif
2753 ret = fib6_init(); 2750 ret = fib6_init();
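
Editor's note: every hunk in the route.c diff above is the same mechanical substitution, rt->u.dst becomes rt->dst. The underlying cleanup removes a single-member union that wrapped the embedded dst_entry and bought nothing except an extra ".u" at every access site. A minimal sketch of the shape change (field lists abbreviated; this is an illustration, not the full definition from include/net/ip6_fib.h):

	/* before: dst_entry reachable only through a one-member union */
	struct rt6_info {
		union {
			struct dst_entry dst;
		} u;
		/* ... remaining ipv6 routing state ... */
	};

	/* after: dst_entry is directly the first member */
	struct rt6_info {
		struct dst_entry dst;
		/* ... remaining ipv6 routing state ... */
	};

Since the dst_entry sits at offset zero either way, the existing casts between struct dst_entry * and struct rt6_info * (visible above in the ip6_route_output() caller and the ip6_null_entry setup) are unaffected.
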
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index e51e650ea80b..4699cd3c3118 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -249,8 +249,6 @@ failed:
249 return NULL; 249 return NULL;
250} 250}
251 251
252static DEFINE_SPINLOCK(ipip6_prl_lock);
253
254#define for_each_prl_rcu(start) \ 252#define for_each_prl_rcu(start) \
255 for (prl = rcu_dereference(start); \ 253 for (prl = rcu_dereference(start); \
256 prl; \ 254 prl; \
@@ -340,7 +338,7 @@ ipip6_tunnel_add_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a, int chg)
340 if (a->addr == htonl(INADDR_ANY)) 338 if (a->addr == htonl(INADDR_ANY))
341 return -EINVAL; 339 return -EINVAL;
342 340
343 spin_lock(&ipip6_prl_lock); 341 ASSERT_RTNL();
344 342
345 for (p = t->prl; p; p = p->next) { 343 for (p = t->prl; p; p = p->next) {
346 if (p->addr == a->addr) { 344 if (p->addr == a->addr) {
@@ -370,7 +368,6 @@ ipip6_tunnel_add_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a, int chg)
370 t->prl_count++; 368 t->prl_count++;
371 rcu_assign_pointer(t->prl, p); 369 rcu_assign_pointer(t->prl, p);
372out: 370out:
373 spin_unlock(&ipip6_prl_lock);
374 return err; 371 return err;
375} 372}
376 373
@@ -397,7 +394,7 @@ ipip6_tunnel_del_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a)
397 struct ip_tunnel_prl_entry *x, **p; 394 struct ip_tunnel_prl_entry *x, **p;
398 int err = 0; 395 int err = 0;
399 396
400 spin_lock(&ipip6_prl_lock); 397 ASSERT_RTNL();
401 398
402 if (a && a->addr != htonl(INADDR_ANY)) { 399 if (a && a->addr != htonl(INADDR_ANY)) {
403 for (p = &t->prl; *p; p = &(*p)->next) { 400 for (p = &t->prl; *p; p = &(*p)->next) {
@@ -419,7 +416,6 @@ ipip6_tunnel_del_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a)
419 } 416 }
420 } 417 }
421out: 418out:
422 spin_unlock(&ipip6_prl_lock);
423 return err; 419 return err;
424} 420}
425 421
@@ -716,7 +712,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
716 stats->tx_carrier_errors++; 712 stats->tx_carrier_errors++;
717 goto tx_error_icmp; 713 goto tx_error_icmp;
718 } 714 }
719 tdev = rt->u.dst.dev; 715 tdev = rt->dst.dev;
720 716
721 if (tdev == dev) { 717 if (tdev == dev) {
722 ip_rt_put(rt); 718 ip_rt_put(rt);
@@ -725,7 +721,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
725 } 721 }
726 722
727 if (df) { 723 if (df) {
728 mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr); 724 mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr);
729 725
730 if (mtu < 68) { 726 if (mtu < 68) {
731 stats->collisions++; 727 stats->collisions++;
@@ -784,7 +780,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
784 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); 780 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
785 IPCB(skb)->flags = 0; 781 IPCB(skb)->flags = 0;
786 skb_dst_drop(skb); 782 skb_dst_drop(skb);
787 skb_dst_set(skb, &rt->u.dst); 783 skb_dst_set(skb, &rt->dst);
788 784
789 /* 785 /*
790 * Push down and install the IPIP header. 786 * Push down and install the IPIP header.
@@ -833,7 +829,7 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev)
833 .proto = IPPROTO_IPV6 }; 829 .proto = IPPROTO_IPV6 };
834 struct rtable *rt; 830 struct rtable *rt;
835 if (!ip_route_output_key(dev_net(dev), &rt, &fl)) { 831 if (!ip_route_output_key(dev_net(dev), &rt, &fl)) {
836 tdev = rt->u.dst.dev; 832 tdev = rt->dst.dev;
837 ip_rt_put(rt); 833 ip_rt_put(rt);
838 } 834 }
839 dev->flags |= IFF_POINTOPOINT; 835 dev->flags |= IFF_POINTOPOINT;
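
Editor's note: the locking change in sit.c trades a private spinlock for an assertion. The PRL add/delete paths are reached through ioctls that already run under the RTNL, so ipip6_prl_lock only duplicated that serialization; ASSERT_RTNL() turns the assumption into a runtime warning if a future caller forgets the lock. A sketch of the expected calling convention (ioctl plumbing elided, this is the assumption the change relies on):

	rtnl_lock();
	err = ipip6_tunnel_add_prl(t, &prl, chg);	/* ASSERT_RTNL() is satisfied */
	rtnl_unlock();

Readers of the PRL remain lockless via RCU (see for_each_prl_rcu() above), which is why writers only need mutual exclusion among themselves.
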
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 34d1f0690d7e..09fd34f0dbf2 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -27,28 +27,17 @@ extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
27#define COOKIEBITS 24 /* Upper bits store count */ 27#define COOKIEBITS 24 /* Upper bits store count */
28#define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1) 28#define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)
29 29
30/* 30/* Table must be sorted. */
31 * This table has to be sorted and terminated with (__u16)-1.
32 * XXX generate a better table.
33 * Unresolved Issues: HIPPI with a 64k MSS is not well supported.
34 *
35 * Taken directly from ipv4 implementation.
36 * Should this list be modified for ipv6 use or is it close enough?
37 * rfc 2460 8.3 suggests mss values 20 bytes less than ipv4 counterpart
38 */
39static __u16 const msstab[] = { 31static __u16 const msstab[] = {
40 64 - 1, 32 64,
41 256 - 1, 33 512,
42 512 - 1, 34 536,
43 536 - 1, 35 1280 - 60,
44 1024 - 1, 36 1480 - 60,
45 1440 - 1, 37 1500 - 60,
46 1460 - 1, 38 4460 - 60,
47 4312 - 1, 39 9000 - 60,
48 (__u16)-1
49}; 40};
50/* The number doesn't include the -1 terminator */
51#define NUM_MSS (ARRAY_SIZE(msstab) - 1)
52 41
53/* 42/*
54 * This (misnamed) value is the age of syncookie which is permitted. 43 * This (misnamed) value is the age of syncookie which is permitted.
@@ -134,9 +123,11 @@ __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
134 123
135 tcp_synq_overflow(sk); 124 tcp_synq_overflow(sk);
136 125
137 for (mssind = 0; mss > msstab[mssind + 1]; mssind++) 126 for (mssind = ARRAY_SIZE(msstab) - 1; mssind ; mssind--)
138 ; 127 if (mss >= msstab[mssind])
139 *mssp = msstab[mssind] + 1; 128 break;
129
130 *mssp = msstab[mssind];
140 131
141 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT); 132 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
142 133
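
Editor's note: the new encoder drops the sentinel-terminated, value-minus-one table in favour of real MSS values (the last five entries are common link MTUs, 1280/1480/1500/4460/9000, minus 60 bytes of IPv6 plus TCP header) and walks the table downwards to pick the largest entry not exceeding the peer's advertised MSS. A standalone sketch of that search, with the table values copied from the hunk above:

	#include <stdio.h>

	static const unsigned short msstab[] = {
		64, 512, 536, 1280 - 60, 1480 - 60, 1500 - 60,
		4460 - 60, 9000 - 60,
	};
	#define TABSZ (sizeof(msstab) / sizeof(msstab[0]))

	/* largest table index whose MSS does not exceed the peer's */
	static unsigned int mss_to_index(unsigned int mss)
	{
		unsigned int i;

		for (i = TABSZ - 1; i; i--)
			if (mss >= msstab[i])
				break;
		return i;
	}

	int main(void)
	{
		/* a peer advertising 1460 is clamped to 1440 (index 5) */
		printf("%u -> %u\n", 1460, msstab[mss_to_index(1460)]);
		return 0;
	}

The decode side (cookie_check above) then simply returns msstab[mssind], so only the table index has to fit into the few bits the cookie reserves for it.
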
@@ -154,7 +145,7 @@ static inline int cookie_check(struct sk_buff *skb, __u32 cookie)
154 th->source, th->dest, seq, 145 th->source, th->dest, seq,
155 jiffies / (HZ * 60), COUNTER_TRIES); 146 jiffies / (HZ * 60), COUNTER_TRIES);
156 147
157 return mssind < NUM_MSS ? msstab[mssind] + 1 : 0; 148 return mssind < ARRAY_SIZE(msstab) ? msstab[mssind] : 0;
158} 149}
159 150
160struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) 151struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
@@ -173,8 +164,9 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
173 int mss; 164 int mss;
174 struct dst_entry *dst; 165 struct dst_entry *dst;
175 __u8 rcv_wscale; 166 __u8 rcv_wscale;
167 bool ecn_ok;
176 168
177 if (!sysctl_tcp_syncookies || !th->ack) 169 if (!sysctl_tcp_syncookies || !th->ack || th->rst)
178 goto out; 170 goto out;
179 171
180 if (tcp_synq_no_recent_overflow(sk) || 172 if (tcp_synq_no_recent_overflow(sk) ||
@@ -189,8 +181,8 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
189 memset(&tcp_opt, 0, sizeof(tcp_opt)); 181 memset(&tcp_opt, 0, sizeof(tcp_opt));
190 tcp_parse_options(skb, &tcp_opt, &hash_location, 0); 182 tcp_parse_options(skb, &tcp_opt, &hash_location, 0);
191 183
192 if (tcp_opt.saw_tstamp) 184 if (!cookie_check_timestamp(&tcp_opt, &ecn_ok))
193 cookie_check_timestamp(&tcp_opt); 185 goto out;
194 186
195 ret = NULL; 187 ret = NULL;
196 req = inet6_reqsk_alloc(&tcp6_request_sock_ops); 188 req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
@@ -224,9 +216,8 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
224 216
225 req->expires = 0UL; 217 req->expires = 0UL;
226 req->retrans = 0; 218 req->retrans = 0;
227 ireq->ecn_ok = 0; 219 ireq->ecn_ok = ecn_ok;
228 ireq->snd_wscale = tcp_opt.snd_wscale; 220 ireq->snd_wscale = tcp_opt.snd_wscale;
229 ireq->rcv_wscale = tcp_opt.rcv_wscale;
230 ireq->sack_ok = tcp_opt.sack_ok; 221 ireq->sack_ok = tcp_opt.sack_ok;
231 ireq->wscale_ok = tcp_opt.wscale_ok; 222 ireq->wscale_ok = tcp_opt.wscale_ok;
232 ireq->tstamp_ok = tcp_opt.saw_tstamp; 223 ireq->tstamp_ok = tcp_opt.saw_tstamp;
@@ -240,17 +231,12 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
240 * me if there is a preferred way. 231 * me if there is a preferred way.
241 */ 232 */
242 { 233 {
243 struct in6_addr *final_p = NULL, final; 234 struct in6_addr *final_p, final;
244 struct flowi fl; 235 struct flowi fl;
245 memset(&fl, 0, sizeof(fl)); 236 memset(&fl, 0, sizeof(fl));
246 fl.proto = IPPROTO_TCP; 237 fl.proto = IPPROTO_TCP;
247 ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr); 238 ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
248 if (np->opt && np->opt->srcrt) { 239 final_p = fl6_update_dst(&fl, np->opt, &final);
249 struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt;
250 ipv6_addr_copy(&final, &fl.fl6_dst);
251 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
252 final_p = &final;
253 }
254 ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr); 240 ipv6_addr_copy(&fl.fl6_src, &ireq6->loc_addr);
255 fl.oif = sk->sk_bound_dev_if; 241 fl.oif = sk->sk_bound_dev_if;
256 fl.mark = sk->sk_mark; 242 fl.mark = sk->sk_mark;
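
Editor's note: all of the srcrt blocks deleted in this file (and the matching ones in tcp_ipv6.c and udp.c below) collapse into one call to the new fl6_update_dst() helper. Inferred from the open-coded version it replaces, the helper's shape is roughly the following; treat the body as a sketch, the authoritative definition lives in the ipv6 headers:

	static inline struct in6_addr *fl6_update_dst(struct flowi *fl,
						      const struct ipv6_txoptions *opt,
						      struct in6_addr *orig)
	{
		if (!opt || !opt->srcrt)
			return NULL;

		/* route via the routing header's first hop, remembering
		 * the real destination for the caller */
		ipv6_addr_copy(orig, &fl->fl6_dst);
		ipv6_addr_copy(&fl->fl6_dst, ((struct rt0_hdr *)opt->srcrt)->addr);
		return orig;
	}

A non-NULL return therefore means "a type 0 routing header redirected the flow", which is exactly the condition udpv6_sendmsg() below uses to drop out of its connected fast path.
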
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 2b7c3a100e2c..fe6d40418c0b 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -129,7 +129,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
129 struct inet_connection_sock *icsk = inet_csk(sk); 129 struct inet_connection_sock *icsk = inet_csk(sk);
130 struct ipv6_pinfo *np = inet6_sk(sk); 130 struct ipv6_pinfo *np = inet6_sk(sk);
131 struct tcp_sock *tp = tcp_sk(sk); 131 struct tcp_sock *tp = tcp_sk(sk);
132 struct in6_addr *saddr = NULL, *final_p = NULL, final; 132 struct in6_addr *saddr = NULL, *final_p, final;
133 struct flowi fl; 133 struct flowi fl;
134 struct dst_entry *dst; 134 struct dst_entry *dst;
135 int addr_type; 135 int addr_type;
@@ -250,12 +250,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
250 fl.fl_ip_dport = usin->sin6_port; 250 fl.fl_ip_dport = usin->sin6_port;
251 fl.fl_ip_sport = inet->inet_sport; 251 fl.fl_ip_sport = inet->inet_sport;
252 252
253 if (np->opt && np->opt->srcrt) { 253 final_p = fl6_update_dst(&fl, np->opt, &final);
254 struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
255 ipv6_addr_copy(&final, &fl.fl6_dst);
256 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
257 final_p = &final;
258 }
259 254
260 security_sk_classify_flow(sk, &fl); 255 security_sk_classify_flow(sk, &fl);
261 256
@@ -477,7 +472,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
477 struct ipv6_pinfo *np = inet6_sk(sk); 472 struct ipv6_pinfo *np = inet6_sk(sk);
478 struct sk_buff * skb; 473 struct sk_buff * skb;
479 struct ipv6_txoptions *opt = NULL; 474 struct ipv6_txoptions *opt = NULL;
480 struct in6_addr * final_p = NULL, final; 475 struct in6_addr * final_p, final;
481 struct flowi fl; 476 struct flowi fl;
482 struct dst_entry *dst; 477 struct dst_entry *dst;
483 int err = -1; 478 int err = -1;
@@ -494,12 +489,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
494 security_req_classify_flow(req, &fl); 489 security_req_classify_flow(req, &fl);
495 490
496 opt = np->opt; 491 opt = np->opt;
497 if (opt && opt->srcrt) { 492 final_p = fl6_update_dst(&fl, opt, &final);
498 struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
499 ipv6_addr_copy(&final, &fl.fl6_dst);
500 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
501 final_p = &final;
502 }
503 493
504 err = ip6_dst_lookup(sk, &dst, &fl); 494 err = ip6_dst_lookup(sk, &dst, &fl);
505 if (err) 495 if (err)
@@ -1167,7 +1157,7 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
1167 } 1157 }
1168 1158
1169#ifdef CONFIG_SYN_COOKIES 1159#ifdef CONFIG_SYN_COOKIES
1170 if (!th->rst && !th->syn && th->ack) 1160 if (!th->syn)
1171 sk = cookie_v6_check(sk, skb); 1161 sk = cookie_v6_check(sk, skb);
1172#endif 1162#endif
1173 return sk; 1163 return sk;
@@ -1279,13 +1269,10 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1279 treq = inet6_rsk(req); 1269 treq = inet6_rsk(req);
1280 ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr); 1270 ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr);
1281 ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr); 1271 ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr);
1282 if (!want_cookie) 1272 if (!want_cookie || tmp_opt.tstamp_ok)
1283 TCP_ECN_create_request(req, tcp_hdr(skb)); 1273 TCP_ECN_create_request(req, tcp_hdr(skb));
1284 1274
1285 if (want_cookie) { 1275 if (!isn) {
1286 isn = cookie_v6_init_sequence(sk, skb, &req->mss);
1287 req->cookie_ts = tmp_opt.tstamp_ok;
1288 } else if (!isn) {
1289 if (ipv6_opt_accepted(sk, skb) || 1276 if (ipv6_opt_accepted(sk, skb) ||
1290 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo || 1277 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
1291 np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) { 1278 np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
@@ -1298,8 +1285,12 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1298 if (!sk->sk_bound_dev_if && 1285 if (!sk->sk_bound_dev_if &&
1299 ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL) 1286 ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
1300 treq->iif = inet6_iif(skb); 1287 treq->iif = inet6_iif(skb);
1301 1288 if (!want_cookie) {
1302 isn = tcp_v6_init_sequence(skb); 1289 isn = tcp_v6_init_sequence(skb);
1290 } else {
1291 isn = cookie_v6_init_sequence(sk, skb, &req->mss);
1292 req->cookie_ts = tmp_opt.tstamp_ok;
1293 }
1303 } 1294 }
1304 tcp_rsk(req)->snt_isn = isn; 1295 tcp_rsk(req)->snt_isn = isn;
1305 1296
@@ -1392,18 +1383,13 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1392 goto out_overflow; 1383 goto out_overflow;
1393 1384
1394 if (dst == NULL) { 1385 if (dst == NULL) {
1395 struct in6_addr *final_p = NULL, final; 1386 struct in6_addr *final_p, final;
1396 struct flowi fl; 1387 struct flowi fl;
1397 1388
1398 memset(&fl, 0, sizeof(fl)); 1389 memset(&fl, 0, sizeof(fl));
1399 fl.proto = IPPROTO_TCP; 1390 fl.proto = IPPROTO_TCP;
1400 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr); 1391 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
1401 if (opt && opt->srcrt) { 1392 final_p = fl6_update_dst(&fl, opt, &final);
1402 struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
1403 ipv6_addr_copy(&final, &fl.fl6_dst);
1404 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
1405 final_p = &final;
1406 }
1407 ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr); 1393 ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
1408 fl.oif = sk->sk_bound_dev_if; 1394 fl.oif = sk->sk_bound_dev_if;
1409 fl.mark = sk->sk_mark; 1395 fl.mark = sk->sk_mark;
@@ -2156,6 +2142,8 @@ struct proto tcpv6_prot = {
2156 .setsockopt = tcp_setsockopt, 2142 .setsockopt = tcp_setsockopt,
2157 .getsockopt = tcp_getsockopt, 2143 .getsockopt = tcp_getsockopt,
2158 .recvmsg = tcp_recvmsg, 2144 .recvmsg = tcp_recvmsg,
2145 .sendmsg = tcp_sendmsg,
2146 .sendpage = tcp_sendpage,
2159 .backlog_rcv = tcp_v6_do_rcv, 2147 .backlog_rcv = tcp_v6_do_rcv,
2160 .hash = tcp_v6_hash, 2148 .hash = tcp_v6_hash,
2161 .unhash = inet_unhash, 2149 .unhash = inet_unhash,
@@ -2174,6 +2162,7 @@ struct proto tcpv6_prot = {
2174 .twsk_prot = &tcp6_timewait_sock_ops, 2162 .twsk_prot = &tcp6_timewait_sock_ops,
2175 .rsk_prot = &tcp6_request_sock_ops, 2163 .rsk_prot = &tcp6_request_sock_ops,
2176 .h.hashinfo = &tcp_hashinfo, 2164 .h.hashinfo = &tcp_hashinfo,
2165 .no_autobind = true,
2177#ifdef CONFIG_COMPAT 2166#ifdef CONFIG_COMPAT
2178 .compat_setsockopt = compat_tcp_setsockopt, 2167 .compat_setsockopt = compat_tcp_setsockopt,
2179 .compat_getsockopt = compat_tcp_getsockopt, 2168 .compat_getsockopt = compat_tcp_getsockopt,
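
Editor's note: the tcp_v6_hnd_req() change higher up in this file pairs with the "!th->ack || th->rst" guard added to cookie_v6_check() in the syncookies diff: the per-flag filtering moved into the callee, so the call site only needs to exclude SYNs. The effective condition is unchanged, as a comment sketch:

	/* before: all filtering at the call site */
	if (!th->rst && !th->syn && th->ack)
		sk = cookie_v6_check(sk, skb);

	/* after: the callee rejects missing-ACK and RST segments itself */
	if (!th->syn)
		sk = cookie_v6_check(sk, skb);
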
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 87be58673b55..1dd1affdead2 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -927,7 +927,7 @@ int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk,
927 struct inet_sock *inet = inet_sk(sk); 927 struct inet_sock *inet = inet_sk(sk);
928 struct ipv6_pinfo *np = inet6_sk(sk); 928 struct ipv6_pinfo *np = inet6_sk(sk);
929 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) msg->msg_name; 929 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) msg->msg_name;
930 struct in6_addr *daddr, *final_p = NULL, final; 930 struct in6_addr *daddr, *final_p, final;
931 struct ipv6_txoptions *opt = NULL; 931 struct ipv6_txoptions *opt = NULL;
932 struct ip6_flowlabel *flowlabel = NULL; 932 struct ip6_flowlabel *flowlabel = NULL;
933 struct flowi fl; 933 struct flowi fl;
@@ -1097,14 +1097,9 @@ do_udp_sendmsg:
1097 ipv6_addr_copy(&fl.fl6_src, &np->saddr); 1097 ipv6_addr_copy(&fl.fl6_src, &np->saddr);
1098 fl.fl_ip_sport = inet->inet_sport; 1098 fl.fl_ip_sport = inet->inet_sport;
1099 1099
1100 /* merge ip6_build_xmit from ip6_output */ 1100 final_p = fl6_update_dst(&fl, opt, &final);
1101 if (opt && opt->srcrt) { 1101 if (final_p)
1102 struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
1103 ipv6_addr_copy(&final, &fl.fl6_dst);
1104 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
1105 final_p = &final;
1106 connected = 0; 1102 connected = 0;
1107 }
1108 1103
1109 if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst)) { 1104 if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst)) {
1110 fl.oif = np->mcast_oif; 1105 fl.oif = np->mcast_oif;
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 4a0e77e14468..6baeabbbca82 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -124,6 +124,8 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
124 u8 nexthdr = nh[IP6CB(skb)->nhoff]; 124 u8 nexthdr = nh[IP6CB(skb)->nhoff];
125 125
126 memset(fl, 0, sizeof(struct flowi)); 126 memset(fl, 0, sizeof(struct flowi));
127 fl->mark = skb->mark;
128
127 ipv6_addr_copy(&fl->fl6_dst, reverse ? &hdr->saddr : &hdr->daddr); 129 ipv6_addr_copy(&fl->fl6_dst, reverse ? &hdr->saddr : &hdr->daddr);
128 ipv6_addr_copy(&fl->fl6_src, reverse ? &hdr->daddr : &hdr->saddr); 130 ipv6_addr_copy(&fl->fl6_src, reverse ? &hdr->daddr : &hdr->saddr);
129 131
diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c
index 6a1a202710c5..800bc53b7f63 100644
--- a/net/irda/irnet/irnet_ppp.c
+++ b/net/irda/irnet/irnet_ppp.c
@@ -527,7 +527,7 @@ static int
527dev_irnet_close(struct inode * inode, 527dev_irnet_close(struct inode * inode,
528 struct file * file) 528 struct file * file)
529{ 529{
530 irnet_socket * ap = (struct irnet_socket *) file->private_data; 530 irnet_socket * ap = file->private_data;
531 531
532 DENTER(FS_TRACE, "(file=0x%p, ap=0x%p)\n", 532 DENTER(FS_TRACE, "(file=0x%p, ap=0x%p)\n",
533 file, ap); 533 file, ap);
@@ -564,7 +564,7 @@ dev_irnet_write(struct file * file,
564 size_t count, 564 size_t count,
565 loff_t * ppos) 565 loff_t * ppos)
566{ 566{
567 irnet_socket * ap = (struct irnet_socket *) file->private_data; 567 irnet_socket * ap = file->private_data;
568 568
569 DPASS(FS_TRACE, "(file=0x%p, ap=0x%p, count=%Zd)\n", 569 DPASS(FS_TRACE, "(file=0x%p, ap=0x%p, count=%Zd)\n",
570 file, ap, count); 570 file, ap, count);
@@ -588,7 +588,7 @@ dev_irnet_read(struct file * file,
588 size_t count, 588 size_t count,
589 loff_t * ppos) 589 loff_t * ppos)
590{ 590{
591 irnet_socket * ap = (struct irnet_socket *) file->private_data; 591 irnet_socket * ap = file->private_data;
592 592
593 DPASS(FS_TRACE, "(file=0x%p, ap=0x%p, count=%Zd)\n", 593 DPASS(FS_TRACE, "(file=0x%p, ap=0x%p, count=%Zd)\n",
594 file, ap, count); 594 file, ap, count);
@@ -609,7 +609,7 @@ static unsigned int
609dev_irnet_poll(struct file * file, 609dev_irnet_poll(struct file * file,
610 poll_table * wait) 610 poll_table * wait)
611{ 611{
612 irnet_socket * ap = (struct irnet_socket *) file->private_data; 612 irnet_socket * ap = file->private_data;
613 unsigned int mask; 613 unsigned int mask;
614 614
615 DENTER(FS_TRACE, "(file=0x%p, ap=0x%p)\n", 615 DENTER(FS_TRACE, "(file=0x%p, ap=0x%p)\n",
@@ -638,7 +638,7 @@ dev_irnet_ioctl(
638 unsigned int cmd, 638 unsigned int cmd,
639 unsigned long arg) 639 unsigned long arg)
640{ 640{
641 irnet_socket * ap = (struct irnet_socket *) file->private_data; 641 irnet_socket * ap = file->private_data;
642 int err; 642 int err;
643 int val; 643 int val;
644 void __user *argp = (void __user *)arg; 644 void __user *argp = (void __user *)arg;
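
Editor's note: these irnet_ppp.c hunks only delete casts. In C a void * converts implicitly to any object-pointer type, so assigning file->private_data (a void *) to a typed pointer needs no cast, and kernel style prefers to omit it. A tiny standalone illustration (the struct body and allocation are hypothetical stand-ins):

	#include <stdlib.h>

	struct irnet_socket { int dummy; };

	int main(void)
	{
		void *private_data = malloc(sizeof(struct irnet_socket));
		struct irnet_socket *ap = private_data;	/* no cast required */

		free(ap);
		return 0;
	}
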
diff --git a/net/irda/irttp.c b/net/irda/irttp.c
index 47db1d8a0d92..285761e77d90 100644
--- a/net/irda/irttp.c
+++ b/net/irda/irttp.c
@@ -1853,23 +1853,23 @@ static int irttp_seq_show(struct seq_file *seq, void *v)
1853 self->remote_credit); 1853 self->remote_credit);
1854 seq_printf(seq, "send credit: %d\n", 1854 seq_printf(seq, "send credit: %d\n",
1855 self->send_credit); 1855 self->send_credit);
1856 seq_printf(seq, " tx packets: %ld, ", 1856 seq_printf(seq, " tx packets: %lu, ",
1857 self->stats.tx_packets); 1857 self->stats.tx_packets);
1858 seq_printf(seq, "rx packets: %ld, ", 1858 seq_printf(seq, "rx packets: %lu, ",
1859 self->stats.rx_packets); 1859 self->stats.rx_packets);
1860 seq_printf(seq, "tx_queue len: %d ", 1860 seq_printf(seq, "tx_queue len: %u ",
1861 skb_queue_len(&self->tx_queue)); 1861 skb_queue_len(&self->tx_queue));
1862 seq_printf(seq, "rx_queue len: %d\n", 1862 seq_printf(seq, "rx_queue len: %u\n",
1863 skb_queue_len(&self->rx_queue)); 1863 skb_queue_len(&self->rx_queue));
1864 seq_printf(seq, " tx_sdu_busy: %s, ", 1864 seq_printf(seq, " tx_sdu_busy: %s, ",
1865 self->tx_sdu_busy? "TRUE":"FALSE"); 1865 self->tx_sdu_busy? "TRUE":"FALSE");
1866 seq_printf(seq, "rx_sdu_busy: %s\n", 1866 seq_printf(seq, "rx_sdu_busy: %s\n",
1867 self->rx_sdu_busy? "TRUE":"FALSE"); 1867 self->rx_sdu_busy? "TRUE":"FALSE");
1868 seq_printf(seq, " max_seg_size: %d, ", 1868 seq_printf(seq, " max_seg_size: %u, ",
1869 self->max_seg_size); 1869 self->max_seg_size);
1870 seq_printf(seq, "tx_max_sdu_size: %d, ", 1870 seq_printf(seq, "tx_max_sdu_size: %u, ",
1871 self->tx_max_sdu_size); 1871 self->tx_max_sdu_size);
1872 seq_printf(seq, "rx_max_sdu_size: %d\n", 1872 seq_printf(seq, "rx_max_sdu_size: %u\n",
1873 self->rx_max_sdu_size); 1873 self->rx_max_sdu_size);
1874 1874
1875 seq_printf(seq, " Used by (%s)\n\n", 1875 seq_printf(seq, " Used by (%s)\n\n",
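
Editor's note: the irttp counters are unsigned (the stats fields are unsigned long, and skb_queue_len() returns a 32-bit unsigned length), so %ld and %d were signedness mismatches of the kind -Wformat flags. A minimal standalone version of the corrected specifiers:

	#include <stdio.h>

	int main(void)
	{
		unsigned long tx_packets = 42;	/* stands in for self->stats.tx_packets */
		unsigned int qlen = 3;		/* stands in for skb_queue_len(&self->tx_queue) */

		printf("tx packets: %lu, tx_queue len: %u\n", tx_packets, qlen);
		return 0;
	}
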
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index f28ad2cc8428..499c045d6910 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -1463,7 +1463,7 @@ struct iucv_path_pending {
1463 u32 res3; 1463 u32 res3;
1464 u8 ippollfg; 1464 u8 ippollfg;
1465 u8 res4[3]; 1465 u8 res4[3];
1466} __attribute__ ((packed)); 1466} __packed;
1467 1467
1468static void iucv_path_pending(struct iucv_irq_data *data) 1468static void iucv_path_pending(struct iucv_irq_data *data)
1469{ 1469{
@@ -1524,7 +1524,7 @@ struct iucv_path_complete {
1524 u32 res3; 1524 u32 res3;
1525 u8 ippollfg; 1525 u8 ippollfg;
1526 u8 res4[3]; 1526 u8 res4[3];
1527} __attribute__ ((packed)); 1527} __packed;
1528 1528
1529static void iucv_path_complete(struct iucv_irq_data *data) 1529static void iucv_path_complete(struct iucv_irq_data *data)
1530{ 1530{
@@ -1554,7 +1554,7 @@ struct iucv_path_severed {
1554 u32 res4; 1554 u32 res4;
1555 u8 ippollfg; 1555 u8 ippollfg;
1556 u8 res5[3]; 1556 u8 res5[3];
1557} __attribute__ ((packed)); 1557} __packed;
1558 1558
1559static void iucv_path_severed(struct iucv_irq_data *data) 1559static void iucv_path_severed(struct iucv_irq_data *data)
1560{ 1560{
@@ -1590,7 +1590,7 @@ struct iucv_path_quiesced {
1590 u32 res4; 1590 u32 res4;
1591 u8 ippollfg; 1591 u8 ippollfg;
1592 u8 res5[3]; 1592 u8 res5[3];
1593} __attribute__ ((packed)); 1593} __packed;
1594 1594
1595static void iucv_path_quiesced(struct iucv_irq_data *data) 1595static void iucv_path_quiesced(struct iucv_irq_data *data)
1596{ 1596{
@@ -1618,7 +1618,7 @@ struct iucv_path_resumed {
1618 u32 res4; 1618 u32 res4;
1619 u8 ippollfg; 1619 u8 ippollfg;
1620 u8 res5[3]; 1620 u8 res5[3];
1621} __attribute__ ((packed)); 1621} __packed;
1622 1622
1623static void iucv_path_resumed(struct iucv_irq_data *data) 1623static void iucv_path_resumed(struct iucv_irq_data *data)
1624{ 1624{
@@ -1649,7 +1649,7 @@ struct iucv_message_complete {
1649 u32 ipbfln2f; 1649 u32 ipbfln2f;
1650 u8 ippollfg; 1650 u8 ippollfg;
1651 u8 res2[3]; 1651 u8 res2[3];
1652} __attribute__ ((packed)); 1652} __packed;
1653 1653
1654static void iucv_message_complete(struct iucv_irq_data *data) 1654static void iucv_message_complete(struct iucv_irq_data *data)
1655{ 1655{
@@ -1694,7 +1694,7 @@ struct iucv_message_pending {
1694 u32 ipbfln2f; 1694 u32 ipbfln2f;
1695 u8 ippollfg; 1695 u8 ippollfg;
1696 u8 res2[3]; 1696 u8 res2[3];
1697} __attribute__ ((packed)); 1697} __packed;
1698 1698
1699static void iucv_message_pending(struct iucv_irq_data *data) 1699static void iucv_message_pending(struct iucv_irq_data *data)
1700{ 1700{
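
Editor's note: a purely textual conversion. __packed is the kernel's shorthand macro for the GCC attribute, defined in the compiler headers approximately as below, so the structure layout is unchanged:

	/* from the kernel's compiler headers, approximately: */
	#define __packed __attribute__((packed))

	/* both spellings yield the same 6-byte layout (8 if unpacked) */
	struct iucv_example {
		unsigned int   a;
		unsigned short b;
	} __packed;
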
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 0852512d392c..226a0ae3bcfd 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -348,7 +348,7 @@ static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
348 sk->sk_state = TCP_ESTABLISHED; 348 sk->sk_state = TCP_ESTABLISHED;
349 inet->inet_id = jiffies; 349 inet->inet_id = jiffies;
350 350
351 sk_dst_set(sk, &rt->u.dst); 351 sk_dst_set(sk, &rt->dst);
352 352
353 write_lock_bh(&l2tp_ip_lock); 353 write_lock_bh(&l2tp_ip_lock);
354 hlist_del_init(&sk->sk_bind_node); 354 hlist_del_init(&sk->sk_bind_node);
@@ -496,9 +496,9 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
496 if (ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0)) 496 if (ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0))
497 goto no_route; 497 goto no_route;
498 } 498 }
499 sk_setup_caps(sk, &rt->u.dst); 499 sk_setup_caps(sk, &rt->dst);
500 } 500 }
501 skb_dst_set(skb, dst_clone(&rt->u.dst)); 501 skb_dst_set(skb, dst_clone(&rt->dst));
502 502
503 /* Queue the packet to IP for output */ 503 /* Queue the packet to IP for output */
504 rc = ip_queue_xmit(skb); 504 rc = ip_queue_xmit(skb);
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index 8a91f6c0bb18..4d6f8653ec88 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -33,6 +33,13 @@ config MAC80211_RC_MINSTREL
33 ---help--- 33 ---help---
34 This option enables the 'minstrel' TX rate control algorithm 34 This option enables the 'minstrel' TX rate control algorithm
35 35
36config MAC80211_RC_MINSTREL_HT
37 bool "Minstrel 802.11n support" if EMBEDDED
38 depends on MAC80211_RC_MINSTREL
39 default y
40 ---help---
41 This option enables the 'minstrel_ht' TX rate control algorithm
42
36choice 43choice
37 prompt "Default rate control algorithm" 44 prompt "Default rate control algorithm"
38 depends on MAC80211_HAS_RC 45 depends on MAC80211_HAS_RC
@@ -62,6 +69,7 @@ endchoice
62 69
63config MAC80211_RC_DEFAULT 70config MAC80211_RC_DEFAULT
64 string 71 string
72 default "minstrel_ht" if MAC80211_RC_DEFAULT_MINSTREL && MAC80211_RC_MINSTREL_HT
65 default "minstrel" if MAC80211_RC_DEFAULT_MINSTREL 73 default "minstrel" if MAC80211_RC_DEFAULT_MINSTREL
66 default "pid" if MAC80211_RC_DEFAULT_PID 74 default "pid" if MAC80211_RC_DEFAULT_PID
67 default "" 75 default ""
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index 84b48ba8a77e..fdb54e61d637 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -51,7 +51,11 @@ rc80211_pid-$(CONFIG_MAC80211_DEBUGFS) += rc80211_pid_debugfs.o
51rc80211_minstrel-y := rc80211_minstrel.o 51rc80211_minstrel-y := rc80211_minstrel.o
52rc80211_minstrel-$(CONFIG_MAC80211_DEBUGFS) += rc80211_minstrel_debugfs.o 52rc80211_minstrel-$(CONFIG_MAC80211_DEBUGFS) += rc80211_minstrel_debugfs.o
53 53
54rc80211_minstrel_ht-y := rc80211_minstrel_ht.o
55rc80211_minstrel_ht-$(CONFIG_MAC80211_DEBUGFS) += rc80211_minstrel_ht_debugfs.o
56
54mac80211-$(CONFIG_MAC80211_RC_PID) += $(rc80211_pid-y) 57mac80211-$(CONFIG_MAC80211_RC_PID) += $(rc80211_pid-y)
55mac80211-$(CONFIG_MAC80211_RC_MINSTREL) += $(rc80211_minstrel-y) 58mac80211-$(CONFIG_MAC80211_RC_MINSTREL) += $(rc80211_minstrel-y)
59mac80211-$(CONFIG_MAC80211_RC_MINSTREL_HT) += $(rc80211_minstrel_ht-y)
56 60
57ccflags-y += -D__CHECK_ENDIAN__ 61ccflags-y += -D__CHECK_ENDIAN__
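
Editor's note on the Kconfig hunk above: Kconfig honours the first "default" line whose condition holds, so placing the new minstrel_ht default above the plain minstrel one makes "minstrel_ht" the chosen rate-control default whenever both MAC80211_RC_DEFAULT_MINSTREL and MAC80211_RC_MINSTREL_HT are enabled, while kernels built without the HT algorithm silently fall back to "minstrel".
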
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 6bb9a9a94960..965b272499fd 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -6,39 +6,70 @@
6 * Copyright 2005-2006, Devicescape Software, Inc. 6 * Copyright 2005-2006, Devicescape Software, Inc.
7 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> 7 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
8 * Copyright 2007, Michael Wu <flamingice@sourmilk.net> 8 * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
9 * Copyright 2007-2008, Intel Corporation 9 * Copyright 2007-2010, Intel Corporation
10 * 10 *
11 * This program is free software; you can redistribute it and/or modify 11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as 12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation. 13 * published by the Free Software Foundation.
14 */ 14 */
15 15
16/**
17 * DOC: RX A-MPDU aggregation
18 *
19 * Aggregation on the RX side requires only implementing the
20 * @ampdu_action callback that is invoked to start/stop any
21 * block-ack sessions for RX aggregation.
22 *
23 * When RX aggregation is started by the peer, the driver is
24 * notified via @ampdu_action function, with the
25 * %IEEE80211_AMPDU_RX_START action, and may reject the request
 26 * in which case a negative response is sent to the peer; if it
 27 * accepts, a positive response is sent.
28 *
29 * While the session is active, the device/driver are required
30 * to de-aggregate frames and pass them up one by one to mac80211,
31 * which will handle the reorder buffer.
32 *
33 * When the aggregation session is stopped again by the peer or
34 * ourselves, the driver's @ampdu_action function will be called
35 * with the action %IEEE80211_AMPDU_RX_STOP. In this case, the
36 * call must not fail.
37 */
38
16#include <linux/ieee80211.h> 39#include <linux/ieee80211.h>
17#include <linux/slab.h> 40#include <linux/slab.h>
18#include <net/mac80211.h> 41#include <net/mac80211.h>
19#include "ieee80211_i.h" 42#include "ieee80211_i.h"
20#include "driver-ops.h" 43#include "driver-ops.h"
21 44
22static void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, 45static void ieee80211_free_tid_rx(struct rcu_head *h)
23 u16 initiator, u16 reason,
24 bool from_timer)
25{ 46{
26 struct ieee80211_local *local = sta->local; 47 struct tid_ampdu_rx *tid_rx =
27 struct tid_ampdu_rx *tid_rx; 48 container_of(h, struct tid_ampdu_rx, rcu_head);
28 int i; 49 int i;
29 50
30 spin_lock_bh(&sta->lock); 51 for (i = 0; i < tid_rx->buf_size; i++)
52 dev_kfree_skb(tid_rx->reorder_buf[i]);
53 kfree(tid_rx->reorder_buf);
54 kfree(tid_rx->reorder_time);
55 kfree(tid_rx);
56}
31 57
32 /* check if TID is in operational state */ 58void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
33 if (!sta->ampdu_mlme.tid_active_rx[tid]) { 59 u16 initiator, u16 reason)
34 spin_unlock_bh(&sta->lock); 60{
35 return; 61 struct ieee80211_local *local = sta->local;
36 } 62 struct tid_ampdu_rx *tid_rx;
37 63
38 sta->ampdu_mlme.tid_active_rx[tid] = false; 64 lockdep_assert_held(&sta->ampdu_mlme.mtx);
39 65
40 tid_rx = sta->ampdu_mlme.tid_rx[tid]; 66 tid_rx = sta->ampdu_mlme.tid_rx[tid];
41 67
68 if (!tid_rx)
69 return;
70
71 rcu_assign_pointer(sta->ampdu_mlme.tid_rx[tid], NULL);
72
42#ifdef CONFIG_MAC80211_HT_DEBUG 73#ifdef CONFIG_MAC80211_HT_DEBUG
43 printk(KERN_DEBUG "Rx BA session stop requested for %pM tid %u\n", 74 printk(KERN_DEBUG "Rx BA session stop requested for %pM tid %u\n",
44 sta->sta.addr, tid); 75 sta->sta.addr, tid);
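
Editor's note: the new DOC block above spells out the driver contract for RX aggregation. A hedged sketch of what the corresponding @ampdu_action handler typically looks like; the foo_hw_* helpers are hypothetical stand-ins for device-specific work, and only the action names and the "RX_STOP must not fail" rule come from the documentation:

	static int foo_ampdu_action(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif,
				    enum ieee80211_ampdu_mlme_action action,
				    struct ieee80211_sta *sta, u16 tid, u16 *ssn)
	{
		switch (action) {
		case IEEE80211_AMPDU_RX_START:
			/* may refuse; a nonzero return NAKs the peer's addBA */
			return foo_hw_start_rx_ba(hw, sta, tid);
		case IEEE80211_AMPDU_RX_STOP:
			foo_hw_stop_rx_ba(hw, sta, tid);
			return 0;	/* must not fail */
		default:
			return -EOPNOTSUPP;
		}
	}
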
@@ -54,32 +85,17 @@ static void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
54 ieee80211_send_delba(sta->sdata, sta->sta.addr, 85 ieee80211_send_delba(sta->sdata, sta->sta.addr,
55 tid, 0, reason); 86 tid, 0, reason);
56 87
57 /* free the reordering buffer */ 88 del_timer_sync(&tid_rx->session_timer);
58 for (i = 0; i < tid_rx->buf_size; i++) {
59 if (tid_rx->reorder_buf[i]) {
60 /* release the reordered frames */
61 dev_kfree_skb(tid_rx->reorder_buf[i]);
62 tid_rx->stored_mpdu_num--;
63 tid_rx->reorder_buf[i] = NULL;
64 }
65 }
66
67 /* free resources */
68 kfree(tid_rx->reorder_buf);
69 kfree(tid_rx->reorder_time);
70 sta->ampdu_mlme.tid_rx[tid] = NULL;
71
72 spin_unlock_bh(&sta->lock);
73 89
74 if (!from_timer) 90 call_rcu(&tid_rx->rcu_head, ieee80211_free_tid_rx);
75 del_timer_sync(&tid_rx->session_timer);
76 kfree(tid_rx);
77} 91}
78 92
79void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, 93void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
80 u16 initiator, u16 reason) 94 u16 initiator, u16 reason)
81{ 95{
82 ___ieee80211_stop_rx_ba_session(sta, tid, initiator, reason, false); 96 mutex_lock(&sta->ampdu_mlme.mtx);
97 ___ieee80211_stop_rx_ba_session(sta, tid, initiator, reason);
98 mutex_unlock(&sta->ampdu_mlme.mtx);
83} 99}
84 100
85/* 101/*
@@ -100,8 +116,8 @@ static void sta_rx_agg_session_timer_expired(unsigned long data)
100#ifdef CONFIG_MAC80211_HT_DEBUG 116#ifdef CONFIG_MAC80211_HT_DEBUG
101 printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid); 117 printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid);
102#endif 118#endif
103 ___ieee80211_stop_rx_ba_session(sta, *ptid, WLAN_BACK_RECIPIENT, 119 set_bit(*ptid, sta->ampdu_mlme.tid_rx_timer_expired);
104 WLAN_REASON_QSTA_TIMEOUT, true); 120 ieee80211_queue_work(&sta->local->hw, &sta->ampdu_mlme.work);
105} 121}
106 122
107static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *da, u16 tid, 123static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *da, u16 tid,
@@ -212,9 +228,9 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
212 228
213 229
214 /* examine state machine */ 230 /* examine state machine */
215 spin_lock_bh(&sta->lock); 231 mutex_lock(&sta->ampdu_mlme.mtx);
216 232
217 if (sta->ampdu_mlme.tid_active_rx[tid]) { 233 if (sta->ampdu_mlme.tid_rx[tid]) {
218#ifdef CONFIG_MAC80211_HT_DEBUG 234#ifdef CONFIG_MAC80211_HT_DEBUG
219 if (net_ratelimit()) 235 if (net_ratelimit())
220 printk(KERN_DEBUG "unexpected AddBA Req from " 236 printk(KERN_DEBUG "unexpected AddBA Req from "
@@ -225,9 +241,8 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
225 } 241 }
226 242
227 /* prepare A-MPDU MLME for Rx aggregation */ 243 /* prepare A-MPDU MLME for Rx aggregation */
228 sta->ampdu_mlme.tid_rx[tid] = 244 tid_agg_rx = kmalloc(sizeof(struct tid_ampdu_rx), GFP_ATOMIC);
229 kmalloc(sizeof(struct tid_ampdu_rx), GFP_ATOMIC); 245 if (!tid_agg_rx) {
230 if (!sta->ampdu_mlme.tid_rx[tid]) {
231#ifdef CONFIG_MAC80211_HT_DEBUG 246#ifdef CONFIG_MAC80211_HT_DEBUG
232 if (net_ratelimit()) 247 if (net_ratelimit())
233 printk(KERN_ERR "allocate rx mlme to tid %d failed\n", 248 printk(KERN_ERR "allocate rx mlme to tid %d failed\n",
@@ -235,14 +250,11 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
235#endif 250#endif
236 goto end; 251 goto end;
237 } 252 }
238 /* rx timer */
239 sta->ampdu_mlme.tid_rx[tid]->session_timer.function =
240 sta_rx_agg_session_timer_expired;
241 sta->ampdu_mlme.tid_rx[tid]->session_timer.data =
242 (unsigned long)&sta->timer_to_tid[tid];
243 init_timer(&sta->ampdu_mlme.tid_rx[tid]->session_timer);
244 253
245 tid_agg_rx = sta->ampdu_mlme.tid_rx[tid]; 254 /* rx timer */
255 tid_agg_rx->session_timer.function = sta_rx_agg_session_timer_expired;
256 tid_agg_rx->session_timer.data = (unsigned long)&sta->timer_to_tid[tid];
257 init_timer(&tid_agg_rx->session_timer);
246 258
247 /* prepare reordering buffer */ 259 /* prepare reordering buffer */
248 tid_agg_rx->reorder_buf = 260 tid_agg_rx->reorder_buf =
@@ -257,8 +269,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
257#endif 269#endif
258 kfree(tid_agg_rx->reorder_buf); 270 kfree(tid_agg_rx->reorder_buf);
259 kfree(tid_agg_rx->reorder_time); 271 kfree(tid_agg_rx->reorder_time);
260 kfree(sta->ampdu_mlme.tid_rx[tid]); 272 kfree(tid_agg_rx);
261 sta->ampdu_mlme.tid_rx[tid] = NULL;
262 goto end; 273 goto end;
263 } 274 }
264 275
@@ -270,13 +281,12 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
270 281
271 if (ret) { 282 if (ret) {
272 kfree(tid_agg_rx->reorder_buf); 283 kfree(tid_agg_rx->reorder_buf);
284 kfree(tid_agg_rx->reorder_time);
273 kfree(tid_agg_rx); 285 kfree(tid_agg_rx);
274 sta->ampdu_mlme.tid_rx[tid] = NULL;
275 goto end; 286 goto end;
276 } 287 }
277 288
278 /* change state and send addba resp */ 289 /* update data */
279 sta->ampdu_mlme.tid_active_rx[tid] = true;
280 tid_agg_rx->dialog_token = dialog_token; 290 tid_agg_rx->dialog_token = dialog_token;
281 tid_agg_rx->ssn = start_seq_num; 291 tid_agg_rx->ssn = start_seq_num;
282 tid_agg_rx->head_seq_num = start_seq_num; 292 tid_agg_rx->head_seq_num = start_seq_num;
@@ -284,8 +294,15 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
284 tid_agg_rx->timeout = timeout; 294 tid_agg_rx->timeout = timeout;
285 tid_agg_rx->stored_mpdu_num = 0; 295 tid_agg_rx->stored_mpdu_num = 0;
286 status = WLAN_STATUS_SUCCESS; 296 status = WLAN_STATUS_SUCCESS;
297
298 /* activate it for RX */
299 rcu_assign_pointer(sta->ampdu_mlme.tid_rx[tid], tid_agg_rx);
300
301 if (timeout)
302 mod_timer(&tid_agg_rx->session_timer, TU_TO_EXP_TIME(timeout));
303
287end: 304end:
288 spin_unlock_bh(&sta->lock); 305 mutex_unlock(&sta->ampdu_mlme.mtx);
289 306
290end_no_lock: 307end_no_lock:
291 ieee80211_send_addba_resp(sta->sdata, sta->sta.addr, tid, 308 ieee80211_send_addba_resp(sta->sdata, sta->sta.addr, tid,
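
Editor's note: the rewritten teardown above is the classic RCU unpublish-then-free sequence; readers are expected to fetch tid_rx[tid] with rcu_dereference() under rcu_read_lock() and so never see a half-freed session. Condensed from the hunks above:

	rcu_assign_pointer(sta->ampdu_mlme.tid_rx[tid], NULL);	/* unpublish */
	del_timer_sync(&tid_rx->session_timer);			/* quiesce the timer */
	call_rcu(&tid_rx->rcu_head, ieee80211_free_tid_rx);	/* free after grace period */

The same series also fixes a small leak in the addBA error path: the old code freed reorder_buf but not reorder_time when drv_ampdu_action() failed, while the new code frees both (note the added kfree(tid_agg_rx->reorder_time) line).
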
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 98258b7341e3..c893f236acea 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -6,7 +6,7 @@
6 * Copyright 2005-2006, Devicescape Software, Inc. 6 * Copyright 2005-2006, Devicescape Software, Inc.
7 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> 7 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
8 * Copyright 2007, Michael Wu <flamingice@sourmilk.net> 8 * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
9 * Copyright 2007-2009, Intel Corporation 9 * Copyright 2007-2010, Intel Corporation
10 * 10 *
11 * This program is free software; you can redistribute it and/or modify 11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as 12 * it under the terms of the GNU General Public License version 2 as
@@ -21,28 +21,39 @@
21#include "wme.h" 21#include "wme.h"
22 22
23/** 23/**
24 * DOC: TX aggregation 24 * DOC: TX A-MPDU aggregation
25 * 25 *
26 * Aggregation on the TX side requires setting the hardware flag 26 * Aggregation on the TX side requires setting the hardware flag
27 * %IEEE80211_HW_AMPDU_AGGREGATION as well as, if present, the @ampdu_queues 27 * %IEEE80211_HW_AMPDU_AGGREGATION. The driver will then be handed
28 * hardware parameter to the number of hardware AMPDU queues. If there are no 28 * packets with a flag indicating A-MPDU aggregation. The driver
29 * hardware queues then the driver will (currently) have to do all frame 29 * or device is responsible for actually aggregating the frames,
30 * buffering. 30 * as well as deciding how many and which to aggregate.
31 * 31 *
32 * When TX aggregation is started by some subsystem (usually the rate control 32 * When TX aggregation is started by some subsystem (usually the rate
33 * algorithm would be appropriate) by calling the 33 * control algorithm would be appropriate) by calling the
34 * ieee80211_start_tx_ba_session() function, the driver will be notified via 34 * ieee80211_start_tx_ba_session() function, the driver will be
35 * its @ampdu_action function, with the %IEEE80211_AMPDU_TX_START action. 35 * notified via its @ampdu_action function, with the
36 * %IEEE80211_AMPDU_TX_START action.
36 * 37 *
37 * In response to that, the driver is later required to call the 38 * In response to that, the driver is later required to call the
38 * ieee80211_start_tx_ba_cb() (or ieee80211_start_tx_ba_cb_irqsafe()) 39 * ieee80211_start_tx_ba_cb_irqsafe() function, which will really
39 * function, which will start the aggregation session. 40 * start the aggregation session after the peer has also responded.
41 * If the peer responds negatively, the session will be stopped
42 * again right away. Note that it is possible for the aggregation
43 * session to be stopped before the driver has indicated that it
44 * is done setting it up, in which case it must not indicate the
45 * setup completion.
40 * 46 *
41 * Similarly, when the aggregation session is stopped by 47 * Also note that, since we also need to wait for a response from
42 * ieee80211_stop_tx_ba_session(), the driver's @ampdu_action function will 48 * the peer, the driver is notified of the completion of the
43 * be called with the action %IEEE80211_AMPDU_TX_STOP. In this case, the 49 * handshake by the %IEEE80211_AMPDU_TX_OPERATIONAL action to the
44 * call must not fail, and the driver must later call ieee80211_stop_tx_ba_cb() 50 * @ampdu_action callback.
45 * (or ieee80211_stop_tx_ba_cb_irqsafe()). 51 *
52 * Similarly, when the aggregation session is stopped by the peer
53 * or something calling ieee80211_stop_tx_ba_session(), the driver's
54 * @ampdu_action function will be called with the action
55 * %IEEE80211_AMPDU_TX_STOP. In this case, the call must not fail,
56 * and the driver must later call ieee80211_stop_tx_ba_cb_irqsafe().
46 */ 57 */
47 58
48static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata, 59static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
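
Editor's note: for reference, the handshake described by the rewritten DOC text, seen from the driver's side. The calls named are the real mac80211 entry points; the ordering summarizes the text above rather than adding any API guarantees:

	/*
	 * 1. mac80211 calls ampdu_action(IEEE80211_AMPDU_TX_START); the
	 *    driver prepares the hardware and then reports back with
	 *    ieee80211_start_tx_ba_cb_irqsafe(vif, ra, tid).
	 * 2. Once the peer's addBA response arrives, mac80211 signals
	 *    ampdu_action(IEEE80211_AMPDU_TX_OPERATIONAL) and starts
	 *    handing the driver frames flagged for A-MPDU aggregation.
	 * 3. On teardown, ampdu_action(IEEE80211_AMPDU_TX_STOP) must not
	 *    fail, and the driver completes the stop with
	 *    ieee80211_stop_tx_ba_cb_irqsafe(vif, ra, tid).
	 */
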
@@ -125,25 +136,53 @@ void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u1
125 ieee80211_tx_skb(sdata, skb); 136 ieee80211_tx_skb(sdata, skb);
126} 137}
127 138
139static void kfree_tid_tx(struct rcu_head *rcu_head)
140{
141 struct tid_ampdu_tx *tid_tx =
142 container_of(rcu_head, struct tid_ampdu_tx, rcu_head);
143
144 kfree(tid_tx);
145}
146
128int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, 147int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
129 enum ieee80211_back_parties initiator) 148 enum ieee80211_back_parties initiator)
130{ 149{
131 struct ieee80211_local *local = sta->local; 150 struct ieee80211_local *local = sta->local;
151 struct tid_ampdu_tx *tid_tx = sta->ampdu_mlme.tid_tx[tid];
132 int ret; 152 int ret;
133 u8 *state; 153
154 lockdep_assert_held(&sta->ampdu_mlme.mtx);
155
156 if (!tid_tx)
157 return -ENOENT;
158
159 spin_lock_bh(&sta->lock);
160
161 if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
162 /* not even started yet! */
163 rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], NULL);
164 spin_unlock_bh(&sta->lock);
165 call_rcu(&tid_tx->rcu_head, kfree_tid_tx);
166 return 0;
167 }
168
169 spin_unlock_bh(&sta->lock);
134 170
135#ifdef CONFIG_MAC80211_HT_DEBUG 171#ifdef CONFIG_MAC80211_HT_DEBUG
136 printk(KERN_DEBUG "Tx BA session stop requested for %pM tid %u\n", 172 printk(KERN_DEBUG "Tx BA session stop requested for %pM tid %u\n",
137 sta->sta.addr, tid); 173 sta->sta.addr, tid);
138#endif /* CONFIG_MAC80211_HT_DEBUG */ 174#endif /* CONFIG_MAC80211_HT_DEBUG */
139 175
140 state = &sta->ampdu_mlme.tid_state_tx[tid]; 176 set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);
141 177
142 if (*state == HT_AGG_STATE_OPERATIONAL) 178 /*
143 sta->ampdu_mlme.addba_req_num[tid] = 0; 179 * After this packets are no longer handed right through
180 * to the driver but are put onto tid_tx->pending instead,
181 * with locking to ensure proper access.
182 */
183 clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);
144 184
145 *state = HT_AGG_STATE_REQ_STOP_BA_MSK | 185 tid_tx->stop_initiator = initiator;
146 (initiator << HT_AGG_STATE_INITIATOR_SHIFT);
147 186
148 ret = drv_ampdu_action(local, sta->sdata, 187 ret = drv_ampdu_action(local, sta->sdata,
149 IEEE80211_AMPDU_TX_STOP, 188 IEEE80211_AMPDU_TX_STOP,
@@ -174,16 +213,14 @@ static void sta_addba_resp_timer_expired(unsigned long data)
174 u16 tid = *(u8 *)data; 213 u16 tid = *(u8 *)data;
175 struct sta_info *sta = container_of((void *)data, 214 struct sta_info *sta = container_of((void *)data,
176 struct sta_info, timer_to_tid[tid]); 215 struct sta_info, timer_to_tid[tid]);
177 u8 *state; 216 struct tid_ampdu_tx *tid_tx;
178
179 state = &sta->ampdu_mlme.tid_state_tx[tid];
180 217
181 /* check if the TID waits for addBA response */ 218 /* check if the TID waits for addBA response */
182 spin_lock_bh(&sta->lock); 219 rcu_read_lock();
183 if ((*state & (HT_ADDBA_REQUESTED_MSK | HT_ADDBA_RECEIVED_MSK | 220 tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
184 HT_AGG_STATE_REQ_STOP_BA_MSK)) != 221 if (!tid_tx ||
185 HT_ADDBA_REQUESTED_MSK) { 222 test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) {
186 spin_unlock_bh(&sta->lock); 223 rcu_read_unlock();
187#ifdef CONFIG_MAC80211_HT_DEBUG 224#ifdef CONFIG_MAC80211_HT_DEBUG
188 printk(KERN_DEBUG "timer expired on tid %d but we are not " 225 printk(KERN_DEBUG "timer expired on tid %d but we are not "
189 "(or no longer) expecting addBA response there\n", 226 "(or no longer) expecting addBA response there\n",
@@ -196,8 +233,8 @@ static void sta_addba_resp_timer_expired(unsigned long data)
196 printk(KERN_DEBUG "addBA response timer expired on tid %d\n", tid); 233 printk(KERN_DEBUG "addBA response timer expired on tid %d\n", tid);
197#endif 234#endif
198 235
199 ___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR); 236 ieee80211_stop_tx_ba_session(&sta->sta, tid);
200 spin_unlock_bh(&sta->lock); 237 rcu_read_unlock();
201} 238}
202 239
203static inline int ieee80211_ac_from_tid(int tid) 240static inline int ieee80211_ac_from_tid(int tid)
@@ -205,14 +242,112 @@ static inline int ieee80211_ac_from_tid(int tid)
205 return ieee802_1d_to_ac[tid & 7]; 242 return ieee802_1d_to_ac[tid & 7];
206} 243}
207 244
245/*
246 * When multiple aggregation sessions on multiple stations
247 * are being created/destroyed simultaneously, we need to
248 * refcount the global queue stop caused by that in order
249 * to not get into a situation where one of the aggregation
250 * setup or teardown re-enables queues before the other is
251 * ready to handle that.
252 *
253 * These two functions take care of this issue by keeping
254 * a global "agg_queue_stop" refcount.
255 */
256static void __acquires(agg_queue)
257ieee80211_stop_queue_agg(struct ieee80211_local *local, int tid)
258{
259 int queue = ieee80211_ac_from_tid(tid);
260
261 if (atomic_inc_return(&local->agg_queue_stop[queue]) == 1)
262 ieee80211_stop_queue_by_reason(
263 &local->hw, queue,
264 IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
265 __acquire(agg_queue);
266}
267
268static void __releases(agg_queue)
269ieee80211_wake_queue_agg(struct ieee80211_local *local, int tid)
270{
271 int queue = ieee80211_ac_from_tid(tid);
272
273 if (atomic_dec_return(&local->agg_queue_stop[queue]) == 0)
274 ieee80211_wake_queue_by_reason(
275 &local->hw, queue,
276 IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
277 __release(agg_queue);
278}
279
280void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
281{
282 struct tid_ampdu_tx *tid_tx = sta->ampdu_mlme.tid_tx[tid];
283 struct ieee80211_local *local = sta->local;
284 struct ieee80211_sub_if_data *sdata = sta->sdata;
285 u16 start_seq_num;
286 int ret;
287
288 lockdep_assert_held(&sta->ampdu_mlme.mtx);
289
290 /*
291 * While we're asking the driver about the aggregation,
292 * stop the AC queue so that we don't have to worry
293 * about frames that came in while we were doing that,
294 * which would require us to put them to the AC pending
295 * afterwards which just makes the code more complex.
296 */
297 ieee80211_stop_queue_agg(local, tid);
298
299 clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);
300
301 /*
302 * make sure no packets are being processed to get
303 * valid starting sequence number
304 */
305 synchronize_net();
306
307 start_seq_num = sta->tid_seq[tid] >> 4;
308
309 ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START,
310 &sta->sta, tid, &start_seq_num);
311 if (ret) {
312#ifdef CONFIG_MAC80211_HT_DEBUG
313 printk(KERN_DEBUG "BA request denied - HW unavailable for"
314 " tid %d\n", tid);
315#endif
316 spin_lock_bh(&sta->lock);
317 rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], NULL);
318 spin_unlock_bh(&sta->lock);
319
320 ieee80211_wake_queue_agg(local, tid);
321 call_rcu(&tid_tx->rcu_head, kfree_tid_tx);
322 return;
323 }
324
325 /* we can take packets again now */
326 ieee80211_wake_queue_agg(local, tid);
327
328 /* activate the timer for the recipient's addBA response */
329 mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL);
330#ifdef CONFIG_MAC80211_HT_DEBUG
331 printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid);
332#endif
333
334 spin_lock_bh(&sta->lock);
335 sta->ampdu_mlme.addba_req_num[tid]++;
336 spin_unlock_bh(&sta->lock);
337
338 /* send AddBA request */
339 ieee80211_send_addba_request(sdata, sta->sta.addr, tid,
340 tid_tx->dialog_token, start_seq_num,
341 0x40, 5000);
342}
343
208int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid) 344int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
209{ 345{
210 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 346 struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
211 struct ieee80211_sub_if_data *sdata = sta->sdata; 347 struct ieee80211_sub_if_data *sdata = sta->sdata;
212 struct ieee80211_local *local = sdata->local; 348 struct ieee80211_local *local = sdata->local;
213 u8 *state; 349 struct tid_ampdu_tx *tid_tx;
214 int ret = 0; 350 int ret = 0;
215 u16 start_seq_num;
216 351
217 trace_api_start_tx_ba_session(pubsta, tid); 352 trace_api_start_tx_ba_session(pubsta, tid);
218 353
@@ -239,24 +374,15 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
239 sdata->vif.type != NL80211_IFTYPE_AP) 374 sdata->vif.type != NL80211_IFTYPE_AP)
240 return -EINVAL; 375 return -EINVAL;
241 376
242 if (test_sta_flags(sta, WLAN_STA_DISASSOC)) {
243#ifdef CONFIG_MAC80211_HT_DEBUG
244 printk(KERN_DEBUG "Disassociation is in progress. "
245 "Denying BA session request\n");
246#endif
247 return -EINVAL;
248 }
249
250 if (test_sta_flags(sta, WLAN_STA_BLOCK_BA)) { 377 if (test_sta_flags(sta, WLAN_STA_BLOCK_BA)) {
251#ifdef CONFIG_MAC80211_HT_DEBUG 378#ifdef CONFIG_MAC80211_HT_DEBUG
252 printk(KERN_DEBUG "Suspend in progress. " 379 printk(KERN_DEBUG "BA sessions blocked. "
253 "Denying BA session request\n"); 380 "Denying BA session request\n");
254#endif 381#endif
255 return -EINVAL; 382 return -EINVAL;
256 } 383 }
257 384
258 spin_lock_bh(&sta->lock); 385 spin_lock_bh(&sta->lock);
259 spin_lock(&local->ampdu_lock);
260 386
261 /* we have tried too many times, receiver does not want A-MPDU */ 387 /* we have tried too many times, receiver does not want A-MPDU */
262 if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) { 388 if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) {
@@ -264,9 +390,9 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
264 goto err_unlock_sta; 390 goto err_unlock_sta;
265 } 391 }
266 392
267 state = &sta->ampdu_mlme.tid_state_tx[tid]; 393 tid_tx = sta->ampdu_mlme.tid_tx[tid];
268 /* check if the TID is not in aggregation flow already */ 394 /* check if the TID is not in aggregation flow already */
269 if (*state != HT_AGG_STATE_IDLE) { 395 if (tid_tx) {
270#ifdef CONFIG_MAC80211_HT_DEBUG 396#ifdef CONFIG_MAC80211_HT_DEBUG
271 printk(KERN_DEBUG "BA request denied - session is not " 397 printk(KERN_DEBUG "BA request denied - session is not "
272 "idle on tid %u\n", tid); 398 "idle on tid %u\n", tid);
@@ -275,96 +401,37 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
275 goto err_unlock_sta; 401 goto err_unlock_sta;
276 } 402 }
277 403
278 /*
279 * While we're asking the driver about the aggregation,
280 * stop the AC queue so that we don't have to worry
281 * about frames that came in while we were doing that,
282 * which would require us to put them to the AC pending
283 * afterwards which just makes the code more complex.
284 */
285 ieee80211_stop_queue_by_reason(
286 &local->hw, ieee80211_ac_from_tid(tid),
287 IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
288
289 /* prepare A-MPDU MLME for Tx aggregation */ 404 /* prepare A-MPDU MLME for Tx aggregation */
290 sta->ampdu_mlme.tid_tx[tid] = 405 tid_tx = kzalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
291 kmalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC); 406 if (!tid_tx) {
292 if (!sta->ampdu_mlme.tid_tx[tid]) {
293#ifdef CONFIG_MAC80211_HT_DEBUG 407#ifdef CONFIG_MAC80211_HT_DEBUG
294 if (net_ratelimit()) 408 if (net_ratelimit())
295 printk(KERN_ERR "allocate tx mlme to tid %d failed\n", 409 printk(KERN_ERR "allocate tx mlme to tid %d failed\n",
296 tid); 410 tid);
297#endif 411#endif
298 ret = -ENOMEM; 412 ret = -ENOMEM;
299 goto err_wake_queue; 413 goto err_unlock_sta;
300 } 414 }
301 415
302 skb_queue_head_init(&sta->ampdu_mlme.tid_tx[tid]->pending); 416 skb_queue_head_init(&tid_tx->pending);
417 __set_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);
303 418
304 /* Tx timer */ 419 /* Tx timer */
305 sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.function = 420 tid_tx->addba_resp_timer.function = sta_addba_resp_timer_expired;
306 sta_addba_resp_timer_expired; 421 tid_tx->addba_resp_timer.data = (unsigned long)&sta->timer_to_tid[tid];
307 sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.data = 422 init_timer(&tid_tx->addba_resp_timer);
308 (unsigned long)&sta->timer_to_tid[tid];
309 init_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
310
311 /* Ok, the Addba frame hasn't been sent yet, but if the driver calls the
312 * call back right away, it must see that the flow has begun */
313 *state |= HT_ADDBA_REQUESTED_MSK;
314
315 start_seq_num = sta->tid_seq[tid] >> 4;
316
317 ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START,
318 pubsta, tid, &start_seq_num);
319 423
320 if (ret) { 424 /* assign a dialog token */
321#ifdef CONFIG_MAC80211_HT_DEBUG
322 printk(KERN_DEBUG "BA request denied - HW unavailable for"
323 " tid %d\n", tid);
324#endif /* CONFIG_MAC80211_HT_DEBUG */
325 *state = HT_AGG_STATE_IDLE;
326 goto err_free;
327 }
328
329 /* Driver vetoed or OKed, but we can take packets again now */
330 ieee80211_wake_queue_by_reason(
331 &local->hw, ieee80211_ac_from_tid(tid),
332 IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
333
334 spin_unlock(&local->ampdu_lock);
335
336 /* prepare tid data */
337 sta->ampdu_mlme.dialog_token_allocator++; 425 sta->ampdu_mlme.dialog_token_allocator++;
338 sta->ampdu_mlme.tid_tx[tid]->dialog_token = 426 tid_tx->dialog_token = sta->ampdu_mlme.dialog_token_allocator;
339 sta->ampdu_mlme.dialog_token_allocator;
340 sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num;
341 427
342 spin_unlock_bh(&sta->lock); 428 /* finally, assign it to the array */
429 rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], tid_tx);
343 430
344 /* send AddBA request */ 431 ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
345 ieee80211_send_addba_request(sdata, pubsta->addr, tid, 432
346 sta->ampdu_mlme.tid_tx[tid]->dialog_token, 433 /* this flow continues off the work */
347 sta->ampdu_mlme.tid_tx[tid]->ssn,
348 0x40, 5000);
349 sta->ampdu_mlme.addba_req_num[tid]++;
350 /* activate the timer for the recipient's addBA response */
351 sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.expires =
352 jiffies + ADDBA_RESP_INTERVAL;
353 add_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
354#ifdef CONFIG_MAC80211_HT_DEBUG
355 printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid);
356#endif
357 return 0;
358
359 err_free:
360 kfree(sta->ampdu_mlme.tid_tx[tid]);
361 sta->ampdu_mlme.tid_tx[tid] = NULL;
362 err_wake_queue:
363 ieee80211_wake_queue_by_reason(
364 &local->hw, ieee80211_ac_from_tid(tid),
365 IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
366 err_unlock_sta: 434 err_unlock_sta:
367 spin_unlock(&local->ampdu_lock);
368 spin_unlock_bh(&sta->lock); 435 spin_unlock_bh(&sta->lock);
369 return ret; 436 return ret;
370} 437}
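
The rewritten allocation path only publishes tid_tx via rcu_assign_pointer() once the structure is fully set up, so lock-free readers on the TX path see either NULL or a complete object; teardown goes through call_rcu() for the same reason. A userspace sketch of the publish/read ordering, using C11 release/acquire atomics as stand-ins for rcu_assign_pointer()/rcu_dereference() (illustrative only):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct tid_tx { int dialog_token; };

static _Atomic(struct tid_tx *) tid_tx_ptr;

static void publish(int token)
{
        struct tid_tx *t = calloc(1, sizeof(*t));

        if (!t)
                return;
        t->dialog_token = token;        /* initialize fully first */
        /* release store, standing in for rcu_assign_pointer() */
        atomic_store_explicit(&tid_tx_ptr, t, memory_order_release);
}

static void reader(void)
{
        /* acquire load, standing in for rcu_dereference() */
        struct tid_tx *t =
                atomic_load_explicit(&tid_tx_ptr, memory_order_acquire);

        if (t)
                printf("session active, token %d\n", t->dialog_token);
        else
                printf("no session\n");
}

int main(void)
{
        reader();       /* sees NULL: no session yet */
        publish(1);
        reader();       /* sees a fully initialized tid_tx */
        return 0;
}
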
@@ -372,69 +439,65 @@ EXPORT_SYMBOL(ieee80211_start_tx_ba_session);
372 439
373/* 440/*
374 * splice packets from the STA's pending to the local pending, 441 * splice packets from the STA's pending to the local pending,
375 * requires a call to ieee80211_agg_splice_finish and holding 442 * requires a call to ieee80211_agg_splice_finish later
376 * local->ampdu_lock across both calls.
377 */ 443 */
378static void ieee80211_agg_splice_packets(struct ieee80211_local *local, 444static void __acquires(agg_queue)
379 struct sta_info *sta, u16 tid) 445ieee80211_agg_splice_packets(struct ieee80211_local *local,
446 struct tid_ampdu_tx *tid_tx, u16 tid)
380{ 447{
448 int queue = ieee80211_ac_from_tid(tid);
381 unsigned long flags; 449 unsigned long flags;
382 u16 queue = ieee80211_ac_from_tid(tid);
383
384 ieee80211_stop_queue_by_reason(
385 &local->hw, queue,
386 IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
387 450
388 if (!(sta->ampdu_mlme.tid_state_tx[tid] & HT_ADDBA_REQUESTED_MSK)) 451 ieee80211_stop_queue_agg(local, tid);
389 return;
390 452
391 if (WARN(!sta->ampdu_mlme.tid_tx[tid], 453 if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates"
392 "TID %d gone but expected when splicing aggregates from" 454 " from the pending queue\n", tid))
393 "the pending queue\n", tid))
394 return; 455 return;
395 456
396 if (!skb_queue_empty(&sta->ampdu_mlme.tid_tx[tid]->pending)) { 457 if (!skb_queue_empty(&tid_tx->pending)) {
397 spin_lock_irqsave(&local->queue_stop_reason_lock, flags); 458 spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
398 /* copy over remaining packets */ 459 /* copy over remaining packets */
399 skb_queue_splice_tail_init( 460 skb_queue_splice_tail_init(&tid_tx->pending,
400 &sta->ampdu_mlme.tid_tx[tid]->pending, 461 &local->pending[queue]);
401 &local->pending[queue]);
402 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); 462 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
403 } 463 }
404} 464}
405 465
406static void ieee80211_agg_splice_finish(struct ieee80211_local *local, 466static void __releases(agg_queue)
407 struct sta_info *sta, u16 tid) 467ieee80211_agg_splice_finish(struct ieee80211_local *local, u16 tid)
408{ 468{
409 u16 queue = ieee80211_ac_from_tid(tid); 469 ieee80211_wake_queue_agg(local, tid);
410
411 ieee80211_wake_queue_by_reason(
412 &local->hw, queue,
413 IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
414} 470}
415 471
416/* caller must hold sta->lock */
417static void ieee80211_agg_tx_operational(struct ieee80211_local *local, 472static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
418 struct sta_info *sta, u16 tid) 473 struct sta_info *sta, u16 tid)
419{ 474{
475 lockdep_assert_held(&sta->ampdu_mlme.mtx);
476
420#ifdef CONFIG_MAC80211_HT_DEBUG 477#ifdef CONFIG_MAC80211_HT_DEBUG
421 printk(KERN_DEBUG "Aggregation is on for tid %d\n", tid); 478 printk(KERN_DEBUG "Aggregation is on for tid %d\n", tid);
422#endif 479#endif
423 480
424 spin_lock(&local->ampdu_lock);
425 ieee80211_agg_splice_packets(local, sta, tid);
426 /*
427 * NB: we rely on sta->lock being taken in the TX
428 * processing here when adding to the pending queue,
429 * otherwise we could only change the state of the
430 * session to OPERATIONAL _here_.
431 */
432 ieee80211_agg_splice_finish(local, sta, tid);
433 spin_unlock(&local->ampdu_lock);
434
435 drv_ampdu_action(local, sta->sdata, 481 drv_ampdu_action(local, sta->sdata,
436 IEEE80211_AMPDU_TX_OPERATIONAL, 482 IEEE80211_AMPDU_TX_OPERATIONAL,
437 &sta->sta, tid, NULL); 483 &sta->sta, tid, NULL);
484
485 /*
 486	 * synchronize with the TX path: while we splice, the TX
 487	 * path should block so it won't put more packets onto pending.
488 */
489 spin_lock_bh(&sta->lock);
490
491 ieee80211_agg_splice_packets(local, sta->ampdu_mlme.tid_tx[tid], tid);
492 /*
493 * Now mark as operational. This will be visible
494 * in the TX path, and lets it go lock-free in
495 * the common case.
496 */
497 set_bit(HT_AGG_STATE_OPERATIONAL, &sta->ampdu_mlme.tid_tx[tid]->state);
498 ieee80211_agg_splice_finish(local, tid);
499
500 spin_unlock_bh(&sta->lock);
438} 501}
439 502
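
The comments above describe a double-checked handoff: the OPERATIONAL bit is set under sta->lock only after the pending frames have been spliced out, after which the TX hot path can test the bit and skip the lock entirely. A minimal model of that idiom (invented names, pthread + C11 atomics):

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

static atomic_bool operational;
static pthread_mutex_t sta_lock = PTHREAD_MUTEX_INITIALIZER;

static void tx_path(void)
{
        if (atomic_load_explicit(&operational, memory_order_acquire)) {
                puts("fast path: aggregate, no lock taken");
                return;
        }
        pthread_mutex_lock(&sta_lock);
        puts("slow path: queued on tid_tx->pending under the lock");
        pthread_mutex_unlock(&sta_lock);
}

static void make_operational(void)
{
        pthread_mutex_lock(&sta_lock);
        /* splice pending frames while TX is excluded by the lock */
        atomic_store_explicit(&operational, true, memory_order_release);
        pthread_mutex_unlock(&sta_lock);
}

int main(void)
{
        tx_path();              /* before: takes the lock */
        make_operational();
        tx_path();              /* after: lock-free */
        return 0;
}
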
440void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid) 503void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
@@ -442,7 +505,7 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
442 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); 505 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
443 struct ieee80211_local *local = sdata->local; 506 struct ieee80211_local *local = sdata->local;
444 struct sta_info *sta; 507 struct sta_info *sta;
445 u8 *state; 508 struct tid_ampdu_tx *tid_tx;
446 509
447 trace_api_start_tx_ba_cb(sdata, ra, tid); 510 trace_api_start_tx_ba_cb(sdata, ra, tid);
448 511
@@ -454,42 +517,36 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
454 return; 517 return;
455 } 518 }
456 519
457 rcu_read_lock(); 520 mutex_lock(&local->sta_mtx);
458 sta = sta_info_get(sdata, ra); 521 sta = sta_info_get(sdata, ra);
459 if (!sta) { 522 if (!sta) {
460 rcu_read_unlock(); 523 mutex_unlock(&local->sta_mtx);
461#ifdef CONFIG_MAC80211_HT_DEBUG 524#ifdef CONFIG_MAC80211_HT_DEBUG
462 printk(KERN_DEBUG "Could not find station: %pM\n", ra); 525 printk(KERN_DEBUG "Could not find station: %pM\n", ra);
463#endif 526#endif
464 return; 527 return;
465 } 528 }
466 529
467 state = &sta->ampdu_mlme.tid_state_tx[tid]; 530 mutex_lock(&sta->ampdu_mlme.mtx);
468 spin_lock_bh(&sta->lock); 531 tid_tx = sta->ampdu_mlme.tid_tx[tid];
469 532
470 if (WARN_ON(!(*state & HT_ADDBA_REQUESTED_MSK))) { 533 if (WARN_ON(!tid_tx)) {
471#ifdef CONFIG_MAC80211_HT_DEBUG 534#ifdef CONFIG_MAC80211_HT_DEBUG
472 printk(KERN_DEBUG "addBA was not requested yet, state is %d\n", 535 printk(KERN_DEBUG "addBA was not requested!\n");
473 *state);
474#endif 536#endif
475 spin_unlock_bh(&sta->lock); 537 goto unlock;
476 rcu_read_unlock();
477 return;
478 } 538 }
479 539
480 if (WARN_ON(*state & HT_ADDBA_DRV_READY_MSK)) 540 if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)))
481 goto out; 541 goto unlock;
482
483 *state |= HT_ADDBA_DRV_READY_MSK;
484 542
485 if (*state == HT_AGG_STATE_OPERATIONAL) 543 if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state))
486 ieee80211_agg_tx_operational(local, sta, tid); 544 ieee80211_agg_tx_operational(local, sta, tid);
487 545
488 out: 546 unlock:
489 spin_unlock_bh(&sta->lock); 547 mutex_unlock(&sta->ampdu_mlme.mtx);
490 rcu_read_unlock(); 548 mutex_unlock(&local->sta_mtx);
491} 549}
492EXPORT_SYMBOL(ieee80211_start_tx_ba_cb);
493 550
494void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, 551void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
495 const u8 *ra, u16 tid) 552 const u8 *ra, u16 tid)
@@ -510,44 +567,36 @@ void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
510 ra_tid = (struct ieee80211_ra_tid *) &skb->cb; 567 ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
511 memcpy(&ra_tid->ra, ra, ETH_ALEN); 568 memcpy(&ra_tid->ra, ra, ETH_ALEN);
512 ra_tid->tid = tid; 569 ra_tid->tid = tid;
513 ra_tid->vif = vif;
514 570
515 skb->pkt_type = IEEE80211_ADDBA_MSG; 571 skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_START;
516 skb_queue_tail(&local->skb_queue, skb); 572 skb_queue_tail(&sdata->skb_queue, skb);
517 tasklet_schedule(&local->tasklet); 573 ieee80211_queue_work(&local->hw, &sdata->work);
518} 574}
519EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe); 575EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);
520 576
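
Both _irqsafe callbacks now share one shape: since the caller may be in interrupt context where sta_mtx cannot be taken, the event is merely tagged (via skb->pkt_type) and queued on sdata->skb_queue for the sdata work item to handle in process context. A toy single-threaded model of the tag-and-defer idiom (invented names, no real locking):

#include <stdio.h>

enum msg_type { AGG_START, AGG_STOP };
struct msg { enum msg_type type; int tid; };

#define QSZ 8
static struct msg queue[QSZ];
static unsigned int head, tail;

/* "irqsafe" side: just tag and enqueue, no locks or sleeping */
static void irq_enqueue(enum msg_type type, int tid)
{
        queue[tail % QSZ] = (struct msg){ type, tid };
        tail++;
}

/* worker side: runs in process context, may take mutexes */
static void worker(void)
{
        while (head != tail) {
                struct msg m = queue[head % QSZ];

                head++;
                printf("%s on tid %d\n",
                       m.type == AGG_START ? "agg start" : "agg stop",
                       m.tid);
        }
}

int main(void)
{
        irq_enqueue(AGG_START, 5);
        irq_enqueue(AGG_STOP, 5);
        worker();
        return 0;
}
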
521int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, 577int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
522 enum ieee80211_back_parties initiator) 578 enum ieee80211_back_parties initiator)
523{ 579{
524 u8 *state;
525 int ret; 580 int ret;
526 581
527 /* check if the TID is in aggregation */ 582 mutex_lock(&sta->ampdu_mlme.mtx);
528 state = &sta->ampdu_mlme.tid_state_tx[tid];
529 spin_lock_bh(&sta->lock);
530
531 if (*state != HT_AGG_STATE_OPERATIONAL) {
532 ret = -ENOENT;
533 goto unlock;
534 }
535 583
536 ret = ___ieee80211_stop_tx_ba_session(sta, tid, initiator); 584 ret = ___ieee80211_stop_tx_ba_session(sta, tid, initiator);
537 585
538 unlock: 586 mutex_unlock(&sta->ampdu_mlme.mtx);
539 spin_unlock_bh(&sta->lock); 587
540 return ret; 588 return ret;
541} 589}
542 590
543int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid, 591int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
544 enum ieee80211_back_parties initiator)
545{ 592{
546 struct sta_info *sta = container_of(pubsta, struct sta_info, sta); 593 struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
547 struct ieee80211_sub_if_data *sdata = sta->sdata; 594 struct ieee80211_sub_if_data *sdata = sta->sdata;
548 struct ieee80211_local *local = sdata->local; 595 struct ieee80211_local *local = sdata->local;
596 struct tid_ampdu_tx *tid_tx;
597 int ret = 0;
549 598
550 trace_api_stop_tx_ba_session(pubsta, tid, initiator); 599 trace_api_stop_tx_ba_session(pubsta, tid);
551 600
552 if (!local->ops->ampdu_action) 601 if (!local->ops->ampdu_action)
553 return -EINVAL; 602 return -EINVAL;
@@ -555,7 +604,26 @@ int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
555 if (tid >= STA_TID_NUM) 604 if (tid >= STA_TID_NUM)
556 return -EINVAL; 605 return -EINVAL;
557 606
558 return __ieee80211_stop_tx_ba_session(sta, tid, initiator); 607 spin_lock_bh(&sta->lock);
608 tid_tx = sta->ampdu_mlme.tid_tx[tid];
609
610 if (!tid_tx) {
611 ret = -ENOENT;
612 goto unlock;
613 }
614
615 if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
616 /* already in progress stopping it */
617 ret = 0;
618 goto unlock;
619 }
620
621 set_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state);
622 ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
623
624 unlock:
625 spin_unlock_bh(&sta->lock);
626 return ret;
559} 627}
560EXPORT_SYMBOL(ieee80211_stop_tx_ba_session); 628EXPORT_SYMBOL(ieee80211_stop_tx_ba_session);
561 629
@@ -564,7 +632,7 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
564 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); 632 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
565 struct ieee80211_local *local = sdata->local; 633 struct ieee80211_local *local = sdata->local;
566 struct sta_info *sta; 634 struct sta_info *sta;
567 u8 *state; 635 struct tid_ampdu_tx *tid_tx;
568 636
569 trace_api_stop_tx_ba_cb(sdata, ra, tid); 637 trace_api_stop_tx_ba_cb(sdata, ra, tid);
570 638
@@ -581,51 +649,56 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
581 ra, tid); 649 ra, tid);
582#endif /* CONFIG_MAC80211_HT_DEBUG */ 650#endif /* CONFIG_MAC80211_HT_DEBUG */
583 651
584 rcu_read_lock(); 652 mutex_lock(&local->sta_mtx);
653
585 sta = sta_info_get(sdata, ra); 654 sta = sta_info_get(sdata, ra);
586 if (!sta) { 655 if (!sta) {
587#ifdef CONFIG_MAC80211_HT_DEBUG 656#ifdef CONFIG_MAC80211_HT_DEBUG
588 printk(KERN_DEBUG "Could not find station: %pM\n", ra); 657 printk(KERN_DEBUG "Could not find station: %pM\n", ra);
589#endif 658#endif
590 rcu_read_unlock(); 659 goto unlock;
591 return;
592 } 660 }
593 state = &sta->ampdu_mlme.tid_state_tx[tid];
594 661
595 /* NOTE: no need to use sta->lock in this state check, as 662 mutex_lock(&sta->ampdu_mlme.mtx);
596 * ieee80211_stop_tx_ba_session will let only one stop call to 663 spin_lock_bh(&sta->lock);
597 * pass through per sta/tid 664 tid_tx = sta->ampdu_mlme.tid_tx[tid];
598 */ 665
599 if ((*state & HT_AGG_STATE_REQ_STOP_BA_MSK) == 0) { 666 if (!tid_tx || !test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
600#ifdef CONFIG_MAC80211_HT_DEBUG 667#ifdef CONFIG_MAC80211_HT_DEBUG
601 printk(KERN_DEBUG "unexpected callback to A-MPDU stop\n"); 668 printk(KERN_DEBUG "unexpected callback to A-MPDU stop\n");
602#endif 669#endif
603 rcu_read_unlock(); 670 goto unlock_sta;
604 return;
605 } 671 }
606 672
607 if (*state & HT_AGG_STATE_INITIATOR_MSK) 673 if (tid_tx->stop_initiator == WLAN_BACK_INITIATOR)
608 ieee80211_send_delba(sta->sdata, ra, tid, 674 ieee80211_send_delba(sta->sdata, ra, tid,
609 WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE); 675 WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);
610 676
611 spin_lock_bh(&sta->lock); 677 /*
612 spin_lock(&local->ampdu_lock); 678 * When we get here, the TX path will not be lockless any more wrt.
679 * aggregation, since the OPERATIONAL bit has long been cleared.
 680	 * Thus, if it runs at all, it will block on the lock. So if we
681 * stop the queue now, we will not get any more packets, and any
682 * that might be being processed will wait for us here, thereby
683 * guaranteeing that no packets go to the tid_tx pending queue any
684 * more.
685 */
613 686
614 ieee80211_agg_splice_packets(local, sta, tid); 687 ieee80211_agg_splice_packets(local, tid_tx, tid);
615 688
616 *state = HT_AGG_STATE_IDLE; 689 /* future packets must not find the tid_tx struct any more */
617 /* from now on packets are no longer put onto sta->pending */ 690 rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], NULL);
618 kfree(sta->ampdu_mlme.tid_tx[tid]);
619 sta->ampdu_mlme.tid_tx[tid] = NULL;
620 691
621 ieee80211_agg_splice_finish(local, sta, tid); 692 ieee80211_agg_splice_finish(local, tid);
622 693
623 spin_unlock(&local->ampdu_lock); 694 call_rcu(&tid_tx->rcu_head, kfree_tid_tx);
624 spin_unlock_bh(&sta->lock);
625 695
626 rcu_read_unlock(); 696 unlock_sta:
697 spin_unlock_bh(&sta->lock);
698 mutex_unlock(&sta->ampdu_mlme.mtx);
699 unlock:
700 mutex_unlock(&local->sta_mtx);
627} 701}
628EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb);
629 702
630void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, 703void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
631 const u8 *ra, u16 tid) 704 const u8 *ra, u16 tid)
@@ -646,11 +719,10 @@ void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
646 ra_tid = (struct ieee80211_ra_tid *) &skb->cb; 719 ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
647 memcpy(&ra_tid->ra, ra, ETH_ALEN); 720 memcpy(&ra_tid->ra, ra, ETH_ALEN);
648 ra_tid->tid = tid; 721 ra_tid->tid = tid;
649 ra_tid->vif = vif;
650 722
651 skb->pkt_type = IEEE80211_DELBA_MSG; 723 skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_STOP;
652 skb_queue_tail(&local->skb_queue, skb); 724 skb_queue_tail(&sdata->skb_queue, skb);
653 tasklet_schedule(&local->tasklet); 725 ieee80211_queue_work(&local->hw, &sdata->work);
654} 726}
655EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe); 727EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe);
656 728
@@ -660,40 +732,40 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
660 struct ieee80211_mgmt *mgmt, 732 struct ieee80211_mgmt *mgmt,
661 size_t len) 733 size_t len)
662{ 734{
735 struct tid_ampdu_tx *tid_tx;
663 u16 capab, tid; 736 u16 capab, tid;
664 u8 *state;
665 737
666 capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab); 738 capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab);
667 tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2; 739 tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
668 740
669 state = &sta->ampdu_mlme.tid_state_tx[tid]; 741 mutex_lock(&sta->ampdu_mlme.mtx);
670
671 spin_lock_bh(&sta->lock);
672 742
673 if (!(*state & HT_ADDBA_REQUESTED_MSK)) 743 tid_tx = sta->ampdu_mlme.tid_tx[tid];
744 if (!tid_tx)
674 goto out; 745 goto out;
675 746
676 if (mgmt->u.action.u.addba_resp.dialog_token != 747 if (mgmt->u.action.u.addba_resp.dialog_token != tid_tx->dialog_token) {
677 sta->ampdu_mlme.tid_tx[tid]->dialog_token) {
678#ifdef CONFIG_MAC80211_HT_DEBUG 748#ifdef CONFIG_MAC80211_HT_DEBUG
679 printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid); 749 printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid);
680#endif /* CONFIG_MAC80211_HT_DEBUG */ 750#endif
681 goto out; 751 goto out;
682 } 752 }
683 753
684 del_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer); 754 del_timer(&tid_tx->addba_resp_timer);
685 755
686#ifdef CONFIG_MAC80211_HT_DEBUG 756#ifdef CONFIG_MAC80211_HT_DEBUG
687 printk(KERN_DEBUG "switched off addBA timer for tid %d\n", tid); 757 printk(KERN_DEBUG "switched off addBA timer for tid %d\n", tid);
688#endif /* CONFIG_MAC80211_HT_DEBUG */ 758#endif
689 759
690 if (le16_to_cpu(mgmt->u.action.u.addba_resp.status) 760 if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
691 == WLAN_STATUS_SUCCESS) { 761 == WLAN_STATUS_SUCCESS) {
692 u8 curstate = *state; 762 if (test_and_set_bit(HT_AGG_STATE_RESPONSE_RECEIVED,
693 763 &tid_tx->state)) {
694 *state |= HT_ADDBA_RECEIVED_MSK; 764 /* ignore duplicate response */
765 goto out;
766 }
695 767
696 if (*state != curstate && *state == HT_AGG_STATE_OPERATIONAL) 768 if (test_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state))
697 ieee80211_agg_tx_operational(local, sta, tid); 769 ieee80211_agg_tx_operational(local, sta, tid);
698 770
699 sta->ampdu_mlme.addba_req_num[tid] = 0; 771 sta->ampdu_mlme.addba_req_num[tid] = 0;
@@ -702,5 +774,5 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
702 } 774 }
703 775
704 out: 776 out:
705 spin_unlock_bh(&sta->lock); 777 mutex_unlock(&sta->ampdu_mlme.mtx);
706} 778}
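
The duplicate-response guard above leans on test_and_set_bit() being atomic: exactly one addBA response flips RESPONSE_RECEIVED and proceeds; any replay takes the early out. The same idiom in standalone C11:

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag response_received = ATOMIC_FLAG_INIT;

static void handle_addba_resp(int n)
{
        /* returns previous state: true means someone already won */
        if (atomic_flag_test_and_set(&response_received)) {
                printf("response %d: duplicate, ignored\n", n);
                return;
        }
        printf("response %d: accepted\n", n);
}

int main(void)
{
        handle_addba_resp(1);
        handle_addba_resp(2);
        return 0;
}
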
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index c7000a6ca379..29ac8e1a509e 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -120,6 +120,9 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
120 struct ieee80211_key *key; 120 struct ieee80211_key *key;
121 int err; 121 int err;
122 122
123 if (!netif_running(dev))
124 return -ENETDOWN;
125
123 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 126 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
124 127
125 switch (params->cipher) { 128 switch (params->cipher) {
@@ -140,17 +143,22 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
140 return -EINVAL; 143 return -EINVAL;
141 } 144 }
142 145
146 /* reject WEP and TKIP keys if WEP failed to initialize */
147 if ((alg == ALG_WEP || alg == ALG_TKIP) &&
148 IS_ERR(sdata->local->wep_tx_tfm))
149 return -EINVAL;
150
143 key = ieee80211_key_alloc(alg, key_idx, params->key_len, params->key, 151 key = ieee80211_key_alloc(alg, key_idx, params->key_len, params->key,
144 params->seq_len, params->seq); 152 params->seq_len, params->seq);
145 if (!key) 153 if (!key)
146 return -ENOMEM; 154 return -ENOMEM;
147 155
148 rcu_read_lock(); 156 mutex_lock(&sdata->local->sta_mtx);
149 157
150 if (mac_addr) { 158 if (mac_addr) {
151 sta = sta_info_get_bss(sdata, mac_addr); 159 sta = sta_info_get_bss(sdata, mac_addr);
152 if (!sta) { 160 if (!sta) {
153 ieee80211_key_free(key); 161 ieee80211_key_free(sdata->local, key);
154 err = -ENOENT; 162 err = -ENOENT;
155 goto out_unlock; 163 goto out_unlock;
156 } 164 }
@@ -160,7 +168,7 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
160 168
161 err = 0; 169 err = 0;
162 out_unlock: 170 out_unlock:
163 rcu_read_unlock(); 171 mutex_unlock(&sdata->local->sta_mtx);
164 172
165 return err; 173 return err;
166} 174}
@@ -174,7 +182,7 @@ static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev,
174 182
175 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 183 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
176 184
177 rcu_read_lock(); 185 mutex_lock(&sdata->local->sta_mtx);
178 186
179 if (mac_addr) { 187 if (mac_addr) {
180 ret = -ENOENT; 188 ret = -ENOENT;
@@ -184,7 +192,7 @@ static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev,
184 goto out_unlock; 192 goto out_unlock;
185 193
186 if (sta->key) { 194 if (sta->key) {
187 ieee80211_key_free(sta->key); 195 ieee80211_key_free(sdata->local, sta->key);
188 WARN_ON(sta->key); 196 WARN_ON(sta->key);
189 ret = 0; 197 ret = 0;
190 } 198 }
@@ -197,12 +205,12 @@ static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev,
197 goto out_unlock; 205 goto out_unlock;
198 } 206 }
199 207
200 ieee80211_key_free(sdata->keys[key_idx]); 208 ieee80211_key_free(sdata->local, sdata->keys[key_idx]);
201 WARN_ON(sdata->keys[key_idx]); 209 WARN_ON(sdata->keys[key_idx]);
202 210
203 ret = 0; 211 ret = 0;
204 out_unlock: 212 out_unlock:
205 rcu_read_unlock(); 213 mutex_unlock(&sdata->local->sta_mtx);
206 214
207 return ret; 215 return ret;
208} 216}
@@ -305,15 +313,10 @@ static int ieee80211_config_default_key(struct wiphy *wiphy,
305 struct net_device *dev, 313 struct net_device *dev,
306 u8 key_idx) 314 u8 key_idx)
307{ 315{
308 struct ieee80211_sub_if_data *sdata; 316 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
309
310 rcu_read_lock();
311 317
312 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
313 ieee80211_set_default_key(sdata, key_idx); 318 ieee80211_set_default_key(sdata, key_idx);
314 319
315 rcu_read_unlock();
316
317 return 0; 320 return 0;
318} 321}
319 322
@@ -321,15 +324,10 @@ static int ieee80211_config_default_mgmt_key(struct wiphy *wiphy,
321 struct net_device *dev, 324 struct net_device *dev,
322 u8 key_idx) 325 u8 key_idx)
323{ 326{
324 struct ieee80211_sub_if_data *sdata; 327 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
325
326 rcu_read_lock();
327 328
328 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
329 ieee80211_set_default_mgmt_key(sdata, key_idx); 329 ieee80211_set_default_mgmt_key(sdata, key_idx);
330 330
331 rcu_read_unlock();
332
333 return 0; 331 return 0;
334} 332}
335 333
@@ -415,9 +413,6 @@ static int ieee80211_dump_survey(struct wiphy *wiphy, struct net_device *dev,
415{ 413{
416 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 414 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
417 415
418 if (!local->ops->get_survey)
419 return -EOPNOTSUPP;
420
421 return drv_get_survey(local, idx, survey); 416 return drv_get_survey(local, idx, survey);
422} 417}
423 418
@@ -600,7 +595,7 @@ struct iapp_layer2_update {
600 u8 ssap; /* 0 */ 595 u8 ssap; /* 0 */
601 u8 control; 596 u8 control;
602 u8 xid_info[3]; 597 u8 xid_info[3];
603} __attribute__ ((packed)); 598} __packed;
604 599
605static void ieee80211_send_layer2_update(struct sta_info *sta) 600static void ieee80211_send_layer2_update(struct sta_info *sta)
606{ 601{
@@ -632,7 +627,7 @@ static void ieee80211_send_layer2_update(struct sta_info *sta)
632 skb->dev = sta->sdata->dev; 627 skb->dev = sta->sdata->dev;
633 skb->protocol = eth_type_trans(skb, sta->sdata->dev); 628 skb->protocol = eth_type_trans(skb, sta->sdata->dev);
634 memset(skb->cb, 0, sizeof(skb->cb)); 629 memset(skb->cb, 0, sizeof(skb->cb));
635 netif_rx(skb); 630 netif_rx_ni(skb);
636} 631}
637 632
638static void sta_apply_parameters(struct ieee80211_local *local, 633static void sta_apply_parameters(struct ieee80211_local *local,
@@ -1154,10 +1149,6 @@ static int ieee80211_set_txq_params(struct wiphy *wiphy,
1154 return -EINVAL; 1149 return -EINVAL;
1155 } 1150 }
1156 1151
1157 /* enable WMM or activate new settings */
1158 local->hw.conf.flags |= IEEE80211_CONF_QOS;
1159 drv_config(local, IEEE80211_CONF_CHANGE_QOS);
1160
1161 return 0; 1152 return 0;
1162} 1153}
1163 1154
@@ -1331,28 +1322,28 @@ static int ieee80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
1331} 1322}
1332 1323
1333static int ieee80211_set_tx_power(struct wiphy *wiphy, 1324static int ieee80211_set_tx_power(struct wiphy *wiphy,
1334 enum tx_power_setting type, int dbm) 1325 enum nl80211_tx_power_setting type, int mbm)
1335{ 1326{
1336 struct ieee80211_local *local = wiphy_priv(wiphy); 1327 struct ieee80211_local *local = wiphy_priv(wiphy);
1337 struct ieee80211_channel *chan = local->hw.conf.channel; 1328 struct ieee80211_channel *chan = local->hw.conf.channel;
1338 u32 changes = 0; 1329 u32 changes = 0;
1339 1330
1340 switch (type) { 1331 switch (type) {
1341 case TX_POWER_AUTOMATIC: 1332 case NL80211_TX_POWER_AUTOMATIC:
1342 local->user_power_level = -1; 1333 local->user_power_level = -1;
1343 break; 1334 break;
1344 case TX_POWER_LIMITED: 1335 case NL80211_TX_POWER_LIMITED:
1345 if (dbm < 0) 1336 if (mbm < 0 || (mbm % 100))
1346 return -EINVAL; 1337 return -EOPNOTSUPP;
1347 local->user_power_level = dbm; 1338 local->user_power_level = MBM_TO_DBM(mbm);
1348 break; 1339 break;
1349 case TX_POWER_FIXED: 1340 case NL80211_TX_POWER_FIXED:
1350 if (dbm < 0) 1341 if (mbm < 0 || (mbm % 100))
1351 return -EINVAL; 1342 return -EOPNOTSUPP;
1352 /* TODO: move to cfg80211 when it knows the channel */ 1343 /* TODO: move to cfg80211 when it knows the channel */
1353 if (dbm > chan->max_power) 1344 if (MBM_TO_DBM(mbm) > chan->max_power)
1354 return -EINVAL; 1345 return -EINVAL;
1355 local->user_power_level = dbm; 1346 local->user_power_level = MBM_TO_DBM(mbm);
1356 break; 1347 break;
1357 } 1348 }
1358 1349
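
The unit change here is the point: nl80211 passes TX power in mBm, i.e. hundredths of a dBm, which is why fractional-dBm values (mbm % 100) are rejected; MBM_TO_DBM is assumed below to be a plain integer division by 100. A quick check of the arithmetic:

#include <stdio.h>

#define MBM_TO_DBM(gain) ((gain) / 100) /* assumed definition */

int main(void)
{
        int mbm = 2000;                 /* 20.00 dBm */

        if (mbm < 0 || (mbm % 100))
                return 1;               /* would be -EOPNOTSUPP above */
        printf("%d mBm = %d dBm\n", mbm, MBM_TO_DBM(mbm));
        return 0;
}
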
@@ -1448,7 +1439,6 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
1448{ 1439{
1449 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1440 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1450 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1441 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1451 struct ieee80211_conf *conf = &local->hw.conf;
1452 1442
1453 if (sdata->vif.type != NL80211_IFTYPE_STATION) 1443 if (sdata->vif.type != NL80211_IFTYPE_STATION)
1454 return -EOPNOTSUPP; 1444 return -EOPNOTSUPP;
@@ -1457,11 +1447,11 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
1457 return -EOPNOTSUPP; 1447 return -EOPNOTSUPP;
1458 1448
1459 if (enabled == sdata->u.mgd.powersave && 1449 if (enabled == sdata->u.mgd.powersave &&
1460 timeout == conf->dynamic_ps_forced_timeout) 1450 timeout == local->dynamic_ps_forced_timeout)
1461 return 0; 1451 return 0;
1462 1452
1463 sdata->u.mgd.powersave = enabled; 1453 sdata->u.mgd.powersave = enabled;
1464 conf->dynamic_ps_forced_timeout = timeout; 1454 local->dynamic_ps_forced_timeout = timeout;
1465 1455
1466 /* no change, but if automatic follow powersave */ 1456 /* no change, but if automatic follow powersave */
1467 mutex_lock(&sdata->u.mgd.mtx); 1457 mutex_lock(&sdata->u.mgd.mtx);
@@ -1554,10 +1544,58 @@ static int ieee80211_cancel_remain_on_channel(struct wiphy *wiphy,
1554static int ieee80211_action(struct wiphy *wiphy, struct net_device *dev, 1544static int ieee80211_action(struct wiphy *wiphy, struct net_device *dev,
1555 struct ieee80211_channel *chan, 1545 struct ieee80211_channel *chan,
1556 enum nl80211_channel_type channel_type, 1546 enum nl80211_channel_type channel_type,
1547 bool channel_type_valid,
1557 const u8 *buf, size_t len, u64 *cookie) 1548 const u8 *buf, size_t len, u64 *cookie)
1558{ 1549{
1559 return ieee80211_mgd_action(IEEE80211_DEV_TO_SUB_IF(dev), chan, 1550 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1560 channel_type, buf, len, cookie); 1551 struct ieee80211_local *local = sdata->local;
1552 struct sk_buff *skb;
1553 struct sta_info *sta;
1554 const struct ieee80211_mgmt *mgmt = (void *)buf;
1555 u32 flags = IEEE80211_TX_INTFL_NL80211_FRAME_TX |
1556 IEEE80211_TX_CTL_REQ_TX_STATUS;
1557
1558 /* Check that we are on the requested channel for transmission */
1559 if (chan != local->tmp_channel &&
1560 chan != local->oper_channel)
1561 return -EBUSY;
1562 if (channel_type_valid &&
1563 (channel_type != local->tmp_channel_type &&
1564 channel_type != local->_oper_channel_type))
1565 return -EBUSY;
1566
1567 switch (sdata->vif.type) {
1568 case NL80211_IFTYPE_ADHOC:
1569 if (mgmt->u.action.category == WLAN_CATEGORY_PUBLIC)
1570 break;
1571 rcu_read_lock();
1572 sta = sta_info_get(sdata, mgmt->da);
1573 rcu_read_unlock();
1574 if (!sta)
1575 return -ENOLINK;
1576 break;
1577 case NL80211_IFTYPE_STATION:
1578 if (!(sdata->u.mgd.flags & IEEE80211_STA_MFP_ENABLED))
1579 flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
1580 break;
1581 default:
1582 return -EOPNOTSUPP;
1583 }
1584
1585 skb = dev_alloc_skb(local->hw.extra_tx_headroom + len);
1586 if (!skb)
1587 return -ENOMEM;
1588 skb_reserve(skb, local->hw.extra_tx_headroom);
1589
1590 memcpy(skb_put(skb, len), buf, len);
1591
1592 IEEE80211_SKB_CB(skb)->flags = flags;
1593
1594 skb->dev = sdata->dev;
1595 ieee80211_tx_skb(sdata, skb);
1596
1597 *cookie = (unsigned long) skb;
1598 return 0;
1561} 1599}
1562 1600
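
The frame construction above follows the standard skb recipe: allocate with extra_tx_headroom, skb_reserve() that headroom so the driver can prepend its own headers without reallocating, then skb_put() plus memcpy for the payload. A plain-C model of the reserve/put bookkeeping (not the real skb API):

#include <stdio.h>
#include <string.h>

#define HEADROOM 32     /* stands in for local->hw.extra_tx_headroom */

struct buf {
        unsigned char data[256];
        size_t start;   /* payload offset, i.e. reserved headroom */
        size_t len;     /* payload length so far */
};

/* like skb_reserve(): push the data start forward */
static void buf_reserve(struct buf *b, size_t n)
{
        b->start += n;
}

/* like skb_put(): extend the payload, return where to write */
static unsigned char *buf_put(struct buf *b, size_t n)
{
        unsigned char *p = b->data + b->start + b->len;

        b->len += n;
        return p;
}

int main(void)
{
        struct buf b = { .start = 0, .len = 0 };
        const char payload[] = "action frame body";

        buf_reserve(&b, HEADROOM);
        memcpy(buf_put(&b, sizeof(payload)), payload, sizeof(payload));
        printf("payload at offset %zu, length %zu\n", b.start, b.len);
        return 0;
}
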
1563struct cfg80211_ops mac80211_config_ops = { 1601struct cfg80211_ops mac80211_config_ops = {
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 637929b65ccc..a694c593ff6a 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -307,9 +307,6 @@ static const struct file_operations queues_ops = {
307 307
308/* statistics stuff */ 308/* statistics stuff */
309 309
310#define DEBUGFS_STATS_FILE(name, buflen, fmt, value...) \
311 DEBUGFS_READONLY_FILE(stats_ ##name, buflen, fmt, ##value)
312
313static ssize_t format_devstat_counter(struct ieee80211_local *local, 310static ssize_t format_devstat_counter(struct ieee80211_local *local,
314 char __user *userbuf, 311 char __user *userbuf,
315 size_t count, loff_t *ppos, 312 size_t count, loff_t *ppos,
@@ -351,75 +348,16 @@ static const struct file_operations stats_ ##name## _ops = { \
351 .open = mac80211_open_file_generic, \ 348 .open = mac80211_open_file_generic, \
352}; 349};
353 350
354#define DEBUGFS_STATS_ADD(name) \ 351#define DEBUGFS_STATS_ADD(name, field) \
352 debugfs_create_u32(#name, 0400, statsd, (u32 *) &field);
353#define DEBUGFS_DEVSTATS_ADD(name) \
355 debugfs_create_file(#name, 0400, statsd, local, &stats_ ##name## _ops); 354 debugfs_create_file(#name, 0400, statsd, local, &stats_ ##name## _ops);
356 355
357DEBUGFS_STATS_FILE(transmitted_fragment_count, 20, "%u",
358 local->dot11TransmittedFragmentCount);
359DEBUGFS_STATS_FILE(multicast_transmitted_frame_count, 20, "%u",
360 local->dot11MulticastTransmittedFrameCount);
361DEBUGFS_STATS_FILE(failed_count, 20, "%u",
362 local->dot11FailedCount);
363DEBUGFS_STATS_FILE(retry_count, 20, "%u",
364 local->dot11RetryCount);
365DEBUGFS_STATS_FILE(multiple_retry_count, 20, "%u",
366 local->dot11MultipleRetryCount);
367DEBUGFS_STATS_FILE(frame_duplicate_count, 20, "%u",
368 local->dot11FrameDuplicateCount);
369DEBUGFS_STATS_FILE(received_fragment_count, 20, "%u",
370 local->dot11ReceivedFragmentCount);
371DEBUGFS_STATS_FILE(multicast_received_frame_count, 20, "%u",
372 local->dot11MulticastReceivedFrameCount);
373DEBUGFS_STATS_FILE(transmitted_frame_count, 20, "%u",
374 local->dot11TransmittedFrameCount);
375#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
376DEBUGFS_STATS_FILE(tx_handlers_drop, 20, "%u",
377 local->tx_handlers_drop);
378DEBUGFS_STATS_FILE(tx_handlers_queued, 20, "%u",
379 local->tx_handlers_queued);
380DEBUGFS_STATS_FILE(tx_handlers_drop_unencrypted, 20, "%u",
381 local->tx_handlers_drop_unencrypted);
382DEBUGFS_STATS_FILE(tx_handlers_drop_fragment, 20, "%u",
383 local->tx_handlers_drop_fragment);
384DEBUGFS_STATS_FILE(tx_handlers_drop_wep, 20, "%u",
385 local->tx_handlers_drop_wep);
386DEBUGFS_STATS_FILE(tx_handlers_drop_not_assoc, 20, "%u",
387 local->tx_handlers_drop_not_assoc);
388DEBUGFS_STATS_FILE(tx_handlers_drop_unauth_port, 20, "%u",
389 local->tx_handlers_drop_unauth_port);
390DEBUGFS_STATS_FILE(rx_handlers_drop, 20, "%u",
391 local->rx_handlers_drop);
392DEBUGFS_STATS_FILE(rx_handlers_queued, 20, "%u",
393 local->rx_handlers_queued);
394DEBUGFS_STATS_FILE(rx_handlers_drop_nullfunc, 20, "%u",
395 local->rx_handlers_drop_nullfunc);
396DEBUGFS_STATS_FILE(rx_handlers_drop_defrag, 20, "%u",
397 local->rx_handlers_drop_defrag);
398DEBUGFS_STATS_FILE(rx_handlers_drop_short, 20, "%u",
399 local->rx_handlers_drop_short);
400DEBUGFS_STATS_FILE(rx_handlers_drop_passive_scan, 20, "%u",
401 local->rx_handlers_drop_passive_scan);
402DEBUGFS_STATS_FILE(tx_expand_skb_head, 20, "%u",
403 local->tx_expand_skb_head);
404DEBUGFS_STATS_FILE(tx_expand_skb_head_cloned, 20, "%u",
405 local->tx_expand_skb_head_cloned);
406DEBUGFS_STATS_FILE(rx_expand_skb_head, 20, "%u",
407 local->rx_expand_skb_head);
408DEBUGFS_STATS_FILE(rx_expand_skb_head2, 20, "%u",
409 local->rx_expand_skb_head2);
410DEBUGFS_STATS_FILE(rx_handlers_fragments, 20, "%u",
411 local->rx_handlers_fragments);
412DEBUGFS_STATS_FILE(tx_status_drop, 20, "%u",
413 local->tx_status_drop);
414
415#endif
416
417DEBUGFS_DEVSTATS_FILE(dot11ACKFailureCount); 356DEBUGFS_DEVSTATS_FILE(dot11ACKFailureCount);
418DEBUGFS_DEVSTATS_FILE(dot11RTSFailureCount); 357DEBUGFS_DEVSTATS_FILE(dot11RTSFailureCount);
419DEBUGFS_DEVSTATS_FILE(dot11FCSErrorCount); 358DEBUGFS_DEVSTATS_FILE(dot11FCSErrorCount);
420DEBUGFS_DEVSTATS_FILE(dot11RTSSuccessCount); 359DEBUGFS_DEVSTATS_FILE(dot11RTSSuccessCount);
421 360
422
423void debugfs_hw_add(struct ieee80211_local *local) 361void debugfs_hw_add(struct ieee80211_local *local)
424{ 362{
425 struct dentry *phyd = local->hw.wiphy->debugfsdir; 363 struct dentry *phyd = local->hw.wiphy->debugfsdir;
@@ -448,38 +386,60 @@ void debugfs_hw_add(struct ieee80211_local *local)
448 if (!statsd) 386 if (!statsd)
449 return; 387 return;
450 388
451 DEBUGFS_STATS_ADD(transmitted_fragment_count); 389 DEBUGFS_STATS_ADD(transmitted_fragment_count,
452 DEBUGFS_STATS_ADD(multicast_transmitted_frame_count); 390 local->dot11TransmittedFragmentCount);
453 DEBUGFS_STATS_ADD(failed_count); 391 DEBUGFS_STATS_ADD(multicast_transmitted_frame_count,
454 DEBUGFS_STATS_ADD(retry_count); 392 local->dot11MulticastTransmittedFrameCount);
455 DEBUGFS_STATS_ADD(multiple_retry_count); 393 DEBUGFS_STATS_ADD(failed_count, local->dot11FailedCount);
456 DEBUGFS_STATS_ADD(frame_duplicate_count); 394 DEBUGFS_STATS_ADD(retry_count, local->dot11RetryCount);
457 DEBUGFS_STATS_ADD(received_fragment_count); 395 DEBUGFS_STATS_ADD(multiple_retry_count,
458 DEBUGFS_STATS_ADD(multicast_received_frame_count); 396 local->dot11MultipleRetryCount);
459 DEBUGFS_STATS_ADD(transmitted_frame_count); 397 DEBUGFS_STATS_ADD(frame_duplicate_count,
398 local->dot11FrameDuplicateCount);
399 DEBUGFS_STATS_ADD(received_fragment_count,
400 local->dot11ReceivedFragmentCount);
401 DEBUGFS_STATS_ADD(multicast_received_frame_count,
402 local->dot11MulticastReceivedFrameCount);
403 DEBUGFS_STATS_ADD(transmitted_frame_count,
404 local->dot11TransmittedFrameCount);
460#ifdef CONFIG_MAC80211_DEBUG_COUNTERS 405#ifdef CONFIG_MAC80211_DEBUG_COUNTERS
461 DEBUGFS_STATS_ADD(tx_handlers_drop); 406 DEBUGFS_STATS_ADD(tx_handlers_drop, local->tx_handlers_drop);
462 DEBUGFS_STATS_ADD(tx_handlers_queued); 407 DEBUGFS_STATS_ADD(tx_handlers_queued, local->tx_handlers_queued);
463 DEBUGFS_STATS_ADD(tx_handlers_drop_unencrypted); 408 DEBUGFS_STATS_ADD(tx_handlers_drop_unencrypted,
464 DEBUGFS_STATS_ADD(tx_handlers_drop_fragment); 409 local->tx_handlers_drop_unencrypted);
465 DEBUGFS_STATS_ADD(tx_handlers_drop_wep); 410 DEBUGFS_STATS_ADD(tx_handlers_drop_fragment,
466 DEBUGFS_STATS_ADD(tx_handlers_drop_not_assoc); 411 local->tx_handlers_drop_fragment);
467 DEBUGFS_STATS_ADD(tx_handlers_drop_unauth_port); 412 DEBUGFS_STATS_ADD(tx_handlers_drop_wep,
468 DEBUGFS_STATS_ADD(rx_handlers_drop); 413 local->tx_handlers_drop_wep);
469 DEBUGFS_STATS_ADD(rx_handlers_queued); 414 DEBUGFS_STATS_ADD(tx_handlers_drop_not_assoc,
470 DEBUGFS_STATS_ADD(rx_handlers_drop_nullfunc); 415 local->tx_handlers_drop_not_assoc);
471 DEBUGFS_STATS_ADD(rx_handlers_drop_defrag); 416 DEBUGFS_STATS_ADD(tx_handlers_drop_unauth_port,
472 DEBUGFS_STATS_ADD(rx_handlers_drop_short); 417 local->tx_handlers_drop_unauth_port);
473 DEBUGFS_STATS_ADD(rx_handlers_drop_passive_scan); 418 DEBUGFS_STATS_ADD(rx_handlers_drop, local->rx_handlers_drop);
474 DEBUGFS_STATS_ADD(tx_expand_skb_head); 419 DEBUGFS_STATS_ADD(rx_handlers_queued, local->rx_handlers_queued);
475 DEBUGFS_STATS_ADD(tx_expand_skb_head_cloned); 420 DEBUGFS_STATS_ADD(rx_handlers_drop_nullfunc,
476 DEBUGFS_STATS_ADD(rx_expand_skb_head); 421 local->rx_handlers_drop_nullfunc);
477 DEBUGFS_STATS_ADD(rx_expand_skb_head2); 422 DEBUGFS_STATS_ADD(rx_handlers_drop_defrag,
478 DEBUGFS_STATS_ADD(rx_handlers_fragments); 423 local->rx_handlers_drop_defrag);
479 DEBUGFS_STATS_ADD(tx_status_drop); 424 DEBUGFS_STATS_ADD(rx_handlers_drop_short,
425 local->rx_handlers_drop_short);
426 DEBUGFS_STATS_ADD(rx_handlers_drop_passive_scan,
427 local->rx_handlers_drop_passive_scan);
428 DEBUGFS_STATS_ADD(tx_expand_skb_head,
429 local->tx_expand_skb_head);
430 DEBUGFS_STATS_ADD(tx_expand_skb_head_cloned,
431 local->tx_expand_skb_head_cloned);
432 DEBUGFS_STATS_ADD(rx_expand_skb_head,
433 local->rx_expand_skb_head);
434 DEBUGFS_STATS_ADD(rx_expand_skb_head2,
435 local->rx_expand_skb_head2);
436 DEBUGFS_STATS_ADD(rx_handlers_fragments,
437 local->rx_handlers_fragments);
438 DEBUGFS_STATS_ADD(tx_status_drop,
439 local->tx_status_drop);
480#endif 440#endif
481 DEBUGFS_STATS_ADD(dot11ACKFailureCount); 441 DEBUGFS_DEVSTATS_ADD(dot11ACKFailureCount);
482 DEBUGFS_STATS_ADD(dot11RTSFailureCount); 442 DEBUGFS_DEVSTATS_ADD(dot11RTSFailureCount);
483 DEBUGFS_STATS_ADD(dot11FCSErrorCount); 443 DEBUGFS_DEVSTATS_ADD(dot11FCSErrorCount);
484 DEBUGFS_STATS_ADD(dot11RTSSuccessCount); 444 DEBUGFS_DEVSTATS_ADD(dot11RTSSuccessCount);
485} 445}
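
The whole conversion works because debugfs_create_u32() exposes a live u32 variable directly, which makes the per-counter read handlers behind DEBUGFS_STATS_FILE redundant. A minimal module-style sketch of the same call (hypothetical names, error handling elided):

#include <linux/module.h>
#include <linux/debugfs.h>

static u32 demo_counter;
static struct dentry *demo_dir;

static int __init demo_init(void)
{
        demo_dir = debugfs_create_dir("demo_stats", NULL);
        /* file reads always reflect the current counter value */
        debugfs_create_u32("demo_counter", 0400, demo_dir, &demo_counter);
        return 0;
}

static void __exit demo_exit(void)
{
        debugfs_remove_recursive(demo_dir);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
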
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index 97c9e46e859e..fa5e76e658ef 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -143,7 +143,7 @@ static ssize_t key_rx_spec_read(struct file *file, char __user *userbuf,
143 len = p - buf; 143 len = p - buf;
144 break; 144 break;
145 case ALG_CCMP: 145 case ALG_CCMP:
146 for (i = 0; i < NUM_RX_DATA_QUEUES; i++) { 146 for (i = 0; i < NUM_RX_DATA_QUEUES + 1; i++) {
147 rpn = key->u.ccmp.rx_pn[i]; 147 rpn = key->u.ccmp.rx_pn[i];
148 p += scnprintf(p, sizeof(buf)+buf-p, 148 p += scnprintf(p, sizeof(buf)+buf-p,
149 "%02x%02x%02x%02x%02x%02x\n", 149 "%02x%02x%02x%02x%02x%02x\n",
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index e763f1529ddb..76839d4dfaac 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -30,7 +30,6 @@ static ssize_t sta_ ##name## _read(struct file *file, \
30} 30}
31#define STA_READ_D(name, field) STA_READ(name, 20, field, "%d\n") 31#define STA_READ_D(name, field) STA_READ(name, 20, field, "%d\n")
32#define STA_READ_U(name, field) STA_READ(name, 20, field, "%u\n") 32#define STA_READ_U(name, field) STA_READ(name, 20, field, "%u\n")
33#define STA_READ_LU(name, field) STA_READ(name, 20, field, "%lu\n")
34#define STA_READ_S(name, field) STA_READ(name, 20, field, "%s\n") 33#define STA_READ_S(name, field) STA_READ(name, 20, field, "%s\n")
35 34
36#define STA_OPS(name) \ 35#define STA_OPS(name) \
@@ -52,19 +51,7 @@ static const struct file_operations sta_ ##name## _ops = { \
52 51
53STA_FILE(aid, sta.aid, D); 52STA_FILE(aid, sta.aid, D);
54STA_FILE(dev, sdata->name, S); 53STA_FILE(dev, sdata->name, S);
55STA_FILE(rx_packets, rx_packets, LU);
56STA_FILE(tx_packets, tx_packets, LU);
57STA_FILE(rx_bytes, rx_bytes, LU);
58STA_FILE(tx_bytes, tx_bytes, LU);
59STA_FILE(rx_duplicates, num_duplicates, LU);
60STA_FILE(rx_fragments, rx_fragments, LU);
61STA_FILE(rx_dropped, rx_dropped, LU);
62STA_FILE(tx_fragments, tx_fragments, LU);
63STA_FILE(tx_filtered, tx_filtered_count, LU);
64STA_FILE(tx_retry_failed, tx_retry_failed, LU);
65STA_FILE(tx_retry_count, tx_retry_count, LU);
66STA_FILE(last_signal, last_signal, D); 54STA_FILE(last_signal, last_signal, D);
67STA_FILE(wep_weak_iv_count, wep_weak_iv_count, LU);
68 55
69static ssize_t sta_flags_read(struct file *file, char __user *userbuf, 56static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
70 size_t count, loff_t *ppos) 57 size_t count, loff_t *ppos)
@@ -134,28 +121,25 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
134 p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n", 121 p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
135 sta->ampdu_mlme.dialog_token_allocator + 1); 122 sta->ampdu_mlme.dialog_token_allocator + 1);
136 p += scnprintf(p, sizeof(buf) + buf - p, 123 p += scnprintf(p, sizeof(buf) + buf - p,
137 "TID\t\tRX active\tDTKN\tSSN\t\tTX\tDTKN\tSSN\tpending\n"); 124 "TID\t\tRX active\tDTKN\tSSN\t\tTX\tDTKN\tpending\n");
138 for (i = 0; i < STA_TID_NUM; i++) { 125 for (i = 0; i < STA_TID_NUM; i++) {
139 p += scnprintf(p, sizeof(buf) + buf - p, "%02d", i); 126 p += scnprintf(p, sizeof(buf) + buf - p, "%02d", i);
140 p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x", 127 p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x",
141 sta->ampdu_mlme.tid_active_rx[i]); 128 !!sta->ampdu_mlme.tid_rx[i]);
142 p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x", 129 p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x",
143 sta->ampdu_mlme.tid_active_rx[i] ? 130 sta->ampdu_mlme.tid_rx[i] ?
144 sta->ampdu_mlme.tid_rx[i]->dialog_token : 0); 131 sta->ampdu_mlme.tid_rx[i]->dialog_token : 0);
145 p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.3x", 132 p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.3x",
146 sta->ampdu_mlme.tid_active_rx[i] ? 133 sta->ampdu_mlme.tid_rx[i] ?
147 sta->ampdu_mlme.tid_rx[i]->ssn : 0); 134 sta->ampdu_mlme.tid_rx[i]->ssn : 0);
148 135
149 p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x", 136 p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x",
150 sta->ampdu_mlme.tid_state_tx[i]); 137 !!sta->ampdu_mlme.tid_tx[i]);
151 p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x", 138 p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x",
152 sta->ampdu_mlme.tid_state_tx[i] ? 139 sta->ampdu_mlme.tid_tx[i] ?
153 sta->ampdu_mlme.tid_tx[i]->dialog_token : 0); 140 sta->ampdu_mlme.tid_tx[i]->dialog_token : 0);
154 p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.3x",
155 sta->ampdu_mlme.tid_state_tx[i] ?
156 sta->ampdu_mlme.tid_tx[i]->ssn : 0);
157 p += scnprintf(p, sizeof(buf) + buf - p, "\t%03d", 141 p += scnprintf(p, sizeof(buf) + buf - p, "\t%03d",
158 sta->ampdu_mlme.tid_state_tx[i] ? 142 sta->ampdu_mlme.tid_tx[i] ?
159 skb_queue_len(&sta->ampdu_mlme.tid_tx[i]->pending) : 0); 143 skb_queue_len(&sta->ampdu_mlme.tid_tx[i]->pending) : 0);
160 p += scnprintf(p, sizeof(buf) + buf - p, "\n"); 144 p += scnprintf(p, sizeof(buf) + buf - p, "\n");
161 } 145 }
@@ -210,8 +194,7 @@ static ssize_t sta_agg_status_write(struct file *file, const char __user *userbu
210 if (start) 194 if (start)
211 ret = ieee80211_start_tx_ba_session(&sta->sta, tid); 195 ret = ieee80211_start_tx_ba_session(&sta->sta, tid);
212 else 196 else
213 ret = ieee80211_stop_tx_ba_session(&sta->sta, tid, 197 ret = ieee80211_stop_tx_ba_session(&sta->sta, tid);
214 WLAN_BACK_RECIPIENT);
215 } else { 198 } else {
216 __ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_RECIPIENT, 3); 199 __ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_RECIPIENT, 3);
217 ret = 0; 200 ret = 0;
@@ -307,6 +290,13 @@ STA_OPS(ht_capa);
307 debugfs_create_file(#name, 0400, \ 290 debugfs_create_file(#name, 0400, \
308 sta->debugfs.dir, sta, &sta_ ##name## _ops); 291 sta->debugfs.dir, sta, &sta_ ##name## _ops);
309 292
293#define DEBUGFS_ADD_COUNTER(name, field) \
294 if (sizeof(sta->field) == sizeof(u32)) \
295 debugfs_create_u32(#name, 0400, sta->debugfs.dir, \
296 (u32 *) &sta->field); \
297 else \
298 debugfs_create_u64(#name, 0400, sta->debugfs.dir, \
299 (u64 *) &sta->field);
310 300
311void ieee80211_sta_debugfs_add(struct sta_info *sta) 301void ieee80211_sta_debugfs_add(struct sta_info *sta)
312{ 302{
@@ -339,20 +329,21 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta)
339 DEBUGFS_ADD(last_seq_ctrl); 329 DEBUGFS_ADD(last_seq_ctrl);
340 DEBUGFS_ADD(agg_status); 330 DEBUGFS_ADD(agg_status);
341 DEBUGFS_ADD(dev); 331 DEBUGFS_ADD(dev);
342 DEBUGFS_ADD(rx_packets);
343 DEBUGFS_ADD(tx_packets);
344 DEBUGFS_ADD(rx_bytes);
345 DEBUGFS_ADD(tx_bytes);
346 DEBUGFS_ADD(rx_duplicates);
347 DEBUGFS_ADD(rx_fragments);
348 DEBUGFS_ADD(rx_dropped);
349 DEBUGFS_ADD(tx_fragments);
350 DEBUGFS_ADD(tx_filtered);
351 DEBUGFS_ADD(tx_retry_failed);
352 DEBUGFS_ADD(tx_retry_count);
353 DEBUGFS_ADD(last_signal); 332 DEBUGFS_ADD(last_signal);
354 DEBUGFS_ADD(wep_weak_iv_count);
355 DEBUGFS_ADD(ht_capa); 333 DEBUGFS_ADD(ht_capa);
334
335 DEBUGFS_ADD_COUNTER(rx_packets, rx_packets);
336 DEBUGFS_ADD_COUNTER(tx_packets, tx_packets);
337 DEBUGFS_ADD_COUNTER(rx_bytes, rx_bytes);
338 DEBUGFS_ADD_COUNTER(tx_bytes, tx_bytes);
339 DEBUGFS_ADD_COUNTER(rx_duplicates, num_duplicates);
340 DEBUGFS_ADD_COUNTER(rx_fragments, rx_fragments);
341 DEBUGFS_ADD_COUNTER(rx_dropped, rx_dropped);
342 DEBUGFS_ADD_COUNTER(tx_fragments, tx_fragments);
343 DEBUGFS_ADD_COUNTER(tx_filtered, tx_filtered_count);
344 DEBUGFS_ADD_COUNTER(tx_retry_failed, tx_retry_failed);
345 DEBUGFS_ADD_COUNTER(tx_retry_count, tx_retry_count);
346 DEBUGFS_ADD_COUNTER(wep_weak_iv_count, wep_weak_iv_count);
356} 347}
357 348
358void ieee80211_sta_debugfs_remove(struct sta_info *sta) 349void ieee80211_sta_debugfs_remove(struct sta_info *sta)
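
DEBUGFS_ADD_COUNTER picks debugfs_create_u32() or debugfs_create_u64() from sizeof(field), so the unsigned long station counters get the correct width on both 32-bit and 64-bit builds. A userspace model of the sizeof dispatch:

#include <stdio.h>

#define ADD_COUNTER(field)                                      \
        printf(#field ": %s file\n",                            \
               sizeof(field) == sizeof(unsigned int) ? "u32" : "u64")

int main(void)
{
        unsigned long rx_packets = 0;           /* width varies by arch */
        unsigned long long tx_bytes = 0;        /* always 64-bit */

        ADD_COUNTER(rx_packets);        /* u32 on ILP32, u64 on LP64 */
        ADD_COUNTER(tx_bytes);          /* u64 everywhere */
        return 0;
}
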
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 9c1da0809160..14123dce544b 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -16,10 +16,11 @@ static inline int drv_start(struct ieee80211_local *local)
16 16
17 might_sleep(); 17 might_sleep();
18 18
19 trace_drv_start(local);
19 local->started = true; 20 local->started = true;
20 smp_mb(); 21 smp_mb();
21 ret = local->ops->start(&local->hw); 22 ret = local->ops->start(&local->hw);
22 trace_drv_start(local, ret); 23 trace_drv_return_int(local, ret);
23 return ret; 24 return ret;
24} 25}
25 26
@@ -27,8 +28,9 @@ static inline void drv_stop(struct ieee80211_local *local)
27{ 28{
28 might_sleep(); 29 might_sleep();
29 30
30 local->ops->stop(&local->hw);
31 trace_drv_stop(local); 31 trace_drv_stop(local);
32 local->ops->stop(&local->hw);
33 trace_drv_return_void(local);
32 34
33 /* sync away all work on the tasklet before clearing started */ 35 /* sync away all work on the tasklet before clearing started */
34 tasklet_disable(&local->tasklet); 36 tasklet_disable(&local->tasklet);
@@ -46,8 +48,9 @@ static inline int drv_add_interface(struct ieee80211_local *local,
46 48
47 might_sleep(); 49 might_sleep();
48 50
51 trace_drv_add_interface(local, vif_to_sdata(vif));
49 ret = local->ops->add_interface(&local->hw, vif); 52 ret = local->ops->add_interface(&local->hw, vif);
50 trace_drv_add_interface(local, vif_to_sdata(vif), ret); 53 trace_drv_return_int(local, ret);
51 return ret; 54 return ret;
52} 55}
53 56
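
The reordering in these wrappers is systematic: the argument tracepoint now fires before the driver op, and a separate trace_drv_return_* records completion, so the trace still shows the entry even if the callback sleeps or never returns. A runnable sketch of the call/return bracketing (invented names, printf standing in for tracepoints):

#include <stdio.h>

struct ops { int (*demo_op)(int arg); };
struct local { const struct ops *ops; };

static void trace(const char *event, int value)
{
        printf("trace: %s %d\n", event, value);
}

/* wrapper bracketing the driver op, like the drv_*() helpers above */
static int drv_demo_op(struct local *local, int arg)
{
        int ret;

        trace("demo_op entry", arg);    /* args logged before the call */
        ret = local->ops->demo_op(arg); /* driver callback may sleep */
        trace("demo_op return", ret);   /* result logged on completion */
        return ret;
}

static int demo_op(int arg)
{
        return arg * 2;
}

int main(void)
{
        const struct ops o = { .demo_op = demo_op };
        struct local l = { .ops = &o };

        return drv_demo_op(&l, 21) == 42 ? 0 : 1;
}
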
@@ -56,8 +59,9 @@ static inline void drv_remove_interface(struct ieee80211_local *local,
56{ 59{
57 might_sleep(); 60 might_sleep();
58 61
59 local->ops->remove_interface(&local->hw, vif);
60 trace_drv_remove_interface(local, vif_to_sdata(vif)); 62 trace_drv_remove_interface(local, vif_to_sdata(vif));
63 local->ops->remove_interface(&local->hw, vif);
64 trace_drv_return_void(local);
61} 65}
62 66
63static inline int drv_config(struct ieee80211_local *local, u32 changed) 67static inline int drv_config(struct ieee80211_local *local, u32 changed)
@@ -66,8 +70,9 @@ static inline int drv_config(struct ieee80211_local *local, u32 changed)
66 70
67 might_sleep(); 71 might_sleep();
68 72
73 trace_drv_config(local, changed);
69 ret = local->ops->config(&local->hw, changed); 74 ret = local->ops->config(&local->hw, changed);
70 trace_drv_config(local, changed, ret); 75 trace_drv_return_int(local, ret);
71 return ret; 76 return ret;
72} 77}
73 78
@@ -78,9 +83,10 @@ static inline void drv_bss_info_changed(struct ieee80211_local *local,
78{ 83{
79 might_sleep(); 84 might_sleep();
80 85
86 trace_drv_bss_info_changed(local, sdata, info, changed);
81 if (local->ops->bss_info_changed) 87 if (local->ops->bss_info_changed)
82 local->ops->bss_info_changed(&local->hw, &sdata->vif, info, changed); 88 local->ops->bss_info_changed(&local->hw, &sdata->vif, info, changed);
83 trace_drv_bss_info_changed(local, sdata, info, changed); 89 trace_drv_return_void(local);
84} 90}
85 91
 static inline u64 drv_prepare_multicast(struct ieee80211_local *local,
@@ -88,10 +94,12 @@ static inline u64 drv_prepare_multicast(struct ieee80211_local *local,
 {
 	u64 ret = 0;
 
+	trace_drv_prepare_multicast(local, mc_list->count);
+
 	if (local->ops->prepare_multicast)
 		ret = local->ops->prepare_multicast(&local->hw, mc_list);
 
-	trace_drv_prepare_multicast(local, mc_list->count, ret);
+	trace_drv_return_u64(local, ret);
 
 	return ret;
 }
@@ -103,19 +111,21 @@ static inline void drv_configure_filter(struct ieee80211_local *local,
 {
 	might_sleep();
 
-	local->ops->configure_filter(&local->hw, changed_flags, total_flags,
-				     multicast);
 	trace_drv_configure_filter(local, changed_flags, total_flags,
 				   multicast);
+	local->ops->configure_filter(&local->hw, changed_flags, total_flags,
+				     multicast);
+	trace_drv_return_void(local);
 }
 
 static inline int drv_set_tim(struct ieee80211_local *local,
 			      struct ieee80211_sta *sta, bool set)
 {
 	int ret = 0;
+	trace_drv_set_tim(local, sta, set);
 	if (local->ops->set_tim)
 		ret = local->ops->set_tim(&local->hw, sta, set);
-	trace_drv_set_tim(local, sta, set, ret);
+	trace_drv_return_int(local, ret);
 	return ret;
 }
 
@@ -129,8 +139,9 @@ static inline int drv_set_key(struct ieee80211_local *local,
 
 	might_sleep();
 
+	trace_drv_set_key(local, cmd, sdata, sta, key);
 	ret = local->ops->set_key(&local->hw, cmd, &sdata->vif, sta, key);
-	trace_drv_set_key(local, cmd, sdata, sta, key, ret);
+	trace_drv_return_int(local, ret);
 	return ret;
 }
 
@@ -145,10 +156,11 @@ static inline void drv_update_tkip_key(struct ieee80211_local *local,
 	if (sta)
 		ista = &sta->sta;
 
+	trace_drv_update_tkip_key(local, sdata, conf, ista, iv32);
 	if (local->ops->update_tkip_key)
 		local->ops->update_tkip_key(&local->hw, &sdata->vif, conf,
 					    ista, iv32, phase1key);
-	trace_drv_update_tkip_key(local, sdata, conf, ista, iv32);
+	trace_drv_return_void(local);
 }
 
 static inline int drv_hw_scan(struct ieee80211_local *local,
@@ -159,8 +171,9 @@ static inline int drv_hw_scan(struct ieee80211_local *local,
 
 	might_sleep();
 
+	trace_drv_hw_scan(local, sdata, req);
 	ret = local->ops->hw_scan(&local->hw, &sdata->vif, req);
-	trace_drv_hw_scan(local, sdata, req, ret);
+	trace_drv_return_int(local, ret);
 	return ret;
 }
 
@@ -168,18 +181,20 @@ static inline void drv_sw_scan_start(struct ieee80211_local *local)
 {
 	might_sleep();
 
+	trace_drv_sw_scan_start(local);
 	if (local->ops->sw_scan_start)
 		local->ops->sw_scan_start(&local->hw);
-	trace_drv_sw_scan_start(local);
+	trace_drv_return_void(local);
 }
 
 static inline void drv_sw_scan_complete(struct ieee80211_local *local)
 {
 	might_sleep();
 
+	trace_drv_sw_scan_complete(local);
 	if (local->ops->sw_scan_complete)
 		local->ops->sw_scan_complete(&local->hw);
-	trace_drv_sw_scan_complete(local);
+	trace_drv_return_void(local);
 }
 
 static inline int drv_get_stats(struct ieee80211_local *local,
@@ -211,9 +226,10 @@ static inline int drv_set_rts_threshold(struct ieee80211_local *local,
 
 	might_sleep();
 
+	trace_drv_set_rts_threshold(local, value);
 	if (local->ops->set_rts_threshold)
 		ret = local->ops->set_rts_threshold(&local->hw, value);
-	trace_drv_set_rts_threshold(local, value, ret);
+	trace_drv_return_int(local, ret);
 	return ret;
 }
 
@@ -223,12 +239,13 @@ static inline int drv_set_coverage_class(struct ieee80211_local *local,
 	int ret = 0;
 	might_sleep();
 
+	trace_drv_set_coverage_class(local, value);
 	if (local->ops->set_coverage_class)
 		local->ops->set_coverage_class(&local->hw, value);
 	else
 		ret = -EOPNOTSUPP;
 
-	trace_drv_set_coverage_class(local, value, ret);
+	trace_drv_return_int(local, ret);
 	return ret;
 }
 
@@ -237,9 +254,10 @@ static inline void drv_sta_notify(struct ieee80211_local *local,
 				  enum sta_notify_cmd cmd,
 				  struct ieee80211_sta *sta)
 {
+	trace_drv_sta_notify(local, sdata, cmd, sta);
 	if (local->ops->sta_notify)
 		local->ops->sta_notify(&local->hw, &sdata->vif, cmd, sta);
-	trace_drv_sta_notify(local, sdata, cmd, sta);
+	trace_drv_return_void(local);
 }
 
 static inline int drv_sta_add(struct ieee80211_local *local,
@@ -250,13 +268,11 @@ static inline int drv_sta_add(struct ieee80211_local *local,
 
 	might_sleep();
 
+	trace_drv_sta_add(local, sdata, sta);
 	if (local->ops->sta_add)
 		ret = local->ops->sta_add(&local->hw, &sdata->vif, sta);
-	else if (local->ops->sta_notify)
-		local->ops->sta_notify(&local->hw, &sdata->vif,
-				       STA_NOTIFY_ADD, sta);
 
-	trace_drv_sta_add(local, sdata, sta, ret);
+	trace_drv_return_int(local, ret);
 
 	return ret;
 }
@@ -267,13 +283,11 @@ static inline void drv_sta_remove(struct ieee80211_local *local,
 {
 	might_sleep();
 
+	trace_drv_sta_remove(local, sdata, sta);
 	if (local->ops->sta_remove)
 		local->ops->sta_remove(&local->hw, &sdata->vif, sta);
-	else if (local->ops->sta_notify)
-		local->ops->sta_notify(&local->hw, &sdata->vif,
-				       STA_NOTIFY_REMOVE, sta);
 
-	trace_drv_sta_remove(local, sdata, sta);
+	trace_drv_return_void(local);
 }
 
 static inline int drv_conf_tx(struct ieee80211_local *local, u16 queue,
@@ -283,9 +297,10 @@ static inline int drv_conf_tx(struct ieee80211_local *local, u16 queue,
 
 	might_sleep();
 
+	trace_drv_conf_tx(local, queue, params);
 	if (local->ops->conf_tx)
 		ret = local->ops->conf_tx(&local->hw, queue, params);
-	trace_drv_conf_tx(local, queue, params, ret);
+	trace_drv_return_int(local, ret);
 	return ret;
 }
 
@@ -295,9 +310,10 @@ static inline u64 drv_get_tsf(struct ieee80211_local *local)
 
 	might_sleep();
 
+	trace_drv_get_tsf(local);
 	if (local->ops->get_tsf)
 		ret = local->ops->get_tsf(&local->hw);
-	trace_drv_get_tsf(local, ret);
+	trace_drv_return_u64(local, ret);
 	return ret;
 }
 
@@ -305,18 +321,20 @@ static inline void drv_set_tsf(struct ieee80211_local *local, u64 tsf)
 {
 	might_sleep();
 
+	trace_drv_set_tsf(local, tsf);
 	if (local->ops->set_tsf)
 		local->ops->set_tsf(&local->hw, tsf);
-	trace_drv_set_tsf(local, tsf);
+	trace_drv_return_void(local);
 }
 
 static inline void drv_reset_tsf(struct ieee80211_local *local)
 {
 	might_sleep();
 
+	trace_drv_reset_tsf(local);
 	if (local->ops->reset_tsf)
 		local->ops->reset_tsf(&local->hw);
-	trace_drv_reset_tsf(local);
+	trace_drv_return_void(local);
 }
 
 static inline int drv_tx_last_beacon(struct ieee80211_local *local)
@@ -325,9 +343,10 @@ static inline int drv_tx_last_beacon(struct ieee80211_local *local)
 
 	might_sleep();
 
+	trace_drv_tx_last_beacon(local);
 	if (local->ops->tx_last_beacon)
 		ret = local->ops->tx_last_beacon(&local->hw);
-	trace_drv_tx_last_beacon(local, ret);
+	trace_drv_return_int(local, ret);
 	return ret;
 }
 
@@ -338,10 +357,17 @@ static inline int drv_ampdu_action(struct ieee80211_local *local,
 				   u16 *ssn)
 {
 	int ret = -EOPNOTSUPP;
+
+	might_sleep();
+
+	trace_drv_ampdu_action(local, sdata, action, sta, tid, ssn);
+
 	if (local->ops->ampdu_action)
 		ret = local->ops->ampdu_action(&local->hw, &sdata->vif, action,
 					       sta, tid, ssn);
-	trace_drv_ampdu_action(local, sdata, action, sta, tid, ssn, ret);
+
+	trace_drv_return_int(local, ret);
+
 	return ret;
 }
 
@@ -349,9 +375,14 @@ static inline int drv_get_survey(struct ieee80211_local *local, int idx,
 				 struct survey_info *survey)
 {
 	int ret = -EOPNOTSUPP;
+
+	trace_drv_get_survey(local, idx, survey);
+
 	if (local->ops->get_survey)
 		ret = local->ops->get_survey(&local->hw, idx, survey);
-	/* trace_drv_get_survey(local, idx, survey, ret); */
+
+	trace_drv_return_int(local, ret);
+
 	return ret;
 }
 
@@ -370,6 +401,7 @@ static inline void drv_flush(struct ieee80211_local *local, bool drop)
 	trace_drv_flush(local, drop);
 	if (local->ops->flush)
 		local->ops->flush(&local->hw, drop);
+	trace_drv_return_void(local);
 }
 
 static inline void drv_channel_switch(struct ieee80211_local *local,
@@ -377,9 +409,9 @@ static inline void drv_channel_switch(struct ieee80211_local *local,
 {
 	might_sleep();
 
-	local->ops->channel_switch(&local->hw, ch_switch);
-
 	trace_drv_channel_switch(local, ch_switch);
+	local->ops->channel_switch(&local->hw, ch_switch);
+	trace_drv_return_void(local);
 }
 
 #endif /* __MAC80211_DRIVER_OPS */
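The change above applies one uniform pattern to every drv_*() wrapper: the per-callback trace event fires before the driver op with only the call arguments, and the result is reported afterwards through a shared drv_return_int/u64/void event. As a minimal sketch of that shape, assuming a hypothetical driver op do_foo and a matching trace_drv_do_foo entry event (neither exists in mac80211):

static inline int drv_do_foo(struct ieee80211_local *local, int arg)
{
	int ret = -EOPNOTSUPP;

	might_sleep();

	/* entry event: arguments only, no return value */
	trace_drv_do_foo(local, arg);
	if (local->ops->do_foo)
		ret = local->ops->do_foo(&local->hw, arg);
	/* shared exit event carries the return value */
	trace_drv_return_int(local, ret);

	return ret;
}

One practical effect is that the entry event is now emitted even when the driver does not implement the op, and a trace consumer pairs each entry with the next drv_return_* record from the same wrapper.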
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h
index 6a9b2342a9c2..5d5d2a974668 100644
--- a/net/mac80211/driver-trace.h
+++ b/net/mac80211/driver-trace.h
@@ -36,20 +36,58 @@ static inline void trace_ ## name(proto) {}
  * Tracing for driver callbacks.
  */
 
-TRACE_EVENT(drv_start,
-	TP_PROTO(struct ieee80211_local *local, int ret),
+TRACE_EVENT(drv_return_void,
+	TP_PROTO(struct ieee80211_local *local),
+	TP_ARGS(local),
+	TP_STRUCT__entry(
+		LOCAL_ENTRY
+	),
+	TP_fast_assign(
+		LOCAL_ASSIGN;
+	),
+	TP_printk(LOCAL_PR_FMT, LOCAL_PR_ARG)
+);
 
+TRACE_EVENT(drv_return_int,
+	TP_PROTO(struct ieee80211_local *local, int ret),
 	TP_ARGS(local, ret),
-
 	TP_STRUCT__entry(
 		LOCAL_ENTRY
 		__field(int, ret)
 	),
+	TP_fast_assign(
+		LOCAL_ASSIGN;
+		__entry->ret = ret;
+	),
+	TP_printk(LOCAL_PR_FMT " - %d", LOCAL_PR_ARG, __entry->ret)
+);
 
+TRACE_EVENT(drv_return_u64,
+	TP_PROTO(struct ieee80211_local *local, u64 ret),
+	TP_ARGS(local, ret),
+	TP_STRUCT__entry(
+		LOCAL_ENTRY
+		__field(u64, ret)
+	),
 	TP_fast_assign(
 		LOCAL_ASSIGN;
 		__entry->ret = ret;
 	),
+	TP_printk(LOCAL_PR_FMT " - %llu", LOCAL_PR_ARG, __entry->ret)
+);
+
+TRACE_EVENT(drv_start,
+	TP_PROTO(struct ieee80211_local *local),
+
+	TP_ARGS(local),
+
+	TP_STRUCT__entry(
+		LOCAL_ENTRY
+	),
+
+	TP_fast_assign(
+		LOCAL_ASSIGN;
+	),
 
 	TP_printk(
 		LOCAL_PR_FMT, LOCAL_PR_ARG
@@ -76,28 +114,25 @@ TRACE_EVENT(drv_stop,
 
 TRACE_EVENT(drv_add_interface,
 	TP_PROTO(struct ieee80211_local *local,
-		 struct ieee80211_sub_if_data *sdata,
-		 int ret),
+		 struct ieee80211_sub_if_data *sdata),
 
-	TP_ARGS(local, sdata, ret),
+	TP_ARGS(local, sdata),
 
 	TP_STRUCT__entry(
 		LOCAL_ENTRY
 		VIF_ENTRY
 		__array(char, addr, 6)
-		__field(int, ret)
 	),
 
 	TP_fast_assign(
 		LOCAL_ASSIGN;
 		VIF_ASSIGN;
 		memcpy(__entry->addr, sdata->vif.addr, 6);
-		__entry->ret = ret;
 	),
 
 	TP_printk(
-		LOCAL_PR_FMT VIF_PR_FMT " addr:%pM ret:%d",
-		LOCAL_PR_ARG, VIF_PR_ARG, __entry->addr, __entry->ret
+		LOCAL_PR_FMT VIF_PR_FMT " addr:%pM",
+		LOCAL_PR_ARG, VIF_PR_ARG, __entry->addr
 	)
 );
 
@@ -126,15 +161,13 @@ TRACE_EVENT(drv_remove_interface,
 
 TRACE_EVENT(drv_config,
 	TP_PROTO(struct ieee80211_local *local,
-		 u32 changed,
-		 int ret),
+		 u32 changed),
 
-	TP_ARGS(local, changed, ret),
+	TP_ARGS(local, changed),
 
 	TP_STRUCT__entry(
 		LOCAL_ENTRY
 		__field(u32, changed)
-		__field(int, ret)
 		__field(u32, flags)
 		__field(int, power_level)
 		__field(int, dynamic_ps_timeout)
@@ -150,7 +183,6 @@ TRACE_EVENT(drv_config,
 	TP_fast_assign(
 		LOCAL_ASSIGN;
 		__entry->changed = changed;
-		__entry->ret = ret;
 		__entry->flags = local->hw.conf.flags;
 		__entry->power_level = local->hw.conf.power_level;
 		__entry->dynamic_ps_timeout = local->hw.conf.dynamic_ps_timeout;
@@ -164,8 +196,8 @@ TRACE_EVENT(drv_config,
 	),
 
 	TP_printk(
-		LOCAL_PR_FMT " ch:%#x freq:%d ret:%d",
-		LOCAL_PR_ARG, __entry->changed, __entry->center_freq, __entry->ret
+		LOCAL_PR_FMT " ch:%#x freq:%d",
+		LOCAL_PR_ARG, __entry->changed, __entry->center_freq
 	)
 );
 
@@ -220,26 +252,23 @@ TRACE_EVENT(drv_bss_info_changed,
 );
 
 TRACE_EVENT(drv_prepare_multicast,
-	TP_PROTO(struct ieee80211_local *local, int mc_count, u64 ret),
+	TP_PROTO(struct ieee80211_local *local, int mc_count),
 
-	TP_ARGS(local, mc_count, ret),
+	TP_ARGS(local, mc_count),
 
 	TP_STRUCT__entry(
 		LOCAL_ENTRY
 		__field(int, mc_count)
-		__field(u64, ret)
 	),
 
 	TP_fast_assign(
 		LOCAL_ASSIGN;
 		__entry->mc_count = mc_count;
-		__entry->ret = ret;
 	),
 
 	TP_printk(
-		LOCAL_PR_FMT " prepare mc (%d): %llx",
-		LOCAL_PR_ARG, __entry->mc_count,
-		(unsigned long long) __entry->ret
+		LOCAL_PR_FMT " prepare mc (%d)",
+		LOCAL_PR_ARG, __entry->mc_count
 	)
 );
 
@@ -273,27 +302,25 @@ TRACE_EVENT(drv_configure_filter,
 
 TRACE_EVENT(drv_set_tim,
 	TP_PROTO(struct ieee80211_local *local,
-		 struct ieee80211_sta *sta, bool set, int ret),
+		 struct ieee80211_sta *sta, bool set),
 
-	TP_ARGS(local, sta, set, ret),
+	TP_ARGS(local, sta, set),
 
 	TP_STRUCT__entry(
 		LOCAL_ENTRY
 		STA_ENTRY
 		__field(bool, set)
-		__field(int, ret)
 	),
 
 	TP_fast_assign(
 		LOCAL_ASSIGN;
 		STA_ASSIGN;
 		__entry->set = set;
-		__entry->ret = ret;
 	),
 
 	TP_printk(
-		LOCAL_PR_FMT STA_PR_FMT " set:%d ret:%d",
-		LOCAL_PR_ARG, STA_PR_FMT, __entry->set, __entry->ret
+		LOCAL_PR_FMT STA_PR_FMT " set:%d",
+		LOCAL_PR_ARG, STA_PR_FMT, __entry->set
 	)
 );
 
@@ -301,9 +328,9 @@ TRACE_EVENT(drv_set_key,
 	TP_PROTO(struct ieee80211_local *local,
 		 enum set_key_cmd cmd, struct ieee80211_sub_if_data *sdata,
 		 struct ieee80211_sta *sta,
-		 struct ieee80211_key_conf *key, int ret),
+		 struct ieee80211_key_conf *key),
 
-	TP_ARGS(local, cmd, sdata, sta, key, ret),
+	TP_ARGS(local, cmd, sdata, sta, key),
 
 	TP_STRUCT__entry(
 		LOCAL_ENTRY
@@ -313,7 +340,6 @@ TRACE_EVENT(drv_set_key,
 		__field(u8, hw_key_idx)
 		__field(u8, flags)
 		__field(s8, keyidx)
-		__field(int, ret)
 	),
 
 	TP_fast_assign(
@@ -324,12 +350,11 @@ TRACE_EVENT(drv_set_key,
 		__entry->flags = key->flags;
 		__entry->keyidx = key->keyidx;
 		__entry->hw_key_idx = key->hw_key_idx;
-		__entry->ret = ret;
 	),
 
 	TP_printk(
-		LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " ret:%d",
-		LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->ret
+		LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT,
+		LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG
	)
 );
 
@@ -364,25 +389,23 @@ TRACE_EVENT(drv_update_tkip_key,
 TRACE_EVENT(drv_hw_scan,
 	TP_PROTO(struct ieee80211_local *local,
 		 struct ieee80211_sub_if_data *sdata,
-		 struct cfg80211_scan_request *req, int ret),
+		 struct cfg80211_scan_request *req),
 
-	TP_ARGS(local, sdata, req, ret),
+	TP_ARGS(local, sdata, req),
 
 	TP_STRUCT__entry(
 		LOCAL_ENTRY
 		VIF_ENTRY
-		__field(int, ret)
 	),
 
 	TP_fast_assign(
 		LOCAL_ASSIGN;
 		VIF_ASSIGN;
-		__entry->ret = ret;
 	),
 
 	TP_printk(
-		LOCAL_PR_FMT VIF_PR_FMT " ret:%d",
-		LOCAL_PR_ARG,VIF_PR_ARG, __entry->ret
+		LOCAL_PR_FMT VIF_PR_FMT,
+		LOCAL_PR_ARG,VIF_PR_ARG
 	)
 );
 
@@ -479,48 +502,44 @@ TRACE_EVENT(drv_get_tkip_seq,
 );
 
 TRACE_EVENT(drv_set_rts_threshold,
-	TP_PROTO(struct ieee80211_local *local, u32 value, int ret),
+	TP_PROTO(struct ieee80211_local *local, u32 value),
 
-	TP_ARGS(local, value, ret),
+	TP_ARGS(local, value),
 
 	TP_STRUCT__entry(
 		LOCAL_ENTRY
 		__field(u32, value)
-		__field(int, ret)
 	),
 
 	TP_fast_assign(
 		LOCAL_ASSIGN;
-		__entry->ret = ret;
 		__entry->value = value;
 	),
 
 	TP_printk(
-		LOCAL_PR_FMT " value:%d ret:%d",
-		LOCAL_PR_ARG, __entry->value, __entry->ret
+		LOCAL_PR_FMT " value:%d",
+		LOCAL_PR_ARG, __entry->value
 	)
 );
 
 TRACE_EVENT(drv_set_coverage_class,
-	TP_PROTO(struct ieee80211_local *local, u8 value, int ret),
+	TP_PROTO(struct ieee80211_local *local, u8 value),
 
-	TP_ARGS(local, value, ret),
+	TP_ARGS(local, value),
 
 	TP_STRUCT__entry(
 		LOCAL_ENTRY
 		__field(u8, value)
-		__field(int, ret)
 	),
 
 	TP_fast_assign(
 		LOCAL_ASSIGN;
-		__entry->ret = ret;
 		__entry->value = value;
 	),
 
 	TP_printk(
-		LOCAL_PR_FMT " value:%d ret:%d",
-		LOCAL_PR_ARG, __entry->value, __entry->ret
+		LOCAL_PR_FMT " value:%d",
+		LOCAL_PR_ARG, __entry->value
 	)
 );
 
@@ -555,27 +574,25 @@ TRACE_EVENT(drv_sta_notify,
 TRACE_EVENT(drv_sta_add,
 	TP_PROTO(struct ieee80211_local *local,
 		 struct ieee80211_sub_if_data *sdata,
-		 struct ieee80211_sta *sta, int ret),
+		 struct ieee80211_sta *sta),
 
-	TP_ARGS(local, sdata, sta, ret),
+	TP_ARGS(local, sdata, sta),
 
 	TP_STRUCT__entry(
 		LOCAL_ENTRY
 		VIF_ENTRY
 		STA_ENTRY
-		__field(int, ret)
 	),
 
 	TP_fast_assign(
 		LOCAL_ASSIGN;
 		VIF_ASSIGN;
 		STA_ASSIGN;
-		__entry->ret = ret;
 	),
 
 	TP_printk(
-		LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " ret:%d",
-		LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->ret
+		LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT,
+		LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG
	)
 );
 
@@ -606,10 +623,9 @@ TRACE_EVENT(drv_sta_remove,
 
 TRACE_EVENT(drv_conf_tx,
 	TP_PROTO(struct ieee80211_local *local, u16 queue,
-		 const struct ieee80211_tx_queue_params *params,
-		 int ret),
+		 const struct ieee80211_tx_queue_params *params),
 
-	TP_ARGS(local, queue, params, ret),
+	TP_ARGS(local, queue, params),
 
 	TP_STRUCT__entry(
 		LOCAL_ENTRY
@@ -618,13 +634,11 @@ TRACE_EVENT(drv_conf_tx,
 		__field(u16, cw_min)
 		__field(u16, cw_max)
 		__field(u8, aifs)
-		__field(int, ret)
 	),
 
 	TP_fast_assign(
 		LOCAL_ASSIGN;
 		__entry->queue = queue;
-		__entry->ret = ret;
 		__entry->txop = params->txop;
 		__entry->cw_max = params->cw_max;
 		__entry->cw_min = params->cw_min;
@@ -632,29 +646,27 @@ TRACE_EVENT(drv_conf_tx,
 	),
 
 	TP_printk(
-		LOCAL_PR_FMT " queue:%d ret:%d",
-		LOCAL_PR_ARG, __entry->queue, __entry->ret
+		LOCAL_PR_FMT " queue:%d",
+		LOCAL_PR_ARG, __entry->queue
 	)
 );
 
 TRACE_EVENT(drv_get_tsf,
-	TP_PROTO(struct ieee80211_local *local, u64 ret),
+	TP_PROTO(struct ieee80211_local *local),
 
-	TP_ARGS(local, ret),
+	TP_ARGS(local),
 
 	TP_STRUCT__entry(
 		LOCAL_ENTRY
-		__field(u64, ret)
 	),
 
 	TP_fast_assign(
 		LOCAL_ASSIGN;
-		__entry->ret = ret;
 	),
 
 	TP_printk(
-		LOCAL_PR_FMT " ret:%llu",
-		LOCAL_PR_ARG, (unsigned long long)__entry->ret
+		LOCAL_PR_FMT,
+		LOCAL_PR_ARG
 	)
 );
 
@@ -698,23 +710,21 @@ TRACE_EVENT(drv_reset_tsf,
 );
 
 TRACE_EVENT(drv_tx_last_beacon,
-	TP_PROTO(struct ieee80211_local *local, int ret),
+	TP_PROTO(struct ieee80211_local *local),
 
-	TP_ARGS(local, ret),
+	TP_ARGS(local),
 
 	TP_STRUCT__entry(
 		LOCAL_ENTRY
-		__field(int, ret)
 	),
 
 	TP_fast_assign(
 		LOCAL_ASSIGN;
-		__entry->ret = ret;
 	),
 
 	TP_printk(
-		LOCAL_PR_FMT " ret:%d",
-		LOCAL_PR_ARG, __entry->ret
+		LOCAL_PR_FMT,
+		LOCAL_PR_ARG
 	)
 );
 
@@ -723,9 +733,9 @@ TRACE_EVENT(drv_ampdu_action,
 		 struct ieee80211_sub_if_data *sdata,
 		 enum ieee80211_ampdu_mlme_action action,
 		 struct ieee80211_sta *sta, u16 tid,
-		 u16 *ssn, int ret),
+		 u16 *ssn),
 
-	TP_ARGS(local, sdata, action, sta, tid, ssn, ret),
+	TP_ARGS(local, sdata, action, sta, tid, ssn),
 
 	TP_STRUCT__entry(
 		LOCAL_ENTRY
@@ -733,7 +743,6 @@ TRACE_EVENT(drv_ampdu_action,
 		__field(u32, action)
 		__field(u16, tid)
 		__field(u16, ssn)
-		__field(int, ret)
 		VIF_ENTRY
 	),
 
@@ -741,15 +750,36 @@ TRACE_EVENT(drv_ampdu_action,
 		LOCAL_ASSIGN;
 		VIF_ASSIGN;
 		STA_ASSIGN;
-		__entry->ret = ret;
 		__entry->action = action;
 		__entry->tid = tid;
 		__entry->ssn = ssn ? *ssn : 0;
 	),
 
 	TP_printk(
-		LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " action:%d tid:%d ret:%d",
-		LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->action, __entry->tid, __entry->ret
+		LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " action:%d tid:%d",
+		LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->action, __entry->tid
+	)
+);
+
+TRACE_EVENT(drv_get_survey,
+	TP_PROTO(struct ieee80211_local *local, int idx,
+		 struct survey_info *survey),
+
+	TP_ARGS(local, idx, survey),
+
+	TP_STRUCT__entry(
+		LOCAL_ENTRY
+		__field(int, idx)
+	),
+
+	TP_fast_assign(
+		LOCAL_ASSIGN;
+		__entry->idx = idx;
+	),
+
+	TP_printk(
+		LOCAL_PR_FMT " idx:%d",
+		LOCAL_PR_ARG, __entry->idx
 	)
 );
 
@@ -851,25 +881,23 @@ TRACE_EVENT(api_start_tx_ba_cb,
 );
 
 TRACE_EVENT(api_stop_tx_ba_session,
-	TP_PROTO(struct ieee80211_sta *sta, u16 tid, u16 initiator),
+	TP_PROTO(struct ieee80211_sta *sta, u16 tid),
 
-	TP_ARGS(sta, tid, initiator),
+	TP_ARGS(sta, tid),
 
 	TP_STRUCT__entry(
 		STA_ENTRY
 		__field(u16, tid)
-		__field(u16, initiator)
 	),
 
 	TP_fast_assign(
 		STA_ASSIGN;
 		__entry->tid = tid;
-		__entry->initiator = initiator;
 	),
 
 	TP_printk(
-		STA_PR_FMT " tid:%d initiator:%d",
-		STA_PR_ARG, __entry->tid, __entry->initiator
+		STA_PR_FMT " tid:%d",
+		STA_PR_ARG, __entry->tid
 	)
 );
 
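With the return value moved out of the per-callback events, a single callback invocation now shows up as two consecutive records in the trace buffer. Purely as an illustration of the TP_printk formats defined above (the wiphy name and values are made up, and exact record layout depends on the tracing infrastructure), a drv_set_rts_threshold() call might render as:

	drv_set_rts_threshold: phy0 value:2347
	drv_return_int:        phy0 - 0

The three shared drv_return_* events are also what let every per-callback definition drop its ret field, assignment, and format specifier.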
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 2ab106a0a491..9d101fb33861 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -6,7 +6,7 @@
  * Copyright 2005-2006, Devicescape Software, Inc.
  * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
- * Copyright 2007-2008, Intel Corporation
+ * Copyright 2007-2010, Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -29,7 +29,7 @@ void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband,
 
 	memset(ht_cap, 0, sizeof(*ht_cap));
 
-	if (!ht_cap_ie)
+	if (!ht_cap_ie || !sband->ht_cap.ht_supported)
 		return;
 
 	ht_cap->ht_supported = true;
@@ -105,6 +105,8 @@ void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta)
 {
 	int i;
 
+	cancel_work_sync(&sta->ampdu_mlme.work);
+
 	for (i = 0; i < STA_TID_NUM; i++) {
 		__ieee80211_stop_tx_ba_session(sta, i, WLAN_BACK_INITIATOR);
 		__ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT,
@@ -112,6 +114,43 @@ void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta)
 	}
 }
 
+void ieee80211_ba_session_work(struct work_struct *work)
+{
+	struct sta_info *sta =
+		container_of(work, struct sta_info, ampdu_mlme.work);
+	struct tid_ampdu_tx *tid_tx;
+	int tid;
+
+	/*
+	 * When this flag is set, new sessions should be
+	 * blocked, and existing sessions will be torn
+	 * down by the code that set the flag, so this
+	 * need not run.
+	 */
+	if (test_sta_flags(sta, WLAN_STA_BLOCK_BA))
+		return;
+
+	mutex_lock(&sta->ampdu_mlme.mtx);
+	for (tid = 0; tid < STA_TID_NUM; tid++) {
+		if (test_and_clear_bit(tid, sta->ampdu_mlme.tid_rx_timer_expired))
+			___ieee80211_stop_rx_ba_session(
+				sta, tid, WLAN_BACK_RECIPIENT,
+				WLAN_REASON_QSTA_TIMEOUT);
+
+		tid_tx = sta->ampdu_mlme.tid_tx[tid];
+		if (!tid_tx)
+			continue;
+
+		if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state))
+			ieee80211_tx_ba_session_handle_start(sta, tid);
+		else if (test_and_clear_bit(HT_AGG_STATE_WANT_STOP,
+					    &tid_tx->state))
+			___ieee80211_stop_tx_ba_session(sta, tid,
+							WLAN_BACK_INITIATOR);
+	}
+	mutex_unlock(&sta->ampdu_mlme.mtx);
+}
+
 void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
 			  const u8 *da, u16 tid,
 			  u16 initiator, u16 reason_code)
@@ -176,13 +215,8 @@ void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
 
 	if (initiator == WLAN_BACK_INITIATOR)
 		__ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_INITIATOR, 0);
-	else { /* WLAN_BACK_RECIPIENT */
-		spin_lock_bh(&sta->lock);
-		if (sta->ampdu_mlme.tid_state_tx[tid] & HT_ADDBA_REQUESTED_MSK)
-			___ieee80211_stop_tx_ba_session(sta, tid,
-							WLAN_BACK_RECIPIENT);
-		spin_unlock_bh(&sta->lock);
-	}
+	else
+		__ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_RECIPIENT);
 }
 
 int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata,
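ieee80211_ba_session_work() above centralizes A-MPDU session start-up and teardown in process context, under ampdu_mlme.mtx. The producer side implied by the handler is not part of this hunk; as a sketch of what a caller is expected to do, assuming a hypothetical helper name (the real timer callback lives in the aggregation code elsewhere in this series):

static void rx_agg_session_timer_expired(struct sta_info *sta, u16 tid)
{
	/* only mark the TID here; the per-station work item then tears
	 * the session down under ampdu_mlme.mtx instead of doing it in
	 * timer (atomic) context */
	set_bit(tid, sta->ampdu_mlme.tid_rx_timer_expired);
	ieee80211_queue_work(&sta->local->hw, &sta->ampdu_mlme.work);
}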
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index b2cc1fda6cfd..c691780725a7 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -43,6 +43,8 @@ static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata,
 {
 	u16 auth_alg, auth_transaction, status_code;
 
+	lockdep_assert_held(&sdata->u.ibss.mtx);
+
 	if (len < 24 + 6)
 		return;
 
@@ -78,6 +80,8 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
 	u32 bss_change;
 	u8 supp_rates[IEEE80211_MAX_SUPP_RATES];
 
+	lockdep_assert_held(&ifibss->mtx);
+
 	/* Reset own TSF to allow time synchronization work. */
 	drv_reset_tsf(local);
 
@@ -172,11 +176,13 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
 	rcu_assign_pointer(ifibss->presp, skb);
 
 	sdata->vif.bss_conf.beacon_int = beacon_int;
+	sdata->vif.bss_conf.basic_rates = basic_rates;
 	bss_change = BSS_CHANGED_BEACON_INT;
 	bss_change |= ieee80211_reset_erp_info(sdata);
 	bss_change |= BSS_CHANGED_BSSID;
 	bss_change |= BSS_CHANGED_BEACON;
 	bss_change |= BSS_CHANGED_BEACON_ENABLED;
+	bss_change |= BSS_CHANGED_BASIC_RATES;
 	bss_change |= BSS_CHANGED_IBSS;
 	sdata->vif.bss_conf.ibss_joined = true;
 	ieee80211_bss_info_change_notify(sdata, bss_change);
@@ -203,6 +209,8 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
 	int i, j;
 	u16 beacon_int = cbss->beacon_interval;
 
+	lockdep_assert_held(&sdata->u.ibss.mtx);
+
 	if (beacon_int < 10)
 		beacon_int = 10;
 
@@ -447,6 +455,8 @@ static int ieee80211_sta_active_ibss(struct ieee80211_sub_if_data *sdata)
 	int active = 0;
 	struct sta_info *sta;
 
+	lockdep_assert_held(&sdata->u.ibss.mtx);
+
 	rcu_read_lock();
 
 	list_for_each_entry_rcu(sta, &local->sta_list, list) {
@@ -471,6 +481,8 @@ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
 
+	lockdep_assert_held(&ifibss->mtx);
+
 	mod_timer(&ifibss->timer,
 		  round_jiffies(jiffies + IEEE80211_IBSS_MERGE_INTERVAL));
 
@@ -503,6 +515,8 @@ static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
 	u16 capability;
 	int i;
 
+	lockdep_assert_held(&ifibss->mtx);
+
 	if (ifibss->fixed_bssid) {
 		memcpy(bssid, ifibss->bssid, ETH_ALEN);
 	} else {
@@ -529,7 +543,7 @@ static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
 	sdata->drop_unencrypted = 0;
 
 	__ieee80211_sta_join_ibss(sdata, bssid, sdata->vif.bss_conf.beacon_int,
-				  ifibss->channel, 3, /* first two are basic */
+				  ifibss->channel, ifibss->basic_rates,
 				  capability, 0);
 }
 
@@ -547,6 +561,8 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
 	int active_ibss;
 	u16 capability;
 
+	lockdep_assert_held(&ifibss->mtx);
+
 	active_ibss = ieee80211_sta_active_ibss(sdata);
 #ifdef CONFIG_MAC80211_IBSS_DEBUG
 	printk(KERN_DEBUG "%s: sta_find_ibss (active_ibss=%d)\n",
@@ -635,6 +651,8 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
 	struct ieee80211_mgmt *resp;
 	u8 *pos, *end;
 
+	lockdep_assert_held(&ifibss->mtx);
+
 	if (ifibss->state != IEEE80211_IBSS_MLME_JOINED ||
 	    len < 24 + 2 || !ifibss->presp)
 		return;
@@ -727,8 +745,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
 	ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, true);
 }
 
-static void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
-					  struct sk_buff *skb)
+void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
+				   struct sk_buff *skb)
 {
 	struct ieee80211_rx_status *rx_status;
 	struct ieee80211_mgmt *mgmt;
@@ -738,6 +756,8 @@ static void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
 	mgmt = (struct ieee80211_mgmt *) skb->data;
 	fc = le16_to_cpu(mgmt->frame_control);
 
+	mutex_lock(&sdata->u.ibss.mtx);
+
 	switch (fc & IEEE80211_FCTL_STYPE) {
 	case IEEE80211_STYPE_PROBE_REQ:
 		ieee80211_rx_mgmt_probe_req(sdata, mgmt, skb->len);
@@ -755,35 +775,22 @@ static void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
 		break;
 	}
 
-	kfree_skb(skb);
+	mutex_unlock(&sdata->u.ibss.mtx);
 }
 
-static void ieee80211_ibss_work(struct work_struct *work)
+void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata)
 {
-	struct ieee80211_sub_if_data *sdata =
-		container_of(work, struct ieee80211_sub_if_data, u.ibss.work);
-	struct ieee80211_local *local = sdata->local;
-	struct ieee80211_if_ibss *ifibss;
-	struct sk_buff *skb;
-
-	if (WARN_ON(local->suspended))
-		return;
-
-	if (!ieee80211_sdata_running(sdata))
-		return;
-
-	if (local->scanning)
-		return;
-
-	if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_ADHOC))
-		return;
-	ifibss = &sdata->u.ibss;
+	struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
 
-	while ((skb = skb_dequeue(&ifibss->skb_queue)))
-		ieee80211_ibss_rx_queued_mgmt(sdata, skb);
+	mutex_lock(&ifibss->mtx);
 
-	if (!test_and_clear_bit(IEEE80211_IBSS_REQ_RUN, &ifibss->request))
-		return;
+	/*
+	 * Work could be scheduled after scan or similar
+	 * when we aren't even joined (or trying) with a
+	 * network.
+	 */
+	if (!ifibss->ssid_len)
+		goto out;
 
 	switch (ifibss->state) {
 	case IEEE80211_IBSS_MLME_SEARCH:
@@ -796,6 +803,9 @@ static void ieee80211_ibss_work(struct work_struct *work)
 		WARN_ON(1);
 		break;
 	}
+
+ out:
+	mutex_unlock(&ifibss->mtx);
 }
 
 static void ieee80211_ibss_timer(unsigned long data)
@@ -810,8 +820,7 @@ static void ieee80211_ibss_timer(unsigned long data)
 		return;
 	}
 
-	set_bit(IEEE80211_IBSS_REQ_RUN, &ifibss->request);
-	ieee80211_queue_work(&local->hw, &ifibss->work);
+	ieee80211_queue_work(&local->hw, &sdata->work);
 }
 
 #ifdef CONFIG_PM
@@ -819,7 +828,6 @@ void ieee80211_ibss_quiesce(struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
 
-	cancel_work_sync(&ifibss->work);
 	if (del_timer_sync(&ifibss->timer))
 		ifibss->timer_running = true;
 }
@@ -839,10 +847,9 @@ void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
 
-	INIT_WORK(&ifibss->work, ieee80211_ibss_work);
 	setup_timer(&ifibss->timer, ieee80211_ibss_timer,
 		    (unsigned long) sdata);
-	skb_queue_head_init(&ifibss->skb_queue);
+	mutex_init(&ifibss->mtx);
 }
 
 /* scan finished notification */
@@ -856,45 +863,28 @@ void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local)
 			continue;
 		if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
 			continue;
-		if (!sdata->u.ibss.ssid_len)
-			continue;
 		sdata->u.ibss.last_scan_completed = jiffies;
-		mod_timer(&sdata->u.ibss.timer, 0);
+		ieee80211_queue_work(&local->hw, &sdata->work);
 	}
 	mutex_unlock(&local->iflist_mtx);
 }
 
-ieee80211_rx_result
-ieee80211_ibss_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
-{
-	struct ieee80211_local *local = sdata->local;
-	struct ieee80211_mgmt *mgmt;
-	u16 fc;
-
-	if (skb->len < 24)
-		return RX_DROP_MONITOR;
-
-	mgmt = (struct ieee80211_mgmt *) skb->data;
-	fc = le16_to_cpu(mgmt->frame_control);
-
-	switch (fc & IEEE80211_FCTL_STYPE) {
-	case IEEE80211_STYPE_PROBE_RESP:
-	case IEEE80211_STYPE_BEACON:
-	case IEEE80211_STYPE_PROBE_REQ:
-	case IEEE80211_STYPE_AUTH:
-		skb_queue_tail(&sdata->u.ibss.skb_queue, skb);
-		ieee80211_queue_work(&local->hw, &sdata->u.ibss.work);
-		return RX_QUEUED;
-	}
-
-	return RX_DROP_MONITOR;
-}
-
 int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
 			struct cfg80211_ibss_params *params)
 {
 	struct sk_buff *skb;
 
+	skb = dev_alloc_skb(sdata->local->hw.extra_tx_headroom +
+			    36 /* bitrates */ +
+			    34 /* SSID */ +
+			    3  /* DS params */ +
+			    4  /* IBSS params */ +
+			    params->ie_len);
+	if (!skb)
+		return -ENOMEM;
+
+	mutex_lock(&sdata->u.ibss.mtx);
+
 	if (params->bssid) {
 		memcpy(sdata->u.ibss.bssid, params->bssid, ETH_ALEN);
 		sdata->u.ibss.fixed_bssid = true;
@@ -902,6 +892,7 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
 		sdata->u.ibss.fixed_bssid = false;
 
 	sdata->u.ibss.privacy = params->privacy;
+	sdata->u.ibss.basic_rates = params->basic_rates;
 
 	sdata->vif.bss_conf.beacon_int = params->beacon_interval;
 
@@ -922,34 +913,18 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
 		sdata->u.ibss.ie_len = params->ie_len;
 	}
 
-	skb = dev_alloc_skb(sdata->local->hw.extra_tx_headroom +
-			    36 /* bitrates */ +
-			    34 /* SSID */ +
-			    3  /* DS params */ +
-			    4  /* IBSS params */ +
-			    params->ie_len);
-	if (!skb)
-		return -ENOMEM;
-
 	sdata->u.ibss.skb = skb;
 	sdata->u.ibss.state = IEEE80211_IBSS_MLME_SEARCH;
 	sdata->u.ibss.ibss_join_req = jiffies;
 
 	memcpy(sdata->u.ibss.ssid, params->ssid, IEEE80211_MAX_SSID_LEN);
-
-	/*
-	 * The ssid_len setting below is used to see whether
-	 * we are active, and we need all other settings
-	 * before that may get visible.
-	 */
-	mb();
-
 	sdata->u.ibss.ssid_len = params->ssid_len;
 
 	ieee80211_recalc_idle(sdata->local);
 
-	set_bit(IEEE80211_IBSS_REQ_RUN, &sdata->u.ibss.request);
-	ieee80211_queue_work(&sdata->local->hw, &sdata->u.ibss.work);
+	ieee80211_queue_work(&sdata->local->hw, &sdata->work);
+
+	mutex_unlock(&sdata->u.ibss.mtx);
 
 	return 0;
 }
@@ -957,11 +932,33 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
 int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
 {
 	struct sk_buff *skb;
+	struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
+	struct ieee80211_local *local = sdata->local;
+	struct cfg80211_bss *cbss;
+	u16 capability;
+	int active_ibss;
 
-	del_timer_sync(&sdata->u.ibss.timer);
-	clear_bit(IEEE80211_IBSS_REQ_RUN, &sdata->u.ibss.request);
-	cancel_work_sync(&sdata->u.ibss.work);
-	clear_bit(IEEE80211_IBSS_REQ_RUN, &sdata->u.ibss.request);
+	mutex_lock(&sdata->u.ibss.mtx);
+
+	active_ibss = ieee80211_sta_active_ibss(sdata);
+
+	if (!active_ibss && !is_zero_ether_addr(ifibss->bssid)) {
+		capability = WLAN_CAPABILITY_IBSS;
+
+		if (ifibss->privacy)
+			capability |= WLAN_CAPABILITY_PRIVACY;
+
+		cbss = cfg80211_get_bss(local->hw.wiphy, ifibss->channel,
+					ifibss->bssid, ifibss->ssid,
+					ifibss->ssid_len, WLAN_CAPABILITY_IBSS |
+					WLAN_CAPABILITY_PRIVACY,
+					capability);
+
+		if (cbss) {
+			cfg80211_unlink_bss(local->hw.wiphy, cbss);
+			cfg80211_put_bss(cbss);
+		}
+	}
 
 	sta_info_flush(sdata->local, sdata);
 
@@ -975,10 +972,14 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
 	synchronize_rcu();
 	kfree_skb(skb);
 
-	skb_queue_purge(&sdata->u.ibss.skb_queue);
+	skb_queue_purge(&sdata->skb_queue);
 	memset(sdata->u.ibss.bssid, 0, ETH_ALEN);
 	sdata->u.ibss.ssid_len = 0;
 
+	del_timer_sync(&sdata->u.ibss.timer);
+
+	mutex_unlock(&sdata->u.ibss.mtx);
+
 	ieee80211_recalc_idle(sdata->local);
 
 	return 0;
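The ibss.c conversion above replaces the skb queue/request-bit machinery with a single ifibss->mtx: every entry point (work handler, queued-mgmt processing, join/leave) takes the mutex, while internal helpers only assert it. A compressed sketch of the resulting locking contract, with both function names as placeholders:

static void some_ibss_helper(struct ieee80211_sub_if_data *sdata)
{
	/* internal helper: caller must already hold the IBSS mutex */
	lockdep_assert_held(&sdata->u.ibss.mtx);
	/* ... state machine work ... */
}

void some_ibss_entry_point(struct ieee80211_sub_if_data *sdata)
{
	/* entry point from the workqueue or cfg80211: takes the mutex */
	mutex_lock(&sdata->u.ibss.mtx);
	some_ibss_helper(sdata);
	mutex_unlock(&sdata->u.ibss.mtx);
}

This matches ieee80211_ibss_work() and ieee80211_ibss_rx_queued_mgmt() calling into the lockdep-asserting helpers above.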
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 1a9e2da37a93..65e0ed6c2975 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -238,6 +238,7 @@ enum ieee80211_work_type {
 	IEEE80211_WORK_ABORT,
 	IEEE80211_WORK_DIRECT_PROBE,
 	IEEE80211_WORK_AUTH,
+	IEEE80211_WORK_ASSOC_BEACON_WAIT,
 	IEEE80211_WORK_ASSOC,
 	IEEE80211_WORK_REMAIN_ON_CHANNEL,
 };
@@ -325,7 +326,6 @@ struct ieee80211_if_managed {
 	struct timer_list conn_mon_timer;
 	struct timer_list bcn_mon_timer;
 	struct timer_list chswitch_timer;
-	struct work_struct work;
 	struct work_struct monitor_work;
 	struct work_struct chswitch_work;
 	struct work_struct beacon_connection_loss_work;
@@ -340,8 +340,6 @@ struct ieee80211_if_managed {
 
 	u16 aid;
 
-	struct sk_buff_head skb_queue;
-
 	unsigned long timers_running; /* used for quiesce/restart */
 	bool powersave; /* powersave requested for this iface */
 	enum ieee80211_smps_mode req_smps, /* requested smps mode */
@@ -380,19 +378,15 @@ struct ieee80211_if_managed {
 	int last_cqm_event_signal;
 };
 
-enum ieee80211_ibss_request {
-	IEEE80211_IBSS_REQ_RUN	= 0,
-};
-
 struct ieee80211_if_ibss {
 	struct timer_list timer;
-	struct work_struct work;
 
-	struct sk_buff_head skb_queue;
+	struct mutex mtx;
 
-	unsigned long request;
 	unsigned long last_scan_completed;
 
+	u32 basic_rates;
+
 	bool timer_running;
 
 	bool fixed_bssid;
@@ -416,11 +410,9 @@ struct ieee80211_if_ibss {
 };
 
 struct ieee80211_if_mesh {
-	struct work_struct work;
 	struct timer_list housekeeping_timer;
 	struct timer_list mesh_path_timer;
 	struct timer_list mesh_path_root_timer;
-	struct sk_buff_head skb_queue;
 
 	unsigned long timers_running;
 
@@ -517,6 +509,11 @@ struct ieee80211_sub_if_data {
 
 	u16 sequence_number;
 
+	struct work_struct work;
+	struct sk_buff_head skb_queue;
+
+	bool arp_filter_state;
+
 	/*
 	 * AP this belongs to: self in AP mode and
 	 * corresponding AP in VLAN mode, NULL for
@@ -569,11 +566,15 @@ ieee80211_sdata_set_mesh_id(struct ieee80211_sub_if_data *sdata,
 #endif
 }
 
+enum sdata_queue_type {
+	IEEE80211_SDATA_QUEUE_TYPE_FRAME	= 0,
+	IEEE80211_SDATA_QUEUE_AGG_START		= 1,
+	IEEE80211_SDATA_QUEUE_AGG_STOP		= 2,
+};
+
 enum {
 	IEEE80211_RX_MSG	= 1,
 	IEEE80211_TX_STATUS_MSG	= 2,
-	IEEE80211_DELBA_MSG	= 3,
-	IEEE80211_ADDBA_MSG	= 4,
 };
 
 enum queue_stop_reason {
@@ -724,13 +725,7 @@ struct ieee80211_local {
 	struct sk_buff_head pending[IEEE80211_MAX_QUEUES];
 	struct tasklet_struct tx_pending_tasklet;
 
-	/*
-	 * This lock is used to prevent concurrent A-MPDU
-	 * session start/stop processing, this thus also
-	 * synchronises the ->ampdu_action() callback to
-	 * drivers and limits it to one at a time.
-	 */
-	spinlock_t ampdu_lock;
+	atomic_t agg_queue_stop[IEEE80211_MAX_QUEUES];
 
 	/* number of interfaces with corresponding IFF_ flags */
 	atomic_t iff_allmultis, iff_promiscs;
@@ -746,10 +741,10 @@ struct ieee80211_local {
 	struct mutex iflist_mtx;
 
 	/*
-	 * Key lock, protects sdata's key_list and sta_info's
+	 * Key mutex, protects sdata's key_list and sta_info's
 	 * key pointers (write access, they're RCU.)
 	 */
-	spinlock_t key_lock;
+	struct mutex key_mtx;
 
 
 	/* Scanning and BSS list */
@@ -851,6 +846,15 @@ struct ieee80211_local {
 	struct work_struct dynamic_ps_disable_work;
 	struct timer_list dynamic_ps_timer;
 	struct notifier_block network_latency_notifier;
+	struct notifier_block ifa_notifier;
+
+	/*
+	 * The dynamic ps timeout configured from user space via WEXT -
+	 * this will override whatever chosen by mac80211 internally.
+	 */
+	int dynamic_ps_forced_timeout;
+	int dynamic_ps_user_timeout;
+	bool disable_dynamic_ps;
 
 	int user_power_level; /* in dBm */
 	int power_constr_level; /* in dBm */
@@ -874,9 +878,8 @@ IEEE80211_DEV_TO_SUB_IF(struct net_device *dev)
 	return netdev_priv(dev);
 }
 
-/* this struct represents 802.11n's RA/TID combination along with our vif */
+/* this struct represents 802.11n's RA/TID combination */
 struct ieee80211_ra_tid {
-	struct ieee80211_vif *vif;
 	u8 ra[ETH_ALEN];
 	u16 tid;
 };
@@ -985,29 +988,25 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
 int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
 			   struct cfg80211_disassoc_request *req,
 			   void *cookie);
-int ieee80211_mgd_action(struct ieee80211_sub_if_data *sdata,
-			 struct ieee80211_channel *chan,
-			 enum nl80211_channel_type channel_type,
-			 const u8 *buf, size_t len, u64 *cookie);
-ieee80211_rx_result ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata,
-					  struct sk_buff *skb);
 void ieee80211_send_pspoll(struct ieee80211_local *local,
 			   struct ieee80211_sub_if_data *sdata);
 void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency);
 int ieee80211_max_network_latency(struct notifier_block *nb,
 				  unsigned long data, void *dummy);
+int ieee80211_set_arp_filter(struct ieee80211_sub_if_data *sdata);
 void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
 				      struct ieee80211_channel_sw_ie *sw_elem,
 				      struct ieee80211_bss *bss,
 				      u64 timestamp);
 void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata);
 void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata);
+void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata);
+void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
+				  struct sk_buff *skb);
 
 /* IBSS code */
 void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local);
 void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata);
-ieee80211_rx_result
-ieee80211_ibss_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb);
 struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
 					u8 *bssid, u8 *addr, u32 supp_rates,
 					gfp_t gfp);
@@ -1016,6 +1015,14 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
 int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata);
 void ieee80211_ibss_quiesce(struct ieee80211_sub_if_data *sdata);
 void ieee80211_ibss_restart(struct ieee80211_sub_if_data *sdata);
+void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata);
+void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
+				   struct sk_buff *skb);
+
+/* mesh code */
+void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata);
+void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
+				   struct sk_buff *skb);
 
 /* scan/BSS handling */
 void ieee80211_scan_work(struct work_struct *work);
@@ -1084,7 +1091,7 @@ struct ieee80211_tx_status_rtap_hdr {
 	u8 padding_for_rate;
 	__le16 tx_flags;
 	u8 data_retries;
-} __attribute__ ((packed));
+} __packed;
 
 
 /* HT */
@@ -1099,6 +1106,8 @@ int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata,
 			       enum ieee80211_smps_mode smps, const u8 *da,
 			       const u8 *bssid);
 
+void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
+				     u16 initiator, u16 reason);
 void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
 				    u16 initiator, u16 reason);
 void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta);
@@ -1118,6 +1127,10 @@ int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
 				   enum ieee80211_back_parties initiator);
 int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
 				    enum ieee80211_back_parties initiator);
+void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid);
+void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid);
+void ieee80211_ba_session_work(struct work_struct *work);
+void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid);
 
 /* Spectrum management */
 void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 50deb017fd6e..ebbe264e2b0b 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -249,6 +249,8 @@ static int ieee80211_open(struct net_device *dev)
249 local->fif_other_bss++; 249 local->fif_other_bss++;
250 250
251 ieee80211_configure_filter(local); 251 ieee80211_configure_filter(local);
252
253 netif_carrier_on(dev);
252 break; 254 break;
253 default: 255 default:
254 res = drv_add_interface(local, &sdata->vif); 256 res = drv_add_interface(local, &sdata->vif);
@@ -268,7 +270,6 @@ static int ieee80211_open(struct net_device *dev)
268 270
269 changed |= ieee80211_reset_erp_info(sdata); 271 changed |= ieee80211_reset_erp_info(sdata);
270 ieee80211_bss_info_change_notify(sdata, changed); 272 ieee80211_bss_info_change_notify(sdata, changed);
271 ieee80211_enable_keys(sdata);
272 273
273 if (sdata->vif.type == NL80211_IFTYPE_STATION) 274 if (sdata->vif.type == NL80211_IFTYPE_STATION)
274 netif_carrier_off(dev); 275 netif_carrier_off(dev);
@@ -321,15 +322,6 @@ static int ieee80211_open(struct net_device *dev)
321 322
322 ieee80211_recalc_ps(local, -1); 323 ieee80211_recalc_ps(local, -1);
323 324
324 /*
325 * ieee80211_sta_work is disabled while network interface
326 * is down. Therefore, some configuration changes may not
327 * yet be effective. Trigger execution of ieee80211_sta_work
328 * to fix this.
329 */
330 if (sdata->vif.type == NL80211_IFTYPE_STATION)
331 ieee80211_queue_work(&local->hw, &sdata->u.mgd.work);
332
333 netif_tx_start_all_queues(dev); 325 netif_tx_start_all_queues(dev);
334 326
335 return 0; 327 return 0;
@@ -349,7 +341,6 @@ static int ieee80211_stop(struct net_device *dev)
349{ 341{
350 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 342 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
351 struct ieee80211_local *local = sdata->local; 343 struct ieee80211_local *local = sdata->local;
352 struct sta_info *sta;
353 unsigned long flags; 344 unsigned long flags;
354 struct sk_buff *skb, *tmp; 345 struct sk_buff *skb, *tmp;
355 u32 hw_reconf_flags = 0; 346 u32 hw_reconf_flags = 0;
@@ -366,18 +357,6 @@ static int ieee80211_stop(struct net_device *dev)
366 ieee80211_work_purge(sdata); 357 ieee80211_work_purge(sdata);
367 358
368 /* 359 /*
369 * Now delete all active aggregation sessions.
370 */
371 rcu_read_lock();
372
373 list_for_each_entry_rcu(sta, &local->sta_list, list) {
374 if (sta->sdata == sdata)
375 ieee80211_sta_tear_down_BA_sessions(sta);
376 }
377
378 rcu_read_unlock();
379
380 /*
381 * Remove all stations associated with this interface. 360 * Remove all stations associated with this interface.
382 * 361 *
383 * This must be done before calling ops->remove_interface() 362 * This must be done before calling ops->remove_interface()
@@ -483,27 +462,14 @@ static int ieee80211_stop(struct net_device *dev)
483 * whether the interface is running, which, at this point, 462 * whether the interface is running, which, at this point,
484 * it no longer is. 463 * it no longer is.
485 */ 464 */
486 cancel_work_sync(&sdata->u.mgd.work);
487 cancel_work_sync(&sdata->u.mgd.chswitch_work); 465 cancel_work_sync(&sdata->u.mgd.chswitch_work);
488 cancel_work_sync(&sdata->u.mgd.monitor_work); 466 cancel_work_sync(&sdata->u.mgd.monitor_work);
489 cancel_work_sync(&sdata->u.mgd.beacon_connection_loss_work); 467 cancel_work_sync(&sdata->u.mgd.beacon_connection_loss_work);
490 468
491 /*
492 * When we get here, the interface is marked down.
493 * Call synchronize_rcu() to wait for the RX path
494 * should it be using the interface and enqueuing
495 * frames at this very time on another CPU.
496 */
497 synchronize_rcu();
498 skb_queue_purge(&sdata->u.mgd.skb_queue);
499 /* fall through */ 469 /* fall through */
500 case NL80211_IFTYPE_ADHOC: 470 case NL80211_IFTYPE_ADHOC:
501 if (sdata->vif.type == NL80211_IFTYPE_ADHOC) { 471 if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
502 del_timer_sync(&sdata->u.ibss.timer); 472 del_timer_sync(&sdata->u.ibss.timer);
503 cancel_work_sync(&sdata->u.ibss.work);
504 synchronize_rcu();
505 skb_queue_purge(&sdata->u.ibss.skb_queue);
506 }
507 /* fall through */ 473 /* fall through */
508 case NL80211_IFTYPE_MESH_POINT: 474 case NL80211_IFTYPE_MESH_POINT:
509 if (ieee80211_vif_is_mesh(&sdata->vif)) { 475 if (ieee80211_vif_is_mesh(&sdata->vif)) {
@@ -518,6 +484,16 @@ static int ieee80211_stop(struct net_device *dev)
518 } 484 }
519 /* fall through */ 485 /* fall through */
520 default: 486 default:
487 flush_work(&sdata->work);
488 /*
489 * When we get here, the interface is marked down.
490 * Call synchronize_rcu() to wait for the RX path
491 * should it be using the interface and enqueuing
492 * frames at this very time on another CPU.
493 */
494 synchronize_rcu();
495 skb_queue_purge(&sdata->skb_queue);
496
521 if (local->scan_sdata == sdata) 497 if (local->scan_sdata == sdata)
522 ieee80211_scan_cancel(local); 498 ieee80211_scan_cancel(local);
523 499
@@ -531,8 +507,8 @@ static int ieee80211_stop(struct net_device *dev)
531 BSS_CHANGED_BEACON_ENABLED); 507 BSS_CHANGED_BEACON_ENABLED);
532 } 508 }
533 509
534 /* disable all keys for as long as this netdev is down */ 510 /* free all remaining keys, there shouldn't be any */
535 ieee80211_disable_keys(sdata); 511 ieee80211_free_keys(sdata);
536 drv_remove_interface(local, &sdata->vif); 512 drv_remove_interface(local, &sdata->vif);
537 } 513 }
538 514
@@ -727,6 +703,136 @@ static void ieee80211_if_setup(struct net_device *dev)
727 dev->destructor = free_netdev; 703 dev->destructor = free_netdev;
728} 704}
729 705
706static void ieee80211_iface_work(struct work_struct *work)
707{
708 struct ieee80211_sub_if_data *sdata =
709 container_of(work, struct ieee80211_sub_if_data, work);
710 struct ieee80211_local *local = sdata->local;
711 struct sk_buff *skb;
712 struct sta_info *sta;
713 struct ieee80211_ra_tid *ra_tid;
714
715 if (!ieee80211_sdata_running(sdata))
716 return;
717
718 if (local->scanning)
719 return;
720
721 /*
722 * ieee80211_queue_work() should have picked up most cases,
723 * here we'll pick the rest.
724 */
725 if (WARN(local->suspended,
726 "interface work scheduled while going to suspend\n"))
727 return;
728
729 /* first process frames */
730 while ((skb = skb_dequeue(&sdata->skb_queue))) {
731 struct ieee80211_mgmt *mgmt = (void *)skb->data;
732
733 if (skb->pkt_type == IEEE80211_SDATA_QUEUE_AGG_START) {
734 ra_tid = (void *)&skb->cb;
735 ieee80211_start_tx_ba_cb(&sdata->vif, ra_tid->ra,
736 ra_tid->tid);
737 } else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_AGG_STOP) {
738 ra_tid = (void *)&skb->cb;
739 ieee80211_stop_tx_ba_cb(&sdata->vif, ra_tid->ra,
740 ra_tid->tid);
741 } else if (ieee80211_is_action(mgmt->frame_control) &&
742 mgmt->u.action.category == WLAN_CATEGORY_BACK) {
743 int len = skb->len;
744
745 mutex_lock(&local->sta_mtx);
746 sta = sta_info_get_bss(sdata, mgmt->sa);
747 if (sta) {
748 switch (mgmt->u.action.u.addba_req.action_code) {
749 case WLAN_ACTION_ADDBA_REQ:
750 ieee80211_process_addba_request(
751 local, sta, mgmt, len);
752 break;
753 case WLAN_ACTION_ADDBA_RESP:
754 ieee80211_process_addba_resp(local, sta,
755 mgmt, len);
756 break;
757 case WLAN_ACTION_DELBA:
758 ieee80211_process_delba(sdata, sta,
759 mgmt, len);
760 break;
761 default:
762 WARN_ON(1);
763 break;
764 }
765 }
766 mutex_unlock(&local->sta_mtx);
767 } else if (ieee80211_is_data_qos(mgmt->frame_control)) {
768 struct ieee80211_hdr *hdr = (void *)mgmt;
769 /*
770 * So the frame isn't mgmt, but frame_control
771 * is at the right place anyway, of course, so
772 * the if statement is correct.
773 *
774 * Warn if we have other data frame types here,
775 * they must not get here.
776 */
777 WARN_ON(hdr->frame_control &
778 cpu_to_le16(IEEE80211_STYPE_NULLFUNC));
779 WARN_ON(!(hdr->seq_ctrl &
780 cpu_to_le16(IEEE80211_SCTL_FRAG)));
781 /*
782 * This was a fragment of a frame, received while
783 * a block-ack session was active. That cannot be
784 * right, so terminate the session.
785 */
786 mutex_lock(&local->sta_mtx);
787 sta = sta_info_get_bss(sdata, mgmt->sa);
788 if (sta) {
789 u16 tid = *ieee80211_get_qos_ctl(hdr) &
790 IEEE80211_QOS_CTL_TID_MASK;
791
792 __ieee80211_stop_rx_ba_session(
793 sta, tid, WLAN_BACK_RECIPIENT,
794 WLAN_REASON_QSTA_REQUIRE_SETUP);
795 }
796 mutex_unlock(&local->sta_mtx);
797 } else switch (sdata->vif.type) {
798 case NL80211_IFTYPE_STATION:
799 ieee80211_sta_rx_queued_mgmt(sdata, skb);
800 break;
801 case NL80211_IFTYPE_ADHOC:
802 ieee80211_ibss_rx_queued_mgmt(sdata, skb);
803 break;
804 case NL80211_IFTYPE_MESH_POINT:
805 if (!ieee80211_vif_is_mesh(&sdata->vif))
806 break;
807 ieee80211_mesh_rx_queued_mgmt(sdata, skb);
808 break;
809 default:
810 WARN(1, "frame for unexpected interface type");
811 break;
812 }
813
814 kfree_skb(skb);
815 }
816
817 /* then other type-dependent work */
818 switch (sdata->vif.type) {
819 case NL80211_IFTYPE_STATION:
820 ieee80211_sta_work(sdata);
821 break;
822 case NL80211_IFTYPE_ADHOC:
823 ieee80211_ibss_work(sdata);
824 break;
825 case NL80211_IFTYPE_MESH_POINT:
826 if (!ieee80211_vif_is_mesh(&sdata->vif))
827 break;
828 ieee80211_mesh_work(sdata);
829 break;
830 default:
831 break;
832 }
833}
834
835
730/* 836/*
731 * Helper function to initialise an interface to a specific type. 837 * Helper function to initialise an interface to a specific type.
732 */ 838 */
@@ -744,6 +850,9 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
744 /* only monitor differs */ 850 /* only monitor differs */
745 sdata->dev->type = ARPHRD_ETHER; 851 sdata->dev->type = ARPHRD_ETHER;
746 852
853 skb_queue_head_init(&sdata->skb_queue);
854 INIT_WORK(&sdata->work, ieee80211_iface_work);
855
747 switch (type) { 856 switch (type) {
748 case NL80211_IFTYPE_AP: 857 case NL80211_IFTYPE_AP:
749 skb_queue_head_init(&sdata->u.ap.ps_bc_buf); 858 skb_queue_head_init(&sdata->u.ap.ps_bc_buf);
@@ -969,6 +1078,9 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
969 sdata->wdev.wiphy = local->hw.wiphy; 1078 sdata->wdev.wiphy = local->hw.wiphy;
970 sdata->local = local; 1079 sdata->local = local;
971 sdata->dev = ndev; 1080 sdata->dev = ndev;
1081#ifdef CONFIG_INET
1082 sdata->arp_filter_state = true;
1083#endif
972 1084
973 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) 1085 for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++)
974 skb_queue_head_init(&sdata->fragments[i].skb_list); 1086 skb_queue_head_init(&sdata->fragments[i].skb_list);
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index e8f6e3b252d8..1b9d87ed143a 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -36,80 +36,20 @@
36 * There is currently no way of knowing this except by looking into 36 * There is currently no way of knowing this except by looking into
37 * debugfs. 37 * debugfs.
38 * 38 *
39 * All key operations are protected internally so you can call them at 39 * All key operations are protected internally.
40 * any time.
41 * 40 *
42 * Within mac80211, key references are, just as STA structure references, 41 * Within mac80211, key references are, just as STA structure references,
43 * protected by RCU. Note, however, that some things are unprotected, 42 * protected by RCU. Note, however, that some things are unprotected,
44 * namely the key->sta dereferences within the hardware acceleration 43 * namely the key->sta dereferences within the hardware acceleration
45 * functions. This means that sta_info_destroy() must flush the key todo 44 * functions. This means that sta_info_destroy() must remove the key
46 * list. 45 * which waits for an RCU grace period.
47 *
48 * All the direct key list manipulation functions must not sleep because
49 * they can operate on STA info structs that are protected by RCU.
50 */ 46 */
51 47
52static const u8 bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 48static const u8 bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
53 49
54/* key mutex: used to synchronise todo runners */ 50static void assert_key_lock(struct ieee80211_local *local)
55static DEFINE_MUTEX(key_mutex);
56static DEFINE_SPINLOCK(todo_lock);
57static LIST_HEAD(todo_list);
58
59static void key_todo(struct work_struct *work)
60{
61 ieee80211_key_todo();
62}
63
64static DECLARE_WORK(todo_work, key_todo);
65
66/**
67 * add_todo - add todo item for a key
68 *
69 * @key: key to add to do item for
70 * @flag: todo flag(s)
71 *
72 * Must be called with IRQs or softirqs disabled.
73 */
74static void add_todo(struct ieee80211_key *key, u32 flag)
75{
76 if (!key)
77 return;
78
79 spin_lock(&todo_lock);
80 key->flags |= flag;
81 /*
82 * Remove again if already on the list so that we move it to the end.
83 */
84 if (!list_empty(&key->todo))
85 list_del(&key->todo);
86 list_add_tail(&key->todo, &todo_list);
87 schedule_work(&todo_work);
88 spin_unlock(&todo_lock);
89}
90
91/**
92 * ieee80211_key_lock - lock the mac80211 key operation lock
93 *
94 * This locks the (global) mac80211 key operation lock, all
95 * key operations must be done under this lock.
96 */
97static void ieee80211_key_lock(void)
98{
99 mutex_lock(&key_mutex);
100}
101
102/**
103 * ieee80211_key_unlock - unlock the mac80211 key operation lock
104 */
105static void ieee80211_key_unlock(void)
106{
107 mutex_unlock(&key_mutex);
108}
109
110static void assert_key_lock(void)
111{ 51{
112 WARN_ON(!mutex_is_locked(&key_mutex)); 52 WARN_ON(!mutex_is_locked(&local->key_mtx));
113} 53}
114 54
115static struct ieee80211_sta *get_sta_for_key(struct ieee80211_key *key) 55static struct ieee80211_sta *get_sta_for_key(struct ieee80211_key *key)
@@ -126,12 +66,13 @@ static void ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
126 struct ieee80211_sta *sta; 66 struct ieee80211_sta *sta;
127 int ret; 67 int ret;
128 68
129 assert_key_lock();
130 might_sleep(); 69 might_sleep();
131 70
132 if (!key->local->ops->set_key) 71 if (!key->local->ops->set_key)
133 return; 72 return;
134 73
74 assert_key_lock(key->local);
75
135 sta = get_sta_for_key(key); 76 sta = get_sta_for_key(key);
136 77
137 sdata = key->sdata; 78 sdata = key->sdata;
@@ -142,11 +83,8 @@ static void ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
142 83
143 ret = drv_set_key(key->local, SET_KEY, sdata, sta, &key->conf); 84 ret = drv_set_key(key->local, SET_KEY, sdata, sta, &key->conf);
144 85
145 if (!ret) { 86 if (!ret)
146 spin_lock_bh(&todo_lock);
147 key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE; 87 key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE;
148 spin_unlock_bh(&todo_lock);
149 }
150 88
151 if (ret && ret != -ENOSPC && ret != -EOPNOTSUPP) 89 if (ret && ret != -ENOSPC && ret != -EOPNOTSUPP)
152 printk(KERN_ERR "mac80211-%s: failed to set key " 90 printk(KERN_ERR "mac80211-%s: failed to set key "
@@ -161,18 +99,15 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
161 struct ieee80211_sta *sta; 99 struct ieee80211_sta *sta;
162 int ret; 100 int ret;
163 101
164 assert_key_lock();
165 might_sleep(); 102 might_sleep();
166 103
167 if (!key || !key->local->ops->set_key) 104 if (!key || !key->local->ops->set_key)
168 return; 105 return;
169 106
170 spin_lock_bh(&todo_lock); 107 assert_key_lock(key->local);
171 if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) { 108
172 spin_unlock_bh(&todo_lock); 109 if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
173 return; 110 return;
174 }
175 spin_unlock_bh(&todo_lock);
176 111
177 sta = get_sta_for_key(key); 112 sta = get_sta_for_key(key);
178 sdata = key->sdata; 113 sdata = key->sdata;
@@ -191,9 +126,7 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
191 wiphy_name(key->local->hw.wiphy), 126 wiphy_name(key->local->hw.wiphy),
192 key->conf.keyidx, sta ? sta->addr : bcast_addr, ret); 127 key->conf.keyidx, sta ? sta->addr : bcast_addr, ret);
193 128
194 spin_lock_bh(&todo_lock);
195 key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE; 129 key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;
196 spin_unlock_bh(&todo_lock);
197} 130}
198 131
199static void __ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, 132static void __ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata,
@@ -201,22 +134,24 @@ static void __ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata,
201{ 134{
202 struct ieee80211_key *key = NULL; 135 struct ieee80211_key *key = NULL;
203 136
137 assert_key_lock(sdata->local);
138
204 if (idx >= 0 && idx < NUM_DEFAULT_KEYS) 139 if (idx >= 0 && idx < NUM_DEFAULT_KEYS)
205 key = sdata->keys[idx]; 140 key = sdata->keys[idx];
206 141
207 rcu_assign_pointer(sdata->default_key, key); 142 rcu_assign_pointer(sdata->default_key, key);
208 143
209 if (key) 144 if (key) {
210 add_todo(key, KEY_FLAG_TODO_DEFKEY); 145 ieee80211_debugfs_key_remove_default(key->sdata);
146 ieee80211_debugfs_key_add_default(key->sdata);
147 }
211} 148}
212 149
213void ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, int idx) 150void ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, int idx)
214{ 151{
215 unsigned long flags; 152 mutex_lock(&sdata->local->key_mtx);
216
217 spin_lock_irqsave(&sdata->local->key_lock, flags);
218 __ieee80211_set_default_key(sdata, idx); 153 __ieee80211_set_default_key(sdata, idx);
219 spin_unlock_irqrestore(&sdata->local->key_lock, flags); 154 mutex_unlock(&sdata->local->key_mtx);
220} 155}
221 156
222static void 157static void
@@ -224,24 +159,26 @@ __ieee80211_set_default_mgmt_key(struct ieee80211_sub_if_data *sdata, int idx)
224{ 159{
225 struct ieee80211_key *key = NULL; 160 struct ieee80211_key *key = NULL;
226 161
162 assert_key_lock(sdata->local);
163
227 if (idx >= NUM_DEFAULT_KEYS && 164 if (idx >= NUM_DEFAULT_KEYS &&
228 idx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) 165 idx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
229 key = sdata->keys[idx]; 166 key = sdata->keys[idx];
230 167
231 rcu_assign_pointer(sdata->default_mgmt_key, key); 168 rcu_assign_pointer(sdata->default_mgmt_key, key);
232 169
233 if (key) 170 if (key) {
234 add_todo(key, KEY_FLAG_TODO_DEFMGMTKEY); 171 ieee80211_debugfs_key_remove_mgmt_default(key->sdata);
172 ieee80211_debugfs_key_add_mgmt_default(key->sdata);
173 }
235} 174}
236 175
237void ieee80211_set_default_mgmt_key(struct ieee80211_sub_if_data *sdata, 176void ieee80211_set_default_mgmt_key(struct ieee80211_sub_if_data *sdata,
238 int idx) 177 int idx)
239{ 178{
240 unsigned long flags; 179 mutex_lock(&sdata->local->key_mtx);
241
242 spin_lock_irqsave(&sdata->local->key_lock, flags);
243 __ieee80211_set_default_mgmt_key(sdata, idx); 180 __ieee80211_set_default_mgmt_key(sdata, idx);
244 spin_unlock_irqrestore(&sdata->local->key_lock, flags); 181 mutex_unlock(&sdata->local->key_mtx);
245} 182}
246 183
247 184
@@ -336,7 +273,7 @@ struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg,
336 key->conf.iv_len = CCMP_HDR_LEN; 273 key->conf.iv_len = CCMP_HDR_LEN;
337 key->conf.icv_len = CCMP_MIC_LEN; 274 key->conf.icv_len = CCMP_MIC_LEN;
338 if (seq) { 275 if (seq) {
339 for (i = 0; i < NUM_RX_DATA_QUEUES; i++) 276 for (i = 0; i < NUM_RX_DATA_QUEUES + 1; i++)
340 for (j = 0; j < CCMP_PN_LEN; j++) 277 for (j = 0; j < CCMP_PN_LEN; j++)
341 key->u.ccmp.rx_pn[i][j] = 278 key->u.ccmp.rx_pn[i][j] =
342 seq[CCMP_PN_LEN - j - 1]; 279 seq[CCMP_PN_LEN - j - 1];
@@ -352,7 +289,6 @@ struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg,
352 } 289 }
353 memcpy(key->conf.key, key_data, key_len); 290 memcpy(key->conf.key, key_data, key_len);
354 INIT_LIST_HEAD(&key->list); 291 INIT_LIST_HEAD(&key->list);
355 INIT_LIST_HEAD(&key->todo);
356 292
357 if (alg == ALG_CCMP) { 293 if (alg == ALG_CCMP) {
358 /* 294 /*
@@ -382,12 +318,29 @@ struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg,
382 return key; 318 return key;
383} 319}
384 320
321static void __ieee80211_key_destroy(struct ieee80211_key *key)
322{
323 if (!key)
324 return;
325
326 if (key->local)
327 ieee80211_key_disable_hw_accel(key);
328
329 if (key->conf.alg == ALG_CCMP)
330 ieee80211_aes_key_free(key->u.ccmp.tfm);
331 if (key->conf.alg == ALG_AES_CMAC)
332 ieee80211_aes_cmac_key_free(key->u.aes_cmac.tfm);
333 if (key->local)
334 ieee80211_debugfs_key_remove(key);
335
336 kfree(key);
337}
338
385void ieee80211_key_link(struct ieee80211_key *key, 339void ieee80211_key_link(struct ieee80211_key *key,
386 struct ieee80211_sub_if_data *sdata, 340 struct ieee80211_sub_if_data *sdata,
387 struct sta_info *sta) 341 struct sta_info *sta)
388{ 342{
389 struct ieee80211_key *old_key; 343 struct ieee80211_key *old_key;
390 unsigned long flags;
391 int idx; 344 int idx;
392 345
393 BUG_ON(!sdata); 346 BUG_ON(!sdata);
@@ -431,7 +384,7 @@ void ieee80211_key_link(struct ieee80211_key *key,
431 } 384 }
432 } 385 }
433 386
434 spin_lock_irqsave(&sdata->local->key_lock, flags); 387 mutex_lock(&sdata->local->key_mtx);
435 388
436 if (sta) 389 if (sta)
437 old_key = sta->key; 390 old_key = sta->key;
@@ -439,15 +392,13 @@ void ieee80211_key_link(struct ieee80211_key *key,
439 old_key = sdata->keys[idx]; 392 old_key = sdata->keys[idx];
440 393
441 __ieee80211_key_replace(sdata, sta, old_key, key); 394 __ieee80211_key_replace(sdata, sta, old_key, key);
395 __ieee80211_key_destroy(old_key);
442 396
443 /* free old key later */ 397 ieee80211_debugfs_key_add(key);
444 add_todo(old_key, KEY_FLAG_TODO_DELETE);
445 398
446 add_todo(key, KEY_FLAG_TODO_ADD_DEBUGFS); 399 ieee80211_key_enable_hw_accel(key);
447 if (ieee80211_sdata_running(sdata))
448 add_todo(key, KEY_FLAG_TODO_HWACCEL_ADD);
449 400
450 spin_unlock_irqrestore(&sdata->local->key_lock, flags); 401 mutex_unlock(&sdata->local->key_mtx);
451} 402}
452 403
453static void __ieee80211_key_free(struct ieee80211_key *key) 404static void __ieee80211_key_free(struct ieee80211_key *key)
@@ -458,170 +409,62 @@ static void __ieee80211_key_free(struct ieee80211_key *key)
458 if (key->sdata) 409 if (key->sdata)
459 __ieee80211_key_replace(key->sdata, key->sta, 410 __ieee80211_key_replace(key->sdata, key->sta,
460 key, NULL); 411 key, NULL);
461 412 __ieee80211_key_destroy(key);
462 add_todo(key, KEY_FLAG_TODO_DELETE);
463} 413}
464 414
465void ieee80211_key_free(struct ieee80211_key *key) 415void ieee80211_key_free(struct ieee80211_local *local,
416 struct ieee80211_key *key)
466{ 417{
467 unsigned long flags;
468
469 if (!key) 418 if (!key)
470 return; 419 return;
471 420
472 if (!key->sdata) { 421 mutex_lock(&local->key_mtx);
473 /* The key has not been linked yet, simply free it
474 * and don't Oops */
475 if (key->conf.alg == ALG_CCMP)
476 ieee80211_aes_key_free(key->u.ccmp.tfm);
477 kfree(key);
478 return;
479 }
480
481 spin_lock_irqsave(&key->sdata->local->key_lock, flags);
482 __ieee80211_key_free(key); 422 __ieee80211_key_free(key);
483 spin_unlock_irqrestore(&key->sdata->local->key_lock, flags); 423 mutex_unlock(&local->key_mtx);
484} 424}
485 425
486/* 426void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata)
487 * To be safe against concurrent manipulations of the list (which shouldn't
488 * actually happen) we need to hold the spinlock. But under the spinlock we
489 * can't actually do much, so we defer processing to the todo list. Then run
490 * the todo list to be sure the operation and possibly previously pending
491 * operations are completed.
492 */
493static void ieee80211_todo_for_each_key(struct ieee80211_sub_if_data *sdata,
494 u32 todo_flags)
495{ 427{
496 struct ieee80211_key *key; 428 struct ieee80211_key *key;
497 unsigned long flags;
498
499 might_sleep();
500
501 spin_lock_irqsave(&sdata->local->key_lock, flags);
502 list_for_each_entry(key, &sdata->key_list, list)
503 add_todo(key, todo_flags);
504 spin_unlock_irqrestore(&sdata->local->key_lock, flags);
505 429
506 ieee80211_key_todo();
507}
508
509void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata)
510{
511 ASSERT_RTNL(); 430 ASSERT_RTNL();
512 431
513 if (WARN_ON(!ieee80211_sdata_running(sdata))) 432 if (WARN_ON(!ieee80211_sdata_running(sdata)))
514 return; 433 return;
515 434
516 ieee80211_todo_for_each_key(sdata, KEY_FLAG_TODO_HWACCEL_ADD); 435 mutex_lock(&sdata->local->key_mtx);
517}
518 436
519void ieee80211_disable_keys(struct ieee80211_sub_if_data *sdata) 437 list_for_each_entry(key, &sdata->key_list, list)
520{ 438 ieee80211_key_enable_hw_accel(key);
521 ASSERT_RTNL();
522
523 ieee80211_todo_for_each_key(sdata, KEY_FLAG_TODO_HWACCEL_REMOVE);
524}
525
526static void __ieee80211_key_destroy(struct ieee80211_key *key)
527{
528 if (!key)
529 return;
530
531 ieee80211_key_disable_hw_accel(key);
532
533 if (key->conf.alg == ALG_CCMP)
534 ieee80211_aes_key_free(key->u.ccmp.tfm);
535 if (key->conf.alg == ALG_AES_CMAC)
536 ieee80211_aes_cmac_key_free(key->u.aes_cmac.tfm);
537 ieee80211_debugfs_key_remove(key);
538 439
539 kfree(key); 440 mutex_unlock(&sdata->local->key_mtx);
540} 441}
541 442
542static void __ieee80211_key_todo(void) 443void ieee80211_disable_keys(struct ieee80211_sub_if_data *sdata)
543{ 444{
544 struct ieee80211_key *key; 445 struct ieee80211_key *key;
545 bool work_done;
546 u32 todoflags;
547 446
548 /* 447 ASSERT_RTNL();
549 * NB: sta_info_destroy relies on this!
550 */
551 synchronize_rcu();
552
553 spin_lock_bh(&todo_lock);
554 while (!list_empty(&todo_list)) {
555 key = list_first_entry(&todo_list, struct ieee80211_key, todo);
556 list_del_init(&key->todo);
557 todoflags = key->flags & (KEY_FLAG_TODO_ADD_DEBUGFS |
558 KEY_FLAG_TODO_DEFKEY |
559 KEY_FLAG_TODO_DEFMGMTKEY |
560 KEY_FLAG_TODO_HWACCEL_ADD |
561 KEY_FLAG_TODO_HWACCEL_REMOVE |
562 KEY_FLAG_TODO_DELETE);
563 key->flags &= ~todoflags;
564 spin_unlock_bh(&todo_lock);
565
566 work_done = false;
567
568 if (todoflags & KEY_FLAG_TODO_ADD_DEBUGFS) {
569 ieee80211_debugfs_key_add(key);
570 work_done = true;
571 }
572 if (todoflags & KEY_FLAG_TODO_DEFKEY) {
573 ieee80211_debugfs_key_remove_default(key->sdata);
574 ieee80211_debugfs_key_add_default(key->sdata);
575 work_done = true;
576 }
577 if (todoflags & KEY_FLAG_TODO_DEFMGMTKEY) {
578 ieee80211_debugfs_key_remove_mgmt_default(key->sdata);
579 ieee80211_debugfs_key_add_mgmt_default(key->sdata);
580 work_done = true;
581 }
582 if (todoflags & KEY_FLAG_TODO_HWACCEL_ADD) {
583 ieee80211_key_enable_hw_accel(key);
584 work_done = true;
585 }
586 if (todoflags & KEY_FLAG_TODO_HWACCEL_REMOVE) {
587 ieee80211_key_disable_hw_accel(key);
588 work_done = true;
589 }
590 if (todoflags & KEY_FLAG_TODO_DELETE) {
591 __ieee80211_key_destroy(key);
592 work_done = true;
593 }
594 448
595 WARN_ON(!work_done); 449 mutex_lock(&sdata->local->key_mtx);
596 450
597 spin_lock_bh(&todo_lock); 451 list_for_each_entry(key, &sdata->key_list, list)
598 } 452 ieee80211_key_disable_hw_accel(key);
599 spin_unlock_bh(&todo_lock);
600}
601 453
602void ieee80211_key_todo(void) 454 mutex_unlock(&sdata->local->key_mtx);
603{
604 ieee80211_key_lock();
605 __ieee80211_key_todo();
606 ieee80211_key_unlock();
607} 455}
608 456
609void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata) 457void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata)
610{ 458{
611 struct ieee80211_key *key, *tmp; 459 struct ieee80211_key *key, *tmp;
612 unsigned long flags;
613 460
614 ieee80211_key_lock(); 461 mutex_lock(&sdata->local->key_mtx);
615 462
616 ieee80211_debugfs_key_remove_default(sdata); 463 ieee80211_debugfs_key_remove_default(sdata);
617 ieee80211_debugfs_key_remove_mgmt_default(sdata); 464 ieee80211_debugfs_key_remove_mgmt_default(sdata);
618 465
619 spin_lock_irqsave(&sdata->local->key_lock, flags);
620 list_for_each_entry_safe(key, tmp, &sdata->key_list, list) 466 list_for_each_entry_safe(key, tmp, &sdata->key_list, list)
621 __ieee80211_key_free(key); 467 __ieee80211_key_free(key);
622 spin_unlock_irqrestore(&sdata->local->key_lock, flags);
623
624 __ieee80211_key_todo();
625 468
626 ieee80211_key_unlock(); 469 mutex_unlock(&sdata->local->key_mtx);
627} 470}
diff --git a/net/mac80211/key.h b/net/mac80211/key.h
index bdc2968c2bbe..b665bbb7a471 100644
--- a/net/mac80211/key.h
+++ b/net/mac80211/key.h
@@ -38,25 +38,9 @@ struct sta_info;
38 * 38 *
39 * @KEY_FLAG_UPLOADED_TO_HARDWARE: Indicates that this key is present 39 * @KEY_FLAG_UPLOADED_TO_HARDWARE: Indicates that this key is present
40 * in the hardware for TX crypto hardware acceleration. 40 * in the hardware for TX crypto hardware acceleration.
41 * @KEY_FLAG_TODO_DELETE: Key is marked for deletion and will, after an
42 * RCU grace period, no longer be reachable other than from the
43 * todo list.
44 * @KEY_FLAG_TODO_HWACCEL_ADD: Key needs to be added to hardware acceleration.
45 * @KEY_FLAG_TODO_HWACCEL_REMOVE: Key needs to be removed from hardware
46 * acceleration.
47 * @KEY_FLAG_TODO_DEFKEY: Key is default key and debugfs needs to be updated.
48 * @KEY_FLAG_TODO_ADD_DEBUGFS: Key needs to be added to debugfs.
49 * @KEY_FLAG_TODO_DEFMGMTKEY: Key is default management key and debugfs needs
50 * to be updated.
51 */ 41 */
52enum ieee80211_internal_key_flags { 42enum ieee80211_internal_key_flags {
53 KEY_FLAG_UPLOADED_TO_HARDWARE = BIT(0), 43 KEY_FLAG_UPLOADED_TO_HARDWARE = BIT(0),
54 KEY_FLAG_TODO_DELETE = BIT(1),
55 KEY_FLAG_TODO_HWACCEL_ADD = BIT(2),
56 KEY_FLAG_TODO_HWACCEL_REMOVE = BIT(3),
57 KEY_FLAG_TODO_DEFKEY = BIT(4),
58 KEY_FLAG_TODO_ADD_DEBUGFS = BIT(5),
59 KEY_FLAG_TODO_DEFMGMTKEY = BIT(6),
60}; 44};
61 45
62enum ieee80211_internal_tkip_state { 46enum ieee80211_internal_tkip_state {
@@ -79,10 +63,8 @@ struct ieee80211_key {
79 63
80 /* for sdata list */ 64 /* for sdata list */
81 struct list_head list; 65 struct list_head list;
82 /* for todo list */
83 struct list_head todo;
84 66
85 /* protected by todo lock! */ 67 /* protected by key mutex */
86 unsigned int flags; 68 unsigned int flags;
87 69
88 union { 70 union {
@@ -95,7 +77,13 @@ struct ieee80211_key {
95 } tkip; 77 } tkip;
96 struct { 78 struct {
97 u8 tx_pn[6]; 79 u8 tx_pn[6];
98 u8 rx_pn[NUM_RX_DATA_QUEUES][6]; 80 /*
81 * Last received packet number. The first
82 * NUM_RX_DATA_QUEUES counters are used with Data
83 * frames and the last counter is used with Robust
84 * Management frames.
85 */
86 u8 rx_pn[NUM_RX_DATA_QUEUES + 1][6];
99 struct crypto_cipher *tfm; 87 struct crypto_cipher *tfm;
100 u32 replays; /* dot11RSNAStatsCCMPReplays */ 88 u32 replays; /* dot11RSNAStatsCCMPReplays */
101 /* scratch buffers for virt_to_page() (crypto API) */ 89 /* scratch buffers for virt_to_page() (crypto API) */
@@ -147,7 +135,8 @@ struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg,
147void ieee80211_key_link(struct ieee80211_key *key, 135void ieee80211_key_link(struct ieee80211_key *key,
148 struct ieee80211_sub_if_data *sdata, 136 struct ieee80211_sub_if_data *sdata,
149 struct sta_info *sta); 137 struct sta_info *sta);
150void ieee80211_key_free(struct ieee80211_key *key); 138void ieee80211_key_free(struct ieee80211_local *local,
139 struct ieee80211_key *key);
151void ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, int idx); 140void ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, int idx);
152void ieee80211_set_default_mgmt_key(struct ieee80211_sub_if_data *sdata, 141void ieee80211_set_default_mgmt_key(struct ieee80211_sub_if_data *sdata,
153 int idx); 142 int idx);
@@ -155,6 +144,4 @@ void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata);
155void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata); 144void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata);
156void ieee80211_disable_keys(struct ieee80211_sub_if_data *sdata); 145void ieee80211_disable_keys(struct ieee80211_sub_if_data *sdata);
157 146
158void ieee80211_key_todo(void);
159
160#endif /* IEEE80211_KEY_H */ 147#endif /* IEEE80211_KEY_H */
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 22a384dfab65..7cc4f913a431 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -20,6 +20,7 @@
20#include <linux/rtnetlink.h> 20#include <linux/rtnetlink.h>
21#include <linux/bitmap.h> 21#include <linux/bitmap.h>
22#include <linux/pm_qos_params.h> 22#include <linux/pm_qos_params.h>
23#include <linux/inetdevice.h>
23#include <net/net_namespace.h> 24#include <net/net_namespace.h>
24#include <net/cfg80211.h> 25#include <net/cfg80211.h>
25 26
@@ -106,12 +107,15 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
106 if (scan_chan) { 107 if (scan_chan) {
107 chan = scan_chan; 108 chan = scan_chan;
108 channel_type = NL80211_CHAN_NO_HT; 109 channel_type = NL80211_CHAN_NO_HT;
110 local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL;
109 } else if (local->tmp_channel) { 111 } else if (local->tmp_channel) {
110 chan = scan_chan = local->tmp_channel; 112 chan = scan_chan = local->tmp_channel;
111 channel_type = local->tmp_channel_type; 113 channel_type = local->tmp_channel_type;
114 local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL;
112 } else { 115 } else {
113 chan = local->oper_channel; 116 chan = local->oper_channel;
114 channel_type = local->_oper_channel_type; 117 channel_type = local->_oper_channel_type;
118 local->hw.conf.flags &= ~IEEE80211_CONF_OFFCHANNEL;
115 } 119 }
116 120
117 if (chan != local->hw.conf.channel || 121 if (chan != local->hw.conf.channel ||
@@ -259,7 +263,6 @@ static void ieee80211_tasklet_handler(unsigned long data)
259{ 263{
260 struct ieee80211_local *local = (struct ieee80211_local *) data; 264 struct ieee80211_local *local = (struct ieee80211_local *) data;
261 struct sk_buff *skb; 265 struct sk_buff *skb;
262 struct ieee80211_ra_tid *ra_tid;
263 266
264 while ((skb = skb_dequeue(&local->skb_queue)) || 267 while ((skb = skb_dequeue(&local->skb_queue)) ||
265 (skb = skb_dequeue(&local->skb_queue_unreliable))) { 268 (skb = skb_dequeue(&local->skb_queue_unreliable))) {
@@ -274,18 +277,6 @@ static void ieee80211_tasklet_handler(unsigned long data)
274 skb->pkt_type = 0; 277 skb->pkt_type = 0;
275 ieee80211_tx_status(local_to_hw(local), skb); 278 ieee80211_tx_status(local_to_hw(local), skb);
276 break; 279 break;
277 case IEEE80211_DELBA_MSG:
278 ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
279 ieee80211_stop_tx_ba_cb(ra_tid->vif, ra_tid->ra,
280 ra_tid->tid);
281 dev_kfree_skb(skb);
282 break;
283 case IEEE80211_ADDBA_MSG:
284 ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
285 ieee80211_start_tx_ba_cb(ra_tid->vif, ra_tid->ra,
286 ra_tid->tid);
287 dev_kfree_skb(skb);
288 break ;
289 default: 280 default:
290 WARN(1, "mac80211: Packet is of unknown type %d\n", 281 WARN(1, "mac80211: Packet is of unknown type %d\n",
291 skb->pkt_type); 282 skb->pkt_type);
@@ -329,6 +320,76 @@ static void ieee80211_recalc_smps_work(struct work_struct *work)
329 mutex_unlock(&local->iflist_mtx); 320 mutex_unlock(&local->iflist_mtx);
330} 321}
331 322
323#ifdef CONFIG_INET
324static int ieee80211_ifa_changed(struct notifier_block *nb,
325 unsigned long data, void *arg)
326{
327 struct in_ifaddr *ifa = arg;
328 struct ieee80211_local *local =
329 container_of(nb, struct ieee80211_local,
330 ifa_notifier);
331 struct net_device *ndev = ifa->ifa_dev->dev;
332 struct wireless_dev *wdev = ndev->ieee80211_ptr;
333 struct in_device *idev;
334 struct ieee80211_sub_if_data *sdata;
335 struct ieee80211_bss_conf *bss_conf;
336 struct ieee80211_if_managed *ifmgd;
337 int c = 0;
338
339 if (!netif_running(ndev))
340 return NOTIFY_DONE;
341
342 /* Make sure it's our interface that got changed */
343 if (!wdev)
344 return NOTIFY_DONE;
345
346 if (wdev->wiphy != local->hw.wiphy)
347 return NOTIFY_DONE;
348
349 sdata = IEEE80211_DEV_TO_SUB_IF(ndev);
350 bss_conf = &sdata->vif.bss_conf;
351
352 /* ARP filtering is only supported in managed mode */
353 if (sdata->vif.type != NL80211_IFTYPE_STATION)
354 return NOTIFY_DONE;
355
356 idev = sdata->dev->ip_ptr;
357 if (!idev)
358 return NOTIFY_DONE;
359
360 ifmgd = &sdata->u.mgd;
361 mutex_lock(&ifmgd->mtx);
362
363 /* Copy the addresses to the bss_conf list */
364 ifa = idev->ifa_list;
365 while (c < IEEE80211_BSS_ARP_ADDR_LIST_LEN && ifa) {
366 bss_conf->arp_addr_list[c] = ifa->ifa_address;
367 ifa = ifa->ifa_next;
368 c++;
369 }
370
371 /* If not all addresses fit the list, disable filtering */
372 if (ifa) {
373 sdata->arp_filter_state = false;
374 c = 0;
375 } else {
376 sdata->arp_filter_state = true;
377 }
378 bss_conf->arp_addr_cnt = c;
379
380 /* Configure driver only if associated */
381 if (ifmgd->associated) {
382 bss_conf->arp_filter_enabled = sdata->arp_filter_state;
383 ieee80211_bss_info_change_notify(sdata,
384 BSS_CHANGED_ARP_FILTER);
385 }
386
387 mutex_unlock(&ifmgd->mtx);
388
389 return NOTIFY_DONE;
390}
391#endif
392
332struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, 393struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
333 const struct ieee80211_ops *ops) 394 const struct ieee80211_ops *ops)
334{ 395{
@@ -396,7 +457,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
396 mutex_init(&local->iflist_mtx); 457 mutex_init(&local->iflist_mtx);
397 mutex_init(&local->scan_mtx); 458 mutex_init(&local->scan_mtx);
398 459
399 spin_lock_init(&local->key_lock); 460 mutex_init(&local->key_mtx);
400 spin_lock_init(&local->filter_lock); 461 spin_lock_init(&local->filter_lock);
401 spin_lock_init(&local->queue_stop_reason_lock); 462 spin_lock_init(&local->queue_stop_reason_lock);
402 463
@@ -419,8 +480,10 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
419 480
420 sta_info_init(local); 481 sta_info_init(local);
421 482
422 for (i = 0; i < IEEE80211_MAX_QUEUES; i++) 483 for (i = 0; i < IEEE80211_MAX_QUEUES; i++) {
423 skb_queue_head_init(&local->pending[i]); 484 skb_queue_head_init(&local->pending[i]);
485 atomic_set(&local->agg_queue_stop[i], 0);
486 }
424 tasklet_init(&local->tx_pending_tasklet, ieee80211_tx_pending, 487 tasklet_init(&local->tx_pending_tasklet, ieee80211_tx_pending,
425 (unsigned long)local); 488 (unsigned long)local);
426 489
@@ -431,8 +494,6 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
431 skb_queue_head_init(&local->skb_queue); 494 skb_queue_head_init(&local->skb_queue);
432 skb_queue_head_init(&local->skb_queue_unreliable); 495 skb_queue_head_init(&local->skb_queue_unreliable);
433 496
434 spin_lock_init(&local->ampdu_lock);
435
436 return local_to_hw(local); 497 return local_to_hw(local);
437} 498}
438EXPORT_SYMBOL(ieee80211_alloc_hw); 499EXPORT_SYMBOL(ieee80211_alloc_hw);
@@ -572,18 +633,16 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
572 633
573 local->hw.conf.listen_interval = local->hw.max_listen_interval; 634 local->hw.conf.listen_interval = local->hw.max_listen_interval;
574 635
575 local->hw.conf.dynamic_ps_forced_timeout = -1; 636 local->dynamic_ps_forced_timeout = -1;
576 637
577 result = sta_info_start(local); 638 result = sta_info_start(local);
578 if (result < 0) 639 if (result < 0)
579 goto fail_sta_info; 640 goto fail_sta_info;
580 641
581 result = ieee80211_wep_init(local); 642 result = ieee80211_wep_init(local);
582 if (result < 0) { 643 if (result < 0)
583 printk(KERN_DEBUG "%s: Failed to initialize wep: %d\n", 644 printk(KERN_DEBUG "%s: Failed to initialize wep: %d\n",
584 wiphy_name(local->hw.wiphy), result); 645 wiphy_name(local->hw.wiphy), result);
585 goto fail_wep;
586 }
587 646
588 rtnl_lock(); 647 rtnl_lock();
589 648
@@ -612,21 +671,30 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
612 ieee80211_max_network_latency; 671 ieee80211_max_network_latency;
613 result = pm_qos_add_notifier(PM_QOS_NETWORK_LATENCY, 672 result = pm_qos_add_notifier(PM_QOS_NETWORK_LATENCY,
614 &local->network_latency_notifier); 673 &local->network_latency_notifier);
615
616 if (result) { 674 if (result) {
617 rtnl_lock(); 675 rtnl_lock();
618 goto fail_pm_qos; 676 goto fail_pm_qos;
619 } 677 }
620 678
679#ifdef CONFIG_INET
680 local->ifa_notifier.notifier_call = ieee80211_ifa_changed;
681 result = register_inetaddr_notifier(&local->ifa_notifier);
682 if (result)
683 goto fail_ifa;
684#endif
685
621 return 0; 686 return 0;
622 687
688 fail_ifa:
689 pm_qos_remove_notifier(PM_QOS_NETWORK_LATENCY,
690 &local->network_latency_notifier);
691 rtnl_lock();
623 fail_pm_qos: 692 fail_pm_qos:
624 ieee80211_led_exit(local); 693 ieee80211_led_exit(local);
625 ieee80211_remove_interfaces(local); 694 ieee80211_remove_interfaces(local);
626 fail_rate: 695 fail_rate:
627 rtnl_unlock(); 696 rtnl_unlock();
628 ieee80211_wep_free(local); 697 ieee80211_wep_free(local);
629 fail_wep:
630 sta_info_stop(local); 698 sta_info_stop(local);
631 fail_sta_info: 699 fail_sta_info:
632 destroy_workqueue(local->workqueue); 700 destroy_workqueue(local->workqueue);
@@ -647,6 +715,9 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
647 715
648 pm_qos_remove_notifier(PM_QOS_NETWORK_LATENCY, 716 pm_qos_remove_notifier(PM_QOS_NETWORK_LATENCY,
649 &local->network_latency_notifier); 717 &local->network_latency_notifier);
718#ifdef CONFIG_INET
719 unregister_inetaddr_notifier(&local->ifa_notifier);
720#endif
650 721
651 rtnl_lock(); 722 rtnl_lock();
652 723
@@ -704,6 +775,10 @@ static int __init ieee80211_init(void)
704 if (ret) 775 if (ret)
705 return ret; 776 return ret;
706 777
778 ret = rc80211_minstrel_ht_init();
779 if (ret)
780 goto err_minstrel;
781
707 ret = rc80211_pid_init(); 782 ret = rc80211_pid_init();
708 if (ret) 783 if (ret)
709 goto err_pid; 784 goto err_pid;
@@ -716,6 +791,8 @@ static int __init ieee80211_init(void)
716 err_netdev: 791 err_netdev:
717 rc80211_pid_exit(); 792 rc80211_pid_exit();
718 err_pid: 793 err_pid:
794 rc80211_minstrel_ht_exit();
795 err_minstrel:
719 rc80211_minstrel_exit(); 796 rc80211_minstrel_exit();
720 797
721 return ret; 798 return ret;
@@ -724,6 +801,7 @@ static int __init ieee80211_init(void)
724static void __exit ieee80211_exit(void) 801static void __exit ieee80211_exit(void)
725{ 802{
726 rc80211_pid_exit(); 803 rc80211_pid_exit();
804 rc80211_minstrel_ht_exit();
727 rc80211_minstrel_exit(); 805 rc80211_minstrel_exit();
728 806
729 /* 807 /*
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index bde81031727a..c8a4f19ed13b 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -54,7 +54,7 @@ static void ieee80211_mesh_housekeeping_timer(unsigned long data)
54 return; 54 return;
55 } 55 }
56 56
57 ieee80211_queue_work(&local->hw, &ifmsh->work); 57 ieee80211_queue_work(&local->hw, &sdata->work);
58} 58}
59 59
60/** 60/**
@@ -345,7 +345,7 @@ static void ieee80211_mesh_path_timer(unsigned long data)
345 return; 345 return;
346 } 346 }
347 347
348 ieee80211_queue_work(&local->hw, &ifmsh->work); 348 ieee80211_queue_work(&local->hw, &sdata->work);
349} 349}
350 350
351static void ieee80211_mesh_path_root_timer(unsigned long data) 351static void ieee80211_mesh_path_root_timer(unsigned long data)
@@ -362,7 +362,7 @@ static void ieee80211_mesh_path_root_timer(unsigned long data)
362 return; 362 return;
363 } 363 }
364 364
365 ieee80211_queue_work(&local->hw, &ifmsh->work); 365 ieee80211_queue_work(&local->hw, &sdata->work);
366} 366}
367 367
368void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh) 368void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh)
@@ -484,9 +484,6 @@ void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata)
484{ 484{
485 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 485 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
486 486
487 /* might restart the timer but that doesn't matter */
488 cancel_work_sync(&ifmsh->work);
489
490 /* use atomic bitops in case both timers fire at the same time */ 487 /* use atomic bitops in case both timers fire at the same time */
491 488
492 if (del_timer_sync(&ifmsh->housekeeping_timer)) 489 if (del_timer_sync(&ifmsh->housekeeping_timer))
@@ -518,7 +515,7 @@ void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
518 515
519 set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags); 516 set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags);
520 ieee80211_mesh_root_setup(ifmsh); 517 ieee80211_mesh_root_setup(ifmsh);
521 ieee80211_queue_work(&local->hw, &ifmsh->work); 518 ieee80211_queue_work(&local->hw, &sdata->work);
522 sdata->vif.bss_conf.beacon_int = MESH_DEFAULT_BEACON_INTERVAL; 519 sdata->vif.bss_conf.beacon_int = MESH_DEFAULT_BEACON_INTERVAL;
523 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON | 520 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON |
524 BSS_CHANGED_BEACON_ENABLED | 521 BSS_CHANGED_BEACON_ENABLED |
@@ -536,16 +533,7 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
536 * whether the interface is running, which, at this point, 533 * whether the interface is running, which, at this point,
537 * it no longer is. 534 * it no longer is.
538 */ 535 */
539 cancel_work_sync(&sdata->u.mesh.work); 536 cancel_work_sync(&sdata->work);
540
541 /*
542 * When we get here, the interface is marked down.
543 * Call synchronize_rcu() to wait for the RX path
544 * should it be using the interface and enqueuing
545 * frames at this very time on another CPU.
546 */
547 rcu_barrier(); /* Wait for RX path and call_rcu()'s */
548 skb_queue_purge(&sdata->u.mesh.skb_queue);
549} 537}
550 538
551static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, 539static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
@@ -608,8 +596,8 @@ static void ieee80211_mesh_rx_mgmt_action(struct ieee80211_sub_if_data *sdata,
608 } 596 }
609} 597}
610 598
611static void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, 599void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
612 struct sk_buff *skb) 600 struct sk_buff *skb)
613{ 601{
614 struct ieee80211_rx_status *rx_status; 602 struct ieee80211_rx_status *rx_status;
615 struct ieee80211_if_mesh *ifmsh; 603 struct ieee80211_if_mesh *ifmsh;
@@ -632,26 +620,11 @@ static void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
632 ieee80211_mesh_rx_mgmt_action(sdata, mgmt, skb->len, rx_status); 620 ieee80211_mesh_rx_mgmt_action(sdata, mgmt, skb->len, rx_status);
633 break; 621 break;
634 } 622 }
635
636 kfree_skb(skb);
637} 623}
638 624
639static void ieee80211_mesh_work(struct work_struct *work) 625void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata)
640{ 626{
641 struct ieee80211_sub_if_data *sdata =
642 container_of(work, struct ieee80211_sub_if_data, u.mesh.work);
643 struct ieee80211_local *local = sdata->local;
644 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 627 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
645 struct sk_buff *skb;
646
647 if (!ieee80211_sdata_running(sdata))
648 return;
649
650 if (local->scanning)
651 return;
652
653 while ((skb = skb_dequeue(&ifmsh->skb_queue)))
654 ieee80211_mesh_rx_queued_mgmt(sdata, skb);
655 628
656 if (ifmsh->preq_queue_len && 629 if (ifmsh->preq_queue_len &&
657 time_after(jiffies, 630 time_after(jiffies,
@@ -678,7 +651,7 @@ void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local)
678 rcu_read_lock(); 651 rcu_read_lock();
679 list_for_each_entry_rcu(sdata, &local->interfaces, list) 652 list_for_each_entry_rcu(sdata, &local->interfaces, list)
680 if (ieee80211_vif_is_mesh(&sdata->vif)) 653 if (ieee80211_vif_is_mesh(&sdata->vif))
681 ieee80211_queue_work(&local->hw, &sdata->u.mesh.work); 654 ieee80211_queue_work(&local->hw, &sdata->work);
682 rcu_read_unlock(); 655 rcu_read_unlock();
683} 656}
684 657
@@ -686,11 +659,9 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
686{ 659{
687 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 660 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
688 661
689 INIT_WORK(&ifmsh->work, ieee80211_mesh_work);
690 setup_timer(&ifmsh->housekeeping_timer, 662 setup_timer(&ifmsh->housekeeping_timer,
691 ieee80211_mesh_housekeeping_timer, 663 ieee80211_mesh_housekeeping_timer,
692 (unsigned long) sdata); 664 (unsigned long) sdata);
693 skb_queue_head_init(&sdata->u.mesh.skb_queue);
694 665
695 ifmsh->mshcfg.dot11MeshRetryTimeout = MESH_RET_T; 666 ifmsh->mshcfg.dot11MeshRetryTimeout = MESH_RET_T;
696 ifmsh->mshcfg.dot11MeshConfirmTimeout = MESH_CONF_T; 667 ifmsh->mshcfg.dot11MeshConfirmTimeout = MESH_CONF_T;
@@ -731,29 +702,3 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
731 INIT_LIST_HEAD(&ifmsh->preq_queue.list); 702 INIT_LIST_HEAD(&ifmsh->preq_queue.list);
732 spin_lock_init(&ifmsh->mesh_preq_queue_lock); 703 spin_lock_init(&ifmsh->mesh_preq_queue_lock);
733} 704}
734
735ieee80211_rx_result
736ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
737{
738 struct ieee80211_local *local = sdata->local;
739 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
740 struct ieee80211_mgmt *mgmt;
741 u16 fc;
742
743 if (skb->len < 24)
744 return RX_DROP_MONITOR;
745
746 mgmt = (struct ieee80211_mgmt *) skb->data;
747 fc = le16_to_cpu(mgmt->frame_control);
748
749 switch (fc & IEEE80211_FCTL_STYPE) {
750 case IEEE80211_STYPE_ACTION:
751 case IEEE80211_STYPE_PROBE_RESP:
752 case IEEE80211_STYPE_BEACON:
753 skb_queue_tail(&ifmsh->skb_queue, skb);
754 ieee80211_queue_work(&local->hw, &ifmsh->work);
755 return RX_QUEUED;
756 }
757
758 return RX_CONTINUE;
759}
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index c88087f1cd0f..ebd3f1d9d889 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -237,8 +237,6 @@ void ieee80211s_update_metric(struct ieee80211_local *local,
237 struct sta_info *stainfo, struct sk_buff *skb); 237 struct sta_info *stainfo, struct sk_buff *skb);
238void ieee80211s_stop(void); 238void ieee80211s_stop(void);
239void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata); 239void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata);
240ieee80211_rx_result
241ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb);
242void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata); 240void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata);
243void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata); 241void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata);
244void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh); 242void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh);
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 0705018d8d1e..829e08a657d0 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -805,14 +805,14 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
805 spin_unlock(&ifmsh->mesh_preq_queue_lock); 805 spin_unlock(&ifmsh->mesh_preq_queue_lock);
806 806
807 if (time_after(jiffies, ifmsh->last_preq + min_preq_int_jiff(sdata))) 807 if (time_after(jiffies, ifmsh->last_preq + min_preq_int_jiff(sdata)))
808 ieee80211_queue_work(&sdata->local->hw, &ifmsh->work); 808 ieee80211_queue_work(&sdata->local->hw, &sdata->work);
809 809
810 else if (time_before(jiffies, ifmsh->last_preq)) { 810 else if (time_before(jiffies, ifmsh->last_preq)) {
811 /* avoid long wait if did not send preqs for a long time 811 /* avoid long wait if did not send preqs for a long time
812 * and jiffies wrapped around 812 * and jiffies wrapped around
813 */ 813 */
814 ifmsh->last_preq = jiffies - min_preq_int_jiff(sdata) - 1; 814 ifmsh->last_preq = jiffies - min_preq_int_jiff(sdata) - 1;
815 ieee80211_queue_work(&sdata->local->hw, &ifmsh->work); 815 ieee80211_queue_work(&sdata->local->hw, &sdata->work);
816 } else 816 } else
817 mod_timer(&ifmsh->mesh_path_timer, ifmsh->last_preq + 817 mod_timer(&ifmsh->mesh_path_timer, ifmsh->last_preq +
818 min_preq_int_jiff(sdata)); 818 min_preq_int_jiff(sdata));
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 181ffd6efd81..349e466cf08b 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -315,7 +315,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
315 read_unlock(&pathtbl_resize_lock); 315 read_unlock(&pathtbl_resize_lock);
316 if (grow) { 316 if (grow) {
317 set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags); 317 set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
318 ieee80211_queue_work(&local->hw, &ifmsh->work); 318 ieee80211_queue_work(&local->hw, &sdata->work);
319 } 319 }
320 return 0; 320 return 0;
321 321
@@ -425,7 +425,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
425 read_unlock(&pathtbl_resize_lock); 425 read_unlock(&pathtbl_resize_lock);
426 if (grow) { 426 if (grow) {
427 set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags); 427 set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
428 ieee80211_queue_work(&local->hw, &ifmsh->work); 428 ieee80211_queue_work(&local->hw, &sdata->work);
429 } 429 }
430 return 0; 430 return 0;
431 431
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 3cd5f7b5d693..ea13a80a476c 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -65,7 +65,6 @@ void mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata)
65{ 65{
66 atomic_inc(&sdata->u.mesh.mshstats.estab_plinks); 66 atomic_inc(&sdata->u.mesh.mshstats.estab_plinks);
67 mesh_accept_plinks_update(sdata); 67 mesh_accept_plinks_update(sdata);
68 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
69} 68}
70 69
71static inline 70static inline
@@ -73,7 +72,6 @@ void mesh_plink_dec_estab_count(struct ieee80211_sub_if_data *sdata)
73{ 72{
74 atomic_dec(&sdata->u.mesh.mshstats.estab_plinks); 73 atomic_dec(&sdata->u.mesh.mshstats.estab_plinks);
75 mesh_accept_plinks_update(sdata); 74 mesh_accept_plinks_update(sdata);
76 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
77} 75}
78 76
79/** 77/**
@@ -115,7 +113,7 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
115} 113}
116 114
117/** 115/**
118 * mesh_plink_deactivate - deactivate mesh peer link 116 * __mesh_plink_deactivate - deactivate mesh peer link
119 * 117 *
120 * @sta: mesh peer link to deactivate 118 * @sta: mesh peer link to deactivate
121 * 119 *
@@ -123,18 +121,23 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
123 * 121 *
124 * Locking: the caller must hold sta->lock 122 * Locking: the caller must hold sta->lock
125 */ 123 */
126static void __mesh_plink_deactivate(struct sta_info *sta) 124static bool __mesh_plink_deactivate(struct sta_info *sta)
127{ 125{
128 struct ieee80211_sub_if_data *sdata = sta->sdata; 126 struct ieee80211_sub_if_data *sdata = sta->sdata;
127 bool deactivated = false;
129 128
130 if (sta->plink_state == PLINK_ESTAB) 129 if (sta->plink_state == PLINK_ESTAB) {
131 mesh_plink_dec_estab_count(sdata); 130 mesh_plink_dec_estab_count(sdata);
131 deactivated = true;
132 }
132 sta->plink_state = PLINK_BLOCKED; 133 sta->plink_state = PLINK_BLOCKED;
133 mesh_path_flush_by_nexthop(sta); 134 mesh_path_flush_by_nexthop(sta);
135
136 return deactivated;
134} 137}
135 138
136/** 139/**
137 * __mesh_plink_deactivate - deactivate mesh peer link 140 * mesh_plink_deactivate - deactivate mesh peer link
138 * 141 *
139 * @sta: mesh peer link to deactivate 142 * @sta: mesh peer link to deactivate
140 * 143 *
@@ -142,9 +145,15 @@ static void __mesh_plink_deactivate(struct sta_info *sta)
142 */ 145 */
143void mesh_plink_deactivate(struct sta_info *sta) 146void mesh_plink_deactivate(struct sta_info *sta)
144{ 147{
148 struct ieee80211_sub_if_data *sdata = sta->sdata;
149 bool deactivated;
150
145 spin_lock_bh(&sta->lock); 151 spin_lock_bh(&sta->lock);
146 __mesh_plink_deactivate(sta); 152 deactivated = __mesh_plink_deactivate(sta);
147 spin_unlock_bh(&sta->lock); 153 spin_unlock_bh(&sta->lock);
154
155 if (deactivated)
156 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
148} 157}
149 158
150static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata, 159static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
@@ -381,10 +390,16 @@ int mesh_plink_open(struct sta_info *sta)
381 390
382void mesh_plink_block(struct sta_info *sta) 391void mesh_plink_block(struct sta_info *sta)
383{ 392{
393 struct ieee80211_sub_if_data *sdata = sta->sdata;
394 bool deactivated;
395
384 spin_lock_bh(&sta->lock); 396 spin_lock_bh(&sta->lock);
385 __mesh_plink_deactivate(sta); 397 deactivated = __mesh_plink_deactivate(sta);
386 sta->plink_state = PLINK_BLOCKED; 398 sta->plink_state = PLINK_BLOCKED;
387 spin_unlock_bh(&sta->lock); 399 spin_unlock_bh(&sta->lock);
400
401 if (deactivated)
402 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
388} 403}
389 404
390 405
@@ -397,6 +412,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
397 enum plink_event event; 412 enum plink_event event;
398 enum plink_frame_type ftype; 413 enum plink_frame_type ftype;
399 size_t baselen; 414 size_t baselen;
415 bool deactivated;
400 u8 ie_len; 416 u8 ie_len;
401 u8 *baseaddr; 417 u8 *baseaddr;
402 __le16 plid, llid, reason; 418 __le16 plid, llid, reason;
@@ -651,8 +667,9 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
651 case CNF_ACPT: 667 case CNF_ACPT:
652 del_timer(&sta->plink_timer); 668 del_timer(&sta->plink_timer);
653 sta->plink_state = PLINK_ESTAB; 669 sta->plink_state = PLINK_ESTAB;
654 mesh_plink_inc_estab_count(sdata);
655 spin_unlock_bh(&sta->lock); 670 spin_unlock_bh(&sta->lock);
671 mesh_plink_inc_estab_count(sdata);
672 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
656 mpl_dbg("Mesh plink with %pM ESTABLISHED\n", 673 mpl_dbg("Mesh plink with %pM ESTABLISHED\n",
657 sta->sta.addr); 674 sta->sta.addr);
658 break; 675 break;
@@ -684,8 +701,9 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
684 case OPN_ACPT: 701 case OPN_ACPT:
685 del_timer(&sta->plink_timer); 702 del_timer(&sta->plink_timer);
686 sta->plink_state = PLINK_ESTAB; 703 sta->plink_state = PLINK_ESTAB;
687 mesh_plink_inc_estab_count(sdata);
688 spin_unlock_bh(&sta->lock); 704 spin_unlock_bh(&sta->lock);
705 mesh_plink_inc_estab_count(sdata);
706 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
689 mpl_dbg("Mesh plink with %pM ESTABLISHED\n", 707 mpl_dbg("Mesh plink with %pM ESTABLISHED\n",
690 sta->sta.addr); 708 sta->sta.addr);
691 mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid, 709 mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid,
@@ -702,11 +720,13 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
702 case CLS_ACPT: 720 case CLS_ACPT:
703 reason = cpu_to_le16(MESH_CLOSE_RCVD); 721 reason = cpu_to_le16(MESH_CLOSE_RCVD);
704 sta->reason = reason; 722 sta->reason = reason;
705 __mesh_plink_deactivate(sta); 723 deactivated = __mesh_plink_deactivate(sta);
706 sta->plink_state = PLINK_HOLDING; 724 sta->plink_state = PLINK_HOLDING;
707 llid = sta->llid; 725 llid = sta->llid;
708 mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata)); 726 mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata));
709 spin_unlock_bh(&sta->lock); 727 spin_unlock_bh(&sta->lock);
728 if (deactivated)
729 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
710 mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid, 730 mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid,
711 plid, reason); 731 plid, reason);
712 break; 732 break;
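
A pattern worth noting in the mesh_plink.c hunks above: every call to ieee80211_bss_info_change_notify() is moved out from under sta->lock, with a boolean computed inside the critical section deciding whether to notify once the lock is dropped. This is the usual fix when a notification path may sleep or call back into the driver and therefore must not run under spin_lock_bh(). A minimal userspace sketch of the same shape (names hypothetical; a pthread mutex stands in for the spinlock):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

enum plink_state { ESTAB, BLOCKED };

struct peer {
        pthread_mutex_t lock;
        enum plink_state state;
};

/* caller holds p->lock; reports whether a notification is owed */
static bool deactivate_locked(struct peer *p)
{
        bool was_estab = (p->state == ESTAB);

        p->state = BLOCKED;
        return was_estab;
}

static void notify_beacon_change(struct peer *p)
{
        /* stand-in for ieee80211_bss_info_change_notify(); may sleep */
        printf("beacon changed\n");
}

void deactivate(struct peer *p)
{
        bool notify;

        pthread_mutex_lock(&p->lock);
        notify = deactivate_locked(p);
        pthread_mutex_unlock(&p->lock);

        if (notify)             /* lock dropped: safe to sleep here */
                notify_beacon_change(p);
}

int main(void)
{
        struct peer p = { PTHREAD_MUTEX_INITIALIZER, ESTAB };

        deactivate(&p);         /* prints "beacon changed" once */
        deactivate(&p);         /* second call: no notification */
        return 0;
}
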
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index f803f8b72a93..b6c163ac22da 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -478,6 +478,39 @@ static void ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata,
478 } 478 }
479} 479}
480 480
481void ieee80211_enable_dyn_ps(struct ieee80211_vif *vif)
482{
483 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
484 struct ieee80211_local *local = sdata->local;
485 struct ieee80211_conf *conf = &local->hw.conf;
486
487 WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION ||
488 !(local->hw.flags & IEEE80211_HW_SUPPORTS_PS) ||
489 (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS));
490
491 local->disable_dynamic_ps = false;
492 conf->dynamic_ps_timeout = local->dynamic_ps_user_timeout;
493}
494EXPORT_SYMBOL(ieee80211_enable_dyn_ps);
495
496void ieee80211_disable_dyn_ps(struct ieee80211_vif *vif)
497{
498 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
499 struct ieee80211_local *local = sdata->local;
500 struct ieee80211_conf *conf = &local->hw.conf;
501
502 WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION ||
503 !(local->hw.flags & IEEE80211_HW_SUPPORTS_PS) ||
504 (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS));
505
506 local->disable_dynamic_ps = true;
507 conf->dynamic_ps_timeout = 0;
508 del_timer_sync(&local->dynamic_ps_timer);
509 ieee80211_queue_work(&local->hw,
510 &local->dynamic_ps_enable_work);
511}
512EXPORT_SYMBOL(ieee80211_disable_dyn_ps);
513
481/* powersave */ 514/* powersave */
482static void ieee80211_enable_ps(struct ieee80211_local *local, 515static void ieee80211_enable_ps(struct ieee80211_local *local,
483 struct ieee80211_sub_if_data *sdata) 516 struct ieee80211_sub_if_data *sdata)
@@ -553,6 +586,7 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
553 found->u.mgd.associated->beacon_ies && 586 found->u.mgd.associated->beacon_ies &&
554 !(found->u.mgd.flags & (IEEE80211_STA_BEACON_POLL | 587 !(found->u.mgd.flags & (IEEE80211_STA_BEACON_POLL |
555 IEEE80211_STA_CONNECTION_POLL))) { 588 IEEE80211_STA_CONNECTION_POLL))) {
589 struct ieee80211_conf *conf = &local->hw.conf;
556 s32 beaconint_us; 590 s32 beaconint_us;
557 591
558 if (latency < 0) 592 if (latency < 0)
@@ -561,25 +595,24 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
561 beaconint_us = ieee80211_tu_to_usec( 595 beaconint_us = ieee80211_tu_to_usec(
562 found->vif.bss_conf.beacon_int); 596 found->vif.bss_conf.beacon_int);
563 597
564 timeout = local->hw.conf.dynamic_ps_forced_timeout; 598 timeout = local->dynamic_ps_forced_timeout;
565 if (timeout < 0) { 599 if (timeout < 0) {
566 /* 600 /*
601 * Go to full PSM if the user configures a very loose
602 * latency requirement (i.e. a very large latency value).
567 * The 2 second value is there for compatibility until 603 * The 2 second value is there for compatibility until
568 * the PM_QOS_NETWORK_LATENCY is configured with real 604 * the PM_QOS_NETWORK_LATENCY is configured with real
569 * values. 605 * values.
570 */ 606 */
571 if (latency == 2000000000) 607 if (latency > 1900000000 && latency != 2000000000)
572 timeout = 100;
573 else if (latency <= 50000)
574 timeout = 300;
575 else if (latency <= 100000)
576 timeout = 100;
577 else if (latency <= 500000)
578 timeout = 50;
579 else
580 timeout = 0; 608 timeout = 0;
609 else
610 timeout = 100;
581 } 611 }
582 local->hw.conf.dynamic_ps_timeout = timeout; 612 local->dynamic_ps_user_timeout = timeout;
613 if (!local->disable_dynamic_ps)
614 conf->dynamic_ps_timeout =
615 local->dynamic_ps_user_timeout;
583 616
584 if (beaconint_us > latency) { 617 if (beaconint_us > latency) {
585 local->ps_sdata = NULL; 618 local->ps_sdata = NULL;
@@ -665,10 +698,11 @@ void ieee80211_dynamic_ps_timer(unsigned long data)
665 698
666/* MLME */ 699/* MLME */
667static void ieee80211_sta_wmm_params(struct ieee80211_local *local, 700static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
668 struct ieee80211_if_managed *ifmgd, 701 struct ieee80211_sub_if_data *sdata,
669 u8 *wmm_param, size_t wmm_param_len) 702 u8 *wmm_param, size_t wmm_param_len)
670{ 703{
671 struct ieee80211_tx_queue_params params; 704 struct ieee80211_tx_queue_params params;
705 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
672 size_t left; 706 size_t left;
673 int count; 707 int count;
674 u8 *pos, uapsd_queues = 0; 708 u8 *pos, uapsd_queues = 0;
@@ -757,8 +791,8 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
757 } 791 }
758 792
759 /* enable WMM or activate new settings */ 793 /* enable WMM or activate new settings */
760 local->hw.conf.flags |= IEEE80211_CONF_QOS; 794 sdata->vif.bss_conf.qos = true;
761 drv_config(local, IEEE80211_CONF_CHANGE_QOS); 795 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_QOS);
762} 796}
763 797
764static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata, 798static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
@@ -806,11 +840,12 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
806{ 840{
807 struct ieee80211_bss *bss = (void *)cbss->priv; 841 struct ieee80211_bss *bss = (void *)cbss->priv;
808 struct ieee80211_local *local = sdata->local; 842 struct ieee80211_local *local = sdata->local;
843 struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
809 844
810 bss_info_changed |= BSS_CHANGED_ASSOC; 845 bss_info_changed |= BSS_CHANGED_ASSOC;
811 /* set timing information */ 846 /* set timing information */
812 sdata->vif.bss_conf.beacon_int = cbss->beacon_interval; 847 bss_conf->beacon_int = cbss->beacon_interval;
813 sdata->vif.bss_conf.timestamp = cbss->tsf; 848 bss_conf->timestamp = cbss->tsf;
814 849
815 bss_info_changed |= BSS_CHANGED_BEACON_INT; 850 bss_info_changed |= BSS_CHANGED_BEACON_INT;
816 bss_info_changed |= ieee80211_handle_bss_capability(sdata, 851 bss_info_changed |= ieee80211_handle_bss_capability(sdata,
@@ -835,7 +870,12 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
835 870
836 ieee80211_led_assoc(local, 1); 871 ieee80211_led_assoc(local, 1);
837 872
838 sdata->vif.bss_conf.assoc = 1; 873 if (local->hw.flags & IEEE80211_HW_NEED_DTIM_PERIOD)
874 bss_conf->dtim_period = bss->dtim_period;
875 else
876 bss_conf->dtim_period = 0;
877
878 bss_conf->assoc = 1;
839 /* 879 /*
840 * For now just always ask the driver to update the basic rateset 880 * For now just always ask the driver to update the basic rateset
841 * when we have associated, we aren't checking whether it actually 881 * when we have associated, we aren't checking whether it actually
@@ -848,9 +888,15 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
848 888
849 /* Tell the driver to monitor connection quality (if supported) */ 889 /* Tell the driver to monitor connection quality (if supported) */
850 if ((local->hw.flags & IEEE80211_HW_SUPPORTS_CQM_RSSI) && 890 if ((local->hw.flags & IEEE80211_HW_SUPPORTS_CQM_RSSI) &&
851 sdata->vif.bss_conf.cqm_rssi_thold) 891 bss_conf->cqm_rssi_thold)
852 bss_info_changed |= BSS_CHANGED_CQM; 892 bss_info_changed |= BSS_CHANGED_CQM;
853 893
894 /* Enable ARP filtering */
895 if (bss_conf->arp_filter_enabled != sdata->arp_filter_state) {
896 bss_conf->arp_filter_enabled = sdata->arp_filter_state;
897 bss_info_changed |= BSS_CHANGED_ARP_FILTER;
898 }
899
854 ieee80211_bss_info_change_notify(sdata, bss_info_changed); 900 ieee80211_bss_info_change_notify(sdata, bss_info_changed);
855 901
856 mutex_lock(&local->iflist_mtx); 902 mutex_lock(&local->iflist_mtx);
@@ -898,13 +944,13 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
898 netif_tx_stop_all_queues(sdata->dev); 944 netif_tx_stop_all_queues(sdata->dev);
899 netif_carrier_off(sdata->dev); 945 netif_carrier_off(sdata->dev);
900 946
901 rcu_read_lock(); 947 mutex_lock(&local->sta_mtx);
902 sta = sta_info_get(sdata, bssid); 948 sta = sta_info_get(sdata, bssid);
903 if (sta) { 949 if (sta) {
904 set_sta_flags(sta, WLAN_STA_DISASSOC); 950 set_sta_flags(sta, WLAN_STA_BLOCK_BA);
905 ieee80211_sta_tear_down_BA_sessions(sta); 951 ieee80211_sta_tear_down_BA_sessions(sta);
906 } 952 }
907 rcu_read_unlock(); 953 mutex_unlock(&local->sta_mtx);
908 954
909 changed |= ieee80211_reset_erp_info(sdata); 955 changed |= ieee80211_reset_erp_info(sdata);
910 956
@@ -932,6 +978,12 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
932 978
933 ieee80211_hw_config(local, config_changed); 979 ieee80211_hw_config(local, config_changed);
934 980
981 /* Disable ARP filtering */
982 if (sdata->vif.bss_conf.arp_filter_enabled) {
983 sdata->vif.bss_conf.arp_filter_enabled = false;
984 changed |= BSS_CHANGED_ARP_FILTER;
985 }
986
935 /* The BSSID (not really interesting) and HT changed */ 987 /* The BSSID (not really interesting) and HT changed */
936 changed |= BSS_CHANGED_BSSID | BSS_CHANGED_HT; 988 changed |= BSS_CHANGED_BSSID | BSS_CHANGED_HT;
937 ieee80211_bss_info_change_notify(sdata, changed); 989 ieee80211_bss_info_change_notify(sdata, changed);
@@ -1279,7 +1331,7 @@ static bool ieee80211_assoc_success(struct ieee80211_work *wk,
1279 } 1331 }
1280 1332
1281 if (elems.wmm_param) 1333 if (elems.wmm_param)
1282 ieee80211_sta_wmm_params(local, ifmgd, elems.wmm_param, 1334 ieee80211_sta_wmm_params(local, sdata, elems.wmm_param,
1283 elems.wmm_param_len); 1335 elems.wmm_param_len);
1284 else 1336 else
1285 ieee80211_set_wmm_default(sdata); 1337 ieee80211_set_wmm_default(sdata);
@@ -1551,7 +1603,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
1551 ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, 1603 ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems,
1552 true); 1604 true);
1553 1605
1554 ieee80211_sta_wmm_params(local, ifmgd, elems.wmm_param, 1606 ieee80211_sta_wmm_params(local, sdata, elems.wmm_param,
1555 elems.wmm_param_len); 1607 elems.wmm_param_len);
1556 } 1608 }
1557 1609
@@ -1633,35 +1685,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
1633 ieee80211_bss_info_change_notify(sdata, changed); 1685 ieee80211_bss_info_change_notify(sdata, changed);
1634} 1686}
1635 1687
1636ieee80211_rx_result ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata, 1688void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
1637 struct sk_buff *skb) 1689 struct sk_buff *skb)
1638{
1639 struct ieee80211_local *local = sdata->local;
1640 struct ieee80211_mgmt *mgmt;
1641 u16 fc;
1642
1643 if (skb->len < 24)
1644 return RX_DROP_MONITOR;
1645
1646 mgmt = (struct ieee80211_mgmt *) skb->data;
1647 fc = le16_to_cpu(mgmt->frame_control);
1648
1649 switch (fc & IEEE80211_FCTL_STYPE) {
1650 case IEEE80211_STYPE_PROBE_RESP:
1651 case IEEE80211_STYPE_BEACON:
1652 case IEEE80211_STYPE_DEAUTH:
1653 case IEEE80211_STYPE_DISASSOC:
1654 case IEEE80211_STYPE_ACTION:
1655 skb_queue_tail(&sdata->u.mgd.skb_queue, skb);
1656 ieee80211_queue_work(&local->hw, &sdata->u.mgd.work);
1657 return RX_QUEUED;
1658 }
1659
1660 return RX_DROP_MONITOR;
1661}
1662
1663static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
1664 struct sk_buff *skb)
1665{ 1690{
1666 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 1691 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1667 struct ieee80211_rx_status *rx_status; 1692 struct ieee80211_rx_status *rx_status;
@@ -1693,44 +1718,6 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
1693 break; 1718 break;
1694 case IEEE80211_STYPE_ACTION: 1719 case IEEE80211_STYPE_ACTION:
1695 switch (mgmt->u.action.category) { 1720 switch (mgmt->u.action.category) {
1696 case WLAN_CATEGORY_BACK: {
1697 struct ieee80211_local *local = sdata->local;
1698 int len = skb->len;
1699 struct sta_info *sta;
1700
1701 rcu_read_lock();
1702 sta = sta_info_get(sdata, mgmt->sa);
1703 if (!sta) {
1704 rcu_read_unlock();
1705 break;
1706 }
1707
1708 local_bh_disable();
1709
1710 switch (mgmt->u.action.u.addba_req.action_code) {
1711 case WLAN_ACTION_ADDBA_REQ:
1712 if (len < (IEEE80211_MIN_ACTION_SIZE +
1713 sizeof(mgmt->u.action.u.addba_req)))
1714 break;
1715 ieee80211_process_addba_request(local, sta, mgmt, len);
1716 break;
1717 case WLAN_ACTION_ADDBA_RESP:
1718 if (len < (IEEE80211_MIN_ACTION_SIZE +
1719 sizeof(mgmt->u.action.u.addba_resp)))
1720 break;
1721 ieee80211_process_addba_resp(local, sta, mgmt, len);
1722 break;
1723 case WLAN_ACTION_DELBA:
1724 if (len < (IEEE80211_MIN_ACTION_SIZE +
1725 sizeof(mgmt->u.action.u.delba)))
1726 break;
1727 ieee80211_process_delba(sdata, sta, mgmt, len);
1728 break;
1729 }
1730 local_bh_enable();
1731 rcu_read_unlock();
1732 break;
1733 }
1734 case WLAN_CATEGORY_SPECTRUM_MGMT: 1721 case WLAN_CATEGORY_SPECTRUM_MGMT:
1735 ieee80211_sta_process_chanswitch(sdata, 1722 ieee80211_sta_process_chanswitch(sdata,
1736 &mgmt->u.action.u.chan_switch.sw_elem, 1723 &mgmt->u.action.u.chan_switch.sw_elem,
@@ -1754,7 +1741,7 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
1754 default: 1741 default:
1755 WARN(1, "unexpected: %d", rma); 1742 WARN(1, "unexpected: %d", rma);
1756 } 1743 }
1757 goto out; 1744 return;
1758 } 1745 }
1759 1746
1760 mutex_unlock(&ifmgd->mtx); 1747 mutex_unlock(&ifmgd->mtx);
@@ -1769,7 +1756,8 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
1769 if (wk->sdata != sdata) 1756 if (wk->sdata != sdata)
1770 continue; 1757 continue;
1771 1758
1772 if (wk->type != IEEE80211_WORK_ASSOC) 1759 if (wk->type != IEEE80211_WORK_ASSOC &&
1760 wk->type != IEEE80211_WORK_ASSOC_BEACON_WAIT)
1773 continue; 1761 continue;
1774 1762
1775 if (memcmp(mgmt->bssid, wk->filter_ta, ETH_ALEN)) 1763 if (memcmp(mgmt->bssid, wk->filter_ta, ETH_ALEN))
@@ -1799,8 +1787,6 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
1799 1787
1800 cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len); 1788 cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len);
1801 } 1789 }
1802 out:
1803 kfree_skb(skb);
1804} 1790}
1805 1791
1806static void ieee80211_sta_timer(unsigned long data) 1792static void ieee80211_sta_timer(unsigned long data)
@@ -1815,39 +1801,13 @@ static void ieee80211_sta_timer(unsigned long data)
1815 return; 1801 return;
1816 } 1802 }
1817 1803
1818 ieee80211_queue_work(&local->hw, &ifmgd->work); 1804 ieee80211_queue_work(&local->hw, &sdata->work);
1819} 1805}
1820 1806
1821static void ieee80211_sta_work(struct work_struct *work) 1807void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
1822{ 1808{
1823 struct ieee80211_sub_if_data *sdata =
1824 container_of(work, struct ieee80211_sub_if_data, u.mgd.work);
1825 struct ieee80211_local *local = sdata->local; 1809 struct ieee80211_local *local = sdata->local;
1826 struct ieee80211_if_managed *ifmgd; 1810 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1827 struct sk_buff *skb;
1828
1829 if (!ieee80211_sdata_running(sdata))
1830 return;
1831
1832 if (local->scanning)
1833 return;
1834
1835 if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION))
1836 return;
1837
1838 /*
1839 * ieee80211_queue_work() should have picked up most cases,
1840 * here we'll pick up the rest.
1841 */
1842 if (WARN(local->suspended, "STA MLME work scheduled while "
1843 "going to suspend\n"))
1844 return;
1845
1846 ifmgd = &sdata->u.mgd;
1847
1848 /* first process frames to avoid timing out while a frame is pending */
1849 while ((skb = skb_dequeue(&ifmgd->skb_queue)))
1850 ieee80211_sta_rx_queued_mgmt(sdata, skb);
1851 1811
1852 /* then process the rest of the work */ 1812 /* then process the rest of the work */
1853 mutex_lock(&ifmgd->mtx); 1813 mutex_lock(&ifmgd->mtx);
@@ -1942,8 +1902,7 @@ static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata)
1942 ieee80211_queue_work(&sdata->local->hw, 1902 ieee80211_queue_work(&sdata->local->hw,
1943 &sdata->u.mgd.monitor_work); 1903 &sdata->u.mgd.monitor_work);
1944 /* and do all the other regular work too */ 1904 /* and do all the other regular work too */
1945 ieee80211_queue_work(&sdata->local->hw, 1905 ieee80211_queue_work(&sdata->local->hw, &sdata->work);
1946 &sdata->u.mgd.work);
1947 } 1906 }
1948} 1907}
1949 1908
@@ -1958,7 +1917,6 @@ void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata)
1958 * time -- the code here is properly synchronised. 1917 * time -- the code here is properly synchronised.
1959 */ 1918 */
1960 1919
1961 cancel_work_sync(&ifmgd->work);
1962 cancel_work_sync(&ifmgd->beacon_connection_loss_work); 1920 cancel_work_sync(&ifmgd->beacon_connection_loss_work);
1963 if (del_timer_sync(&ifmgd->timer)) 1921 if (del_timer_sync(&ifmgd->timer))
1964 set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running); 1922 set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running);
@@ -1990,7 +1948,6 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
1990 struct ieee80211_if_managed *ifmgd; 1948 struct ieee80211_if_managed *ifmgd;
1991 1949
1992 ifmgd = &sdata->u.mgd; 1950 ifmgd = &sdata->u.mgd;
1993 INIT_WORK(&ifmgd->work, ieee80211_sta_work);
1994 INIT_WORK(&ifmgd->monitor_work, ieee80211_sta_monitor_work); 1951 INIT_WORK(&ifmgd->monitor_work, ieee80211_sta_monitor_work);
1995 INIT_WORK(&ifmgd->chswitch_work, ieee80211_chswitch_work); 1952 INIT_WORK(&ifmgd->chswitch_work, ieee80211_chswitch_work);
1996 INIT_WORK(&ifmgd->beacon_connection_loss_work, 1953 INIT_WORK(&ifmgd->beacon_connection_loss_work,
@@ -2003,7 +1960,6 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
2003 (unsigned long) sdata); 1960 (unsigned long) sdata);
2004 setup_timer(&ifmgd->chswitch_timer, ieee80211_chswitch_timer, 1961 setup_timer(&ifmgd->chswitch_timer, ieee80211_chswitch_timer,
2005 (unsigned long) sdata); 1962 (unsigned long) sdata);
2006 skb_queue_head_init(&ifmgd->skb_queue);
2007 1963
2008 ifmgd->flags = 0; 1964 ifmgd->flags = 0;
2009 1965
@@ -2081,6 +2037,8 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
2081 auth_alg = WLAN_AUTH_OPEN; 2037 auth_alg = WLAN_AUTH_OPEN;
2082 break; 2038 break;
2083 case NL80211_AUTHTYPE_SHARED_KEY: 2039 case NL80211_AUTHTYPE_SHARED_KEY:
2040 if (IS_ERR(sdata->local->wep_tx_tfm))
2041 return -EOPNOTSUPP;
2084 auth_alg = WLAN_AUTH_SHARED_KEY; 2042 auth_alg = WLAN_AUTH_SHARED_KEY;
2085 break; 2043 break;
2086 case NL80211_AUTHTYPE_FT: 2044 case NL80211_AUTHTYPE_FT:
@@ -2134,6 +2092,8 @@ static enum work_done_result ieee80211_assoc_done(struct ieee80211_work *wk,
2134 struct sk_buff *skb) 2092 struct sk_buff *skb)
2135{ 2093{
2136 struct ieee80211_mgmt *mgmt; 2094 struct ieee80211_mgmt *mgmt;
2095 struct ieee80211_rx_status *rx_status;
2096 struct ieee802_11_elems elems;
2137 u16 status; 2097 u16 status;
2138 2098
2139 if (!skb) { 2099 if (!skb) {
@@ -2141,6 +2101,19 @@ static enum work_done_result ieee80211_assoc_done(struct ieee80211_work *wk,
2141 return WORK_DONE_DESTROY; 2101 return WORK_DONE_DESTROY;
2142 } 2102 }
2143 2103
2104 if (wk->type == IEEE80211_WORK_ASSOC_BEACON_WAIT) {
2105 mutex_lock(&wk->sdata->u.mgd.mtx);
2106 rx_status = (void *) skb->cb;
2107 ieee802_11_parse_elems(skb->data + 24 + 12, skb->len - 24 - 12, &elems);
2108 ieee80211_rx_bss_info(wk->sdata, (void *)skb->data, skb->len, rx_status,
2109 &elems, true);
2110 mutex_unlock(&wk->sdata->u.mgd.mtx);
2111
2112 wk->type = IEEE80211_WORK_ASSOC;
2113 /* not really done yet */
2114 return WORK_DONE_REQUEUE;
2115 }
2116
2144 mgmt = (void *)skb->data; 2117 mgmt = (void *)skb->data;
2145 status = le16_to_cpu(mgmt->u.assoc_resp.status_code); 2118 status = le16_to_cpu(mgmt->u.assoc_resp.status_code);
2146 2119
@@ -2153,6 +2126,7 @@ static enum work_done_result ieee80211_assoc_done(struct ieee80211_work *wk,
2153 wk->filter_ta); 2126 wk->filter_ta);
2154 return WORK_DONE_DESTROY; 2127 return WORK_DONE_DESTROY;
2155 } 2128 }
2129
2156 mutex_unlock(&wk->sdata->u.mgd.mtx); 2130 mutex_unlock(&wk->sdata->u.mgd.mtx);
2157 } 2131 }
2158 2132
@@ -2253,10 +2227,14 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
2253 if (req->prev_bssid) 2227 if (req->prev_bssid)
2254 memcpy(wk->assoc.prev_bssid, req->prev_bssid, ETH_ALEN); 2228 memcpy(wk->assoc.prev_bssid, req->prev_bssid, ETH_ALEN);
2255 2229
2256 wk->type = IEEE80211_WORK_ASSOC;
2257 wk->chan = req->bss->channel; 2230 wk->chan = req->bss->channel;
2258 wk->sdata = sdata; 2231 wk->sdata = sdata;
2259 wk->done = ieee80211_assoc_done; 2232 wk->done = ieee80211_assoc_done;
2233 if (!bss->dtim_period &&
2234 sdata->local->hw.flags & IEEE80211_HW_NEED_DTIM_PERIOD)
2235 wk->type = IEEE80211_WORK_ASSOC_BEACON_WAIT;
2236 else
2237 wk->type = IEEE80211_WORK_ASSOC;
2260 2238
2261 if (req->use_mfp) { 2239 if (req->use_mfp) {
2262 ifmgd->mfp = IEEE80211_MFP_REQUIRED; 2240 ifmgd->mfp = IEEE80211_MFP_REQUIRED;
@@ -2282,14 +2260,16 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
2282 struct ieee80211_local *local = sdata->local; 2260 struct ieee80211_local *local = sdata->local;
2283 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 2261 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
2284 struct ieee80211_work *wk; 2262 struct ieee80211_work *wk;
2285 const u8 *bssid = req->bss->bssid; 2263 u8 bssid[ETH_ALEN];
2264 bool assoc_bss = false;
2286 2265
2287 mutex_lock(&ifmgd->mtx); 2266 mutex_lock(&ifmgd->mtx);
2288 2267
2268 memcpy(bssid, req->bss->bssid, ETH_ALEN);
2289 if (ifmgd->associated == req->bss) { 2269 if (ifmgd->associated == req->bss) {
2290 bssid = req->bss->bssid; 2270 ieee80211_set_disassoc(sdata, false);
2291 ieee80211_set_disassoc(sdata, true);
2292 mutex_unlock(&ifmgd->mtx); 2271 mutex_unlock(&ifmgd->mtx);
2272 assoc_bss = true;
2293 } else { 2273 } else {
2294 bool not_auth_yet = false; 2274 bool not_auth_yet = false;
2295 2275
@@ -2302,7 +2282,8 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
2302 2282
2303 if (wk->type != IEEE80211_WORK_DIRECT_PROBE && 2283 if (wk->type != IEEE80211_WORK_DIRECT_PROBE &&
2304 wk->type != IEEE80211_WORK_AUTH && 2284 wk->type != IEEE80211_WORK_AUTH &&
2305 wk->type != IEEE80211_WORK_ASSOC) 2285 wk->type != IEEE80211_WORK_ASSOC &&
2286 wk->type != IEEE80211_WORK_ASSOC_BEACON_WAIT)
2306 continue; 2287 continue;
2307 2288
2308 if (memcmp(req->bss->bssid, wk->filter_ta, ETH_ALEN)) 2289 if (memcmp(req->bss->bssid, wk->filter_ta, ETH_ALEN))
@@ -2335,6 +2316,8 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
2335 ieee80211_send_deauth_disassoc(sdata, bssid, IEEE80211_STYPE_DEAUTH, 2316 ieee80211_send_deauth_disassoc(sdata, bssid, IEEE80211_STYPE_DEAUTH,
2336 req->reason_code, cookie, 2317 req->reason_code, cookie,
2337 !req->local_state_change); 2318 !req->local_state_change);
2319 if (assoc_bss)
2320 sta_info_destroy_addr(sdata, bssid);
2338 2321
2339 ieee80211_recalc_idle(sdata->local); 2322 ieee80211_recalc_idle(sdata->local);
2340 2323
@@ -2379,41 +2362,6 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
2379 return 0; 2362 return 0;
2380} 2363}
2381 2364
2382int ieee80211_mgd_action(struct ieee80211_sub_if_data *sdata,
2383 struct ieee80211_channel *chan,
2384 enum nl80211_channel_type channel_type,
2385 const u8 *buf, size_t len, u64 *cookie)
2386{
2387 struct ieee80211_local *local = sdata->local;
2388 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
2389 struct sk_buff *skb;
2390
2391 /* Check that we are on the requested channel for transmission */
2392 if ((chan != local->tmp_channel ||
2393 channel_type != local->tmp_channel_type) &&
2394 (chan != local->oper_channel ||
2395 channel_type != local->_oper_channel_type))
2396 return -EBUSY;
2397
2398 skb = dev_alloc_skb(local->hw.extra_tx_headroom + len);
2399 if (!skb)
2400 return -ENOMEM;
2401 skb_reserve(skb, local->hw.extra_tx_headroom);
2402
2403 memcpy(skb_put(skb, len), buf, len);
2404
2405 if (!(ifmgd->flags & IEEE80211_STA_MFP_ENABLED))
2406 IEEE80211_SKB_CB(skb)->flags |=
2407 IEEE80211_TX_INTFL_DONT_ENCRYPT;
2408 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_NL80211_FRAME_TX |
2409 IEEE80211_TX_CTL_REQ_TX_STATUS;
2410 skb->dev = sdata->dev;
2411 ieee80211_tx_skb(sdata, skb);
2412
2413 *cookie = (unsigned long) skb;
2414 return 0;
2415}
2416
2417void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif, 2365void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif,
2418 enum nl80211_cqm_rssi_threshold_event rssi_event, 2366 enum nl80211_cqm_rssi_threshold_event rssi_event,
2419 gfp_t gfp) 2367 gfp_t gfp)
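
The ieee80211_recalc_ps() hunk above collapses the old latency ladder into a two-way choice and stores the result in the new local->dynamic_ps_user_timeout, so drivers using the exported ieee80211_disable_dyn_ps()/ieee80211_enable_dyn_ps() helpers can zero the active timeout without losing the user-derived value. The mapping itself is easy to check standalone (a sketch, not mac80211 code; latency is the PM-QoS network latency in microseconds):

#include <stdio.h>

/* the timeout selection as rewritten above: values beyond 1.9e9 us,
 * other than the 2e9 compatibility default, mean full PSM (timeout 0);
 * everything else keeps a 100 ms dynamic PS timeout */
static int dyn_ps_user_timeout(long long latency)
{
        if (latency > 1900000000LL && latency != 2000000000LL)
                return 0;
        return 100;
}

int main(void)
{
        printf("%d %d %d\n",
               dyn_ps_user_timeout(50000),              /* -> 100 */
               dyn_ps_user_timeout(2000000000LL),       /* -> 100 (compat) */
               dyn_ps_user_timeout(1950000000LL));      /* -> 0 */
        return 0;
}
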
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 75202b295a4e..d287fde0431d 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -40,22 +40,14 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
40 list_for_each_entry(sdata, &local->interfaces, list) 40 list_for_each_entry(sdata, &local->interfaces, list)
41 ieee80211_disable_keys(sdata); 41 ieee80211_disable_keys(sdata);
42 42
43 /* Tear down aggregation sessions */ 43 /* tear down aggregation sessions and remove STAs */
44 44 mutex_lock(&local->sta_mtx);
45 rcu_read_lock(); 45 list_for_each_entry(sta, &local->sta_list, list) {
46 46 if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
47 if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
48 list_for_each_entry_rcu(sta, &local->sta_list, list) {
49 set_sta_flags(sta, WLAN_STA_BLOCK_BA); 47 set_sta_flags(sta, WLAN_STA_BLOCK_BA);
50 ieee80211_sta_tear_down_BA_sessions(sta); 48 ieee80211_sta_tear_down_BA_sessions(sta);
51 } 49 }
52 }
53 50
54 rcu_read_unlock();
55
56 /* remove STAs */
57 mutex_lock(&local->sta_mtx);
58 list_for_each_entry(sta, &local->sta_list, list) {
59 if (sta->uploaded) { 51 if (sta->uploaded) {
60 sdata = sta->sdata; 52 sdata = sta->sdata;
61 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 53 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
@@ -72,6 +64,8 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
72 64
73 /* remove all interfaces */ 65 /* remove all interfaces */
74 list_for_each_entry(sdata, &local->interfaces, list) { 66 list_for_each_entry(sdata, &local->interfaces, list) {
67 cancel_work_sync(&sdata->work);
68
75 switch(sdata->vif.type) { 69 switch(sdata->vif.type) {
76 case NL80211_IFTYPE_STATION: 70 case NL80211_IFTYPE_STATION:
77 ieee80211_sta_quiesce(sdata); 71 ieee80211_sta_quiesce(sdata);
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index 065a96190e32..168427b0ffdc 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -147,5 +147,18 @@ static inline void rc80211_minstrel_exit(void)
147} 147}
148#endif 148#endif
149 149
150#ifdef CONFIG_MAC80211_RC_MINSTREL_HT
151extern int rc80211_minstrel_ht_init(void);
152extern void rc80211_minstrel_ht_exit(void);
153#else
154static inline int rc80211_minstrel_ht_init(void)
155{
156 return 0;
157}
158static inline void rc80211_minstrel_ht_exit(void)
159{
160}
161#endif
162
150 163
151#endif /* IEEE80211_RATE_H */ 164#endif /* IEEE80211_RATE_H */
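
The rate.h hunk uses the common Kconfig stub idiom: when CONFIG_MAC80211_RC_MINSTREL_HT is not set, the init/exit declarations are replaced by inline no-ops, so the registration path can call them unconditionally with no #ifdef at the call site. The idiom in miniature (CONFIG_FEATURE_X and the function names are illustrative):

#include <stdio.h>

/* #define CONFIG_FEATURE_X 1 */

#ifdef CONFIG_FEATURE_X
int feature_x_init(void);
void feature_x_exit(void);
#else
static inline int feature_x_init(void) { return 0; }
static inline void feature_x_exit(void) { }
#endif

int main(void)
{
        /* succeeds as a no-op when the feature is compiled out */
        printf("init: %d\n", feature_x_init());
        feature_x_exit();
        return 0;
}
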
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index f65ce6dcc8e2..778c604d7939 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -67,7 +67,6 @@ rix_to_ndx(struct minstrel_sta_info *mi, int rix)
67 for (i = rix; i >= 0; i--) 67 for (i = rix; i >= 0; i--)
68 if (mi->r[i].rix == rix) 68 if (mi->r[i].rix == rix)
69 break; 69 break;
70 WARN_ON(i < 0);
71 return i; 70 return i;
72} 71}
73 72
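
With the WARN_ON() dropped, rix_to_ndx() quietly returns -1 when the rate index is absent from the station's table; presumably callers are expected to treat a negative result as "not found" rather than triggering a backtrace for a condition that can legitimately occur. A simplified standalone stand-in for the resulting contract:

#include <stdio.h>

/* simplified stand-in for minstrel's rix_to_ndx(): walk down from rix
 * and return the matching table slot, or -1 when nothing matches */
static int rix_to_ndx(const int *table, int n, int rix)
{
        int i;

        for (i = rix < n ? rix : n - 1; i >= 0; i--)
                if (table[i] == rix)
                        return i;
        return -1;              /* silent "not found", no WARN_ON() */
}

int main(void)
{
        int table[] = { 0, 2, 4, 11 };

        printf("%d %d\n",
               rix_to_ndx(table, 4, 4),         /* -> 2 */
               rix_to_ndx(table, 4, 7));        /* -> -1 */
        return 0;
}
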
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
new file mode 100644
index 000000000000..c5b465904e3b
--- /dev/null
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -0,0 +1,827 @@
1/*
2 * Copyright (C) 2010 Felix Fietkau <nbd@openwrt.org>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8#include <linux/netdevice.h>
9#include <linux/types.h>
10#include <linux/skbuff.h>
11#include <linux/debugfs.h>
12#include <linux/random.h>
13#include <linux/ieee80211.h>
14#include <net/mac80211.h>
15#include "rate.h"
16#include "rc80211_minstrel.h"
17#include "rc80211_minstrel_ht.h"
18
19#define AVG_PKT_SIZE 1200
20#define SAMPLE_COLUMNS 10
21#define EWMA_LEVEL 75
22
23/* Number of bits for an average sized packet */
24#define MCS_NBITS (AVG_PKT_SIZE << 3)
25
26/* Number of symbols for a packet with (bps) bits per symbol */
27#define MCS_NSYMS(bps) ((MCS_NBITS + (bps) - 1) / (bps))
28
29/* Transmission time for a packet containing (syms) symbols */
30#define MCS_SYMBOL_TIME(sgi, syms) \
31 (sgi ? \
32 ((syms) * 18 + 4) / 5 : /* syms * 3.6 us */ \
33 (syms) << 2 /* syms * 4 us */ \
34 )
35
36/* Transmit duration for the raw data part of an average sized packet */
37#define MCS_DURATION(streams, sgi, bps) MCS_SYMBOL_TIME(sgi, MCS_NSYMS((streams) * (bps)))
38
39/* MCS rate information for an MCS group */
40#define MCS_GROUP(_streams, _sgi, _ht40) { \
41 .streams = _streams, \
42 .flags = \
43 (_sgi ? IEEE80211_TX_RC_SHORT_GI : 0) | \
44 (_ht40 ? IEEE80211_TX_RC_40_MHZ_WIDTH : 0), \
45 .duration = { \
46 MCS_DURATION(_streams, _sgi, _ht40 ? 54 : 26), \
47 MCS_DURATION(_streams, _sgi, _ht40 ? 108 : 52), \
48 MCS_DURATION(_streams, _sgi, _ht40 ? 162 : 78), \
49 MCS_DURATION(_streams, _sgi, _ht40 ? 216 : 104), \
50 MCS_DURATION(_streams, _sgi, _ht40 ? 324 : 156), \
51 MCS_DURATION(_streams, _sgi, _ht40 ? 432 : 208), \
52 MCS_DURATION(_streams, _sgi, _ht40 ? 486 : 234), \
53 MCS_DURATION(_streams, _sgi, _ht40 ? 540 : 260) \
54 } \
55}
56
57/*
58 * To enable sufficiently targeted rate sampling, MCS rates are divided into
59 * groups, based on the number of streams and flags (HT40, SGI) that they
60 * use.
61 */
62const struct mcs_group minstrel_mcs_groups[] = {
63 MCS_GROUP(1, 0, 0),
64 MCS_GROUP(2, 0, 0),
65#if MINSTREL_MAX_STREAMS >= 3
66 MCS_GROUP(3, 0, 0),
67#endif
68
69 MCS_GROUP(1, 1, 0),
70 MCS_GROUP(2, 1, 0),
71#if MINSTREL_MAX_STREAMS >= 3
72 MCS_GROUP(3, 1, 0),
73#endif
74
75 MCS_GROUP(1, 0, 1),
76 MCS_GROUP(2, 0, 1),
77#if MINSTREL_MAX_STREAMS >= 3
78 MCS_GROUP(3, 0, 1),
79#endif
80
81 MCS_GROUP(1, 1, 1),
82 MCS_GROUP(2, 1, 1),
83#if MINSTREL_MAX_STREAMS >= 3
84 MCS_GROUP(3, 1, 1),
85#endif
86};
87
88static u8 sample_table[SAMPLE_COLUMNS][MCS_GROUP_RATES];
89
90/*
91 * Perform EWMA (Exponentially Weighted Moving Average) calculation
92 */
93static int
94minstrel_ewma(int old, int new, int weight)
95{
96 return (new * (100 - weight) + old * weight) / 100;
97}
98
99/*
100 * Look up an MCS group index based on mac80211 rate information
101 */
102static int
103minstrel_ht_get_group_idx(struct ieee80211_tx_rate *rate)
104{
105 int streams = (rate->idx / MCS_GROUP_RATES) + 1;
106 u32 flags = IEEE80211_TX_RC_SHORT_GI | IEEE80211_TX_RC_40_MHZ_WIDTH;
107 int i;
108
109 for (i = 0; i < ARRAY_SIZE(minstrel_mcs_groups); i++) {
110 if (minstrel_mcs_groups[i].streams != streams)
111 continue;
112 if (minstrel_mcs_groups[i].flags != (rate->flags & flags))
113 continue;
114
115 return i;
116 }
117
118 WARN_ON(1);
119 return 0;
120}
121
122static inline struct minstrel_rate_stats *
123minstrel_get_ratestats(struct minstrel_ht_sta *mi, int index)
124{
125 return &mi->groups[index / MCS_GROUP_RATES].rates[index % MCS_GROUP_RATES];
126}
127
128
129/*
130 * Recalculate success probabilities and counters for a rate using EWMA
131 */
132static void
133minstrel_calc_rate_ewma(struct minstrel_priv *mp, struct minstrel_rate_stats *mr)
134{
135 if (unlikely(mr->attempts > 0)) {
136 mr->sample_skipped = 0;
137 mr->cur_prob = MINSTREL_FRAC(mr->success, mr->attempts);
138 if (!mr->att_hist)
139 mr->probability = mr->cur_prob;
140 else
141 mr->probability = minstrel_ewma(mr->probability,
142 mr->cur_prob, EWMA_LEVEL);
143 mr->att_hist += mr->attempts;
144 mr->succ_hist += mr->success;
145 } else {
146 mr->sample_skipped++;
147 }
148 mr->last_success = mr->success;
149 mr->last_attempts = mr->attempts;
150 mr->success = 0;
151 mr->attempts = 0;
152}
153
154/*
155 * Calculate throughput based on the average A-MPDU length, taking into account
156 * the expected number of retransmissions and their expected length
157 */
158static void
159minstrel_ht_calc_tp(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
160 int group, int rate)
161{
162 struct minstrel_rate_stats *mr;
163 unsigned int usecs;
164
165 mr = &mi->groups[group].rates[rate];
166
167 if (mr->probability < MINSTREL_FRAC(1, 10)) {
168 mr->cur_tp = 0;
169 return;
170 }
171
172 usecs = mi->overhead / MINSTREL_TRUNC(mi->avg_ampdu_len);
173 usecs += minstrel_mcs_groups[group].duration[rate];
174 mr->cur_tp = MINSTREL_TRUNC((1000000 / usecs) * mr->probability);
175}
176
177/*
178 * Update rate statistics and select new primary rates
179 *
180 * Rules for rate selection:
181 * - max_prob_rate must use only one stream, as a tradeoff between delivery
182 * probability and throughput during strong fluctuations
183 * - as long as the max prob rate has a probability of more than 3/4, pick
184 * higher throughput rates, even if the probability is a bit lower
185 */
186static void
187minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
188{
189 struct minstrel_mcs_group_data *mg;
190 struct minstrel_rate_stats *mr;
191 int cur_prob, cur_prob_tp, cur_tp, cur_tp2;
192 int group, i, index;
193
194 if (mi->ampdu_packets > 0) {
195 mi->avg_ampdu_len = minstrel_ewma(mi->avg_ampdu_len,
196 MINSTREL_FRAC(mi->ampdu_len, mi->ampdu_packets), EWMA_LEVEL);
197 mi->ampdu_len = 0;
198 mi->ampdu_packets = 0;
199 }
200
201 mi->sample_slow = 0;
202 mi->sample_count = 0;
203 mi->max_tp_rate = 0;
204 mi->max_tp_rate2 = 0;
205 mi->max_prob_rate = 0;
206
207 for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) {
208 cur_prob = 0;
209 cur_prob_tp = 0;
210 cur_tp = 0;
211 cur_tp2 = 0;
212
213 mg = &mi->groups[group];
214 if (!mg->supported)
215 continue;
216
217 mg->max_tp_rate = 0;
218 mg->max_tp_rate2 = 0;
219 mg->max_prob_rate = 0;
220 mi->sample_count++;
221
222 for (i = 0; i < MCS_GROUP_RATES; i++) {
223 if (!(mg->supported & BIT(i)))
224 continue;
225
226 mr = &mg->rates[i];
227 mr->retry_updated = false;
228 index = MCS_GROUP_RATES * group + i;
229 minstrel_calc_rate_ewma(mp, mr);
230 minstrel_ht_calc_tp(mp, mi, group, i);
231
232 if (!mr->cur_tp)
233 continue;
234
235 /* ignore the lowest rate of each single-stream group */
236 if (!i && minstrel_mcs_groups[group].streams == 1)
237 continue;
238
239 if ((mr->cur_tp > cur_prob_tp && mr->probability >
240 MINSTREL_FRAC(3, 4)) || mr->probability > cur_prob) {
241 mg->max_prob_rate = index;
242 cur_prob = mr->probability;
243 cur_prob_tp = mr->cur_tp;
244 }
245
246 if (mr->cur_tp > cur_tp) {
247 swap(index, mg->max_tp_rate);
248 cur_tp = mr->cur_tp;
249 mr = minstrel_get_ratestats(mi, index);
250 }
251
252 if (index >= mg->max_tp_rate)
253 continue;
254
255 if (mr->cur_tp > cur_tp2) {
256 mg->max_tp_rate2 = index;
257 cur_tp2 = mr->cur_tp;
258 }
259 }
260 }
261
262 /* try to sample up to half of the available rates during each interval */
263 mi->sample_count *= 4;
264
265 cur_prob = 0;
266 cur_prob_tp = 0;
267 cur_tp = 0;
268 cur_tp2 = 0;
269 for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) {
270 mg = &mi->groups[group];
271 if (!mg->supported)
272 continue;
273
274 mr = minstrel_get_ratestats(mi, mg->max_prob_rate);
275 if (cur_prob_tp < mr->cur_tp &&
276 minstrel_mcs_groups[group].streams == 1) {
277 mi->max_prob_rate = mg->max_prob_rate;
278 cur_prob = mr->cur_prob;
279 cur_prob_tp = mr->cur_tp;
280 }
281
282 mr = minstrel_get_ratestats(mi, mg->max_tp_rate);
283 if (cur_tp < mr->cur_tp) {
284 mi->max_tp_rate = mg->max_tp_rate;
285 cur_tp = mr->cur_tp;
286 }
287
288 mr = minstrel_get_ratestats(mi, mg->max_tp_rate2);
289 if (cur_tp2 < mr->cur_tp) {
290 mi->max_tp_rate2 = mg->max_tp_rate2;
291 cur_tp2 = mr->cur_tp;
292 }
293 }
294
295 mi->stats_update = jiffies;
296}
297
298static bool
299minstrel_ht_txstat_valid(struct ieee80211_tx_rate *rate)
300{
301 if (!rate->count)
302 return false;
303
304 if (rate->idx < 0)
305 return false;
306
307 return !!(rate->flags & IEEE80211_TX_RC_MCS);
308}
309
310static void
311minstrel_next_sample_idx(struct minstrel_ht_sta *mi)
312{
313 struct minstrel_mcs_group_data *mg;
314
315 for (;;) {
316 mi->sample_group++;
317 mi->sample_group %= ARRAY_SIZE(minstrel_mcs_groups);
318 mg = &mi->groups[mi->sample_group];
319
320 if (!mg->supported)
321 continue;
322
323 if (++mg->index >= MCS_GROUP_RATES) {
324 mg->index = 0;
325 if (++mg->column >= ARRAY_SIZE(sample_table))
326 mg->column = 0;
327 }
328 break;
329 }
330}
331
332static void
333minstrel_downgrade_rate(struct minstrel_ht_sta *mi, unsigned int *idx,
334 bool primary)
335{
336 int group, orig_group;
337
338 orig_group = group = *idx / MCS_GROUP_RATES;
339 while (group > 0) {
340 group--;
341
342 if (!mi->groups[group].supported)
343 continue;
344
345 if (minstrel_mcs_groups[group].streams >
346 minstrel_mcs_groups[orig_group].streams)
347 continue;
348
349 if (primary)
350 *idx = mi->groups[group].max_tp_rate;
351 else
352 *idx = mi->groups[group].max_tp_rate2;
353 break;
354 }
355}
356
357static void
358minstrel_aggr_check(struct minstrel_priv *mp, struct ieee80211_sta *pubsta, struct sk_buff *skb)
359{
360 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
361 struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
362 u16 tid;
363
364 if (unlikely(!ieee80211_is_data_qos(hdr->frame_control)))
365 return;
366
367 if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE)))
368 return;
369
370 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
371 if (likely(sta->ampdu_mlme.tid_tx[tid]))
372 return;
373
374 ieee80211_start_tx_ba_session(pubsta, tid);
375}
376
377static void
378minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
379 struct ieee80211_sta *sta, void *priv_sta,
380 struct sk_buff *skb)
381{
382 struct minstrel_ht_sta_priv *msp = priv_sta;
383 struct minstrel_ht_sta *mi = &msp->ht;
384 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
385 struct ieee80211_tx_rate *ar = info->status.rates;
386 struct minstrel_rate_stats *rate, *rate2;
387 struct minstrel_priv *mp = priv;
388 bool last = false;
389 int group;
390 int i = 0;
391
392 if (!msp->is_ht)
393 return mac80211_minstrel.tx_status(priv, sband, sta, &msp->legacy, skb);
394
395 /* This packet was aggregated but doesn't carry status info */
396 if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
397 !(info->flags & IEEE80211_TX_STAT_AMPDU))
398 return;
399
400 if (!info->status.ampdu_len) {
401 info->status.ampdu_ack_len = 1;
402 info->status.ampdu_len = 1;
403 }
404
405 mi->ampdu_packets++;
406 mi->ampdu_len += info->status.ampdu_len;
407
408 if (!mi->sample_wait && !mi->sample_tries && mi->sample_count > 0) {
409 mi->sample_wait = 4 + 2 * MINSTREL_TRUNC(mi->avg_ampdu_len);
410 mi->sample_tries = 3;
411 mi->sample_count--;
412 }
413
414 if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
415 mi->sample_packets += info->status.ampdu_len;
416 minstrel_next_sample_idx(mi);
417 }
418
419 for (i = 0; !last; i++) {
420 last = (i == IEEE80211_TX_MAX_RATES - 1) ||
421 !minstrel_ht_txstat_valid(&ar[i + 1]);
422
423 if (!minstrel_ht_txstat_valid(&ar[i]))
424 break;
425
426 group = minstrel_ht_get_group_idx(&ar[i]);
427 rate = &mi->groups[group].rates[ar[i].idx % 8];
428
429 if (last && (info->flags & IEEE80211_TX_STAT_ACK))
430 rate->success += info->status.ampdu_ack_len;
431
432 rate->attempts += ar[i].count * info->status.ampdu_len;
433 }
434
435 /*
436 * check for sudden death of spatial multiplexing and
437 * downgrade to a lower number of streams if necessary.
438 */
439 rate = minstrel_get_ratestats(mi, mi->max_tp_rate);
440 if (rate->attempts > 30 &&
441 MINSTREL_FRAC(rate->success, rate->attempts) <
442 MINSTREL_FRAC(20, 100))
443 minstrel_downgrade_rate(mi, &mi->max_tp_rate, true);
444
445 rate2 = minstrel_get_ratestats(mi, mi->max_tp_rate2);
446 if (rate2->attempts > 30 &&
447 MINSTREL_FRAC(rate2->success, rate2->attempts) <
448 MINSTREL_FRAC(20, 100))
449 minstrel_downgrade_rate(mi, &mi->max_tp_rate2, false);
450
451 if (time_after(jiffies, mi->stats_update + (mp->update_interval / 2 * HZ) / 1000)) {
452 minstrel_ht_update_stats(mp, mi);
453 minstrel_aggr_check(mp, sta, skb);
454 }
455}
456
457static void
458minstrel_calc_retransmit(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
459 int index)
460{
461 struct minstrel_rate_stats *mr;
462 const struct mcs_group *group;
463 unsigned int tx_time, tx_time_rtscts, tx_time_data;
464 unsigned int cw = mp->cw_min;
465 unsigned int t_slot = 9; /* FIXME */
466 unsigned int ampdu_len = MINSTREL_TRUNC(mi->avg_ampdu_len);
467
468 mr = minstrel_get_ratestats(mi, index);
469 if (mr->probability < MINSTREL_FRAC(1, 10)) {
470 mr->retry_count = 1;
471 mr->retry_count_rtscts = 1;
472 return;
473 }
474
475 mr->retry_count = 2;
476 mr->retry_count_rtscts = 2;
477 mr->retry_updated = true;
478
479 group = &minstrel_mcs_groups[index / MCS_GROUP_RATES];
480 tx_time_data = group->duration[index % MCS_GROUP_RATES] * ampdu_len;
481 tx_time = 2 * (t_slot + mi->overhead + tx_time_data);
482 tx_time_rtscts = 2 * (t_slot + mi->overhead_rtscts + tx_time_data);
483 do {
484 cw = (cw << 1) | 1;
485 cw = min(cw, mp->cw_max);
486 tx_time += cw + t_slot + mi->overhead;
487 tx_time_rtscts += cw + t_slot + mi->overhead_rtscts;
488 if (tx_time_rtscts < mp->segment_size)
489 mr->retry_count_rtscts++;
490 } while ((tx_time < mp->segment_size) &&
491 (++mr->retry_count < mp->max_retry));
492}
493
494
495static void
496minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
497 struct ieee80211_tx_rate *rate, int index,
498 struct ieee80211_tx_rate_control *txrc,
499 bool sample, bool rtscts)
500{
501 const struct mcs_group *group = &minstrel_mcs_groups[index / MCS_GROUP_RATES];
502 struct minstrel_rate_stats *mr;
503
504 mr = minstrel_get_ratestats(mi, index);
505 if (!mr->retry_updated)
506 minstrel_calc_retransmit(mp, mi, index);
507
508 if (mr->probability < MINSTREL_FRAC(20, 100))
509 rate->count = 2;
510 else if (rtscts)
511 rate->count = mr->retry_count_rtscts;
512 else
513 rate->count = mr->retry_count;
514
515 rate->flags = IEEE80211_TX_RC_MCS | group->flags;
516 if (txrc->short_preamble)
517 rate->flags |= IEEE80211_TX_RC_USE_SHORT_PREAMBLE;
518 if (txrc->rts || rtscts)
519 rate->flags |= IEEE80211_TX_RC_USE_RTS_CTS;
520 rate->idx = index % MCS_GROUP_RATES + (group->streams - 1) * MCS_GROUP_RATES;
521}
522
523static inline int
524minstrel_get_duration(int index)
525{
526 const struct mcs_group *group = &minstrel_mcs_groups[index / MCS_GROUP_RATES];
527 return group->duration[index % MCS_GROUP_RATES];
528}
529
530static int
531minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
532{
533 struct minstrel_rate_stats *mr;
534 struct minstrel_mcs_group_data *mg;
535 int sample_idx = 0;
536
537 if (mi->sample_wait > 0) {
538 mi->sample_wait--;
539 return -1;
540 }
541
542 if (!mi->sample_tries)
543 return -1;
544
545 mi->sample_tries--;
546 mg = &mi->groups[mi->sample_group];
547 sample_idx = sample_table[mg->column][mg->index];
548 mr = &mg->rates[sample_idx];
549 sample_idx += mi->sample_group * MCS_GROUP_RATES;
550
551 /*
552 * When not using MRR, do not sample if the probability is already
553 * higher than 95% to avoid wasting airtime
554 */
555 if (!mp->has_mrr && (mr->probability > MINSTREL_FRAC(95, 100)))
556 goto next;
557
558 /*
559 * Make sure that lower rates get sampled only occasionally,
560 * if the link is working perfectly.
561 */
562 if (minstrel_get_duration(sample_idx) >
563 minstrel_get_duration(mi->max_tp_rate)) {
564 if (mr->sample_skipped < 10)
565 goto next;
566
567 if (mi->sample_slow++ > 2)
568 goto next;
569 }
570
571 return sample_idx;
572
573next:
574 minstrel_next_sample_idx(mi);
575 return -1;
576}
577
578static void
579minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
580 struct ieee80211_tx_rate_control *txrc)
581{
582 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb);
583 struct ieee80211_tx_rate *ar = info->status.rates;
584 struct minstrel_ht_sta_priv *msp = priv_sta;
585 struct minstrel_ht_sta *mi = &msp->ht;
586 struct minstrel_priv *mp = priv;
587 int sample_idx;
588
589 if (rate_control_send_low(sta, priv_sta, txrc))
590 return;
591
592 if (!msp->is_ht)
593 return mac80211_minstrel.get_rate(priv, sta, &msp->legacy, txrc);
594
595 info->flags |= mi->tx_flags;
596 sample_idx = minstrel_get_sample_rate(mp, mi);
597 if (sample_idx >= 0) {
598 minstrel_ht_set_rate(mp, mi, &ar[0], sample_idx,
599 txrc, true, false);
600 minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_tp_rate,
601 txrc, false, true);
602 info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
603 } else {
604 minstrel_ht_set_rate(mp, mi, &ar[0], mi->max_tp_rate,
605 txrc, false, false);
606 minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_tp_rate2,
607 txrc, false, true);
608 }
609 minstrel_ht_set_rate(mp, mi, &ar[2], mi->max_prob_rate, txrc, false, true);
610
611 ar[3].count = 0;
612 ar[3].idx = -1;
613
614 mi->total_packets++;
615
616 /* wraparound */
617 if (mi->total_packets == ~0) {
618 mi->total_packets = 0;
619 mi->sample_packets = 0;
620 }
621}
622
623static void
624minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
625 struct ieee80211_sta *sta, void *priv_sta,
626 enum nl80211_channel_type oper_chan_type)
627{
628 struct minstrel_priv *mp = priv;
629 struct minstrel_ht_sta_priv *msp = priv_sta;
630 struct minstrel_ht_sta *mi = &msp->ht;
631 struct ieee80211_mcs_info *mcs = &sta->ht_cap.mcs;
632 struct ieee80211_local *local = hw_to_local(mp->hw);
633 u16 sta_cap = sta->ht_cap.cap;
634 int ack_dur;
635 int stbc;
636 int i;
637
638 /* fall back to the old minstrel for legacy stations */
639 if (!sta->ht_cap.ht_supported) {
640 msp->is_ht = false;
641 memset(&msp->legacy, 0, sizeof(msp->legacy));
642 msp->legacy.r = msp->ratelist;
643 msp->legacy.sample_table = msp->sample_table;
644 return mac80211_minstrel.rate_init(priv, sband, sta, &msp->legacy);
645 }
646
647 BUILD_BUG_ON(ARRAY_SIZE(minstrel_mcs_groups) !=
648 MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS);
649
650 msp->is_ht = true;
651 memset(mi, 0, sizeof(*mi));
652 mi->stats_update = jiffies;
653
654 ack_dur = ieee80211_frame_duration(local, 10, 60, 1, 1);
655 mi->overhead = ieee80211_frame_duration(local, 0, 60, 1, 1) + ack_dur;
656 mi->overhead_rtscts = mi->overhead + 2 * ack_dur;
657
658 mi->avg_ampdu_len = MINSTREL_FRAC(1, 1);
659
660 /* When using MRR, sample more on the first attempt, without delay */
661 if (mp->has_mrr) {
662 mi->sample_count = 16;
663 mi->sample_wait = 0;
664 } else {
665 mi->sample_count = 8;
666 mi->sample_wait = 8;
667 }
668 mi->sample_tries = 4;
669
670 stbc = (sta_cap & IEEE80211_HT_CAP_RX_STBC) >>
671 IEEE80211_HT_CAP_RX_STBC_SHIFT;
672 mi->tx_flags |= stbc << IEEE80211_TX_CTL_STBC_SHIFT;
673
674 if (sta_cap & IEEE80211_HT_CAP_LDPC_CODING)
675 mi->tx_flags |= IEEE80211_TX_CTL_LDPC;
676
677 if (oper_chan_type != NL80211_CHAN_HT40MINUS &&
678 oper_chan_type != NL80211_CHAN_HT40PLUS)
679 sta_cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
680
681 for (i = 0; i < ARRAY_SIZE(mi->groups); i++) {
682 u16 req = 0;
683
684 mi->groups[i].supported = 0;
685 if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_SHORT_GI) {
686 if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
687 req |= IEEE80211_HT_CAP_SGI_40;
688 else
689 req |= IEEE80211_HT_CAP_SGI_20;
690 }
691
692 if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
693 req |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
694
695 if ((sta_cap & req) != req)
696 continue;
697
698 mi->groups[i].supported =
699 mcs->rx_mask[minstrel_mcs_groups[i].streams - 1];
700 }
701}
702
703static void
704minstrel_ht_rate_init(void *priv, struct ieee80211_supported_band *sband,
705 struct ieee80211_sta *sta, void *priv_sta)
706{
707 struct minstrel_priv *mp = priv;
708
709 minstrel_ht_update_caps(priv, sband, sta, priv_sta, mp->hw->conf.channel_type);
710}
711
712static void
713minstrel_ht_rate_update(void *priv, struct ieee80211_supported_band *sband,
714 struct ieee80211_sta *sta, void *priv_sta,
715 u32 changed, enum nl80211_channel_type oper_chan_type)
716{
717 minstrel_ht_update_caps(priv, sband, sta, priv_sta, oper_chan_type);
718}
719
720static void *
721minstrel_ht_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
722{
723 struct ieee80211_supported_band *sband;
724 struct minstrel_ht_sta_priv *msp;
725 struct minstrel_priv *mp = priv;
726 struct ieee80211_hw *hw = mp->hw;
727 int max_rates = 0;
728 int i;
729
730 for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
731 sband = hw->wiphy->bands[i];
732 if (sband && sband->n_bitrates > max_rates)
733 max_rates = sband->n_bitrates;
734 }
735
736 msp = kzalloc(sizeof(struct minstrel_ht_sta), gfp);
737 if (!msp)
738 return NULL;
739
740 msp->ratelist = kzalloc(sizeof(struct minstrel_rate) * max_rates, gfp);
741 if (!msp->ratelist)
742 goto error;
743
744 msp->sample_table = kmalloc(SAMPLE_COLUMNS * max_rates, gfp);
745 if (!msp->sample_table)
746 goto error1;
747
748 return msp;
749
750error1:
751 kfree(msp->ratelist);
752error:
753 kfree(msp);
754 return NULL;
755}
756
757static void
758minstrel_ht_free_sta(void *priv, struct ieee80211_sta *sta, void *priv_sta)
759{
760 struct minstrel_ht_sta_priv *msp = priv_sta;
761
762 kfree(msp->sample_table);
763 kfree(msp->ratelist);
764 kfree(msp);
765}
766
767static void *
768minstrel_ht_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
769{
770 return mac80211_minstrel.alloc(hw, debugfsdir);
771}
772
773static void
774minstrel_ht_free(void *priv)
775{
776 mac80211_minstrel.free(priv);
777}
778
779static struct rate_control_ops mac80211_minstrel_ht = {
780 .name = "minstrel_ht",
781 .tx_status = minstrel_ht_tx_status,
782 .get_rate = minstrel_ht_get_rate,
783 .rate_init = minstrel_ht_rate_init,
784 .rate_update = minstrel_ht_rate_update,
785 .alloc_sta = minstrel_ht_alloc_sta,
786 .free_sta = minstrel_ht_free_sta,
787 .alloc = minstrel_ht_alloc,
788 .free = minstrel_ht_free,
789#ifdef CONFIG_MAC80211_DEBUGFS
790 .add_sta_debugfs = minstrel_ht_add_sta_debugfs,
791 .remove_sta_debugfs = minstrel_ht_remove_sta_debugfs,
792#endif
793};
794
795
796static void
797init_sample_table(void)
798{
799 int col, i, new_idx;
800 u8 rnd[MCS_GROUP_RATES];
801
802 memset(sample_table, 0xff, sizeof(sample_table));
803 for (col = 0; col < SAMPLE_COLUMNS; col++) {
804 for (i = 0; i < MCS_GROUP_RATES; i++) {
805 get_random_bytes(rnd, sizeof(rnd));
806 new_idx = (i + rnd[i]) % MCS_GROUP_RATES;
807
808 while (sample_table[col][new_idx] != 0xff)
809 new_idx = (new_idx + 1) % MCS_GROUP_RATES;
810
811 sample_table[col][new_idx] = i;
812 }
813 }
814}
815
816int __init
817rc80211_minstrel_ht_init(void)
818{
819 init_sample_table();
820 return ieee80211_rate_control_register(&mac80211_minstrel_ht);
821}
822
823void
824rc80211_minstrel_ht_exit(void)
825{
826 ieee80211_rate_control_unregister(&mac80211_minstrel_ht);
827}
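
The per-group duration tables built by MCS_GROUP() are pure integer arithmetic and can be verified in isolation. The snippet below reproduces MCS_NBITS, MCS_NSYMS() and MCS_SYMBOL_TIME() from the file above (with an extra set of parentheses around sgi) and prints the precomputed airtime of a 1200-byte packet at the lowest and highest single-stream 20 MHz rates (26 and 260 data bits per OFDM symbol):

#include <stdio.h>

#define AVG_PKT_SIZE    1200
#define MCS_NBITS       (AVG_PKT_SIZE << 3)
#define MCS_NSYMS(bps)  ((MCS_NBITS + (bps) - 1) / (bps))
#define MCS_SYMBOL_TIME(sgi, syms) \
        ((sgi) ? ((syms) * 18 + 4) / 5 : (syms) << 2)

int main(void)
{
        /* MCS0, 1 stream, 20 MHz: 26 data bits per symbol */
        printf("MCS0 LGI: %d us\n", MCS_SYMBOL_TIME(0, MCS_NSYMS(26)));  /* 1480 */
        printf("MCS0 SGI: %d us\n", MCS_SYMBOL_TIME(1, MCS_NSYMS(26)));  /* 1332 */
        /* MCS7, 1 stream, 20 MHz: 260 data bits per symbol */
        printf("MCS7 LGI: %d us\n", MCS_SYMBOL_TIME(0, MCS_NSYMS(260))); /*  148 */
        return 0;
}
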
diff --git a/net/mac80211/rc80211_minstrel_ht.h b/net/mac80211/rc80211_minstrel_ht.h
new file mode 100644
index 000000000000..462d2b227ed5
--- /dev/null
+++ b/net/mac80211/rc80211_minstrel_ht.h
@@ -0,0 +1,130 @@
1/*
2 * Copyright (C) 2010 Felix Fietkau <nbd@openwrt.org>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef __RC_MINSTREL_HT_H
10#define __RC_MINSTREL_HT_H
11
12/*
13 * The number of streams can be changed to 2 to reduce code
14 * size and memory footprint.
15 */
16#define MINSTREL_MAX_STREAMS 3
17#define MINSTREL_STREAM_GROUPS 4
18
19/* scaled fraction values */
20#define MINSTREL_SCALE 16
21#define MINSTREL_FRAC(val, div) (((val) << MINSTREL_SCALE) / (div))
22#define MINSTREL_TRUNC(val) ((val) >> MINSTREL_SCALE)
23
24#define MCS_GROUP_RATES 8
25
26struct mcs_group {
27 u32 flags;
28 unsigned int streams;
29 unsigned int duration[MCS_GROUP_RATES];
30};
31
32extern const struct mcs_group minstrel_mcs_groups[];
33
34struct minstrel_rate_stats {
35 /* current / last sampling period attempts/success counters */
36 unsigned int attempts, last_attempts;
37 unsigned int success, last_success;
38
39 /* total attempts/success counters */
40 u64 att_hist, succ_hist;
41
42 /* current throughput */
43 unsigned int cur_tp;
44
45 /* packet delivery probabilities */
46 unsigned int cur_prob, probability;
47
48 /* maximum retry counts */
49 unsigned int retry_count;
50 unsigned int retry_count_rtscts;
51
52 bool retry_updated;
53 u8 sample_skipped;
54};
55
56struct minstrel_mcs_group_data {
57 u8 index;
58 u8 column;
59
60 /* bitfield of supported MCS rates of this group */
61 u8 supported;
62
63 /* selected primary rates */
64 unsigned int max_tp_rate;
65 unsigned int max_tp_rate2;
66 unsigned int max_prob_rate;
67
68 /* MCS rate statistics */
69 struct minstrel_rate_stats rates[MCS_GROUP_RATES];
70};
71
72struct minstrel_ht_sta {
73 /* ampdu length (average, per sampling interval) */
74 unsigned int ampdu_len;
75 unsigned int ampdu_packets;
76
77 /* ampdu length (EWMA) */
78 unsigned int avg_ampdu_len;
79
80 /* best throughput rate */
81 unsigned int max_tp_rate;
82
83 /* second best throughput rate */
84 unsigned int max_tp_rate2;
85
86 /* best probability rate */
87 unsigned int max_prob_rate;
88
89 /* time of last status update */
90 unsigned long stats_update;
91
92 /* overhead time in usec for each frame */
93 unsigned int overhead;
94 unsigned int overhead_rtscts;
95
96 unsigned int total_packets;
97 unsigned int sample_packets;
98
99 /* tx flags to add for frames for this sta */
100 u32 tx_flags;
101
102 u8 sample_wait;
103 u8 sample_tries;
104 u8 sample_count;
105 u8 sample_slow;
106
107 /* current MCS group to be sampled */
108 u8 sample_group;
109
110 /* MCS rate group info and statistics */
111 struct minstrel_mcs_group_data groups[MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS];
112};
113
114struct minstrel_ht_sta_priv {
115 union {
116 struct minstrel_ht_sta ht;
117 struct minstrel_sta_info legacy;
118 };
119#ifdef CONFIG_MAC80211_DEBUGFS
120 struct dentry *dbg_stats;
121#endif
122 void *ratelist;
123 void *sample_table;
124 bool is_ht;
125};
126
127void minstrel_ht_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir);
128void minstrel_ht_remove_sta_debugfs(void *priv, void *priv_sta);
129
130#endif
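
All probabilities in these structures are kept in the 16-bit fixed-point format defined by MINSTREL_SCALE and smoothed with the 75% EWMA from the .c file. A small standalone check of the arithmetic (the macros, EWMA_LEVEL and minstrel_ewma() are copied from the patch; note the truncation inherent in the representation):

#include <stdio.h>

#define MINSTREL_SCALE  16
#define MINSTREL_FRAC(val, div) (((val) << MINSTREL_SCALE) / (div))
#define MINSTREL_TRUNC(val)     ((val) >> MINSTREL_SCALE)
#define EWMA_LEVEL      75      /* ewma weighting factor [%] */

static int minstrel_ewma(int old, int new, int weight)
{
        return (new * (100 - weight) + old * weight) / 100;
}

int main(void)
{
        int prob = MINSTREL_FRAC(90, 100);      /* long-run estimate: 90% */
        int cur = MINSTREL_FRAC(50, 100);       /* one bad interval: 50% */

        prob = minstrel_ewma(prob, cur, EWMA_LEVEL);
        /* 0.75 * 90% + 0.25 * 50% = 80%; fixed-point truncation makes
         * this print 799 permille rather than exactly 800 */
        printf("%d\n", MINSTREL_TRUNC(prob * 1000));
        return 0;
}
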
diff --git a/net/mac80211/rc80211_minstrel_ht_debugfs.c b/net/mac80211/rc80211_minstrel_ht_debugfs.c
new file mode 100644
index 000000000000..4a5a4b3e7799
--- /dev/null
+++ b/net/mac80211/rc80211_minstrel_ht_debugfs.c
@@ -0,0 +1,118 @@
1/*
2 * Copyright (C) 2010 Felix Fietkau <nbd@openwrt.org>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8#include <linux/netdevice.h>
9#include <linux/types.h>
10#include <linux/skbuff.h>
11#include <linux/debugfs.h>
12#include <linux/ieee80211.h>
13#include <net/mac80211.h>
14#include "rc80211_minstrel.h"
15#include "rc80211_minstrel_ht.h"
16
17static int
18minstrel_ht_stats_open(struct inode *inode, struct file *file)
19{
20 struct minstrel_ht_sta_priv *msp = inode->i_private;
21 struct minstrel_ht_sta *mi = &msp->ht;
22 struct minstrel_debugfs_info *ms;
23 unsigned int i, j, tp, prob, eprob;
24 char *p;
25 int ret;
26
27 if (!msp->is_ht) {
28 inode->i_private = &msp->legacy;
29 ret = minstrel_stats_open(inode, file);
30 inode->i_private = msp;
31 return ret;
32 }
33
34 ms = kmalloc(sizeof(*ms) + 8192, GFP_KERNEL);
35 if (!ms)
36 return -ENOMEM;
37
38 file->private_data = ms;
39 p = ms->buf;
40 p += sprintf(p, "type rate throughput ewma prob this prob "
41 "this succ/attempt success attempts\n");
42 for (i = 0; i < MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS; i++) {
43 char htmode = '2';
44 char gimode = 'L';
45
46 if (!mi->groups[i].supported)
47 continue;
48
49 if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
50 htmode = '4';
51 if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_SHORT_GI)
52 gimode = 'S';
53
54 for (j = 0; j < MCS_GROUP_RATES; j++) {
55 struct minstrel_rate_stats *mr = &mi->groups[i].rates[j];
56 int idx = i * MCS_GROUP_RATES + j;
57
58 if (!(mi->groups[i].supported & BIT(j)))
59 continue;
60
61 p += sprintf(p, "HT%c0/%cGI ", htmode, gimode);
62
63 *(p++) = (idx == mi->max_tp_rate) ? 'T' : ' ';
64 *(p++) = (idx == mi->max_tp_rate2) ? 't' : ' ';
65 *(p++) = (idx == mi->max_prob_rate) ? 'P' : ' ';
66 p += sprintf(p, "MCS%-2u", (minstrel_mcs_groups[i].streams - 1) *
67 MCS_GROUP_RATES + j);
68
69 tp = mr->cur_tp / 10;
70 prob = MINSTREL_TRUNC(mr->cur_prob * 1000);
71 eprob = MINSTREL_TRUNC(mr->probability * 1000);
72
73 p += sprintf(p, " %6u.%1u %6u.%1u %6u.%1u "
74 "%3u(%3u) %8llu %8llu\n",
75 tp / 10, tp % 10,
76 eprob / 10, eprob % 10,
77 prob / 10, prob % 10,
78 mr->last_success,
79 mr->last_attempts,
80 (unsigned long long)mr->succ_hist,
81 (unsigned long long)mr->att_hist);
82 }
83 }
84 p += sprintf(p, "\nTotal packet count:: ideal %d "
85 "lookaround %d\n",
86 max(0, (int) mi->total_packets - (int) mi->sample_packets),
87 mi->sample_packets);
88 p += sprintf(p, "Average A-MPDU length: %d.%d\n",
89 MINSTREL_TRUNC(mi->avg_ampdu_len),
90 MINSTREL_TRUNC(mi->avg_ampdu_len * 10) % 10);
91 ms->len = p - ms->buf;
92
93 return 0;
94}
95
96static const struct file_operations minstrel_ht_stat_fops = {
97 .owner = THIS_MODULE,
98 .open = minstrel_ht_stats_open,
99 .read = minstrel_stats_read,
100 .release = minstrel_stats_release,
101};
102
103void
104minstrel_ht_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir)
105{
106 struct minstrel_ht_sta_priv *msp = priv_sta;
107
108 msp->dbg_stats = debugfs_create_file("rc_stats", S_IRUGO, dir, msp,
109 &minstrel_ht_stat_fops);
110}
111
112void
113minstrel_ht_remove_sta_debugfs(void *priv, void *priv_sta)
114{
115 struct minstrel_ht_sta_priv *msp = priv_sta;
116
117 debugfs_remove(msp->dbg_stats);
118}
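
The open/read/release trio above follows a common debugfs idiom: format the
whole report into a private buffer at open() time, serve read() straight from
that snapshot, and free it on release(). A self-contained sketch of the same
pattern with illustrative names (struct snap, snap_fops):

    #include <linux/debugfs.h>
    #include <linux/fs.h>
    #include <linux/module.h>
    #include <linux/slab.h>

    struct snap {
            size_t len;
            char buf[256];
    };

    static int snap_open(struct inode *inode, struct file *file)
    {
            struct snap *s = kmalloc(sizeof(*s), GFP_KERNEL);

            if (!s)
                    return -ENOMEM;
            /* snapshot once; readers see one consistent report */
            s->len = scnprintf(s->buf, sizeof(s->buf),
                               "stats for %p\n", inode->i_private);
            file->private_data = s;
            return 0;
    }

    static ssize_t snap_read(struct file *file, char __user *ubuf,
                             size_t count, loff_t *ppos)
    {
            struct snap *s = file->private_data;

            return simple_read_from_buffer(ubuf, count, ppos, s->buf, s->len);
    }

    static int snap_release(struct inode *inode, struct file *file)
    {
            kfree(file->private_data);
            return 0;
    }

    static const struct file_operations snap_fops = {
            .owner   = THIS_MODULE,
            .open    = snap_open,
            .read    = snap_read,
            .release = snap_release,
    };

minstrel_ht additionally redirects to the legacy open() for non-HT stations by
temporarily swapping inode->i_private, which is why its open() is the only
nontrivial hook while read and release are shared with minstrel.
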
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index be9abc2e6348..fa0f37e4afe4 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -293,7 +293,7 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
293 skb2 = skb_clone(skb, GFP_ATOMIC); 293 skb2 = skb_clone(skb, GFP_ATOMIC);
294 if (skb2) { 294 if (skb2) {
295 skb2->dev = prev_dev; 295 skb2->dev = prev_dev;
296 netif_rx(skb2); 296 netif_receive_skb(skb2);
297 } 297 }
298 } 298 }
299 299
@@ -304,7 +304,7 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
304 304
305 if (prev_dev) { 305 if (prev_dev) {
306 skb->dev = prev_dev; 306 skb->dev = prev_dev;
307 netif_rx(skb); 307 netif_receive_skb(skb);
308 } else 308 } else
309 dev_kfree_skb(skb); 309 dev_kfree_skb(skb);
310 310
@@ -719,16 +719,13 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
719 719
720 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; 720 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
721 721
722 spin_lock(&sta->lock); 722 tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
723 723 if (!tid_agg_rx)
724 if (!sta->ampdu_mlme.tid_active_rx[tid]) 724 goto dont_reorder;
725 goto dont_reorder_unlock;
726
727 tid_agg_rx = sta->ampdu_mlme.tid_rx[tid];
728 725
729 /* qos null data frames are excluded */ 726 /* qos null data frames are excluded */
730 if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC))) 727 if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
731 goto dont_reorder_unlock; 728 goto dont_reorder;
732 729
733 /* new, potentially un-ordered, ampdu frame - process it */ 730 /* new, potentially un-ordered, ampdu frame - process it */
734 731
@@ -740,20 +737,22 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
740 /* if this mpdu is fragmented - terminate rx aggregation session */ 737 /* if this mpdu is fragmented - terminate rx aggregation session */
741 sc = le16_to_cpu(hdr->seq_ctrl); 738 sc = le16_to_cpu(hdr->seq_ctrl);
742 if (sc & IEEE80211_SCTL_FRAG) { 739 if (sc & IEEE80211_SCTL_FRAG) {
743 spin_unlock(&sta->lock); 740 skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
744 __ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_RECIPIENT, 741 skb_queue_tail(&rx->sdata->skb_queue, skb);
745 WLAN_REASON_QSTA_REQUIRE_SETUP); 742 ieee80211_queue_work(&local->hw, &rx->sdata->work);
746 dev_kfree_skb(skb);
747 return; 743 return;
748 } 744 }
749 745
750 if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, frames)) { 746 /*
751 spin_unlock(&sta->lock); 747 * No locking needed -- we will only ever process one
748 * RX packet at a time, and thus own tid_agg_rx. All
749 * other code manipulating it needs to (and does) make
750 * sure that we cannot get to it any more before doing
751 * anything with it.
752 */
753 if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, frames))
752 return; 754 return;
753 }
754 755
755 dont_reorder_unlock:
756 spin_unlock(&sta->lock);
757 dont_reorder: 756 dont_reorder:
758 __skb_queue_tail(frames, skb); 757 __skb_queue_tail(frames, skb);
759} 758}
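
The reorder path above is the core of this change: the per-TID RX state moves
from "spinlock + active flag" to an RCU-published pointer, so the (single
threaded) RX hotpath does a lock-free rcu_dereference() while teardown
unpublishes the pointer and frees it only after a grace period. A reduced
sketch of that lifecycle, with stand-in names (struct tid_state, struct peer):

    #include <linux/kernel.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct tid_state {
            struct rcu_head rcu_head;
            /* ... reorder buffer, timers ... */
    };

    struct peer {
            spinlock_t lock;
            struct tid_state *tid[16]; /* published via rcu_assign_pointer() */
    };

    /* RX path: lock-free lookup, mirroring ieee80211_rx_reorder_ampdu() */
    static bool rx_has_session(struct peer *p, int tid)
    {
            struct tid_state *ts;
            bool active;

            rcu_read_lock();
            ts = rcu_dereference(p->tid[tid]);
            active = ts != NULL;
            /* ... ts may be used freely inside the read-side section ... */
            rcu_read_unlock();
            return active;
    }

    static void tid_state_free(struct rcu_head *head)
    {
            kfree(container_of(head, struct tid_state, rcu_head));
    }

    /* teardown: unpublish under the lock, free after a grace period */
    static void tear_down(struct peer *p, int tid)
    {
            struct tid_state *ts;

            spin_lock_bh(&p->lock);
            ts = p->tid[tid];
            rcu_assign_pointer(p->tid[tid], NULL);
            spin_unlock_bh(&p->lock);

            if (ts)
                    call_rcu(&ts->rcu_head, tid_state_free);
    }
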
@@ -825,6 +824,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
825 ieee80211_rx_result result = RX_DROP_UNUSABLE; 824 ieee80211_rx_result result = RX_DROP_UNUSABLE;
826 struct ieee80211_key *stakey = NULL; 825 struct ieee80211_key *stakey = NULL;
827 int mmie_keyidx = -1; 826 int mmie_keyidx = -1;
827 __le16 fc;
828 828
829 /* 829 /*
830 * Key selection 101 830 * Key selection 101
@@ -866,13 +866,15 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
866 if (rx->sta) 866 if (rx->sta)
867 stakey = rcu_dereference(rx->sta->key); 867 stakey = rcu_dereference(rx->sta->key);
868 868
869 if (!ieee80211_has_protected(hdr->frame_control)) 869 fc = hdr->frame_control;
870
871 if (!ieee80211_has_protected(fc))
870 mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb); 872 mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);
871 873
872 if (!is_multicast_ether_addr(hdr->addr1) && stakey) { 874 if (!is_multicast_ether_addr(hdr->addr1) && stakey) {
873 rx->key = stakey; 875 rx->key = stakey;
874 /* Skip decryption if the frame is not protected. */ 876 /* Skip decryption if the frame is not protected. */
875 if (!ieee80211_has_protected(hdr->frame_control)) 877 if (!ieee80211_has_protected(fc))
876 return RX_CONTINUE; 878 return RX_CONTINUE;
877 } else if (mmie_keyidx >= 0) { 879 } else if (mmie_keyidx >= 0) {
878 /* Broadcast/multicast robust management frame / BIP */ 880 /* Broadcast/multicast robust management frame / BIP */
@@ -884,7 +886,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
884 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) 886 mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
885 return RX_DROP_MONITOR; /* unexpected BIP keyidx */ 887 return RX_DROP_MONITOR; /* unexpected BIP keyidx */
886 rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]); 888 rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
887 } else if (!ieee80211_has_protected(hdr->frame_control)) { 889 } else if (!ieee80211_has_protected(fc)) {
888 /* 890 /*
889 * The frame was not protected, so skip decryption. However, we 891 * The frame was not protected, so skip decryption. However, we
890 * need to set rx->key if there is a key that could have been 892 * need to set rx->key if there is a key that could have been
@@ -892,7 +894,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
892 * have been expected. 894 * have been expected.
893 */ 895 */
894 struct ieee80211_key *key = NULL; 896 struct ieee80211_key *key = NULL;
895 if (ieee80211_is_mgmt(hdr->frame_control) && 897 if (ieee80211_is_mgmt(fc) &&
896 is_multicast_ether_addr(hdr->addr1) && 898 is_multicast_ether_addr(hdr->addr1) &&
897 (key = rcu_dereference(rx->sdata->default_mgmt_key))) 899 (key = rcu_dereference(rx->sdata->default_mgmt_key)))
898 rx->key = key; 900 rx->key = key;
@@ -914,7 +916,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
914 (status->flag & RX_FLAG_IV_STRIPPED)) 916 (status->flag & RX_FLAG_IV_STRIPPED))
915 return RX_CONTINUE; 917 return RX_CONTINUE;
916 918
917 hdrlen = ieee80211_hdrlen(hdr->frame_control); 919 hdrlen = ieee80211_hdrlen(fc);
918 920
919 if (rx->skb->len < 8 + hdrlen) 921 if (rx->skb->len < 8 + hdrlen)
920 return RX_DROP_UNUSABLE; /* TODO: count this? */ 922 return RX_DROP_UNUSABLE; /* TODO: count this? */
@@ -947,19 +949,17 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
947 949
948 if (skb_linearize(rx->skb)) 950 if (skb_linearize(rx->skb))
949 return RX_DROP_UNUSABLE; 951 return RX_DROP_UNUSABLE;
950 952 /* the hdr variable is invalid now! */
951 hdr = (struct ieee80211_hdr *)rx->skb->data;
952
953 /* Check for weak IVs if possible */
954 if (rx->sta && rx->key->conf.alg == ALG_WEP &&
955 ieee80211_is_data(hdr->frame_control) &&
956 (!(status->flag & RX_FLAG_IV_STRIPPED) ||
957 !(status->flag & RX_FLAG_DECRYPTED)) &&
958 ieee80211_wep_is_weak_iv(rx->skb, rx->key))
959 rx->sta->wep_weak_iv_count++;
960 953
961 switch (rx->key->conf.alg) { 954 switch (rx->key->conf.alg) {
962 case ALG_WEP: 955 case ALG_WEP:
956 /* Check for weak IVs if possible */
957 if (rx->sta && ieee80211_is_data(fc) &&
958 (!(status->flag & RX_FLAG_IV_STRIPPED) ||
959 !(status->flag & RX_FLAG_DECRYPTED)) &&
960 ieee80211_wep_is_weak_iv(rx->skb, rx->key))
961 rx->sta->wep_weak_iv_count++;
962
963 result = ieee80211_crypto_wep_decrypt(rx); 963 result = ieee80211_crypto_wep_decrypt(rx);
964 break; 964 break;
965 case ALG_TKIP: 965 case ALG_TKIP:
@@ -1267,11 +1267,13 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
1267 rx->queue, &(rx->skb)); 1267 rx->queue, &(rx->skb));
1268 if (rx->key && rx->key->conf.alg == ALG_CCMP && 1268 if (rx->key && rx->key->conf.alg == ALG_CCMP &&
1269 ieee80211_has_protected(fc)) { 1269 ieee80211_has_protected(fc)) {
1270 int queue = ieee80211_is_mgmt(fc) ?
1271 NUM_RX_DATA_QUEUES : rx->queue;
1270 /* Store CCMP PN so that we can verify that the next 1272 /* Store CCMP PN so that we can verify that the next
1271 * fragment has a sequential PN value. */ 1273 * fragment has a sequential PN value. */
1272 entry->ccmp = 1; 1274 entry->ccmp = 1;
1273 memcpy(entry->last_pn, 1275 memcpy(entry->last_pn,
1274 rx->key->u.ccmp.rx_pn[rx->queue], 1276 rx->key->u.ccmp.rx_pn[queue],
1275 CCMP_PN_LEN); 1277 CCMP_PN_LEN);
1276 } 1278 }
1277 return RX_QUEUED; 1279 return RX_QUEUED;
@@ -1291,6 +1293,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
1291 if (entry->ccmp) { 1293 if (entry->ccmp) {
1292 int i; 1294 int i;
1293 u8 pn[CCMP_PN_LEN], *rpn; 1295 u8 pn[CCMP_PN_LEN], *rpn;
1296 int queue;
1294 if (!rx->key || rx->key->conf.alg != ALG_CCMP) 1297 if (!rx->key || rx->key->conf.alg != ALG_CCMP)
1295 return RX_DROP_UNUSABLE; 1298 return RX_DROP_UNUSABLE;
1296 memcpy(pn, entry->last_pn, CCMP_PN_LEN); 1299 memcpy(pn, entry->last_pn, CCMP_PN_LEN);
@@ -1299,7 +1302,9 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
1299 if (pn[i]) 1302 if (pn[i])
1300 break; 1303 break;
1301 } 1304 }
1302 rpn = rx->key->u.ccmp.rx_pn[rx->queue]; 1305 queue = ieee80211_is_mgmt(fc) ?
1306 NUM_RX_DATA_QUEUES : rx->queue;
1307 rpn = rx->key->u.ccmp.rx_pn[queue];
1303 if (memcmp(pn, rpn, CCMP_PN_LEN)) 1308 if (memcmp(pn, rpn, CCMP_PN_LEN))
1304 return RX_DROP_UNUSABLE; 1309 return RX_DROP_UNUSABLE;
1305 memcpy(entry->last_pn, pn, CCMP_PN_LEN); 1310 memcpy(entry->last_pn, pn, CCMP_PN_LEN);
@@ -1573,7 +1578,7 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
1573 /* deliver to local stack */ 1578 /* deliver to local stack */
1574 skb->protocol = eth_type_trans(skb, dev); 1579 skb->protocol = eth_type_trans(skb, dev);
1575 memset(skb->cb, 0, sizeof(skb->cb)); 1580 memset(skb->cb, 0, sizeof(skb->cb));
1576 netif_rx(skb); 1581 netif_receive_skb(skb);
1577 } 1582 }
1578 } 1583 }
1579 1584
@@ -1829,13 +1834,11 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
1829 &bar_data, sizeof(bar_data))) 1834 &bar_data, sizeof(bar_data)))
1830 return RX_DROP_MONITOR; 1835 return RX_DROP_MONITOR;
1831 1836
1832 spin_lock(&rx->sta->lock);
1833 tid = le16_to_cpu(bar_data.control) >> 12; 1837 tid = le16_to_cpu(bar_data.control) >> 12;
1834 if (!rx->sta->ampdu_mlme.tid_active_rx[tid]) { 1838
1835 spin_unlock(&rx->sta->lock); 1839 tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]);
1840 if (!tid_agg_rx)
1836 return RX_DROP_MONITOR; 1841 return RX_DROP_MONITOR;
1837 }
1838 tid_agg_rx = rx->sta->ampdu_mlme.tid_rx[tid];
1839 1842
1840 start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4; 1843 start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4;
1841 1844
@@ -1848,11 +1851,15 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
1848 ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num, 1851 ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num,
1849 frames); 1852 frames);
1850 kfree_skb(skb); 1853 kfree_skb(skb);
1851 spin_unlock(&rx->sta->lock);
1852 return RX_QUEUED; 1854 return RX_QUEUED;
1853 } 1855 }
1854 1856
1855 return RX_CONTINUE; 1857 /*
1858 * After this point, we only want management frames,
1859 * so we can drop all remaining control frames to
1860 * cooked monitor interfaces.
1861 */
1862 return RX_DROP_MONITOR;
1856} 1863}
1857 1864
1858static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata, 1865static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
@@ -1944,30 +1951,27 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
1944 if (len < IEEE80211_MIN_ACTION_SIZE + 1) 1951 if (len < IEEE80211_MIN_ACTION_SIZE + 1)
1945 break; 1952 break;
1946 1953
1947 if (sdata->vif.type == NL80211_IFTYPE_STATION)
1948 return ieee80211_sta_rx_mgmt(sdata, rx->skb);
1949
1950 switch (mgmt->u.action.u.addba_req.action_code) { 1954 switch (mgmt->u.action.u.addba_req.action_code) {
1951 case WLAN_ACTION_ADDBA_REQ: 1955 case WLAN_ACTION_ADDBA_REQ:
1952 if (len < (IEEE80211_MIN_ACTION_SIZE + 1956 if (len < (IEEE80211_MIN_ACTION_SIZE +
1953 sizeof(mgmt->u.action.u.addba_req))) 1957 sizeof(mgmt->u.action.u.addba_req)))
1954 return RX_DROP_MONITOR; 1958 goto invalid;
1955 ieee80211_process_addba_request(local, rx->sta, mgmt, len); 1959 break;
1956 goto handled;
1957 case WLAN_ACTION_ADDBA_RESP: 1960 case WLAN_ACTION_ADDBA_RESP:
1958 if (len < (IEEE80211_MIN_ACTION_SIZE + 1961 if (len < (IEEE80211_MIN_ACTION_SIZE +
1959 sizeof(mgmt->u.action.u.addba_resp))) 1962 sizeof(mgmt->u.action.u.addba_resp)))
1960 break; 1963 goto invalid;
1961 ieee80211_process_addba_resp(local, rx->sta, mgmt, len); 1964 break;
1962 goto handled;
1963 case WLAN_ACTION_DELBA: 1965 case WLAN_ACTION_DELBA:
1964 if (len < (IEEE80211_MIN_ACTION_SIZE + 1966 if (len < (IEEE80211_MIN_ACTION_SIZE +
1965 sizeof(mgmt->u.action.u.delba))) 1967 sizeof(mgmt->u.action.u.delba)))
1966 break; 1968 goto invalid;
1967 ieee80211_process_delba(sdata, rx->sta, mgmt, len); 1969 break;
1968 goto handled; 1970 default:
1971 goto invalid;
1969 } 1972 }
1970 break; 1973
1974 goto queue;
1971 case WLAN_CATEGORY_SPECTRUM_MGMT: 1975 case WLAN_CATEGORY_SPECTRUM_MGMT:
1972 if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ) 1976 if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ)
1973 break; 1977 break;
@@ -1997,7 +2001,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
1997 if (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN)) 2001 if (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN))
1998 break; 2002 break;
1999 2003
2000 return ieee80211_sta_rx_mgmt(sdata, rx->skb); 2004 goto queue;
2001 } 2005 }
2002 break; 2006 break;
2003 case WLAN_CATEGORY_SA_QUERY: 2007 case WLAN_CATEGORY_SA_QUERY:
@@ -2015,11 +2019,12 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
2015 break; 2019 break;
2016 case WLAN_CATEGORY_MESH_PLINK: 2020 case WLAN_CATEGORY_MESH_PLINK:
2017 case WLAN_CATEGORY_MESH_PATH_SEL: 2021 case WLAN_CATEGORY_MESH_PATH_SEL:
2018 if (ieee80211_vif_is_mesh(&sdata->vif)) 2022 if (!ieee80211_vif_is_mesh(&sdata->vif))
2019 return ieee80211_mesh_rx_mgmt(sdata, rx->skb); 2023 break;
2020 break; 2024 goto queue;
2021 } 2025 }
2022 2026
2027 invalid:
2023 /* 2028 /*
2024 * For AP mode, hostapd is responsible for handling any action 2029 * For AP mode, hostapd is responsible for handling any action
2025 * frames that we didn't handle, including returning unknown 2030 * frames that we didn't handle, including returning unknown
@@ -2039,8 +2044,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
2039 */ 2044 */
2040 status = IEEE80211_SKB_RXCB(rx->skb); 2045 status = IEEE80211_SKB_RXCB(rx->skb);
2041 2046
2042 if (sdata->vif.type == NL80211_IFTYPE_STATION && 2047 if (cfg80211_rx_action(rx->sdata->dev, status->freq,
2043 cfg80211_rx_action(rx->sdata->dev, status->freq,
2044 rx->skb->data, rx->skb->len, 2048 rx->skb->data, rx->skb->len,
2045 GFP_ATOMIC)) 2049 GFP_ATOMIC))
2046 goto handled; 2050 goto handled;
@@ -2052,11 +2056,11 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
2052 nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0, 2056 nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0,
2053 GFP_ATOMIC); 2057 GFP_ATOMIC);
2054 if (nskb) { 2058 if (nskb) {
2055 struct ieee80211_mgmt *mgmt = (void *)nskb->data; 2059 struct ieee80211_mgmt *nmgmt = (void *)nskb->data;
2056 2060
2057 mgmt->u.action.category |= 0x80; 2061 nmgmt->u.action.category |= 0x80;
2058 memcpy(mgmt->da, mgmt->sa, ETH_ALEN); 2062 memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN);
2059 memcpy(mgmt->sa, rx->sdata->vif.addr, ETH_ALEN); 2063 memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN);
2060 2064
2061 memset(nskb->cb, 0, sizeof(nskb->cb)); 2065 memset(nskb->cb, 0, sizeof(nskb->cb));
2062 2066
@@ -2068,6 +2072,14 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
2068 rx->sta->rx_packets++; 2072 rx->sta->rx_packets++;
2069 dev_kfree_skb(rx->skb); 2073 dev_kfree_skb(rx->skb);
2070 return RX_QUEUED; 2074 return RX_QUEUED;
2075
2076 queue:
2077 rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
2078 skb_queue_tail(&sdata->skb_queue, rx->skb);
2079 ieee80211_queue_work(&local->hw, &sdata->work);
2080 if (rx->sta)
2081 rx->sta->rx_packets++;
2082 return RX_QUEUED;
2071} 2083}
2072 2084
2073static ieee80211_rx_result debug_noinline 2085static ieee80211_rx_result debug_noinline
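
The new "queue:" exit above shows the second recurring pattern in this series:
the RX hotpath only tags the skb and queues it to the per-sdata queue, and
sdata->work processes it later in a context that may sleep and take mutexes
(needed now that aggregation state is mutex-protected). A reduced,
hypothetical sketch of the same defer-to-work shape (my_dev and
MY_QUEUED_FRAME are placeholders for sdata and
IEEE80211_SDATA_QUEUE_TYPE_FRAME):

    #include <linux/kernel.h>
    #include <linux/skbuff.h>
    #include <linux/workqueue.h>

    #define MY_QUEUED_FRAME 7 /* arbitrary pkt_type marker */

    struct my_dev {
            struct sk_buff_head skb_queue;
            struct work_struct work;
    };

    /* hotpath: cheap, never sleeps */
    static void my_rx_defer(struct my_dev *dev, struct sk_buff *skb)
    {
            skb->pkt_type = MY_QUEUED_FRAME;      /* record why it was queued */
            skb_queue_tail(&dev->skb_queue, skb); /* lock-protected, irq safe */
            schedule_work(&dev->work);
    }

    /* process context: free to sleep, reply, take the ampdu mutex, ... */
    static void my_work_fn(struct work_struct *work)
    {
            struct my_dev *dev = container_of(work, struct my_dev, work);
            struct sk_buff *skb;

            while ((skb = skb_dequeue(&dev->skb_queue)) != NULL) {
                    /* ... handle ADDBA/DELBA, mesh frames, etc. ... */
                    dev_kfree_skb(skb);
            }
    }
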
@@ -2075,10 +2087,15 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
2075{ 2087{
2076 struct ieee80211_sub_if_data *sdata = rx->sdata; 2088 struct ieee80211_sub_if_data *sdata = rx->sdata;
2077 ieee80211_rx_result rxs; 2089 ieee80211_rx_result rxs;
2090 struct ieee80211_mgmt *mgmt = (void *)rx->skb->data;
2091 __le16 stype;
2078 2092
2079 if (!(rx->flags & IEEE80211_RX_RA_MATCH)) 2093 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
2080 return RX_DROP_MONITOR; 2094 return RX_DROP_MONITOR;
2081 2095
2096 if (rx->skb->len < 24)
2097 return RX_DROP_MONITOR;
2098
2082 if (ieee80211_drop_unencrypted_mgmt(rx)) 2099 if (ieee80211_drop_unencrypted_mgmt(rx))
2083 return RX_DROP_UNUSABLE; 2100 return RX_DROP_UNUSABLE;
2084 2101
@@ -2086,16 +2103,42 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
2086 if (rxs != RX_CONTINUE) 2103 if (rxs != RX_CONTINUE)
2087 return rxs; 2104 return rxs;
2088 2105
2089 if (ieee80211_vif_is_mesh(&sdata->vif)) 2106 stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE);
2090 return ieee80211_mesh_rx_mgmt(sdata, rx->skb);
2091 2107
2092 if (sdata->vif.type == NL80211_IFTYPE_ADHOC) 2108 if (!ieee80211_vif_is_mesh(&sdata->vif) &&
2093 return ieee80211_ibss_rx_mgmt(sdata, rx->skb); 2109 sdata->vif.type != NL80211_IFTYPE_ADHOC &&
2110 sdata->vif.type != NL80211_IFTYPE_STATION)
2111 return RX_DROP_MONITOR;
2112
2113 switch (stype) {
2114 case cpu_to_le16(IEEE80211_STYPE_BEACON):
2115 case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
2116 /* process for all: mesh, mlme, ibss */
2117 break;
2118 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
2119 case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
2120 /* process only for station */
2121 if (sdata->vif.type != NL80211_IFTYPE_STATION)
2122 return RX_DROP_MONITOR;
2123 break;
2124 case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
2125 case cpu_to_le16(IEEE80211_STYPE_AUTH):
2126 /* process only for ibss */
2127 if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
2128 return RX_DROP_MONITOR;
2129 break;
2130 default:
2131 return RX_DROP_MONITOR;
2132 }
2094 2133
2095 if (sdata->vif.type == NL80211_IFTYPE_STATION) 2134 /* queue up frame and kick off work to process it */
2096 return ieee80211_sta_rx_mgmt(sdata, rx->skb); 2135 rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
2136 skb_queue_tail(&sdata->skb_queue, rx->skb);
2137 ieee80211_queue_work(&rx->local->hw, &sdata->work);
2138 if (rx->sta)
2139 rx->sta->rx_packets++;
2097 2140
2098 return RX_DROP_MONITOR; 2141 return RX_QUEUED;
2099} 2142}
2100 2143
2101static void ieee80211_rx_michael_mic_report(struct ieee80211_hdr *hdr, 2144static void ieee80211_rx_michael_mic_report(struct ieee80211_hdr *hdr,
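
One idiom worth flagging in the new ieee80211_rx_h_mgmt(): frame_control is
kept in little-endian form and the switch compares it against cpu_to_le16()
constants, so the endianness conversion is folded at compile time rather than
swapping every received frame. A sketch of the idiom using real ieee80211.h
constants:

    #include <linux/ieee80211.h>

    static bool stype_is_handled(__le16 fc)
    {
            switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
            case cpu_to_le16(IEEE80211_STYPE_BEACON):
            case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
                    return true;  /* processed for mesh, mlme and ibss */
            default:
                    return false;
            }
    }
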
@@ -2151,7 +2194,7 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
2151 u8 rate_or_pad; 2194 u8 rate_or_pad;
2152 __le16 chan_freq; 2195 __le16 chan_freq;
2153 __le16 chan_flags; 2196 __le16 chan_flags;
2154 } __attribute__ ((packed)) *rthdr; 2197 } __packed *rthdr;
2155 struct sk_buff *skb = rx->skb, *skb2; 2198 struct sk_buff *skb = rx->skb, *skb2;
2156 struct net_device *prev_dev = NULL; 2199 struct net_device *prev_dev = NULL;
2157 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2200 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
@@ -2201,7 +2244,7 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
2201 skb2 = skb_clone(skb, GFP_ATOMIC); 2244 skb2 = skb_clone(skb, GFP_ATOMIC);
2202 if (skb2) { 2245 if (skb2) {
2203 skb2->dev = prev_dev; 2246 skb2->dev = prev_dev;
2204 netif_rx(skb2); 2247 netif_receive_skb(skb2);
2205 } 2248 }
2206 } 2249 }
2207 2250
@@ -2212,7 +2255,7 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
2212 2255
2213 if (prev_dev) { 2256 if (prev_dev) {
2214 skb->dev = prev_dev; 2257 skb->dev = prev_dev;
2215 netif_rx(skb); 2258 netif_receive_skb(skb);
2216 skb = NULL; 2259 skb = NULL;
2217 } else 2260 } else
2218 goto out_free_skb; 2261 goto out_free_skb;
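
A note on the netif_rx() -> netif_receive_skb() conversions scattered through
this file (and status.c below): netif_rx() queues the skb on the per-CPU
backlog and defers to the NET_RX softirq, which is the safe choice from
hardirq context; mac80211's RX path already runs from a tasklet, i.e. in
softirq context, so it can hand frames to the stack inline and skip the extra
queue hop. A sketch of the rule of thumb, assuming the caller knows its
context:

    #include <linux/netdevice.h>

    static void deliver_skb(struct sk_buff *skb, bool softirq_ctx)
    {
            if (softirq_ctx)
                    netif_receive_skb(skb); /* process inline, no backlog */
            else
                    netif_rx(skb);          /* defer via per-CPU backlog */
    }
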
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index e1b0be7a57b9..41f20fb7e670 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -114,6 +114,10 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
114 bss->dtim_period = tim_ie->dtim_period; 114 bss->dtim_period = tim_ie->dtim_period;
115 } 115 }
116 116
117 /* If the beacon had no TIM IE, or it was invalid, use 1 */
118 if (beacon && !bss->dtim_period)
119 bss->dtim_period = 1;
120
117 /* replace old supported rates if we get new values */ 121 /* replace old supported rates if we get new values */
118 srlen = 0; 122 srlen = 0;
119 if (elems->supp_rates) { 123 if (elems->supp_rates) {
@@ -734,7 +738,7 @@ int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata,
734{ 738{
735 struct ieee80211_local *local = sdata->local; 739 struct ieee80211_local *local = sdata->local;
736 int ret = -EBUSY; 740 int ret = -EBUSY;
737 enum nl80211_band band; 741 enum ieee80211_band band;
738 742
739 mutex_lock(&local->scan_mtx); 743 mutex_lock(&local->scan_mtx);
740 744
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index ba9360a475b0..6d86f0c1ad04 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -235,6 +235,8 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
235 spin_lock_init(&sta->lock); 235 spin_lock_init(&sta->lock);
236 spin_lock_init(&sta->flaglock); 236 spin_lock_init(&sta->flaglock);
237 INIT_WORK(&sta->drv_unblock_wk, sta_unblock); 237 INIT_WORK(&sta->drv_unblock_wk, sta_unblock);
238 INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work);
239 mutex_init(&sta->ampdu_mlme.mtx);
238 240
239 memcpy(sta->sta.addr, addr, ETH_ALEN); 241 memcpy(sta->sta.addr, addr, ETH_ALEN);
240 sta->local = local; 242 sta->local = local;
@@ -246,14 +248,12 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
246 } 248 }
247 249
248 for (i = 0; i < STA_TID_NUM; i++) { 250 for (i = 0; i < STA_TID_NUM; i++) {
249 /* timer_to_tid must be initialized with identity mapping to 251 /*
250 * enable session_timer's data differentiation. refer to 252 * timer_to_tid must be initialized with identity mapping
251 * sta_rx_agg_session_timer_expired for useage */ 253 * to enable session_timer's data differentiation. See
254 * sta_rx_agg_session_timer_expired for usage.
255 */
252 sta->timer_to_tid[i] = i; 256 sta->timer_to_tid[i] = i;
253 /* tx */
254 sta->ampdu_mlme.tid_state_tx[i] = HT_AGG_STATE_IDLE;
255 sta->ampdu_mlme.tid_tx[i] = NULL;
256 sta->ampdu_mlme.addba_req_num[i] = 0;
257 } 257 }
258 skb_queue_head_init(&sta->ps_tx_buf); 258 skb_queue_head_init(&sta->ps_tx_buf);
259 skb_queue_head_init(&sta->tx_filtered); 259 skb_queue_head_init(&sta->tx_filtered);
@@ -647,15 +647,7 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
647 return ret; 647 return ret;
648 648
649 if (sta->key) { 649 if (sta->key) {
650 ieee80211_key_free(sta->key); 650 ieee80211_key_free(local, sta->key);
651 /*
652 * We have only unlinked the key, and actually destroying it
653 * may mean it is removed from hardware which requires that
654 * the key->sta pointer is still valid, so flush the key todo
655 * list here.
656 */
657 ieee80211_key_todo();
658
659 WARN_ON(sta->key); 651 WARN_ON(sta->key);
660 } 652 }
661 653
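
The reworded comment above deserves a line of expansion: timer_to_tid[] is
identity-mapped so each per-TID timer can be armed with the address of its own
slot, letting the callback recover both the TID (the slot's value) and the
owning sta_info (container_of() on slot zero) without a separate context
struct. A reduced sketch, assuming this era's unsigned-long timer callback
signature (struct my_sta is a stand-in):

    #include <linux/kernel.h>
    #include <linux/types.h>

    struct my_sta {
            u8 timer_to_tid[16]; /* timer_to_tid[i] == i */
            /* ... */
    };

    static void session_timer_expired(unsigned long data)
    {
            u8 *ptid = (u8 *)data;
            u8 *base = ptid - *ptid; /* back to &timer_to_tid[0] */
            struct my_sta *sta = container_of(base, struct my_sta,
                                              timer_to_tid[0]);
            int tid = *ptid;

            /* ... tear down the RX session for (sta, tid) ... */
            (void)sta;
            (void)tid;
    }
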
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index df9d45544ca5..54262e72376d 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -42,9 +42,6 @@
42 * be in the queues 42 * be in the queues
43 * @WLAN_STA_PSPOLL: Station sent PS-poll while driver was keeping 43 * @WLAN_STA_PSPOLL: Station sent PS-poll while driver was keeping
44 * station in power-save mode, reply when the driver unblocks. 44 * station in power-save mode, reply when the driver unblocks.
45 * @WLAN_STA_DISASSOC: Disassociation in progress.
46 * This is used to reject TX BA session requests when disassociation
47 * is in progress.
48 */ 45 */
49enum ieee80211_sta_info_flags { 46enum ieee80211_sta_info_flags {
50 WLAN_STA_AUTH = 1<<0, 47 WLAN_STA_AUTH = 1<<0,
@@ -60,38 +57,44 @@ enum ieee80211_sta_info_flags {
60 WLAN_STA_BLOCK_BA = 1<<11, 57 WLAN_STA_BLOCK_BA = 1<<11,
61 WLAN_STA_PS_DRIVER = 1<<12, 58 WLAN_STA_PS_DRIVER = 1<<12,
62 WLAN_STA_PSPOLL = 1<<13, 59 WLAN_STA_PSPOLL = 1<<13,
63 WLAN_STA_DISASSOC = 1<<14,
64}; 60};
65 61
66#define STA_TID_NUM 16 62#define STA_TID_NUM 16
67#define ADDBA_RESP_INTERVAL HZ 63#define ADDBA_RESP_INTERVAL HZ
68#define HT_AGG_MAX_RETRIES (0x3) 64#define HT_AGG_MAX_RETRIES 0x3
69 65
70#define HT_AGG_STATE_INITIATOR_SHIFT (4) 66#define HT_AGG_STATE_DRV_READY 0
71 67#define HT_AGG_STATE_RESPONSE_RECEIVED 1
72#define HT_ADDBA_REQUESTED_MSK BIT(0) 68#define HT_AGG_STATE_OPERATIONAL 2
73#define HT_ADDBA_DRV_READY_MSK BIT(1) 69#define HT_AGG_STATE_STOPPING 3
74#define HT_ADDBA_RECEIVED_MSK BIT(2) 70#define HT_AGG_STATE_WANT_START 4
75#define HT_AGG_STATE_REQ_STOP_BA_MSK BIT(3) 71#define HT_AGG_STATE_WANT_STOP 5
76#define HT_AGG_STATE_INITIATOR_MSK BIT(HT_AGG_STATE_INITIATOR_SHIFT)
77#define HT_AGG_STATE_IDLE (0x0)
78#define HT_AGG_STATE_OPERATIONAL (HT_ADDBA_REQUESTED_MSK | \
79 HT_ADDBA_DRV_READY_MSK | \
80 HT_ADDBA_RECEIVED_MSK)
81 72
82/** 73/**
83 * struct tid_ampdu_tx - TID aggregation information (Tx). 74 * struct tid_ampdu_tx - TID aggregation information (Tx).
84 * 75 *
76 * @rcu_head: rcu head for freeing structure
85 * @addba_resp_timer: timer for peer's response to addba request 77 * @addba_resp_timer: timer for peer's response to addba request
86 * @pending: pending frames queue -- use sta's spinlock to protect 78 * @pending: pending frames queue -- use sta's spinlock to protect
87 * @ssn: Starting Sequence Number expected to be aggregated.
88 * @dialog_token: dialog token for aggregation session 79 * @dialog_token: dialog token for aggregation session
80 * @state: session state (see above)
81 * @stop_initiator: initiator of a session stop
82 *
83 * This structure is protected by RCU and the per-station
84 * spinlock. Assignments to the array holding it must hold
85 * the spinlock, only the TX path can access it under RCU
86 * lock-free if, and only if, the state has the flag
87 * %HT_AGG_STATE_OPERATIONAL set. Otherwise, the TX path
88 * must also acquire the spinlock and re-check the state,
89 * see comments in the tx code touching it.
89 */ 90 */
90struct tid_ampdu_tx { 91struct tid_ampdu_tx {
92 struct rcu_head rcu_head;
91 struct timer_list addba_resp_timer; 93 struct timer_list addba_resp_timer;
92 struct sk_buff_head pending; 94 struct sk_buff_head pending;
93 u16 ssn; 95 unsigned long state;
94 u8 dialog_token; 96 u8 dialog_token;
97 u8 stop_initiator;
95}; 98};
96 99
97/** 100/**
@@ -106,8 +109,18 @@ struct tid_ampdu_tx {
106 * @buf_size: buffer size for incoming A-MPDUs 109 * @buf_size: buffer size for incoming A-MPDUs
107 * @timeout: reset timer value (in TUs). 110 * @timeout: reset timer value (in TUs).
108 * @dialog_token: dialog token for aggregation session 111 * @dialog_token: dialog token for aggregation session
112 * @rcu_head: RCU head used for freeing this struct
113 *
114 * This structure is protected by RCU and the per-station
115 * spinlock. Assignments to the array holding it must hold
116 * the spinlock, only the RX path can access it under RCU
117 * lock-free. The RX path, since it is single-threaded,
118 * can even modify the structure without locking since the
119 * only other modifications to it are done when the struct
120 * can not yet or no longer be found by the RX path.
109 */ 121 */
110struct tid_ampdu_rx { 122struct tid_ampdu_rx {
123 struct rcu_head rcu_head;
111 struct sk_buff **reorder_buf; 124 struct sk_buff **reorder_buf;
112 unsigned long *reorder_time; 125 unsigned long *reorder_time;
113 struct timer_list session_timer; 126 struct timer_list session_timer;
@@ -120,6 +133,32 @@ struct tid_ampdu_rx {
120}; 133};
121 134
122/** 135/**
136 * struct sta_ampdu_mlme - STA aggregation information.
137 *
138 * @tid_rx: aggregation info for Rx per TID -- RCU protected
139 * @tid_tx: aggregation info for Tx per TID
140 * @addba_req_num: number of times addBA request has been sent.
141 * @dialog_token_allocator: dialog token enumerator for each new session;
142 * @work: work struct for starting/stopping aggregation
143 * @tid_rx_timer_expired: bitmap indicating on which TIDs the
144 * RX timer expired until the work for it runs
145 * @mtx: mutex to protect all TX data (except non-NULL assignments
146 * to tid_tx[idx], which are protected by the sta spinlock)
147 */
148struct sta_ampdu_mlme {
149 struct mutex mtx;
150 /* rx */
151 struct tid_ampdu_rx *tid_rx[STA_TID_NUM];
152 unsigned long tid_rx_timer_expired[BITS_TO_LONGS(STA_TID_NUM)];
153 /* tx */
154 struct work_struct work;
155 struct tid_ampdu_tx *tid_tx[STA_TID_NUM];
156 u8 addba_req_num[STA_TID_NUM];
157 u8 dialog_token_allocator;
158};
159
160
161/**
123 * enum plink_state - state of a mesh peer link finite state machine 162 * enum plink_state - state of a mesh peer link finite state machine
124 * 163 *
125 * @PLINK_LISTEN: initial state, considered the implicit state of non existant 164 * @PLINK_LISTEN: initial state, considered the implicit state of non existant
@@ -143,28 +182,6 @@ enum plink_state {
143}; 182};
144 183
145/** 184/**
146 * struct sta_ampdu_mlme - STA aggregation information.
147 *
148 * @tid_active_rx: TID's state in Rx session state machine.
149 * @tid_rx: aggregation info for Rx per TID
150 * @tid_state_tx: TID's state in Tx session state machine.
151 * @tid_tx: aggregation info for Tx per TID
152 * @addba_req_num: number of times addBA request has been sent.
153 * @dialog_token_allocator: dialog token enumerator for each new session;
154 */
155struct sta_ampdu_mlme {
156 /* rx */
157 bool tid_active_rx[STA_TID_NUM];
158 struct tid_ampdu_rx *tid_rx[STA_TID_NUM];
159 /* tx */
160 u8 tid_state_tx[STA_TID_NUM];
161 struct tid_ampdu_tx *tid_tx[STA_TID_NUM];
162 u8 addba_req_num[STA_TID_NUM];
163 u8 dialog_token_allocator;
164};
165
166
167/**
168 * struct sta_info - STA information 185 * struct sta_info - STA information
169 * 186 *
170 * This structure collects information about a station that 187 * This structure collects information about a station that
@@ -410,20 +427,20 @@ void for_each_sta_info_type_check(struct ieee80211_local *local,
410{ 427{
411} 428}
412 429
413#define for_each_sta_info(local, _addr, sta, nxt) \ 430#define for_each_sta_info(local, _addr, _sta, nxt) \
414 for ( /* initialise loop */ \ 431 for ( /* initialise loop */ \
415 sta = rcu_dereference(local->sta_hash[STA_HASH(_addr)]),\ 432 _sta = rcu_dereference(local->sta_hash[STA_HASH(_addr)]),\
416 nxt = sta ? rcu_dereference(sta->hnext) : NULL; \ 433 nxt = _sta ? rcu_dereference(_sta->hnext) : NULL; \
417 /* typecheck */ \ 434 /* typecheck */ \
418 for_each_sta_info_type_check(local, (_addr), sta, nxt), \ 435 for_each_sta_info_type_check(local, (_addr), _sta, nxt),\
419 /* continue condition */ \ 436 /* continue condition */ \
420 sta; \ 437 _sta; \
421 /* advance loop */ \ 438 /* advance loop */ \
422 sta = nxt, \ 439 _sta = nxt, \
423 nxt = sta ? rcu_dereference(sta->hnext) : NULL \ 440 nxt = _sta ? rcu_dereference(_sta->hnext) : NULL \
424 ) \ 441 ) \
425 /* compare address and run code only if it matches */ \ 442 /* compare address and run code only if it matches */ \
426 if (memcmp(sta->sta.addr, (_addr), ETH_ALEN) == 0) 443 if (memcmp(_sta->sta.addr, (_addr), ETH_ALEN) == 0)
427 444
428/* 445/*
429 * Get STA info by index, BROKEN! 446 * Get STA info by index, BROKEN!
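
The for_each_sta_info() churn above is pure macro hygiene: the old body
hard-coded the identifier "sta", which silently forced every caller to name
its loop variable exactly that. Renaming the parameter to _sta makes the macro
honest about what it expands. A toy illustration of the difference
(BAD_/GOOD_FOR_EACH and struct node are hypothetical):

    struct node { struct node *next; };

    /* expands a 'sta' that must already exist in the caller's scope */
    #define BAD_FOR_EACH(head) \
            for (sta = (head); sta; sta = sta->next)

    /* caller picks the variable explicitly */
    #define GOOD_FOR_EACH(_sta, head) \
            for ((_sta) = (head); (_sta); (_sta) = (_sta)->next)

    static int count_nodes(struct node *head)
    {
            struct node *entry;
            int n = 0;

            /* BAD_FOR_EACH(head) would not even compile here */
            GOOD_FOR_EACH(entry, head)
                    n++;
            return n;
    }
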
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 94613af009f3..10caec5ea8fa 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -47,7 +47,7 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
47 /* 47 /*
48 * This skb 'survived' a round-trip through the driver, and 48 * This skb 'survived' a round-trip through the driver, and
49 * hopefully the driver didn't mangle it too badly. However, 49 * hopefully the driver didn't mangle it too badly. However,
50 * we can definitely not rely on the the control information 50 * we can definitely not rely on the control information
51 * being correct. Clear it so we don't get junk there, and 51 * being correct. Clear it so we don't get junk there, and
52 * indicate that it needs new processing, but must not be 52 * indicate that it needs new processing, but must not be
53 * modified/encrypted again. 53 * modified/encrypted again.
@@ -377,7 +377,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
377 skb2 = skb_clone(skb, GFP_ATOMIC); 377 skb2 = skb_clone(skb, GFP_ATOMIC);
378 if (skb2) { 378 if (skb2) {
379 skb2->dev = prev_dev; 379 skb2->dev = prev_dev;
380 netif_rx(skb2); 380 netif_receive_skb(skb2);
381 } 381 }
382 } 382 }
383 383
@@ -386,7 +386,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
386 } 386 }
387 if (prev_dev) { 387 if (prev_dev) {
388 skb->dev = prev_dev; 388 skb->dev = prev_dev;
389 netif_rx(skb); 389 netif_receive_skb(skb);
390 skb = NULL; 390 skb = NULL;
391 } 391 }
392 rcu_read_unlock(); 392 rcu_read_unlock();
diff --git a/net/mac80211/tkip.c b/net/mac80211/tkip.c
index 7ef491e9d66d..e840c9cd46db 100644
--- a/net/mac80211/tkip.c
+++ b/net/mac80211/tkip.c
@@ -202,9 +202,9 @@ EXPORT_SYMBOL(ieee80211_get_tkip_key);
202 * @payload_len is the length of payload (_not_ including IV/ICV length). 202 * @payload_len is the length of payload (_not_ including IV/ICV length).
203 * @ta is the transmitter addresses. 203 * @ta is the transmitter addresses.
204 */ 204 */
205void ieee80211_tkip_encrypt_data(struct crypto_blkcipher *tfm, 205int ieee80211_tkip_encrypt_data(struct crypto_blkcipher *tfm,
206 struct ieee80211_key *key, 206 struct ieee80211_key *key,
207 u8 *pos, size_t payload_len, u8 *ta) 207 u8 *pos, size_t payload_len, u8 *ta)
208{ 208{
209 u8 rc4key[16]; 209 u8 rc4key[16];
210 struct tkip_ctx *ctx = &key->u.tkip.tx; 210 struct tkip_ctx *ctx = &key->u.tkip.tx;
@@ -216,7 +216,7 @@ void ieee80211_tkip_encrypt_data(struct crypto_blkcipher *tfm,
216 216
217 tkip_mixing_phase2(tk, ctx, ctx->iv16, rc4key); 217 tkip_mixing_phase2(tk, ctx, ctx->iv16, rc4key);
218 218
219 ieee80211_wep_encrypt_data(tfm, rc4key, 16, pos, payload_len); 219 return ieee80211_wep_encrypt_data(tfm, rc4key, 16, pos, payload_len);
220} 220}
221 221
222/* Decrypt packet payload with TKIP using @key. @pos is a pointer to the 222/* Decrypt packet payload with TKIP using @key. @pos is a pointer to the
diff --git a/net/mac80211/tkip.h b/net/mac80211/tkip.h
index d4714383f5fc..7e83dee976fa 100644
--- a/net/mac80211/tkip.h
+++ b/net/mac80211/tkip.h
@@ -15,7 +15,7 @@
15 15
16u8 *ieee80211_tkip_add_iv(u8 *pos, struct ieee80211_key *key, u16 iv16); 16u8 *ieee80211_tkip_add_iv(u8 *pos, struct ieee80211_key *key, u16 iv16);
17 17
18void ieee80211_tkip_encrypt_data(struct crypto_blkcipher *tfm, 18int ieee80211_tkip_encrypt_data(struct crypto_blkcipher *tfm,
19 struct ieee80211_key *key, 19 struct ieee80211_key *key,
20 u8 *pos, size_t payload_len, u8 *ta); 20 u8 *pos, size_t payload_len, u8 *ta);
21enum { 21enum {
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 680bcb7093db..c54db966926b 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -576,17 +576,6 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
576} 576}
577 577
578static ieee80211_tx_result debug_noinline 578static ieee80211_tx_result debug_noinline
579ieee80211_tx_h_sta(struct ieee80211_tx_data *tx)
580{
581 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
582
583 if (tx->sta && tx->sta->uploaded)
584 info->control.sta = &tx->sta->sta;
585
586 return TX_CONTINUE;
587}
588
589static ieee80211_tx_result debug_noinline
590ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx) 579ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
591{ 580{
592 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); 581 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
@@ -1092,6 +1081,59 @@ static bool __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
1092 return true; 1081 return true;
1093} 1082}
1094 1083
1084static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
1085 struct sk_buff *skb,
1086 struct ieee80211_tx_info *info,
1087 struct tid_ampdu_tx *tid_tx,
1088 int tid)
1089{
1090 bool queued = false;
1091
1092 if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
1093 info->flags |= IEEE80211_TX_CTL_AMPDU;
1094 } else if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
1095 /*
1096 * nothing -- this aggregation session is being started
1097 * but that might still fail with the driver
1098 */
1099 } else {
1100 spin_lock(&tx->sta->lock);
1101 /*
1102 * Need to re-check now, because we may get here
1103 *
1104 * 1) in the window during which the setup is actually
1105 * already done, but not marked yet because not all
1106 * packets are spliced over to the driver pending
1107 * queue yet -- if this happened we acquire the lock
1108 * either before or after the splice happens, but
1109 * need to recheck which of these cases happened.
1110 *
1111 * 2) during session teardown, if the OPERATIONAL bit
1112 * was cleared due to the teardown but the pointer
1113 * hasn't been assigned NULL yet (or we loaded it
1114 * before it was assigned) -- in this case it may
1115 * now be NULL which means we should just let the
1116 * packet pass through because splicing the frames
1117 * back is already done.
1118 */
1119 tid_tx = tx->sta->ampdu_mlme.tid_tx[tid];
1120
1121 if (!tid_tx) {
1122 /* do nothing, let packet pass through */
1123 } else if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
1124 info->flags |= IEEE80211_TX_CTL_AMPDU;
1125 } else {
1126 queued = true;
1127 info->control.vif = &tx->sdata->vif;
1128 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
1129 __skb_queue_tail(&tid_tx->pending, skb);
1130 }
1131 spin_unlock(&tx->sta->lock);
1132 }
1133
1134 return queued;
1135}
1136
1095/* 1137/*
1096 * initialises @tx 1138 * initialises @tx
1097 */ 1139 */
@@ -1104,8 +1146,7 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
1104 struct ieee80211_hdr *hdr; 1146 struct ieee80211_hdr *hdr;
1105 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1147 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1106 int hdrlen, tid; 1148 int hdrlen, tid;
1107 u8 *qc, *state; 1149 u8 *qc;
1108 bool queued = false;
1109 1150
1110 memset(tx, 0, sizeof(*tx)); 1151 memset(tx, 0, sizeof(*tx));
1111 tx->skb = skb; 1152 tx->skb = skb;
@@ -1157,35 +1198,16 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
1157 qc = ieee80211_get_qos_ctl(hdr); 1198 qc = ieee80211_get_qos_ctl(hdr);
1158 tid = *qc & IEEE80211_QOS_CTL_TID_MASK; 1199 tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
1159 1200
1160 spin_lock(&tx->sta->lock); 1201 tid_tx = rcu_dereference(tx->sta->ampdu_mlme.tid_tx[tid]);
1161 /* 1202 if (tid_tx) {
1162 * XXX: This spinlock could be fairly expensive, but see the 1203 bool queued;
1163 * comment in agg-tx.c:ieee80211_agg_tx_operational().
1164 * One way to solve this would be to do something RCU-like
1165 * for managing the tid_tx struct and using atomic bitops
1166 * for the actual state -- by introducing an actual
1167 * 'operational' bit that would be possible. It would
1168 * require changing ieee80211_agg_tx_operational() to
1169 * set that bit, and changing the way tid_tx is managed
1170 * everywhere, including races between that bit and
1171 * tid_tx going away (tid_tx being added can be easily
1172 * committed to memory before the 'operational' bit).
1173 */
1174 tid_tx = tx->sta->ampdu_mlme.tid_tx[tid];
1175 state = &tx->sta->ampdu_mlme.tid_state_tx[tid];
1176 if (*state == HT_AGG_STATE_OPERATIONAL) {
1177 info->flags |= IEEE80211_TX_CTL_AMPDU;
1178 } else if (*state != HT_AGG_STATE_IDLE) {
1179 /* in progress */
1180 queued = true;
1181 info->control.vif = &sdata->vif;
1182 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
1183 __skb_queue_tail(&tid_tx->pending, skb);
1184 }
1185 spin_unlock(&tx->sta->lock);
1186 1204
1187 if (unlikely(queued)) 1205 queued = ieee80211_tx_prep_agg(tx, skb, info,
1188 return TX_QUEUED; 1206 tid_tx, tid);
1207
1208 if (unlikely(queued))
1209 return TX_QUEUED;
1210 }
1189 } 1211 }
1190 1212
1191 if (is_multicast_ether_addr(hdr->addr1)) { 1213 if (is_multicast_ether_addr(hdr->addr1)) {
@@ -1274,6 +1296,11 @@ static int __ieee80211_tx(struct ieee80211_local *local,
1274 break; 1296 break;
1275 } 1297 }
1276 1298
1299 if (sta && sta->uploaded)
1300 info->control.sta = &sta->sta;
1301 else
1302 info->control.sta = NULL;
1303
1277 ret = drv_tx(local, skb); 1304 ret = drv_tx(local, skb);
1278 if (WARN_ON(ret != NETDEV_TX_OK && skb->len != len)) { 1305 if (WARN_ON(ret != NETDEV_TX_OK && skb->len != len)) {
1279 dev_kfree_skb(skb); 1306 dev_kfree_skb(skb);
@@ -1313,7 +1340,6 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
1313 CALL_TXH(ieee80211_tx_h_check_assoc); 1340 CALL_TXH(ieee80211_tx_h_check_assoc);
1314 CALL_TXH(ieee80211_tx_h_ps_buf); 1341 CALL_TXH(ieee80211_tx_h_ps_buf);
1315 CALL_TXH(ieee80211_tx_h_select_key); 1342 CALL_TXH(ieee80211_tx_h_select_key);
1316 CALL_TXH(ieee80211_tx_h_sta);
1317 if (!(tx->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)) 1343 if (!(tx->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL))
1318 CALL_TXH(ieee80211_tx_h_rate_ctrl); 1344 CALL_TXH(ieee80211_tx_h_rate_ctrl);
1319 1345
@@ -1909,11 +1935,13 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1909 h_pos += encaps_len; 1935 h_pos += encaps_len;
1910 } 1936 }
1911 1937
1938#ifdef CONFIG_MAC80211_MESH
1912 if (meshhdrlen > 0) { 1939 if (meshhdrlen > 0) {
1913 memcpy(skb_push(skb, meshhdrlen), &mesh_hdr, meshhdrlen); 1940 memcpy(skb_push(skb, meshhdrlen), &mesh_hdr, meshhdrlen);
1914 nh_pos += meshhdrlen; 1941 nh_pos += meshhdrlen;
1915 h_pos += meshhdrlen; 1942 h_pos += meshhdrlen;
1916 } 1943 }
1944#endif
1917 1945
1918 if (ieee80211_is_data_qos(fc)) { 1946 if (ieee80211_is_data_qos(fc)) {
1919 __le16 *qos_control; 1947 __le16 *qos_control;
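
The tx.c rework above pairs with the RX side: ieee80211_tx_prepare() now looks
tid_tx up via rcu_dereference(), and ieee80211_tx_prep_agg() implements the
check / lock / re-check scheme its long comment describes -- the common cases
(operational session, or none) are decided lock-free, and only the
transitional window pays for the spinlock plus a re-read. A reduced sketch of
that scheme with stand-in names (struct agg_session, AGG_OPERATIONAL):

    #include <linux/bitops.h>
    #include <linux/rcupdate.h>
    #include <linux/spinlock.h>

    struct agg_session {
            unsigned long state;
    };
    #define AGG_OPERATIONAL 0 /* stand-in for HT_AGG_STATE_OPERATIONAL */

    struct holder {
            spinlock_t lock;
            struct agg_session *session; /* RCU-published */
    };

    /*
     * @s was read with rcu_dereference(h->session) by the caller.
     * Returns true if the frame must be queued on the pending list.
     */
    static bool agg_must_queue(struct holder *h, struct agg_session *s)
    {
            bool queue = false;

            if (!s || test_bit(AGG_OPERATIONAL, &s->state))
                    return false; /* fast path, no lock taken */

            spin_lock(&h->lock);
            /* re-read: setup or teardown may have finished meanwhile */
            s = h->session;
            if (s && !test_bit(AGG_OPERATIONAL, &s->state))
                    queue = true; /* still in transition */
            spin_unlock(&h->lock);

            return queue;
    }
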
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 5b79d552780a..748387d45bc0 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -803,8 +803,12 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata)
803 803
804 /* after reinitialize QoS TX queues setting to default, 804 /* after reinitialize QoS TX queues setting to default,
805 * disable QoS at all */ 805 * disable QoS at all */
806 local->hw.conf.flags &= ~IEEE80211_CONF_QOS; 806
807 drv_config(local, IEEE80211_CONF_CHANGE_QOS); 807 if (sdata->vif.type != NL80211_IFTYPE_MONITOR) {
808 sdata->vif.bss_conf.qos =
809 sdata->vif.type != NL80211_IFTYPE_STATION;
810 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_QOS);
811 }
808} 812}
809 813
810void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata, 814void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
@@ -1138,18 +1142,6 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1138 } 1142 }
1139 mutex_unlock(&local->sta_mtx); 1143 mutex_unlock(&local->sta_mtx);
1140 1144
1141 /* Clear Suspend state so that ADDBA requests can be processed */
1142
1143 rcu_read_lock();
1144
1145 if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
1146 list_for_each_entry_rcu(sta, &local->sta_list, list) {
1147 clear_sta_flags(sta, WLAN_STA_BLOCK_BA);
1148 }
1149 }
1150
1151 rcu_read_unlock();
1152
1153 /* setup RTS threshold */ 1145 /* setup RTS threshold */
1154 drv_set_rts_threshold(local, hw->wiphy->rts_threshold); 1146 drv_set_rts_threshold(local, hw->wiphy->rts_threshold);
1155 1147
@@ -1173,7 +1165,8 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1173 BSS_CHANGED_BASIC_RATES | 1165 BSS_CHANGED_BASIC_RATES |
1174 BSS_CHANGED_BEACON_INT | 1166 BSS_CHANGED_BEACON_INT |
1175 BSS_CHANGED_BSSID | 1167 BSS_CHANGED_BSSID |
1176 BSS_CHANGED_CQM; 1168 BSS_CHANGED_CQM |
1169 BSS_CHANGED_QOS;
1177 1170
1178 switch (sdata->vif.type) { 1171 switch (sdata->vif.type) {
1179 case NL80211_IFTYPE_STATION: 1172 case NL80211_IFTYPE_STATION:
@@ -1202,13 +1195,26 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1202 } 1195 }
1203 } 1196 }
1204 1197
1205 rcu_read_lock(); 1198 /*
1199 * Clear the WLAN_STA_BLOCK_BA flag so new aggregation
1200 * sessions can be established after a resume.
1201 *
1202 * Also tear down aggregation sessions since reconfiguring
1203 * them in a hardware restart scenario is not easily done
1204 * right now, and the hardware will have lost information
1205 * about the sessions, but we and the AP still think they
1206 * are active. This is really a workaround though.
1207 */
1206 if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) { 1208 if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
1207 list_for_each_entry_rcu(sta, &local->sta_list, list) { 1209 mutex_lock(&local->sta_mtx);
1210
1211 list_for_each_entry(sta, &local->sta_list, list) {
1208 ieee80211_sta_tear_down_BA_sessions(sta); 1212 ieee80211_sta_tear_down_BA_sessions(sta);
1213 clear_sta_flags(sta, WLAN_STA_BLOCK_BA);
1209 } 1214 }
1215
1216 mutex_unlock(&local->sta_mtx);
1210 } 1217 }
1211 rcu_read_unlock();
1212 1218
1213 /* add back keys */ 1219 /* add back keys */
1214 list_for_each_entry(sdata, &local->interfaces, list) 1220 list_for_each_entry(sdata, &local->interfaces, list)
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
index 5f3a4113bda1..9ebc8d8a1f5b 100644
--- a/net/mac80211/wep.c
+++ b/net/mac80211/wep.c
@@ -32,13 +32,16 @@ int ieee80211_wep_init(struct ieee80211_local *local)
32 32
33 local->wep_tx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 33 local->wep_tx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0,
34 CRYPTO_ALG_ASYNC); 34 CRYPTO_ALG_ASYNC);
35 if (IS_ERR(local->wep_tx_tfm)) 35 if (IS_ERR(local->wep_tx_tfm)) {
36 local->wep_rx_tfm = ERR_PTR(-EINVAL);
36 return PTR_ERR(local->wep_tx_tfm); 37 return PTR_ERR(local->wep_tx_tfm);
38 }
37 39
38 local->wep_rx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 40 local->wep_rx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0,
39 CRYPTO_ALG_ASYNC); 41 CRYPTO_ALG_ASYNC);
40 if (IS_ERR(local->wep_rx_tfm)) { 42 if (IS_ERR(local->wep_rx_tfm)) {
41 crypto_free_blkcipher(local->wep_tx_tfm); 43 crypto_free_blkcipher(local->wep_tx_tfm);
44 local->wep_tx_tfm = ERR_PTR(-EINVAL);
42 return PTR_ERR(local->wep_rx_tfm); 45 return PTR_ERR(local->wep_rx_tfm);
43 } 46 }
44 47
@@ -47,8 +50,10 @@ int ieee80211_wep_init(struct ieee80211_local *local)
47 50
48void ieee80211_wep_free(struct ieee80211_local *local) 51void ieee80211_wep_free(struct ieee80211_local *local)
49{ 52{
50 crypto_free_blkcipher(local->wep_tx_tfm); 53 if (!IS_ERR(local->wep_tx_tfm))
51 crypto_free_blkcipher(local->wep_rx_tfm); 54 crypto_free_blkcipher(local->wep_tx_tfm);
55 if (!IS_ERR(local->wep_rx_tfm))
56 crypto_free_blkcipher(local->wep_rx_tfm);
52} 57}
53 58
54static inline bool ieee80211_wep_weak_iv(u32 iv, int keylen) 59static inline bool ieee80211_wep_weak_iv(u32 iv, int keylen)
@@ -122,19 +127,24 @@ static void ieee80211_wep_remove_iv(struct ieee80211_local *local,
122/* Perform WEP encryption using given key. data buffer must have tailroom 127/* Perform WEP encryption using given key. data buffer must have tailroom
123 * for 4-byte ICV. data_len must not include this ICV. Note: this function 128 * for 4-byte ICV. data_len must not include this ICV. Note: this function
124 * does _not_ add IV. data = RC4(data | CRC32(data)) */ 129 * does _not_ add IV. data = RC4(data | CRC32(data)) */
125void ieee80211_wep_encrypt_data(struct crypto_blkcipher *tfm, u8 *rc4key, 130int ieee80211_wep_encrypt_data(struct crypto_blkcipher *tfm, u8 *rc4key,
126 size_t klen, u8 *data, size_t data_len) 131 size_t klen, u8 *data, size_t data_len)
127{ 132{
128 struct blkcipher_desc desc = { .tfm = tfm }; 133 struct blkcipher_desc desc = { .tfm = tfm };
129 struct scatterlist sg; 134 struct scatterlist sg;
130 __le32 icv; 135 __le32 icv;
131 136
137 if (IS_ERR(tfm))
138 return -1;
139
132 icv = cpu_to_le32(~crc32_le(~0, data, data_len)); 140 icv = cpu_to_le32(~crc32_le(~0, data, data_len));
133 put_unaligned(icv, (__le32 *)(data + data_len)); 141 put_unaligned(icv, (__le32 *)(data + data_len));
134 142
135 crypto_blkcipher_setkey(tfm, rc4key, klen); 143 crypto_blkcipher_setkey(tfm, rc4key, klen);
136 sg_init_one(&sg, data, data_len + WEP_ICV_LEN); 144 sg_init_one(&sg, data, data_len + WEP_ICV_LEN);
137 crypto_blkcipher_encrypt(&desc, &sg, &sg, sg.length); 145 crypto_blkcipher_encrypt(&desc, &sg, &sg, sg.length);
146
147 return 0;
138} 148}
139 149
140 150
@@ -168,10 +178,8 @@ int ieee80211_wep_encrypt(struct ieee80211_local *local,
168 /* Add room for ICV */ 178 /* Add room for ICV */
169 skb_put(skb, WEP_ICV_LEN); 179 skb_put(skb, WEP_ICV_LEN);
170 180
171 ieee80211_wep_encrypt_data(local->wep_tx_tfm, rc4key, keylen + 3, 181 return ieee80211_wep_encrypt_data(local->wep_tx_tfm, rc4key, keylen + 3,
172 iv + WEP_IV_LEN, len); 182 iv + WEP_IV_LEN, len);
173
174 return 0;
175} 183}
176 184
177 185
@@ -185,6 +193,9 @@ int ieee80211_wep_decrypt_data(struct crypto_blkcipher *tfm, u8 *rc4key,
185 struct scatterlist sg; 193 struct scatterlist sg;
186 __le32 crc; 194 __le32 crc;
187 195
196 if (IS_ERR(tfm))
197 return -1;
198
188 crypto_blkcipher_setkey(tfm, rc4key, klen); 199 crypto_blkcipher_setkey(tfm, rc4key, klen);
189 sg_init_one(&sg, data, data_len + WEP_ICV_LEN); 200 sg_init_one(&sg, data, data_len + WEP_ICV_LEN);
190 crypto_blkcipher_decrypt(&desc, &sg, &sg, sg.length); 201 crypto_blkcipher_decrypt(&desc, &sg, &sg, sg.length);
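
The wep.c hunks above establish an ERR_PTR sentinel convention: a failed
cipher allocation leaves an error-encoded pointer (never NULL) behind, and
every later user -- encrypt, decrypt, free -- tests IS_ERR() before touching
the tfm. Restated as a self-contained sketch:

    #include <linux/crypto.h>
    #include <linux/err.h>

    struct wep_ctx {
            struct crypto_blkcipher *tx_tfm;
            struct crypto_blkcipher *rx_tfm;
    };

    static int wep_ctx_init(struct wep_ctx *c)
    {
            c->tx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0,
                                               CRYPTO_ALG_ASYNC);
            if (IS_ERR(c->tx_tfm)) {
                    /* leave a marker so the free path can tell */
                    c->rx_tfm = ERR_PTR(-EINVAL);
                    return PTR_ERR(c->tx_tfm);
            }

            c->rx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0,
                                               CRYPTO_ALG_ASYNC);
            if (IS_ERR(c->rx_tfm)) {
                    crypto_free_blkcipher(c->tx_tfm);
                    c->tx_tfm = ERR_PTR(-EINVAL);
                    return PTR_ERR(c->rx_tfm);
            }
            return 0;
    }

    static void wep_ctx_free(struct wep_ctx *c)
    {
            if (!IS_ERR(c->tx_tfm))
                    crypto_free_blkcipher(c->tx_tfm);
            if (!IS_ERR(c->rx_tfm))
                    crypto_free_blkcipher(c->rx_tfm);
    }

This is also why ieee80211_tkip_encrypt_data() and the WEP data helpers now
return int: an IS_ERR() tfm propagates as -1 up through the tx handlers
instead of dereferencing a poisoned pointer.
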
diff --git a/net/mac80211/wep.h b/net/mac80211/wep.h
index fe29d7e5759f..58654ee33518 100644
--- a/net/mac80211/wep.h
+++ b/net/mac80211/wep.h
@@ -18,7 +18,7 @@
18 18
19int ieee80211_wep_init(struct ieee80211_local *local); 19int ieee80211_wep_init(struct ieee80211_local *local);
20void ieee80211_wep_free(struct ieee80211_local *local); 20void ieee80211_wep_free(struct ieee80211_local *local);
21void ieee80211_wep_encrypt_data(struct crypto_blkcipher *tfm, u8 *rc4key, 21int ieee80211_wep_encrypt_data(struct crypto_blkcipher *tfm, u8 *rc4key,
22 size_t klen, u8 *data, size_t data_len); 22 size_t klen, u8 *data, size_t data_len);
23int ieee80211_wep_encrypt(struct ieee80211_local *local, 23int ieee80211_wep_encrypt(struct ieee80211_local *local,
24 struct sk_buff *skb, 24 struct sk_buff *skb,
diff --git a/net/mac80211/work.c b/net/mac80211/work.c
index be3d4a698692..81d4ad64184a 100644
--- a/net/mac80211/work.c
+++ b/net/mac80211/work.c
@@ -560,6 +560,22 @@ ieee80211_remain_on_channel_timeout(struct ieee80211_work *wk)
560 return WORK_ACT_TIMEOUT; 560 return WORK_ACT_TIMEOUT;
561} 561}
562 562
563static enum work_action __must_check
564ieee80211_assoc_beacon_wait(struct ieee80211_work *wk)
565{
566 if (wk->started)
567 return WORK_ACT_TIMEOUT;
568
569 /*
570 * Wait up to one beacon interval ...
571 * should this be more if we miss one?
572 */
573 printk(KERN_DEBUG "%s: waiting for beacon from %pM\n",
574 wk->sdata->name, wk->filter_ta);
575 wk->timeout = TU_TO_EXP_TIME(wk->assoc.bss->beacon_interval);
576 return WORK_ACT_NONE;
577}
578
563static void ieee80211_auth_challenge(struct ieee80211_work *wk, 579static void ieee80211_auth_challenge(struct ieee80211_work *wk,
564 struct ieee80211_mgmt *mgmt, 580 struct ieee80211_mgmt *mgmt,
565 size_t len) 581 size_t len)
@@ -709,13 +725,32 @@ ieee80211_rx_mgmt_probe_resp(struct ieee80211_work *wk,
709 return WORK_ACT_DONE; 725 return WORK_ACT_DONE;
710} 726}
711 727
728static enum work_action __must_check
729ieee80211_rx_mgmt_beacon(struct ieee80211_work *wk,
730 struct ieee80211_mgmt *mgmt, size_t len)
731{
732 struct ieee80211_sub_if_data *sdata = wk->sdata;
733 struct ieee80211_local *local = sdata->local;
734
735 ASSERT_WORK_MTX(local);
736
737 if (wk->type != IEEE80211_WORK_ASSOC_BEACON_WAIT)
738 return WORK_ACT_MISMATCH;
739
740 if (len < 24 + 12)
741 return WORK_ACT_NONE;
742
743 printk(KERN_DEBUG "%s: beacon received\n", sdata->name);
744 return WORK_ACT_DONE;
745}
746
712static void ieee80211_work_rx_queued_mgmt(struct ieee80211_local *local, 747static void ieee80211_work_rx_queued_mgmt(struct ieee80211_local *local,
713 struct sk_buff *skb) 748 struct sk_buff *skb)
714{ 749{
715 struct ieee80211_rx_status *rx_status; 750 struct ieee80211_rx_status *rx_status;
716 struct ieee80211_mgmt *mgmt; 751 struct ieee80211_mgmt *mgmt;
717 struct ieee80211_work *wk; 752 struct ieee80211_work *wk;
718 enum work_action rma; 753 enum work_action rma = WORK_ACT_NONE;
719 u16 fc; 754 u16 fc;
720 755
721 rx_status = (struct ieee80211_rx_status *) skb->cb; 756 rx_status = (struct ieee80211_rx_status *) skb->cb;
@@ -731,6 +766,7 @@ static void ieee80211_work_rx_queued_mgmt(struct ieee80211_local *local,
731 case IEEE80211_WORK_DIRECT_PROBE: 766 case IEEE80211_WORK_DIRECT_PROBE:
732 case IEEE80211_WORK_AUTH: 767 case IEEE80211_WORK_AUTH:
733 case IEEE80211_WORK_ASSOC: 768 case IEEE80211_WORK_ASSOC:
769 case IEEE80211_WORK_ASSOC_BEACON_WAIT:
734 bssid = wk->filter_ta; 770 bssid = wk->filter_ta;
735 break; 771 break;
736 default: 772 default:
@@ -745,6 +781,9 @@ static void ieee80211_work_rx_queued_mgmt(struct ieee80211_local *local,
745 continue; 781 continue;
746 782
747 switch (fc & IEEE80211_FCTL_STYPE) { 783 switch (fc & IEEE80211_FCTL_STYPE) {
784 case IEEE80211_STYPE_BEACON:
785 rma = ieee80211_rx_mgmt_beacon(wk, mgmt, skb->len);
786 break;
748 case IEEE80211_STYPE_PROBE_RESP: 787 case IEEE80211_STYPE_PROBE_RESP:
749 rma = ieee80211_rx_mgmt_probe_resp(wk, mgmt, skb->len, 788 rma = ieee80211_rx_mgmt_probe_resp(wk, mgmt, skb->len,
750 rx_status); 789 rx_status);
@@ -840,7 +879,7 @@ static void ieee80211_work_work(struct work_struct *work)
840 879
841 /* 880 /*
842 * ieee80211_queue_work() should have picked up most cases, 881 * ieee80211_queue_work() should have picked up most cases,
843 * here we'll pick the the rest. 882 * here we'll pick the rest.
844 */ 883 */
845 if (WARN(local->suspended, "work scheduled while going to suspend\n")) 884 if (WARN(local->suspended, "work scheduled while going to suspend\n"))
846 return; 885 return;
@@ -916,6 +955,9 @@ static void ieee80211_work_work(struct work_struct *work)
916 case IEEE80211_WORK_REMAIN_ON_CHANNEL: 955 case IEEE80211_WORK_REMAIN_ON_CHANNEL:
917 rma = ieee80211_remain_on_channel_timeout(wk); 956 rma = ieee80211_remain_on_channel_timeout(wk);
918 break; 957 break;
958 case IEEE80211_WORK_ASSOC_BEACON_WAIT:
959 rma = ieee80211_assoc_beacon_wait(wk);
960 break;
919 } 961 }
920 962
921 wk->started = started; 963 wk->started = started;
@@ -1065,6 +1107,7 @@ ieee80211_rx_result ieee80211_work_rx_mgmt(struct ieee80211_sub_if_data *sdata,
1065 case IEEE80211_STYPE_PROBE_RESP: 1107 case IEEE80211_STYPE_PROBE_RESP:
1066 case IEEE80211_STYPE_ASSOC_RESP: 1108 case IEEE80211_STYPE_ASSOC_RESP:
1067 case IEEE80211_STYPE_REASSOC_RESP: 1109 case IEEE80211_STYPE_REASSOC_RESP:
1110 case IEEE80211_STYPE_BEACON:
1068 skb_queue_tail(&local->work_skb_queue, skb); 1111 skb_queue_tail(&local->work_skb_queue, skb);
1069 ieee80211_queue_work(&local->hw, &local->work_work); 1112 ieee80211_queue_work(&local->hw, &local->work_work);
1070 return RX_QUEUED; 1113 return RX_QUEUED;
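ieee80211_assoc_beacon_wait() above derives its timeout from the BSS beacon interval, which 802.11 expresses in time units (1 TU = 1024 microseconds). A small sketch of the conversion, assuming mac80211's usual definition of TU_TO_EXP_TIME():

	#include <linux/jiffies.h>

	/* assumption: mac80211 defines this in ieee80211_i.h roughly as */
	#define TU_TO_EXP_TIME(x)	(jiffies + usecs_to_jiffies((x) * 1024))

	/* a common beacon interval of 100 TU expires ~102.4 ms from now */
	unsigned long timeout = TU_TO_EXP_TIME(100);
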
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 0adbcc941ac9..8d59d27d887e 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -183,9 +183,8 @@ static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
183 skb_put(skb, TKIP_ICV_LEN); 183 skb_put(skb, TKIP_ICV_LEN);
184 184
185 hdr = (struct ieee80211_hdr *) skb->data; 185 hdr = (struct ieee80211_hdr *) skb->data;
186 ieee80211_tkip_encrypt_data(tx->local->wep_tx_tfm, 186 return ieee80211_tkip_encrypt_data(tx->local->wep_tx_tfm,
187 key, pos, len, hdr->addr2); 187 key, pos, len, hdr->addr2);
188 return 0;
189} 188}
190 189
191 190
@@ -436,6 +435,7 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
436 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 435 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
437 u8 pn[CCMP_PN_LEN]; 436 u8 pn[CCMP_PN_LEN];
438 int data_len; 437 int data_len;
438 int queue;
439 439
440 hdrlen = ieee80211_hdrlen(hdr->frame_control); 440 hdrlen = ieee80211_hdrlen(hdr->frame_control);
441 441
@@ -453,7 +453,10 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
453 453
454 ccmp_hdr2pn(pn, skb->data + hdrlen); 454 ccmp_hdr2pn(pn, skb->data + hdrlen);
455 455
456 if (memcmp(pn, key->u.ccmp.rx_pn[rx->queue], CCMP_PN_LEN) <= 0) { 456 queue = ieee80211_is_mgmt(hdr->frame_control) ?
457 NUM_RX_DATA_QUEUES : rx->queue;
458
459 if (memcmp(pn, key->u.ccmp.rx_pn[queue], CCMP_PN_LEN) <= 0) {
457 key->u.ccmp.replays++; 460 key->u.ccmp.replays++;
458 return RX_DROP_UNUSABLE; 461 return RX_DROP_UNUSABLE;
459 } 462 }
@@ -470,7 +473,7 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
470 return RX_DROP_UNUSABLE; 473 return RX_DROP_UNUSABLE;
471 } 474 }
472 475
473 memcpy(key->u.ccmp.rx_pn[rx->queue], pn, CCMP_PN_LEN); 476 memcpy(key->u.ccmp.rx_pn[queue], pn, CCMP_PN_LEN);
474 477
475 /* Remove CCMP header and MIC */ 478 /* Remove CCMP header and MIC */
476 skb_trim(skb, skb->len - CCMP_MIC_LEN); 479 skb_trim(skb, skb->len - CCMP_MIC_LEN);
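The wpa.c hunks above give management frames their own CCMP replay counter instead of sharing the per-queue data counters: a packet number that is valid for the management stream would otherwise be checked against an unrelated data queue's state. This assumes the key's rx_pn array carries one extra slot past NUM_RX_DATA_QUEUES, as the indexing implies:

	/* hedged sketch of the slot selection introduced above */
	int queue = ieee80211_is_mgmt(hdr->frame_control) ?
			NUM_RX_DATA_QUEUES : rx->queue;	/* last slot = mgmt */

	if (memcmp(pn, key->u.ccmp.rx_pn[queue], CCMP_PN_LEN) <= 0)
		return RX_DROP_UNUSABLE;	/* replayed or stale PN */
	memcpy(key->u.ccmp.rx_pn[queue], pn, CCMP_PN_LEN);
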
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 8593a77cfea9..43288259f4a1 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -40,27 +40,6 @@ config NF_CONNTRACK
40 40
41if NF_CONNTRACK 41if NF_CONNTRACK
42 42
43config NF_CT_ACCT
44 bool "Connection tracking flow accounting"
45 depends on NETFILTER_ADVANCED
46 help
47 If this option is enabled, the connection tracking code will
48 keep per-flow packet and byte counters.
49
50 Those counters can be used for flow-based accounting or the
51 `connbytes' match.
52
53 Please note that currently this option only sets a default state.
54 You may change it at boot time with nf_conntrack.acct=0/1 kernel
55 parameter or by loading the nf_conntrack module with acct=0/1.
56
57 You may also disable/enable it on a running system with:
58 sysctl net.netfilter.nf_conntrack_acct=0/1
59
60 This option will be removed in 2.6.29.
61
62 If unsure, say `N'.
63
64config NF_CONNTRACK_MARK 43config NF_CONNTRACK_MARK
65 bool 'Connection mark tracking support' 44 bool 'Connection mark tracking support'
66 depends on NETFILTER_ADVANCED 45 depends on NETFILTER_ADVANCED
@@ -347,6 +326,22 @@ config NETFILTER_XT_CONNMARK
347 326
348comment "Xtables targets" 327comment "Xtables targets"
349 328
329config NETFILTER_XT_TARGET_CHECKSUM
330 tristate "CHECKSUM target support"
331 depends on IP_NF_MANGLE || IP6_NF_MANGLE
332 depends on NETFILTER_ADVANCED
333 ---help---
334 This option adds a `CHECKSUM' target, which can be used in the iptables mangle
335 table.
336
337 You can use this target to compute and fill in the checksum in
338 a packet that lacks a checksum. This is particularly useful
339 if you need to work around old applications such as DHCP clients
340 that do not work well with checksum offloads but don't want to disable
341 checksum offload on your device.
342
343 To compile it as a module, choose M here. If unsure, say N.
344
350config NETFILTER_XT_TARGET_CLASSIFY 345config NETFILTER_XT_TARGET_CLASSIFY
351 tristate '"CLASSIFY" target support' 346 tristate '"CLASSIFY" target support'
352 depends on NETFILTER_ADVANCED 347 depends on NETFILTER_ADVANCED
@@ -424,6 +419,18 @@ config NETFILTER_XT_TARGET_HL
424 since you can easily create immortal packets that loop 419 since you can easily create immortal packets that loop
425 forever on the network. 420 forever on the network.
426 421
422config NETFILTER_XT_TARGET_IDLETIMER
423 tristate "IDLETIMER target support"
424 depends on NETFILTER_ADVANCED
425 help
426
427 This option adds the `IDLETIMER' target. Each matching packet
428 resets the timer associated with the label specified when the rule is
429 added. When the timer expires, it triggers a sysfs notification.
430 The remaining time for expiration can be read via sysfs.
431
432 To compile it as a module, choose M here. If unsure, say N.
433
427config NETFILTER_XT_TARGET_LED 434config NETFILTER_XT_TARGET_LED
428 tristate '"LED" target support' 435 tristate '"LED" target support'
429 depends on LEDS_CLASS && LEDS_TRIGGERS 436 depends on LEDS_CLASS && LEDS_TRIGGERS
@@ -503,7 +510,7 @@ config NETFILTER_XT_TARGET_RATEEST
503 To compile it as a module, choose M here. If unsure, say N. 510 To compile it as a module, choose M here. If unsure, say N.
504 511
505config NETFILTER_XT_TARGET_TEE 512config NETFILTER_XT_TARGET_TEE
506 tristate '"TEE" - packet cloning to alternate destiantion' 513 tristate '"TEE" - packet cloning to alternate destination'
507 depends on NETFILTER_ADVANCED 514 depends on NETFILTER_ADVANCED
508 depends on (IPV6 || IPV6=n) 515 depends on (IPV6 || IPV6=n)
509 depends on !NF_CONNTRACK || NF_CONNTRACK 516 depends on !NF_CONNTRACK || NF_CONNTRACK
@@ -618,7 +625,6 @@ config NETFILTER_XT_MATCH_CONNBYTES
618 tristate '"connbytes" per-connection counter match support' 625 tristate '"connbytes" per-connection counter match support'
619 depends on NF_CONNTRACK 626 depends on NF_CONNTRACK
620 depends on NETFILTER_ADVANCED 627 depends on NETFILTER_ADVANCED
621 select NF_CT_ACCT
622 help 628 help
623 This option adds a `connbytes' match, which allows you to match the 629 This option adds a `connbytes' match, which allows you to match the
624 number of bytes and/or packets for each direction within a connection. 630 number of bytes and/or packets for each direction within a connection.
@@ -657,6 +663,15 @@ config NETFILTER_XT_MATCH_CONNTRACK
657 663
658 To compile it as a module, choose M here. If unsure, say N. 664 To compile it as a module, choose M here. If unsure, say N.
659 665
666config NETFILTER_XT_MATCH_CPU
667 tristate '"cpu" match support'
668 depends on NETFILTER_ADVANCED
669 help
670 CPU matching allows you to match packets based on the CPU
671 currently handling the packet.
672
673 To compile it as a module, choose M here. If unsure, say N.
674
660config NETFILTER_XT_MATCH_DCCP 675config NETFILTER_XT_MATCH_DCCP
661 tristate '"dccp" protocol match support' 676 tristate '"dccp" protocol match support'
662 depends on NETFILTER_ADVANCED 677 depends on NETFILTER_ADVANCED
@@ -736,6 +751,16 @@ config NETFILTER_XT_MATCH_IPRANGE
736 751
737 If unsure, say M. 752 If unsure, say M.
738 753
754config NETFILTER_XT_MATCH_IPVS
755 tristate '"ipvs" match support'
756 depends on IP_VS
757 depends on NETFILTER_ADVANCED
758 depends on NF_CONNTRACK
759 help
760 This option allows you to match against IPVS properties of a packet.
761
762 If unsure, say N.
763
739config NETFILTER_XT_MATCH_LENGTH 764config NETFILTER_XT_MATCH_LENGTH
740 tristate '"length" match support' 765 tristate '"length" match support'
741 depends on NETFILTER_ADVANCED 766 depends on NETFILTER_ADVANCED
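Illustrative rules for the new entries above; the userspace option names (--checksum-fill, --timeout/--label, --cpu) belong to the matching iptables extensions and are assumptions here, not something this Kconfig text guarantees:

	# fill in checksums for DHCP replies that offload-unaware clients reject
	iptables -t mangle -A POSTROUTING -p udp --dport 68 -j CHECKSUM --checksum-fill
	# raise a sysfs notification when "wan"-labelled traffic idles for 60s
	iptables -t mangle -A PREROUTING -j IDLETIMER --timeout 60 --label wan
	# match only packets handled by CPU 0
	iptables -A INPUT -m cpu --cpu 0 -j ACCEPT
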
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 14e3a8fd8180..441050f31111 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -45,6 +45,7 @@ obj-$(CONFIG_NETFILTER_XT_MARK) += xt_mark.o
45obj-$(CONFIG_NETFILTER_XT_CONNMARK) += xt_connmark.o 45obj-$(CONFIG_NETFILTER_XT_CONNMARK) += xt_connmark.o
46 46
47# targets 47# targets
48obj-$(CONFIG_NETFILTER_XT_TARGET_CHECKSUM) += xt_CHECKSUM.o
48obj-$(CONFIG_NETFILTER_XT_TARGET_CLASSIFY) += xt_CLASSIFY.o 49obj-$(CONFIG_NETFILTER_XT_TARGET_CLASSIFY) += xt_CLASSIFY.o
49obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o 50obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o
50obj-$(CONFIG_NETFILTER_XT_TARGET_CT) += xt_CT.o 51obj-$(CONFIG_NETFILTER_XT_TARGET_CT) += xt_CT.o
@@ -61,6 +62,7 @@ obj-$(CONFIG_NETFILTER_XT_TARGET_TCPMSS) += xt_TCPMSS.o
61obj-$(CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP) += xt_TCPOPTSTRIP.o 62obj-$(CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP) += xt_TCPOPTSTRIP.o
62obj-$(CONFIG_NETFILTER_XT_TARGET_TEE) += xt_TEE.o 63obj-$(CONFIG_NETFILTER_XT_TARGET_TEE) += xt_TEE.o
63obj-$(CONFIG_NETFILTER_XT_TARGET_TRACE) += xt_TRACE.o 64obj-$(CONFIG_NETFILTER_XT_TARGET_TRACE) += xt_TRACE.o
65obj-$(CONFIG_NETFILTER_XT_TARGET_IDLETIMER) += xt_IDLETIMER.o
64 66
65# matches 67# matches
66obj-$(CONFIG_NETFILTER_XT_MATCH_CLUSTER) += xt_cluster.o 68obj-$(CONFIG_NETFILTER_XT_MATCH_CLUSTER) += xt_cluster.o
@@ -68,6 +70,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_COMMENT) += xt_comment.o
68obj-$(CONFIG_NETFILTER_XT_MATCH_CONNBYTES) += xt_connbytes.o 70obj-$(CONFIG_NETFILTER_XT_MATCH_CONNBYTES) += xt_connbytes.o
69obj-$(CONFIG_NETFILTER_XT_MATCH_CONNLIMIT) += xt_connlimit.o 71obj-$(CONFIG_NETFILTER_XT_MATCH_CONNLIMIT) += xt_connlimit.o
70obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRACK) += xt_conntrack.o 72obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRACK) += xt_conntrack.o
73obj-$(CONFIG_NETFILTER_XT_MATCH_CPU) += xt_cpu.o
71obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o 74obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
72obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o 75obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
73obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o 76obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
@@ -75,6 +78,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
75obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o 78obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
76obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o 79obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
77obj-$(CONFIG_NETFILTER_XT_MATCH_IPRANGE) += xt_iprange.o 80obj-$(CONFIG_NETFILTER_XT_MATCH_IPRANGE) += xt_iprange.o
81obj-$(CONFIG_NETFILTER_XT_MATCH_IPVS) += xt_ipvs.o
78obj-$(CONFIG_NETFILTER_XT_MATCH_LENGTH) += xt_length.o 82obj-$(CONFIG_NETFILTER_XT_MATCH_LENGTH) += xt_length.o
79obj-$(CONFIG_NETFILTER_XT_MATCH_LIMIT) += xt_limit.o 83obj-$(CONFIG_NETFILTER_XT_MATCH_LIMIT) += xt_limit.o
80obj-$(CONFIG_NETFILTER_XT_MATCH_MAC) += xt_mac.o 84obj-$(CONFIG_NETFILTER_XT_MATCH_MAC) += xt_mac.o
diff --git a/net/netfilter/ipvs/Kconfig b/net/netfilter/ipvs/Kconfig
index 712ccad13344..46a77d5c3887 100644
--- a/net/netfilter/ipvs/Kconfig
+++ b/net/netfilter/ipvs/Kconfig
@@ -3,7 +3,7 @@
3# 3#
4menuconfig IP_VS 4menuconfig IP_VS
5 tristate "IP virtual server support" 5 tristate "IP virtual server support"
6 depends on NET && INET && NETFILTER 6 depends on NET && INET && NETFILTER && NF_CONNTRACK
7 ---help--- 7 ---help---
8 IP Virtual Server support will let you build a high-performance 8 IP Virtual Server support will let you build a high-performance
9 virtual server based on cluster of two or more real servers. This 9 virtual server based on cluster of two or more real servers. This
@@ -26,7 +26,7 @@ if IP_VS
26 26
27config IP_VS_IPV6 27config IP_VS_IPV6
28 bool "IPv6 support for IPVS" 28 bool "IPv6 support for IPVS"
29 depends on EXPERIMENTAL && (IPV6 = y || IP_VS = IPV6) 29 depends on IPV6 = y || IP_VS = IPV6
30 ---help--- 30 ---help---
31 Add IPv6 support to IPVS. This is incomplete and might be dangerous. 31 Add IPv6 support to IPVS. This is incomplete and might be dangerous.
32 32
@@ -87,19 +87,16 @@ config IP_VS_PROTO_UDP
87 protocol. Say Y if unsure. 87 protocol. Say Y if unsure.
88 88
89config IP_VS_PROTO_AH_ESP 89config IP_VS_PROTO_AH_ESP
90 bool 90 def_bool IP_VS_PROTO_ESP || IP_VS_PROTO_AH
91 depends on UNDEFINED
92 91
93config IP_VS_PROTO_ESP 92config IP_VS_PROTO_ESP
94 bool "ESP load balancing support" 93 bool "ESP load balancing support"
95 select IP_VS_PROTO_AH_ESP
96 ---help--- 94 ---help---
97 This option enables support for load balancing ESP (Encapsulation 95 This option enables support for load balancing ESP (Encapsulation
98 Security Payload) transport protocol. Say Y if unsure. 96 Security Payload) transport protocol. Say Y if unsure.
99 97
100config IP_VS_PROTO_AH 98config IP_VS_PROTO_AH
101 bool "AH load balancing support" 99 bool "AH load balancing support"
102 select IP_VS_PROTO_AH_ESP
103 ---help--- 100 ---help---
104 This option enables support for load balancing AH (Authentication 101 This option enables support for load balancing AH (Authentication
105 Header) transport protocol. Say Y if unsure. 102 Header) transport protocol. Say Y if unsure.
@@ -238,7 +235,7 @@ comment 'IPVS application helper'
238 235
239config IP_VS_FTP 236config IP_VS_FTP
240 tristate "FTP protocol helper" 237 tristate "FTP protocol helper"
241 depends on IP_VS_PROTO_TCP 238 depends on IP_VS_PROTO_TCP && NF_NAT
242 ---help--- 239 ---help---
243 FTP is a protocol that transfers IP address and/or port number in 240 FTP is a protocol that transfers IP address and/or port number in
244 the payload. In the virtual server via Network Address Translation, 241 the payload. In the virtual server via Network Address Translation,
diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
index 1cb0e834f8ff..e76f87f4aca8 100644
--- a/net/netfilter/ipvs/ip_vs_app.c
+++ b/net/netfilter/ipvs/ip_vs_app.c
@@ -569,49 +569,6 @@ static const struct file_operations ip_vs_app_fops = {
569}; 569};
570#endif 570#endif
571 571
572
573/*
574 * Replace a segment of data with a new segment
575 */
576int ip_vs_skb_replace(struct sk_buff *skb, gfp_t pri,
577 char *o_buf, int o_len, char *n_buf, int n_len)
578{
579 int diff;
580 int o_offset;
581 int o_left;
582
583 EnterFunction(9);
584
585 diff = n_len - o_len;
586 o_offset = o_buf - (char *)skb->data;
587 /* The length of left data after o_buf+o_len in the skb data */
588 o_left = skb->len - (o_offset + o_len);
589
590 if (diff <= 0) {
591 memmove(o_buf + n_len, o_buf + o_len, o_left);
592 memcpy(o_buf, n_buf, n_len);
593 skb_trim(skb, skb->len + diff);
594 } else if (diff <= skb_tailroom(skb)) {
595 skb_put(skb, diff);
596 memmove(o_buf + n_len, o_buf + o_len, o_left);
597 memcpy(o_buf, n_buf, n_len);
598 } else {
599 if (pskb_expand_head(skb, skb_headroom(skb), diff, pri))
600 return -ENOMEM;
601 skb_put(skb, diff);
602 memmove(skb->data + o_offset + n_len,
603 skb->data + o_offset + o_len, o_left);
604 skb_copy_to_linear_data_offset(skb, o_offset, n_buf, n_len);
605 }
606
607 /* must update the iph total length here */
608 ip_hdr(skb)->tot_len = htons(skb->len);
609
610 LeaveFunction(9);
611 return 0;
612}
613
614
615int __init ip_vs_app_init(void) 572int __init ip_vs_app_init(void)
616{ 573{
617 /* we will replace it with proc_net_ipvs_create() soon */ 574 /* we will replace it with proc_net_ipvs_create() soon */
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index d8f7e8ef67b4..b71c69a2db13 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -158,10 +158,14 @@ static inline int ip_vs_conn_hash(struct ip_vs_conn *cp)
158 unsigned hash; 158 unsigned hash;
159 int ret; 159 int ret;
160 160
161 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
162 return 0;
163
161 /* Hash by protocol, client address and port */ 164 /* Hash by protocol, client address and port */
162 hash = ip_vs_conn_hashkey(cp->af, cp->protocol, &cp->caddr, cp->cport); 165 hash = ip_vs_conn_hashkey(cp->af, cp->protocol, &cp->caddr, cp->cport);
163 166
164 ct_write_lock(hash); 167 ct_write_lock(hash);
168 spin_lock(&cp->lock);
165 169
166 if (!(cp->flags & IP_VS_CONN_F_HASHED)) { 170 if (!(cp->flags & IP_VS_CONN_F_HASHED)) {
167 list_add(&cp->c_list, &ip_vs_conn_tab[hash]); 171 list_add(&cp->c_list, &ip_vs_conn_tab[hash]);
@@ -174,6 +178,7 @@ static inline int ip_vs_conn_hash(struct ip_vs_conn *cp)
174 ret = 0; 178 ret = 0;
175 } 179 }
176 180
181 spin_unlock(&cp->lock);
177 ct_write_unlock(hash); 182 ct_write_unlock(hash);
178 183
179 return ret; 184 return ret;
@@ -193,6 +198,7 @@ static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp)
193 hash = ip_vs_conn_hashkey(cp->af, cp->protocol, &cp->caddr, cp->cport); 198 hash = ip_vs_conn_hashkey(cp->af, cp->protocol, &cp->caddr, cp->cport);
194 199
195 ct_write_lock(hash); 200 ct_write_lock(hash);
201 spin_lock(&cp->lock);
196 202
197 if (cp->flags & IP_VS_CONN_F_HASHED) { 203 if (cp->flags & IP_VS_CONN_F_HASHED) {
198 list_del(&cp->c_list); 204 list_del(&cp->c_list);
@@ -202,6 +208,7 @@ static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp)
202 } else 208 } else
203 ret = 0; 209 ret = 0;
204 210
211 spin_unlock(&cp->lock);
205 ct_write_unlock(hash); 212 ct_write_unlock(hash);
206 213
207 return ret; 214 return ret;
@@ -264,6 +271,29 @@ struct ip_vs_conn *ip_vs_conn_in_get
264 return cp; 271 return cp;
265} 272}
266 273
274struct ip_vs_conn *
275ip_vs_conn_in_get_proto(int af, const struct sk_buff *skb,
276 struct ip_vs_protocol *pp,
277 const struct ip_vs_iphdr *iph,
278 unsigned int proto_off, int inverse)
279{
280 __be16 _ports[2], *pptr;
281
282 pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
283 if (pptr == NULL)
284 return NULL;
285
286 if (likely(!inverse))
287 return ip_vs_conn_in_get(af, iph->protocol,
288 &iph->saddr, pptr[0],
289 &iph->daddr, pptr[1]);
290 else
291 return ip_vs_conn_in_get(af, iph->protocol,
292 &iph->daddr, pptr[1],
293 &iph->saddr, pptr[0]);
294}
295EXPORT_SYMBOL_GPL(ip_vs_conn_in_get_proto);
296
267/* Get reference to connection template */ 297/* Get reference to connection template */
268struct ip_vs_conn *ip_vs_ct_in_get 298struct ip_vs_conn *ip_vs_ct_in_get
269(int af, int protocol, const union nf_inet_addr *s_addr, __be16 s_port, 299(int af, int protocol, const union nf_inet_addr *s_addr, __be16 s_port,
@@ -349,14 +379,37 @@ struct ip_vs_conn *ip_vs_conn_out_get
349 return ret; 379 return ret;
350} 380}
351 381
382struct ip_vs_conn *
383ip_vs_conn_out_get_proto(int af, const struct sk_buff *skb,
384 struct ip_vs_protocol *pp,
385 const struct ip_vs_iphdr *iph,
386 unsigned int proto_off, int inverse)
387{
388 __be16 _ports[2], *pptr;
389
390 pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
391 if (pptr == NULL)
392 return NULL;
393
394 if (likely(!inverse))
395 return ip_vs_conn_out_get(af, iph->protocol,
396 &iph->saddr, pptr[0],
397 &iph->daddr, pptr[1]);
398 else
399 return ip_vs_conn_out_get(af, iph->protocol,
400 &iph->daddr, pptr[1],
401 &iph->saddr, pptr[0]);
402}
403EXPORT_SYMBOL_GPL(ip_vs_conn_out_get_proto);
352 404
353/* 405/*
354 * Put back the conn and restart its timer with its timeout 406 * Put back the conn and restart its timer with its timeout
355 */ 407 */
356void ip_vs_conn_put(struct ip_vs_conn *cp) 408void ip_vs_conn_put(struct ip_vs_conn *cp)
357{ 409{
358 /* reset it expire in its timeout */ 410 unsigned long t = (cp->flags & IP_VS_CONN_F_ONE_PACKET) ?
359 mod_timer(&cp->timer, jiffies+cp->timeout); 411 0 : cp->timeout;
412 mod_timer(&cp->timer, jiffies+t);
360 413
361 __ip_vs_conn_put(cp); 414 __ip_vs_conn_put(cp);
362} 415}
@@ -649,7 +702,7 @@ static void ip_vs_conn_expire(unsigned long data)
649 /* 702 /*
650 * unhash it if it is hashed in the conn table 703 * unhash it if it is hashed in the conn table
651 */ 704 */
652 if (!ip_vs_conn_unhash(cp)) 705 if (!ip_vs_conn_unhash(cp) && !(cp->flags & IP_VS_CONN_F_ONE_PACKET))
653 goto expire_later; 706 goto expire_later;
654 707
655 /* 708 /*
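The IP_VS_CONN_F_ONE_PACKET handling above (skip hashing, expire with a zero timeout) implements one-packet scheduling for UDP, where each datagram may be scheduled to a different real server. From userspace this corresponds to the ipvsadm --ops flag; the address below is illustrative:

	# round-robin DNS with a fresh scheduling decision per packet
	ipvsadm -A -u 192.168.0.1:53 -s rr --ops
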
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 1cd6e3fd058b..4f8ddba48011 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -54,7 +54,6 @@
54 54
55EXPORT_SYMBOL(register_ip_vs_scheduler); 55EXPORT_SYMBOL(register_ip_vs_scheduler);
56EXPORT_SYMBOL(unregister_ip_vs_scheduler); 56EXPORT_SYMBOL(unregister_ip_vs_scheduler);
57EXPORT_SYMBOL(ip_vs_skb_replace);
58EXPORT_SYMBOL(ip_vs_proto_name); 57EXPORT_SYMBOL(ip_vs_proto_name);
59EXPORT_SYMBOL(ip_vs_conn_new); 58EXPORT_SYMBOL(ip_vs_conn_new);
60EXPORT_SYMBOL(ip_vs_conn_in_get); 59EXPORT_SYMBOL(ip_vs_conn_in_get);
@@ -194,6 +193,7 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
194 struct ip_vs_dest *dest; 193 struct ip_vs_dest *dest;
195 struct ip_vs_conn *ct; 194 struct ip_vs_conn *ct;
196 __be16 dport; /* destination port to forward */ 195 __be16 dport; /* destination port to forward */
196 __be16 flags;
197 union nf_inet_addr snet; /* source network of the client, 197 union nf_inet_addr snet; /* source network of the client,
198 after masking */ 198 after masking */
199 199
@@ -340,6 +340,10 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
340 dport = ports[1]; 340 dport = ports[1];
341 } 341 }
342 342
343 flags = (svc->flags & IP_VS_SVC_F_ONEPACKET
344 && iph.protocol == IPPROTO_UDP)?
345 IP_VS_CONN_F_ONE_PACKET : 0;
346
343 /* 347 /*
344 * Create a new connection according to the template 348 * Create a new connection according to the template
345 */ 349 */
@@ -347,7 +351,7 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
347 &iph.saddr, ports[0], 351 &iph.saddr, ports[0],
348 &iph.daddr, ports[1], 352 &iph.daddr, ports[1],
349 &dest->addr, dport, 353 &dest->addr, dport,
350 0, 354 flags,
351 dest); 355 dest);
352 if (cp == NULL) { 356 if (cp == NULL) {
353 ip_vs_conn_put(ct); 357 ip_vs_conn_put(ct);
@@ -377,7 +381,7 @@ ip_vs_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
377 struct ip_vs_conn *cp = NULL; 381 struct ip_vs_conn *cp = NULL;
378 struct ip_vs_iphdr iph; 382 struct ip_vs_iphdr iph;
379 struct ip_vs_dest *dest; 383 struct ip_vs_dest *dest;
380 __be16 _ports[2], *pptr; 384 __be16 _ports[2], *pptr, flags;
381 385
382 ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph); 386 ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph);
383 pptr = skb_header_pointer(skb, iph.len, sizeof(_ports), _ports); 387 pptr = skb_header_pointer(skb, iph.len, sizeof(_ports), _ports);
@@ -407,6 +411,10 @@ ip_vs_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
407 return NULL; 411 return NULL;
408 } 412 }
409 413
414 flags = (svc->flags & IP_VS_SVC_F_ONEPACKET
415 && iph.protocol == IPPROTO_UDP)?
416 IP_VS_CONN_F_ONE_PACKET : 0;
417
410 /* 418 /*
411 * Create a connection entry. 419 * Create a connection entry.
412 */ 420 */
@@ -414,7 +422,7 @@ ip_vs_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
414 &iph.saddr, pptr[0], 422 &iph.saddr, pptr[0],
415 &iph.daddr, pptr[1], 423 &iph.daddr, pptr[1],
416 &dest->addr, dest->port ? dest->port : pptr[1], 424 &dest->addr, dest->port ? dest->port : pptr[1],
417 0, 425 flags,
418 dest); 426 dest);
419 if (cp == NULL) 427 if (cp == NULL)
420 return NULL; 428 return NULL;
@@ -464,6 +472,9 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
464 if (sysctl_ip_vs_cache_bypass && svc->fwmark && unicast) { 472 if (sysctl_ip_vs_cache_bypass && svc->fwmark && unicast) {
465 int ret, cs; 473 int ret, cs;
466 struct ip_vs_conn *cp; 474 struct ip_vs_conn *cp;
475 __u16 flags = (svc->flags & IP_VS_SVC_F_ONEPACKET &&
476 iph.protocol == IPPROTO_UDP)?
477 IP_VS_CONN_F_ONE_PACKET : 0;
467 union nf_inet_addr daddr = { .all = { 0, 0, 0, 0 } }; 478 union nf_inet_addr daddr = { .all = { 0, 0, 0, 0 } };
468 479
469 ip_vs_service_put(svc); 480 ip_vs_service_put(svc);
@@ -474,7 +485,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
474 &iph.saddr, pptr[0], 485 &iph.saddr, pptr[0],
475 &iph.daddr, pptr[1], 486 &iph.daddr, pptr[1],
476 &daddr, 0, 487 &daddr, 0,
477 IP_VS_CONN_F_BYPASS, 488 IP_VS_CONN_F_BYPASS | flags,
478 NULL); 489 NULL);
479 if (cp == NULL) 490 if (cp == NULL)
480 return NF_DROP; 491 return NF_DROP;
@@ -524,26 +535,6 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
524 return NF_DROP; 535 return NF_DROP;
525} 536}
526 537
527
528/*
529 * It is hooked before NF_IP_PRI_NAT_SRC at the NF_INET_POST_ROUTING
530 * chain, and is used for VS/NAT.
531 * It detects packets for VS/NAT connections and sends the packets
532 * immediately. This can avoid that iptable_nat mangles the packets
533 * for VS/NAT.
534 */
535static unsigned int ip_vs_post_routing(unsigned int hooknum,
536 struct sk_buff *skb,
537 const struct net_device *in,
538 const struct net_device *out,
539 int (*okfn)(struct sk_buff *))
540{
541 if (!skb->ipvs_property)
542 return NF_ACCEPT;
543 /* The packet was sent from IPVS, exit this chain */
544 return NF_STOP;
545}
546
547__sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset) 538__sum16 ip_vs_checksum_complete(struct sk_buff *skb, int offset)
548{ 539{
549 return csum_fold(skb_checksum(skb, offset, skb->len - offset, 0)); 540 return csum_fold(skb_checksum(skb, offset, skb->len - offset, 0));
@@ -1487,14 +1478,6 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1487 .hooknum = NF_INET_FORWARD, 1478 .hooknum = NF_INET_FORWARD,
1488 .priority = 99, 1479 .priority = 99,
1489 }, 1480 },
1490 /* Before the netfilter connection tracking, exit from POST_ROUTING */
1491 {
1492 .hook = ip_vs_post_routing,
1493 .owner = THIS_MODULE,
1494 .pf = PF_INET,
1495 .hooknum = NF_INET_POST_ROUTING,
1496 .priority = NF_IP_PRI_NAT_SRC-1,
1497 },
1498#ifdef CONFIG_IP_VS_IPV6 1481#ifdef CONFIG_IP_VS_IPV6
1499 /* After packet filtering, forward packet through VS/DR, VS/TUN, 1482 /* After packet filtering, forward packet through VS/DR, VS/TUN,
1500 * or VS/NAT(change destination), so that filtering rules can be 1483 * or VS/NAT(change destination), so that filtering rules can be
@@ -1523,14 +1506,6 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1523 .hooknum = NF_INET_FORWARD, 1506 .hooknum = NF_INET_FORWARD,
1524 .priority = 99, 1507 .priority = 99,
1525 }, 1508 },
1526 /* Before the netfilter connection tracking, exit from POST_ROUTING */
1527 {
1528 .hook = ip_vs_post_routing,
1529 .owner = THIS_MODULE,
1530 .pf = PF_INET6,
1531 .hooknum = NF_INET_POST_ROUTING,
1532 .priority = NF_IP6_PRI_NAT_SRC-1,
1533 },
1534#endif 1509#endif
1535}; 1510};
1536 1511
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 36dc1d88c2fa..0f0c079c422a 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -1864,14 +1864,16 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
1864 svc->scheduler->name); 1864 svc->scheduler->name);
1865 else 1865 else
1866#endif 1866#endif
1867 seq_printf(seq, "%s %08X:%04X %s ", 1867 seq_printf(seq, "%s %08X:%04X %s %s ",
1868 ip_vs_proto_name(svc->protocol), 1868 ip_vs_proto_name(svc->protocol),
1869 ntohl(svc->addr.ip), 1869 ntohl(svc->addr.ip),
1870 ntohs(svc->port), 1870 ntohs(svc->port),
1871 svc->scheduler->name); 1871 svc->scheduler->name,
1872 (svc->flags & IP_VS_SVC_F_ONEPACKET)?"ops ":"");
1872 } else { 1873 } else {
1873 seq_printf(seq, "FWM %08X %s ", 1874 seq_printf(seq, "FWM %08X %s %s",
1874 svc->fwmark, svc->scheduler->name); 1875 svc->fwmark, svc->scheduler->name,
1876 (svc->flags & IP_VS_SVC_F_ONEPACKET)?"ops ":"");
1875 } 1877 }
1876 1878
1877 if (svc->flags & IP_VS_SVC_F_PERSISTENT) 1879 if (svc->flags & IP_VS_SVC_F_PERSISTENT)
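Given the format strings above, a one-packet service gains an "ops" token after the scheduler name in /proc/net/ip_vs; an illustrative line (address and port made up):

	UDP  C0A80001:0035 rr ops
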
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index 2ae747a376a5..f228a17ec649 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -20,6 +20,17 @@
20 * 20 *
21 * Author: Wouter Gadeyne 21 * Author: Wouter Gadeyne
22 * 22 *
23 *
24 * Code for ip_vs_expect_related and ip_vs_expect_callback is taken from
25 * http://www.ssi.bg/~ja/nfct/:
26 *
27 * ip_vs_nfct.c: Netfilter connection tracking support for IPVS
28 *
29 * Portions Copyright (C) 2001-2002
30 * Antefacto Ltd, 181 Parnell St, Dublin 1, Ireland.
31 *
32 * Portions Copyright (C) 2003-2008
33 * Julian Anastasov
23 */ 34 */
24 35
25#define KMSG_COMPONENT "IPVS" 36#define KMSG_COMPONENT "IPVS"
@@ -32,6 +43,9 @@
32#include <linux/in.h> 43#include <linux/in.h>
33#include <linux/ip.h> 44#include <linux/ip.h>
34#include <linux/netfilter.h> 45#include <linux/netfilter.h>
46#include <net/netfilter/nf_conntrack.h>
47#include <net/netfilter/nf_conntrack_expect.h>
48#include <net/netfilter/nf_nat_helper.h>
35#include <linux/gfp.h> 49#include <linux/gfp.h>
36#include <net/protocol.h> 50#include <net/protocol.h>
37#include <net/tcp.h> 51#include <net/tcp.h>
@@ -43,6 +57,16 @@
43#define SERVER_STRING "227 Entering Passive Mode (" 57#define SERVER_STRING "227 Entering Passive Mode ("
44#define CLIENT_STRING "PORT " 58#define CLIENT_STRING "PORT "
45 59
60#define FMT_TUPLE "%pI4:%u->%pI4:%u/%u"
61#define ARG_TUPLE(T) &(T)->src.u3.ip, ntohs((T)->src.u.all), \
62 &(T)->dst.u3.ip, ntohs((T)->dst.u.all), \
63 (T)->dst.protonum
64
65#define FMT_CONN "%pI4:%u->%pI4:%u->%pI4:%u/%u:%u"
66#define ARG_CONN(C) &((C)->caddr.ip), ntohs((C)->cport), \
67 &((C)->vaddr.ip), ntohs((C)->vport), \
68 &((C)->daddr.ip), ntohs((C)->dport), \
69 (C)->protocol, (C)->state
46 70
47/* 71/*
48 * List of ports (up to IP_VS_APP_MAX_PORTS) to be handled by helper 72 * List of ports (up to IP_VS_APP_MAX_PORTS) to be handled by helper
@@ -123,6 +147,119 @@ static int ip_vs_ftp_get_addrport(char *data, char *data_limit,
123 return 1; 147 return 1;
124} 148}
125 149
150/*
151 * Called from init_conntrack() as expectfn handler.
152 */
153static void
154ip_vs_expect_callback(struct nf_conn *ct,
155 struct nf_conntrack_expect *exp)
156{
157 struct nf_conntrack_tuple *orig, new_reply;
158 struct ip_vs_conn *cp;
159
160 if (exp->tuple.src.l3num != PF_INET)
161 return;
162
163 /*
164 * We assume that no NF locks are held before this callback.
165 * ip_vs_conn_out_get and ip_vs_conn_in_get should match their
166 * expectations even if they use wildcard values; here we provide the
167 * actual values from the newly created original conntrack direction.
168 * The conntrack is confirmed when the packet reaches IPVS hooks.
169 */
170
171 /* RS->CLIENT */
172 orig = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
173 cp = ip_vs_conn_out_get(exp->tuple.src.l3num, orig->dst.protonum,
174 &orig->src.u3, orig->src.u.tcp.port,
175 &orig->dst.u3, orig->dst.u.tcp.port);
176 if (cp) {
177 /* Change reply CLIENT->RS to CLIENT->VS */
178 new_reply = ct->tuplehash[IP_CT_DIR_REPLY].tuple;
179 IP_VS_DBG(7, "%s(): ct=%p, status=0x%lX, tuples=" FMT_TUPLE ", "
180 FMT_TUPLE ", found inout cp=" FMT_CONN "\n",
181 __func__, ct, ct->status,
182 ARG_TUPLE(orig), ARG_TUPLE(&new_reply),
183 ARG_CONN(cp));
184 new_reply.dst.u3 = cp->vaddr;
185 new_reply.dst.u.tcp.port = cp->vport;
186 IP_VS_DBG(7, "%s(): ct=%p, new tuples=" FMT_TUPLE ", " FMT_TUPLE
187 ", inout cp=" FMT_CONN "\n",
188 __func__, ct,
189 ARG_TUPLE(orig), ARG_TUPLE(&new_reply),
190 ARG_CONN(cp));
191 goto alter;
192 }
193
194 /* CLIENT->VS */
195 cp = ip_vs_conn_in_get(exp->tuple.src.l3num, orig->dst.protonum,
196 &orig->src.u3, orig->src.u.tcp.port,
197 &orig->dst.u3, orig->dst.u.tcp.port);
198 if (cp) {
199 /* Change reply VS->CLIENT to RS->CLIENT */
200 new_reply = ct->tuplehash[IP_CT_DIR_REPLY].tuple;
201 IP_VS_DBG(7, "%s(): ct=%p, status=0x%lX, tuples=" FMT_TUPLE ", "
202 FMT_TUPLE ", found outin cp=" FMT_CONN "\n",
203 __func__, ct, ct->status,
204 ARG_TUPLE(orig), ARG_TUPLE(&new_reply),
205 ARG_CONN(cp));
206 new_reply.src.u3 = cp->daddr;
207 new_reply.src.u.tcp.port = cp->dport;
208 IP_VS_DBG(7, "%s(): ct=%p, new tuples=" FMT_TUPLE ", "
209 FMT_TUPLE ", outin cp=" FMT_CONN "\n",
210 __func__, ct,
211 ARG_TUPLE(orig), ARG_TUPLE(&new_reply),
212 ARG_CONN(cp));
213 goto alter;
214 }
215
216 IP_VS_DBG(7, "%s(): ct=%p, status=0x%lX, tuple=" FMT_TUPLE
217 " - unknown expect\n",
218 __func__, ct, ct->status, ARG_TUPLE(orig));
219 return;
220
221alter:
222 /* Never alter conntrack for non-NAT conns */
223 if (IP_VS_FWD_METHOD(cp) == IP_VS_CONN_F_MASQ)
224 nf_conntrack_alter_reply(ct, &new_reply);
225 ip_vs_conn_put(cp);
226 return;
227}
228
229/*
230 * Create NF conntrack expectation with wildcard (optional) source port.
231 * Then the default callback function will alter the reply and will confirm
232 * the conntrack entry when the first packet comes.
233 */
234static void
235ip_vs_expect_related(struct sk_buff *skb, struct nf_conn *ct,
236 struct ip_vs_conn *cp, u_int8_t proto,
237 const __be16 *port, int from_rs)
238{
239 struct nf_conntrack_expect *exp;
240
241 BUG_ON(!ct || ct == &nf_conntrack_untracked);
242
243 exp = nf_ct_expect_alloc(ct);
244 if (!exp)
245 return;
246
247 if (from_rs)
248 nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT,
249 nf_ct_l3num(ct), &cp->daddr, &cp->caddr,
250 proto, port, &cp->cport);
251 else
252 nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT,
253 nf_ct_l3num(ct), &cp->caddr, &cp->vaddr,
254 proto, port, &cp->vport);
255
256 exp->expectfn = ip_vs_expect_callback;
257
258 IP_VS_DBG(7, "%s(): ct=%p, expect tuple=" FMT_TUPLE "\n",
259 __func__, ct, ARG_TUPLE(&exp->tuple));
260 nf_ct_expect_related(exp);
261 nf_ct_expect_put(exp);
262}
126 263
127/* 264/*
128 * Look at outgoing ftp packets to catch the response to a PASV command 265 * Look at outgoing ftp packets to catch the response to a PASV command
@@ -149,7 +286,9 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
149 struct ip_vs_conn *n_cp; 286 struct ip_vs_conn *n_cp;
150 char buf[24]; /* xxx.xxx.xxx.xxx,ppp,ppp\000 */ 287 char buf[24]; /* xxx.xxx.xxx.xxx,ppp,ppp\000 */
151 unsigned buf_len; 288 unsigned buf_len;
152 int ret; 289 int ret = 0;
290 enum ip_conntrack_info ctinfo;
291 struct nf_conn *ct;
153 292
154#ifdef CONFIG_IP_VS_IPV6 293#ifdef CONFIG_IP_VS_IPV6
155 /* This application helper doesn't work with IPv6 yet, 294 /* This application helper doesn't work with IPv6 yet,
@@ -219,19 +358,26 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
219 358
220 buf_len = strlen(buf); 359 buf_len = strlen(buf);
221 360
361 ct = nf_ct_get(skb, &ctinfo);
362 if (ct && !nf_ct_is_untracked(ct)) {
363 /* If mangling fails, this function will return 0,
364 * which will cause the packet to be dropped.
365 * Mangling can only fail under memory pressure;
366 * hopefully it will succeed on the retransmitted
367 * packet.
368 */
369 ret = nf_nat_mangle_tcp_packet(skb, ct, ctinfo,
370 start-data, end-start,
371 buf, buf_len);
372 if (ret)
373 ip_vs_expect_related(skb, ct, n_cp,
374 IPPROTO_TCP, NULL, 0);
375 }
376
222 /* 377 /*
223 * Calculate required delta-offset to keep TCP happy 378 * Not setting 'diff' is intentional; otherwise the sequence
379 * would be adjusted twice.
224 */ 380 */
225 *diff = buf_len - (end-start);
226
227 if (*diff == 0) {
228 /* simply replace it with new passive address */
229 memcpy(start, buf, buf_len);
230 ret = 1;
231 } else {
232 ret = !ip_vs_skb_replace(skb, GFP_ATOMIC, start,
233 end-start, buf, buf_len);
234 }
235 381
236 cp->app_data = NULL; 382 cp->app_data = NULL;
237 ip_vs_tcp_conn_listen(n_cp); 383 ip_vs_tcp_conn_listen(n_cp);
@@ -263,6 +409,7 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
263 union nf_inet_addr to; 409 union nf_inet_addr to;
264 __be16 port; 410 __be16 port;
265 struct ip_vs_conn *n_cp; 411 struct ip_vs_conn *n_cp;
412 struct nf_conn *ct;
266 413
267#ifdef CONFIG_IP_VS_IPV6 414#ifdef CONFIG_IP_VS_IPV6
268 /* This application helper doesn't work with IPv6 yet, 415 /* This application helper doesn't work with IPv6 yet,
@@ -349,6 +496,11 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
349 ip_vs_control_add(n_cp, cp); 496 ip_vs_control_add(n_cp, cp);
350 } 497 }
351 498
499 ct = (struct nf_conn *)skb->nfct;
500 if (ct && ct != &nf_conntrack_untracked)
501 ip_vs_expect_related(skb, ct, n_cp,
502 IPPROTO_TCP, &n_cp->dport, 1);
503
352 /* 504 /*
353 * Move tunnel to listen state 505 * Move tunnel to listen state
354 */ 506 */
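With the Kconfig change earlier in this patch, ip_vs_ftp now depends on NF_NAT and mangles the payload through nf_nat_mangle_tcp_packet() rather than the removed ip_vs_skb_replace(). Loading is unchanged; ports= is the helper's existing module parameter for FTP control ports:

	modprobe ip_vs_ftp ports=21
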
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
index 94a45213faa6..9323f8944199 100644
--- a/net/netfilter/ipvs/ip_vs_lblc.c
+++ b/net/netfilter/ipvs/ip_vs_lblc.c
@@ -11,7 +11,7 @@
11 * Changes: 11 * Changes:
12 * Martin Hamilton : fixed the terrible locking bugs 12 * Martin Hamilton : fixed the terrible locking bugs
13 * *lock(tbl->lock) ==> *lock(&tbl->lock) 13 * *lock(tbl->lock) ==> *lock(&tbl->lock)
14 * Wensong Zhang : fixed the uninitilized tbl->lock bug 14 * Wensong Zhang : fixed the uninitialized tbl->lock bug
15 * Wensong Zhang : added doing full expiration check to 15 * Wensong Zhang : added doing full expiration check to
16 * collect stale entries of 24+ hours when 16 * collect stale entries of 24+ hours when
17 * no partial expire check in a half hour 17 * no partial expire check in a half hour
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index 535dc2b419d8..dbeed8ea421a 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -386,7 +386,7 @@ ip_vs_lblcr_new(struct ip_vs_lblcr_table *tbl, const union nf_inet_addr *daddr,
386 ip_vs_addr_copy(dest->af, &en->addr, daddr); 386 ip_vs_addr_copy(dest->af, &en->addr, daddr);
387 en->lastuse = jiffies; 387 en->lastuse = jiffies;
388 388
389 /* initilize its dest set */ 389 /* initialize its dest set */
390 atomic_set(&(en->set.size), 0); 390 atomic_set(&(en->set.size), 0);
391 INIT_LIST_HEAD(&en->set.list); 391 INIT_LIST_HEAD(&en->set.list);
392 rwlock_init(&en->set.lock); 392 rwlock_init(&en->set.lock);
diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c
index 2d3d5e4b35f8..027f654799fe 100644
--- a/net/netfilter/ipvs/ip_vs_proto.c
+++ b/net/netfilter/ipvs/ip_vs_proto.c
@@ -98,6 +98,7 @@ struct ip_vs_protocol * ip_vs_proto_get(unsigned short proto)
98 98
99 return NULL; 99 return NULL;
100} 100}
101EXPORT_SYMBOL(ip_vs_proto_get);
101 102
102 103
103/* 104/*
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
index c9a3f7a21d53..4c0855cb006e 100644
--- a/net/netfilter/ipvs/ip_vs_proto_sctp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
@@ -8,55 +8,6 @@
8#include <net/sctp/checksum.h> 8#include <net/sctp/checksum.h>
9#include <net/ip_vs.h> 9#include <net/ip_vs.h>
10 10
11
12static struct ip_vs_conn *
13sctp_conn_in_get(int af,
14 const struct sk_buff *skb,
15 struct ip_vs_protocol *pp,
16 const struct ip_vs_iphdr *iph,
17 unsigned int proto_off,
18 int inverse)
19{
20 __be16 _ports[2], *pptr;
21
22 pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
23 if (pptr == NULL)
24 return NULL;
25
26 if (likely(!inverse))
27 return ip_vs_conn_in_get(af, iph->protocol,
28 &iph->saddr, pptr[0],
29 &iph->daddr, pptr[1]);
30 else
31 return ip_vs_conn_in_get(af, iph->protocol,
32 &iph->daddr, pptr[1],
33 &iph->saddr, pptr[0]);
34}
35
36static struct ip_vs_conn *
37sctp_conn_out_get(int af,
38 const struct sk_buff *skb,
39 struct ip_vs_protocol *pp,
40 const struct ip_vs_iphdr *iph,
41 unsigned int proto_off,
42 int inverse)
43{
44 __be16 _ports[2], *pptr;
45
46 pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
47 if (pptr == NULL)
48 return NULL;
49
50 if (likely(!inverse))
51 return ip_vs_conn_out_get(af, iph->protocol,
52 &iph->saddr, pptr[0],
53 &iph->daddr, pptr[1]);
54 else
55 return ip_vs_conn_out_get(af, iph->protocol,
56 &iph->daddr, pptr[1],
57 &iph->saddr, pptr[0]);
58}
59
60static int 11static int
61sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp, 12sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
62 int *verdict, struct ip_vs_conn **cpp) 13 int *verdict, struct ip_vs_conn **cpp)
@@ -173,7 +124,7 @@ sctp_dnat_handler(struct sk_buff *skb,
173 return 0; 124 return 0;
174 125
175 /* Call application helper if needed */ 126 /* Call application helper if needed */
176 if (!ip_vs_app_pkt_out(cp, skb)) 127 if (!ip_vs_app_pkt_in(cp, skb))
177 return 0; 128 return 0;
178 } 129 }
179 130
@@ -1169,8 +1120,8 @@ struct ip_vs_protocol ip_vs_protocol_sctp = {
1169 .register_app = sctp_register_app, 1120 .register_app = sctp_register_app,
1170 .unregister_app = sctp_unregister_app, 1121 .unregister_app = sctp_unregister_app,
1171 .conn_schedule = sctp_conn_schedule, 1122 .conn_schedule = sctp_conn_schedule,
1172 .conn_in_get = sctp_conn_in_get, 1123 .conn_in_get = ip_vs_conn_in_get_proto,
1173 .conn_out_get = sctp_conn_out_get, 1124 .conn_out_get = ip_vs_conn_out_get_proto,
1174 .snat_handler = sctp_snat_handler, 1125 .snat_handler = sctp_snat_handler,
1175 .dnat_handler = sctp_dnat_handler, 1126 .dnat_handler = sctp_dnat_handler,
1176 .csum_check = sctp_csum_check, 1127 .csum_check = sctp_csum_check,
diff --git a/net/netfilter/ipvs/ip_vs_proto_tcp.c b/net/netfilter/ipvs/ip_vs_proto_tcp.c
index 91d28e073742..282d24de8592 100644
--- a/net/netfilter/ipvs/ip_vs_proto_tcp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_tcp.c
@@ -27,52 +27,6 @@
27 27
28#include <net/ip_vs.h> 28#include <net/ip_vs.h>
29 29
30
31static struct ip_vs_conn *
32tcp_conn_in_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp,
33 const struct ip_vs_iphdr *iph, unsigned int proto_off,
34 int inverse)
35{
36 __be16 _ports[2], *pptr;
37
38 pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
39 if (pptr == NULL)
40 return NULL;
41
42 if (likely(!inverse)) {
43 return ip_vs_conn_in_get(af, iph->protocol,
44 &iph->saddr, pptr[0],
45 &iph->daddr, pptr[1]);
46 } else {
47 return ip_vs_conn_in_get(af, iph->protocol,
48 &iph->daddr, pptr[1],
49 &iph->saddr, pptr[0]);
50 }
51}
52
53static struct ip_vs_conn *
54tcp_conn_out_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp,
55 const struct ip_vs_iphdr *iph, unsigned int proto_off,
56 int inverse)
57{
58 __be16 _ports[2], *pptr;
59
60 pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
61 if (pptr == NULL)
62 return NULL;
63
64 if (likely(!inverse)) {
65 return ip_vs_conn_out_get(af, iph->protocol,
66 &iph->saddr, pptr[0],
67 &iph->daddr, pptr[1]);
68 } else {
69 return ip_vs_conn_out_get(af, iph->protocol,
70 &iph->daddr, pptr[1],
71 &iph->saddr, pptr[0]);
72 }
73}
74
75
76static int 30static int
77tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp, 31tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
78 int *verdict, struct ip_vs_conn **cpp) 32 int *verdict, struct ip_vs_conn **cpp)
@@ -721,8 +675,8 @@ struct ip_vs_protocol ip_vs_protocol_tcp = {
721 .register_app = tcp_register_app, 675 .register_app = tcp_register_app,
722 .unregister_app = tcp_unregister_app, 676 .unregister_app = tcp_unregister_app,
723 .conn_schedule = tcp_conn_schedule, 677 .conn_schedule = tcp_conn_schedule,
724 .conn_in_get = tcp_conn_in_get, 678 .conn_in_get = ip_vs_conn_in_get_proto,
725 .conn_out_get = tcp_conn_out_get, 679 .conn_out_get = ip_vs_conn_out_get_proto,
726 .snat_handler = tcp_snat_handler, 680 .snat_handler = tcp_snat_handler,
727 .dnat_handler = tcp_dnat_handler, 681 .dnat_handler = tcp_dnat_handler,
728 .csum_check = tcp_csum_check, 682 .csum_check = tcp_csum_check,
diff --git a/net/netfilter/ipvs/ip_vs_proto_udp.c b/net/netfilter/ipvs/ip_vs_proto_udp.c
index e7a6885e0167..8553231b5d41 100644
--- a/net/netfilter/ipvs/ip_vs_proto_udp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_udp.c
@@ -27,58 +27,6 @@
27#include <net/ip.h> 27#include <net/ip.h>
28#include <net/ip6_checksum.h> 28#include <net/ip6_checksum.h>
29 29
30static struct ip_vs_conn *
31udp_conn_in_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp,
32 const struct ip_vs_iphdr *iph, unsigned int proto_off,
33 int inverse)
34{
35 struct ip_vs_conn *cp;
36 __be16 _ports[2], *pptr;
37
38 pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
39 if (pptr == NULL)
40 return NULL;
41
42 if (likely(!inverse)) {
43 cp = ip_vs_conn_in_get(af, iph->protocol,
44 &iph->saddr, pptr[0],
45 &iph->daddr, pptr[1]);
46 } else {
47 cp = ip_vs_conn_in_get(af, iph->protocol,
48 &iph->daddr, pptr[1],
49 &iph->saddr, pptr[0]);
50 }
51
52 return cp;
53}
54
55
56static struct ip_vs_conn *
57udp_conn_out_get(int af, const struct sk_buff *skb, struct ip_vs_protocol *pp,
58 const struct ip_vs_iphdr *iph, unsigned int proto_off,
59 int inverse)
60{
61 struct ip_vs_conn *cp;
62 __be16 _ports[2], *pptr;
63
64 pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
65 if (pptr == NULL)
66 return NULL;
67
68 if (likely(!inverse)) {
69 cp = ip_vs_conn_out_get(af, iph->protocol,
70 &iph->saddr, pptr[0],
71 &iph->daddr, pptr[1]);
72 } else {
73 cp = ip_vs_conn_out_get(af, iph->protocol,
74 &iph->daddr, pptr[1],
75 &iph->saddr, pptr[0]);
76 }
77
78 return cp;
79}
80
81
82static int 30static int
83udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp, 31udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_protocol *pp,
84 int *verdict, struct ip_vs_conn **cpp) 32 int *verdict, struct ip_vs_conn **cpp)
@@ -520,8 +468,8 @@ struct ip_vs_protocol ip_vs_protocol_udp = {
520 .init = udp_init, 468 .init = udp_init,
521 .exit = udp_exit, 469 .exit = udp_exit,
522 .conn_schedule = udp_conn_schedule, 470 .conn_schedule = udp_conn_schedule,
523 .conn_in_get = udp_conn_in_get, 471 .conn_in_get = ip_vs_conn_in_get_proto,
524 .conn_out_get = udp_conn_out_get, 472 .conn_out_get = ip_vs_conn_out_get_proto,
525 .snat_handler = udp_snat_handler, 473 .snat_handler = udp_snat_handler,
526 .dnat_handler = udp_dnat_handler, 474 .dnat_handler = udp_dnat_handler,
527 .csum_check = udp_csum_check, 475 .csum_check = udp_csum_check,
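The per-protocol lookups deleted from the SCTP, TCP and UDP handlers were identical apart from the table direction; they now live once in ip_vs_conn.c as ip_vs_conn_in_get_proto()/ip_vs_conn_out_get_proto(), with proto_off (the transport-header offset) as the only per-protocol input. The shared core they all relied on, for reference:

	__be16 _ports[2], *pptr;

	/* copy both 16-bit ports even if they straddle skb fragments */
	pptr = skb_header_pointer(skb, proto_off, sizeof(_ports), _ports);
	if (pptr == NULL)
		return NULL;
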
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 93c15a107b2c..21e1a5e9b9d3 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -28,6 +28,7 @@
28#include <net/ip6_route.h> 28#include <net/ip6_route.h>
29#include <linux/icmpv6.h> 29#include <linux/icmpv6.h>
30#include <linux/netfilter.h> 30#include <linux/netfilter.h>
31#include <net/netfilter/nf_conntrack.h>
31#include <linux/netfilter_ipv4.h> 32#include <linux/netfilter_ipv4.h>
32 33
33#include <net/ip_vs.h> 34#include <net/ip_vs.h>
@@ -90,10 +91,10 @@ __ip_vs_get_out_rt(struct ip_vs_conn *cp, u32 rtos)
90 &dest->addr.ip); 91 &dest->addr.ip);
91 return NULL; 92 return NULL;
92 } 93 }
93 __ip_vs_dst_set(dest, rtos, dst_clone(&rt->u.dst)); 94 __ip_vs_dst_set(dest, rtos, dst_clone(&rt->dst));
94 IP_VS_DBG(10, "new dst %pI4, refcnt=%d, rtos=%X\n", 95 IP_VS_DBG(10, "new dst %pI4, refcnt=%d, rtos=%X\n",
95 &dest->addr.ip, 96 &dest->addr.ip,
96 atomic_read(&rt->u.dst.__refcnt), rtos); 97 atomic_read(&rt->dst.__refcnt), rtos);
97 } 98 }
98 spin_unlock(&dest->dst_lock); 99 spin_unlock(&dest->dst_lock);
99 } else { 100 } else {
@@ -148,10 +149,10 @@ __ip_vs_get_out_rt_v6(struct ip_vs_conn *cp)
148 &dest->addr.in6); 149 &dest->addr.in6);
149 return NULL; 150 return NULL;
150 } 151 }
151 __ip_vs_dst_set(dest, 0, dst_clone(&rt->u.dst)); 152 __ip_vs_dst_set(dest, 0, dst_clone(&rt->dst));
152 IP_VS_DBG(10, "new dst %pI6, refcnt=%d\n", 153 IP_VS_DBG(10, "new dst %pI6, refcnt=%d\n",
153 &dest->addr.in6, 154 &dest->addr.in6,
154 atomic_read(&rt->u.dst.__refcnt)); 155 atomic_read(&rt->dst.__refcnt));
155 } 156 }
156 spin_unlock(&dest->dst_lock); 157 spin_unlock(&dest->dst_lock);
157 } else { 158 } else {
@@ -198,7 +199,7 @@ do { \
198 (skb)->ipvs_property = 1; \ 199 (skb)->ipvs_property = 1; \
199 skb_forward_csum(skb); \ 200 skb_forward_csum(skb); \
200 NF_HOOK(pf, NF_INET_LOCAL_OUT, (skb), NULL, \ 201 NF_HOOK(pf, NF_INET_LOCAL_OUT, (skb), NULL, \
201 (rt)->u.dst.dev, dst_output); \ 202 (rt)->dst.dev, dst_output); \
202} while (0) 203} while (0)
203 204
204 205
@@ -245,7 +246,7 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
245 } 246 }
246 247
247 /* MTU checking */ 248 /* MTU checking */
248 mtu = dst_mtu(&rt->u.dst); 249 mtu = dst_mtu(&rt->dst);
249 if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF))) { 250 if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF))) {
250 ip_rt_put(rt); 251 ip_rt_put(rt);
251 icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu)); 252 icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
@@ -265,7 +266,7 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
265 266
266 /* drop old route */ 267 /* drop old route */
267 skb_dst_drop(skb); 268 skb_dst_drop(skb);
268 skb_dst_set(skb, &rt->u.dst); 269 skb_dst_set(skb, &rt->dst);
269 270
270 /* Another hack: avoid icmp_send in ip_fragment */ 271 /* Another hack: avoid icmp_send in ip_fragment */
271 skb->local_df = 1; 272 skb->local_df = 1;
@@ -309,9 +310,9 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
309 } 310 }
310 311
311 /* MTU checking */ 312 /* MTU checking */
312 mtu = dst_mtu(&rt->u.dst); 313 mtu = dst_mtu(&rt->dst);
313 if (skb->len > mtu) { 314 if (skb->len > mtu) {
314 dst_release(&rt->u.dst); 315 dst_release(&rt->dst);
315 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 316 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
316 IP_VS_DBG_RL("%s(): frag needed\n", __func__); 317 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
317 goto tx_error; 318 goto tx_error;
@@ -323,13 +324,13 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
323 */ 324 */
324 skb = skb_share_check(skb, GFP_ATOMIC); 325 skb = skb_share_check(skb, GFP_ATOMIC);
325 if (unlikely(skb == NULL)) { 326 if (unlikely(skb == NULL)) {
326 dst_release(&rt->u.dst); 327 dst_release(&rt->dst);
327 return NF_STOLEN; 328 return NF_STOLEN;
328 } 329 }
329 330
330 /* drop old route */ 331 /* drop old route */
331 skb_dst_drop(skb); 332 skb_dst_drop(skb);
332 skb_dst_set(skb, &rt->u.dst); 333 skb_dst_set(skb, &rt->dst);
333 334
334 /* Another hack: avoid icmp_send in ip_fragment */ 335 /* Another hack: avoid icmp_send in ip_fragment */
335 skb->local_df = 1; 336 skb->local_df = 1;
@@ -348,6 +349,30 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
348} 349}
349#endif 350#endif
350 351
352static void
353ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp)
354{
355 struct nf_conn *ct = (struct nf_conn *)skb->nfct;
356 struct nf_conntrack_tuple new_tuple;
357
358 if (ct == NULL || nf_ct_is_untracked(ct) || nf_ct_is_confirmed(ct))
359 return;
360
361 /*
362 * The connection is not yet in the hashtable, so we update it.
363 * CIP->VIP will remain the same, so leave the tuple in
364 * IP_CT_DIR_ORIGINAL untouched. When the reply comes back from the
365 * real server we will see RIP->DIP.
366 */
367 new_tuple = ct->tuplehash[IP_CT_DIR_REPLY].tuple;
368 new_tuple.src.u3 = cp->daddr;
369 /*
370 * This will also take care of UDP and other protocols.
371 */
372 new_tuple.src.u.tcp.port = cp->dport;
373 nf_conntrack_alter_reply(ct, &new_tuple);
374}
375
351/* 376/*
352 * NAT transmitter (only for outside-to-inside nat forwarding) 377 * NAT transmitter (only for outside-to-inside nat forwarding)
353 * Not used for related ICMP 378 * Not used for related ICMP
@@ -376,7 +401,7 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
376 goto tx_error_icmp; 401 goto tx_error_icmp;
377 402
378 /* MTU checking */ 403 /* MTU checking */
379 mtu = dst_mtu(&rt->u.dst); 404 mtu = dst_mtu(&rt->dst);
380 if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF))) { 405 if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF))) {
381 ip_rt_put(rt); 406 ip_rt_put(rt);
382 icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu)); 407 icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
@@ -388,12 +413,12 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
388 if (!skb_make_writable(skb, sizeof(struct iphdr))) 413 if (!skb_make_writable(skb, sizeof(struct iphdr)))
389 goto tx_error_put; 414 goto tx_error_put;
390 415
391 if (skb_cow(skb, rt->u.dst.dev->hard_header_len)) 416 if (skb_cow(skb, rt->dst.dev->hard_header_len))
392 goto tx_error_put; 417 goto tx_error_put;
393 418
394 /* drop old route */ 419 /* drop old route */
395 skb_dst_drop(skb); 420 skb_dst_drop(skb);
396 skb_dst_set(skb, &rt->u.dst); 421 skb_dst_set(skb, &rt->dst);
397 422
398 /* mangle the packet */ 423 /* mangle the packet */
399 if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp)) 424 if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp))
@@ -403,6 +428,8 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
403 428
404 IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT"); 429 IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT");
405 430
431 ip_vs_update_conntrack(skb, cp);
432
406 /* FIXME: when application helper enlarges the packet and the length 433 /* FIXME: when application helper enlarges the packet and the length
407 is larger than the MTU of outgoing device, there will be still 434 is larger than the MTU of outgoing device, there will be still
408 MTU problem. */ 435 MTU problem. */
@@ -452,9 +479,9 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
452 goto tx_error_icmp; 479 goto tx_error_icmp;
453 480
454 /* MTU checking */ 481 /* MTU checking */
455 mtu = dst_mtu(&rt->u.dst); 482 mtu = dst_mtu(&rt->dst);
456 if (skb->len > mtu) { 483 if (skb->len > mtu) {
457 dst_release(&rt->u.dst); 484 dst_release(&rt->dst);
458 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 485 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
459 IP_VS_DBG_RL_PKT(0, pp, skb, 0, 486 IP_VS_DBG_RL_PKT(0, pp, skb, 0,
460 "ip_vs_nat_xmit_v6(): frag needed for"); 487 "ip_vs_nat_xmit_v6(): frag needed for");
@@ -465,12 +492,12 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
465 if (!skb_make_writable(skb, sizeof(struct ipv6hdr))) 492 if (!skb_make_writable(skb, sizeof(struct ipv6hdr)))
466 goto tx_error_put; 493 goto tx_error_put;
467 494
468 if (skb_cow(skb, rt->u.dst.dev->hard_header_len)) 495 if (skb_cow(skb, rt->dst.dev->hard_header_len))
469 goto tx_error_put; 496 goto tx_error_put;
470 497
471 /* drop old route */ 498 /* drop old route */
472 skb_dst_drop(skb); 499 skb_dst_drop(skb);
473 skb_dst_set(skb, &rt->u.dst); 500 skb_dst_set(skb, &rt->dst);
474 501
475 /* mangle the packet */ 502 /* mangle the packet */
476 if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp)) 503 if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp))
@@ -479,6 +506,8 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
479 506
480 IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT"); 507 IP_VS_DBG_PKT(10, pp, skb, 0, "After DNAT");
481 508
509 ip_vs_update_conntrack(skb, cp);
510
482 /* FIXME: when application helper enlarges the packet and the length 511 /* FIXME: when application helper enlarges the packet and the length
483 is larger than the MTU of outgoing device, there will be still 512 is larger than the MTU of outgoing device, there will be still
484 MTU problem. */ 513 MTU problem. */
@@ -498,7 +527,7 @@ tx_error:
498 kfree_skb(skb); 527 kfree_skb(skb);
499 return NF_STOLEN; 528 return NF_STOLEN;
500tx_error_put: 529tx_error_put:
501 dst_release(&rt->u.dst); 530 dst_release(&rt->dst);
502 goto tx_error; 531 goto tx_error;
503} 532}
504#endif 533#endif
@@ -549,9 +578,9 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
549 if (!(rt = __ip_vs_get_out_rt(cp, RT_TOS(tos)))) 578 if (!(rt = __ip_vs_get_out_rt(cp, RT_TOS(tos))))
550 goto tx_error_icmp; 579 goto tx_error_icmp;
551 580
552 tdev = rt->u.dst.dev; 581 tdev = rt->dst.dev;
553 582
554 mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr); 583 mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr);
555 if (mtu < 68) { 584 if (mtu < 68) {
556 ip_rt_put(rt); 585 ip_rt_put(rt);
557 IP_VS_DBG_RL("%s(): mtu less than 68\n", __func__); 586 IP_VS_DBG_RL("%s(): mtu less than 68\n", __func__);
@@ -601,7 +630,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
601 630
602 /* drop old route */ 631 /* drop old route */
603 skb_dst_drop(skb); 632 skb_dst_drop(skb);
604 skb_dst_set(skb, &rt->u.dst); 633 skb_dst_set(skb, &rt->dst);
605 634
606 /* 635 /*
607 * Push down and install the IPIP header. 636 * Push down and install the IPIP header.
@@ -615,7 +644,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
615 iph->daddr = rt->rt_dst; 644 iph->daddr = rt->rt_dst;
616 iph->saddr = rt->rt_src; 645 iph->saddr = rt->rt_src;
617 iph->ttl = old_iph->ttl; 646 iph->ttl = old_iph->ttl;
618 ip_select_ident(iph, &rt->u.dst, NULL); 647 ip_select_ident(iph, &rt->dst, NULL);
619 648
620 /* Another hack: avoid icmp_send in ip_fragment */ 649 /* Another hack: avoid icmp_send in ip_fragment */
621 skb->local_df = 1; 650 skb->local_df = 1;
@@ -660,12 +689,12 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
660 if (!rt) 689 if (!rt)
661 goto tx_error_icmp; 690 goto tx_error_icmp;
662 691
663 tdev = rt->u.dst.dev; 692 tdev = rt->dst.dev;
664 693
665 mtu = dst_mtu(&rt->u.dst) - sizeof(struct ipv6hdr); 694 mtu = dst_mtu(&rt->dst) - sizeof(struct ipv6hdr);
666 /* TODO IPv6: do we need this check in IPv6? */ 695 /* TODO IPv6: do we need this check in IPv6? */
667 if (mtu < 1280) { 696 if (mtu < 1280) {
668 dst_release(&rt->u.dst); 697 dst_release(&rt->dst);
669 IP_VS_DBG_RL("%s(): mtu less than 1280\n", __func__); 698 IP_VS_DBG_RL("%s(): mtu less than 1280\n", __func__);
670 goto tx_error; 699 goto tx_error;
671 } 700 }
@@ -674,7 +703,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
674 703
675 if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr)) { 704 if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr)) {
676 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 705 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
677 dst_release(&rt->u.dst); 706 dst_release(&rt->dst);
678 IP_VS_DBG_RL("%s(): frag needed\n", __func__); 707 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
679 goto tx_error; 708 goto tx_error;
680 } 709 }
@@ -689,7 +718,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
689 struct sk_buff *new_skb = 718 struct sk_buff *new_skb =
690 skb_realloc_headroom(skb, max_headroom); 719 skb_realloc_headroom(skb, max_headroom);
691 if (!new_skb) { 720 if (!new_skb) {
692 dst_release(&rt->u.dst); 721 dst_release(&rt->dst);
693 kfree_skb(skb); 722 kfree_skb(skb);
694 IP_VS_ERR_RL("%s(): no memory\n", __func__); 723 IP_VS_ERR_RL("%s(): no memory\n", __func__);
695 return NF_STOLEN; 724 return NF_STOLEN;
@@ -707,7 +736,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
707 736
708 /* drop old route */ 737 /* drop old route */
709 skb_dst_drop(skb); 738 skb_dst_drop(skb);
710 skb_dst_set(skb, &rt->u.dst); 739 skb_dst_set(skb, &rt->dst);
711 740
712 /* 741 /*
713 * Push down and install the IPIP header. 742 * Push down and install the IPIP header.
@@ -760,7 +789,7 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
760 goto tx_error_icmp; 789 goto tx_error_icmp;
761 790
762 /* MTU checking */ 791 /* MTU checking */
763 mtu = dst_mtu(&rt->u.dst); 792 mtu = dst_mtu(&rt->dst);
764 if ((iph->frag_off & htons(IP_DF)) && skb->len > mtu) { 793 if ((iph->frag_off & htons(IP_DF)) && skb->len > mtu) {
765 icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu)); 794 icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
766 ip_rt_put(rt); 795 ip_rt_put(rt);
@@ -780,7 +809,7 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
780 809
781 /* drop old route */ 810 /* drop old route */
782 skb_dst_drop(skb); 811 skb_dst_drop(skb);
783 skb_dst_set(skb, &rt->u.dst); 812 skb_dst_set(skb, &rt->dst);
784 813
785 /* Another hack: avoid icmp_send in ip_fragment */ 814 /* Another hack: avoid icmp_send in ip_fragment */
786 skb->local_df = 1; 815 skb->local_df = 1;
@@ -813,10 +842,10 @@ ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
813 goto tx_error_icmp; 842 goto tx_error_icmp;
814 843
815 /* MTU checking */ 844 /* MTU checking */
816 mtu = dst_mtu(&rt->u.dst); 845 mtu = dst_mtu(&rt->dst);
817 if (skb->len > mtu) { 846 if (skb->len > mtu) {
818 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 847 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
819 dst_release(&rt->u.dst); 848 dst_release(&rt->dst);
820 IP_VS_DBG_RL("%s(): frag needed\n", __func__); 849 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
821 goto tx_error; 850 goto tx_error;
822 } 851 }
@@ -827,13 +856,13 @@ ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
827 */ 856 */
828 skb = skb_share_check(skb, GFP_ATOMIC); 857 skb = skb_share_check(skb, GFP_ATOMIC);
829 if (unlikely(skb == NULL)) { 858 if (unlikely(skb == NULL)) {
830 dst_release(&rt->u.dst); 859 dst_release(&rt->dst);
831 return NF_STOLEN; 860 return NF_STOLEN;
832 } 861 }
833 862
834 /* drop old route */ 863 /* drop old route */
835 skb_dst_drop(skb); 864 skb_dst_drop(skb);
836 skb_dst_set(skb, &rt->u.dst); 865 skb_dst_set(skb, &rt->dst);
837 866
838 /* Another hack: avoid icmp_send in ip_fragment */ 867 /* Another hack: avoid icmp_send in ip_fragment */
839 skb->local_df = 1; 868 skb->local_df = 1;
@@ -888,7 +917,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
888 goto tx_error_icmp; 917 goto tx_error_icmp;
889 918
890 /* MTU checking */ 919 /* MTU checking */
891 mtu = dst_mtu(&rt->u.dst); 920 mtu = dst_mtu(&rt->dst);
892 if ((skb->len > mtu) && (ip_hdr(skb)->frag_off & htons(IP_DF))) { 921 if ((skb->len > mtu) && (ip_hdr(skb)->frag_off & htons(IP_DF))) {
893 ip_rt_put(rt); 922 ip_rt_put(rt);
894 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); 923 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
@@ -900,12 +929,12 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
900 if (!skb_make_writable(skb, offset)) 929 if (!skb_make_writable(skb, offset))
901 goto tx_error_put; 930 goto tx_error_put;
902 931
903 if (skb_cow(skb, rt->u.dst.dev->hard_header_len)) 932 if (skb_cow(skb, rt->dst.dev->hard_header_len))
904 goto tx_error_put; 933 goto tx_error_put;
905 934
906 /* drop the old route when skb is not shared */ 935 /* drop the old route when skb is not shared */
907 skb_dst_drop(skb); 936 skb_dst_drop(skb);
908 skb_dst_set(skb, &rt->u.dst); 937 skb_dst_set(skb, &rt->dst);
909 938
910 ip_vs_nat_icmp(skb, pp, cp, 0); 939 ip_vs_nat_icmp(skb, pp, cp, 0);
911 940
@@ -963,9 +992,9 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
963 goto tx_error_icmp; 992 goto tx_error_icmp;
964 993
965 /* MTU checking */ 994 /* MTU checking */
966 mtu = dst_mtu(&rt->u.dst); 995 mtu = dst_mtu(&rt->dst);
967 if (skb->len > mtu) { 996 if (skb->len > mtu) {
968 dst_release(&rt->u.dst); 997 dst_release(&rt->dst);
969 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu); 998 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
970 IP_VS_DBG_RL("%s(): frag needed\n", __func__); 999 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
971 goto tx_error; 1000 goto tx_error;
@@ -975,12 +1004,12 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
975 if (!skb_make_writable(skb, offset)) 1004 if (!skb_make_writable(skb, offset))
976 goto tx_error_put; 1005 goto tx_error_put;
977 1006
978 if (skb_cow(skb, rt->u.dst.dev->hard_header_len)) 1007 if (skb_cow(skb, rt->dst.dev->hard_header_len))
979 goto tx_error_put; 1008 goto tx_error_put;
980 1009
981 /* drop the old route when skb is not shared */ 1010 /* drop the old route when skb is not shared */
982 skb_dst_drop(skb); 1011 skb_dst_drop(skb);
983 skb_dst_set(skb, &rt->u.dst); 1012 skb_dst_set(skb, &rt->dst);
984 1013
985 ip_vs_nat_icmp_v6(skb, pp, cp, 0); 1014 ip_vs_nat_icmp_v6(skb, pp, cp, 0);
986 1015
@@ -1001,7 +1030,7 @@ out:
1001 LeaveFunction(10); 1030 LeaveFunction(10);
1002 return rc; 1031 return rc;
1003tx_error_put: 1032tx_error_put:
1004 dst_release(&rt->u.dst); 1033 dst_release(&rt->dst);
1005 goto tx_error; 1034 goto tx_error;
1006} 1035}
1007#endif 1036#endif
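The ip_vs hunks above are one slice of a tree-wide rename: struct rtable stops wrapping its dst_entry in a single-member union, so every rt->u.dst access becomes rt->dst. Besides the rename, the NAT paths also gain an ip_vs_update_conntrack() call after the packet is mangled. A minimal sketch of the layout change, using demo types rather than the kernel's full definitions:

struct net_device;

struct dst_entry {
        struct net_device *dev;
        /* ... */
};

/* before: the dst sat inside a single-member union */
struct rtable_before {
        union {
                struct dst_entry dst;   /* rt->u.dst */
        } u;
};

/* after: same object at the same offset, one less level of naming */
struct rtable_after {
        struct dst_entry dst;           /* rt->dst */
};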
diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
index ab81b380eae6..5178c691ecbf 100644
--- a/net/netfilter/nf_conntrack_acct.c
+++ b/net/netfilter/nf_conntrack_acct.c
@@ -17,13 +17,7 @@
17#include <net/netfilter/nf_conntrack_extend.h> 17#include <net/netfilter/nf_conntrack_extend.h>
18#include <net/netfilter/nf_conntrack_acct.h> 18#include <net/netfilter/nf_conntrack_acct.h>
19 19
20#ifdef CONFIG_NF_CT_ACCT 20static int nf_ct_acct __read_mostly;
21#define NF_CT_ACCT_DEFAULT 1
22#else
23#define NF_CT_ACCT_DEFAULT 0
24#endif
25
26static int nf_ct_acct __read_mostly = NF_CT_ACCT_DEFAULT;
27 21
28module_param_named(acct, nf_ct_acct, bool, 0644); 22module_param_named(acct, nf_ct_acct, bool, 0644);
29MODULE_PARM_DESC(acct, "Enable connection tracking flow accounting."); 23MODULE_PARM_DESC(acct, "Enable connection tracking flow accounting.");
@@ -114,12 +108,6 @@ int nf_conntrack_acct_init(struct net *net)
114 net->ct.sysctl_acct = nf_ct_acct; 108 net->ct.sysctl_acct = nf_ct_acct;
115 109
116 if (net_eq(net, &init_net)) { 110 if (net_eq(net, &init_net)) {
117#ifdef CONFIG_NF_CT_ACCT
118 printk(KERN_WARNING "CONFIG_NF_CT_ACCT is deprecated and will be removed soon. Please use\n");
119 printk(KERN_WARNING "nf_conntrack.acct=1 kernel parameter, acct=1 nf_conntrack module option or\n");
120 printk(KERN_WARNING "sysctl net.netfilter.nf_conntrack_acct=1 to enable it.\n");
121#endif
122
123 ret = nf_ct_extend_register(&acct_extend); 111 ret = nf_ct_extend_register(&acct_extend);
124 if (ret < 0) { 112 if (ret < 0) {
125 printk(KERN_ERR "nf_conntrack_acct: Unable to register extension\n"); 113 printk(KERN_ERR "nf_conntrack_acct: Unable to register extension\n");
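With the CONFIG_NF_CT_ACCT block gone, flow accounting defaults to off and becomes a purely runtime decision: the acct module parameter or the net.netfilter.nf_conntrack_acct sysctl flips it. The same runtime-only knob, sketched as a hypothetical stand-alone module:

#include <linux/module.h>
#include <linux/moduleparam.h>

static int demo_acct __read_mostly;     /* 0: accounting disabled */

module_param_named(acct, demo_acct, bool, 0644);
MODULE_PARM_DESC(acct, "Enable demo flow accounting");
MODULE_LICENSE("GPL");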
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index eeeb8bc73982..df3eedb142ff 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -62,8 +62,8 @@ EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);
62unsigned int nf_conntrack_max __read_mostly; 62unsigned int nf_conntrack_max __read_mostly;
63EXPORT_SYMBOL_GPL(nf_conntrack_max); 63EXPORT_SYMBOL_GPL(nf_conntrack_max);
64 64
65struct nf_conn nf_conntrack_untracked __read_mostly; 65DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
66EXPORT_SYMBOL_GPL(nf_conntrack_untracked); 66EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);
67 67
68static int nf_conntrack_hash_rnd_initted; 68static int nf_conntrack_hash_rnd_initted;
69static unsigned int nf_conntrack_hash_rnd; 69static unsigned int nf_conntrack_hash_rnd;
@@ -619,9 +619,7 @@ struct nf_conn *nf_conntrack_alloc(struct net *net, u16 zone,
619 ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev = NULL; 619 ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev = NULL;
620 /* Don't set timer yet: wait for confirmation */ 620 /* Don't set timer yet: wait for confirmation */
621 setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct); 621 setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct);
622#ifdef CONFIG_NET_NS 622 write_pnet(&ct->ct_net, net);
623 ct->ct_net = net;
624#endif
625#ifdef CONFIG_NF_CONNTRACK_ZONES 623#ifdef CONFIG_NF_CONNTRACK_ZONES
626 if (zone) { 624 if (zone) {
627 struct nf_conntrack_zone *nf_ct_zone; 625 struct nf_conntrack_zone *nf_ct_zone;
@@ -968,8 +966,7 @@ acct:
968 if (acct) { 966 if (acct) {
969 spin_lock_bh(&ct->lock); 967 spin_lock_bh(&ct->lock);
970 acct[CTINFO2DIR(ctinfo)].packets++; 968 acct[CTINFO2DIR(ctinfo)].packets++;
971 acct[CTINFO2DIR(ctinfo)].bytes += 969 acct[CTINFO2DIR(ctinfo)].bytes += skb->len;
972 skb->len - skb_network_offset(skb);
973 spin_unlock_bh(&ct->lock); 970 spin_unlock_bh(&ct->lock);
974 } 971 }
975 } 972 }
@@ -1183,10 +1180,21 @@ static void nf_ct_release_dying_list(struct net *net)
1183 spin_unlock_bh(&nf_conntrack_lock); 1180 spin_unlock_bh(&nf_conntrack_lock);
1184} 1181}
1185 1182
1183static int untrack_refs(void)
1184{
1185 int cnt = 0, cpu;
1186
1187 for_each_possible_cpu(cpu) {
1188 struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu);
1189
1190 cnt += atomic_read(&ct->ct_general.use) - 1;
1191 }
1192 return cnt;
1193}
1194
1186static void nf_conntrack_cleanup_init_net(void) 1195static void nf_conntrack_cleanup_init_net(void)
1187{ 1196{
1188 /* wait until all references to nf_conntrack_untracked are dropped */ 1197 while (untrack_refs() > 0)
1189 while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1)
1190 schedule(); 1198 schedule();
1191 1199
1192 nf_conntrack_helper_fini(); 1200 nf_conntrack_helper_fini();
@@ -1321,10 +1329,19 @@ EXPORT_SYMBOL_GPL(nf_conntrack_set_hashsize);
1321module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint, 1329module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
1322 &nf_conntrack_htable_size, 0600); 1330 &nf_conntrack_htable_size, 0600);
1323 1331
1332void nf_ct_untracked_status_or(unsigned long bits)
1333{
1334 int cpu;
1335
1336 for_each_possible_cpu(cpu)
1337 per_cpu(nf_conntrack_untracked, cpu).status |= bits;
1338}
1339EXPORT_SYMBOL_GPL(nf_ct_untracked_status_or);
1340
1324static int nf_conntrack_init_init_net(void) 1341static int nf_conntrack_init_init_net(void)
1325{ 1342{
1326 int max_factor = 8; 1343 int max_factor = 8;
1327 int ret; 1344 int ret, cpu;
1328 1345
1329 /* Idea from tcp.c: use 1/16384 of memory. On i386: 32MB 1346 /* Idea from tcp.c: use 1/16384 of memory. On i386: 32MB
1330 * machine has 512 buckets. >= 1GB machines have 16384 buckets. */ 1347 * machine has 512 buckets. >= 1GB machines have 16384 buckets. */
@@ -1363,13 +1380,13 @@ static int nf_conntrack_init_init_net(void)
1363 goto err_extend; 1380 goto err_extend;
1364#endif 1381#endif
1365 /* Set up fake conntrack: to never be deleted, not in any hashes */ 1382 /* Set up fake conntrack: to never be deleted, not in any hashes */
1366#ifdef CONFIG_NET_NS 1383 for_each_possible_cpu(cpu) {
1367 nf_conntrack_untracked.ct_net = &init_net; 1384 struct nf_conn *ct = &per_cpu(nf_conntrack_untracked, cpu);
1368#endif 1385 write_pnet(&ct->ct_net, &init_net);
1369 atomic_set(&nf_conntrack_untracked.ct_general.use, 1); 1386 atomic_set(&ct->ct_general.use, 1);
1387 }
1370 /* - and look it like as a confirmed connection */ 1388 /* - and look it like as a confirmed connection */
1371 set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status); 1389 nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED);
1372
1373 return 0; 1390 return 0;
1374 1391
1375#ifdef CONFIG_NF_CONNTRACK_ZONES 1392#ifdef CONFIG_NF_CONNTRACK_ZONES
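Making nf_conntrack_untracked per-CPU stops NOTRACK'd traffic from bouncing a single refcount cache line between all CPUs. The cost is that teardown can no longer test one atomic counter; it has to sum the excess references across every possible CPU, which is what untrack_refs() above does. The counting pattern in isolation, with a demo type:

#include <linux/percpu.h>
#include <linux/atomic.h>

struct demo_ct {
        atomic_t use;
};

static DEFINE_PER_CPU(struct demo_ct, demo_untracked);

/* Each CPU's copy holds one baseline reference from init; anything
 * above that is an outstanding user that cleanup must wait out. */
static int demo_untrack_refs(void)
{
        int cnt = 0, cpu;

        for_each_possible_cpu(cpu)
                cnt += atomic_read(&per_cpu(demo_untracked, cpu).use) - 1;
        return cnt;
}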
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
index fdc8fb4ae10f..7dcf7a404190 100644
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -23,9 +23,10 @@ void __nf_ct_ext_destroy(struct nf_conn *ct)
23{ 23{
24 unsigned int i; 24 unsigned int i;
25 struct nf_ct_ext_type *t; 25 struct nf_ct_ext_type *t;
26 struct nf_ct_ext *ext = ct->ext;
26 27
27 for (i = 0; i < NF_CT_EXT_NUM; i++) { 28 for (i = 0; i < NF_CT_EXT_NUM; i++) {
28 if (!nf_ct_ext_exist(ct, i)) 29 if (!__nf_ct_ext_exist(ext, i))
29 continue; 30 continue;
30 31
31 rcu_read_lock(); 32 rcu_read_lock();
@@ -73,44 +74,45 @@ static void __nf_ct_ext_free_rcu(struct rcu_head *head)
73 74
74void *__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp) 75void *__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
75{ 76{
76 struct nf_ct_ext *new; 77 struct nf_ct_ext *old, *new;
77 int i, newlen, newoff; 78 int i, newlen, newoff;
78 struct nf_ct_ext_type *t; 79 struct nf_ct_ext_type *t;
79 80
80 /* Conntrack must not be confirmed to avoid races on reallocation. */ 81 /* Conntrack must not be confirmed to avoid races on reallocation. */
81 NF_CT_ASSERT(!nf_ct_is_confirmed(ct)); 82 NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
82 83
83 if (!ct->ext) 84 old = ct->ext;
85 if (!old)
84 return nf_ct_ext_create(&ct->ext, id, gfp); 86 return nf_ct_ext_create(&ct->ext, id, gfp);
85 87
86 if (nf_ct_ext_exist(ct, id)) 88 if (__nf_ct_ext_exist(old, id))
87 return NULL; 89 return NULL;
88 90
89 rcu_read_lock(); 91 rcu_read_lock();
90 t = rcu_dereference(nf_ct_ext_types[id]); 92 t = rcu_dereference(nf_ct_ext_types[id]);
91 BUG_ON(t == NULL); 93 BUG_ON(t == NULL);
92 94
93 newoff = ALIGN(ct->ext->len, t->align); 95 newoff = ALIGN(old->len, t->align);
94 newlen = newoff + t->len; 96 newlen = newoff + t->len;
95 rcu_read_unlock(); 97 rcu_read_unlock();
96 98
97 new = __krealloc(ct->ext, newlen, gfp); 99 new = __krealloc(old, newlen, gfp);
98 if (!new) 100 if (!new)
99 return NULL; 101 return NULL;
100 102
101 if (new != ct->ext) { 103 if (new != old) {
102 for (i = 0; i < NF_CT_EXT_NUM; i++) { 104 for (i = 0; i < NF_CT_EXT_NUM; i++) {
103 if (!nf_ct_ext_exist(ct, i)) 105 if (!__nf_ct_ext_exist(old, i))
104 continue; 106 continue;
105 107
106 rcu_read_lock(); 108 rcu_read_lock();
107 t = rcu_dereference(nf_ct_ext_types[i]); 109 t = rcu_dereference(nf_ct_ext_types[i]);
108 if (t && t->move) 110 if (t && t->move)
109 t->move((void *)new + new->offset[i], 111 t->move((void *)new + new->offset[i],
110 (void *)ct->ext + ct->ext->offset[i]); 112 (void *)old + old->offset[i]);
111 rcu_read_unlock(); 113 rcu_read_unlock();
112 } 114 }
113 call_rcu(&ct->ext->rcu, __nf_ct_ext_free_rcu); 115 call_rcu(&old->rcu, __nf_ct_ext_free_rcu);
114 ct->ext = new; 116 ct->ext = new;
115 } 117 }
116 118
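The __nf_ct_ext_add() rework guards one subtlety: __krealloc() may return a new block while leaving the old one allocated (it never frees), so the function snapshots ct->ext into old, migrates every registered extension through its ->move() hook against that stable pointer, and only then schedules the old block for freeing after an RCU grace period. A compact userspace sketch of the grow-and-migrate shape, with malloc standing in for __krealloc and the deferred free elided:

#include <stdlib.h>
#include <string.h>

/* Grow 'old' (oldlen used bytes) to newlen. On success the caller owns
 * the returned block and frees 'old' later, once no concurrent reader
 * can still hold it (call_rcu() in the kernel version). */
static void *grow_and_migrate(const void *old, size_t oldlen, size_t newlen)
{
        void *new = malloc(newlen);     /* __krealloc may also return 'old' */

        if (!new)
                return NULL;
        memcpy(new, old, oldlen);       /* plays the role of the ->move() loop */
        return new;
}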
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index 6eaee7c8a337..b969025cf82f 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -734,11 +734,11 @@ static int callforward_do_filter(const union nf_inet_addr *src,
734 if (!afinfo->route((struct dst_entry **)&rt1, &fl1)) { 734 if (!afinfo->route((struct dst_entry **)&rt1, &fl1)) {
735 if (!afinfo->route((struct dst_entry **)&rt2, &fl2)) { 735 if (!afinfo->route((struct dst_entry **)&rt2, &fl2)) {
736 if (rt1->rt_gateway == rt2->rt_gateway && 736 if (rt1->rt_gateway == rt2->rt_gateway &&
737 rt1->u.dst.dev == rt2->u.dst.dev) 737 rt1->dst.dev == rt2->dst.dev)
738 ret = 1; 738 ret = 1;
739 dst_release(&rt2->u.dst); 739 dst_release(&rt2->dst);
740 } 740 }
741 dst_release(&rt1->u.dst); 741 dst_release(&rt1->dst);
742 } 742 }
743 break; 743 break;
744 } 744 }
@@ -753,11 +753,11 @@ static int callforward_do_filter(const union nf_inet_addr *src,
753 if (!afinfo->route((struct dst_entry **)&rt2, &fl2)) { 753 if (!afinfo->route((struct dst_entry **)&rt2, &fl2)) {
754 if (!memcmp(&rt1->rt6i_gateway, &rt2->rt6i_gateway, 754 if (!memcmp(&rt1->rt6i_gateway, &rt2->rt6i_gateway,
755 sizeof(rt1->rt6i_gateway)) && 755 sizeof(rt1->rt6i_gateway)) &&
756 rt1->u.dst.dev == rt2->u.dst.dev) 756 rt1->dst.dev == rt2->dst.dev)
757 ret = 1; 757 ret = 1;
758 dst_release(&rt2->u.dst); 758 dst_release(&rt2->dst);
759 } 759 }
760 dst_release(&rt1->u.dst); 760 dst_release(&rt1->dst);
761 } 761 }
762 break; 762 break;
763 } 763 }
diff --git a/net/netfilter/nf_conntrack_netbios_ns.c b/net/netfilter/nf_conntrack_netbios_ns.c
index 497b2224536f..aadde018a072 100644
--- a/net/netfilter/nf_conntrack_netbios_ns.c
+++ b/net/netfilter/nf_conntrack_netbios_ns.c
@@ -61,7 +61,7 @@ static int help(struct sk_buff *skb, unsigned int protoff,
61 goto out; 61 goto out;
62 62
63 rcu_read_lock(); 63 rcu_read_lock();
64 in_dev = __in_dev_get_rcu(rt->u.dst.dev); 64 in_dev = __in_dev_get_rcu(rt->dst.dev);
65 if (in_dev != NULL) { 65 if (in_dev != NULL) {
66 for_primary_ifa(in_dev) { 66 for_primary_ifa(in_dev) {
67 if (ifa->ifa_broadcast == iph->daddr) { 67 if (ifa->ifa_broadcast == iph->daddr) {
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index c42ff6aa441d..5bae1cd15eea 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -480,7 +480,7 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
480 int err; 480 int err;
481 481
482 /* ignore our fake conntrack entry */ 482 /* ignore our fake conntrack entry */
483 if (ct == &nf_conntrack_untracked) 483 if (nf_ct_is_untracked(ct))
484 return 0; 484 return 0;
485 485
486 if (events & (1 << IPCT_DESTROY)) { 486 if (events & (1 << IPCT_DESTROY)) {
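Since the untracked conntrack is now one object per CPU, pointer comparisons against &nf_conntrack_untracked no longer work; this hunk and several below switch call sites to nf_ct_is_untracked(). Given how nf_conntrack_init_init_net() above ORs IPS_UNTRACKED into every per-CPU copy, the helper is presumably a status-bit test along these lines (assumed shape, not quoted from the patch):

#include <linux/bitops.h>
#include <net/netfilter/nf_conntrack.h>

/* Assumed: the identity check becomes a flag check. */
static inline bool demo_ct_is_untracked(const struct nf_conn *ct)
{
        return test_bit(IPS_UNTRACKED_BIT, &ct->status);
}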
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 9dd8cd4fb6e6..c4c885dca3bd 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -585,8 +585,16 @@ static bool tcp_in_window(const struct nf_conn *ct,
585 * Let's try to use the data from the packet. 585 * Let's try to use the data from the packet.
586 */ 586 */
587 sender->td_end = end; 587 sender->td_end = end;
588 win <<= sender->td_scale;
588 sender->td_maxwin = (win == 0 ? 1 : win); 589 sender->td_maxwin = (win == 0 ? 1 : win);
589 sender->td_maxend = end + sender->td_maxwin; 590 sender->td_maxend = end + sender->td_maxwin;
591 /*
592 * We haven't seen traffic in the other direction yet
593 * but we have to tweak window tracking to pass III
594 * and IV until that happens.
595 */
596 if (receiver->td_maxwin == 0)
597 receiver->td_end = receiver->td_maxend = sack;
590 } 598 }
591 } else if (((state->state == TCP_CONNTRACK_SYN_SENT 599 } else if (((state->state == TCP_CONNTRACK_SYN_SENT
592 && dir == IP_CT_DIR_ORIGINAL) 600 && dir == IP_CT_DIR_ORIGINAL)
@@ -680,7 +688,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
680 /* 688 /*
681 * Update receiver data. 689 * Update receiver data.
682 */ 690 */
683 if (after(end, sender->td_maxend)) 691 if (receiver->td_maxwin != 0 && after(end, sender->td_maxend))
684 receiver->td_maxwin += end - sender->td_maxend; 692 receiver->td_maxwin += end - sender->td_maxend;
685 if (after(sack + win, receiver->td_maxend - 1)) { 693 if (after(sack + win, receiver->td_maxend - 1)) {
686 receiver->td_maxend = sack + win; 694 receiver->td_maxend = sack + win;
@@ -736,27 +744,19 @@ static bool tcp_in_window(const struct nf_conn *ct,
736 return res; 744 return res;
737} 745}
738 746
739#define TH_FIN 0x01
740#define TH_SYN 0x02
741#define TH_RST 0x04
742#define TH_PUSH 0x08
743#define TH_ACK 0x10
744#define TH_URG 0x20
745#define TH_ECE 0x40
746#define TH_CWR 0x80
747
748/* table of valid flag combinations - PUSH, ECE and CWR are always valid */ 747/* table of valid flag combinations - PUSH, ECE and CWR are always valid */
749static const u8 tcp_valid_flags[(TH_FIN|TH_SYN|TH_RST|TH_ACK|TH_URG) + 1] = 748static const u8 tcp_valid_flags[(TCPHDR_FIN|TCPHDR_SYN|TCPHDR_RST|TCPHDR_ACK|
749 TCPHDR_URG) + 1] =
750{ 750{
751 [TH_SYN] = 1, 751 [TCPHDR_SYN] = 1,
752 [TH_SYN|TH_URG] = 1, 752 [TCPHDR_SYN|TCPHDR_URG] = 1,
753 [TH_SYN|TH_ACK] = 1, 753 [TCPHDR_SYN|TCPHDR_ACK] = 1,
754 [TH_RST] = 1, 754 [TCPHDR_RST] = 1,
755 [TH_RST|TH_ACK] = 1, 755 [TCPHDR_RST|TCPHDR_ACK] = 1,
756 [TH_FIN|TH_ACK] = 1, 756 [TCPHDR_FIN|TCPHDR_ACK] = 1,
757 [TH_FIN|TH_ACK|TH_URG] = 1, 757 [TCPHDR_FIN|TCPHDR_ACK|TCPHDR_URG] = 1,
758 [TH_ACK] = 1, 758 [TCPHDR_ACK] = 1,
759 [TH_ACK|TH_URG] = 1, 759 [TCPHDR_ACK|TCPHDR_URG] = 1,
760}; 760};
761 761
762/* Protect conntrack against broken packets. Code taken from ipt_unclean.c. */ 762/* Protect conntrack against broken packets. Code taken from ipt_unclean.c. */
@@ -803,7 +803,7 @@ static int tcp_error(struct net *net, struct nf_conn *tmpl,
803 } 803 }
804 804
805 /* Check TCP flags. */ 805 /* Check TCP flags. */
806 tcpflags = (((u_int8_t *)th)[13] & ~(TH_ECE|TH_CWR|TH_PUSH)); 806 tcpflags = (tcp_flag_byte(th) & ~(TCPHDR_ECE|TCPHDR_CWR|TCPHDR_PSH));
807 if (!tcp_valid_flags[tcpflags]) { 807 if (!tcp_valid_flags[tcpflags]) {
808 if (LOG_INVALID(net, IPPROTO_TCP)) 808 if (LOG_INVALID(net, IPPROTO_TCP))
809 nf_log_packet(pf, 0, skb, NULL, NULL, NULL, 809 nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
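The conntrack-private TH_* defines are dropped in favour of the shared TCPHDR_* constants, and tcp_flag_byte() reads the TCP header's flags byte directly instead of the open-coded ((u_int8_t *)th)[13]. A sketch of the resulting validity check, assuming those helpers:

#include <net/tcp.h>

/* PSH, ECE and CWR never invalidate a packet, so they are masked off
 * before indexing the table of acceptable flag combinations. */
static bool demo_tcp_flags_valid(const struct tcphdr *th, const u8 *valid)
{
        u8 flags = tcp_flag_byte(th) & ~(TCPHDR_ECE | TCPHDR_CWR | TCPHDR_PSH);

        return valid[flags];
}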
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index fc9a211e629e..6a1572b0ab41 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -66,9 +66,10 @@ struct nfulnl_instance {
66 u_int16_t group_num; /* number of this queue */ 66 u_int16_t group_num; /* number of this queue */
67 u_int16_t flags; 67 u_int16_t flags;
68 u_int8_t copy_mode; 68 u_int8_t copy_mode;
69 struct rcu_head rcu;
69}; 70};
70 71
71static DEFINE_RWLOCK(instances_lock); 72static DEFINE_SPINLOCK(instances_lock);
72static atomic_t global_seq; 73static atomic_t global_seq;
73 74
74#define INSTANCE_BUCKETS 16 75#define INSTANCE_BUCKETS 16
@@ -88,7 +89,7 @@ __instance_lookup(u_int16_t group_num)
88 struct nfulnl_instance *inst; 89 struct nfulnl_instance *inst;
89 90
90 head = &instance_table[instance_hashfn(group_num)]; 91 head = &instance_table[instance_hashfn(group_num)];
91 hlist_for_each_entry(inst, pos, head, hlist) { 92 hlist_for_each_entry_rcu(inst, pos, head, hlist) {
92 if (inst->group_num == group_num) 93 if (inst->group_num == group_num)
93 return inst; 94 return inst;
94 } 95 }
@@ -106,22 +107,26 @@ instance_lookup_get(u_int16_t group_num)
106{ 107{
107 struct nfulnl_instance *inst; 108 struct nfulnl_instance *inst;
108 109
109 read_lock_bh(&instances_lock); 110 rcu_read_lock_bh();
110 inst = __instance_lookup(group_num); 111 inst = __instance_lookup(group_num);
111 if (inst) 112 if (inst && !atomic_inc_not_zero(&inst->use))
112 instance_get(inst); 113 inst = NULL;
113 read_unlock_bh(&instances_lock); 114 rcu_read_unlock_bh();
114 115
115 return inst; 116 return inst;
116} 117}
117 118
119static void nfulnl_instance_free_rcu(struct rcu_head *head)
120{
121 kfree(container_of(head, struct nfulnl_instance, rcu));
122 module_put(THIS_MODULE);
123}
124
118static void 125static void
119instance_put(struct nfulnl_instance *inst) 126instance_put(struct nfulnl_instance *inst)
120{ 127{
121 if (inst && atomic_dec_and_test(&inst->use)) { 128 if (inst && atomic_dec_and_test(&inst->use))
122 kfree(inst); 129 call_rcu_bh(&inst->rcu, nfulnl_instance_free_rcu);
123 module_put(THIS_MODULE);
124 }
125} 130}
126 131
127static void nfulnl_timer(unsigned long data); 132static void nfulnl_timer(unsigned long data);
@@ -132,7 +137,7 @@ instance_create(u_int16_t group_num, int pid)
132 struct nfulnl_instance *inst; 137 struct nfulnl_instance *inst;
133 int err; 138 int err;
134 139
135 write_lock_bh(&instances_lock); 140 spin_lock_bh(&instances_lock);
136 if (__instance_lookup(group_num)) { 141 if (__instance_lookup(group_num)) {
137 err = -EEXIST; 142 err = -EEXIST;
138 goto out_unlock; 143 goto out_unlock;
@@ -166,32 +171,37 @@ instance_create(u_int16_t group_num, int pid)
166 inst->copy_mode = NFULNL_COPY_PACKET; 171 inst->copy_mode = NFULNL_COPY_PACKET;
167 inst->copy_range = NFULNL_COPY_RANGE_MAX; 172 inst->copy_range = NFULNL_COPY_RANGE_MAX;
168 173
169 hlist_add_head(&inst->hlist, 174 hlist_add_head_rcu(&inst->hlist,
170 &instance_table[instance_hashfn(group_num)]); 175 &instance_table[instance_hashfn(group_num)]);
171 176
172 write_unlock_bh(&instances_lock); 177 spin_unlock_bh(&instances_lock);
173 178
174 return inst; 179 return inst;
175 180
176out_unlock: 181out_unlock:
177 write_unlock_bh(&instances_lock); 182 spin_unlock_bh(&instances_lock);
178 return ERR_PTR(err); 183 return ERR_PTR(err);
179} 184}
180 185
181static void __nfulnl_flush(struct nfulnl_instance *inst); 186static void __nfulnl_flush(struct nfulnl_instance *inst);
182 187
188/* called with BH disabled */
183static void 189static void
184__instance_destroy(struct nfulnl_instance *inst) 190__instance_destroy(struct nfulnl_instance *inst)
185{ 191{
186 /* first pull it out of the global list */ 192 /* first pull it out of the global list */
187 hlist_del(&inst->hlist); 193 hlist_del_rcu(&inst->hlist);
188 194
189 /* then flush all pending packets from skb */ 195 /* then flush all pending packets from skb */
190 196
191 spin_lock_bh(&inst->lock); 197 spin_lock(&inst->lock);
198
199 /* lockless readers won't be able to use us */
200 inst->copy_mode = NFULNL_COPY_DISABLED;
201
192 if (inst->skb) 202 if (inst->skb)
193 __nfulnl_flush(inst); 203 __nfulnl_flush(inst);
194 spin_unlock_bh(&inst->lock); 204 spin_unlock(&inst->lock);
195 205
196 /* and finally put the refcount */ 206 /* and finally put the refcount */
197 instance_put(inst); 207 instance_put(inst);
@@ -200,9 +210,9 @@ __instance_destroy(struct nfulnl_instance *inst)
200static inline void 210static inline void
201instance_destroy(struct nfulnl_instance *inst) 211instance_destroy(struct nfulnl_instance *inst)
202{ 212{
203 write_lock_bh(&instances_lock); 213 spin_lock_bh(&instances_lock);
204 __instance_destroy(inst); 214 __instance_destroy(inst);
205 write_unlock_bh(&instances_lock); 215 spin_unlock_bh(&instances_lock);
206} 216}
207 217
208static int 218static int
@@ -403,8 +413,9 @@ __build_packet_message(struct nfulnl_instance *inst,
403 NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSINDEV, 413 NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
404 htonl(indev->ifindex)); 414 htonl(indev->ifindex));
405 /* this is the bridge group "brX" */ 415 /* this is the bridge group "brX" */
416 /* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */
406 NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV, 417 NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV,
407 htonl(indev->br_port->br->dev->ifindex)); 418 htonl(br_port_get_rcu(indev)->br->dev->ifindex));
408 } else { 419 } else {
409 /* Case 2: indev is bridge group, we need to look for 420 /* Case 2: indev is bridge group, we need to look for
410 * physical device (when called from ipv4) */ 421 * physical device (when called from ipv4) */
@@ -430,8 +441,9 @@ __build_packet_message(struct nfulnl_instance *inst,
430 NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV, 441 NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
431 htonl(outdev->ifindex)); 442 htonl(outdev->ifindex));
432 /* this is the bridge group "brX" */ 443 /* this is the bridge group "brX" */
444 /* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */
433 NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV, 445 NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV,
434 htonl(outdev->br_port->br->dev->ifindex)); 446 htonl(br_port_get_rcu(outdev)->br->dev->ifindex));
435 } else { 447 } else {
436 /* Case 2: indev is a bridge group, we need to look 448 /* Case 2: indev is a bridge group, we need to look
437 * for physical device (when called from ipv4) */ 449 * for physical device (when called from ipv4) */
@@ -619,6 +631,7 @@ nfulnl_log_packet(u_int8_t pf,
619 size += nla_total_size(data_len); 631 size += nla_total_size(data_len);
620 break; 632 break;
621 633
634 case NFULNL_COPY_DISABLED:
622 default: 635 default:
623 goto unlock_and_release; 636 goto unlock_and_release;
624 } 637 }
@@ -672,7 +685,7 @@ nfulnl_rcv_nl_event(struct notifier_block *this,
672 int i; 685 int i;
673 686
674 /* destroy all instances for this pid */ 687 /* destroy all instances for this pid */
675 write_lock_bh(&instances_lock); 688 spin_lock_bh(&instances_lock);
676 for (i = 0; i < INSTANCE_BUCKETS; i++) { 689 for (i = 0; i < INSTANCE_BUCKETS; i++) {
677 struct hlist_node *tmp, *t2; 690 struct hlist_node *tmp, *t2;
678 struct nfulnl_instance *inst; 691 struct nfulnl_instance *inst;
@@ -684,7 +697,7 @@ nfulnl_rcv_nl_event(struct notifier_block *this,
684 __instance_destroy(inst); 697 __instance_destroy(inst);
685 } 698 }
686 } 699 }
687 write_unlock_bh(&instances_lock); 700 spin_unlock_bh(&instances_lock);
688 } 701 }
689 return NOTIFY_DONE; 702 return NOTIFY_DONE;
690} 703}
@@ -861,19 +874,19 @@ static struct hlist_node *get_first(struct iter_state *st)
861 874
862 for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) { 875 for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
863 if (!hlist_empty(&instance_table[st->bucket])) 876 if (!hlist_empty(&instance_table[st->bucket]))
864 return instance_table[st->bucket].first; 877 return rcu_dereference_bh(instance_table[st->bucket].first);
865 } 878 }
866 return NULL; 879 return NULL;
867} 880}
868 881
869static struct hlist_node *get_next(struct iter_state *st, struct hlist_node *h) 882static struct hlist_node *get_next(struct iter_state *st, struct hlist_node *h)
870{ 883{
871 h = h->next; 884 h = rcu_dereference_bh(h->next);
872 while (!h) { 885 while (!h) {
873 if (++st->bucket >= INSTANCE_BUCKETS) 886 if (++st->bucket >= INSTANCE_BUCKETS)
874 return NULL; 887 return NULL;
875 888
876 h = instance_table[st->bucket].first; 889 h = rcu_dereference_bh(instance_table[st->bucket].first);
877 } 890 }
878 return h; 891 return h;
879} 892}
@@ -890,9 +903,9 @@ static struct hlist_node *get_idx(struct iter_state *st, loff_t pos)
890} 903}
891 904
892static void *seq_start(struct seq_file *seq, loff_t *pos) 905static void *seq_start(struct seq_file *seq, loff_t *pos)
893 __acquires(instances_lock) 906 __acquires(rcu_bh)
894{ 907{
895 read_lock_bh(&instances_lock); 908 rcu_read_lock_bh();
896 return get_idx(seq->private, *pos); 909 return get_idx(seq->private, *pos);
897} 910}
898 911
@@ -903,9 +916,9 @@ static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
903} 916}
904 917
905static void seq_stop(struct seq_file *s, void *v) 918static void seq_stop(struct seq_file *s, void *v)
906 __releases(instances_lock) 919 __releases(rcu_bh)
907{ 920{
908 read_unlock_bh(&instances_lock); 921 rcu_read_unlock_bh();
909} 922}
910 923
911static int seq_show(struct seq_file *s, void *v) 924static int seq_show(struct seq_file *s, void *v)
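The nfulnl instance table moves from an rwlock to RCU: readers traverse the hash under rcu_read_lock_bh() and must acquire their reference with atomic_inc_not_zero(), because an instance they find may already have hit zero and be queued for freeing; kfree() itself is deferred through call_rcu_bh(), and writers keep a plain spinlock. The reader-side rule in isolation (hash walk elided):

#include <linux/rcupdate.h>
#include <linux/atomic.h>

struct demo_inst {
        atomic_t use;
        /* hlist linkage, payload ... */
};

/* 'inst' was found on an RCU-protected chain (caller holds
 * rcu_read_lock_bh()); take a reference only if the object is still
 * live, otherwise treat it as not found. */
static struct demo_inst *demo_inst_get(struct demo_inst *inst)
{
        if (inst && !atomic_inc_not_zero(&inst->use))
                inst = NULL;
        return inst;
}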
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 12e1ab37fcd8..68e67d19724d 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -46,17 +46,19 @@ struct nfqnl_instance {
46 int peer_pid; 46 int peer_pid;
47 unsigned int queue_maxlen; 47 unsigned int queue_maxlen;
48 unsigned int copy_range; 48 unsigned int copy_range;
49 unsigned int queue_total;
50 unsigned int queue_dropped; 49 unsigned int queue_dropped;
51 unsigned int queue_user_dropped; 50 unsigned int queue_user_dropped;
52 51
53 unsigned int id_sequence; /* 'sequence' of pkt ids */
54 52
55 u_int16_t queue_num; /* number of this queue */ 53 u_int16_t queue_num; /* number of this queue */
56 u_int8_t copy_mode; 54 u_int8_t copy_mode;
57 55/*
58 spinlock_t lock; 56 * Following fields are dirtied for each queued packet,
59 57 * keep them in same cache line if possible.
58 */
59 spinlock_t lock;
60 unsigned int queue_total;
61 atomic_t id_sequence; /* 'sequence' of pkt ids */
60 struct list_head queue_list; /* packets in queue */ 62 struct list_head queue_list; /* packets in queue */
61}; 63};
62 64
@@ -238,32 +240,24 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
238 240
239 outdev = entry->outdev; 241 outdev = entry->outdev;
240 242
241 spin_lock_bh(&queue->lock); 243 switch ((enum nfqnl_config_mode)ACCESS_ONCE(queue->copy_mode)) {
242
243 switch ((enum nfqnl_config_mode)queue->copy_mode) {
244 case NFQNL_COPY_META: 244 case NFQNL_COPY_META:
245 case NFQNL_COPY_NONE: 245 case NFQNL_COPY_NONE:
246 break; 246 break;
247 247
248 case NFQNL_COPY_PACKET: 248 case NFQNL_COPY_PACKET:
249 if (entskb->ip_summed == CHECKSUM_PARTIAL && 249 if (entskb->ip_summed == CHECKSUM_PARTIAL &&
250 skb_checksum_help(entskb)) { 250 skb_checksum_help(entskb))
251 spin_unlock_bh(&queue->lock);
252 return NULL; 251 return NULL;
253 } 252
254 if (queue->copy_range == 0 253 data_len = ACCESS_ONCE(queue->copy_range);
255 || queue->copy_range > entskb->len) 254 if (data_len == 0 || data_len > entskb->len)
256 data_len = entskb->len; 255 data_len = entskb->len;
257 else
258 data_len = queue->copy_range;
259 256
260 size += nla_total_size(data_len); 257 size += nla_total_size(data_len);
261 break; 258 break;
262 } 259 }
263 260
264 entry->id = queue->id_sequence++;
265
266 spin_unlock_bh(&queue->lock);
267 261
268 skb = alloc_skb(size, GFP_ATOMIC); 262 skb = alloc_skb(size, GFP_ATOMIC);
269 if (!skb) 263 if (!skb)
@@ -278,6 +272,7 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
278 nfmsg->version = NFNETLINK_V0; 272 nfmsg->version = NFNETLINK_V0;
279 nfmsg->res_id = htons(queue->queue_num); 273 nfmsg->res_id = htons(queue->queue_num);
280 274
275 entry->id = atomic_inc_return(&queue->id_sequence);
281 pmsg.packet_id = htonl(entry->id); 276 pmsg.packet_id = htonl(entry->id);
282 pmsg.hw_protocol = entskb->protocol; 277 pmsg.hw_protocol = entskb->protocol;
283 pmsg.hook = entry->hook; 278 pmsg.hook = entry->hook;
@@ -296,8 +291,9 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
296 NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV, 291 NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV,
297 htonl(indev->ifindex)); 292 htonl(indev->ifindex));
298 /* this is the bridge group "brX" */ 293 /* this is the bridge group "brX" */
294 /* rcu_read_lock()ed by __nf_queue */
299 NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV, 295 NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV,
300 htonl(indev->br_port->br->dev->ifindex)); 296 htonl(br_port_get_rcu(indev)->br->dev->ifindex));
301 } else { 297 } else {
302 /* Case 2: indev is bridge group, we need to look for 298 /* Case 2: indev is bridge group, we need to look for
303 * physical device (when called from ipv4) */ 299 * physical device (when called from ipv4) */
@@ -321,8 +317,9 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
321 NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV, 317 NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV,
322 htonl(outdev->ifindex)); 318 htonl(outdev->ifindex));
323 /* this is the bridge group "brX" */ 319 /* this is the bridge group "brX" */
320 /* rcu_read_lock()ed by __nf_queue */
324 NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV, 321 NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV,
325 htonl(outdev->br_port->br->dev->ifindex)); 322 htonl(br_port_get_rcu(outdev)->br->dev->ifindex));
326 } else { 323 } else {
327 /* Case 2: outdev is bridge group, we need to look for 324 /* Case 2: outdev is bridge group, we need to look for
328 * physical output device (when called from ipv4) */ 325 * physical output device (when called from ipv4) */
@@ -866,7 +863,7 @@ static int seq_show(struct seq_file *s, void *v)
866 inst->peer_pid, inst->queue_total, 863 inst->peer_pid, inst->queue_total,
867 inst->copy_mode, inst->copy_range, 864 inst->copy_mode, inst->copy_range,
868 inst->queue_dropped, inst->queue_user_dropped, 865 inst->queue_dropped, inst->queue_user_dropped,
869 inst->id_sequence, 1); 866 atomic_read(&inst->id_sequence), 1);
870} 867}
871 868
872static const struct seq_operations nfqnl_seq_ops = { 869static const struct seq_operations nfqnl_seq_ops = {
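Two changes take queue->lock out of nfqnl_build_packet_message() entirely: copy_mode and copy_range are sampled with ACCESS_ONCE() (a racy but harmless read, since the value is clamped to entskb->len anyway), and packet ids come from an atomic counter rather than a locked increment; the per-packet dirty fields are also regrouped onto one cache line. The id allocation, sketched under the assumption that ids only need uniqueness, not lock-step ordering with queueing:

#include <linux/atomic.h>

/* Lock-free per-queue packet id; u32 wraparound is acceptable for
 * netlink packet ids that only pair requests with verdicts. */
static u32 demo_next_packet_id(atomic_t *seq)
{
        return (u32)atomic_inc_return(seq);
}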
diff --git a/net/netfilter/xt_CHECKSUM.c b/net/netfilter/xt_CHECKSUM.c
new file mode 100644
index 000000000000..0f642ef8cd26
--- /dev/null
+++ b/net/netfilter/xt_CHECKSUM.c
@@ -0,0 +1,70 @@
1/* iptables module for the packet checksum mangling
2 *
3 * (C) 2002 by Harald Welte <laforge@netfilter.org>
4 * (C) 2010 Red Hat, Inc.
5 *
6 * Author: Michael S. Tsirkin <mst@redhat.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11*/
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13#include <linux/module.h>
14#include <linux/skbuff.h>
15
16#include <linux/netfilter/x_tables.h>
17#include <linux/netfilter/xt_CHECKSUM.h>
18
19MODULE_LICENSE("GPL");
20MODULE_AUTHOR("Michael S. Tsirkin <mst@redhat.com>");
21MODULE_DESCRIPTION("Xtables: checksum modification");
22MODULE_ALIAS("ipt_CHECKSUM");
23MODULE_ALIAS("ip6t_CHECKSUM");
24
25static unsigned int
26checksum_tg(struct sk_buff *skb, const struct xt_action_param *par)
27{
28 if (skb->ip_summed == CHECKSUM_PARTIAL)
29 skb_checksum_help(skb);
30
31 return XT_CONTINUE;
32}
33
34static int checksum_tg_check(const struct xt_tgchk_param *par)
35{
36 const struct xt_CHECKSUM_info *einfo = par->targinfo;
37
38 if (einfo->operation & ~XT_CHECKSUM_OP_FILL) {
39 pr_info("unsupported CHECKSUM operation %x\n", einfo->operation);
40 return -EINVAL;
41 }
42 if (!einfo->operation) {
43 pr_info("no CHECKSUM operation enabled\n");
44 return -EINVAL;
45 }
46 return 0;
47}
48
49static struct xt_target checksum_tg_reg __read_mostly = {
50 .name = "CHECKSUM",
51 .family = NFPROTO_UNSPEC,
52 .target = checksum_tg,
53 .targetsize = sizeof(struct xt_CHECKSUM_info),
54 .table = "mangle",
55 .checkentry = checksum_tg_check,
56 .me = THIS_MODULE,
57};
58
59static int __init checksum_tg_init(void)
60{
61 return xt_register_target(&checksum_tg_reg);
62}
63
64static void __exit checksum_tg_exit(void)
65{
66 xt_unregister_target(&checksum_tg_reg);
67}
68
69module_init(checksum_tg_init);
70module_exit(checksum_tg_exit);
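xt_CHECKSUM's whole job is the body of checksum_tg(): if the skb's checksum was deferred to hardware (CHECKSUM_PARTIAL), fill it in software. That matters when the packet is consumed by something that never reaches offloading hardware, e.g. a DHCP client inside a virtual guest; note the target above deliberately ignores skb_checksum_help() failures. A minimal sketch of the action with a hypothetical stricter error policy, for contrast:

#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/netfilter/x_tables.h>

static unsigned int demo_checksum_fill(struct sk_buff *skb)
{
        /* CHECKSUM_PARTIAL: the stack left the sum for the NIC; compute
         * it in software so a non-offloading consumer sees valid data. */
        if (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(skb))
                return NF_DROP;         /* hypothetical drop-on-error policy */
        return XT_CONTINUE;
}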
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 562bf3266e04..0cb6053f02fd 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -67,7 +67,7 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par)
67 return -EINVAL; 67 return -EINVAL;
68 68
69 if (info->flags & XT_CT_NOTRACK) { 69 if (info->flags & XT_CT_NOTRACK) {
70 ct = &nf_conntrack_untracked; 70 ct = nf_ct_untracked_get();
71 atomic_inc(&ct->ct_general.use); 71 atomic_inc(&ct->ct_general.use);
72 goto out; 72 goto out;
73 } 73 }
@@ -132,7 +132,7 @@ static void xt_ct_tg_destroy(const struct xt_tgdtor_param *par)
132 struct nf_conn *ct = info->ct; 132 struct nf_conn *ct = info->ct;
133 struct nf_conn_help *help; 133 struct nf_conn_help *help;
134 134
135 if (ct != &nf_conntrack_untracked) { 135 if (!nf_ct_is_untracked(ct)) {
136 help = nfct_help(ct); 136 help = nfct_help(ct);
137 if (help) 137 if (help)
138 module_put(help->helper->me); 138 module_put(help->helper->me);
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
new file mode 100644
index 000000000000..be1f22e13545
--- /dev/null
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -0,0 +1,315 @@
1/*
2 * linux/net/netfilter/xt_IDLETIMER.c
3 *
4 * Netfilter module to trigger a timer when packet matches.
5 * After timer expires a kevent will be sent.
6 *
7 * Copyright (C) 2004, 2010 Nokia Corporation
8 * Written by Timo Teras <ext-timo.teras@nokia.com>
9 *
10 * Converted to x_tables and reworked for upstream inclusion
11 * by Luciano Coelho <luciano.coelho@nokia.com>
12 *
13 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * version 2 as published by the Free Software Foundation.
18 *
19 * This program is distributed in the hope that it will be useful, but
20 * WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 * General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
27 * 02110-1301 USA
28 */
29
30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31
32#include <linux/module.h>
33#include <linux/timer.h>
34#include <linux/list.h>
35#include <linux/mutex.h>
36#include <linux/netfilter.h>
37#include <linux/netfilter/x_tables.h>
38#include <linux/netfilter/xt_IDLETIMER.h>
39#include <linux/kdev_t.h>
40#include <linux/kobject.h>
41#include <linux/workqueue.h>
42#include <linux/sysfs.h>
43
44struct idletimer_tg_attr {
45 struct attribute attr;
46 ssize_t (*show)(struct kobject *kobj,
47 struct attribute *attr, char *buf);
48};
49
50struct idletimer_tg {
51 struct list_head entry;
52 struct timer_list timer;
53 struct work_struct work;
54
55 struct kobject *kobj;
56 struct idletimer_tg_attr attr;
57
58 unsigned int refcnt;
59};
60
61static LIST_HEAD(idletimer_tg_list);
62static DEFINE_MUTEX(list_mutex);
63
64static struct kobject *idletimer_tg_kobj;
65
66static
67struct idletimer_tg *__idletimer_tg_find_by_label(const char *label)
68{
69 struct idletimer_tg *entry;
70
71 BUG_ON(!label);
72
73 list_for_each_entry(entry, &idletimer_tg_list, entry) {
74 if (!strcmp(label, entry->attr.attr.name))
75 return entry;
76 }
77
78 return NULL;
79}
80
81static ssize_t idletimer_tg_show(struct kobject *kobj, struct attribute *attr,
82 char *buf)
83{
84 struct idletimer_tg *timer;
85 unsigned long expires = 0;
86
87 mutex_lock(&list_mutex);
88
89 timer = __idletimer_tg_find_by_label(attr->name);
90 if (timer)
91 expires = timer->timer.expires;
92
93 mutex_unlock(&list_mutex);
94
95 if (time_after(expires, jiffies))
96 return sprintf(buf, "%u\n",
97 jiffies_to_msecs(expires - jiffies) / 1000);
98
99 return sprintf(buf, "0\n");
100}
101
102static void idletimer_tg_work(struct work_struct *work)
103{
104 struct idletimer_tg *timer = container_of(work, struct idletimer_tg,
105 work);
106
107 sysfs_notify(idletimer_tg_kobj, NULL, timer->attr.attr.name);
108}
109
110static void idletimer_tg_expired(unsigned long data)
111{
112 struct idletimer_tg *timer = (struct idletimer_tg *) data;
113
114 pr_debug("timer %s expired\n", timer->attr.attr.name);
115
116 schedule_work(&timer->work);
117}
118
119static int idletimer_tg_create(struct idletimer_tg_info *info)
120{
121 int ret;
122
123 info->timer = kmalloc(sizeof(*info->timer), GFP_KERNEL);
124 if (!info->timer) {
125 pr_debug("couldn't alloc timer\n");
126 ret = -ENOMEM;
127 goto out;
128 }
129
130 info->timer->attr.attr.name = kstrdup(info->label, GFP_KERNEL);
131 if (!info->timer->attr.attr.name) {
132 pr_debug("couldn't alloc attribute name\n");
133 ret = -ENOMEM;
134 goto out_free_timer;
135 }
136 info->timer->attr.attr.mode = S_IRUGO;
137 info->timer->attr.show = idletimer_tg_show;
138
139 ret = sysfs_create_file(idletimer_tg_kobj, &info->timer->attr.attr);
140 if (ret < 0) {
141 pr_debug("couldn't add file to sysfs");
142 goto out_free_attr;
143 }
144
145 list_add(&info->timer->entry, &idletimer_tg_list);
146
147 setup_timer(&info->timer->timer, idletimer_tg_expired,
148 (unsigned long) info->timer);
149 info->timer->refcnt = 1;
150
151 mod_timer(&info->timer->timer,
152 msecs_to_jiffies(info->timeout * 1000) + jiffies);
153
154 INIT_WORK(&info->timer->work, idletimer_tg_work);
155
156 return 0;
157
158out_free_attr:
159 kfree(info->timer->attr.attr.name);
160out_free_timer:
161 kfree(info->timer);
162out:
163 return ret;
164}
165
166/*
167 * The actual xt_tables plugin.
168 */
169static unsigned int idletimer_tg_target(struct sk_buff *skb,
170 const struct xt_action_param *par)
171{
172 const struct idletimer_tg_info *info = par->targinfo;
173
174 pr_debug("resetting timer %s, timeout period %u\n",
175 info->label, info->timeout);
176
177 BUG_ON(!info->timer);
178
179 mod_timer(&info->timer->timer,
180 msecs_to_jiffies(info->timeout * 1000) + jiffies);
181
182 return XT_CONTINUE;
183}
184
185static int idletimer_tg_checkentry(const struct xt_tgchk_param *par)
186{
187 struct idletimer_tg_info *info = par->targinfo;
188 int ret;
189
190 pr_debug("checkentry targinfo%s\n", info->label);
191
192 if (info->timeout == 0) {
193 pr_debug("timeout value is zero\n");
194 return -EINVAL;
195 }
196
197 if (info->label[0] == '\0' ||
198 strnlen(info->label,
199 MAX_IDLETIMER_LABEL_SIZE) == MAX_IDLETIMER_LABEL_SIZE) {
200 pr_debug("label is empty or not nul-terminated\n");
201 return -EINVAL;
202 }
203
204 mutex_lock(&list_mutex);
205
206 info->timer = __idletimer_tg_find_by_label(info->label);
207 if (info->timer) {
208 info->timer->refcnt++;
209 mod_timer(&info->timer->timer,
210 msecs_to_jiffies(info->timeout * 1000) + jiffies);
211
212 pr_debug("increased refcnt of timer %s to %u\n",
213 info->label, info->timer->refcnt);
214 } else {
215 ret = idletimer_tg_create(info);
216 if (ret < 0) {
217 pr_debug("failed to create timer\n");
218 mutex_unlock(&list_mutex);
219 return ret;
220 }
221 }
222
223 mutex_unlock(&list_mutex);
224 return 0;
225}
226
227static void idletimer_tg_destroy(const struct xt_tgdtor_param *par)
228{
229 const struct idletimer_tg_info *info = par->targinfo;
230
231 pr_debug("destroy targinfo %s\n", info->label);
232
233 mutex_lock(&list_mutex);
234
235 if (--info->timer->refcnt == 0) {
236 pr_debug("deleting timer %s\n", info->label);
237
238 list_del(&info->timer->entry);
239 del_timer_sync(&info->timer->timer);
240 sysfs_remove_file(idletimer_tg_kobj, &info->timer->attr.attr);
241 kfree(info->timer->attr.attr.name);
242 kfree(info->timer);
243 } else {
244 pr_debug("decreased refcnt of timer %s to %u\n",
245 info->label, info->timer->refcnt);
246 }
247
248 mutex_unlock(&list_mutex);
249}
250
251static struct xt_target idletimer_tg __read_mostly = {
252 .name = "IDLETIMER",
253 .family = NFPROTO_UNSPEC,
254 .target = idletimer_tg_target,
255 .targetsize = sizeof(struct idletimer_tg_info),
256 .checkentry = idletimer_tg_checkentry,
257 .destroy = idletimer_tg_destroy,
258 .me = THIS_MODULE,
259};
260
261static struct class *idletimer_tg_class;
262
263static struct device *idletimer_tg_device;
264
265static int __init idletimer_tg_init(void)
266{
267 int err;
268
269 idletimer_tg_class = class_create(THIS_MODULE, "xt_idletimer");
270 err = PTR_ERR(idletimer_tg_class);
271 if (IS_ERR(idletimer_tg_class)) {
272 pr_debug("couldn't register device class\n");
273 goto out;
274 }
275
276 idletimer_tg_device = device_create(idletimer_tg_class, NULL,
277 MKDEV(0, 0), NULL, "timers");
278 err = PTR_ERR(idletimer_tg_device);
279 if (IS_ERR(idletimer_tg_device)) {
280 pr_debug("couldn't register system device\n");
281 goto out_class;
282 }
283
284 idletimer_tg_kobj = &idletimer_tg_device->kobj;
285
286 err = xt_register_target(&idletimer_tg);
287 if (err < 0) {
288 pr_debug("couldn't register xt target\n");
289 goto out_dev;
290 }
291
292 return 0;
293out_dev:
294 device_destroy(idletimer_tg_class, MKDEV(0, 0));
295out_class:
296 class_destroy(idletimer_tg_class);
297out:
298 return err;
299}
300
301static void __exit idletimer_tg_exit(void)
302{
303 xt_unregister_target(&idletimer_tg);
304
305 device_destroy(idletimer_tg_class, MKDEV(0, 0));
306 class_destroy(idletimer_tg_class);
307}
308
309module_init(idletimer_tg_init);
310module_exit(idletimer_tg_exit);
311
312MODULE_AUTHOR("Timo Teras <ext-timo.teras@nokia.com>");
313MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
314MODULE_DESCRIPTION("Xtables: idle time monitor");
315MODULE_LICENSE("GPL v2");
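Each IDLETIMER label becomes a read-only sysfs attribute on the shared "timers" device, and idletimer_tg_show() prints the seconds remaining (0 once expired); the sysfs_notify() fired from the workqueue additionally wakes anyone polling that file when the timer goes off. A hypothetical userspace reader, assuming a rule has created the label "demo" and that the attribute lands under the class/device names registered above:

#include <stdio.h>

int main(void)
{
        char buf[32];
        /* assumed path: /sys/class/xt_idletimer/timers/<label> */
        FILE *f = fopen("/sys/class/xt_idletimer/timers/demo", "r");

        if (!f)
                return 1;
        if (fgets(buf, sizeof(buf), f))
                printf("seconds until idle: %s", buf);
        fclose(f);
        return 0;
}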
diff --git a/net/netfilter/xt_NOTRACK.c b/net/netfilter/xt_NOTRACK.c
index 512b9123252f..9d782181b6c8 100644
--- a/net/netfilter/xt_NOTRACK.c
+++ b/net/netfilter/xt_NOTRACK.c
@@ -23,7 +23,7 @@ notrack_tg(struct sk_buff *skb, const struct xt_action_param *par)
23 If there is a real ct entry corresponding to this packet, 23 If there is a real ct entry corresponding to this packet,
24 it'll hang around till timing out. We don't deal with it 24 it'll hang around till timing out. We don't deal with it
25 for performance reasons. JK */ 25 for performance reasons. JK */
26 skb->nfct = &nf_conntrack_untracked.ct_general; 26 skb->nfct = &nf_ct_untracked_get()->ct_general;
27 skb->nfctinfo = IP_CT_NEW; 27 skb->nfctinfo = IP_CT_NEW;
28 nf_conntrack_get(skb->nfct); 28 nf_conntrack_get(skb->nfct);
29 29
diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c
index 69c01e10f8af..de079abd5bc8 100644
--- a/net/netfilter/xt_RATEEST.c
+++ b/net/netfilter/xt_RATEEST.c
@@ -60,13 +60,22 @@ struct xt_rateest *xt_rateest_lookup(const char *name)
60} 60}
61EXPORT_SYMBOL_GPL(xt_rateest_lookup); 61EXPORT_SYMBOL_GPL(xt_rateest_lookup);
62 62
63static void xt_rateest_free_rcu(struct rcu_head *head)
64{
65 kfree(container_of(head, struct xt_rateest, rcu));
66}
67
63void xt_rateest_put(struct xt_rateest *est) 68void xt_rateest_put(struct xt_rateest *est)
64{ 69{
65 mutex_lock(&xt_rateest_mutex); 70 mutex_lock(&xt_rateest_mutex);
66 if (--est->refcnt == 0) { 71 if (--est->refcnt == 0) {
67 hlist_del(&est->list); 72 hlist_del(&est->list);
68 gen_kill_estimator(&est->bstats, &est->rstats); 73 gen_kill_estimator(&est->bstats, &est->rstats);
69 kfree(est); 74 /*
75 * gen_estimator est_timer() might access est->lock or bstats,
76 * wait a RCU grace period before freeing 'est'
77 */
78 call_rcu(&est->rcu, xt_rateest_free_rcu);
70 } 79 }
71 mutex_unlock(&xt_rateest_mutex); 80 mutex_unlock(&xt_rateest_mutex);
72} 81}
@@ -179,6 +188,7 @@ static int __init xt_rateest_tg_init(void)
179static void __exit xt_rateest_tg_fini(void) 188static void __exit xt_rateest_tg_fini(void)
180{ 189{
181 xt_unregister_target(&xt_rateest_tg_reg); 190 xt_unregister_target(&xt_rateest_tg_reg);
191 rcu_barrier(); /* Wait for completion of call_rcu()'s (xt_rateest_free_rcu) */
182} 192}
183 193
184 194
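The xt_RATEEST change closes a use-after-free: gen_estimator's est_timer() may still be touching est->lock or est->bstats when the last reference is dropped, so the object is now freed only after an RCU grace period, and module unload flushes any still-pending callbacks with rcu_barrier() before the callback code is unmapped. The pattern in isolation:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_est {
        struct rcu_head rcu;
        /* stats, lock ... */
};

static void demo_est_free_rcu(struct rcu_head *head)
{
        kfree(container_of(head, struct demo_est, rcu));
}

static void demo_est_put(struct demo_est *est)
{
        /* a reader may still run; free only after a grace period */
        call_rcu(&est->rcu, demo_est_free_rcu);
}

static void demo_module_exit(void)
{
        rcu_barrier();  /* wait for every queued demo_est_free_rcu() */
}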
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index 62ec021fbd50..eb81c380da1b 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -165,8 +165,8 @@ static u_int32_t tcpmss_reverse_mtu(const struct sk_buff *skb,
165 rcu_read_unlock(); 165 rcu_read_unlock();
166 166
167 if (rt != NULL) { 167 if (rt != NULL) {
168 mtu = dst_mtu(&rt->u.dst); 168 mtu = dst_mtu(&rt->dst);
169 dst_release(&rt->u.dst); 169 dst_release(&rt->dst);
170 } 170 }
171 return mtu; 171 return mtu;
172} 172}
@@ -220,15 +220,13 @@ tcpmss_tg6(struct sk_buff *skb, const struct xt_action_param *par)
220} 220}
221#endif 221#endif
222 222
223#define TH_SYN 0x02
224
225/* Must specify -p tcp --syn */ 223/* Must specify -p tcp --syn */
226static inline bool find_syn_match(const struct xt_entry_match *m) 224static inline bool find_syn_match(const struct xt_entry_match *m)
227{ 225{
228 const struct xt_tcp *tcpinfo = (const struct xt_tcp *)m->data; 226 const struct xt_tcp *tcpinfo = (const struct xt_tcp *)m->data;
229 227
230 if (strcmp(m->u.kernel.match->name, "tcp") == 0 && 228 if (strcmp(m->u.kernel.match->name, "tcp") == 0 &&
231 tcpinfo->flg_cmp & TH_SYN && 229 tcpinfo->flg_cmp & TCPHDR_SYN &&
232 !(tcpinfo->invflags & XT_TCP_INV_FLAGS)) 230 !(tcpinfo->invflags & XT_TCP_INV_FLAGS))
233 return true; 231 return true;
234 232
diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c
index 859d9fd429c8..22a2d421e7eb 100644
--- a/net/netfilter/xt_TEE.c
+++ b/net/netfilter/xt_TEE.c
@@ -77,8 +77,8 @@ tee_tg_route4(struct sk_buff *skb, const struct xt_tee_tginfo *info)
77 return false; 77 return false;
78 78
79 skb_dst_drop(skb); 79 skb_dst_drop(skb);
80 skb_dst_set(skb, &rt->u.dst); 80 skb_dst_set(skb, &rt->dst);
81 skb->dev = rt->u.dst.dev; 81 skb->dev = rt->dst.dev;
82 skb->protocol = htons(ETH_P_IP); 82 skb->protocol = htons(ETH_P_IP);
83 return true; 83 return true;
84} 84}
@@ -104,7 +104,7 @@ tee_tg4(struct sk_buff *skb, const struct xt_action_param *par)
104#ifdef WITH_CONNTRACK 104#ifdef WITH_CONNTRACK
105 /* Avoid counting cloned packets towards the original connection. */ 105 /* Avoid counting cloned packets towards the original connection. */
106 nf_conntrack_put(skb->nfct); 106 nf_conntrack_put(skb->nfct);
107 skb->nfct = &nf_conntrack_untracked.ct_general; 107 skb->nfct = &nf_ct_untracked_get()->ct_general;
108 skb->nfctinfo = IP_CT_NEW; 108 skb->nfctinfo = IP_CT_NEW;
109 nf_conntrack_get(skb->nfct); 109 nf_conntrack_get(skb->nfct);
110#endif 110#endif
@@ -177,7 +177,7 @@ tee_tg6(struct sk_buff *skb, const struct xt_action_param *par)
177 177
178#ifdef WITH_CONNTRACK 178#ifdef WITH_CONNTRACK
179 nf_conntrack_put(skb->nfct); 179 nf_conntrack_put(skb->nfct);
180 skb->nfct = &nf_conntrack_untracked.ct_general; 180 skb->nfct = &nf_ct_untracked_get()->ct_general;
181 skb->nfctinfo = IP_CT_NEW; 181 skb->nfctinfo = IP_CT_NEW;
182 nf_conntrack_get(skb->nfct); 182 nf_conntrack_get(skb->nfct);
183#endif 183#endif
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c
index e1a0dedac258..c61294d85fda 100644
--- a/net/netfilter/xt_TPROXY.c
+++ b/net/netfilter/xt_TPROXY.c
@@ -37,8 +37,10 @@ tproxy_tg(struct sk_buff *skb, const struct xt_action_param *par)
37 return NF_DROP; 37 return NF_DROP;
38 38
39 sk = nf_tproxy_get_sock_v4(dev_net(skb->dev), iph->protocol, 39 sk = nf_tproxy_get_sock_v4(dev_net(skb->dev), iph->protocol,
40 iph->saddr, tgi->laddr ? tgi->laddr : iph->daddr, 40 iph->saddr,
41 hp->source, tgi->lport ? tgi->lport : hp->dest, 41 tgi->laddr ? tgi->laddr : iph->daddr,
42 hp->source,
43 tgi->lport ? tgi->lport : hp->dest,
42 par->in, true); 44 par->in, true);
43 45
44 /* NOTE: assign_sock consumes our sk reference */ 46 /* NOTE: assign_sock consumes our sk reference */
diff --git a/net/netfilter/xt_cluster.c b/net/netfilter/xt_cluster.c
index 30b95a1c1c89..f4af1bfafb1c 100644
--- a/net/netfilter/xt_cluster.c
+++ b/net/netfilter/xt_cluster.c
@@ -120,7 +120,7 @@ xt_cluster_mt(const struct sk_buff *skb, struct xt_action_param *par)
120 if (ct == NULL) 120 if (ct == NULL)
121 return false; 121 return false;
122 122
123 if (ct == &nf_conntrack_untracked) 123 if (nf_ct_is_untracked(ct))
124 return false; 124 return false;
125 125
126 if (ct->master) 126 if (ct->master)
diff --git a/net/netfilter/xt_connbytes.c b/net/netfilter/xt_connbytes.c
index 73517835303d..5b138506690e 100644
--- a/net/netfilter/xt_connbytes.c
+++ b/net/netfilter/xt_connbytes.c
@@ -112,6 +112,16 @@ static int connbytes_mt_check(const struct xt_mtchk_param *par)
112 if (ret < 0) 112 if (ret < 0)
113 pr_info("cannot load conntrack support for proto=%u\n", 113 pr_info("cannot load conntrack support for proto=%u\n",
114 par->family); 114 par->family);
115
116 /*
117 * This filter cannot function correctly unless connection tracking
118 * accounting is enabled, so complain in the hope that someone notices.
119 */
120 if (!nf_ct_acct_enabled(par->net)) {
121 pr_warning("Forcing CT accounting to be enabled\n");
122 nf_ct_set_acct(par->net, true);
123 }
124
115 return ret; 125 return ret;
116} 126}
117 127
diff --git a/net/netfilter/xt_conntrack.c b/net/netfilter/xt_conntrack.c
index 39681f10291c..e536710ad916 100644
--- a/net/netfilter/xt_conntrack.c
+++ b/net/netfilter/xt_conntrack.c
@@ -123,11 +123,12 @@ conntrack_mt(const struct sk_buff *skb, struct xt_action_param *par,
123 123
124 ct = nf_ct_get(skb, &ctinfo); 124 ct = nf_ct_get(skb, &ctinfo);
125 125
126 if (ct == &nf_conntrack_untracked) 126 if (ct) {
127 statebit = XT_CONNTRACK_STATE_UNTRACKED; 127 if (nf_ct_is_untracked(ct))
128 else if (ct != NULL) 128 statebit = XT_CONNTRACK_STATE_UNTRACKED;
129 statebit = XT_CONNTRACK_STATE_BIT(ctinfo); 129 else
130 else 130 statebit = XT_CONNTRACK_STATE_BIT(ctinfo);
131 } else
131 statebit = XT_CONNTRACK_STATE_INVALID; 132 statebit = XT_CONNTRACK_STATE_INVALID;
132 133
133 if (info->match_flags & XT_CONNTRACK_STATE) { 134 if (info->match_flags & XT_CONNTRACK_STATE) {
diff --git a/net/netfilter/xt_cpu.c b/net/netfilter/xt_cpu.c
new file mode 100644
index 000000000000..b39db8a5cbae
--- /dev/null
+++ b/net/netfilter/xt_cpu.c
@@ -0,0 +1,63 @@
1/* Kernel module to match running CPU */
2
3/*
 4 * Might be used to distribute connections across several daemons, if
 5 * RPS (Receive Packet Steering) is enabled or the NIC is multiqueue capable,
 6 * with each RX queue IRQ affined to one CPU (1:1 mapping)
7 *
8 */
9
10/* (C) 2010 Eric Dumazet
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 */
16
17#include <linux/module.h>
18#include <linux/skbuff.h>
19#include <linux/netfilter/xt_cpu.h>
20#include <linux/netfilter/x_tables.h>
21
22MODULE_LICENSE("GPL");
23MODULE_AUTHOR("Eric Dumazet <eric.dumazet@gmail.com>");
24MODULE_DESCRIPTION("Xtables: CPU match");
25
26static int cpu_mt_check(const struct xt_mtchk_param *par)
27{
28 const struct xt_cpu_info *info = par->matchinfo;
29
30 if (info->invert & ~1)
31 return -EINVAL;
32 return 0;
33}
34
35static bool cpu_mt(const struct sk_buff *skb, struct xt_action_param *par)
36{
37 const struct xt_cpu_info *info = par->matchinfo;
38
39 return (info->cpu == smp_processor_id()) ^ info->invert;
40}
41
42static struct xt_match cpu_mt_reg __read_mostly = {
43 .name = "cpu",
44 .revision = 0,
45 .family = NFPROTO_UNSPEC,
46 .checkentry = cpu_mt_check,
47 .match = cpu_mt,
48 .matchsize = sizeof(struct xt_cpu_info),
49 .me = THIS_MODULE,
50};
51
52static int __init cpu_mt_init(void)
53{
54 return xt_register_match(&cpu_mt_reg);
55}
56
57static void __exit cpu_mt_exit(void)
58{
59 xt_unregister_match(&cpu_mt_reg);
60}
61
62module_init(cpu_mt_init);
63module_exit(cpu_mt_exit);
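
The match body is a one-liner built on the XOR-invert idiom x_tables uses throughout: XOR the raw comparison with the user's invert flag rather than branching. A self-contained userspace miniature of that idiom (hypothetical names, not kernel code):

    #include <stdbool.h>
    #include <stdio.h>

    /* mirrors cpu_mt(): the result is flipped when invert is set */
    static bool match_cpu(unsigned int pkt_cpu, unsigned int want_cpu,
                          bool invert)
    {
            return (pkt_cpu == want_cpu) ^ invert;
    }

    int main(void)
    {
            printf("%d %d %d %d\n",
                   match_cpu(0, 0, false),  /* 1: match */
                   match_cpu(1, 0, false),  /* 0: miss */
                   match_cpu(0, 0, true),   /* 0: inverted match */
                   match_cpu(1, 0, true));  /* 1: inverted miss */
            return 0;
    }

From userspace the intended shape is one rule per CPU, along the lines of iptables -t nat -A PREROUTING -p tcp --dport 80 -m cpu --cpu 0 -j REDIRECT --to-ports 8080; the exact option spelling belongs to the userspace extension and should be checked there.
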
diff --git a/net/netfilter/xt_ipvs.c b/net/netfilter/xt_ipvs.c
new file mode 100644
index 000000000000..7a4d66db95ae
--- /dev/null
+++ b/net/netfilter/xt_ipvs.c
@@ -0,0 +1,189 @@
1/*
2 * xt_ipvs - kernel module to match IPVS connection properties
3 *
4 * Author: Hannes Eder <heder@google.com>
5 */
6
7#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
8
9#include <linux/module.h>
10#include <linux/moduleparam.h>
11#include <linux/spinlock.h>
12#include <linux/skbuff.h>
13#ifdef CONFIG_IP_VS_IPV6
14#include <net/ipv6.h>
15#endif
16#include <linux/ip_vs.h>
17#include <linux/types.h>
18#include <linux/netfilter/x_tables.h>
20#include <linux/netfilter/xt_ipvs.h>
21#include <net/netfilter/nf_conntrack.h>
22
23#include <net/ip_vs.h>
24
25MODULE_AUTHOR("Hannes Eder <heder@google.com>");
26MODULE_DESCRIPTION("Xtables: match IPVS connection properties");
27MODULE_LICENSE("GPL");
28MODULE_ALIAS("ipt_ipvs");
29MODULE_ALIAS("ip6t_ipvs");
30
31/* borrowed from xt_conntrack */
32static bool ipvs_mt_addrcmp(const union nf_inet_addr *kaddr,
33 const union nf_inet_addr *uaddr,
34 const union nf_inet_addr *umask,
35 unsigned int l3proto)
36{
37 if (l3proto == NFPROTO_IPV4)
38 return ((kaddr->ip ^ uaddr->ip) & umask->ip) == 0;
39#ifdef CONFIG_IP_VS_IPV6
40 else if (l3proto == NFPROTO_IPV6)
41 return ipv6_masked_addr_cmp(&kaddr->in6, &umask->in6,
42 &uaddr->in6) == 0;
43#endif
44 else
45 return false;
46}
47
48static bool
49ipvs_mt(const struct sk_buff *skb, struct xt_action_param *par)
50{
51 const struct xt_ipvs_mtinfo *data = par->matchinfo;
52 /* ipvs_mt_check ensures that family is only NFPROTO_IPV[46]. */
53 const u_int8_t family = par->family;
54 struct ip_vs_iphdr iph;
55 struct ip_vs_protocol *pp;
56 struct ip_vs_conn *cp;
57 bool match = true;
58
59 if (data->bitmask == XT_IPVS_IPVS_PROPERTY) {
60 match = skb->ipvs_property ^
61 !!(data->invert & XT_IPVS_IPVS_PROPERTY);
62 goto out;
63 }
64
 65 /* flags other than XT_IPVS_IPVS_PROPERTY are set */
66 if (!skb->ipvs_property) {
67 match = false;
68 goto out;
69 }
70
71 ip_vs_fill_iphdr(family, skb_network_header(skb), &iph);
72
73 if (data->bitmask & XT_IPVS_PROTO)
74 if ((iph.protocol == data->l4proto) ^
75 !(data->invert & XT_IPVS_PROTO)) {
76 match = false;
77 goto out;
78 }
79
80 pp = ip_vs_proto_get(iph.protocol);
81 if (unlikely(!pp)) {
82 match = false;
83 goto out;
84 }
85
86 /*
87 * Check if the packet belongs to an existing entry
88 */
89 cp = pp->conn_out_get(family, skb, pp, &iph, iph.len, 1 /* inverse */);
90 if (unlikely(cp == NULL)) {
91 match = false;
92 goto out;
93 }
94
95 /*
 96 * We found a connection, i.e. cp != NULL, so make sure to call
 97 * __ip_vs_conn_put before returning. In our case, jump to out_put_cp.
98 */
99
100 if (data->bitmask & XT_IPVS_VPORT)
101 if ((cp->vport == data->vport) ^
102 !(data->invert & XT_IPVS_VPORT)) {
103 match = false;
104 goto out_put_cp;
105 }
106
107 if (data->bitmask & XT_IPVS_VPORTCTL)
108 if ((cp->control != NULL &&
109 cp->control->vport == data->vportctl) ^
110 !(data->invert & XT_IPVS_VPORTCTL)) {
111 match = false;
112 goto out_put_cp;
113 }
114
115 if (data->bitmask & XT_IPVS_DIR) {
116 enum ip_conntrack_info ctinfo;
117 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
118
119 if (ct == NULL || nf_ct_is_untracked(ct)) {
120 match = false;
121 goto out_put_cp;
122 }
123
124 if ((ctinfo >= IP_CT_IS_REPLY) ^
125 !!(data->invert & XT_IPVS_DIR)) {
126 match = false;
127 goto out_put_cp;
128 }
129 }
130
131 if (data->bitmask & XT_IPVS_METHOD)
132 if (((cp->flags & IP_VS_CONN_F_FWD_MASK) == data->fwd_method) ^
133 !(data->invert & XT_IPVS_METHOD)) {
134 match = false;
135 goto out_put_cp;
136 }
137
138 if (data->bitmask & XT_IPVS_VADDR) {
139 if (ipvs_mt_addrcmp(&cp->vaddr, &data->vaddr,
140 &data->vmask, family) ^
141 !(data->invert & XT_IPVS_VADDR)) {
142 match = false;
143 goto out_put_cp;
144 }
145 }
146
147out_put_cp:
148 __ip_vs_conn_put(cp);
149out:
150 pr_debug("match=%d\n", match);
151 return match;
152}
153
154static int ipvs_mt_check(const struct xt_mtchk_param *par)
155{
156 if (par->family != NFPROTO_IPV4
157#ifdef CONFIG_IP_VS_IPV6
158 && par->family != NFPROTO_IPV6
159#endif
160 ) {
161 pr_info("protocol family %u not supported\n", par->family);
162 return -EINVAL;
163 }
164
165 return 0;
166}
167
168static struct xt_match xt_ipvs_mt_reg __read_mostly = {
169 .name = "ipvs",
170 .revision = 0,
171 .family = NFPROTO_UNSPEC,
172 .match = ipvs_mt,
173 .checkentry = ipvs_mt_check,
174 .matchsize = XT_ALIGN(sizeof(struct xt_ipvs_mtinfo)),
175 .me = THIS_MODULE,
176};
177
178static int __init ipvs_mt_init(void)
179{
180 return xt_register_match(&xt_ipvs_mt_reg);
181}
182
183static void __exit ipvs_mt_exit(void)
184{
185 xt_unregister_match(&xt_ipvs_mt_reg);
186}
187
188module_init(ipvs_mt_init);
189module_exit(ipvs_mt_exit);
diff --git a/net/netfilter/xt_quota.c b/net/netfilter/xt_quota.c
index b4f7dfea5980..70eb2b4984dd 100644
--- a/net/netfilter/xt_quota.c
+++ b/net/netfilter/xt_quota.c
@@ -11,7 +11,8 @@
11#include <linux/netfilter/xt_quota.h> 11#include <linux/netfilter/xt_quota.h>
12 12
13struct xt_quota_priv { 13struct xt_quota_priv {
14 uint64_t quota; 14 spinlock_t lock;
15 uint64_t quota;
15}; 16};
16 17
17MODULE_LICENSE("GPL"); 18MODULE_LICENSE("GPL");
@@ -20,8 +21,6 @@ MODULE_DESCRIPTION("Xtables: countdown quota match");
20MODULE_ALIAS("ipt_quota"); 21MODULE_ALIAS("ipt_quota");
21MODULE_ALIAS("ip6t_quota"); 22MODULE_ALIAS("ip6t_quota");
22 23
23static DEFINE_SPINLOCK(quota_lock);
24
25static bool 24static bool
26quota_mt(const struct sk_buff *skb, struct xt_action_param *par) 25quota_mt(const struct sk_buff *skb, struct xt_action_param *par)
27{ 26{
@@ -29,7 +28,7 @@ quota_mt(const struct sk_buff *skb, struct xt_action_param *par)
29 struct xt_quota_priv *priv = q->master; 28 struct xt_quota_priv *priv = q->master;
30 bool ret = q->flags & XT_QUOTA_INVERT; 29 bool ret = q->flags & XT_QUOTA_INVERT;
31 30
32 spin_lock_bh(&quota_lock); 31 spin_lock_bh(&priv->lock);
33 if (priv->quota >= skb->len) { 32 if (priv->quota >= skb->len) {
34 priv->quota -= skb->len; 33 priv->quota -= skb->len;
35 ret = !ret; 34 ret = !ret;
@@ -37,9 +36,7 @@ quota_mt(const struct sk_buff *skb, struct xt_action_param *par)
37 /* we do not allow even small packets from now on */ 36 /* we do not allow even small packets from now on */
38 priv->quota = 0; 37 priv->quota = 0;
39 } 38 }
40 /* Copy quota back to matchinfo so that iptables can display it */ 39 spin_unlock_bh(&priv->lock);
41 q->quota = priv->quota;
42 spin_unlock_bh(&quota_lock);
43 40
44 return ret; 41 return ret;
45} 42}
@@ -55,6 +52,7 @@ static int quota_mt_check(const struct xt_mtchk_param *par)
55 if (q->master == NULL) 52 if (q->master == NULL)
56 return -ENOMEM; 53 return -ENOMEM;
57 54
55 spin_lock_init(&q->master->lock);
58 q->master->quota = q->quota; 56 q->master->quota = q->quota;
59 return 0; 57 return 0;
60} 58}
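
Giving each xt_quota instance its own spinlock removes contention between unrelated quota rules, which previously all serialized on the one global quota_lock; the lock is initialized in checkentry alongside the shared master. The copy-back of the live counter into the userspace-visible matchinfo goes away too, since the authoritative counter now lives only in the master. A runnable userspace analogy of the per-object locking pattern (pthread spinlock standing in for the kernel's; hypothetical names):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct quota_priv {
            pthread_spinlock_t lock;   /* per-object, like priv->lock */
            uint64_t quota;            /* bytes remaining */
    };

    static bool quota_consume(struct quota_priv *q, uint64_t len)
    {
            bool ok = false;

            pthread_spin_lock(&q->lock);
            if (q->quota >= len) {
                    q->quota -= len;
                    ok = true;
            } else {
                    /* as in quota_mt(): no small packets from now on */
                    q->quota = 0;
            }
            pthread_spin_unlock(&q->lock);
            return ok;
    }

    int main(void)
    {
            struct quota_priv q;

            pthread_spin_init(&q.lock, PTHREAD_PROCESS_PRIVATE);
            q.quota = 1500;
            /* prints "1 0": the second 1000-byte packet exceeds the quota */
            printf("%d %d\n", (int)quota_consume(&q, 1000),
                   (int)quota_consume(&q, 1000));
            pthread_spin_destroy(&q.lock);
            return 0;
    }
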
diff --git a/net/netfilter/xt_sctp.c b/net/netfilter/xt_sctp.c
index c04fcf385c59..ef36a56a02c6 100644
--- a/net/netfilter/xt_sctp.c
+++ b/net/netfilter/xt_sctp.c
@@ -3,6 +3,7 @@
3#include <linux/skbuff.h> 3#include <linux/skbuff.h>
4#include <net/ip.h> 4#include <net/ip.h>
5#include <net/ipv6.h> 5#include <net/ipv6.h>
6#include <net/sctp/sctp.h>
6#include <linux/sctp.h> 7#include <linux/sctp.h>
7 8
8#include <linux/netfilter/x_tables.h> 9#include <linux/netfilter/x_tables.h>
@@ -67,7 +68,7 @@ match_packet(const struct sk_buff *skb,
67 ++i, offset, sch->type, htons(sch->length), 68 ++i, offset, sch->type, htons(sch->length),
68 sch->flags); 69 sch->flags);
69#endif 70#endif
70 offset += (ntohs(sch->length) + 3) & ~3; 71 offset += WORD_ROUND(ntohs(sch->length));
71 72
72 pr_debug("skb->len: %d\toffset: %d\n", skb->len, offset); 73 pr_debug("skb->len: %d\toffset: %d\n", skb->len, offset);
73 74
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 3d54c236a1ba..1ca89908cbad 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -127,7 +127,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
127 * reply packet of an established SNAT-ted connection. */ 127 * reply packet of an established SNAT-ted connection. */
128 128
129 ct = nf_ct_get(skb, &ctinfo); 129 ct = nf_ct_get(skb, &ctinfo);
130 if (ct && (ct != &nf_conntrack_untracked) && 130 if (ct && !nf_ct_is_untracked(ct) &&
131 ((iph->protocol != IPPROTO_ICMP && 131 ((iph->protocol != IPPROTO_ICMP &&
132 ctinfo == IP_CT_IS_REPLY + IP_CT_ESTABLISHED) || 132 ctinfo == IP_CT_IS_REPLY + IP_CT_ESTABLISHED) ||
133 (iph->protocol == IPPROTO_ICMP && 133 (iph->protocol == IPPROTO_ICMP &&
diff --git a/net/netfilter/xt_state.c b/net/netfilter/xt_state.c
index e12e053d3782..a507922d80cd 100644
--- a/net/netfilter/xt_state.c
+++ b/net/netfilter/xt_state.c
@@ -26,14 +26,16 @@ state_mt(const struct sk_buff *skb, struct xt_action_param *par)
26 const struct xt_state_info *sinfo = par->matchinfo; 26 const struct xt_state_info *sinfo = par->matchinfo;
27 enum ip_conntrack_info ctinfo; 27 enum ip_conntrack_info ctinfo;
28 unsigned int statebit; 28 unsigned int statebit;
29 struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
29 30
30 if (nf_ct_is_untracked(skb)) 31 if (!ct)
31 statebit = XT_STATE_UNTRACKED;
32 else if (!nf_ct_get(skb, &ctinfo))
33 statebit = XT_STATE_INVALID; 32 statebit = XT_STATE_INVALID;
34 else 33 else {
35 statebit = XT_STATE_BIT(ctinfo); 34 if (nf_ct_is_untracked(ct))
36 35 statebit = XT_STATE_UNTRACKED;
36 else
37 statebit = XT_STATE_BIT(ctinfo);
38 }
37 return (sinfo->statemask & statebit); 39 return (sinfo->statemask & statebit);
38} 40}
39 41
diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c
index 96e62b8fd6b1..42ecb71d445f 100644
--- a/net/netfilter/xt_statistic.c
+++ b/net/netfilter/xt_statistic.c
@@ -18,8 +18,8 @@
18#include <linux/netfilter/x_tables.h> 18#include <linux/netfilter/x_tables.h>
19 19
20struct xt_statistic_priv { 20struct xt_statistic_priv {
21 uint32_t count; 21 atomic_t count;
22}; 22} ____cacheline_aligned_in_smp;
23 23
24MODULE_LICENSE("GPL"); 24MODULE_LICENSE("GPL");
25MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>"); 25MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
@@ -27,13 +27,12 @@ MODULE_DESCRIPTION("Xtables: statistics-based matching (\"Nth\", random)");
27MODULE_ALIAS("ipt_statistic"); 27MODULE_ALIAS("ipt_statistic");
28MODULE_ALIAS("ip6t_statistic"); 28MODULE_ALIAS("ip6t_statistic");
29 29
30static DEFINE_SPINLOCK(nth_lock);
31
32static bool 30static bool
33statistic_mt(const struct sk_buff *skb, struct xt_action_param *par) 31statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
34{ 32{
35 const struct xt_statistic_info *info = par->matchinfo; 33 const struct xt_statistic_info *info = par->matchinfo;
36 bool ret = info->flags & XT_STATISTIC_INVERT; 34 bool ret = info->flags & XT_STATISTIC_INVERT;
35 int nval, oval;
37 36
38 switch (info->mode) { 37 switch (info->mode) {
39 case XT_STATISTIC_MODE_RANDOM: 38 case XT_STATISTIC_MODE_RANDOM:
@@ -41,12 +40,12 @@ statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
41 ret = !ret; 40 ret = !ret;
42 break; 41 break;
43 case XT_STATISTIC_MODE_NTH: 42 case XT_STATISTIC_MODE_NTH:
44 spin_lock_bh(&nth_lock); 43 do {
45 if (info->master->count++ == info->u.nth.every) { 44 oval = atomic_read(&info->master->count);
46 info->master->count = 0; 45 nval = (oval == info->u.nth.every) ? 0 : oval + 1;
46 } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
47 if (nval == 0)
47 ret = !ret; 48 ret = !ret;
48 }
49 spin_unlock_bh(&nth_lock);
50 break; 49 break;
51 } 50 }
52 51
@@ -64,7 +63,7 @@ static int statistic_mt_check(const struct xt_mtchk_param *par)
64 info->master = kzalloc(sizeof(*info->master), GFP_KERNEL); 63 info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
65 if (info->master == NULL) 64 if (info->master == NULL)
66 return -ENOMEM; 65 return -ENOMEM;
67 info->master->count = info->u.nth.count; 66 atomic_set(&info->master->count, info->u.nth.count);
68 67
69 return 0; 68 return 0;
70} 69}
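
The NTH mode now advances its counter with a compare-and-swap loop instead of a global spinlock: read the counter, compute the next value (wrapping to 0 when it reaches every), and retry if another CPU raced in; the packet matches exactly when the counter wrapped. The ____cacheline_aligned_in_smp on the private struct keeps the hot counter from false-sharing a cache line with unrelated data. A runnable C11 sketch of the same loop, with userspace atomics standing in for atomic_cmpxchg():

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_uint count;   /* like info->master->count */

    static bool every_nth(unsigned int every)
    {
            unsigned int oval, nval;

            do {
                    oval = atomic_load(&count);
                    nval = (oval == every) ? 0 : oval + 1;
            } while (!atomic_compare_exchange_weak(&count, &oval, nval));
            return nval == 0;   /* true once per (every + 1) calls */
    }

    int main(void)
    {
            for (int i = 0; i < 6; i++)
                    printf("%d", (int)every_nth(2));   /* prints 001001 */
            printf("\n");
            return 0;
    }
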
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index a2eb965207d3..2cbf380377d5 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1076,14 +1076,15 @@ int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 pid,
1076 sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list) 1076 sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
1077 do_one_broadcast(sk, &info); 1077 do_one_broadcast(sk, &info);
1078 1078
1079 kfree_skb(skb); 1079 consume_skb(skb);
1080 1080
1081 netlink_unlock_table(); 1081 netlink_unlock_table();
1082 1082
1083 kfree_skb(info.skb2); 1083 if (info.delivery_failure) {
1084 1084 kfree_skb(info.skb2);
1085 if (info.delivery_failure)
1086 return -ENOBUFS; 1085 return -ENOBUFS;
1086 } else
1087 consume_skb(info.skb2);
1087 1088
1088 if (info.delivered) { 1089 if (info.delivered) {
1089 if (info.congested && (allocation & __GFP_WAIT)) 1090 if (info.congested && (allocation & __GFP_WAIT))
@@ -1323,19 +1324,23 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
1323 if (msg->msg_flags&MSG_OOB) 1324 if (msg->msg_flags&MSG_OOB)
1324 return -EOPNOTSUPP; 1325 return -EOPNOTSUPP;
1325 1326
1326 if (NULL == siocb->scm) 1327 if (NULL == siocb->scm) {
1327 siocb->scm = &scm; 1328 siocb->scm = &scm;
1329 memset(&scm, 0, sizeof(scm));
1330 }
1328 err = scm_send(sock, msg, siocb->scm); 1331 err = scm_send(sock, msg, siocb->scm);
1329 if (err < 0) 1332 if (err < 0)
1330 return err; 1333 return err;
1331 1334
1332 if (msg->msg_namelen) { 1335 if (msg->msg_namelen) {
1336 err = -EINVAL;
1333 if (addr->nl_family != AF_NETLINK) 1337 if (addr->nl_family != AF_NETLINK)
1334 return -EINVAL; 1338 goto out;
1335 dst_pid = addr->nl_pid; 1339 dst_pid = addr->nl_pid;
1336 dst_group = ffs(addr->nl_groups); 1340 dst_group = ffs(addr->nl_groups);
1341 err = -EPERM;
1337 if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND)) 1342 if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND))
1338 return -EPERM; 1343 goto out;
1339 } else { 1344 } else {
1340 dst_pid = nlk->dst_pid; 1345 dst_pid = nlk->dst_pid;
1341 dst_group = nlk->dst_group; 1346 dst_group = nlk->dst_group;
@@ -1387,6 +1392,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
1387 err = netlink_unicast(sk, skb, dst_pid, msg->msg_flags&MSG_DONTWAIT); 1392 err = netlink_unicast(sk, skb, dst_pid, msg->msg_flags&MSG_DONTWAIT);
1388 1393
1389out: 1394out:
1395 scm_destroy(siocb->scm);
1390 return err; 1396 return err;
1391} 1397}
1392 1398
@@ -1400,7 +1406,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
1400 struct netlink_sock *nlk = nlk_sk(sk); 1406 struct netlink_sock *nlk = nlk_sk(sk);
1401 int noblock = flags&MSG_DONTWAIT; 1407 int noblock = flags&MSG_DONTWAIT;
1402 size_t copied; 1408 size_t copied;
1403 struct sk_buff *skb, *frag __maybe_unused = NULL; 1409 struct sk_buff *skb;
1404 int err; 1410 int err;
1405 1411
1406 if (flags&MSG_OOB) 1412 if (flags&MSG_OOB)
@@ -1435,7 +1441,21 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
1435 kfree_skb(skb); 1441 kfree_skb(skb);
1436 skb = compskb; 1442 skb = compskb;
1437 } else { 1443 } else {
1438 frag = skb_shinfo(skb)->frag_list; 1444 /*
1445 * Before setting frag_list to NULL, we must get a
1446 * private copy of skb if shared (because of MSG_PEEK)
1447 */
1448 if (skb_shared(skb)) {
1449 struct sk_buff *nskb;
1450
1451 nskb = pskb_copy(skb, GFP_KERNEL);
1452 kfree_skb(skb);
1453 skb = nskb;
1454 err = -ENOMEM;
1455 if (!skb)
1456 goto out;
1457 }
1458 kfree_skb(skb_shinfo(skb)->frag_list);
1439 skb_shinfo(skb)->frag_list = NULL; 1459 skb_shinfo(skb)->frag_list = NULL;
1440 } 1460 }
1441 } 1461 }
@@ -1472,10 +1492,6 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
1472 if (flags & MSG_TRUNC) 1492 if (flags & MSG_TRUNC)
1473 copied = skb->len; 1493 copied = skb->len;
1474 1494
1475#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
1476 skb_shinfo(skb)->frag_list = frag;
1477#endif
1478
1479 skb_free_datagram(sk, skb); 1495 skb_free_datagram(sk, skb);
1480 1496
1481 if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) 1497 if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2)
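
Three points worth spelling out in the af_netlink hunks. kfree_skb() is for drops (it feeds the drop monitor), so disposal after a successful broadcast switches to consume_skb(), and skb2 is only kfree_skb()'d when delivery actually failed. The sendmsg path now zeroes its on-stack scm cookie and always runs scm_destroy() through the out label, so the early EINVAL/EPERM returns no longer leak attached credentials or file descriptors. And in recvmsg, a queued skb may still be shared because of MSG_PEEK, so clearing its frag_list in place would corrupt the other reference; the fix takes a private copy first. The general shape of that copy-on-shared step, schematically:

    if (skb_shared(skb)) {
            struct sk_buff *nskb = pskb_copy(skb, GFP_KERNEL);

            kfree_skb(skb);          /* drop our ref to the shared skb */
            if (!nskb)
                    return -ENOMEM;  /* schematic error path */
            skb = nskb;              /* private: shinfo now ours to edit */
    }
    kfree_skb(skb_shinfo(skb)->frag_list);
    skb_shinfo(skb)->frag_list = NULL;
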
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index aa4308afcc7f..26ed3e8587c2 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -303,6 +303,7 @@ int genl_register_ops(struct genl_family *family, struct genl_ops *ops)
303errout: 303errout:
304 return err; 304 return err;
305} 305}
306EXPORT_SYMBOL(genl_register_ops);
306 307
307/** 308/**
308 * genl_unregister_ops - unregister generic netlink operations 309 * genl_unregister_ops - unregister generic netlink operations
@@ -337,6 +338,7 @@ int genl_unregister_ops(struct genl_family *family, struct genl_ops *ops)
337 338
338 return -ENOENT; 339 return -ENOENT;
339} 340}
341EXPORT_SYMBOL(genl_unregister_ops);
340 342
341/** 343/**
342 * genl_register_family - register a generic netlink family 344 * genl_register_family - register a generic netlink family
@@ -405,6 +407,7 @@ errout_locked:
405errout: 407errout:
406 return err; 408 return err;
407} 409}
410EXPORT_SYMBOL(genl_register_family);
408 411
409/** 412/**
410 * genl_register_family_with_ops - register a generic netlink family 413 * genl_register_family_with_ops - register a generic netlink family
@@ -485,6 +488,7 @@ int genl_unregister_family(struct genl_family *family)
485 488
486 return -ENOENT; 489 return -ENOENT;
487} 490}
491EXPORT_SYMBOL(genl_unregister_family);
488 492
489static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) 493static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
490{ 494{
@@ -873,11 +877,7 @@ static int __init genl_init(void)
873 for (i = 0; i < GENL_FAM_TAB_SIZE; i++) 877 for (i = 0; i < GENL_FAM_TAB_SIZE; i++)
874 INIT_LIST_HEAD(&family_ht[i]); 878 INIT_LIST_HEAD(&family_ht[i]);
875 879
876 err = genl_register_family(&genl_ctrl); 880 err = genl_register_family_with_ops(&genl_ctrl, &genl_ctrl_ops, 1);
877 if (err < 0)
878 goto problem;
879
880 err = genl_register_ops(&genl_ctrl, &genl_ctrl_ops);
881 if (err < 0) 881 if (err < 0)
882 goto problem; 882 goto problem;
883 883
@@ -899,11 +899,6 @@ problem:
899 899
900subsys_initcall(genl_init); 900subsys_initcall(genl_init);
901 901
902EXPORT_SYMBOL(genl_register_ops);
903EXPORT_SYMBOL(genl_unregister_ops);
904EXPORT_SYMBOL(genl_register_family);
905EXPORT_SYMBOL(genl_unregister_family);
906
907static int genlmsg_mcast(struct sk_buff *skb, u32 pid, unsigned long group, 902static int genlmsg_mcast(struct sk_buff *skb, u32 pid, unsigned long group,
908 gfp_t flags) 903 gfp_t flags)
909{ 904{
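
The EXPORT_SYMBOL()s move from a block at the bottom of the file to sit beside their definitions, and genl_init() adopts the combined genl_register_family_with_ops(), which also unwinds the family registration if registering any op fails. A hedged sketch of a module using the same helper with this tree's signature (all "demo" names hypothetical):

    static struct genl_family demo_family = {
            .id      = GENL_ID_GENERATE,
            .name    = "demo",
            .version = 1,
            .maxattr = 0,
    };

    static int demo_doit(struct sk_buff *skb, struct genl_info *info)
    {
            return 0;   /* accept and ignore */
    }

    static struct genl_ops demo_ops[] = {
            { .cmd = 1, .doit = demo_doit },
    };

    static int __init demo_init(void)
    {
            /* registers the family and all its ops, rolling back on error */
            return genl_register_family_with_ops(&demo_family, demo_ops,
                                                 ARRAY_SIZE(demo_ops));
    }
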
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 2078a277e06b..9a17f28b1253 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -83,6 +83,7 @@
83#include <linux/if_vlan.h> 83#include <linux/if_vlan.h>
84#include <linux/virtio_net.h> 84#include <linux/virtio_net.h>
85#include <linux/errqueue.h> 85#include <linux/errqueue.h>
86#include <linux/net_tstamp.h>
86 87
87#ifdef CONFIG_INET 88#ifdef CONFIG_INET
88#include <net/inet_common.h> 89#include <net/inet_common.h>
@@ -202,6 +203,7 @@ struct packet_sock {
202 unsigned int tp_hdrlen; 203 unsigned int tp_hdrlen;
203 unsigned int tp_reserve; 204 unsigned int tp_reserve;
204 unsigned int tp_loss:1; 205 unsigned int tp_loss:1;
206 unsigned int tp_tstamp;
205 struct packet_type prot_hook ____cacheline_aligned_in_smp; 207 struct packet_type prot_hook ____cacheline_aligned_in_smp;
206}; 208};
207 209
@@ -656,6 +658,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
656 struct sk_buff *copy_skb = NULL; 658 struct sk_buff *copy_skb = NULL;
657 struct timeval tv; 659 struct timeval tv;
658 struct timespec ts; 660 struct timespec ts;
661 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
659 662
660 if (skb->pkt_type == PACKET_LOOPBACK) 663 if (skb->pkt_type == PACKET_LOOPBACK)
661 goto drop; 664 goto drop;
@@ -737,7 +740,13 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
737 h.h1->tp_snaplen = snaplen; 740 h.h1->tp_snaplen = snaplen;
738 h.h1->tp_mac = macoff; 741 h.h1->tp_mac = macoff;
739 h.h1->tp_net = netoff; 742 h.h1->tp_net = netoff;
740 if (skb->tstamp.tv64) 743 if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
744 && shhwtstamps->syststamp.tv64)
745 tv = ktime_to_timeval(shhwtstamps->syststamp);
746 else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
747 && shhwtstamps->hwtstamp.tv64)
748 tv = ktime_to_timeval(shhwtstamps->hwtstamp);
749 else if (skb->tstamp.tv64)
741 tv = ktime_to_timeval(skb->tstamp); 750 tv = ktime_to_timeval(skb->tstamp);
742 else 751 else
743 do_gettimeofday(&tv); 752 do_gettimeofday(&tv);
@@ -750,7 +759,13 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
750 h.h2->tp_snaplen = snaplen; 759 h.h2->tp_snaplen = snaplen;
751 h.h2->tp_mac = macoff; 760 h.h2->tp_mac = macoff;
752 h.h2->tp_net = netoff; 761 h.h2->tp_net = netoff;
753 if (skb->tstamp.tv64) 762 if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
763 && shhwtstamps->syststamp.tv64)
764 ts = ktime_to_timespec(shhwtstamps->syststamp);
765 else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
766 && shhwtstamps->hwtstamp.tv64)
767 ts = ktime_to_timespec(shhwtstamps->hwtstamp);
768 else if (skb->tstamp.tv64)
754 ts = ktime_to_timespec(skb->tstamp); 769 ts = ktime_to_timespec(skb->tstamp);
755 else 770 else
756 getnstimeofday(&ts); 771 getnstimeofday(&ts);
@@ -2027,6 +2042,18 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
2027 po->has_vnet_hdr = !!val; 2042 po->has_vnet_hdr = !!val;
2028 return 0; 2043 return 0;
2029 } 2044 }
2045 case PACKET_TIMESTAMP:
2046 {
2047 int val;
2048
2049 if (optlen != sizeof(val))
2050 return -EINVAL;
2051 if (copy_from_user(&val, optval, sizeof(val)))
2052 return -EFAULT;
2053
2054 po->tp_tstamp = val;
2055 return 0;
2056 }
2030 default: 2057 default:
2031 return -ENOPROTOOPT; 2058 return -ENOPROTOOPT;
2032 } 2059 }
@@ -2119,6 +2146,12 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
2119 val = po->tp_loss; 2146 val = po->tp_loss;
2120 data = &val; 2147 data = &val;
2121 break; 2148 break;
2149 case PACKET_TIMESTAMP:
2150 if (len > sizeof(int))
2151 len = sizeof(int);
2152 val = po->tp_tstamp;
2153 data = &val;
2154 break;
2122 default: 2155 default:
2123 return -ENOPROTOOPT; 2156 return -ENOPROTOOPT;
2124 } 2157 }
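
PACKET_TIMESTAMP lets a packet-socket user choose which clock fills tp_sec/tp_nsec in the rx ring: the hardware stamp mapped to system time, the raw hardware stamp, or (the old behaviour) skb->tstamp with a gettimeofday() fallback. A minimal userspace sketch, error handling trimmed:

    #include <stdio.h>
    #include <sys/socket.h>
    #include <arpa/inet.h>
    #include <linux/if_ether.h>
    #include <linux/if_packet.h>
    #include <linux/net_tstamp.h>

    int main(void)
    {
            int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
            int req = SOF_TIMESTAMPING_RAW_HARDWARE;

            if (fd < 0)
                    return 1;
            /* the ring falls back to software stamps when no hw stamp exists */
            if (setsockopt(fd, SOL_PACKET, PACKET_TIMESTAMP,
                           &req, sizeof(req)))
                    perror("PACKET_TIMESTAMP");
            return 0;
    }

getsockopt() with the same name reads the stored value back, per the getsockopt hunk above.
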
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 94d72e85a475..b2a3ae6cad78 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -698,6 +698,7 @@ static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp)
698 newsk = NULL; 698 newsk = NULL;
699 goto out; 699 goto out;
700 } 700 }
701 kfree_skb(oskb);
701 702
702 sock_hold(sk); 703 sock_hold(sk);
703 pep_sk(newsk)->listener = sk; 704 pep_sk(newsk)->listener = sk;
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
index c33da6576942..b18e48fae975 100644
--- a/net/phonet/pn_dev.c
+++ b/net/phonet/pn_dev.c
@@ -162,6 +162,14 @@ int phonet_address_add(struct net_device *dev, u8 addr)
162 return err; 162 return err;
163} 163}
164 164
165static void phonet_device_rcu_free(struct rcu_head *head)
166{
167 struct phonet_device *pnd;
168
169 pnd = container_of(head, struct phonet_device, rcu);
170 kfree(pnd);
171}
172
165int phonet_address_del(struct net_device *dev, u8 addr) 173int phonet_address_del(struct net_device *dev, u8 addr)
166{ 174{
167 struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev)); 175 struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev));
@@ -179,10 +187,9 @@ int phonet_address_del(struct net_device *dev, u8 addr)
179 pnd = NULL; 187 pnd = NULL;
180 mutex_unlock(&pndevs->lock); 188 mutex_unlock(&pndevs->lock);
181 189
182 if (pnd) { 190 if (pnd)
183 synchronize_rcu(); 191 call_rcu(&pnd->rcu, phonet_device_rcu_free);
184 kfree(pnd); 192
185 }
186 return err; 193 return err;
187} 194}
188 195
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index cbc244a128bd..b4fdaac233f7 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -109,7 +109,9 @@ static int __must_check rose_add_node(struct rose_route_struct *rose_route,
109 init_timer(&rose_neigh->t0timer); 109 init_timer(&rose_neigh->t0timer);
110 110
111 if (rose_route->ndigis != 0) { 111 if (rose_route->ndigis != 0) {
112 if ((rose_neigh->digipeat = kmalloc(sizeof(ax25_digi), GFP_KERNEL)) == NULL) { 112 rose_neigh->digipeat =
113 kmalloc(sizeof(ax25_digi), GFP_ATOMIC);
114 if (rose_neigh->digipeat == NULL) {
113 kfree(rose_neigh); 115 kfree(rose_neigh);
114 res = -ENOMEM; 116 res = -ENOMEM;
115 goto out; 117 goto out;
diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
index f0f85b0123f7..9f1729bd60de 100644
--- a/net/rxrpc/ar-peer.c
+++ b/net/rxrpc/ar-peer.c
@@ -64,8 +64,8 @@ static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer)
64 return; 64 return;
65 } 65 }
66 66
67 peer->if_mtu = dst_mtu(&rt->u.dst); 67 peer->if_mtu = dst_mtu(&rt->dst);
68 dst_release(&rt->u.dst); 68 dst_release(&rt->dst);
69 69
70 _leave(" [if_mtu %u]", peer->if_mtu); 70 _leave(" [if_mtu %u]", peer->if_mtu);
71} 71}
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 972378f47f3c..23b25f89e7e0 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -26,6 +26,11 @@
26#include <net/act_api.h> 26#include <net/act_api.h>
27#include <net/netlink.h> 27#include <net/netlink.h>
28 28
29static void tcf_common_free_rcu(struct rcu_head *head)
30{
31 kfree(container_of(head, struct tcf_common, tcfc_rcu));
32}
33
29void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo) 34void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo)
30{ 35{
31 unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask); 36 unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask);
@@ -38,7 +43,11 @@ void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo)
38 write_unlock_bh(hinfo->lock); 43 write_unlock_bh(hinfo->lock);
39 gen_kill_estimator(&p->tcfc_bstats, 44 gen_kill_estimator(&p->tcfc_bstats,
40 &p->tcfc_rate_est); 45 &p->tcfc_rate_est);
41 kfree(p); 46 /*
47 * gen_estimator est_timer() might access p->tcfc_lock
 48 * or bstats, so wait an RCU grace period before freeing p
49 */
50 call_rcu(&p->tcfc_rcu, tcf_common_free_rcu);
42 return; 51 return;
43 } 52 }
44 } 53 }
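
This is the classic call_rcu() deferral: gen_estimator's timer can still dereference the action's lock or byte counters after it is unhashed, so the kfree() is pushed past a grace period instead of blocking in synchronize_rcu() (the phonet hunk above makes the same conversion, and act_police below repeats it). The generic shape of the pattern, hedged, with hypothetical names:

    struct foo {
            int data;
            struct rcu_head rcu;
    };

    static void foo_free_rcu(struct rcu_head *head)
    {
            kfree(container_of(head, struct foo, rcu));
    }

    static void foo_release(struct foo *f)
    {
            /* unpublish f first (list_del_rcu() etc.), then: */
            call_rcu(&f->rcu, foo_free_rcu);
    }

Note the matching rcu_barrier() act_police adds at module exit: a module that queues call_rcu() callbacks must wait for them to finish before its code can be unloaded.
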
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index c0b6863e3b87..11f195af2da0 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -33,6 +33,7 @@
33static struct tcf_common *tcf_mirred_ht[MIRRED_TAB_MASK + 1]; 33static struct tcf_common *tcf_mirred_ht[MIRRED_TAB_MASK + 1];
34static u32 mirred_idx_gen; 34static u32 mirred_idx_gen;
35static DEFINE_RWLOCK(mirred_lock); 35static DEFINE_RWLOCK(mirred_lock);
36static LIST_HEAD(mirred_list);
36 37
37static struct tcf_hashinfo mirred_hash_info = { 38static struct tcf_hashinfo mirred_hash_info = {
38 .htab = tcf_mirred_ht, 39 .htab = tcf_mirred_ht,
@@ -47,7 +48,9 @@ static inline int tcf_mirred_release(struct tcf_mirred *m, int bind)
47 m->tcf_bindcnt--; 48 m->tcf_bindcnt--;
48 m->tcf_refcnt--; 49 m->tcf_refcnt--;
49 if(!m->tcf_bindcnt && m->tcf_refcnt <= 0) { 50 if(!m->tcf_bindcnt && m->tcf_refcnt <= 0) {
50 dev_put(m->tcfm_dev); 51 list_del(&m->tcfm_list);
52 if (m->tcfm_dev)
53 dev_put(m->tcfm_dev);
51 tcf_hash_destroy(&m->common, &mirred_hash_info); 54 tcf_hash_destroy(&m->common, &mirred_hash_info);
52 return 1; 55 return 1;
53 } 56 }
@@ -134,8 +137,10 @@ static int tcf_mirred_init(struct nlattr *nla, struct nlattr *est,
134 m->tcfm_ok_push = ok_push; 137 m->tcfm_ok_push = ok_push;
135 } 138 }
136 spin_unlock_bh(&m->tcf_lock); 139 spin_unlock_bh(&m->tcf_lock);
137 if (ret == ACT_P_CREATED) 140 if (ret == ACT_P_CREATED) {
141 list_add(&m->tcfm_list, &mirred_list);
138 tcf_hash_insert(pc, &mirred_hash_info); 142 tcf_hash_insert(pc, &mirred_hash_info);
143 }
139 144
140 return ret; 145 return ret;
141} 146}
@@ -160,22 +165,27 @@ static int tcf_mirred(struct sk_buff *skb, struct tc_action *a,
160 165
161 spin_lock(&m->tcf_lock); 166 spin_lock(&m->tcf_lock);
162 m->tcf_tm.lastuse = jiffies; 167 m->tcf_tm.lastuse = jiffies;
168 m->tcf_bstats.bytes += qdisc_pkt_len(skb);
169 m->tcf_bstats.packets++;
163 170
164 dev = m->tcfm_dev; 171 dev = m->tcfm_dev;
172 if (!dev) {
173 printk_once(KERN_NOTICE "tc mirred: target device is gone\n");
174 goto out;
175 }
176
165 if (!(dev->flags & IFF_UP)) { 177 if (!(dev->flags & IFF_UP)) {
166 if (net_ratelimit()) 178 if (net_ratelimit())
167 pr_notice("tc mirred to Houston: device %s is gone!\n", 179 pr_notice("tc mirred to Houston: device %s is down\n",
168 dev->name); 180 dev->name);
169 goto out; 181 goto out;
170 } 182 }
171 183
172 skb2 = skb_act_clone(skb, GFP_ATOMIC); 184 at = G_TC_AT(skb->tc_verd);
185 skb2 = skb_act_clone(skb, GFP_ATOMIC, m->tcf_action);
173 if (skb2 == NULL) 186 if (skb2 == NULL)
174 goto out; 187 goto out;
175 188
176 m->tcf_bstats.bytes += qdisc_pkt_len(skb2);
177 m->tcf_bstats.packets++;
178 at = G_TC_AT(skb->tc_verd);
179 if (!(at & AT_EGRESS)) { 189 if (!(at & AT_EGRESS)) {
180 if (m->tcfm_ok_push) 190 if (m->tcfm_ok_push)
181 skb_push(skb2, skb2->dev->hard_header_len); 191 skb_push(skb2, skb2->dev->hard_header_len);
@@ -185,16 +195,14 @@ static int tcf_mirred(struct sk_buff *skb, struct tc_action *a,
185 if (m->tcfm_eaction != TCA_EGRESS_MIRROR) 195 if (m->tcfm_eaction != TCA_EGRESS_MIRROR)
186 skb2->tc_verd = SET_TC_FROM(skb2->tc_verd, at); 196 skb2->tc_verd = SET_TC_FROM(skb2->tc_verd, at);
187 197
188 skb2->dev = dev;
189 skb2->skb_iif = skb->dev->ifindex; 198 skb2->skb_iif = skb->dev->ifindex;
199 skb2->dev = dev;
190 dev_queue_xmit(skb2); 200 dev_queue_xmit(skb2);
191 err = 0; 201 err = 0;
192 202
193out: 203out:
194 if (err) { 204 if (err) {
195 m->tcf_qstats.overlimits++; 205 m->tcf_qstats.overlimits++;
196 m->tcf_bstats.bytes += qdisc_pkt_len(skb);
197 m->tcf_bstats.packets++;
198 /* should we be asking for packet to be dropped? 206 /* should we be asking for packet to be dropped?
199 * may make sense for redirect case only 207 * may make sense for redirect case only
200 */ 208 */
@@ -232,6 +240,28 @@ nla_put_failure:
232 return -1; 240 return -1;
233} 241}
234 242
243static int mirred_device_event(struct notifier_block *unused,
244 unsigned long event, void *ptr)
245{
246 struct net_device *dev = ptr;
247 struct tcf_mirred *m;
248
249 if (event == NETDEV_UNREGISTER)
250 list_for_each_entry(m, &mirred_list, tcfm_list) {
251 if (m->tcfm_dev == dev) {
252 dev_put(dev);
253 m->tcfm_dev = NULL;
254 }
255 }
256
257 return NOTIFY_DONE;
258}
259
260static struct notifier_block mirred_device_notifier = {
261 .notifier_call = mirred_device_event,
262};
263
264
235static struct tc_action_ops act_mirred_ops = { 265static struct tc_action_ops act_mirred_ops = {
236 .kind = "mirred", 266 .kind = "mirred",
237 .hinfo = &mirred_hash_info, 267 .hinfo = &mirred_hash_info,
@@ -252,12 +282,17 @@ MODULE_LICENSE("GPL");
252 282
253static int __init mirred_init_module(void) 283static int __init mirred_init_module(void)
254{ 284{
285 int err = register_netdevice_notifier(&mirred_device_notifier);
286 if (err)
287 return err;
288
255 pr_info("Mirror/redirect action on\n"); 289 pr_info("Mirror/redirect action on\n");
256 return tcf_register_action(&act_mirred_ops); 290 return tcf_register_action(&act_mirred_ops);
257} 291}
258 292
259static void __exit mirred_cleanup_module(void) 293static void __exit mirred_cleanup_module(void)
260{ 294{
295 unregister_netdevice_notifier(&mirred_device_notifier);
261 tcf_unregister_action(&act_mirred_ops); 296 tcf_unregister_action(&act_mirred_ops);
262} 297}
263 298
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 570949417f38..d0386a413e8d 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -205,7 +205,7 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
205 { 205 {
206 struct icmphdr *icmph; 206 struct icmphdr *icmph;
207 207
208 if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + sizeof(*iph))) 208 if (!pskb_may_pull(skb, ihl + sizeof(*icmph)))
209 goto drop; 209 goto drop;
210 210
211 icmph = (void *)(skb_network_header(skb) + ihl); 211 icmph = (void *)(skb_network_header(skb) + ihl);
@@ -215,6 +215,10 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
215 (icmph->type != ICMP_PARAMETERPROB)) 215 (icmph->type != ICMP_PARAMETERPROB))
216 break; 216 break;
217 217
218 if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + sizeof(*iph)))
219 goto drop;
220
221 icmph = (void *)(skb_network_header(skb) + ihl);
218 iph = (void *)(icmph + 1); 222 iph = (void *)(icmph + 1);
219 if (egress) 223 if (egress)
220 addr = iph->daddr; 224 addr = iph->daddr;
@@ -243,7 +247,7 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
243 iph->saddr = new_addr; 247 iph->saddr = new_addr;
244 248
245 inet_proto_csum_replace4(&icmph->checksum, skb, addr, new_addr, 249 inet_proto_csum_replace4(&icmph->checksum, skb, addr, new_addr,
246 1); 250 0);
247 break; 251 break;
248 } 252 }
249 default: 253 default:
@@ -265,40 +269,29 @@ static int tcf_nat_dump(struct sk_buff *skb, struct tc_action *a,
265{ 269{
266 unsigned char *b = skb_tail_pointer(skb); 270 unsigned char *b = skb_tail_pointer(skb);
267 struct tcf_nat *p = a->priv; 271 struct tcf_nat *p = a->priv;
268 struct tc_nat *opt; 272 struct tc_nat opt;
269 struct tcf_t t; 273 struct tcf_t t;
270 int s;
271
272 s = sizeof(*opt);
273 274
274 /* netlink spinlocks held above us - must use ATOMIC */ 275 opt.old_addr = p->old_addr;
275 opt = kzalloc(s, GFP_ATOMIC); 276 opt.new_addr = p->new_addr;
276 if (unlikely(!opt)) 277 opt.mask = p->mask;
277 return -ENOBUFS; 278 opt.flags = p->flags;
278 279
279 opt->old_addr = p->old_addr; 280 opt.index = p->tcf_index;
280 opt->new_addr = p->new_addr; 281 opt.action = p->tcf_action;
281 opt->mask = p->mask; 282 opt.refcnt = p->tcf_refcnt - ref;
282 opt->flags = p->flags; 283 opt.bindcnt = p->tcf_bindcnt - bind;
283 284
284 opt->index = p->tcf_index; 285 NLA_PUT(skb, TCA_NAT_PARMS, sizeof(opt), &opt);
285 opt->action = p->tcf_action;
286 opt->refcnt = p->tcf_refcnt - ref;
287 opt->bindcnt = p->tcf_bindcnt - bind;
288
289 NLA_PUT(skb, TCA_NAT_PARMS, s, opt);
290 t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install); 286 t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
291 t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse); 287 t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
292 t.expires = jiffies_to_clock_t(p->tcf_tm.expires); 288 t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
293 NLA_PUT(skb, TCA_NAT_TM, sizeof(t), &t); 289 NLA_PUT(skb, TCA_NAT_TM, sizeof(t), &t);
294 290
295 kfree(opt);
296
297 return skb->len; 291 return skb->len;
298 292
299nla_put_failure: 293nla_put_failure:
300 nlmsg_trim(skb, b); 294 nlmsg_trim(skb, b);
301 kfree(opt);
302 return -1; 295 return -1;
303} 296}
304 297
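
Three separate fixes here. The first pskb_may_pull() now guarantees only the ICMP header, and the inner IP header is pulled just for the error types that carry one; crucially, icmph is recomputed after the second pull, because pskb_may_pull() can reallocate the header and leave earlier pointers stale. The checksum helper's last argument drops from 1 to 0 because ICMP, unlike TCP and UDP, includes no pseudo-header in its checksum. And the dump path builds struct tc_nat on the stack instead of kzalloc()ing it under netlink locks. The pointer-revalidation rule, schematically:

    if (!pskb_may_pull(skb, need))
            goto drop;
    /* skb->data may have moved: recompute, never reuse a pointer
     * taken before the pull */
    icmph = (void *)(skb_network_header(skb) + ihl);
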
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 50e3d945e1f4..a0593c9640db 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -127,8 +127,7 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
127 int i, munged = 0; 127 int i, munged = 0;
128 unsigned int off; 128 unsigned int off;
129 129
130 if (!(skb->tc_verd & TC_OK2MUNGE)) { 130 if (skb_cloned(skb)) {
131 /* should we set skb->cloned? */
132 if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { 131 if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
133 return p->tcf_action; 132 return p->tcf_action;
134 } 133 }
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 654f73dff7c1..537a48732e9e 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -97,6 +97,11 @@ nla_put_failure:
97 goto done; 97 goto done;
98} 98}
99 99
100static void tcf_police_free_rcu(struct rcu_head *head)
101{
102 kfree(container_of(head, struct tcf_police, tcf_rcu));
103}
104
100static void tcf_police_destroy(struct tcf_police *p) 105static void tcf_police_destroy(struct tcf_police *p)
101{ 106{
102 unsigned int h = tcf_hash(p->tcf_index, POL_TAB_MASK); 107 unsigned int h = tcf_hash(p->tcf_index, POL_TAB_MASK);
@@ -113,7 +118,11 @@ static void tcf_police_destroy(struct tcf_police *p)
113 qdisc_put_rtab(p->tcfp_R_tab); 118 qdisc_put_rtab(p->tcfp_R_tab);
114 if (p->tcfp_P_tab) 119 if (p->tcfp_P_tab)
115 qdisc_put_rtab(p->tcfp_P_tab); 120 qdisc_put_rtab(p->tcfp_P_tab);
116 kfree(p); 121 /*
122 * gen_estimator est_timer() might access p->tcf_lock
 123 * or bstats, so wait an RCU grace period before freeing p
124 */
125 call_rcu(&p->tcf_rcu, tcf_police_free_rcu);
117 return; 126 return;
118 } 127 }
119 } 128 }
@@ -397,6 +406,7 @@ static void __exit
397police_cleanup_module(void) 406police_cleanup_module(void)
398{ 407{
399 tcf_unregister_action(&act_police_ops); 408 tcf_unregister_action(&act_police_ops);
409 rcu_barrier(); /* Wait for completion of call_rcu()'s (tcf_police_free_rcu) */
400} 410}
401 411
402module_init(police_init_module); 412module_init(police_init_module);
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 1b4bc691d7d1..4a1d640b0cf1 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -73,10 +73,10 @@ static int tcf_simp_release(struct tcf_defact *d, int bind)
73 73
74static int alloc_defdata(struct tcf_defact *d, char *defdata) 74static int alloc_defdata(struct tcf_defact *d, char *defdata)
75{ 75{
76 d->tcfd_defdata = kstrndup(defdata, SIMP_MAX_DATA, GFP_KERNEL); 76 d->tcfd_defdata = kzalloc(SIMP_MAX_DATA, GFP_KERNEL);
77 if (unlikely(!d->tcfd_defdata)) 77 if (unlikely(!d->tcfd_defdata))
78 return -ENOMEM; 78 return -ENOMEM;
79 79 strlcpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA);
80 return 0; 80 return 0;
81} 81}
82 82
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 4f522143811e..7416a5c73b2a 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -134,10 +134,12 @@ next_knode:
134#endif 134#endif
135 135
136 for (i = n->sel.nkeys; i>0; i--, key++) { 136 for (i = n->sel.nkeys; i>0; i--, key++) {
137 unsigned int toff; 137 int toff = off + key->off + (off2 & key->offmask);
138 __be32 *data, _data; 138 __be32 *data, _data;
139 139
140 toff = off + key->off + (off2 & key->offmask); 140 if (skb_headroom(skb) + toff < 0)
141 goto out;
142
141 data = skb_header_pointer(skb, toff, 4, &_data); 143 data = skb_header_pointer(skb, toff, 4, &_data);
142 if (!data) 144 if (!data)
143 goto out; 145 goto out;
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index fcbb86a486a2..e114f23d5eae 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -52,7 +52,7 @@ struct atm_flow_data {
52 int ref; /* reference count */ 52 int ref; /* reference count */
53 struct gnet_stats_basic_packed bstats; 53 struct gnet_stats_basic_packed bstats;
54 struct gnet_stats_queue qstats; 54 struct gnet_stats_queue qstats;
55 struct atm_flow_data *next; 55 struct list_head list;
56 struct atm_flow_data *excess; /* flow for excess traffic; 56 struct atm_flow_data *excess; /* flow for excess traffic;
57 NULL to set CLP instead */ 57 NULL to set CLP instead */
58 int hdr_len; 58 int hdr_len;
@@ -61,34 +61,23 @@ struct atm_flow_data {
61 61
62struct atm_qdisc_data { 62struct atm_qdisc_data {
63 struct atm_flow_data link; /* unclassified skbs go here */ 63 struct atm_flow_data link; /* unclassified skbs go here */
64 struct atm_flow_data *flows; /* NB: "link" is also on this 64 struct list_head flows; /* NB: "link" is also on this
65 list */ 65 list */
66 struct tasklet_struct task; /* dequeue tasklet */ 66 struct tasklet_struct task; /* dequeue tasklet */
67}; 67};
68 68
69/* ------------------------- Class/flow operations ------------------------- */ 69/* ------------------------- Class/flow operations ------------------------- */
70 70
71static int find_flow(struct atm_qdisc_data *qdisc, struct atm_flow_data *flow)
72{
73 struct atm_flow_data *walk;
74
75 pr_debug("find_flow(qdisc %p,flow %p)\n", qdisc, flow);
76 for (walk = qdisc->flows; walk; walk = walk->next)
77 if (walk == flow)
78 return 1;
79 pr_debug("find_flow: not found\n");
80 return 0;
81}
82
83static inline struct atm_flow_data *lookup_flow(struct Qdisc *sch, u32 classid) 71static inline struct atm_flow_data *lookup_flow(struct Qdisc *sch, u32 classid)
84{ 72{
85 struct atm_qdisc_data *p = qdisc_priv(sch); 73 struct atm_qdisc_data *p = qdisc_priv(sch);
86 struct atm_flow_data *flow; 74 struct atm_flow_data *flow;
87 75
88 for (flow = p->flows; flow; flow = flow->next) 76 list_for_each_entry(flow, &p->flows, list) {
89 if (flow->classid == classid) 77 if (flow->classid == classid)
90 break; 78 return flow;
91 return flow; 79 }
80 return NULL;
92} 81}
93 82
94static int atm_tc_graft(struct Qdisc *sch, unsigned long arg, 83static int atm_tc_graft(struct Qdisc *sch, unsigned long arg,
@@ -99,7 +88,7 @@ static int atm_tc_graft(struct Qdisc *sch, unsigned long arg,
99 88
100 pr_debug("atm_tc_graft(sch %p,[qdisc %p],flow %p,new %p,old %p)\n", 89 pr_debug("atm_tc_graft(sch %p,[qdisc %p],flow %p,new %p,old %p)\n",
101 sch, p, flow, new, old); 90 sch, p, flow, new, old);
102 if (!find_flow(p, flow)) 91 if (list_empty(&flow->list))
103 return -EINVAL; 92 return -EINVAL;
104 if (!new) 93 if (!new)
105 new = &noop_qdisc; 94 new = &noop_qdisc;
@@ -146,20 +135,12 @@ static void atm_tc_put(struct Qdisc *sch, unsigned long cl)
146{ 135{
147 struct atm_qdisc_data *p = qdisc_priv(sch); 136 struct atm_qdisc_data *p = qdisc_priv(sch);
148 struct atm_flow_data *flow = (struct atm_flow_data *)cl; 137 struct atm_flow_data *flow = (struct atm_flow_data *)cl;
149 struct atm_flow_data **prev;
150 138
151 pr_debug("atm_tc_put(sch %p,[qdisc %p],flow %p)\n", sch, p, flow); 139 pr_debug("atm_tc_put(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
152 if (--flow->ref) 140 if (--flow->ref)
153 return; 141 return;
154 pr_debug("atm_tc_put: destroying\n"); 142 pr_debug("atm_tc_put: destroying\n");
155 for (prev = &p->flows; *prev; prev = &(*prev)->next) 143 list_del_init(&flow->list);
156 if (*prev == flow)
157 break;
158 if (!*prev) {
159 printk(KERN_CRIT "atm_tc_put: class %p not found\n", flow);
160 return;
161 }
162 *prev = flow->next;
163 pr_debug("atm_tc_put: qdisc %p\n", flow->q); 144 pr_debug("atm_tc_put: qdisc %p\n", flow->q);
164 qdisc_destroy(flow->q); 145 qdisc_destroy(flow->q);
165 tcf_destroy_chain(&flow->filter_list); 146 tcf_destroy_chain(&flow->filter_list);
@@ -274,7 +255,7 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
274 error = -EINVAL; 255 error = -EINVAL;
275 goto err_out; 256 goto err_out;
276 } 257 }
277 if (find_flow(p, flow)) { 258 if (!list_empty(&flow->list)) {
278 error = -EEXIST; 259 error = -EEXIST;
279 goto err_out; 260 goto err_out;
280 } 261 }
@@ -313,8 +294,7 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
313 flow->classid = classid; 294 flow->classid = classid;
314 flow->ref = 1; 295 flow->ref = 1;
315 flow->excess = excess; 296 flow->excess = excess;
316 flow->next = p->link.next; 297 list_add(&flow->list, &p->link.list);
317 p->link.next = flow;
318 flow->hdr_len = hdr_len; 298 flow->hdr_len = hdr_len;
319 if (hdr) 299 if (hdr)
320 memcpy(flow->hdr, hdr, hdr_len); 300 memcpy(flow->hdr, hdr, hdr_len);
@@ -335,7 +315,7 @@ static int atm_tc_delete(struct Qdisc *sch, unsigned long arg)
335 struct atm_flow_data *flow = (struct atm_flow_data *)arg; 315 struct atm_flow_data *flow = (struct atm_flow_data *)arg;
336 316
337 pr_debug("atm_tc_delete(sch %p,[qdisc %p],flow %p)\n", sch, p, flow); 317 pr_debug("atm_tc_delete(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
338 if (!find_flow(qdisc_priv(sch), flow)) 318 if (list_empty(&flow->list))
339 return -EINVAL; 319 return -EINVAL;
340 if (flow->filter_list || flow == &p->link) 320 if (flow->filter_list || flow == &p->link)
341 return -EBUSY; 321 return -EBUSY;
@@ -361,12 +341,12 @@ static void atm_tc_walk(struct Qdisc *sch, struct qdisc_walker *walker)
361 pr_debug("atm_tc_walk(sch %p,[qdisc %p],walker %p)\n", sch, p, walker); 341 pr_debug("atm_tc_walk(sch %p,[qdisc %p],walker %p)\n", sch, p, walker);
362 if (walker->stop) 342 if (walker->stop)
363 return; 343 return;
364 for (flow = p->flows; flow; flow = flow->next) { 344 list_for_each_entry(flow, &p->flows, list) {
365 if (walker->count >= walker->skip) 345 if (walker->count >= walker->skip &&
366 if (walker->fn(sch, (unsigned long)flow, walker) < 0) { 346 walker->fn(sch, (unsigned long)flow, walker) < 0) {
367 walker->stop = 1; 347 walker->stop = 1;
368 break; 348 break;
369 } 349 }
370 walker->count++; 350 walker->count++;
371 } 351 }
372} 352}
@@ -385,16 +365,17 @@ static struct tcf_proto **atm_tc_find_tcf(struct Qdisc *sch, unsigned long cl)
385static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch) 365static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
386{ 366{
387 struct atm_qdisc_data *p = qdisc_priv(sch); 367 struct atm_qdisc_data *p = qdisc_priv(sch);
388 struct atm_flow_data *flow = NULL; /* @@@ */ 368 struct atm_flow_data *flow;
389 struct tcf_result res; 369 struct tcf_result res;
390 int result; 370 int result;
391 int ret = NET_XMIT_POLICED; 371 int ret = NET_XMIT_POLICED;
392 372
393 pr_debug("atm_tc_enqueue(skb %p,sch %p,[qdisc %p])\n", skb, sch, p); 373 pr_debug("atm_tc_enqueue(skb %p,sch %p,[qdisc %p])\n", skb, sch, p);
394 result = TC_POLICE_OK; /* be nice to gcc */ 374 result = TC_POLICE_OK; /* be nice to gcc */
375 flow = NULL;
395 if (TC_H_MAJ(skb->priority) != sch->handle || 376 if (TC_H_MAJ(skb->priority) != sch->handle ||
396 !(flow = (struct atm_flow_data *)atm_tc_get(sch, skb->priority))) 377 !(flow = (struct atm_flow_data *)atm_tc_get(sch, skb->priority))) {
397 for (flow = p->flows; flow; flow = flow->next) 378 list_for_each_entry(flow, &p->flows, list) {
398 if (flow->filter_list) { 379 if (flow->filter_list) {
399 result = tc_classify_compat(skb, 380 result = tc_classify_compat(skb,
400 flow->filter_list, 381 flow->filter_list,
@@ -404,8 +385,13 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
404 flow = (struct atm_flow_data *)res.class; 385 flow = (struct atm_flow_data *)res.class;
405 if (!flow) 386 if (!flow)
406 flow = lookup_flow(sch, res.classid); 387 flow = lookup_flow(sch, res.classid);
407 break; 388 goto done;
408 } 389 }
390 }
391 flow = NULL;
392 done:
393 ;
394 }
409 if (!flow) 395 if (!flow)
410 flow = &p->link; 396 flow = &p->link;
411 else { 397 else {
@@ -477,7 +463,9 @@ static void sch_atm_dequeue(unsigned long data)
477 struct sk_buff *skb; 463 struct sk_buff *skb;
478 464
479 pr_debug("sch_atm_dequeue(sch %p,[qdisc %p])\n", sch, p); 465 pr_debug("sch_atm_dequeue(sch %p,[qdisc %p])\n", sch, p);
480 for (flow = p->link.next; flow; flow = flow->next) 466 list_for_each_entry(flow, &p->flows, list) {
467 if (flow == &p->link)
468 continue;
481 /* 469 /*
482 * If traffic is properly shaped, this won't generate nasty 470 * If traffic is properly shaped, this won't generate nasty
483 * little bursts. Otherwise, it may ... (but that's okay) 471 * little bursts. Otherwise, it may ... (but that's okay)
@@ -512,6 +500,7 @@ static void sch_atm_dequeue(unsigned long data)
512 /* atm.atm_options are already set by atm_tc_enqueue */ 500 /* atm.atm_options are already set by atm_tc_enqueue */
513 flow->vcc->send(flow->vcc, skb); 501 flow->vcc->send(flow->vcc, skb);
514 } 502 }
503 }
515} 504}
516 505
517static struct sk_buff *atm_tc_dequeue(struct Qdisc *sch) 506static struct sk_buff *atm_tc_dequeue(struct Qdisc *sch)
@@ -543,9 +532,10 @@ static unsigned int atm_tc_drop(struct Qdisc *sch)
543 unsigned int len; 532 unsigned int len;
544 533
545 pr_debug("atm_tc_drop(sch %p,[qdisc %p])\n", sch, p); 534 pr_debug("atm_tc_drop(sch %p,[qdisc %p])\n", sch, p);
546 for (flow = p->flows; flow; flow = flow->next) 535 list_for_each_entry(flow, &p->flows, list) {
547 if (flow->q->ops->drop && (len = flow->q->ops->drop(flow->q))) 536 if (flow->q->ops->drop && (len = flow->q->ops->drop(flow->q)))
548 return len; 537 return len;
538 }
549 return 0; 539 return 0;
550} 540}
551 541
@@ -554,7 +544,9 @@ static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt)
554 struct atm_qdisc_data *p = qdisc_priv(sch); 544 struct atm_qdisc_data *p = qdisc_priv(sch);
555 545
556 pr_debug("atm_tc_init(sch %p,[qdisc %p],opt %p)\n", sch, p, opt); 546 pr_debug("atm_tc_init(sch %p,[qdisc %p],opt %p)\n", sch, p, opt);
557 p->flows = &p->link; 547 INIT_LIST_HEAD(&p->flows);
548 INIT_LIST_HEAD(&p->link.list);
549 list_add(&p->link.list, &p->flows);
558 p->link.q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue, 550 p->link.q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
559 &pfifo_qdisc_ops, sch->handle); 551 &pfifo_qdisc_ops, sch->handle);
560 if (!p->link.q) 552 if (!p->link.q)
@@ -565,7 +557,6 @@ static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt)
565 p->link.sock = NULL; 557 p->link.sock = NULL;
566 p->link.classid = sch->handle; 558 p->link.classid = sch->handle;
567 p->link.ref = 1; 559 p->link.ref = 1;
568 p->link.next = NULL;
569 tasklet_init(&p->task, sch_atm_dequeue, (unsigned long)sch); 560 tasklet_init(&p->task, sch_atm_dequeue, (unsigned long)sch);
570 return 0; 561 return 0;
571} 562}
@@ -576,7 +567,7 @@ static void atm_tc_reset(struct Qdisc *sch)
576 struct atm_flow_data *flow; 567 struct atm_flow_data *flow;
577 568
578 pr_debug("atm_tc_reset(sch %p,[qdisc %p])\n", sch, p); 569 pr_debug("atm_tc_reset(sch %p,[qdisc %p])\n", sch, p);
579 for (flow = p->flows; flow; flow = flow->next) 570 list_for_each_entry(flow, &p->flows, list)
580 qdisc_reset(flow->q); 571 qdisc_reset(flow->q);
581 sch->q.qlen = 0; 572 sch->q.qlen = 0;
582} 573}
@@ -584,24 +575,17 @@ static void atm_tc_reset(struct Qdisc *sch)
584static void atm_tc_destroy(struct Qdisc *sch) 575static void atm_tc_destroy(struct Qdisc *sch)
585{ 576{
586 struct atm_qdisc_data *p = qdisc_priv(sch); 577 struct atm_qdisc_data *p = qdisc_priv(sch);
587 struct atm_flow_data *flow; 578 struct atm_flow_data *flow, *tmp;
588 579
589 pr_debug("atm_tc_destroy(sch %p,[qdisc %p])\n", sch, p); 580 pr_debug("atm_tc_destroy(sch %p,[qdisc %p])\n", sch, p);
590 for (flow = p->flows; flow; flow = flow->next) 581 list_for_each_entry(flow, &p->flows, list)
591 tcf_destroy_chain(&flow->filter_list); 582 tcf_destroy_chain(&flow->filter_list);
592 583
593 /* races ? */ 584 list_for_each_entry_safe(flow, tmp, &p->flows, list) {
594 while ((flow = p->flows)) {
595 if (flow->ref > 1) 585 if (flow->ref > 1)
596 printk(KERN_ERR "atm_destroy: %p->ref = %d\n", flow, 586 printk(KERN_ERR "atm_destroy: %p->ref = %d\n", flow,
597 flow->ref); 587 flow->ref);
598 atm_tc_put(sch, (unsigned long)flow); 588 atm_tc_put(sch, (unsigned long)flow);
599 if (p->flows == flow) {
600 printk(KERN_ERR "atm_destroy: putting flow %p didn't "
601 "kill it\n", flow);
602 p->flows = flow->next; /* brute force */
603 break;
604 }
605 } 589 }
606 tasklet_kill(&p->task); 590 tasklet_kill(&p->task);
607} 591}
@@ -615,7 +599,7 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
615 599
616 pr_debug("atm_tc_dump_class(sch %p,[qdisc %p],flow %p,skb %p,tcm %p)\n", 600 pr_debug("atm_tc_dump_class(sch %p,[qdisc %p],flow %p,skb %p,tcm %p)\n",
617 sch, p, flow, skb, tcm); 601 sch, p, flow, skb, tcm);
618 if (!find_flow(p, flow)) 602 if (list_empty(&flow->list))
619 return -EINVAL; 603 return -EINVAL;
620 tcm->tcm_handle = flow->classid; 604 tcm->tcm_handle = flow->classid;
621 tcm->tcm_info = flow->q->handle; 605 tcm->tcm_info = flow->q->handle;
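
The sch_atm conversion replaces a hand-rolled singly linked flow chain (and its O(n) find_flow() membership scan) with list_head: membership checks become list_empty() on the node itself and removal becomes list_del_init(). A self-contained userspace miniature of the intrusive-list idiom this relies on, trimmed to initialization, insertion and traversal (hypothetical names):

    #include <stddef.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    static void list_init(struct list_head *h) { h->next = h->prev = h; }

    static void list_add(struct list_head *n, struct list_head *h)
    {
            n->next = h->next;
            n->prev = h;
            h->next->prev = n;
            h->next = n;
    }

    struct flow {
            int classid;
            struct list_head list;   /* link lives inside the object,
                                        like atm_flow_data.list */
    };

    int main(void)
    {
            struct list_head flows;
            struct flow a = { .classid = 1 }, b = { .classid = 2 };
            struct list_head *p;

            list_init(&flows);
            list_add(&a.list, &flows);   /* insert at head */
            list_add(&b.list, &flows);
            /* walks b then a; prints "2 1" */
            for (p = flows.next; p != &flows; p = p->next)
                    printf("%d ", container_of(p, struct flow, list)->classid);
            printf("\n");
            return 0;
    }
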
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index a63029ef3edd..2aeb3a4386a1 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -96,7 +96,7 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
96 * Another cpu is holding lock, requeue & delay xmits for 96 * Another cpu is holding lock, requeue & delay xmits for
97 * some time. 97 * some time.
98 */ 98 */
99 __get_cpu_var(softnet_data).cpu_collision++; 99 __this_cpu_inc(softnet_data.cpu_collision);
100 ret = dev_requeue_skb(skb, q); 100 ret = dev_requeue_skb(skb, q);
101 } 101 }
102 102
@@ -205,7 +205,7 @@ void __qdisc_run(struct Qdisc *q)
205 } 205 }
206 } 206 }
207 207
208 clear_bit(__QDISC_STATE_RUNNING, &q->state); 208 qdisc_run_end(q);
209} 209}
210 210
211unsigned long dev_trans_start(struct net_device *dev) 211unsigned long dev_trans_start(struct net_device *dev)
@@ -327,6 +327,24 @@ void netif_carrier_off(struct net_device *dev)
327} 327}
328EXPORT_SYMBOL(netif_carrier_off); 328EXPORT_SYMBOL(netif_carrier_off);
329 329
330/**
331 * netif_notify_peers - notify network peers about existence of @dev
332 * @dev: network device
333 *
334 * Generate traffic such that interested network peers are aware of
335 * @dev, such as by generating a gratuitous ARP. This may be used when
336 * a device wants to inform the rest of the network about some sort of
337 * reconfiguration such as a failover event or virtual machine
338 * migration.
339 */
340void netif_notify_peers(struct net_device *dev)
341{
342 rtnl_lock();
343 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
344 rtnl_unlock();
345}
346EXPORT_SYMBOL(netif_notify_peers);
347
330/* "NOOP" scheduler: the best scheduler, recommended for all interfaces 348/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
331 under all circumstances. It is difficult to invent anything faster or 349 under all circumstances. It is difficult to invent anything faster or
332 cheaper. 350 cheaper.
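
A hypothetical caller of the new helper: a virtual NIC's resume path after a migration. Note that netif_notify_peers() takes the RTNL itself, so it must not be called with the RTNL already held.

    static int demo_resume(struct net_device *dev)
    {
            netif_carrier_on(dev);
            /* emit e.g. a gratuitous ARP so switches and peers relearn
             * which port/host now owns our addresses */
            netif_notify_peers(dev);
            return 0;
    }
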
@@ -543,6 +561,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
543 561
544 INIT_LIST_HEAD(&sch->list); 562 INIT_LIST_HEAD(&sch->list);
545 skb_queue_head_init(&sch->q); 563 skb_queue_head_init(&sch->q);
564 spin_lock_init(&sch->busylock);
546 sch->ops = ops; 565 sch->ops = ops;
547 sch->enqueue = ops->enqueue; 566 sch->enqueue = ops->enqueue;
548 sch->dequeue = ops->dequeue; 567 sch->dequeue = ops->dequeue;
@@ -779,7 +798,7 @@ static bool some_qdisc_is_busy(struct net_device *dev)
779 798
780 spin_lock_bh(root_lock); 799 spin_lock_bh(root_lock);
781 800
782 val = (test_bit(__QDISC_STATE_RUNNING, &q->state) || 801 val = (qdisc_is_running(q) ||
783 test_bit(__QDISC_STATE_SCHED, &q->state)); 802 test_bit(__QDISC_STATE_SCHED, &q->state));
784 803
785 spin_unlock_bh(root_lock); 804 spin_unlock_bh(root_lock);
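
qdisc_run_end()/qdisc_is_running() hide how the RUNNING state is stored. A simplified sketch of what such accessors look like when the flag lives in the qdisc's state bitmap (the exact representation of this bit has varied between kernel versions):

    static inline bool qdisc_is_running(struct Qdisc *qdisc)
    {
            return test_bit(__QDISC_STATE_RUNNING, &qdisc->state);
    }

    static inline void qdisc_run_end(struct Qdisc *qdisc)
    {
            clear_bit(__QDISC_STATE_RUNNING, &qdisc->state);
    }
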
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 0b52b8de562c..4be8d04b262d 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1550,7 +1550,6 @@ static const struct Qdisc_class_ops htb_class_ops = {
1550}; 1550};
1551 1551
1552static struct Qdisc_ops htb_qdisc_ops __read_mostly = { 1552static struct Qdisc_ops htb_qdisc_ops __read_mostly = {
1553 .next = NULL,
1554 .cl_ops = &htb_class_ops, 1553 .cl_ops = &htb_class_ops,
1555 .id = "htb", 1554 .id = "htb",
1556 .priv_size = sizeof(struct htb_sched), 1555 .priv_size = sizeof(struct htb_sched),
@@ -1561,7 +1560,6 @@ static struct Qdisc_ops htb_qdisc_ops __read_mostly = {
1561 .init = htb_init, 1560 .init = htb_init,
1562 .reset = htb_reset, 1561 .reset = htb_reset,
1563 .destroy = htb_destroy, 1562 .destroy = htb_destroy,
1564 .change = NULL /* htb_change */,
1565 .dump = htb_dump, 1563 .dump = htb_dump,
1566 .owner = THIS_MODULE, 1564 .owner = THIS_MODULE,
1567}; 1565};
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 3415b6ce1c0a..807643bdcbac 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -449,6 +449,7 @@ static __init void teql_master_setup(struct net_device *dev)
449 dev->tx_queue_len = 100; 449 dev->tx_queue_len = 100;
450 dev->flags = IFF_NOARP; 450 dev->flags = IFF_NOARP;
451 dev->hard_header_len = LL_MAX_HEADER; 451 dev->hard_header_len = LL_MAX_HEADER;
452 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
452} 453}
453 454
454static LIST_HEAD(master_dev_list); 455static LIST_HEAD(master_dev_list);
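
Clearing IFF_XMIT_DST_RELEASE matters because the core transmit path otherwise drops the route cache entry before the device sees the skb; teql re-enqueues skbs to slave devices and still needs it. Simplified excerpt of the check in dev_hard_start_xmit():

    if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
            skb_dst_drop(skb);      /* default: dst unused past this point */
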
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index e41feff19e43..0b85e5256434 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -172,7 +172,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
172 asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = 172 asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
173 (unsigned long)sp->autoclose * HZ; 173 (unsigned long)sp->autoclose * HZ;
174 174
175 /* Initilizes the timers */ 175 /* Initializes the timers */
176 for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) 176 for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
177 setup_timer(&asoc->timers[i], sctp_timer_events[i], 177 setup_timer(&asoc->timers[i], sctp_timer_events[i],
178 (unsigned long)asoc); 178 (unsigned long)asoc);
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 182749867c72..5027b83f1cc0 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -490,7 +490,7 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
490 __func__, &fl.fl4_dst, &fl.fl4_src); 490 __func__, &fl.fl4_dst, &fl.fl4_src);
491 491
492 if (!ip_route_output_key(&init_net, &rt, &fl)) { 492 if (!ip_route_output_key(&init_net, &rt, &fl)) {
493 dst = &rt->u.dst; 493 dst = &rt->dst;
494 } 494 }
495 495
496 /* If there is no association or if a source address is passed, no 496 /* If there is no association or if a source address is passed, no
@@ -534,7 +534,7 @@ static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc,
534 fl.fl4_src = laddr->a.v4.sin_addr.s_addr; 534 fl.fl4_src = laddr->a.v4.sin_addr.s_addr;
535 fl.fl_ip_sport = laddr->a.v4.sin_port; 535 fl.fl_ip_sport = laddr->a.v4.sin_port;
536 if (!ip_route_output_key(&init_net, &rt, &fl)) { 536 if (!ip_route_output_key(&init_net, &rt, &fl)) {
537 dst = &rt->u.dst; 537 dst = &rt->dst;
538 goto out_unlock; 538 goto out_unlock;
539 } 539 }
540 } 540 }
@@ -1002,7 +1002,8 @@ int sctp_register_pf(struct sctp_pf *pf, sa_family_t family)
1002static inline int init_sctp_mibs(void) 1002static inline int init_sctp_mibs(void)
1003{ 1003{
1004 return snmp_mib_init((void __percpu **)sctp_statistics, 1004 return snmp_mib_init((void __percpu **)sctp_statistics,
1005 sizeof(struct sctp_mib)); 1005 sizeof(struct sctp_mib),
1006 __alignof__(struct sctp_mib));
1006} 1007}
1007 1008
1008static inline void cleanup_sctp_mibs(void) 1009static inline void cleanup_sctp_mibs(void)
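
The extra argument exists because a per-cpu allocator cannot infer a type's alignment from its size alone. A sketch of the underlying allocation using the generic per-cpu allocator (the wrapper name demo_init_mib is illustrative):

    #include <linux/percpu.h>

    static int demo_init_mib(void __percpu **ptr)
    {
            /* sizeof() gives the footprint, __alignof__() the ABI
             * alignment; both are needed to place each cpu's copy */
            *ptr = __alloc_percpu(sizeof(struct sctp_mib),
                                  __alignof__(struct sctp_mib));
            return *ptr ? 0 : -ENOMEM;
    }
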
@@ -1162,7 +1163,7 @@ SCTP_STATIC __init int sctp_init(void)
1162 /* Set the pressure threshold to be a fraction of global memory that 1163 /* Set the pressure threshold to be a fraction of global memory that
1163 * is up to 1/2 at 256 MB, decreasing toward zero with the amount of 1164 * is up to 1/2 at 256 MB, decreasing toward zero with the amount of
1164 * memory, with a floor of 128 pages. 1165 * memory, with a floor of 128 pages.
1165 * Note this initalizes the data in sctpv6_prot too 1166 * Note this initializes the data in sctpv6_prot too
1166 * Unabashedly stolen from tcp_init 1167 * Unabashedly stolen from tcp_init
1167 */ 1168 */
1168 nr_pages = totalram_pages - totalhigh_pages; 1169 nr_pages = totalram_pages - totalhigh_pages;
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index bd2a50b482ac..246f92924658 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1817,7 +1817,7 @@ malformed:
1817struct __sctp_missing { 1817struct __sctp_missing {
1818 __be32 num_missing; 1818 __be32 num_missing;
1819 __be16 type; 1819 __be16 type;
1820} __attribute__((packed)); 1820} __packed;
1821 1821
1822/* 1822/*
1823 * Report a missing mandatory parameter. 1823 * Report a missing mandatory parameter.
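
__packed is the kernel's shorthand for the GCC attribute (include/linux/compiler-gcc.h: #define __packed __attribute__((packed))), so the on-wire layout is unchanged; only the spelling is. For example:

    struct demo_wire_hdr {
            __be32 count;
            __be16 type;
    } __packed;             /* 6 bytes, no tail padding inserted */
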
diff --git a/net/socket.c b/net/socket.c
index 367d5477d00f..2270b941bcc7 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -124,7 +124,7 @@ static int sock_fasync(int fd, struct file *filp, int on);
124static ssize_t sock_sendpage(struct file *file, struct page *page, 124static ssize_t sock_sendpage(struct file *file, struct page *page,
125 int offset, size_t size, loff_t *ppos, int more); 125 int offset, size_t size, loff_t *ppos, int more);
126static ssize_t sock_splice_read(struct file *file, loff_t *ppos, 126static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
127 struct pipe_inode_info *pipe, size_t len, 127 struct pipe_inode_info *pipe, size_t len,
128 unsigned int flags); 128 unsigned int flags);
129 129
130/* 130/*
@@ -162,7 +162,7 @@ static const struct net_proto_family *net_families[NPROTO] __read_mostly;
162 * Statistics counters of the socket lists 162 * Statistics counters of the socket lists
163 */ 163 */
164 164
165static DEFINE_PER_CPU(int, sockets_in_use) = 0; 165static DEFINE_PER_CPU(int, sockets_in_use);
166 166
167/* 167/*
168 * Support routines. 168 * Support routines.
@@ -170,15 +170,6 @@ static DEFINE_PER_CPU(int, sockets_in_use) = 0;
170 * divide and look after the messy bits. 170 * divide and look after the messy bits.
171 */ 171 */
172 172
173#define MAX_SOCK_ADDR 128 /* 108 for Unix domain -
174 16 for IP, 16 for IPX,
175 24 for IPv6,
176 about 80 for AX.25
177 must be at least one bigger than
178 the AF_UNIX size (see net/unix/af_unix.c
179 :unix_mkname()).
180 */
181
182/** 173/**
183 * move_addr_to_kernel - copy a socket address into kernel space 174 * move_addr_to_kernel - copy a socket address into kernel space
184 * @uaddr: Address in user space 175 * @uaddr: Address in user space
@@ -309,9 +300,9 @@ static int init_inodecache(void)
309} 300}
310 301
311static const struct super_operations sockfs_ops = { 302static const struct super_operations sockfs_ops = {
312 .alloc_inode = sock_alloc_inode, 303 .alloc_inode = sock_alloc_inode,
313 .destroy_inode =sock_destroy_inode, 304 .destroy_inode = sock_destroy_inode,
314 .statfs = simple_statfs, 305 .statfs = simple_statfs,
315}; 306};
316 307
317static int sockfs_get_sb(struct file_system_type *fs_type, 308static int sockfs_get_sb(struct file_system_type *fs_type,
@@ -411,6 +402,7 @@ int sock_map_fd(struct socket *sock, int flags)
411 402
412 return fd; 403 return fd;
413} 404}
405EXPORT_SYMBOL(sock_map_fd);
414 406
415static struct socket *sock_from_file(struct file *file, int *err) 407static struct socket *sock_from_file(struct file *file, int *err)
416{ 408{
@@ -422,7 +414,7 @@ static struct socket *sock_from_file(struct file *file, int *err)
422} 414}
423 415
424/** 416/**
425 * sockfd_lookup - Go from a file number to its socket slot 417 * sockfd_lookup - Go from a file number to its socket slot
426 * @fd: file handle 418 * @fd: file handle
427 * @err: pointer to an error code return 419 * @err: pointer to an error code return
428 * 420 *
@@ -450,6 +442,7 @@ struct socket *sockfd_lookup(int fd, int *err)
450 fput(file); 442 fput(file);
451 return sock; 443 return sock;
452} 444}
445EXPORT_SYMBOL(sockfd_lookup);
453 446
454static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed) 447static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed)
455{ 448{
@@ -540,6 +533,7 @@ void sock_release(struct socket *sock)
540 } 533 }
541 sock->file = NULL; 534 sock->file = NULL;
542} 535}
536EXPORT_SYMBOL(sock_release);
543 537
544int sock_tx_timestamp(struct msghdr *msg, struct sock *sk, 538int sock_tx_timestamp(struct msghdr *msg, struct sock *sk,
545 union skb_shared_tx *shtx) 539 union skb_shared_tx *shtx)
@@ -586,6 +580,7 @@ int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
586 ret = wait_on_sync_kiocb(&iocb); 580 ret = wait_on_sync_kiocb(&iocb);
587 return ret; 581 return ret;
588} 582}
583EXPORT_SYMBOL(sock_sendmsg);
589 584
590int kernel_sendmsg(struct socket *sock, struct msghdr *msg, 585int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
591 struct kvec *vec, size_t num, size_t size) 586 struct kvec *vec, size_t num, size_t size)
@@ -604,6 +599,7 @@ int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
604 set_fs(oldfs); 599 set_fs(oldfs);
605 return result; 600 return result;
606} 601}
602EXPORT_SYMBOL(kernel_sendmsg);
607 603
608static int ktime2ts(ktime_t kt, struct timespec *ts) 604static int ktime2ts(ktime_t kt, struct timespec *ts)
609{ 605{
@@ -664,7 +660,6 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
664 put_cmsg(msg, SOL_SOCKET, 660 put_cmsg(msg, SOL_SOCKET,
665 SCM_TIMESTAMPING, sizeof(ts), &ts); 661 SCM_TIMESTAMPING, sizeof(ts), &ts);
666} 662}
667
668EXPORT_SYMBOL_GPL(__sock_recv_timestamp); 663EXPORT_SYMBOL_GPL(__sock_recv_timestamp);
669 664
670inline void sock_recv_drops(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) 665inline void sock_recv_drops(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
@@ -720,6 +715,7 @@ int sock_recvmsg(struct socket *sock, struct msghdr *msg,
720 ret = wait_on_sync_kiocb(&iocb); 715 ret = wait_on_sync_kiocb(&iocb);
721 return ret; 716 return ret;
722} 717}
718EXPORT_SYMBOL(sock_recvmsg);
723 719
724static int sock_recvmsg_nosec(struct socket *sock, struct msghdr *msg, 720static int sock_recvmsg_nosec(struct socket *sock, struct msghdr *msg,
725 size_t size, int flags) 721 size_t size, int flags)
@@ -752,6 +748,7 @@ int kernel_recvmsg(struct socket *sock, struct msghdr *msg,
752 set_fs(oldfs); 748 set_fs(oldfs);
753 return result; 749 return result;
754} 750}
751EXPORT_SYMBOL(kernel_recvmsg);
755 752
756static void sock_aio_dtor(struct kiocb *iocb) 753static void sock_aio_dtor(struct kiocb *iocb)
757{ 754{
@@ -774,7 +771,7 @@ static ssize_t sock_sendpage(struct file *file, struct page *page,
774} 771}
775 772
776static ssize_t sock_splice_read(struct file *file, loff_t *ppos, 773static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
777 struct pipe_inode_info *pipe, size_t len, 774 struct pipe_inode_info *pipe, size_t len,
778 unsigned int flags) 775 unsigned int flags)
779{ 776{
780 struct socket *sock = file->private_data; 777 struct socket *sock = file->private_data;
@@ -887,7 +884,7 @@ static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov,
887 */ 884 */
888 885
889static DEFINE_MUTEX(br_ioctl_mutex); 886static DEFINE_MUTEX(br_ioctl_mutex);
890static int (*br_ioctl_hook) (struct net *, unsigned int cmd, void __user *arg) = NULL; 887static int (*br_ioctl_hook) (struct net *, unsigned int cmd, void __user *arg);
891 888
892void brioctl_set(int (*hook) (struct net *, unsigned int, void __user *)) 889void brioctl_set(int (*hook) (struct net *, unsigned int, void __user *))
893{ 890{
@@ -895,7 +892,6 @@ void brioctl_set(int (*hook) (struct net *, unsigned int, void __user *))
895 br_ioctl_hook = hook; 892 br_ioctl_hook = hook;
896 mutex_unlock(&br_ioctl_mutex); 893 mutex_unlock(&br_ioctl_mutex);
897} 894}
898
899EXPORT_SYMBOL(brioctl_set); 895EXPORT_SYMBOL(brioctl_set);
900 896
901static DEFINE_MUTEX(vlan_ioctl_mutex); 897static DEFINE_MUTEX(vlan_ioctl_mutex);
@@ -907,7 +903,6 @@ void vlan_ioctl_set(int (*hook) (struct net *, void __user *))
907 vlan_ioctl_hook = hook; 903 vlan_ioctl_hook = hook;
908 mutex_unlock(&vlan_ioctl_mutex); 904 mutex_unlock(&vlan_ioctl_mutex);
909} 905}
910
911EXPORT_SYMBOL(vlan_ioctl_set); 906EXPORT_SYMBOL(vlan_ioctl_set);
912 907
913static DEFINE_MUTEX(dlci_ioctl_mutex); 908static DEFINE_MUTEX(dlci_ioctl_mutex);
@@ -919,7 +914,6 @@ void dlci_ioctl_set(int (*hook) (unsigned int, void __user *))
919 dlci_ioctl_hook = hook; 914 dlci_ioctl_hook = hook;
920 mutex_unlock(&dlci_ioctl_mutex); 915 mutex_unlock(&dlci_ioctl_mutex);
921} 916}
922
923EXPORT_SYMBOL(dlci_ioctl_set); 917EXPORT_SYMBOL(dlci_ioctl_set);
924 918
925static long sock_do_ioctl(struct net *net, struct socket *sock, 919static long sock_do_ioctl(struct net *net, struct socket *sock,
@@ -1047,6 +1041,7 @@ out_release:
1047 sock = NULL; 1041 sock = NULL;
1048 goto out; 1042 goto out;
1049} 1043}
1044EXPORT_SYMBOL(sock_create_lite);
1050 1045
1051/* No kernel lock held - perfect */ 1046/* No kernel lock held - perfect */
1052static unsigned int sock_poll(struct file *file, poll_table *wait) 1047static unsigned int sock_poll(struct file *file, poll_table *wait)
@@ -1147,6 +1142,7 @@ call_kill:
1147 rcu_read_unlock(); 1142 rcu_read_unlock();
1148 return 0; 1143 return 0;
1149} 1144}
1145EXPORT_SYMBOL(sock_wake_async);
1150 1146
1151static int __sock_create(struct net *net, int family, int type, int protocol, 1147static int __sock_create(struct net *net, int family, int type, int protocol,
1152 struct socket **res, int kern) 1148 struct socket **res, int kern)
@@ -1265,11 +1261,13 @@ int sock_create(int family, int type, int protocol, struct socket **res)
1265{ 1261{
1266 return __sock_create(current->nsproxy->net_ns, family, type, protocol, res, 0); 1262 return __sock_create(current->nsproxy->net_ns, family, type, protocol, res, 0);
1267} 1263}
1264EXPORT_SYMBOL(sock_create);
1268 1265
1269int sock_create_kern(int family, int type, int protocol, struct socket **res) 1266int sock_create_kern(int family, int type, int protocol, struct socket **res)
1270{ 1267{
1271 return __sock_create(&init_net, family, type, protocol, res, 1); 1268 return __sock_create(&init_net, family, type, protocol, res, 1);
1272} 1269}
1270EXPORT_SYMBOL(sock_create_kern);
1273 1271
1274SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol) 1272SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
1275{ 1273{
@@ -1474,7 +1472,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
1474 goto out; 1472 goto out;
1475 1473
1476 err = -ENFILE; 1474 err = -ENFILE;
1477 if (!(newsock = sock_alloc())) 1475 newsock = sock_alloc();
1476 if (!newsock)
1478 goto out_put; 1477 goto out_put;
1479 1478
1480 newsock->type = sock->type; 1479 newsock->type = sock->type;
@@ -1861,8 +1860,7 @@ SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags)
1861 if (MSG_CMSG_COMPAT & flags) { 1860 if (MSG_CMSG_COMPAT & flags) {
1862 if (get_compat_msghdr(&msg_sys, msg_compat)) 1861 if (get_compat_msghdr(&msg_sys, msg_compat))
1863 return -EFAULT; 1862 return -EFAULT;
1864 } 1863 } else if (copy_from_user(&msg_sys, msg, sizeof(struct msghdr)))
1865 else if (copy_from_user(&msg_sys, msg, sizeof(struct msghdr)))
1866 return -EFAULT; 1864 return -EFAULT;
1867 1865
1868 sock = sockfd_lookup_light(fd, &err, &fput_needed); 1866 sock = sockfd_lookup_light(fd, &err, &fput_needed);
@@ -1964,8 +1962,7 @@ static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
1964 if (MSG_CMSG_COMPAT & flags) { 1962 if (MSG_CMSG_COMPAT & flags) {
1965 if (get_compat_msghdr(msg_sys, msg_compat)) 1963 if (get_compat_msghdr(msg_sys, msg_compat))
1966 return -EFAULT; 1964 return -EFAULT;
1967 } 1965 } else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr)))
1968 else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr)))
1969 return -EFAULT; 1966 return -EFAULT;
1970 1967
1971 err = -EMSGSIZE; 1968 err = -EMSGSIZE;
@@ -2191,10 +2188,10 @@ SYSCALL_DEFINE5(recvmmsg, int, fd, struct mmsghdr __user *, mmsg,
2191/* Argument list sizes for sys_socketcall */ 2188/* Argument list sizes for sys_socketcall */
2192#define AL(x) ((x) * sizeof(unsigned long)) 2189#define AL(x) ((x) * sizeof(unsigned long))
2193static const unsigned char nargs[20] = { 2190static const unsigned char nargs[20] = {
2194 AL(0),AL(3),AL(3),AL(3),AL(2),AL(3), 2191 AL(0), AL(3), AL(3), AL(3), AL(2), AL(3),
2195 AL(3),AL(3),AL(4),AL(4),AL(4),AL(6), 2192 AL(3), AL(3), AL(4), AL(4), AL(4), AL(6),
2196 AL(6),AL(2),AL(5),AL(5),AL(3),AL(3), 2193 AL(6), AL(2), AL(5), AL(5), AL(3), AL(3),
2197 AL(4),AL(5) 2194 AL(4), AL(5)
2198}; 2195};
2199 2196
2200#undef AL 2197#undef AL
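
For context, nargs[] maps each SYS_* multiplexer number to the byte length of its argument block, and sys_socketcall() copies exactly that much from user space before dispatching (simplified):

    unsigned long a[6];

    if (call < 1 || call > SYS_RECVMMSG)
            return -EINVAL;
    if (nargs[call] > sizeof(a))
            return -EFAULT;
    if (copy_from_user(a, args, nargs[call]))
            return -EFAULT;
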
@@ -2340,6 +2337,7 @@ int sock_register(const struct net_proto_family *ops)
2340 printk(KERN_INFO "NET: Registered protocol family %d\n", ops->family); 2337 printk(KERN_INFO "NET: Registered protocol family %d\n", ops->family);
2341 return err; 2338 return err;
2342} 2339}
2340EXPORT_SYMBOL(sock_register);
2343 2341
2344/** 2342/**
2345 * sock_unregister - remove a protocol handler 2343 * sock_unregister - remove a protocol handler
@@ -2366,6 +2364,7 @@ void sock_unregister(int family)
2366 2364
2367 printk(KERN_INFO "NET: Unregistered protocol family %d\n", family); 2365 printk(KERN_INFO "NET: Unregistered protocol family %d\n", family);
2368} 2366}
2367EXPORT_SYMBOL(sock_unregister);
2369 2368
2370static int __init sock_init(void) 2369static int __init sock_init(void)
2371{ 2370{
@@ -2395,6 +2394,10 @@ static int __init sock_init(void)
2395 netfilter_init(); 2394 netfilter_init();
2396#endif 2395#endif
2397 2396
2397#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
2398 skb_timestamping_init();
2399#endif
2400
2398 return 0; 2401 return 0;
2399} 2402}
2400 2403
@@ -2490,13 +2493,13 @@ static int dev_ifconf(struct net *net, struct compat_ifconf __user *uifc32)
2490 ifc.ifc_req = NULL; 2493 ifc.ifc_req = NULL;
2491 uifc = compat_alloc_user_space(sizeof(struct ifconf)); 2494 uifc = compat_alloc_user_space(sizeof(struct ifconf));
2492 } else { 2495 } else {
2493 size_t len =((ifc32.ifc_len / sizeof (struct compat_ifreq)) + 1) * 2496 size_t len = ((ifc32.ifc_len / sizeof(struct compat_ifreq)) + 1) *
2494 sizeof (struct ifreq); 2497 sizeof(struct ifreq);
2495 uifc = compat_alloc_user_space(sizeof(struct ifconf) + len); 2498 uifc = compat_alloc_user_space(sizeof(struct ifconf) + len);
2496 ifc.ifc_len = len; 2499 ifc.ifc_len = len;
2497 ifr = ifc.ifc_req = (void __user *)(uifc + 1); 2500 ifr = ifc.ifc_req = (void __user *)(uifc + 1);
2498 ifr32 = compat_ptr(ifc32.ifcbuf); 2501 ifr32 = compat_ptr(ifc32.ifcbuf);
2499 for (i = 0; i < ifc32.ifc_len; i += sizeof (struct compat_ifreq)) { 2502 for (i = 0; i < ifc32.ifc_len; i += sizeof(struct compat_ifreq)) {
2500 if (copy_in_user(ifr, ifr32, sizeof(struct compat_ifreq))) 2503 if (copy_in_user(ifr, ifr32, sizeof(struct compat_ifreq)))
2501 return -EFAULT; 2504 return -EFAULT;
2502 ifr++; 2505 ifr++;
@@ -2516,9 +2519,9 @@ static int dev_ifconf(struct net *net, struct compat_ifconf __user *uifc32)
2516 ifr = ifc.ifc_req; 2519 ifr = ifc.ifc_req;
2517 ifr32 = compat_ptr(ifc32.ifcbuf); 2520 ifr32 = compat_ptr(ifc32.ifcbuf);
2518 for (i = 0, j = 0; 2521 for (i = 0, j = 0;
2519 i + sizeof (struct compat_ifreq) <= ifc32.ifc_len && j < ifc.ifc_len; 2522 i + sizeof(struct compat_ifreq) <= ifc32.ifc_len && j < ifc.ifc_len;
2520 i += sizeof (struct compat_ifreq), j += sizeof (struct ifreq)) { 2523 i += sizeof(struct compat_ifreq), j += sizeof(struct ifreq)) {
2521 if (copy_in_user(ifr32, ifr, sizeof (struct compat_ifreq))) 2524 if (copy_in_user(ifr32, ifr, sizeof(struct compat_ifreq)))
2522 return -EFAULT; 2525 return -EFAULT;
2523 ifr32++; 2526 ifr32++;
2524 ifr++; 2527 ifr++;
@@ -2567,7 +2570,7 @@ static int compat_siocwandev(struct net *net, struct compat_ifreq __user *uifr32
2567 compat_uptr_t uptr32; 2570 compat_uptr_t uptr32;
2568 struct ifreq __user *uifr; 2571 struct ifreq __user *uifr;
2569 2572
2570 uifr = compat_alloc_user_space(sizeof (*uifr)); 2573 uifr = compat_alloc_user_space(sizeof(*uifr));
2571 if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq))) 2574 if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq)))
2572 return -EFAULT; 2575 return -EFAULT;
2573 2576
@@ -2601,9 +2604,9 @@ static int bond_ioctl(struct net *net, unsigned int cmd,
2601 return -EFAULT; 2604 return -EFAULT;
2602 2605
2603 old_fs = get_fs(); 2606 old_fs = get_fs();
2604 set_fs (KERNEL_DS); 2607 set_fs(KERNEL_DS);
2605 err = dev_ioctl(net, cmd, &kifr); 2608 err = dev_ioctl(net, cmd, &kifr);
2606 set_fs (old_fs); 2609 set_fs(old_fs);
2607 2610
2608 return err; 2611 return err;
2609 case SIOCBONDSLAVEINFOQUERY: 2612 case SIOCBONDSLAVEINFOQUERY:
@@ -2710,9 +2713,9 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
2710 return -EFAULT; 2713 return -EFAULT;
2711 2714
2712 old_fs = get_fs(); 2715 old_fs = get_fs();
2713 set_fs (KERNEL_DS); 2716 set_fs(KERNEL_DS);
2714 err = dev_ioctl(net, cmd, (void __user *)&ifr); 2717 err = dev_ioctl(net, cmd, (void __user *)&ifr);
2715 set_fs (old_fs); 2718 set_fs(old_fs);
2716 2719
2717 if (cmd == SIOCGIFMAP && !err) { 2720 if (cmd == SIOCGIFMAP && !err) {
2718 err = copy_to_user(uifr32, &ifr, sizeof(ifr.ifr_name)); 2721 err = copy_to_user(uifr32, &ifr, sizeof(ifr.ifr_name));
@@ -2734,7 +2737,7 @@ static int compat_siocshwtstamp(struct net *net, struct compat_ifreq __user *uif
2734 compat_uptr_t uptr32; 2737 compat_uptr_t uptr32;
2735 struct ifreq __user *uifr; 2738 struct ifreq __user *uifr;
2736 2739
2737 uifr = compat_alloc_user_space(sizeof (*uifr)); 2740 uifr = compat_alloc_user_space(sizeof(*uifr));
2738 if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq))) 2741 if (copy_in_user(uifr, uifr32, sizeof(struct compat_ifreq)))
2739 return -EFAULT; 2742 return -EFAULT;
2740 2743
@@ -2750,20 +2753,20 @@ static int compat_siocshwtstamp(struct net *net, struct compat_ifreq __user *uif
2750} 2753}
2751 2754
2752struct rtentry32 { 2755struct rtentry32 {
2753 u32 rt_pad1; 2756 u32 rt_pad1;
2754 struct sockaddr rt_dst; /* target address */ 2757 struct sockaddr rt_dst; /* target address */
2755 struct sockaddr rt_gateway; /* gateway addr (RTF_GATEWAY) */ 2758 struct sockaddr rt_gateway; /* gateway addr (RTF_GATEWAY) */
2756 struct sockaddr rt_genmask; /* target network mask (IP) */ 2759 struct sockaddr rt_genmask; /* target network mask (IP) */
2757 unsigned short rt_flags; 2760 unsigned short rt_flags;
2758 short rt_pad2; 2761 short rt_pad2;
2759 u32 rt_pad3; 2762 u32 rt_pad3;
2760 unsigned char rt_tos; 2763 unsigned char rt_tos;
2761 unsigned char rt_class; 2764 unsigned char rt_class;
2762 short rt_pad4; 2765 short rt_pad4;
2763 short rt_metric; /* +1 for binary compatibility! */ 2766 short rt_metric; /* +1 for binary compatibility! */
2764 /* char * */ u32 rt_dev; /* forcing the device at add */ 2767 /* char * */ u32 rt_dev; /* forcing the device at add */
2765 u32 rt_mtu; /* per route MTU/Window */ 2768 u32 rt_mtu; /* per route MTU/Window */
2766 u32 rt_window; /* Window clamping */ 2769 u32 rt_window; /* Window clamping */
2767 unsigned short rt_irtt; /* Initial RTT */ 2770 unsigned short rt_irtt; /* Initial RTT */
2768}; 2771};
2769 2772
@@ -2793,29 +2796,29 @@ static int routing_ioctl(struct net *net, struct socket *sock,
2793 2796
2794 if (sock && sock->sk && sock->sk->sk_family == AF_INET6) { /* ipv6 */ 2797 if (sock && sock->sk && sock->sk->sk_family == AF_INET6) { /* ipv6 */
2795 struct in6_rtmsg32 __user *ur6 = argp; 2798 struct in6_rtmsg32 __user *ur6 = argp;
2796 ret = copy_from_user (&r6.rtmsg_dst, &(ur6->rtmsg_dst), 2799 ret = copy_from_user(&r6.rtmsg_dst, &(ur6->rtmsg_dst),
2797 3 * sizeof(struct in6_addr)); 2800 3 * sizeof(struct in6_addr));
2798 ret |= __get_user (r6.rtmsg_type, &(ur6->rtmsg_type)); 2801 ret |= __get_user(r6.rtmsg_type, &(ur6->rtmsg_type));
2799 ret |= __get_user (r6.rtmsg_dst_len, &(ur6->rtmsg_dst_len)); 2802 ret |= __get_user(r6.rtmsg_dst_len, &(ur6->rtmsg_dst_len));
2800 ret |= __get_user (r6.rtmsg_src_len, &(ur6->rtmsg_src_len)); 2803 ret |= __get_user(r6.rtmsg_src_len, &(ur6->rtmsg_src_len));
2801 ret |= __get_user (r6.rtmsg_metric, &(ur6->rtmsg_metric)); 2804 ret |= __get_user(r6.rtmsg_metric, &(ur6->rtmsg_metric));
2802 ret |= __get_user (r6.rtmsg_info, &(ur6->rtmsg_info)); 2805 ret |= __get_user(r6.rtmsg_info, &(ur6->rtmsg_info));
2803 ret |= __get_user (r6.rtmsg_flags, &(ur6->rtmsg_flags)); 2806 ret |= __get_user(r6.rtmsg_flags, &(ur6->rtmsg_flags));
2804 ret |= __get_user (r6.rtmsg_ifindex, &(ur6->rtmsg_ifindex)); 2807 ret |= __get_user(r6.rtmsg_ifindex, &(ur6->rtmsg_ifindex));
2805 2808
2806 r = (void *) &r6; 2809 r = (void *) &r6;
2807 } else { /* ipv4 */ 2810 } else { /* ipv4 */
2808 struct rtentry32 __user *ur4 = argp; 2811 struct rtentry32 __user *ur4 = argp;
2809 ret = copy_from_user (&r4.rt_dst, &(ur4->rt_dst), 2812 ret = copy_from_user(&r4.rt_dst, &(ur4->rt_dst),
2810 3 * sizeof(struct sockaddr)); 2813 3 * sizeof(struct sockaddr));
2811 ret |= __get_user (r4.rt_flags, &(ur4->rt_flags)); 2814 ret |= __get_user(r4.rt_flags, &(ur4->rt_flags));
2812 ret |= __get_user (r4.rt_metric, &(ur4->rt_metric)); 2815 ret |= __get_user(r4.rt_metric, &(ur4->rt_metric));
2813 ret |= __get_user (r4.rt_mtu, &(ur4->rt_mtu)); 2816 ret |= __get_user(r4.rt_mtu, &(ur4->rt_mtu));
2814 ret |= __get_user (r4.rt_window, &(ur4->rt_window)); 2817 ret |= __get_user(r4.rt_window, &(ur4->rt_window));
2815 ret |= __get_user (r4.rt_irtt, &(ur4->rt_irtt)); 2818 ret |= __get_user(r4.rt_irtt, &(ur4->rt_irtt));
2816 ret |= __get_user (rtdev, &(ur4->rt_dev)); 2819 ret |= __get_user(rtdev, &(ur4->rt_dev));
2817 if (rtdev) { 2820 if (rtdev) {
2818 ret |= copy_from_user (devname, compat_ptr(rtdev), 15); 2821 ret |= copy_from_user(devname, compat_ptr(rtdev), 15);
2819 r4.rt_dev = devname; devname[15] = 0; 2822 r4.rt_dev = devname; devname[15] = 0;
2820 } else 2823 } else
2821 r4.rt_dev = NULL; 2824 r4.rt_dev = NULL;
@@ -2828,9 +2831,9 @@ static int routing_ioctl(struct net *net, struct socket *sock,
2828 goto out; 2831 goto out;
2829 } 2832 }
2830 2833
2831 set_fs (KERNEL_DS); 2834 set_fs(KERNEL_DS);
2832 ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r); 2835 ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r);
2833 set_fs (old_fs); 2836 set_fs(old_fs);
2834 2837
2835out: 2838out:
2836 return ret; 2839 return ret;
@@ -2993,11 +2996,13 @@ int kernel_bind(struct socket *sock, struct sockaddr *addr, int addrlen)
2993{ 2996{
2994 return sock->ops->bind(sock, addr, addrlen); 2997 return sock->ops->bind(sock, addr, addrlen);
2995} 2998}
2999EXPORT_SYMBOL(kernel_bind);
2996 3000
2997int kernel_listen(struct socket *sock, int backlog) 3001int kernel_listen(struct socket *sock, int backlog)
2998{ 3002{
2999 return sock->ops->listen(sock, backlog); 3003 return sock->ops->listen(sock, backlog);
3000} 3004}
3005EXPORT_SYMBOL(kernel_listen);
3001 3006
3002int kernel_accept(struct socket *sock, struct socket **newsock, int flags) 3007int kernel_accept(struct socket *sock, struct socket **newsock, int flags)
3003{ 3008{
@@ -3022,24 +3027,28 @@ int kernel_accept(struct socket *sock, struct socket **newsock, int flags)
3022done: 3027done:
3023 return err; 3028 return err;
3024} 3029}
3030EXPORT_SYMBOL(kernel_accept);
3025 3031
3026int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen, 3032int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen,
3027 int flags) 3033 int flags)
3028{ 3034{
3029 return sock->ops->connect(sock, addr, addrlen, flags); 3035 return sock->ops->connect(sock, addr, addrlen, flags);
3030} 3036}
3037EXPORT_SYMBOL(kernel_connect);
3031 3038
3032int kernel_getsockname(struct socket *sock, struct sockaddr *addr, 3039int kernel_getsockname(struct socket *sock, struct sockaddr *addr,
3033 int *addrlen) 3040 int *addrlen)
3034{ 3041{
3035 return sock->ops->getname(sock, addr, addrlen, 0); 3042 return sock->ops->getname(sock, addr, addrlen, 0);
3036} 3043}
3044EXPORT_SYMBOL(kernel_getsockname);
3037 3045
3038int kernel_getpeername(struct socket *sock, struct sockaddr *addr, 3046int kernel_getpeername(struct socket *sock, struct sockaddr *addr,
3039 int *addrlen) 3047 int *addrlen)
3040{ 3048{
3041 return sock->ops->getname(sock, addr, addrlen, 1); 3049 return sock->ops->getname(sock, addr, addrlen, 1);
3042} 3050}
3051EXPORT_SYMBOL(kernel_getpeername);
3043 3052
3044int kernel_getsockopt(struct socket *sock, int level, int optname, 3053int kernel_getsockopt(struct socket *sock, int level, int optname,
3045 char *optval, int *optlen) 3054 char *optval, int *optlen)
@@ -3056,6 +3065,7 @@ int kernel_getsockopt(struct socket *sock, int level, int optname,
3056 set_fs(oldfs); 3065 set_fs(oldfs);
3057 return err; 3066 return err;
3058} 3067}
3068EXPORT_SYMBOL(kernel_getsockopt);
3059 3069
3060int kernel_setsockopt(struct socket *sock, int level, int optname, 3070int kernel_setsockopt(struct socket *sock, int level, int optname,
3061 char *optval, unsigned int optlen) 3071 char *optval, unsigned int optlen)
@@ -3072,6 +3082,7 @@ int kernel_setsockopt(struct socket *sock, int level, int optname,
3072 set_fs(oldfs); 3082 set_fs(oldfs);
3073 return err; 3083 return err;
3074} 3084}
3085EXPORT_SYMBOL(kernel_setsockopt);
3075 3086
3076int kernel_sendpage(struct socket *sock, struct page *page, int offset, 3087int kernel_sendpage(struct socket *sock, struct page *page, int offset,
3077 size_t size, int flags) 3088 size_t size, int flags)
@@ -3083,6 +3094,7 @@ int kernel_sendpage(struct socket *sock, struct page *page, int offset,
3083 3094
3084 return sock_no_sendpage(sock, page, offset, size, flags); 3095 return sock_no_sendpage(sock, page, offset, size, flags);
3085} 3096}
3097EXPORT_SYMBOL(kernel_sendpage);
3086 3098
3087int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg) 3099int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg)
3088{ 3100{
@@ -3095,33 +3107,10 @@ int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg)
3095 3107
3096 return err; 3108 return err;
3097} 3109}
3110EXPORT_SYMBOL(kernel_sock_ioctl);
3098 3111
3099int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how) 3112int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how)
3100{ 3113{
3101 return sock->ops->shutdown(sock, how); 3114 return sock->ops->shutdown(sock, how);
3102} 3115}
3103
3104EXPORT_SYMBOL(sock_create);
3105EXPORT_SYMBOL(sock_create_kern);
3106EXPORT_SYMBOL(sock_create_lite);
3107EXPORT_SYMBOL(sock_map_fd);
3108EXPORT_SYMBOL(sock_recvmsg);
3109EXPORT_SYMBOL(sock_register);
3110EXPORT_SYMBOL(sock_release);
3111EXPORT_SYMBOL(sock_sendmsg);
3112EXPORT_SYMBOL(sock_unregister);
3113EXPORT_SYMBOL(sock_wake_async);
3114EXPORT_SYMBOL(sockfd_lookup);
3115EXPORT_SYMBOL(kernel_sendmsg);
3116EXPORT_SYMBOL(kernel_recvmsg);
3117EXPORT_SYMBOL(kernel_bind);
3118EXPORT_SYMBOL(kernel_listen);
3119EXPORT_SYMBOL(kernel_accept);
3120EXPORT_SYMBOL(kernel_connect);
3121EXPORT_SYMBOL(kernel_getsockname);
3122EXPORT_SYMBOL(kernel_getpeername);
3123EXPORT_SYMBOL(kernel_getsockopt);
3124EXPORT_SYMBOL(kernel_setsockopt);
3125EXPORT_SYMBOL(kernel_sendpage);
3126EXPORT_SYMBOL(kernel_sock_ioctl);
3127EXPORT_SYMBOL(kernel_sock_shutdown); 3116EXPORT_SYMBOL(kernel_sock_shutdown);
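
Much of the socket.c churn above is one mechanical cleanup: each EXPORT_SYMBOL moves from the block at the bottom of the file to sit directly under its definition, the placement checkpatch.pl expects, so the export and the function cannot drift apart:

    int kernel_listen(struct socket *sock, int backlog)
    {
            return sock->ops->listen(sock, backlog);
    }
    EXPORT_SYMBOL(kernel_listen);
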
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 95721426296d..880d0de3f50f 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -321,7 +321,7 @@ rpcauth_prune_expired(struct list_head *free, int nr_to_scan)
321 * Run memory cache shrinker. 321 * Run memory cache shrinker.
322 */ 322 */
323static int 323static int
324rpcauth_cache_shrinker(int nr_to_scan, gfp_t gfp_mask) 324rpcauth_cache_shrinker(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
325{ 325{
326 LIST_HEAD(free); 326 LIST_HEAD(free);
327 int res; 327 int res;
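
The shrinker callback gains the struct shrinker pointer so a single callback can serve multiple instances via container_of(). A sketch with a hypothetical cache type (the demo_* names and helpers are illustrative):

    struct demo_cache {
            struct shrinker shrinker;
            /* ... cache state ... */
    };

    static int demo_shrink(struct shrinker *shrink, int nr_to_scan,
                           gfp_t gfp_mask)
    {
            struct demo_cache *c =
                    container_of(shrink, struct demo_cache, shrinker);

            if (nr_to_scan)
                    demo_evict(c, nr_to_scan);      /* hypothetical */
            return demo_count(c);                   /* objects left */
    }
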
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index fef2cc5e9d2b..4414a18c63b4 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -282,7 +282,7 @@ static inline struct sock *unix_find_socket_byname(struct net *net,
282 return s; 282 return s;
283} 283}
284 284
285static struct sock *unix_find_socket_byinode(struct net *net, struct inode *i) 285static struct sock *unix_find_socket_byinode(struct inode *i)
286{ 286{
287 struct sock *s; 287 struct sock *s;
288 struct hlist_node *node; 288 struct hlist_node *node;
@@ -292,9 +292,6 @@ static struct sock *unix_find_socket_byinode(struct net *net, struct inode *i)
292 &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) { 292 &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
293 struct dentry *dentry = unix_sk(s)->dentry; 293 struct dentry *dentry = unix_sk(s)->dentry;
294 294
295 if (!net_eq(sock_net(s), net))
296 continue;
297
298 if (dentry && dentry->d_inode == i) { 295 if (dentry && dentry->d_inode == i) {
299 sock_hold(s); 296 sock_hold(s);
300 goto found; 297 goto found;
@@ -450,11 +447,31 @@ static int unix_release_sock(struct sock *sk, int embrion)
450 return 0; 447 return 0;
451} 448}
452 449
450static void init_peercred(struct sock *sk)
451{
452 put_pid(sk->sk_peer_pid);
453 if (sk->sk_peer_cred)
454 put_cred(sk->sk_peer_cred);
455 sk->sk_peer_pid = get_pid(task_tgid(current));
456 sk->sk_peer_cred = get_current_cred();
457}
458
459static void copy_peercred(struct sock *sk, struct sock *peersk)
460{
461 put_pid(sk->sk_peer_pid);
462 if (sk->sk_peer_cred)
463 put_cred(sk->sk_peer_cred);
464 sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
465 sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
466}
467
453static int unix_listen(struct socket *sock, int backlog) 468static int unix_listen(struct socket *sock, int backlog)
454{ 469{
455 int err; 470 int err;
456 struct sock *sk = sock->sk; 471 struct sock *sk = sock->sk;
457 struct unix_sock *u = unix_sk(sk); 472 struct unix_sock *u = unix_sk(sk);
473 struct pid *old_pid = NULL;
474 const struct cred *old_cred = NULL;
458 475
459 err = -EOPNOTSUPP; 476 err = -EOPNOTSUPP;
460 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET) 477 if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
@@ -470,12 +487,14 @@ static int unix_listen(struct socket *sock, int backlog)
470 sk->sk_max_ack_backlog = backlog; 487 sk->sk_max_ack_backlog = backlog;
471 sk->sk_state = TCP_LISTEN; 488 sk->sk_state = TCP_LISTEN;
472 /* set credentials so connect can copy them */ 489 /* set credentials so connect can copy them */
473 sk->sk_peercred.pid = task_tgid_vnr(current); 490 init_peercred(sk);
474 current_euid_egid(&sk->sk_peercred.uid, &sk->sk_peercred.gid);
475 err = 0; 491 err = 0;
476 492
477out_unlock: 493out_unlock:
478 unix_state_unlock(sk); 494 unix_state_unlock(sk);
495 put_pid(old_pid);
496 if (old_cred)
497 put_cred(old_cred);
479out: 498out:
480 return err; 499 return err;
481} 500}
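
Storing a refcounted struct pid and struct cred instead of a struct ucred byte copy is what lets the peer's pid later be reported correctly in the reader's pid namespace. A sketch of such a consumer (field names as in this tree):

    struct ucred peercred;

    peercred.pid = pid_vnr(sk->sk_peer_pid);   /* caller-ns view of the pid */
    peercred.uid = sk->sk_peer_cred->euid;
    peercred.gid = sk->sk_peer_cred->egid;
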
@@ -736,7 +755,7 @@ static struct sock *unix_find_other(struct net *net,
736 err = -ECONNREFUSED; 755 err = -ECONNREFUSED;
737 if (!S_ISSOCK(inode->i_mode)) 756 if (!S_ISSOCK(inode->i_mode))
738 goto put_fail; 757 goto put_fail;
739 u = unix_find_socket_byinode(net, inode); 758 u = unix_find_socket_byinode(inode);
740 if (!u) 759 if (!u)
741 goto put_fail; 760 goto put_fail;
742 761
@@ -1140,8 +1159,7 @@ restart:
1140 unix_peer(newsk) = sk; 1159 unix_peer(newsk) = sk;
1141 newsk->sk_state = TCP_ESTABLISHED; 1160 newsk->sk_state = TCP_ESTABLISHED;
1142 newsk->sk_type = sk->sk_type; 1161 newsk->sk_type = sk->sk_type;
1143 newsk->sk_peercred.pid = task_tgid_vnr(current); 1162 init_peercred(newsk);
1144 current_euid_egid(&newsk->sk_peercred.uid, &newsk->sk_peercred.gid);
1145 newu = unix_sk(newsk); 1163 newu = unix_sk(newsk);
1146 newsk->sk_wq = &newu->peer_wq; 1164 newsk->sk_wq = &newu->peer_wq;
1147 otheru = unix_sk(other); 1165 otheru = unix_sk(other);
@@ -1157,7 +1175,7 @@ restart:
1157 } 1175 }
1158 1176
1159 /* Set credentials */ 1177 /* Set credentials */
1160 sk->sk_peercred = other->sk_peercred; 1178 copy_peercred(sk, other);
1161 1179
1162 sock->state = SS_CONNECTED; 1180 sock->state = SS_CONNECTED;
1163 sk->sk_state = TCP_ESTABLISHED; 1181 sk->sk_state = TCP_ESTABLISHED;
@@ -1199,10 +1217,8 @@ static int unix_socketpair(struct socket *socka, struct socket *sockb)
1199 sock_hold(skb); 1217 sock_hold(skb);
1200 unix_peer(ska) = skb; 1218 unix_peer(ska) = skb;
1201 unix_peer(skb) = ska; 1219 unix_peer(skb) = ska;
1202 ska->sk_peercred.pid = skb->sk_peercred.pid = task_tgid_vnr(current); 1220 init_peercred(ska);
1203 current_euid_egid(&skb->sk_peercred.uid, &skb->sk_peercred.gid); 1221 init_peercred(skb);
1204 ska->sk_peercred.uid = skb->sk_peercred.uid;
1205 ska->sk_peercred.gid = skb->sk_peercred.gid;
1206 1222
1207 if (ska->sk_type != SOCK_DGRAM) { 1223 if (ska->sk_type != SOCK_DGRAM) {
1208 ska->sk_state = TCP_ESTABLISHED; 1224 ska->sk_state = TCP_ESTABLISHED;
@@ -1297,18 +1313,20 @@ static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1297 int i; 1313 int i;
1298 1314
1299 scm->fp = UNIXCB(skb).fp; 1315 scm->fp = UNIXCB(skb).fp;
1300 skb->destructor = sock_wfree;
1301 UNIXCB(skb).fp = NULL; 1316 UNIXCB(skb).fp = NULL;
1302 1317
1303 for (i = scm->fp->count-1; i >= 0; i--) 1318 for (i = scm->fp->count-1; i >= 0; i--)
1304 unix_notinflight(scm->fp->fp[i]); 1319 unix_notinflight(scm->fp->fp[i]);
1305} 1320}
1306 1321
1307static void unix_destruct_fds(struct sk_buff *skb) 1322static void unix_destruct_scm(struct sk_buff *skb)
1308{ 1323{
1309 struct scm_cookie scm; 1324 struct scm_cookie scm;
1310 memset(&scm, 0, sizeof(scm)); 1325 memset(&scm, 0, sizeof(scm));
1311 unix_detach_fds(&scm, skb); 1326 scm.pid = UNIXCB(skb).pid;
1327 scm.cred = UNIXCB(skb).cred;
1328 if (UNIXCB(skb).fp)
1329 unix_detach_fds(&scm, skb);
1312 1330
1313 /* Alas, it calls VFS */ 1331 /* Alas, it calls VFS */
1314 /* So fscking what? fput() had been SMP-safe since the last Summer */ 1332 /* So fscking what? fput() had been SMP-safe since the last Summer */
@@ -1331,10 +1349,22 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1331 1349
1332 for (i = scm->fp->count-1; i >= 0; i--) 1350 for (i = scm->fp->count-1; i >= 0; i--)
1333 unix_inflight(scm->fp->fp[i]); 1351 unix_inflight(scm->fp->fp[i]);
1334 skb->destructor = unix_destruct_fds;
1335 return 0; 1352 return 0;
1336} 1353}
1337 1354
1355static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
1356{
1357 int err = 0;
1358 UNIXCB(skb).pid = get_pid(scm->pid);
1359 UNIXCB(skb).cred = get_cred(scm->cred);
1360 UNIXCB(skb).fp = NULL;
1361 if (scm->fp && send_fds)
1362 err = unix_attach_fds(scm, skb);
1363
1364 skb->destructor = unix_destruct_scm;
1365 return err;
1366}
1367
1338/* 1368/*
1339 * Send AF_UNIX data. 1369 * Send AF_UNIX data.
1340 */ 1370 */
@@ -1391,12 +1421,9 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
1391 if (skb == NULL) 1421 if (skb == NULL)
1392 goto out; 1422 goto out;
1393 1423
1394 memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred)); 1424 err = unix_scm_to_skb(siocb->scm, skb, true);
1395 if (siocb->scm->fp) { 1425 if (err)
1396 err = unix_attach_fds(siocb->scm, skb); 1426 goto out_free;
1397 if (err)
1398 goto out_free;
1399 }
1400 unix_get_secdata(siocb->scm, skb); 1427 unix_get_secdata(siocb->scm, skb);
1401 1428
1402 skb_reset_transport_header(skb); 1429 skb_reset_transport_header(skb);
@@ -1566,16 +1593,14 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
1566 */ 1593 */
1567 size = min_t(int, size, skb_tailroom(skb)); 1594 size = min_t(int, size, skb_tailroom(skb));
1568 1595
1569 memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred)); 1596
1570 /* Only send the fds in the first buffer */ 1597 /* Only send the fds in the first buffer */
1571 if (siocb->scm->fp && !fds_sent) { 1598 err = unix_scm_to_skb(siocb->scm, skb, !fds_sent);
1572 err = unix_attach_fds(siocb->scm, skb); 1599 if (err) {
1573 if (err) { 1600 kfree_skb(skb);
1574 kfree_skb(skb); 1601 goto out_err;
1575 goto out_err;
1576 }
1577 fds_sent = true;
1578 } 1602 }
1603 fds_sent = true;
1579 1604
1580 err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size); 1605 err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
1581 if (err) { 1606 if (err) {
@@ -1692,7 +1717,7 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
1692 siocb->scm = &tmp_scm; 1717 siocb->scm = &tmp_scm;
1693 memset(&tmp_scm, 0, sizeof(tmp_scm)); 1718 memset(&tmp_scm, 0, sizeof(tmp_scm));
1694 } 1719 }
1695 siocb->scm->creds = *UNIXCREDS(skb); 1720 scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
1696 unix_set_secdata(siocb->scm, skb); 1721 unix_set_secdata(siocb->scm, skb);
1697 1722
1698 if (!(flags & MSG_PEEK)) { 1723 if (!(flags & MSG_PEEK)) {
@@ -1841,14 +1866,14 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
1841 1866
1842 if (check_creds) { 1867 if (check_creds) {
1843 /* Never glue messages from different writers */ 1868 /* Never glue messages from different writers */
1844 if (memcmp(UNIXCREDS(skb), &siocb->scm->creds, 1869 if ((UNIXCB(skb).pid != siocb->scm->pid) ||
1845 sizeof(siocb->scm->creds)) != 0) { 1870 (UNIXCB(skb).cred != siocb->scm->cred)) {
1846 skb_queue_head(&sk->sk_receive_queue, skb); 1871 skb_queue_head(&sk->sk_receive_queue, skb);
1847 break; 1872 break;
1848 } 1873 }
1849 } else { 1874 } else {
1850 /* Copy credentials */ 1875 /* Copy credentials */
1851 siocb->scm->creds = *UNIXCREDS(skb); 1876 scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
1852 check_creds = 1; 1877 check_creds = 1;
1853 } 1878 }
1854 1879
@@ -1881,7 +1906,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
1881 break; 1906 break;
1882 } 1907 }
1883 1908
1884 kfree_skb(skb); 1909 consume_skb(skb);
1885 1910
1886 if (siocb->scm->fp) 1911 if (siocb->scm->fp)
1887 break; 1912 break;
diff --git a/net/wanrouter/wanmain.c b/net/wanrouter/wanmain.c
index 258daa80ad92..2bf23406637a 100644
--- a/net/wanrouter/wanmain.c
+++ b/net/wanrouter/wanmain.c
@@ -48,7 +48,7 @@
48#include <linux/kernel.h> 48#include <linux/kernel.h>
49#include <linux/module.h> /* support for loadable modules */ 49#include <linux/module.h> /* support for loadable modules */
50#include <linux/slab.h> /* kmalloc(), kfree() */ 50#include <linux/slab.h> /* kmalloc(), kfree() */
51#include <linux/smp_lock.h> 51#include <linux/mutex.h>
52#include <linux/mm.h> 52#include <linux/mm.h>
53#include <linux/string.h> /* inline mem*, str* functions */ 53#include <linux/string.h> /* inline mem*, str* functions */
54 54
@@ -71,6 +71,7 @@
71 * WAN device IOCTL handlers 71 * WAN device IOCTL handlers
72 */ 72 */
73 73
74static DEFINE_MUTEX(wanrouter_mutex);
74static int wanrouter_device_setup(struct wan_device *wandev, 75static int wanrouter_device_setup(struct wan_device *wandev,
75 wandev_conf_t __user *u_conf); 76 wandev_conf_t __user *u_conf);
76static int wanrouter_device_stat(struct wan_device *wandev, 77static int wanrouter_device_stat(struct wan_device *wandev,
@@ -376,7 +377,7 @@ long wanrouter_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
376 if (wandev->magic != ROUTER_MAGIC) 377 if (wandev->magic != ROUTER_MAGIC)
377 return -EINVAL; 378 return -EINVAL;
378 379
379 lock_kernel(); 380 mutex_lock(&wanrouter_mutex);
380 switch (cmd) { 381 switch (cmd) {
381 case ROUTER_SETUP: 382 case ROUTER_SETUP:
382 err = wanrouter_device_setup(wandev, data); 383 err = wanrouter_device_setup(wandev, data);
@@ -408,7 +409,7 @@ long wanrouter_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
408 err = wandev->ioctl(wandev, cmd, arg); 409 err = wandev->ioctl(wandev, cmd, arg);
409 else err = -EINVAL; 410 else err = -EINVAL;
410 } 411 }
411 unlock_kernel(); 412 mutex_unlock(&wanrouter_mutex);
412 return err; 413 return err;
413} 414}
414 415
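
This is the standard BKL-removal pattern: a file-scope mutex guarding the same critical sections lock_kernel() used to cover. In miniature (demo_* names hypothetical):

    static DEFINE_MUTEX(demo_mutex);

    static long demo_ioctl(unsigned int cmd, unsigned long arg)
    {
            long err;

            mutex_lock(&demo_mutex);        /* was: lock_kernel()   */
            err = demo_do_ioctl(cmd, arg);  /* hypothetical worker  */
            mutex_unlock(&demo_mutex);      /* was: unlock_kernel() */
            return err;
    }
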
diff --git a/net/wanrouter/wanproc.c b/net/wanrouter/wanproc.c
index c44d96b3a437..11f25c7a7a05 100644
--- a/net/wanrouter/wanproc.c
+++ b/net/wanrouter/wanproc.c
@@ -27,7 +27,7 @@
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/wanrouter.h> /* WAN router API definitions */ 28#include <linux/wanrouter.h> /* WAN router API definitions */
29#include <linux/seq_file.h> 29#include <linux/seq_file.h>
30#include <linux/smp_lock.h> 30#include <linux/mutex.h>
31 31
32#include <net/net_namespace.h> 32#include <net/net_namespace.h>
33#include <asm/io.h> 33#include <asm/io.h>
@@ -66,6 +66,7 @@
66 * /proc/net/router 66 * /proc/net/router
67 */ 67 */
68 68
69static DEFINE_MUTEX(config_mutex);
69static struct proc_dir_entry *proc_router; 70static struct proc_dir_entry *proc_router;
70 71
71/* Strings */ 72/* Strings */
@@ -85,7 +86,7 @@ static void *r_start(struct seq_file *m, loff_t *pos)
85 struct wan_device *wandev; 86 struct wan_device *wandev;
86 loff_t l = *pos; 87 loff_t l = *pos;
87 88
88 lock_kernel(); 89 mutex_lock(&config_mutex);
89 if (!l--) 90 if (!l--)
90 return SEQ_START_TOKEN; 91 return SEQ_START_TOKEN;
91 for (wandev = wanrouter_router_devlist; l-- && wandev; 92 for (wandev = wanrouter_router_devlist; l-- && wandev;
@@ -104,7 +105,7 @@ static void *r_next(struct seq_file *m, void *v, loff_t *pos)
104static void r_stop(struct seq_file *m, void *v) 105static void r_stop(struct seq_file *m, void *v)
105 __releases(kernel_lock) 106 __releases(kernel_lock)
106{ 107{
107 unlock_kernel(); 108 mutex_unlock(&config_mutex);
108} 109}
109 110
110static int config_show(struct seq_file *m, void *v) 111static int config_show(struct seq_file *m, void *v)
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index b01a6f6397d7..d0c92dddb26b 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -35,8 +35,9 @@ rdev_freq_to_chan(struct cfg80211_registered_device *rdev,
35 if (!ht_cap->ht_supported) 35 if (!ht_cap->ht_supported)
36 return NULL; 36 return NULL;
37 37
38 if (!(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) || 38 if (channel_type != NL80211_CHAN_HT20 &&
39 ht_cap->cap & IEEE80211_HT_CAP_40MHZ_INTOLERANT) 39 (!(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) ||
40 ht_cap->cap & IEEE80211_HT_CAP_40MHZ_INTOLERANT))
40 return NULL; 41 return NULL;
41 } 42 }
42 43
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 37d0e0ab4432..541e2fff5e9c 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -472,24 +472,22 @@ int wiphy_register(struct wiphy *wiphy)
472 /* check and set up bitrates */ 472 /* check and set up bitrates */
473 ieee80211_set_bitrate_flags(wiphy); 473 ieee80211_set_bitrate_flags(wiphy);
474 474
475 mutex_lock(&cfg80211_mutex);
476
475 res = device_add(&rdev->wiphy.dev); 477 res = device_add(&rdev->wiphy.dev);
476 if (res) 478 if (res)
477 return res; 479 goto out_unlock;
478 480
479 res = rfkill_register(rdev->rfkill); 481 res = rfkill_register(rdev->rfkill);
480 if (res) 482 if (res)
481 goto out_rm_dev; 483 goto out_rm_dev;
482 484
483 mutex_lock(&cfg80211_mutex);
484
485 /* set up regulatory info */ 485 /* set up regulatory info */
486 wiphy_update_regulatory(wiphy, NL80211_REGDOM_SET_BY_CORE); 486 wiphy_update_regulatory(wiphy, NL80211_REGDOM_SET_BY_CORE);
487 487
488 list_add_rcu(&rdev->list, &cfg80211_rdev_list); 488 list_add_rcu(&rdev->list, &cfg80211_rdev_list);
489 cfg80211_rdev_list_generation++; 489 cfg80211_rdev_list_generation++;
490 490
491 mutex_unlock(&cfg80211_mutex);
492
493 /* add to debugfs */ 491 /* add to debugfs */
494 rdev->wiphy.debugfsdir = 492 rdev->wiphy.debugfsdir =
495 debugfs_create_dir(wiphy_name(&rdev->wiphy), 493 debugfs_create_dir(wiphy_name(&rdev->wiphy),
@@ -509,11 +507,15 @@ int wiphy_register(struct wiphy *wiphy)
509 } 507 }
510 508
511 cfg80211_debugfs_rdev_add(rdev); 509 cfg80211_debugfs_rdev_add(rdev);
510 mutex_unlock(&cfg80211_mutex);
512 511
513 return 0; 512 return 0;
514 513
515 out_rm_dev: 514out_rm_dev:
516 device_del(&rdev->wiphy.dev); 515 device_del(&rdev->wiphy.dev);
516
517out_unlock:
518 mutex_unlock(&cfg80211_mutex);
517 return res; 519 return res;
518} 520}
519EXPORT_SYMBOL(wiphy_register); 521EXPORT_SYMBOL(wiphy_register);
@@ -894,7 +896,7 @@ out_fail_pernet:
894} 896}
895subsys_initcall(cfg80211_init); 897subsys_initcall(cfg80211_init);
896 898
897static void cfg80211_exit(void) 899static void __exit cfg80211_exit(void)
898{ 900{
899 debugfs_remove(ieee80211_debugfs_dir); 901 debugfs_remove(ieee80211_debugfs_dir);
900 nl80211_exit(); 902 nl80211_exit();
@@ -905,3 +907,52 @@ static void cfg80211_exit(void)
905 destroy_workqueue(cfg80211_wq); 907 destroy_workqueue(cfg80211_wq);
906} 908}
907module_exit(cfg80211_exit); 909module_exit(cfg80211_exit);
910
911static int ___wiphy_printk(const char *level, const struct wiphy *wiphy,
912 struct va_format *vaf)
913{
914 if (!wiphy)
915 return printk("%s(NULL wiphy *): %pV", level, vaf);
916
917 return printk("%s%s: %pV", level, wiphy_name(wiphy), vaf);
918}
919
920int __wiphy_printk(const char *level, const struct wiphy *wiphy,
921 const char *fmt, ...)
922{
923 struct va_format vaf;
924 va_list args;
925 int r;
926
927 va_start(args, fmt);
928
929 vaf.fmt = fmt;
930 vaf.va = &args;
931
932 r = ___wiphy_printk(level, wiphy, &vaf);
933 va_end(args);
934
935 return r;
936}
937EXPORT_SYMBOL(__wiphy_printk);
938
939#define define_wiphy_printk_level(func, kern_level) \
940int func(const struct wiphy *wiphy, const char *fmt, ...) \
941{ \
942 struct va_format vaf; \
943 va_list args; \
944 int r; \
945 \
946 va_start(args, fmt); \
947 \
948 vaf.fmt = fmt; \
949 vaf.va = &args; \
950 \
951 r = ___wiphy_printk(kern_level, wiphy, &vaf); \
952 va_end(args); \
953 \
954 return r; \
955} \
956EXPORT_SYMBOL(func);
957
958define_wiphy_printk_level(wiphy_debug, KERN_DEBUG);
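
The %pV extension lets ___wiphy_printk() re-expand a va_format without rebuilding the format string, so wiphy_debug() callers get the device name prefixed for free:

    /* prints e.g. "phy0: scan finished, 12 BSSes" at KERN_DEBUG */
    wiphy_debug(wiphy, "scan finished, %d BSSes\n", n_bss);
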
diff --git a/net/wireless/core.h b/net/wireless/core.h
index ae930acf75e9..63d57ae399c3 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -339,6 +339,7 @@ int cfg80211_mlme_action(struct cfg80211_registered_device *rdev,
339 struct net_device *dev, 339 struct net_device *dev,
340 struct ieee80211_channel *chan, 340 struct ieee80211_channel *chan,
341 enum nl80211_channel_type channel_type, 341 enum nl80211_channel_type channel_type,
342 bool channel_type_valid,
342 const u8 *buf, size_t len, u64 *cookie); 343 const u8 *buf, size_t len, u64 *cookie);
343 344
344/* SME */ 345/* SME */
diff --git a/net/wireless/genregdb.awk b/net/wireless/genregdb.awk
index 3cc9e69880a8..53c143f5e770 100644
--- a/net/wireless/genregdb.awk
+++ b/net/wireless/genregdb.awk
@@ -21,6 +21,7 @@ BEGIN {
21 print "" 21 print ""
22 print "#include <linux/nl80211.h>" 22 print "#include <linux/nl80211.h>"
23 print "#include <net/cfg80211.h>" 23 print "#include <net/cfg80211.h>"
24 print "#include \"regdb.h\""
24 print "" 25 print ""
25 regdb = "const struct ieee80211_regdomain *reg_regdb[] = {\n" 26 regdb = "const struct ieee80211_regdomain *reg_regdb[] = {\n"
26} 27}
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index adcabba02e20..27a8ce9343c3 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -247,8 +247,10 @@ int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev,
247 if (!netif_running(wdev->netdev)) 247 if (!netif_running(wdev->netdev))
248 return 0; 248 return 0;
249 249
250 if (wdev->wext.keys) 250 if (wdev->wext.keys) {
251 wdev->wext.keys->def = wdev->wext.default_key; 251 wdev->wext.keys->def = wdev->wext.default_key;
252 wdev->wext.keys->defmgmt = wdev->wext.default_mgmt_key;
253 }
252 254
253 wdev->wext.ibss.privacy = wdev->wext.default_key != -1; 255 wdev->wext.ibss.privacy = wdev->wext.default_key != -1;
254 256
diff --git a/net/wireless/lib80211_crypt_ccmp.c b/net/wireless/lib80211_crypt_ccmp.c
index b7fa31d5fd13..dacb3b4b1bdb 100644
--- a/net/wireless/lib80211_crypt_ccmp.c
+++ b/net/wireless/lib80211_crypt_ccmp.c
@@ -467,7 +467,6 @@ static struct lib80211_crypto_ops lib80211_crypt_ccmp = {
467 .name = "CCMP", 467 .name = "CCMP",
468 .init = lib80211_ccmp_init, 468 .init = lib80211_ccmp_init,
469 .deinit = lib80211_ccmp_deinit, 469 .deinit = lib80211_ccmp_deinit,
470 .build_iv = lib80211_ccmp_hdr,
471 .encrypt_mpdu = lib80211_ccmp_encrypt, 470 .encrypt_mpdu = lib80211_ccmp_encrypt,
472 .decrypt_mpdu = lib80211_ccmp_decrypt, 471 .decrypt_mpdu = lib80211_ccmp_decrypt,
473 .encrypt_msdu = NULL, 472 .encrypt_msdu = NULL,
diff --git a/net/wireless/lib80211_crypt_tkip.c b/net/wireless/lib80211_crypt_tkip.c
index 8cbdb32ff316..0fe40510e2cb 100644
--- a/net/wireless/lib80211_crypt_tkip.c
+++ b/net/wireless/lib80211_crypt_tkip.c
@@ -578,7 +578,7 @@ static void michael_mic_hdr(struct sk_buff *skb, u8 * hdr)
578 } 578 }
579 579
580 if (ieee80211_is_data_qos(hdr11->frame_control)) { 580 if (ieee80211_is_data_qos(hdr11->frame_control)) {
581 hdr[12] = le16_to_cpu(*ieee80211_get_qos_ctl(hdr11)) 581 hdr[12] = le16_to_cpu(*((__le16 *)ieee80211_get_qos_ctl(hdr11)))
582 & IEEE80211_QOS_CTL_TID_MASK; 582 & IEEE80211_QOS_CTL_TID_MASK;
583 } else 583 } else
584 hdr[12] = 0; /* priority */ 584 hdr[12] = 0; /* priority */
@@ -757,7 +757,6 @@ static struct lib80211_crypto_ops lib80211_crypt_tkip = {
757 .name = "TKIP", 757 .name = "TKIP",
758 .init = lib80211_tkip_init, 758 .init = lib80211_tkip_init,
759 .deinit = lib80211_tkip_deinit, 759 .deinit = lib80211_tkip_deinit,
760 .build_iv = lib80211_tkip_hdr,
761 .encrypt_mpdu = lib80211_tkip_encrypt, 760 .encrypt_mpdu = lib80211_tkip_encrypt,
762 .decrypt_mpdu = lib80211_tkip_decrypt, 761 .decrypt_mpdu = lib80211_tkip_decrypt,
763 .encrypt_msdu = lib80211_michael_mic_add, 762 .encrypt_msdu = lib80211_michael_mic_add,
diff --git a/net/wireless/lib80211_crypt_wep.c b/net/wireless/lib80211_crypt_wep.c
index 6d41e05ca33b..e2e88878ba35 100644
--- a/net/wireless/lib80211_crypt_wep.c
+++ b/net/wireless/lib80211_crypt_wep.c
@@ -269,7 +269,6 @@ static struct lib80211_crypto_ops lib80211_crypt_wep = {
269 .name = "WEP", 269 .name = "WEP",
270 .init = lib80211_wep_init, 270 .init = lib80211_wep_init,
271 .deinit = lib80211_wep_deinit, 271 .deinit = lib80211_wep_deinit,
272 .build_iv = lib80211_wep_build_iv,
273 .encrypt_mpdu = lib80211_wep_encrypt, 272 .encrypt_mpdu = lib80211_wep_encrypt,
274 .decrypt_mpdu = lib80211_wep_decrypt, 273 .decrypt_mpdu = lib80211_wep_decrypt,
275 .encrypt_msdu = NULL, 274 .encrypt_msdu = NULL,
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 48ead6f0426d..e74a1a2119d3 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -44,10 +44,10 @@ void cfg80211_send_rx_auth(struct net_device *dev, const u8 *buf, size_t len)
44 } 44 }
45 } 45 }
46 46
47 WARN_ON(!done); 47 if (done) {
48 48 nl80211_send_rx_auth(rdev, dev, buf, len, GFP_KERNEL);
49 nl80211_send_rx_auth(rdev, dev, buf, len, GFP_KERNEL); 49 cfg80211_sme_rx_auth(dev, buf, len);
50 cfg80211_sme_rx_auth(dev, buf, len); 50 }
51 51
52 wdev_unlock(wdev); 52 wdev_unlock(wdev);
53} 53}
@@ -827,6 +827,7 @@ int cfg80211_mlme_action(struct cfg80211_registered_device *rdev,
827 struct net_device *dev, 827 struct net_device *dev,
828 struct ieee80211_channel *chan, 828 struct ieee80211_channel *chan,
829 enum nl80211_channel_type channel_type, 829 enum nl80211_channel_type channel_type,
830 bool channel_type_valid,
830 const u8 *buf, size_t len, u64 *cookie) 831 const u8 *buf, size_t len, u64 *cookie)
831{ 832{
832 struct wireless_dev *wdev = dev->ieee80211_ptr; 833 struct wireless_dev *wdev = dev->ieee80211_ptr;
@@ -845,8 +846,9 @@ int cfg80211_mlme_action(struct cfg80211_registered_device *rdev,
845 if (!wdev->current_bss || 846 if (!wdev->current_bss ||
846 memcmp(wdev->current_bss->pub.bssid, mgmt->bssid, 847 memcmp(wdev->current_bss->pub.bssid, mgmt->bssid,
847 ETH_ALEN) != 0 || 848 ETH_ALEN) != 0 ||
848 memcmp(wdev->current_bss->pub.bssid, mgmt->da, 849 (wdev->iftype == NL80211_IFTYPE_STATION &&
849 ETH_ALEN) != 0) 850 memcmp(wdev->current_bss->pub.bssid, mgmt->da,
851 ETH_ALEN) != 0))
850 return -ENOTCONN; 852 return -ENOTCONN;
851 } 853 }
852 854
@@ -855,7 +857,7 @@ int cfg80211_mlme_action(struct cfg80211_registered_device *rdev,
855 857
856 /* Transmit the Action frame as requested by user space */ 858 /* Transmit the Action frame as requested by user space */
857 return rdev->ops->action(&rdev->wiphy, dev, chan, channel_type, 859 return rdev->ops->action(&rdev->wiphy, dev, chan, channel_type,
858 buf, len, cookie); 860 channel_type_valid, buf, len, cookie);
859} 861}
860 862
861bool cfg80211_rx_action(struct net_device *dev, int freq, const u8 *buf, 863bool cfg80211_rx_action(struct net_device *dev, int freq, const u8 *buf,
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index db71150b8040..37902a54e9c1 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -153,6 +153,9 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
153 [NL80211_ATTR_CQM] = { .type = NLA_NESTED, }, 153 [NL80211_ATTR_CQM] = { .type = NLA_NESTED, },
154 [NL80211_ATTR_LOCAL_STATE_CHANGE] = { .type = NLA_FLAG }, 154 [NL80211_ATTR_LOCAL_STATE_CHANGE] = { .type = NLA_FLAG },
155 [NL80211_ATTR_AP_ISOLATE] = { .type = NLA_U8 }, 155 [NL80211_ATTR_AP_ISOLATE] = { .type = NLA_U8 },
156
157 [NL80211_ATTR_WIPHY_TX_POWER_SETTING] = { .type = NLA_U32 },
158 [NL80211_ATTR_WIPHY_TX_POWER_LEVEL] = { .type = NLA_U32 },
156}; 159};
157 160
158/* policy for the attributes */ 161/* policy for the attributes */
@@ -869,6 +872,34 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
869 goto bad_res; 872 goto bad_res;
870 } 873 }
871 874
875 if (info->attrs[NL80211_ATTR_WIPHY_TX_POWER_SETTING]) {
876 enum nl80211_tx_power_setting type;
877 int idx, mbm = 0;
878
879 if (!rdev->ops->set_tx_power) {
880 result = -EOPNOTSUPP;
881 goto bad_res;
882 }
883
884 idx = NL80211_ATTR_WIPHY_TX_POWER_SETTING;
885 type = nla_get_u32(info->attrs[idx]);
886
887 if (!info->attrs[NL80211_ATTR_WIPHY_TX_POWER_LEVEL] &&
888 (type != NL80211_TX_POWER_AUTOMATIC)) {
889 result = -EINVAL;
890 goto bad_res;
891 }
892
893 if (type != NL80211_TX_POWER_AUTOMATIC) {
894 idx = NL80211_ATTR_WIPHY_TX_POWER_LEVEL;
895 mbm = nla_get_u32(info->attrs[idx]);
896 }
897
898 result = rdev->ops->set_tx_power(&rdev->wiphy, type, mbm);
899 if (result)
900 goto bad_res;
901 }
902
872 changed = 0; 903 changed = 0;
873 904
874 if (info->attrs[NL80211_ATTR_WIPHY_RETRY_SHORT]) { 905 if (info->attrs[NL80211_ATTR_WIPHY_RETRY_SHORT]) {
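
The two new attributes hand the requested TX power to the driver in mBm (1 dBm = 100 mBm), the same unit DBM_TO_MBM() produces in the wext path changed later in this series. A hedged sketch of a driver-side callback under the new set_tx_power signature; every myhw_* name is a hypothetical stand-in, and only the three setting types and the unit conversion come from the diff:

/* Hypothetical driver callback for the new signature; the myhw_*
 * helpers do not exist anywhere, and 1 dBm == 100 mBm is the only
 * hard fact here (cf. DBM_TO_MBM() in include/net/cfg80211.h). */
static int myhw_set_tx_power(struct wiphy *wiphy,
			     enum nl80211_tx_power_setting type, int mbm)
{
	int dbm = mbm / 100;	/* mBm -> dBm */

	switch (type) {
	case NL80211_TX_POWER_AUTOMATIC:
		/* no level attribute required; revert to defaults */
		return myhw_use_default_power(wiphy);
	case NL80211_TX_POWER_LIMITED:
		/* never exceed dbm; regulatory limits still apply */
		return myhw_set_power_limit(wiphy, dbm);
	case NL80211_TX_POWER_FIXED:
		/* pin the output power exactly to dbm */
		return myhw_set_power_fixed(wiphy, dbm);
	}
	return -EOPNOTSUPP;
}
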
@@ -1107,7 +1138,7 @@ static int nl80211_valid_4addr(struct cfg80211_registered_device *rdev,
1107 enum nl80211_iftype iftype) 1138 enum nl80211_iftype iftype)
1108{ 1139{
1109 if (!use_4addr) { 1140 if (!use_4addr) {
1110 if (netdev && netdev->br_port) 1141 if (netdev && (netdev->priv_flags & IFF_BRIDGE_PORT))
1111 return -EBUSY; 1142 return -EBUSY;
1112 return 0; 1143 return 0;
1113 } 1144 }
@@ -2738,6 +2769,7 @@ static int nl80211_get_mesh_params(struct sk_buff *skb,
2738 2769
2739 nla_put_failure: 2770 nla_put_failure:
2740 genlmsg_cancel(msg, hdr); 2771 genlmsg_cancel(msg, hdr);
2772 nlmsg_free(msg);
2741 err = -EMSGSIZE; 2773 err = -EMSGSIZE;
2742 out: 2774 out:
2743 /* Cleanup */ 2775 /* Cleanup */
@@ -2929,6 +2961,7 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info)
2929 2961
2930nla_put_failure: 2962nla_put_failure:
2931 genlmsg_cancel(msg, hdr); 2963 genlmsg_cancel(msg, hdr);
2964 nlmsg_free(msg);
2932 err = -EMSGSIZE; 2965 err = -EMSGSIZE;
2933out: 2966out:
2934 mutex_unlock(&cfg80211_mutex); 2967 mutex_unlock(&cfg80211_mutex);
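
Both nlmsg_free() additions plug the same leak: genlmsg_cancel() only rewinds the partially built message inside the skb, it never frees the skb itself, so an -EMSGSIZE path without nlmsg_free() loses the buffer. A hedged sketch of the corrected reply pattern, using the helpers of this era; the family, command, and attribute names are made up for illustration:

/* Illustrative genetlink reply with the corrected error path.
 * "example_family", EXAMPLE_CMD_GET and EXAMPLE_ATTR_VALUE are
 * hypothetical names, not real nl80211 identifiers. */
static int example_reply_u32(struct genl_info *info, u32 value)
{
	struct sk_buff *msg;
	void *hdr;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq,
			  &example_family, 0, EXAMPLE_CMD_GET);
	if (!hdr) {
		nlmsg_free(msg);
		return -ENOBUFS;
	}

	NLA_PUT_U32(msg, EXAMPLE_ATTR_VALUE, value);

	genlmsg_end(msg, hdr);
	return genlmsg_reply(msg, info);

nla_put_failure:
	genlmsg_cancel(msg, hdr);	/* rewinds the message... */
	nlmsg_free(msg);		/* ...but the skb must be freed too */
	return -EMSGSIZE;
}
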
@@ -3955,6 +3988,55 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
3955 } 3988 }
3956 } 3989 }
3957 3990
3991 if (info->attrs[NL80211_ATTR_BSS_BASIC_RATES]) {
3992 u8 *rates =
3993 nla_data(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]);
3994 int n_rates =
3995 nla_len(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]);
3996 struct ieee80211_supported_band *sband =
3997 wiphy->bands[ibss.channel->band];
3998 int i, j;
3999
4000 if (n_rates == 0) {
4001 err = -EINVAL;
4002 goto out;
4003 }
4004
4005 for (i = 0; i < n_rates; i++) {
4006 int rate = (rates[i] & 0x7f) * 5;
4007 bool found = false;
4008
4009 for (j = 0; j < sband->n_bitrates; j++) {
4010 if (sband->bitrates[j].bitrate == rate) {
4011 found = true;
4012 ibss.basic_rates |= BIT(j);
4013 break;
4014 }
4015 }
4016 if (!found) {
4017 err = -EINVAL;
4018 goto out;
4019 }
4020 }
4021 } else {
4022 /*
4023 * If no rates were explicitly configured,
4024 * use the mandatory rate set for 11b or
4025 * 11a for maximum compatibility.
4026 */
4027 struct ieee80211_supported_band *sband =
4028 wiphy->bands[ibss.channel->band];
4029 int j;
4030 u32 flag = ibss.channel->band == IEEE80211_BAND_5GHZ ?
4031 IEEE80211_RATE_MANDATORY_A :
4032 IEEE80211_RATE_MANDATORY_B;
4033
4034 for (j = 0; j < sband->n_bitrates; j++) {
4035 if (sband->bitrates[j].flags & flag)
4036 ibss.basic_rates |= BIT(j);
4037 }
4038 }
4039
3958 err = cfg80211_join_ibss(rdev, dev, &ibss, connkeys); 4040 err = cfg80211_join_ibss(rdev, dev, &ibss, connkeys);
3959 4041
3960out: 4042out:
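
In the explicit-rates branch above, (rates[i] & 0x7f) * 5 strips the basic-rate flag (bit 7) from each Supported Rates IE byte, whose units are 500 kbps, and rescales it to the 100 kbps units used by struct ieee80211_rate.bitrate. A standalone check of that arithmetic:

/* Supported Rates IE bytes are in 500 kbps units with bit 7 marking
 * a basic rate; kernel bitrates are in 100 kbps units. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* 1, 2, 5.5 and 11 Mbps, all flagged as basic rates (bit 7). */
	const uint8_t ie_rates[] = { 0x82, 0x84, 0x8b, 0x96 };
	unsigned int i;

	for (i = 0; i < sizeof(ie_rates); i++) {
		int bitrate = (ie_rates[i] & 0x7f) * 5;	/* 100 kbps units */
		printf("IE 0x%02x -> %d.%d Mbps\n",
		       ie_rates[i], bitrate / 10, bitrate % 10);
	}
	return 0;	/* prints 1.0, 2.0, 5.5 and 11.0 Mbps */
}
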
@@ -4653,7 +4735,8 @@ static int nl80211_register_action(struct sk_buff *skb, struct genl_info *info)
4653 if (err) 4735 if (err)
4654 goto unlock_rtnl; 4736 goto unlock_rtnl;
4655 4737
4656 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) { 4738 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
4739 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC) {
4657 err = -EOPNOTSUPP; 4740 err = -EOPNOTSUPP;
4658 goto out; 4741 goto out;
4659 } 4742 }
@@ -4681,6 +4764,7 @@ static int nl80211_action(struct sk_buff *skb, struct genl_info *info)
4681 struct net_device *dev; 4764 struct net_device *dev;
4682 struct ieee80211_channel *chan; 4765 struct ieee80211_channel *chan;
4683 enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT; 4766 enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
4767 bool channel_type_valid = false;
4684 u32 freq; 4768 u32 freq;
4685 int err; 4769 int err;
4686 void *hdr; 4770 void *hdr;
@@ -4702,7 +4786,8 @@ static int nl80211_action(struct sk_buff *skb, struct genl_info *info)
4702 goto out; 4786 goto out;
4703 } 4787 }
4704 4788
4705 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) { 4789 if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
4790 dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC) {
4706 err = -EOPNOTSUPP; 4791 err = -EOPNOTSUPP;
4707 goto out; 4792 goto out;
4708 } 4793 }
@@ -4722,6 +4807,7 @@ static int nl80211_action(struct sk_buff *skb, struct genl_info *info)
4722 err = -EINVAL; 4807 err = -EINVAL;
4723 goto out; 4808 goto out;
4724 } 4809 }
4810 channel_type_valid = true;
4725 } 4811 }
4726 4812
4727 freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]); 4813 freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
@@ -4745,6 +4831,7 @@ static int nl80211_action(struct sk_buff *skb, struct genl_info *info)
4745 goto free_msg; 4831 goto free_msg;
4746 } 4832 }
4747 err = cfg80211_mlme_action(rdev, dev, chan, channel_type, 4833 err = cfg80211_mlme_action(rdev, dev, chan, channel_type,
4834 channel_type_valid,
4748 nla_data(info->attrs[NL80211_ATTR_FRAME]), 4835 nla_data(info->attrs[NL80211_ATTR_FRAME]),
4749 nla_len(info->attrs[NL80211_ATTR_FRAME]), 4836 nla_len(info->attrs[NL80211_ATTR_FRAME]),
4750 &cookie); 4837 &cookie);
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 8f0d97dd3109..f180db0de66c 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -67,20 +67,12 @@ static struct platform_device *reg_pdev;
67const struct ieee80211_regdomain *cfg80211_regdomain; 67const struct ieee80211_regdomain *cfg80211_regdomain;
68 68
69/* 69/*
70 * We use this as a place for the rd structure built from the
71 * last parsed country IE to rest until CRDA gets back to us with
72 * what it thinks should apply for the same country
73 */
74static const struct ieee80211_regdomain *country_ie_regdomain;
75
76/*
77 * Protects static reg.c components: 70 * Protects static reg.c components:
78 * - cfg80211_world_regdom 71 * - cfg80211_world_regdom
79 * - cfg80211_regdom 72 * - cfg80211_regdom
80 * - country_ie_regdomain
81 * - last_request 73 * - last_request
82 */ 74 */
83DEFINE_MUTEX(reg_mutex); 75static DEFINE_MUTEX(reg_mutex);
84#define assert_reg_lock() WARN_ON(!mutex_is_locked(&reg_mutex)) 76#define assert_reg_lock() WARN_ON(!mutex_is_locked(&reg_mutex))
85 77
86/* Used to queue up regulatory hints */ 78/* Used to queue up regulatory hints */
@@ -275,25 +267,6 @@ static bool is_user_regdom_saved(void)
275 return true; 267 return true;
276} 268}
277 269
278/**
279 * country_ie_integrity_changes - tells us if the country IE has changed
280 * @checksum: checksum of country IE of fields we are interested in
281 *
282 * If the country IE has not changed you can ignore it safely. This is
283 * useful to determine if two devices are seeing two different country IEs
284 * even on the same alpha2. Note that this will return false if no IE has
285 * been set on the wireless core yet.
286 */
287static bool country_ie_integrity_changes(u32 checksum)
288{
289 /* If no IE has been set then the checksum doesn't change */
290 if (unlikely(!last_request->country_ie_checksum))
291 return false;
292 if (unlikely(last_request->country_ie_checksum != checksum))
293 return true;
294 return false;
295}
296
297static int reg_copy_regd(const struct ieee80211_regdomain **dst_regd, 270static int reg_copy_regd(const struct ieee80211_regdomain **dst_regd,
298 const struct ieee80211_regdomain *src_regd) 271 const struct ieee80211_regdomain *src_regd)
299{ 272{
@@ -506,471 +479,6 @@ static bool freq_in_rule_band(const struct ieee80211_freq_range *freq_range,
506} 479}
507 480
508/* 481/*
509 * This is a workaround for sanity checking ieee80211_channel_to_frequency()'s

510 * work. ieee80211_channel_to_frequency() can for example currently provide a
511 * 2 GHz channel when in fact a 5 GHz channel was desired. An example would be
512 * an AP providing channel 8 on a country IE triplet when it sent this on the
513 * 5 GHz band; that channel is meant to be channel 8 on 5 GHz, not a 2 GHz
514 * channel.
515 *
516 * This can be removed once ieee80211_channel_to_frequency() takes in a band.
517 */
518static bool chan_in_band(int chan, enum ieee80211_band band)
519{
520 int center_freq = ieee80211_channel_to_frequency(chan);
521
522 switch (band) {
523 case IEEE80211_BAND_2GHZ:
524 if (center_freq <= 2484)
525 return true;
526 return false;
527 case IEEE80211_BAND_5GHZ:
528 if (center_freq >= 5005)
529 return true;
530 return false;
531 default:
532 return false;
533 }
534}
535
536/*
537 * Some APs may send a country IE triplet for each channel they
538 * support and while this is completely overkill and silly we still
539 * need to support it. We avoid making a single rule for each channel
540 * though and to help us with this we use this helper to find the
541 * actual subband end channel. These type of country IE triplet
542 * scenerios are handled then, all yielding two regulaotry rules from
543 * parsing a country IE:
544 *
545 * [1]
546 * [2]
547 * [36]
548 * [40]
549 *
550 * [1]
551 * [2-4]
552 * [5-12]
553 * [36]
554 * [40-44]
555 *
556 * [1-4]
557 * [5-7]
558 * [36-44]
559 * [48-64]
560 *
561 * [36-36]
562 * [40-40]
563 * [44-44]
564 * [48-48]
565 * [52-52]
566 * [56-56]
567 * [60-60]
568 * [64-64]
569 * [100-100]
570 * [104-104]
571 * [108-108]
572 * [112-112]
573 * [116-116]
574 * [120-120]
575 * [124-124]
576 * [128-128]
577 * [132-132]
578 * [136-136]
579 * [140-140]
580 *
581 * Returns 0 if the IE has been found to be invalid in the middle
582 * somewhere.
583 */
584static int max_subband_chan(enum ieee80211_band band,
585 int orig_cur_chan,
586 int orig_end_channel,
587 s8 orig_max_power,
588 u8 **country_ie,
589 u8 *country_ie_len)
590{
591 u8 *triplets_start = *country_ie;
592 u8 len_at_triplet = *country_ie_len;
593 int end_subband_chan = orig_end_channel;
594
595 /*
596 * We'll deal with padding for the caller unless
597 * it's not immediate and we don't process any channels
598 */
599 if (*country_ie_len == 1) {
600 *country_ie += 1;
601 *country_ie_len -= 1;
602 return orig_end_channel;
603 }
604
605 /* Move to the next triplet and then start search */
606 *country_ie += 3;
607 *country_ie_len -= 3;
608
609 if (!chan_in_band(orig_cur_chan, band))
610 return 0;
611
612 while (*country_ie_len >= 3) {
613 int end_channel = 0;
614 struct ieee80211_country_ie_triplet *triplet =
615 (struct ieee80211_country_ie_triplet *) *country_ie;
616 int cur_channel = 0, next_expected_chan;
617
618 /* means last triplet is completely unrelated to this one */
619 if (triplet->ext.reg_extension_id >=
620 IEEE80211_COUNTRY_EXTENSION_ID) {
621 *country_ie -= 3;
622 *country_ie_len += 3;
623 break;
624 }
625
626 if (triplet->chans.first_channel == 0) {
627 *country_ie += 1;
628 *country_ie_len -= 1;
629 if (*country_ie_len != 0)
630 return 0;
631 break;
632 }
633
634 if (triplet->chans.num_channels == 0)
635 return 0;
636
637 /* Monotonically increasing channel order */
638 if (triplet->chans.first_channel <= end_subband_chan)
639 return 0;
640
641 if (!chan_in_band(triplet->chans.first_channel, band))
642 return 0;
643
644 /* 2 GHz */
645 if (triplet->chans.first_channel <= 14) {
646 end_channel = triplet->chans.first_channel +
647 triplet->chans.num_channels - 1;
648 }
649 else {
650 end_channel = triplet->chans.first_channel +
651 (4 * (triplet->chans.num_channels - 1));
652 }
653
654 if (!chan_in_band(end_channel, band))
655 return 0;
656
657 if (orig_max_power != triplet->chans.max_power) {
658 *country_ie -= 3;
659 *country_ie_len += 3;
660 break;
661 }
662
663 cur_channel = triplet->chans.first_channel;
664
665 /* The key is finding the right next expected channel */
666 if (band == IEEE80211_BAND_2GHZ)
667 next_expected_chan = end_subband_chan + 1;
668 else
669 next_expected_chan = end_subband_chan + 4;
670
671 if (cur_channel != next_expected_chan) {
672 *country_ie -= 3;
673 *country_ie_len += 3;
674 break;
675 }
676
677 end_subband_chan = end_channel;
678
679 /* Move to the next one */
680 *country_ie += 3;
681 *country_ie_len -= 3;
682
683 /*
684 * Padding needs to be dealt with if we processed
685 * some channels.
686 */
687 if (*country_ie_len == 1) {
688 *country_ie += 1;
689 *country_ie_len -= 1;
690 break;
691 }
692
693 /* If seen, the IE is invalid */
694 if (*country_ie_len == 2)
695 return 0;
696 }
697
698 if (end_subband_chan == orig_end_channel) {
699 *country_ie = triplets_start;
700 *country_ie_len = len_at_triplet;
701 return orig_end_channel;
702 }
703
704 return end_subband_chan;
705}
706
707/*
708 * Converts a country IE to a regulatory domain. A regulatory domain
709 * structure has a lot of information which the IE doesn't yet have,
710 * so for the other values we use upper max values as we will intersect
711 * with our userspace regulatory agent to get lower bounds.
712 */
713static struct ieee80211_regdomain *country_ie_2_rd(
714 enum ieee80211_band band,
715 u8 *country_ie,
716 u8 country_ie_len,
717 u32 *checksum)
718{
719 struct ieee80211_regdomain *rd = NULL;
720 unsigned int i = 0;
721 char alpha2[2];
722 u32 flags = 0;
723 u32 num_rules = 0, size_of_regd = 0;
724 u8 *triplets_start = NULL;
725 u8 len_at_triplet = 0;
726 /* the last channel we have registered in a subband (triplet) */
727 int last_sub_max_channel = 0;
728
729 *checksum = 0xDEADBEEF;
730
731 /* Country IE requirements */
732 BUG_ON(country_ie_len < IEEE80211_COUNTRY_IE_MIN_LEN ||
733 country_ie_len & 0x01);
734
735 alpha2[0] = country_ie[0];
736 alpha2[1] = country_ie[1];
737
738 /*
739 * Third octet can be:
740 * 'I' - Indoor
741 * 'O' - Outdoor
742 *
743 * anything else we assume means no restrictions
744 */
745 if (country_ie[2] == 'I')
746 flags = NL80211_RRF_NO_OUTDOOR;
747 else if (country_ie[2] == 'O')
748 flags = NL80211_RRF_NO_INDOOR;
749
750 country_ie += 3;
751 country_ie_len -= 3;
752
753 triplets_start = country_ie;
754 len_at_triplet = country_ie_len;
755
756 *checksum ^= ((flags ^ alpha2[0] ^ alpha2[1]) << 8);
757
758 /*
759 * We need to build a reg rule for each triplet, but first we must
760 * calculate the number of reg rules we will need. We will need one
761 * for each channel subband
762 */
763 while (country_ie_len >= 3) {
764 int end_channel = 0;
765 struct ieee80211_country_ie_triplet *triplet =
766 (struct ieee80211_country_ie_triplet *) country_ie;
767 int cur_sub_max_channel = 0, cur_channel = 0;
768
769 if (triplet->ext.reg_extension_id >=
770 IEEE80211_COUNTRY_EXTENSION_ID) {
771 country_ie += 3;
772 country_ie_len -= 3;
773 continue;
774 }
775
776 /*
777 * APs can add padding to make the length divisible
778 * by two, as required by the spec.
779 */
780 if (triplet->chans.first_channel == 0) {
781 country_ie++;
782 country_ie_len--;
783 /* This is expected to be at the very end only */
784 if (country_ie_len != 0)
785 return NULL;
786 break;
787 }
788
789 if (triplet->chans.num_channels == 0)
790 return NULL;
791
792 if (!chan_in_band(triplet->chans.first_channel, band))
793 return NULL;
794
795 /* 2 GHz */
796 if (band == IEEE80211_BAND_2GHZ)
797 end_channel = triplet->chans.first_channel +
798 triplet->chans.num_channels - 1;
799 else
800 /*
801 * 5 GHz -- For example in country IEs if the first
802 * channel given is 36 and the number of channels is 4
803 * then the individual channel numbers defined for the
804 * 5 GHz PHY by these parameters are: 36, 40, 44, and 48
805 * and not 36, 37, 38, 39.
806 *
807 * See: http://tinyurl.com/11d-clarification
808 */
809 end_channel = triplet->chans.first_channel +
810 (4 * (triplet->chans.num_channels - 1));
811
812 cur_channel = triplet->chans.first_channel;
813
814 /*
815 * Enhancement for APs that send a triplet for every channel
816 * or for whatever reason send triplets with multiple channels
817 * separated when in fact they should be together.
818 */
819 end_channel = max_subband_chan(band,
820 cur_channel,
821 end_channel,
822 triplet->chans.max_power,
823 &country_ie,
824 &country_ie_len);
825 if (!end_channel)
826 return NULL;
827
828 if (!chan_in_band(end_channel, band))
829 return NULL;
830
831 cur_sub_max_channel = end_channel;
832
833 /* Basic sanity check */
834 if (cur_sub_max_channel < cur_channel)
835 return NULL;
836
837 /*
838 * Do not allow overlapping channels. Also channels
839 * passed in each subband must be monotonically
840 * increasing
841 */
842 if (last_sub_max_channel) {
843 if (cur_channel <= last_sub_max_channel)
844 return NULL;
845 if (cur_sub_max_channel <= last_sub_max_channel)
846 return NULL;
847 }
848
849 /*
850 * When dot11RegulatoryClassesRequired is supported
851 * we can throw ext triplets as part of this soup,
852 * for now we don't care when those change as we
853 * don't support them
854 */
855 *checksum ^= ((cur_channel ^ cur_sub_max_channel) << 8) |
856 ((cur_sub_max_channel ^ cur_sub_max_channel) << 16) |
857 ((triplet->chans.max_power ^ cur_sub_max_channel) << 24);
858
859 last_sub_max_channel = cur_sub_max_channel;
860
861 num_rules++;
862
863 if (country_ie_len >= 3) {
864 country_ie += 3;
865 country_ie_len -= 3;
866 }
867
868 /*
869 * Note: this is not an IEEE requirement but
870 * simply a memory requirement
871 */
872 if (num_rules > NL80211_MAX_SUPP_REG_RULES)
873 return NULL;
874 }
875
876 country_ie = triplets_start;
877 country_ie_len = len_at_triplet;
878
879 size_of_regd = sizeof(struct ieee80211_regdomain) +
880 (num_rules * sizeof(struct ieee80211_reg_rule));
881
882 rd = kzalloc(size_of_regd, GFP_KERNEL);
883 if (!rd)
884 return NULL;
885
886 rd->n_reg_rules = num_rules;
887 rd->alpha2[0] = alpha2[0];
888 rd->alpha2[1] = alpha2[1];
889
890 /* This time around we fill in the rd */
891 while (country_ie_len >= 3) {
892 int end_channel = 0;
893 struct ieee80211_country_ie_triplet *triplet =
894 (struct ieee80211_country_ie_triplet *) country_ie;
895 struct ieee80211_reg_rule *reg_rule = NULL;
896 struct ieee80211_freq_range *freq_range = NULL;
897 struct ieee80211_power_rule *power_rule = NULL;
898
899 /*
900 * Must parse if dot11RegulatoryClassesRequired is true;
901 * we don't support this yet
902 */
903 if (triplet->ext.reg_extension_id >=
904 IEEE80211_COUNTRY_EXTENSION_ID) {
905 country_ie += 3;
906 country_ie_len -= 3;
907 continue;
908 }
909
910 if (triplet->chans.first_channel == 0) {
911 country_ie++;
912 country_ie_len--;
913 break;
914 }
915
916 reg_rule = &rd->reg_rules[i];
917 freq_range = &reg_rule->freq_range;
918 power_rule = &reg_rule->power_rule;
919
920 reg_rule->flags = flags;
921
922 /* 2 GHz */
923 if (band == IEEE80211_BAND_2GHZ)
924 end_channel = triplet->chans.first_channel +
925 triplet->chans.num_channels - 1;
926 else
927 end_channel = triplet->chans.first_channel +
928 (4 * (triplet->chans.num_channels - 1));
929
930 end_channel = max_subband_chan(band,
931 triplet->chans.first_channel,
932 end_channel,
933 triplet->chans.max_power,
934 &country_ie,
935 &country_ie_len);
936
937 /*
938 * The +10 is because the regulatory domain expects
939 * the actual band edge, not the center frequency, for
940 * its start and end freqs, assuming 20 MHz bandwidth on
941 * the channels passed
942 */
943 freq_range->start_freq_khz =
944 MHZ_TO_KHZ(ieee80211_channel_to_frequency(
945 triplet->chans.first_channel) - 10);
946 freq_range->end_freq_khz =
947 MHZ_TO_KHZ(ieee80211_channel_to_frequency(
948 end_channel) + 10);
949
950 /*
951 * These are large arbitrary values we use to intersect later.
952 * Increment this if we ever support >= 40 MHz channels
953 * in IEEE 802.11
954 */
955 freq_range->max_bandwidth_khz = MHZ_TO_KHZ(40);
956 power_rule->max_antenna_gain = DBI_TO_MBI(100);
957 power_rule->max_eirp = DBM_TO_MBM(triplet->chans.max_power);
958
959 i++;
960
961 if (country_ie_len >= 3) {
962 country_ie += 3;
963 country_ie_len -= 3;
964 }
965
966 BUG_ON(i > NL80211_MAX_SUPP_REG_RULES);
967 }
968
969 return rd;
970}
971
972
973/*
974 * Helper for regdom_intersect(), this does the real 482 * Helper for regdom_intersect(), this does the real
975 * mathematical intersection fun 483 * mathematical intersection fun
976 */ 484 */
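
For reference, the frequency arithmetic inside the parser removed above: ieee80211_channel_to_frequency() places 5 GHz channel n at 5000 + 5n MHz, a 5 GHz triplet advances 4 channel numbers per listed channel, and the resulting rule is widened by 10 MHz on each side to reach the band edges of 20 MHz channels. A standalone recap of that math; chan_to_mhz_5ghz mirrors what the kernel helper returns for 5 GHz channels:

#include <stdio.h>

/* 5 GHz center frequency: 5000 + 5 * channel (MHz). */
static int chan_to_mhz_5ghz(int chan)
{
	return 5000 + 5 * chan;
}

int main(void)
{
	int first_channel = 36, num_channels = 4;
	/* 5 GHz channels in a triplet are spaced 4 numbers apart. */
	int end_channel = first_channel + 4 * (num_channels - 1);

	printf("channels %d-%d: rule %d-%d MHz\n",
	       first_channel, end_channel,
	       chan_to_mhz_5ghz(first_channel) - 10,	/* band edge */
	       chan_to_mhz_5ghz(end_channel) + 10);	/* band edge */
	/* -> channels 36-48: rule 5170-5250 MHz */
	return 0;
}
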
@@ -1191,7 +699,6 @@ static int freq_reg_info_regd(struct wiphy *wiphy,
1191 699
1192 return -EINVAL; 700 return -EINVAL;
1193} 701}
1194EXPORT_SYMBOL(freq_reg_info);
1195 702
1196int freq_reg_info(struct wiphy *wiphy, 703int freq_reg_info(struct wiphy *wiphy,
1197 u32 center_freq, 704 u32 center_freq,
@@ -1205,6 +712,7 @@ int freq_reg_info(struct wiphy *wiphy,
1205 reg_rule, 712 reg_rule,
1206 NULL); 713 NULL);
1207} 714}
715EXPORT_SYMBOL(freq_reg_info);
1208 716
1209/* 717/*
1210 * Note that right now we assume the desired channel bandwidth 718 * Note that right now we assume the desired channel bandwidth
@@ -1243,41 +751,8 @@ static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band,
1243 desired_bw_khz, 751 desired_bw_khz,
1244 &reg_rule); 752 &reg_rule);
1245 753
1246 if (r) { 754 if (r)
1247 /*
1248 * This means no regulatory rule was found in the country IE
1249 * with a frequency range on the center_freq's band, since
1250 * IEEE-802.11 allows for a country IE to have a subset of the
1251 * regulatory information provided in a country we ignore
1252 * disabling the channel unless at least one reg rule was
1253 * found on the center_freq's band. For details see this
1254 * clarification:
1255 *
1256 * http://tinyurl.com/11d-clarification
1257 */
1258 if (r == -ERANGE &&
1259 last_request->initiator ==
1260 NL80211_REGDOM_SET_BY_COUNTRY_IE) {
1261 REG_DBG_PRINT("cfg80211: Leaving channel %d MHz "
1262 "intact on %s - no rule found in band on "
1263 "Country IE\n",
1264 chan->center_freq, wiphy_name(wiphy));
1265 } else {
1266 /*
1267 * In this case we know the country IE has at least one reg rule
1268 * for the band so we respect its band definitions
1269 */
1270 if (last_request->initiator ==
1271 NL80211_REGDOM_SET_BY_COUNTRY_IE)
1272 REG_DBG_PRINT("cfg80211: Disabling "
1273 "channel %d MHz on %s due to "
1274 "Country IE\n",
1275 chan->center_freq, wiphy_name(wiphy));
1276 flags |= IEEE80211_CHAN_DISABLED;
1277 chan->flags = flags;
1278 }
1279 return; 755 return;
1280 }
1281 756
1282 power_rule = &reg_rule->power_rule; 757 power_rule = &reg_rule->power_rule;
1283 freq_range = &reg_rule->freq_range; 758 freq_range = &reg_rule->freq_range;
@@ -1831,6 +1306,7 @@ static void reg_process_hint(struct regulatory_request *reg_request)
1831{ 1306{
1832 int r = 0; 1307 int r = 0;
1833 struct wiphy *wiphy = NULL; 1308 struct wiphy *wiphy = NULL;
1309 enum nl80211_reg_initiator initiator = reg_request->initiator;
1834 1310
1835 BUG_ON(!reg_request->alpha2); 1311 BUG_ON(!reg_request->alpha2);
1836 1312
@@ -1850,7 +1326,7 @@ static void reg_process_hint(struct regulatory_request *reg_request)
1850 /* This is required so that the orig_* parameters are saved */ 1326 /* This is required so that the orig_* parameters are saved */
1851 if (r == -EALREADY && wiphy && 1327 if (r == -EALREADY && wiphy &&
1852 wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY) 1328 wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY)
1853 wiphy_update_regulatory(wiphy, reg_request->initiator); 1329 wiphy_update_regulatory(wiphy, initiator);
1854out: 1330out:
1855 mutex_unlock(&reg_mutex); 1331 mutex_unlock(&reg_mutex);
1856 mutex_unlock(&cfg80211_mutex); 1332 mutex_unlock(&cfg80211_mutex);
@@ -2008,35 +1484,6 @@ int regulatory_hint(struct wiphy *wiphy, const char *alpha2)
2008} 1484}
2009EXPORT_SYMBOL(regulatory_hint); 1485EXPORT_SYMBOL(regulatory_hint);
2010 1486
2011/* Caller must hold reg_mutex */
2012static bool reg_same_country_ie_hint(struct wiphy *wiphy,
2013 u32 country_ie_checksum)
2014{
2015 struct wiphy *request_wiphy;
2016
2017 assert_reg_lock();
2018
2019 if (unlikely(last_request->initiator !=
2020 NL80211_REGDOM_SET_BY_COUNTRY_IE))
2021 return false;
2022
2023 request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx);
2024
2025 if (!request_wiphy)
2026 return false;
2027
2028 if (likely(request_wiphy != wiphy))
2029 return !country_ie_integrity_changes(country_ie_checksum);
2030 /*
2031 * We should not have let these through at this point; they
2032 * should have been picked up earlier by the first alpha2 check
2033 * on the device
2034 */
2035 if (WARN_ON(!country_ie_integrity_changes(country_ie_checksum)))
2036 return true;
2037 return false;
2038}
2039
2040/* 1487/*
2041 * We hold wdev_lock() here so we cannot hold cfg80211_mutex() and 1488 * We hold wdev_lock() here so we cannot hold cfg80211_mutex() and
2042 * therefore cannot iterate over the rdev list here. 1489 * therefore cannot iterate over the rdev list here.
@@ -2046,9 +1493,7 @@ void regulatory_hint_11d(struct wiphy *wiphy,
2046 u8 *country_ie, 1493 u8 *country_ie,
2047 u8 country_ie_len) 1494 u8 country_ie_len)
2048{ 1495{
2049 struct ieee80211_regdomain *rd = NULL;
2050 char alpha2[2]; 1496 char alpha2[2];
2051 u32 checksum = 0;
2052 enum environment_cap env = ENVIRON_ANY; 1497 enum environment_cap env = ENVIRON_ANY;
2053 struct regulatory_request *request; 1498 struct regulatory_request *request;
2054 1499
@@ -2064,14 +1509,6 @@ void regulatory_hint_11d(struct wiphy *wiphy,
2064 if (country_ie_len < IEEE80211_COUNTRY_IE_MIN_LEN) 1509 if (country_ie_len < IEEE80211_COUNTRY_IE_MIN_LEN)
2065 goto out; 1510 goto out;
2066 1511
2067 /*
2068 * Pending country IE processing, this can happen after we
2069 * call CRDA and wait for a response if a beacon was received before
2070 * we were able to process the last regulatory_hint_11d() call
2071 */
2072 if (country_ie_regdomain)
2073 goto out;
2074
2075 alpha2[0] = country_ie[0]; 1512 alpha2[0] = country_ie[0];
2076 alpha2[1] = country_ie[1]; 1513 alpha2[1] = country_ie[1];
2077 1514
@@ -2090,39 +1527,14 @@ void regulatory_hint_11d(struct wiphy *wiphy,
2090 wiphy_idx_valid(last_request->wiphy_idx))) 1527 wiphy_idx_valid(last_request->wiphy_idx)))
2091 goto out; 1528 goto out;
2092 1529
2093 rd = country_ie_2_rd(band, country_ie, country_ie_len, &checksum);
2094 if (!rd) {
2095 REG_DBG_PRINT("cfg80211: Ignoring bogus country IE\n");
2096 goto out;
2097 }
2098
2099 /*
2100 * This will not happen right now but we leave it here for the
2101 * future when we want to add suspend/resume support and having
2102 * the user move to another country after doing so, or having the user
2103 * move to another AP. Right now we just trust the first AP.
2104 *
2105 * If we hit this before we add this support we want to be informed of
2106 * it as it would indicate a mistake in the current design
2107 */
2108 if (WARN_ON(reg_same_country_ie_hint(wiphy, checksum)))
2109 goto free_rd_out;
2110
2111 request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL); 1530 request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL);
2112 if (!request) 1531 if (!request)
2113 goto free_rd_out; 1532 goto out;
2114
2115 /*
2116 * We keep this around for when CRDA comes back with a response so
2117 * we can intersect with that
2118 */
2119 country_ie_regdomain = rd;
2120 1533
2121 request->wiphy_idx = get_wiphy_idx(wiphy); 1534 request->wiphy_idx = get_wiphy_idx(wiphy);
2122 request->alpha2[0] = rd->alpha2[0]; 1535 request->alpha2[0] = alpha2[0];
2123 request->alpha2[1] = rd->alpha2[1]; 1536 request->alpha2[1] = alpha2[1];
2124 request->initiator = NL80211_REGDOM_SET_BY_COUNTRY_IE; 1537 request->initiator = NL80211_REGDOM_SET_BY_COUNTRY_IE;
2125 request->country_ie_checksum = checksum;
2126 request->country_ie_env = env; 1538 request->country_ie_env = env;
2127 1539
2128 mutex_unlock(&reg_mutex); 1540 mutex_unlock(&reg_mutex);
@@ -2131,8 +1543,6 @@ void regulatory_hint_11d(struct wiphy *wiphy,
2131 1543
2132 return; 1544 return;
2133 1545
2134free_rd_out:
2135 kfree(rd);
2136out: 1546out:
2137 mutex_unlock(&reg_mutex); 1547 mutex_unlock(&reg_mutex);
2138} 1548}
@@ -2383,33 +1793,6 @@ static void print_regdomain_info(const struct ieee80211_regdomain *rd)
2383 print_rd_rules(rd); 1793 print_rd_rules(rd);
2384} 1794}
2385 1795
2386#ifdef CONFIG_CFG80211_REG_DEBUG
2387static void reg_country_ie_process_debug(
2388 const struct ieee80211_regdomain *rd,
2389 const struct ieee80211_regdomain *country_ie_regdomain,
2390 const struct ieee80211_regdomain *intersected_rd)
2391{
2392 printk(KERN_DEBUG "cfg80211: Received country IE:\n");
2393 print_regdomain_info(country_ie_regdomain);
2394 printk(KERN_DEBUG "cfg80211: CRDA thinks this should be applied:\n");
2395 print_regdomain_info(rd);
2396 if (intersected_rd) {
2397 printk(KERN_DEBUG "cfg80211: We intersect both of these "
2398 "and get:\n");
2399 print_regdomain_info(intersected_rd);
2400 return;
2401 }
2402 printk(KERN_DEBUG "cfg80211: Intersection between both failed\n");
2403}
2404#else
2405static inline void reg_country_ie_process_debug(
2406 const struct ieee80211_regdomain *rd,
2407 const struct ieee80211_regdomain *country_ie_regdomain,
2408 const struct ieee80211_regdomain *intersected_rd)
2409{
2410}
2411#endif
2412
2413/* Takes ownership of rd only if it doesn't fail */ 1796/* Takes ownership of rd only if it doesn't fail */
2414static int __set_regdom(const struct ieee80211_regdomain *rd) 1797static int __set_regdom(const struct ieee80211_regdomain *rd)
2415{ 1798{
@@ -2521,34 +1904,6 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
2521 return 0; 1904 return 0;
2522 } 1905 }
2523 1906
2524 /*
2525 * Country IE requests are handled a bit differently; we intersect
2526 * the country IE rd with what CRDA believes that country should have
2527 */
2528
2529 /*
2530 * Userspace could have sent two replies with only
2531 * one kernel request. By the second reply we would have
2532 * already processed and consumed the country_ie_regdomain.
2533 */
2534 if (!country_ie_regdomain)
2535 return -EALREADY;
2536 BUG_ON(rd == country_ie_regdomain);
2537
2538 /*
2539 * Intersect what CRDA returned and what we
2540 * had built from the Country IE received
2541 */
2542
2543 intersected_rd = regdom_intersect(rd, country_ie_regdomain);
2544
2545 reg_country_ie_process_debug(rd,
2546 country_ie_regdomain,
2547 intersected_rd);
2548
2549 kfree(country_ie_regdomain);
2550 country_ie_regdomain = NULL;
2551
2552 if (!intersected_rd) 1907 if (!intersected_rd)
2553 return -EINVAL; 1908 return -EINVAL;
2554 1909
@@ -2630,7 +1985,7 @@ out:
2630 mutex_unlock(&reg_mutex); 1985 mutex_unlock(&reg_mutex);
2631} 1986}
2632 1987
2633int regulatory_init(void) 1988int __init regulatory_init(void)
2634{ 1989{
2635 int err = 0; 1990 int err = 0;
2636 1991
@@ -2676,7 +2031,7 @@ int regulatory_init(void)
2676 return 0; 2031 return 0;
2677} 2032}
2678 2033
2679void regulatory_exit(void) 2034void /* __init_or_exit */ regulatory_exit(void)
2680{ 2035{
2681 struct regulatory_request *reg_request, *tmp; 2036 struct regulatory_request *reg_request, *tmp;
2682 struct reg_beacon *reg_beacon, *btmp; 2037 struct reg_beacon *reg_beacon, *btmp;
@@ -2688,9 +2043,6 @@ void regulatory_exit(void)
2688 2043
2689 reset_regdomains(); 2044 reset_regdomains();
2690 2045
2691 kfree(country_ie_regdomain);
2692 country_ie_regdomain = NULL;
2693
2694 kfree(last_request); 2046 kfree(last_request);
2695 2047
2696 platform_device_unregister(reg_pdev); 2048 platform_device_unregister(reg_pdev);
diff --git a/net/wireless/reg.h b/net/wireless/reg.h
index b26224a9f3bc..c4695d07af23 100644
--- a/net/wireless/reg.h
+++ b/net/wireless/reg.h
@@ -10,7 +10,7 @@ int regulatory_hint_user(const char *alpha2);
10 10
11void reg_device_remove(struct wiphy *wiphy); 11void reg_device_remove(struct wiphy *wiphy);
12 12
13int regulatory_init(void); 13int __init regulatory_init(void);
14void regulatory_exit(void); 14void regulatory_exit(void);
15 15
16int set_regdom(const struct ieee80211_regdomain *rd); 16int set_regdom(const struct ieee80211_regdomain *rd);
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 58401d246bda..5ca8c7180141 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -275,6 +275,7 @@ struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
275{ 275{
276 struct cfg80211_registered_device *dev = wiphy_to_dev(wiphy); 276 struct cfg80211_registered_device *dev = wiphy_to_dev(wiphy);
277 struct cfg80211_internal_bss *bss, *res = NULL; 277 struct cfg80211_internal_bss *bss, *res = NULL;
278 unsigned long now = jiffies;
278 279
279 spin_lock_bh(&dev->bss_lock); 280 spin_lock_bh(&dev->bss_lock);
280 281
@@ -283,6 +284,10 @@ struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
283 continue; 284 continue;
284 if (channel && bss->pub.channel != channel) 285 if (channel && bss->pub.channel != channel)
285 continue; 286 continue;
287 /* Don't get expired BSS structs */
288 if (time_after(now, bss->ts + IEEE80211_SCAN_RESULT_EXPIRE) &&
289 !atomic_read(&bss->hold))
290 continue;
286 if (is_bss(&bss->pub, bssid, ssid, ssid_len)) { 291 if (is_bss(&bss->pub, bssid, ssid, ssid_len)) {
287 res = bss; 292 res = bss;
288 kref_get(&res->ref); 293 kref_get(&res->ref);
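
The new expiry test leans on time_after(), which stays correct across jiffies wraparound because it compares through a signed difference of unsigned counters, valid while the two timestamps are less than half the counter range apart. A standalone sketch of the idiom (cf. the macros in include/linux/jiffies.h):

#include <stdio.h>

typedef unsigned long jiffies_t;

/* Mirrors time_after(a, b): true if a is later than b. */
static int after(jiffies_t a, jiffies_t b)
{
	return (long)(b - a) < 0;
}

int main(void)
{
	jiffies_t expire = 100;		/* stand-in expiry window */
	jiffies_t ts = (jiffies_t)-50;	/* stamped just before wraparound */
	jiffies_t now = 80;		/* counter has since wrapped */

	/* 130 ticks have elapsed, more than the 100-tick window, and the
	 * signed-difference trick reports that despite the wraparound. */
	printf("expired: %d\n", after(now, ts + expire));	/* -> 1 */
	return 0;
}
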
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 72222f0074db..a8c2d6b877ae 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -35,7 +35,7 @@ struct cfg80211_conn {
35 bool auto_auth, prev_bssid_valid; 35 bool auto_auth, prev_bssid_valid;
36}; 36};
37 37
38bool cfg80211_is_all_idle(void) 38static bool cfg80211_is_all_idle(void)
39{ 39{
40 struct cfg80211_registered_device *rdev; 40 struct cfg80211_registered_device *rdev;
41 struct wireless_dev *wdev; 41 struct wireless_dev *wdev;
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 3416373a9c0c..0c8a1e8b7690 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -770,8 +770,8 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
770 return -EOPNOTSUPP; 770 return -EOPNOTSUPP;
771 771
772 /* if it's part of a bridge, reject changing type to station/ibss */ 772 /* if it's part of a bridge, reject changing type to station/ibss */
773 if (dev->br_port && (ntype == NL80211_IFTYPE_ADHOC || 773 if ((dev->priv_flags & IFF_BRIDGE_PORT) &&
774 ntype == NL80211_IFTYPE_STATION)) 774 (ntype == NL80211_IFTYPE_ADHOC || ntype == NL80211_IFTYPE_STATION))
775 return -EBUSY; 775 return -EBUSY;
776 776
777 if (ntype != otype) { 777 if (ntype != otype) {
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 96342993cf93..bb5e0a5ecfa1 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -829,7 +829,7 @@ int cfg80211_wext_siwtxpower(struct net_device *dev,
829{ 829{
830 struct wireless_dev *wdev = dev->ieee80211_ptr; 830 struct wireless_dev *wdev = dev->ieee80211_ptr;
831 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 831 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
832 enum tx_power_setting type; 832 enum nl80211_tx_power_setting type;
833 int dbm = 0; 833 int dbm = 0;
834 834
835 if ((data->txpower.flags & IW_TXPOW_TYPE) != IW_TXPOW_DBM) 835 if ((data->txpower.flags & IW_TXPOW_TYPE) != IW_TXPOW_DBM)
@@ -852,7 +852,7 @@ int cfg80211_wext_siwtxpower(struct net_device *dev,
852 if (data->txpower.value < 0) 852 if (data->txpower.value < 0)
853 return -EINVAL; 853 return -EINVAL;
854 dbm = data->txpower.value; 854 dbm = data->txpower.value;
855 type = TX_POWER_FIXED; 855 type = NL80211_TX_POWER_FIXED;
856 /* TODO: do regulatory check! */ 856 /* TODO: do regulatory check! */
857 } else { 857 } else {
858 /* 858 /*
@@ -860,10 +860,10 @@ int cfg80211_wext_siwtxpower(struct net_device *dev,
860 * passed in from userland. 860 * passed in from userland.
861 */ 861 */
862 if (data->txpower.value < 0) { 862 if (data->txpower.value < 0) {
863 type = TX_POWER_AUTOMATIC; 863 type = NL80211_TX_POWER_AUTOMATIC;
864 } else { 864 } else {
865 dbm = data->txpower.value; 865 dbm = data->txpower.value;
866 type = TX_POWER_LIMITED; 866 type = NL80211_TX_POWER_LIMITED;
867 } 867 }
868 } 868 }
869 } else { 869 } else {
@@ -872,7 +872,7 @@ int cfg80211_wext_siwtxpower(struct net_device *dev,
872 return 0; 872 return 0;
873 } 873 }
874 874
875 return rdev->ops->set_tx_power(wdev->wiphy, type, dbm); 875 return rdev->ops->set_tx_power(wdev->wiphy, type, DBM_TO_MBM(dbm));
876} 876}
877EXPORT_SYMBOL_GPL(cfg80211_wext_siwtxpower); 877EXPORT_SYMBOL_GPL(cfg80211_wext_siwtxpower);
878 878
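
wext keeps speaking dBm to userspace, so the updated call wraps the value in DBM_TO_MBM() to match the mBm-based driver op introduced above. The conversion is a plain factor of 100; a standalone check, with the macros mirroring the helpers in include/net/cfg80211.h:

#include <stdio.h>

/* Definitions mirror DBM_TO_MBM()/MBM_TO_DBM() in cfg80211.h. */
#define DBM_TO_MBM(dbm)	((dbm) * 100)
#define MBM_TO_DBM(mbm)	((mbm) / 100)

int main(void)
{
	int dbm = 20;			/* e.g. from SIOCSIWTXPOW */
	int mbm = DBM_TO_MBM(dbm);	/* 2000 mBm goes to the driver */

	printf("%d dBm = %d mBm = %d dBm\n", dbm, mbm, MBM_TO_DBM(mbm));
	return 0;	/* -> 20 dBm = 2000 mBm = 20 dBm */
}
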
@@ -1471,6 +1471,7 @@ int cfg80211_wext_siwpmksa(struct net_device *dev,
1471 return -EOPNOTSUPP; 1471 return -EOPNOTSUPP;
1472 } 1472 }
1473} 1473}
1474EXPORT_SYMBOL_GPL(cfg80211_wext_siwpmksa);
1474 1475
1475static const iw_handler cfg80211_handlers[] = { 1476static const iw_handler cfg80211_handlers[] = {
1476 [IW_IOCTL_IDX(SIOCGIWNAME)] = (iw_handler) cfg80211_wext_giwname, 1477 [IW_IOCTL_IDX(SIOCGIWNAME)] = (iw_handler) cfg80211_wext_giwname,
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 4bf27d901333..2b3ed7ad4933 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1594,8 +1594,8 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
1594 1594
1595 /* Try to instantiate a bundle */ 1595 /* Try to instantiate a bundle */
1596 err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family); 1596 err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
1597 if (err < 0) { 1597 if (err <= 0) {
1598 if (err != -EAGAIN) 1598 if (err != 0 && err != -EAGAIN)
1599 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR); 1599 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
1600 return ERR_PTR(err); 1600 return ERR_PTR(err);
1601 } 1601 }
@@ -1678,6 +1678,13 @@ xfrm_bundle_lookup(struct net *net, struct flowi *fl, u16 family, u8 dir,
1678 goto make_dummy_bundle; 1678 goto make_dummy_bundle;
1679 dst_hold(&xdst->u.dst); 1679 dst_hold(&xdst->u.dst);
1680 return oldflo; 1680 return oldflo;
1681 } else if (new_xdst == NULL) {
1682 num_xfrms = 0;
1683 if (oldflo == NULL)
1684 goto make_dummy_bundle;
1685 xdst->num_xfrms = 0;
1686 dst_hold(&xdst->u.dst);
1687 return oldflo;
1681 } 1688 }
1682 1689
1683 /* Kill the previous bundle */ 1690 /* Kill the previous bundle */
@@ -1760,6 +1767,10 @@ restart:
1760 xfrm_pols_put(pols, num_pols); 1767 xfrm_pols_put(pols, num_pols);
1761 err = PTR_ERR(xdst); 1768 err = PTR_ERR(xdst);
1762 goto dropdst; 1769 goto dropdst;
1770 } else if (xdst == NULL) {
1771 num_xfrms = 0;
1772 drop_pols = num_pols;
1773 goto no_transform;
1763 } 1774 }
1764 1775
1765 spin_lock_bh(&xfrm_policy_sk_bundle_lock); 1776 spin_lock_bh(&xfrm_policy_sk_bundle_lock);
@@ -2300,7 +2311,8 @@ int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first,
2300 return 0; 2311 return 0;
2301 if (xdst->xfrm_genid != dst->xfrm->genid) 2312 if (xdst->xfrm_genid != dst->xfrm->genid)
2302 return 0; 2313 return 0;
2303 if (xdst->policy_genid != atomic_read(&xdst->pols[0]->genid)) 2314 if (xdst->num_pols > 0 &&
2315 xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
2304 return 0; 2316 return 0;
2305 2317
2306 if (strict && fl && 2318 if (strict && fl &&
@@ -2480,7 +2492,8 @@ static int __net_init xfrm_statistics_init(struct net *net)
2480 int rv; 2492 int rv;
2481 2493
2482 if (snmp_mib_init((void __percpu **)net->mib.xfrm_statistics, 2494 if (snmp_mib_init((void __percpu **)net->mib.xfrm_statistics,
2483 sizeof(struct linux_xfrm_mib)) < 0) 2495 sizeof(struct linux_xfrm_mib),
2496 __alignof__(struct linux_xfrm_mib)) < 0)
2484 return -ENOMEM; 2497 return -ENOMEM;
2485 rv = xfrm_proc_init(net); 2498 rv = xfrm_proc_init(net);
2486 if (rv < 0) 2499 if (rv < 0)