Diffstat (limited to 'net')
-rw-r--r-- net/8021q/vlan.c | 28
-rw-r--r-- net/8021q/vlan.h | 2
-rw-r--r-- net/8021q/vlan_core.c | 85
-rw-r--r-- net/8021q/vlan_dev.c | 223
-rw-r--r-- net/8021q/vlanproc.c | 2
-rw-r--r-- net/9p/client.c | 11
-rw-r--r-- net/9p/trans_common.c | 4
-rw-r--r-- net/9p/util.c | 2
-rw-r--r-- net/atm/br2684.c | 2
-rw-r--r-- net/atm/lec.c | 4
-rw-r--r-- net/atm/lec.h | 2
-rw-r--r-- net/ax25/af_ax25.c | 16
-rw-r--r-- net/ax25/ax25_iface.c | 3
-rw-r--r-- net/batman-adv/gateway_client.c | 259
-rw-r--r-- net/batman-adv/gateway_client.h | 2
-rw-r--r-- net/batman-adv/icmp_socket.c | 18
-rw-r--r-- net/batman-adv/originator.c | 38
-rw-r--r-- net/batman-adv/originator.h | 1
-rw-r--r-- net/batman-adv/routing.c | 378
-rw-r--r-- net/batman-adv/send.c | 19
-rw-r--r-- net/batman-adv/soft-interface.c | 130
-rw-r--r-- net/batman-adv/types.h | 7
-rw-r--r-- net/batman-adv/unicast.c | 2
-rw-r--r-- net/batman-adv/vis.c | 91
-rw-r--r-- net/bluetooth/hci_core.c | 2
-rw-r--r-- net/bluetooth/l2cap_sock.c | 2
-rw-r--r-- net/bridge/br.c | 1
-rw-r--r-- net/bridge/br_device.c | 41
-rw-r--r-- net/bridge/br_fdb.c | 313
-rw-r--r-- net/bridge/br_if.c | 83
-rw-r--r-- net/bridge/br_input.c | 5
-rw-r--r-- net/bridge/br_ioctl.c | 42
-rw-r--r-- net/bridge/br_multicast.c | 12
-rw-r--r-- net/bridge/br_netfilter.c | 10
-rw-r--r-- net/bridge/br_netlink.c | 53
-rw-r--r-- net/bridge/br_notify.c | 6
-rw-r--r-- net/bridge/br_private.h | 19
-rw-r--r-- net/bridge/br_private_stp.h | 13
-rw-r--r-- net/bridge/br_stp.c | 48
-rw-r--r-- net/bridge/br_stp_if.c | 21
-rw-r--r-- net/bridge/br_sysfs_br.c | 39
-rw-r--r-- net/bridge/br_sysfs_if.c | 26
-rw-r--r-- net/caif/caif_config_util.c | 6
-rw-r--r-- net/caif/caif_dev.c | 56
-rw-r--r-- net/caif/caif_socket.c | 37
-rw-r--r-- net/caif/cfcnfg.c | 2
-rw-r--r-- net/caif/cfctrl.c | 75
-rw-r--r-- net/caif/cfdgml.c | 13
-rw-r--r-- net/caif/cffrml.c | 8
-rw-r--r-- net/caif/cfmuxl.c | 49
-rw-r--r-- net/caif/cfpkt_skbuff.c | 178
-rw-r--r-- net/caif/cfserl.c | 7
-rw-r--r-- net/caif/cfsrvl.c | 7
-rw-r--r-- net/caif/cfutill.c | 7
-rw-r--r-- net/caif/cfveil.c | 5
-rw-r--r-- net/caif/cfvidl.c | 5
-rw-r--r-- net/can/af_can.c | 52
-rw-r--r-- net/can/bcm.c | 2
-rw-r--r-- net/ceph/Kconfig | 1
-rw-r--r-- net/ceph/auth.c | 8
-rw-r--r-- net/ceph/auth_x.c | 8
-rw-r--r-- net/ceph/ceph_common.c | 112
-rw-r--r-- net/ceph/crypto.c | 73
-rw-r--r-- net/ceph/crypto.h | 4
-rw-r--r-- net/ceph/mon_client.c | 2
-rw-r--r-- net/ceph/osd_client.c | 14
-rw-r--r-- net/core/dev.c | 75
-rw-r--r-- net/core/ethtool.c | 106
-rw-r--r-- net/core/filter.c | 2
-rw-r--r-- net/core/link_watch.c | 2
-rw-r--r-- net/core/net_namespace.c | 12
-rw-r--r-- net/core/netpoll.c | 2
-rw-r--r-- net/core/pktgen.c | 2
-rw-r--r-- net/core/rtnetlink.c | 4
-rw-r--r-- net/core/skbuff.c | 2
-rw-r--r-- net/core/sock.c | 10
-rw-r--r-- net/dccp/ipv6.c | 8
-rw-r--r-- net/dccp/output.c | 2
-rw-r--r-- net/decnet/dn_table.c | 4
-rw-r--r-- net/dsa/mv88e6131.c | 25
-rw-r--r-- net/dsa/mv88e6xxx.h | 2
-rw-r--r-- net/dsa/slave.c | 1
-rw-r--r-- net/econet/af_econet.c | 8
-rw-r--r-- net/ieee802154/Makefile | 2
-rw-r--r-- net/ipv4/af_inet.c | 4
-rw-r--r-- net/ipv4/ah4.c | 7
-rw-r--r-- net/ipv4/cipso_ipv4.c | 8
-rw-r--r-- net/ipv4/devinet.c | 1
-rw-r--r-- net/ipv4/esp4.c | 7
-rw-r--r-- net/ipv4/fib_frontend.c | 16
-rw-r--r-- net/ipv4/fib_trie.c | 112
-rw-r--r-- net/ipv4/icmp.c | 14
-rw-r--r-- net/ipv4/inet_connection_sock.c | 23
-rw-r--r-- net/ipv4/inet_diag.c | 2
-rw-r--r-- net/ipv4/inet_lro.c | 4
-rw-r--r-- net/ipv4/inetpeer.c | 13
-rw-r--r-- net/ipv4/ip_gre.c | 28
-rw-r--r-- net/ipv4/ip_input.c | 4
-rw-r--r-- net/ipv4/ip_options.c | 6
-rw-r--r-- net/ipv4/ip_output.c | 20
-rw-r--r-- net/ipv4/ip_sockglue.c | 2
-rw-r--r-- net/ipv4/ipcomp.c | 4
-rw-r--r-- net/ipv4/ipconfig.c | 2
-rw-r--r-- net/ipv4/ipip.c | 8
-rw-r--r-- net/ipv4/ipmr.c | 2
-rw-r--r-- net/ipv4/netfilter.c | 5
-rw-r--r-- net/ipv4/netfilter/arp_tables.c | 22
-rw-r--r-- net/ipv4/netfilter/ip_tables.c | 30
-rw-r--r-- net/ipv4/netfilter/nf_nat_core.c | 4
-rw-r--r-- net/ipv4/netfilter/nf_nat_helper.c | 2
-rw-r--r-- net/ipv4/raw.c | 30
-rw-r--r-- net/ipv4/route.c | 34
-rw-r--r-- net/ipv4/syncookies.c | 18
-rw-r--r-- net/ipv4/sysctl_net_ipv4.c | 3
-rw-r--r-- net/ipv4/tcp.c | 7
-rw-r--r-- net/ipv4/tcp_ipv4.c | 8
-rw-r--r-- net/ipv4/tcp_lp.c | 2
-rw-r--r-- net/ipv4/tcp_output.c | 2
-rw-r--r-- net/ipv4/tcp_yeah.c | 2
-rw-r--r-- net/ipv4/udp.c | 22
-rw-r--r-- net/ipv4/xfrm4_policy.c | 3
-rw-r--r-- net/ipv4/xfrm4_state.c | 2
-rw-r--r-- net/ipv6/addrconf.c | 22
-rw-r--r-- net/ipv6/af_inet6.c | 4
-rw-r--r-- net/ipv6/anycast.c | 16
-rw-r--r-- net/ipv6/esp6.c | 5
-rw-r--r-- net/ipv6/icmp.c | 8
-rw-r--r-- net/ipv6/inet6_connection_sock.c | 2
-rw-r--r-- net/ipv6/ip6_fib.c | 16
-rw-r--r-- net/ipv6/ip6_input.c | 6
-rw-r--r-- net/ipv6/ip6_output.c | 18
-rw-r--r-- net/ipv6/ip6_tunnel.c | 36
-rw-r--r-- net/ipv6/ip6mr.c | 4
-rw-r--r-- net/ipv6/ipcomp6.c | 5
-rw-r--r-- net/ipv6/mcast.c | 36
-rw-r--r-- net/ipv6/mip6.c | 8
-rw-r--r-- net/ipv6/ndisc.c | 52
-rw-r--r-- net/ipv6/netfilter.c | 23
-rw-r--r-- net/ipv6/netfilter/ip6_tables.c | 23
-rw-r--r-- net/ipv6/netfilter/ip6table_mangle.c | 3
-rw-r--r-- net/ipv6/netfilter/nf_defrag_ipv6_hooks.c | 2
-rw-r--r-- net/ipv6/raw.c | 14
-rw-r--r-- net/ipv6/reassembly.c | 4
-rw-r--r-- net/ipv6/route.c | 122
-rw-r--r-- net/ipv6/sit.c | 25
-rw-r--r-- net/ipv6/syncookies.c | 13
-rw-r--r-- net/ipv6/tcp_ipv6.c | 53
-rw-r--r-- net/ipv6/udp.c | 23
-rw-r--r-- net/ipv6/xfrm6_mode_beet.c | 2
-rw-r--r-- net/ipv6/xfrm6_mode_tunnel.c | 6
-rw-r--r-- net/ipv6/xfrm6_policy.c | 2
-rw-r--r-- net/ipv6/xfrm6_tunnel.c | 10
-rw-r--r-- net/irda/af_irda.c | 3
-rw-r--r-- net/irda/irlap.c | 2
-rw-r--r-- net/irda/irlap_event.c | 11
-rw-r--r-- net/irda/irlap_frame.c | 2
-rw-r--r-- net/irda/irlmp_event.c | 2
-rw-r--r-- net/irda/irnet/irnet.h | 2
-rw-r--r-- net/irda/irproc.c | 5
-rw-r--r-- net/irda/irqueue.c | 2
-rw-r--r-- net/irda/irttp.c | 2
-rw-r--r-- net/irda/qos.c | 8
-rw-r--r-- net/irda/timer.c | 2
-rw-r--r-- net/iucv/af_iucv.c | 2
-rw-r--r-- net/iucv/iucv.c | 4
-rw-r--r-- net/key/af_key.c | 2
-rw-r--r-- net/l2tp/l2tp_netlink.c | 3
-rw-r--r-- net/llc/llc_input.c | 3
-rw-r--r-- net/mac80211/ieee80211_i.h | 2
-rw-r--r-- net/mac80211/mesh_pathtbl.c | 2
-rw-r--r-- net/mac80211/rc80211_minstrel_ht.c | 2
-rw-r--r-- net/mac80211/rc80211_pid.h | 2
-rw-r--r-- net/mac80211/rx.c | 2
-rw-r--r-- net/mac80211/sta_info.c | 4
-rw-r--r-- net/mac80211/sta_info.h | 2
-rw-r--r-- net/netfilter/Kconfig | 1
-rw-r--r-- net/netfilter/ipset/ip_set_bitmap_ip.c | 3
-rw-r--r-- net/netfilter/ipset/ip_set_bitmap_ipmac.c | 3
-rw-r--r-- net/netfilter/ipset/ip_set_bitmap_port.c | 3
-rw-r--r-- net/netfilter/ipset/ip_set_core.c | 111
-rw-r--r-- net/netfilter/ipset/ip_set_getport.c | 16
-rw-r--r-- net/netfilter/ipset/ip_set_hash_ipport.c | 2
-rw-r--r-- net/netfilter/ipset/ip_set_hash_ipportip.c | 2
-rw-r--r-- net/netfilter/ipset/ip_set_hash_ipportnet.c | 2
-rw-r--r-- net/netfilter/ipset/ip_set_hash_netport.c | 2
-rw-r--r-- net/netfilter/ipset/ip_set_list_set.c | 53
-rw-r--r-- net/netfilter/ipvs/ip_vs_conn.c | 4
-rw-r--r-- net/netfilter/ipvs/ip_vs_ctl.c | 5
-rw-r--r-- net/netfilter/ipvs/ip_vs_lblc.c | 2
-rw-r--r-- net/netfilter/ipvs/ip_vs_lblcr.c | 2
-rw-r--r-- net/netfilter/ipvs/ip_vs_proto_sctp.c | 8
-rw-r--r-- net/netfilter/nf_conntrack_core.c | 4
-rw-r--r-- net/netfilter/nf_conntrack_h323_asn1.c | 2
-rw-r--r-- net/netfilter/nf_conntrack_h323_main.c | 16
-rw-r--r-- net/netfilter/nf_conntrack_proto_dccp.c | 2
-rw-r--r-- net/netfilter/nf_conntrack_proto_sctp.c | 6
-rw-r--r-- net/netfilter/nf_conntrack_sip.c | 2
-rw-r--r-- net/netfilter/nf_conntrack_standalone.c | 2
-rw-r--r-- net/netfilter/nf_queue.c | 2
-rw-r--r-- net/netfilter/nfnetlink_log.c | 2
-rw-r--r-- net/netfilter/x_tables.c | 9
-rw-r--r-- net/netfilter/xt_TCPMSS.c | 2
-rw-r--r-- net/netfilter/xt_addrtype.c | 42
-rw-r--r-- net/netfilter/xt_conntrack.c | 2
-rw-r--r-- net/netlabel/netlabel_cipso_v4.c | 4
-rw-r--r-- net/netlabel/netlabel_domainhash.c | 10
-rw-r--r-- net/netlabel/netlabel_mgmt.c | 2
-rw-r--r-- net/netrom/af_netrom.c | 12
-rw-r--r-- net/phonet/socket.c | 45
-rw-r--r-- net/rds/ib_send.c | 2
-rw-r--r-- net/rds/iw_cm.c | 2
-rw-r--r-- net/rds/iw_rdma.c | 2
-rw-r--r-- net/rds/iw_send.c | 2
-rw-r--r-- net/rds/send.c | 2
-rw-r--r-- net/rose/af_rose.c | 16
-rw-r--r-- net/rose/rose_route.c | 2
-rw-r--r-- net/sched/Kconfig | 11
-rw-r--r-- net/sched/Makefile | 1
-rw-r--r-- net/sched/act_api.c | 2
-rw-r--r-- net/sched/act_pedit.c | 2
-rw-r--r-- net/sched/em_meta.c | 2
-rw-r--r-- net/sched/sch_htb.c | 2
-rw-r--r-- net/sched/sch_netem.c | 6
-rw-r--r-- net/sched/sch_qfq.c | 1137
-rw-r--r-- net/sched/sch_sfq.c | 2
-rw-r--r-- net/sctp/associola.c | 6
-rw-r--r-- net/sctp/auth.c | 6
-rw-r--r-- net/sctp/debug.c | 1
-rw-r--r-- net/sctp/endpointola.c | 20
-rw-r--r-- net/sctp/input.c | 21
-rw-r--r-- net/sctp/ipv6.c | 2
-rw-r--r-- net/sctp/output.c | 2
-rw-r--r-- net/sctp/outqueue.c | 25
-rw-r--r-- net/sctp/sm_make_chunk.c | 62
-rw-r--r-- net/sctp/sm_sideeffect.c | 11
-rw-r--r-- net/sctp/sm_statefuns.c | 109
-rw-r--r-- net/sctp/sm_statetable.c | 78
-rw-r--r-- net/sctp/socket.c | 82
-rw-r--r-- net/sctp/ulpevent.c | 32
-rw-r--r-- net/sctp/ulpqueue.c | 2
-rw-r--r-- net/socket.c | 16
-rw-r--r-- net/sunrpc/auth_gss/gss_krb5_mech.c | 2
-rw-r--r-- net/sunrpc/auth_gss/svcauth_gss.c | 2
-rw-r--r-- net/sunrpc/xprtsock.c | 4
-rw-r--r-- net/tipc/link.c | 2
-rw-r--r-- net/tipc/name_distr.c | 2
-rw-r--r-- net/unix/af_unix.c | 2
-rw-r--r-- net/wanrouter/wanproc.c | 2
-rw-r--r-- net/wireless/reg.c | 4
-rw-r--r-- net/x25/x25_facilities.c | 2
-rw-r--r-- net/x25/x25_forward.c | 4
-rw-r--r-- net/xfrm/xfrm_state.c | 12
-rw-r--r-- net/xfrm/xfrm_user.c | 6
253 files changed, 3939 insertions(+), 2340 deletions(-)
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 7850412f52b7..969e7004cf86 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -49,11 +49,6 @@ const char vlan_version[] = DRV_VERSION;
 static const char vlan_copyright[] = "Ben Greear <greearb@candelatech.com>";
 static const char vlan_buggyright[] = "David S. Miller <davem@redhat.com>";
 
-static struct packet_type vlan_packet_type __read_mostly = {
-	.type = cpu_to_be16(ETH_P_8021Q),
-	.func = vlan_skb_recv, /* VLAN receive method */
-};
-
 /* End of global variables definitions. */
 
 static void vlan_group_free(struct vlan_group *grp)
@@ -327,10 +322,6 @@ static void vlan_sync_address(struct net_device *dev,
 static void vlan_transfer_features(struct net_device *dev,
 				   struct net_device *vlandev)
 {
-	u32 old_features = vlandev->features;
-
-	vlandev->features &= ~dev->vlan_features;
-	vlandev->features |= dev->features & dev->vlan_features;
 	vlandev->gso_max_size = dev->gso_max_size;
 
 	if (dev->features & NETIF_F_HW_VLAN_TX)
@@ -341,8 +332,8 @@ static void vlan_transfer_features(struct net_device *dev,
 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
 	vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid;
 #endif
-	if (old_features != vlandev->features)
-		netdev_features_change(vlandev);
+
+	netdev_update_features(vlandev);
 }
 
 static void __vlan_device_event(struct net_device *dev, unsigned long event)
@@ -508,6 +499,18 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
 	case NETDEV_PRE_TYPE_CHANGE:
 		/* Forbid underlaying device to change its type. */
 		return NOTIFY_BAD;
+
+	case NETDEV_NOTIFY_PEERS:
+	case NETDEV_BONDING_FAILOVER:
+		/* Propagate to vlan devices */
+		for (i = 0; i < VLAN_N_VID; i++) {
+			vlandev = vlan_group_get_device(grp, i);
+			if (!vlandev)
+				continue;
+
+			call_netdevice_notifiers(event, vlandev);
+		}
+		break;
 	}
 
 out:
@@ -688,7 +691,6 @@ static int __init vlan_proto_init(void)
 	if (err < 0)
 		goto err4;
 
-	dev_add_pack(&vlan_packet_type);
 	vlan_ioctl_set(vlan_ioctl_handler);
 	return 0;
 
@@ -709,8 +711,6 @@ static void __exit vlan_cleanup_module(void)
 
 	unregister_netdevice_notifier(&vlan_notifier_block);
 
-	dev_remove_pack(&vlan_packet_type);
-
 	unregister_pernet_subsys(&vlan_net_ops);
 	rcu_barrier(); /* Wait for completion of call_rcu()'s */
 
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index 5687c9b95f33..c3408def8a19 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -75,8 +75,6 @@ static inline struct vlan_dev_info *vlan_dev_info(const struct net_device *dev)
 }
 
 /* found in vlan_dev.c */
-int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
-		  struct packet_type *ptype, struct net_device *orig_dev);
 void vlan_dev_set_ingress_priority(const struct net_device *dev,
 				   u32 skb_prio, u16 vlan_prio);
 int vlan_dev_set_egress_priority(const struct net_device *dev,
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index ce8e3ab3e7a5..41495dc2a4c9 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -4,7 +4,7 @@
 #include <linux/netpoll.h>
 #include "vlan.h"
 
-bool vlan_hwaccel_do_receive(struct sk_buff **skbp)
+bool vlan_do_receive(struct sk_buff **skbp)
 {
 	struct sk_buff *skb = *skbp;
 	u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
@@ -88,3 +88,86 @@ gro_result_t vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
 	return napi_gro_frags(napi);
 }
 EXPORT_SYMBOL(vlan_gro_frags);
+
+static struct sk_buff *vlan_check_reorder_header(struct sk_buff *skb)
+{
+	if (vlan_dev_info(skb->dev)->flags & VLAN_FLAG_REORDER_HDR) {
+		if (skb_cow(skb, skb_headroom(skb)) < 0)
+			skb = NULL;
+		if (skb) {
+			/* Lifted from Gleb's VLAN code... */
+			memmove(skb->data - ETH_HLEN,
+				skb->data - VLAN_ETH_HLEN, 12);
+			skb->mac_header += VLAN_HLEN;
+		}
+	}
+	return skb;
+}
+
+static void vlan_set_encap_proto(struct sk_buff *skb, struct vlan_hdr *vhdr)
+{
+	__be16 proto;
+	unsigned char *rawp;
+
+	/*
+	 * Was a VLAN packet, grab the encapsulated protocol, which the layer
+	 * three protocols care about.
+	 */
+
+	proto = vhdr->h_vlan_encapsulated_proto;
+	if (ntohs(proto) >= 1536) {
+		skb->protocol = proto;
+		return;
+	}
+
+	rawp = skb->data;
+	if (*(unsigned short *) rawp == 0xFFFF)
+		/*
+		 * This is a magic hack to spot IPX packets. Older Novell
+		 * breaks the protocol design and runs IPX over 802.3 without
+		 * an 802.2 LLC layer. We look for FFFF which isn't a used
+		 * 802.2 SSAP/DSAP. This won't work for fault tolerant netware
+		 * but does for the rest.
+		 */
+		skb->protocol = htons(ETH_P_802_3);
+	else
+		/*
+		 * Real 802.2 LLC
+		 */
+		skb->protocol = htons(ETH_P_802_2);
+}
+
+struct sk_buff *vlan_untag(struct sk_buff *skb)
+{
+	struct vlan_hdr *vhdr;
+	u16 vlan_tci;
+
+	if (unlikely(vlan_tx_tag_present(skb))) {
+		/* vlan_tci is already set-up so leave this for another time */
+		return skb;
+	}
+
+	skb = skb_share_check(skb, GFP_ATOMIC);
+	if (unlikely(!skb))
+		goto err_free;
+
+	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
+		goto err_free;
+
+	vhdr = (struct vlan_hdr *) skb->data;
+	vlan_tci = ntohs(vhdr->h_vlan_TCI);
+	__vlan_hwaccel_put_tag(skb, vlan_tci);
+
+	skb_pull_rcsum(skb, VLAN_HLEN);
+	vlan_set_encap_proto(skb, vhdr);
+
+	skb = vlan_check_reorder_header(skb);
+	if (unlikely(!skb))
+		goto err_free;
+
+	return skb;
+
+err_free:
+	kfree_skb(skb);
+	return NULL;
+}
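
The vlan_set_encap_proto() helper moved into vlan_core.c above encodes the classic EtherType-versus-length test: a value of 1536 (0x0600) or more after the VLAN tag is a real protocol ID, anything smaller is an 802.3 length field, and a payload beginning with 0xFFFF is taken to be Novell's raw-802.3 IPX framing. Below is a standalone sketch of the same heuristic over a plain byte buffer, outside the kernel and without the sk_buff plumbing; the function name and return conventions are illustrative, not kernel API.

#include <stdint.h>

#define ETH_P_802_3	0x0001	/* kernel's marker for raw 802.3 frames */
#define ETH_P_802_2	0x0004	/* kernel's marker for 802.2 LLC frames */

/* p points at the 2-byte type/length field that follows the VLAN tag;
 * p[2] and p[3] are the first payload bytes. Illustrative sketch only. */
static uint16_t classify_encap(const uint8_t *p)
{
	uint16_t type_len = ((uint16_t)p[0] << 8) | p[1]; /* big-endian field */

	if (type_len >= 1536)
		return type_len;	/* a genuine EtherType */

	if (p[2] == 0xFF && p[3] == 0xFF)
		return ETH_P_802_3;	/* IPX raw-802.3 magic */

	return ETH_P_802_2;		/* ordinary 802.2 LLC */
}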
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index e34ea9e5e28b..d174c312b7f1 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -65,179 +65,6 @@ static int vlan_dev_rebuild_header(struct sk_buff *skb)
 	return 0;
 }
 
-static inline struct sk_buff *vlan_check_reorder_header(struct sk_buff *skb)
-{
-	if (vlan_dev_info(skb->dev)->flags & VLAN_FLAG_REORDER_HDR) {
-		if (skb_cow(skb, skb_headroom(skb)) < 0)
-			skb = NULL;
-		if (skb) {
-			/* Lifted from Gleb's VLAN code... */
-			memmove(skb->data - ETH_HLEN,
-				skb->data - VLAN_ETH_HLEN, 12);
-			skb->mac_header += VLAN_HLEN;
-		}
-	}
-
-	return skb;
-}
-
-static inline void vlan_set_encap_proto(struct sk_buff *skb,
-		struct vlan_hdr *vhdr)
-{
-	__be16 proto;
-	unsigned char *rawp;
-
-	/*
-	 * Was a VLAN packet, grab the encapsulated protocol, which the layer
-	 * three protocols care about.
-	 */
-
-	proto = vhdr->h_vlan_encapsulated_proto;
-	if (ntohs(proto) >= 1536) {
-		skb->protocol = proto;
-		return;
-	}
-
-	rawp = skb->data;
-	if (*(unsigned short *)rawp == 0xFFFF)
-		/*
-		 * This is a magic hack to spot IPX packets. Older Novell
-		 * breaks the protocol design and runs IPX over 802.3 without
-		 * an 802.2 LLC layer. We look for FFFF which isn't a used
-		 * 802.2 SSAP/DSAP. This won't work for fault tolerant netware
-		 * but does for the rest.
-		 */
-		skb->protocol = htons(ETH_P_802_3);
-	else
-		/*
-		 * Real 802.2 LLC
-		 */
-		skb->protocol = htons(ETH_P_802_2);
-}
-
-/*
- * Determine the packet's protocol ID. The rule here is that we
- * assume 802.3 if the type field is short enough to be a length.
- * This is normal practice and works for any 'now in use' protocol.
- *
- * Also, at this point we assume that we ARE dealing exclusively with
- * VLAN packets, or packets that should be made into VLAN packets based
- * on a default VLAN ID.
- *
- * NOTE: Should be similar to ethernet/eth.c.
- *
- * SANITY NOTE: This method is called when a packet is moving up the stack
- *		towards userland. To get here, it would have already passed
- *		through the ethernet/eth.c eth_type_trans() method.
- * SANITY NOTE 2: We are referencing to the VLAN_HDR frields, which MAY be
- *		 stored UNALIGNED in the memory. RISC systems don't like
- *		 such cases very much...
- * SANITY NOTE 2a: According to Dave Miller & Alexey, it will always be
- *		 aligned, so there doesn't need to be any of the unaligned
- *		 stuff. It has been commented out now... --Ben
- *
- */
-int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
-		  struct packet_type *ptype, struct net_device *orig_dev)
-{
-	struct vlan_hdr *vhdr;
-	struct vlan_pcpu_stats *rx_stats;
-	struct net_device *vlan_dev;
-	u16 vlan_id;
-	u16 vlan_tci;
-
-	skb = skb_share_check(skb, GFP_ATOMIC);
-	if (skb == NULL)
-		goto err_free;
-
-	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
-		goto err_free;
-
-	vhdr = (struct vlan_hdr *)skb->data;
-	vlan_tci = ntohs(vhdr->h_vlan_TCI);
-	vlan_id = vlan_tci & VLAN_VID_MASK;
-
-	rcu_read_lock();
-	vlan_dev = vlan_find_dev(dev, vlan_id);
-
-	/* If the VLAN device is defined, we use it.
-	 * If not, and the VID is 0, it is a 802.1p packet (not
-	 * really a VLAN), so we will just netif_rx it later to the
-	 * original interface, but with the skb->proto set to the
-	 * wrapped proto: we do nothing here.
-	 */
-
-	if (!vlan_dev) {
-		if (vlan_id) {
-			pr_debug("%s: ERROR: No net_device for VID: %u on dev: %s\n",
-				 __func__, vlan_id, dev->name);
-			goto err_unlock;
-		}
-		rx_stats = NULL;
-	} else {
-		skb->dev = vlan_dev;
-
-		rx_stats = this_cpu_ptr(vlan_dev_info(skb->dev)->vlan_pcpu_stats);
-
-		u64_stats_update_begin(&rx_stats->syncp);
-		rx_stats->rx_packets++;
-		rx_stats->rx_bytes += skb->len;
-
-		skb->priority = vlan_get_ingress_priority(skb->dev, vlan_tci);
-
-		pr_debug("%s: priority: %u for TCI: %hu\n",
-			 __func__, skb->priority, vlan_tci);
-
-		switch (skb->pkt_type) {
-		case PACKET_BROADCAST:
-			/* Yeah, stats collect these together.. */
-			/* stats->broadcast ++; // no such counter :-( */
-			break;
-
-		case PACKET_MULTICAST:
-			rx_stats->rx_multicast++;
-			break;
-
-		case PACKET_OTHERHOST:
-			/* Our lower layer thinks this is not local, let's make
-			 * sure.
-			 * This allows the VLAN to have a different MAC than the
-			 * underlying device, and still route correctly.
-			 */
-			if (!compare_ether_addr(eth_hdr(skb)->h_dest,
-						skb->dev->dev_addr))
-				skb->pkt_type = PACKET_HOST;
-			break;
-		default:
-			break;
-		}
-		u64_stats_update_end(&rx_stats->syncp);
-	}
-
-	skb_pull_rcsum(skb, VLAN_HLEN);
-	vlan_set_encap_proto(skb, vhdr);
-
-	if (vlan_dev) {
-		skb = vlan_check_reorder_header(skb);
-		if (!skb) {
-			rx_stats->rx_errors++;
-			goto err_unlock;
-		}
-	}
-
-	netif_rx(skb);
-
-	rcu_read_unlock();
-	return NET_RX_SUCCESS;
-
-err_unlock:
-	rcu_read_unlock();
-err_free:
-	atomic_long_inc(&dev->rx_dropped);
-	kfree_skb(skb);
-	return NET_RX_DROP;
-}
-
 static inline u16
 vlan_dev_get_egress_qos_mask(struct net_device *dev, struct sk_buff *skb)
 {
@@ -704,8 +531,8 @@ static int vlan_dev_init(struct net_device *dev)
 					  (1<<__LINK_STATE_DORMANT))) |
 		      (1<<__LINK_STATE_PRESENT);
 
-	dev->features |= real_dev->features & real_dev->vlan_features;
-	dev->features |= NETIF_F_LLTX;
+	dev->hw_features = real_dev->vlan_features & NETIF_F_ALL_TX_OFFLOADS;
+	dev->features |= real_dev->vlan_features | NETIF_F_LLTX;
 	dev->gso_max_size = real_dev->gso_max_size;
 
 	/* ipv6 shared card related stuff */
@@ -759,6 +586,17 @@ static void vlan_dev_uninit(struct net_device *dev)
 	}
 }
 
+static u32 vlan_dev_fix_features(struct net_device *dev, u32 features)
+{
+	struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+
+	features &= (real_dev->features | NETIF_F_LLTX);
+	if (dev_ethtool_get_rx_csum(real_dev))
+		features |= NETIF_F_RXCSUM;
+
+	return features;
+}
+
 static int vlan_ethtool_get_settings(struct net_device *dev,
 				     struct ethtool_cmd *cmd)
 {
@@ -774,18 +612,6 @@ static void vlan_ethtool_get_drvinfo(struct net_device *dev,
 	strcpy(info->fw_version, "N/A");
 }
 
-static u32 vlan_ethtool_get_rx_csum(struct net_device *dev)
-{
-	const struct vlan_dev_info *vlan = vlan_dev_info(dev);
-	return dev_ethtool_get_rx_csum(vlan->real_dev);
-}
-
-static u32 vlan_ethtool_get_flags(struct net_device *dev)
-{
-	const struct vlan_dev_info *vlan = vlan_dev_info(dev);
-	return dev_ethtool_get_flags(vlan->real_dev);
-}
-
 static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
 
@@ -823,32 +649,10 @@ static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, st
 	return stats;
 }
 
-static int vlan_ethtool_set_tso(struct net_device *dev, u32 data)
-{
-	if (data) {
-		struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
-
-		/* Underlying device must support TSO for VLAN-tagged packets
-		 * and must have TSO enabled now.
-		 */
-		if (!(real_dev->vlan_features & NETIF_F_TSO))
-			return -EOPNOTSUPP;
-		if (!(real_dev->features & NETIF_F_TSO))
-			return -EINVAL;
-		dev->features |= NETIF_F_TSO;
-	} else {
-		dev->features &= ~NETIF_F_TSO;
-	}
-	return 0;
-}
-
 static const struct ethtool_ops vlan_ethtool_ops = {
 	.get_settings		= vlan_ethtool_get_settings,
 	.get_drvinfo		= vlan_ethtool_get_drvinfo,
 	.get_link		= ethtool_op_get_link,
-	.get_rx_csum		= vlan_ethtool_get_rx_csum,
-	.get_flags		= vlan_ethtool_get_flags,
-	.set_tso		= vlan_ethtool_set_tso,
 };
 
 static const struct net_device_ops vlan_netdev_ops = {
@@ -874,6 +678,7 @@ static const struct net_device_ops vlan_netdev_ops = {
 	.ndo_fcoe_get_wwn	= vlan_dev_fcoe_get_wwn,
 	.ndo_fcoe_ddp_target	= vlan_dev_fcoe_ddp_target,
 #endif
+	.ndo_fix_features	= vlan_dev_fix_features,
 };
 
 void vlan_setup(struct net_device *dev)
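
The new ndo_fix_features callback wired up above is the flip side of the netdev_update_features() call introduced in the vlan.c hunk: instead of every stacked driver poking dev->features directly, the core asks the driver to mask a requested feature set down to what it can actually support. A minimal sketch of the pattern for a hypothetical stacked device follows; my_fix_features, my_get_lower_dev and my_netdev_ops are illustrative names, while ndo_fix_features and netdev_update_features are the real hooks.

/* Sketch of the ndo_fix_features contract for a stacked device:
 * clamp the requested features to whatever the lower device offers. */
static u32 my_fix_features(struct net_device *dev, u32 features)
{
	struct net_device *lower = my_get_lower_dev(dev); /* hypothetical helper */

	/* Mirror vlan_dev_fix_features(): never advertise more than the
	 * real device, but software LLTX is always safe to keep. */
	features &= (lower->features | NETIF_F_LLTX);

	return features;
}

static const struct net_device_ops my_netdev_ops = {
	.ndo_fix_features	= my_fix_features,
	/* ... the usual ndo_open/ndo_start_xmit/... callbacks ... */
};

When the lower device's feature set changes, the stacked driver only has to call netdev_update_features() on itself and the mask is recomputed by the core.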
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c
index d1314cf18adf..d940c49d168a 100644
--- a/net/8021q/vlanproc.c
+++ b/net/8021q/vlanproc.c
@@ -54,7 +54,7 @@ static const char name_conf[] = "config";
 
 /*
  *	Structures for interfacing with the /proc filesystem.
- *	VLAN creates its own directory /proc/net/vlan with the folowing
+ *	VLAN creates its own directory /proc/net/vlan with the following
  *	entries:
  *	config		device status/configuration
  *	<device>	entry for each device
diff --git a/net/9p/client.c b/net/9p/client.c
index 2ccbf04d37df..0ce959218607 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -178,7 +178,7 @@ free_and_return:
 * @tag: numeric id for transaction
 *
 * this is a simple array lookup, but will grow the
- * request_slots as necessary to accomodate transaction
+ * request_slots as necessary to accommodate transaction
 * ids which did not previously have a slot.
 *
 * this code relies on the client spinlock to manage locks, its
@@ -1302,7 +1302,7 @@ int
 p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
 		u32 count)
 {
-	int err, rsize, total;
+	int err, rsize;
 	struct p9_client *clnt;
 	struct p9_req_t *req;
 	char *dataptr;
@@ -1311,7 +1311,6 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
 		(long long unsigned) offset, count);
 	err = 0;
 	clnt = fid->clnt;
-	total = 0;
 
 	rsize = fid->iounit;
 	if (!rsize || rsize > clnt->msize-P9_IOHDRSZ)
@@ -1367,7 +1366,7 @@ int
 p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
 		u64 offset, u32 count)
 {
-	int err, rsize, total;
+	int err, rsize;
 	struct p9_client *clnt;
 	struct p9_req_t *req;
 
@@ -1375,7 +1374,6 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
 		fid->fid, (long long unsigned) offset, count);
 	err = 0;
 	clnt = fid->clnt;
-	total = 0;
 
 	rsize = fid->iounit;
 	if (!rsize || rsize > clnt->msize-P9_IOHDRSZ)
@@ -1766,7 +1764,7 @@ EXPORT_SYMBOL_GPL(p9_client_xattrcreate);
 
 int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
 {
-	int err, rsize, total;
+	int err, rsize;
 	struct p9_client *clnt;
 	struct p9_req_t *req;
 	char *dataptr;
@@ -1776,7 +1774,6 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
 
 	err = 0;
 	clnt = fid->clnt;
-	total = 0;
 
 	rsize = fid->iounit;
 	if (!rsize || rsize > clnt->msize-P9_READDIRHDRSZ)
diff --git a/net/9p/trans_common.c b/net/9p/trans_common.c
index 9172ab78fcb0..d47880e971dd 100644
--- a/net/9p/trans_common.c
+++ b/net/9p/trans_common.c
@@ -36,7 +36,7 @@ p9_release_req_pages(struct trans_rpage_info *rpinfo)
 EXPORT_SYMBOL(p9_release_req_pages);
 
 /**
- * p9_nr_pages - Return number of pages needed to accomodate the payload.
+ * p9_nr_pages - Return number of pages needed to accommodate the payload.
 */
 int
 p9_nr_pages(struct p9_req_t *req)
@@ -55,7 +55,7 @@ EXPORT_SYMBOL(p9_nr_pages);
 * @req: Request to be sent to server.
 * @pdata_off: data offset into the first page after translation (gup).
 * @pdata_len: Total length of the IO. gup may not return requested # of pages.
- * @nr_pages: number of pages to accomodate the payload
+ * @nr_pages: number of pages to accommodate the payload
 * @rw: Indicates if the pages are for read or write.
 */
 int
diff --git a/net/9p/util.c b/net/9p/util.c
index b84619b5ba22..da6af81e59d9 100644
--- a/net/9p/util.c
+++ b/net/9p/util.c
@@ -67,7 +67,7 @@ EXPORT_SYMBOL(p9_idpool_create);
 
 /**
 * p9_idpool_destroy - create a new per-connection id pool
- * @p: idpool to destory
+ * @p: idpool to destroy
 */
 
 void p9_idpool_destroy(struct p9_idpool *p)
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index fce2eae8d476..2252c2085dac 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -509,7 +509,7 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
 	write_lock_irq(&devs_lock);
 	net_dev = br2684_find_dev(&be.ifspec);
 	if (net_dev == NULL) {
-		pr_err("tried to attach to non-existant device\n");
+		pr_err("tried to attach to non-existent device\n");
 		err = -ENXIO;
 		goto error;
 	}
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 38754fdb88ba..25073b6ef474 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -129,7 +129,6 @@ static struct net_device *dev_lec[MAX_LEC_ITF];
 #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
 static void lec_handle_bridge(struct sk_buff *skb, struct net_device *dev)
 {
-	struct ethhdr *eth;
 	char *buff;
 	struct lec_priv *priv;
 
@@ -138,7 +137,6 @@ static void lec_handle_bridge(struct sk_buff *skb, struct net_device *dev)
 	 * LE_TOPOLOGY_REQUEST with the same value of Topology Change bit
 	 * as the Config BPDU has
 	 */
-	eth = (struct ethhdr *)skb->data;
 	buff = skb->data + skb->dev->hard_header_len;
 	if (*buff++ == 0x42 && *buff++ == 0x42 && *buff++ == 0x03) {
 		struct sock *sk;
@@ -1180,7 +1178,6 @@ static int __init lane_module_init(void)
 static void __exit lane_module_cleanup(void)
 {
 	int i;
-	struct lec_priv *priv;
 
 	remove_proc_entry("lec", atm_proc_root);
 
@@ -1188,7 +1185,6 @@ static void __exit lane_module_cleanup(void)
 
 	for (i = 0; i < MAX_LEC_ITF; i++) {
 		if (dev_lec[i] != NULL) {
-			priv = netdev_priv(dev_lec[i]);
 			unregister_netdev(dev_lec[i]);
 			free_netdev(dev_lec[i]);
 			dev_lec[i] = NULL;
diff --git a/net/atm/lec.h b/net/atm/lec.h
index 9d14d196cc1d..dfc071966463 100644
--- a/net/atm/lec.h
+++ b/net/atm/lec.h
@@ -35,7 +35,7 @@ struct lecdatahdr_8025 {
 * Operations that LANE2 capable device can do. Two first functions
 * are used to make the device do things. See spec 3.1.3 and 3.1.4.
 *
- * The third function is intented for the MPOA component sitting on
+ * The third function is intended for the MPOA component sitting on
 * top of the LANE device. The MPOA component assigns it's own function
 * to (*associate_indicator)() and the LANE device will use that
 * function to tell about TLVs it sees floating through.
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 6da5daeebab7..e7c69f4619ec 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1538,8 +1538,6 @@ static int ax25_sendmsg(struct kiocb *iocb, struct socket *sock,
 	}
 
 	/* Build a packet */
-	SOCK_DEBUG(sk, "AX.25: sendto: Addresses built. Building packet.\n");
-
 	/* Assume the worst case */
 	size = len + ax25->ax25_dev->dev->hard_header_len;
 
@@ -1549,8 +1547,6 @@ static int ax25_sendmsg(struct kiocb *iocb, struct socket *sock,
 
 	skb_reserve(skb, size - len);
 
-	SOCK_DEBUG(sk, "AX.25: Appending user data\n");
-
 	/* User data follows immediately after the AX.25 data */
 	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
 		err = -EFAULT;
@@ -1564,8 +1560,6 @@ static int ax25_sendmsg(struct kiocb *iocb, struct socket *sock,
 	if (!ax25->pidincl)
 		*skb_push(skb, 1) = sk->sk_protocol;
 
-	SOCK_DEBUG(sk, "AX.25: Transmitting buffer\n");
-
 	if (sk->sk_type == SOCK_SEQPACKET) {
 		/* Connected mode sockets go via the LAPB machine */
 		if (sk->sk_state != TCP_ESTABLISHED) {
@@ -1583,22 +1577,14 @@ static int ax25_sendmsg(struct kiocb *iocb, struct socket *sock,
 
 	skb_push(skb, 1 + ax25_addr_size(dp));
 
-	SOCK_DEBUG(sk, "Building AX.25 Header (dp=%p).\n", dp);
-
-	if (dp != NULL)
-		SOCK_DEBUG(sk, "Num digipeaters=%d\n", dp->ndigi);
+	/* Building AX.25 Header */
 
 	/* Build an AX.25 header */
 	lv = ax25_addr_build(skb->data, &ax25->source_addr, &sax.sax25_call,
 			     dp, AX25_COMMAND, AX25_MODULUS);
 
-	SOCK_DEBUG(sk, "Built header (%d bytes)\n",lv);
-
 	skb_set_transport_header(skb, lv);
 
-	SOCK_DEBUG(sk, "base=%p pos=%p\n",
-		   skb->data, skb_transport_header(skb));
-
 	*skb_transport_header(skb) = AX25_UI;
 
 	/* Datagram frames go straight out of the door as UI */
diff --git a/net/ax25/ax25_iface.c b/net/ax25/ax25_iface.c
index 5a0dda8df492..60b545e2822a 100644
--- a/net/ax25/ax25_iface.c
+++ b/net/ax25/ax25_iface.c
@@ -58,7 +58,7 @@ EXPORT_SYMBOL_GPL(ax25_register_pid);
 
 void ax25_protocol_release(unsigned int pid)
 {
-	struct ax25_protocol *s, *protocol;
+	struct ax25_protocol *protocol;
 
 	write_lock_bh(&protocol_list_lock);
 	protocol = protocol_list;
@@ -72,7 +72,6 @@ void ax25_protocol_release(unsigned int pid)
 
 	while (protocol != NULL && protocol->next != NULL) {
 		if (protocol->next->pid == pid) {
-			s = protocol->next;
 			protocol->next = protocol->next->next;
 			goto out;
 		}
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 3cc43558cf9c..2acd7a666bda 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -23,6 +23,7 @@
 #include "gateway_client.h"
 #include "gateway_common.h"
 #include "hard-interface.h"
+#include "originator.h"
 #include <linux/ip.h>
 #include <linux/ipv6.h>
 #include <linux/udp.h>
@@ -42,61 +43,76 @@ static void gw_node_free_ref(struct gw_node *gw_node)
 		call_rcu(&gw_node->rcu, gw_node_free_rcu);
 }
 
-void *gw_get_selected(struct bat_priv *bat_priv)
+static struct gw_node *gw_get_selected_gw_node(struct bat_priv *bat_priv)
 {
-	struct gw_node *curr_gateway_tmp;
-	struct orig_node *orig_node = NULL;
+	struct gw_node *gw_node;
 
 	rcu_read_lock();
-	curr_gateway_tmp = rcu_dereference(bat_priv->curr_gw);
-	if (!curr_gateway_tmp)
-		goto out;
-
-	orig_node = curr_gateway_tmp->orig_node;
-	if (!orig_node)
+	gw_node = rcu_dereference(bat_priv->curr_gw);
+	if (!gw_node)
 		goto out;
 
-	if (!atomic_inc_not_zero(&orig_node->refcount))
-		orig_node = NULL;
+	if (!atomic_inc_not_zero(&gw_node->refcount))
+		gw_node = NULL;
 
 out:
 	rcu_read_unlock();
-	return orig_node;
+	return gw_node;
 }
 
-void gw_deselect(struct bat_priv *bat_priv)
+struct orig_node *gw_get_selected_orig(struct bat_priv *bat_priv)
 {
 	struct gw_node *gw_node;
+	struct orig_node *orig_node = NULL;
 
-	spin_lock_bh(&bat_priv->gw_list_lock);
-	gw_node = rcu_dereference(bat_priv->curr_gw);
-	rcu_assign_pointer(bat_priv->curr_gw, NULL);
-	spin_unlock_bh(&bat_priv->gw_list_lock);
+	gw_node = gw_get_selected_gw_node(bat_priv);
+	if (!gw_node)
+		goto out;
+
+	rcu_read_lock();
+	orig_node = gw_node->orig_node;
+	if (!orig_node)
+		goto unlock;
+
+	if (!atomic_inc_not_zero(&orig_node->refcount))
+		orig_node = NULL;
 
+unlock:
+	rcu_read_unlock();
+out:
 	if (gw_node)
 		gw_node_free_ref(gw_node);
+	return orig_node;
 }
 
 static void gw_select(struct bat_priv *bat_priv, struct gw_node *new_gw_node)
 {
 	struct gw_node *curr_gw_node;
 
+	spin_lock_bh(&bat_priv->gw_list_lock);
+
 	if (new_gw_node && !atomic_inc_not_zero(&new_gw_node->refcount))
 		new_gw_node = NULL;
 
-	spin_lock_bh(&bat_priv->gw_list_lock);
-	curr_gw_node = rcu_dereference(bat_priv->curr_gw);
+	curr_gw_node = bat_priv->curr_gw;
 	rcu_assign_pointer(bat_priv->curr_gw, new_gw_node);
-	spin_unlock_bh(&bat_priv->gw_list_lock);
 
 	if (curr_gw_node)
 		gw_node_free_ref(curr_gw_node);
+
+	spin_unlock_bh(&bat_priv->gw_list_lock);
+}
+
+void gw_deselect(struct bat_priv *bat_priv)
+{
+	gw_select(bat_priv, NULL);
 }
 
 void gw_election(struct bat_priv *bat_priv)
 {
 	struct hlist_node *node;
-	struct gw_node *gw_node, *curr_gw, *curr_gw_tmp = NULL;
+	struct gw_node *gw_node, *curr_gw = NULL, *curr_gw_tmp = NULL;
+	struct neigh_node *router;
 	uint8_t max_tq = 0;
 	uint32_t max_gw_factor = 0, tmp_gw_factor = 0;
 	int down, up;
@@ -110,32 +126,25 @@ void gw_election(struct bat_priv *bat_priv)
 	if (atomic_read(&bat_priv->gw_mode) != GW_MODE_CLIENT)
 		return;
 
-	rcu_read_lock();
-	curr_gw = rcu_dereference(bat_priv->curr_gw);
-	if (curr_gw) {
-		rcu_read_unlock();
-		return;
-	}
+	curr_gw = gw_get_selected_gw_node(bat_priv);
+	if (!curr_gw)
+		goto out;
 
+	rcu_read_lock();
 	if (hlist_empty(&bat_priv->gw_list)) {
-
-		if (curr_gw) {
-			rcu_read_unlock();
-			bat_dbg(DBG_BATMAN, bat_priv,
-				"Removing selected gateway - "
-				"no gateway in range\n");
-			gw_deselect(bat_priv);
-		} else
-			rcu_read_unlock();
-
-		return;
+		bat_dbg(DBG_BATMAN, bat_priv,
+			"Removing selected gateway - "
+			"no gateway in range\n");
+		gw_deselect(bat_priv);
+		goto unlock;
 	}
 
 	hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
-		if (!gw_node->orig_node->router)
+		if (gw_node->deleted)
 			continue;
 
-		if (gw_node->deleted)
+		router = orig_node_get_router(gw_node->orig_node);
+		if (!router)
 			continue;
 
 		switch (atomic_read(&bat_priv->gw_sel_class)) {
@@ -143,15 +152,14 @@ void gw_election(struct bat_priv *bat_priv)
 			gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags,
 					     &down, &up);
 
-			tmp_gw_factor = (gw_node->orig_node->router->tq_avg *
-					 gw_node->orig_node->router->tq_avg *
+			tmp_gw_factor = (router->tq_avg * router->tq_avg *
 					 down * 100 * 100) /
 					(TQ_LOCAL_WINDOW_SIZE *
 					 TQ_LOCAL_WINDOW_SIZE * 64);
 
 			if ((tmp_gw_factor > max_gw_factor) ||
 			    ((tmp_gw_factor == max_gw_factor) &&
-			     (gw_node->orig_node->router->tq_avg > max_tq)))
+			     (router->tq_avg > max_tq)))
 				curr_gw_tmp = gw_node;
 			break;
 
@@ -163,19 +171,25 @@ void gw_election(struct bat_priv *bat_priv)
 			 * soon as a better gateway appears which has
 			 * $routing_class more tq points)
 			 **/
-			if (gw_node->orig_node->router->tq_avg > max_tq)
+			if (router->tq_avg > max_tq)
 				curr_gw_tmp = gw_node;
 			break;
 		}
 
-		if (gw_node->orig_node->router->tq_avg > max_tq)
-			max_tq = gw_node->orig_node->router->tq_avg;
+		if (router->tq_avg > max_tq)
+			max_tq = router->tq_avg;
 
 		if (tmp_gw_factor > max_gw_factor)
 			max_gw_factor = tmp_gw_factor;
+
+		neigh_node_free_ref(router);
 	}
 
 	if (curr_gw != curr_gw_tmp) {
+		router = orig_node_get_router(curr_gw_tmp->orig_node);
+		if (!router)
+			goto unlock;
+
 		if ((curr_gw) && (!curr_gw_tmp))
 			bat_dbg(DBG_BATMAN, bat_priv,
 				"Removing selected gateway - "
@@ -186,48 +200,50 @@ void gw_election(struct bat_priv *bat_priv)
 				"(gw_flags: %i, tq: %i)\n",
 				curr_gw_tmp->orig_node->orig,
 				curr_gw_tmp->orig_node->gw_flags,
-				curr_gw_tmp->orig_node->router->tq_avg);
+				router->tq_avg);
 		else
 			bat_dbg(DBG_BATMAN, bat_priv,
 				"Changing route to gateway %pM "
 				"(gw_flags: %i, tq: %i)\n",
 				curr_gw_tmp->orig_node->orig,
 				curr_gw_tmp->orig_node->gw_flags,
-				curr_gw_tmp->orig_node->router->tq_avg);
+				router->tq_avg);
 
+		neigh_node_free_ref(router);
 		gw_select(bat_priv, curr_gw_tmp);
 	}
 
+unlock:
 	rcu_read_unlock();
+out:
+	if (curr_gw)
+		gw_node_free_ref(curr_gw);
 }
 
 void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node)
 {
-	struct gw_node *curr_gateway_tmp;
+	struct orig_node *curr_gw_orig;
+	struct neigh_node *router_gw = NULL, *router_orig = NULL;
 	uint8_t gw_tq_avg, orig_tq_avg;
 
-	rcu_read_lock();
-	curr_gateway_tmp = rcu_dereference(bat_priv->curr_gw);
-	if (!curr_gateway_tmp)
-		goto out_rcu;
+	curr_gw_orig = gw_get_selected_orig(bat_priv);
+	if (!curr_gw_orig)
+		goto deselect;
 
-	if (!curr_gateway_tmp->orig_node)
-		goto deselect_rcu;
-
-	if (!curr_gateway_tmp->orig_node->router)
-		goto deselect_rcu;
+	router_gw = orig_node_get_router(curr_gw_orig);
+	if (!router_gw)
+		goto deselect;
 
 	/* this node already is the gateway */
-	if (curr_gateway_tmp->orig_node == orig_node)
-		goto out_rcu;
-
-	if (!orig_node->router)
-		goto out_rcu;
+	if (curr_gw_orig == orig_node)
+		goto out;
 
-	gw_tq_avg = curr_gateway_tmp->orig_node->router->tq_avg;
-	rcu_read_unlock();
+	router_orig = orig_node_get_router(orig_node);
+	if (!router_orig)
+		goto out;
 
-	orig_tq_avg = orig_node->router->tq_avg;
+	gw_tq_avg = router_gw->tq_avg;
+	orig_tq_avg = router_orig->tq_avg;
 
 	/* the TQ value has to be better */
 	if (orig_tq_avg < gw_tq_avg)
@@ -245,16 +261,17 @@ void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node)
 		"Restarting gateway selection: better gateway found (tq curr: "
 		"%i, tq new: %i)\n",
 		gw_tq_avg, orig_tq_avg);
-	goto deselect;
 
-out_rcu:
-	rcu_read_unlock();
-	goto out;
-deselect_rcu:
-	rcu_read_unlock();
 deselect:
 	gw_deselect(bat_priv);
 out:
+	if (curr_gw_orig)
+		orig_node_free_ref(curr_gw_orig);
+	if (router_gw)
+		neigh_node_free_ref(router_gw);
+	if (router_orig)
+		neigh_node_free_ref(router_orig);
+
 	return;
 }
 
@@ -291,7 +308,11 @@ void gw_node_update(struct bat_priv *bat_priv,
 		    struct orig_node *orig_node, uint8_t new_gwflags)
 {
 	struct hlist_node *node;
-	struct gw_node *gw_node;
+	struct gw_node *gw_node, *curr_gw;
+
+	curr_gw = gw_get_selected_gw_node(bat_priv);
+	if (!curr_gw)
+		goto out;
 
 	rcu_read_lock();
 	hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
@@ -312,22 +333,26 @@ void gw_node_update(struct bat_priv *bat_priv,
 				"Gateway %pM removed from gateway list\n",
 				orig_node->orig);
 
-			if (gw_node == rcu_dereference(bat_priv->curr_gw)) {
-				rcu_read_unlock();
-				gw_deselect(bat_priv);
-				return;
-			}
+			if (gw_node == curr_gw)
+				goto deselect;
 		}
 
-		rcu_read_unlock();
-		return;
+		goto unlock;
 	}
-	rcu_read_unlock();
 
 	if (new_gwflags == 0)
-		return;
+		goto unlock;
 
 	gw_node_add(bat_priv, orig_node, new_gwflags);
+	goto unlock;
+
+deselect:
+	gw_deselect(bat_priv);
+unlock:
+	rcu_read_unlock();
+out:
+	if (curr_gw)
+		gw_node_free_ref(curr_gw);
 }
 
 void gw_node_delete(struct bat_priv *bat_priv, struct orig_node *orig_node)
@@ -337,9 +362,12 @@ void gw_node_delete(struct bat_priv *bat_priv, struct orig_node *orig_node)
 
 void gw_node_purge(struct bat_priv *bat_priv)
 {
-	struct gw_node *gw_node;
+	struct gw_node *gw_node, *curr_gw;
 	struct hlist_node *node, *node_tmp;
 	unsigned long timeout = 2 * PURGE_TIMEOUT * HZ;
+	char do_deselect = 0;
+
+	curr_gw = gw_get_selected_gw_node(bat_priv);
 
 	spin_lock_bh(&bat_priv->gw_list_lock);
 
@@ -350,41 +378,56 @@ void gw_node_purge(struct bat_priv *bat_priv)
 		    atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE)
 			continue;
 
-		if (rcu_dereference(bat_priv->curr_gw) == gw_node)
-			gw_deselect(bat_priv);
+		if (curr_gw == gw_node)
+			do_deselect = 1;
 
 		hlist_del_rcu(&gw_node->list);
 		gw_node_free_ref(gw_node);
 	}
 
-
 	spin_unlock_bh(&bat_priv->gw_list_lock);
+
+	/* gw_deselect() needs to acquire the gw_list_lock */
+	if (do_deselect)
+		gw_deselect(bat_priv);
+
+	if (curr_gw)
+		gw_node_free_ref(curr_gw);
 }
 
+/**
+ * fails if orig_node has no router
+ */
 static int _write_buffer_text(struct bat_priv *bat_priv,
 			      struct seq_file *seq, struct gw_node *gw_node)
 {
 	struct gw_node *curr_gw;
-	int down, up, ret;
+	struct neigh_node *router;
+	int down, up, ret = -1;
 
 	gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags, &down, &up);
 
-	rcu_read_lock();
-	curr_gw = rcu_dereference(bat_priv->curr_gw);
+	router = orig_node_get_router(gw_node->orig_node);
+	if (!router)
+		goto out;
 
-	ret = seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %3i - %i%s/%i%s\n",
-			 (curr_gw == gw_node ? "=>" : "  "),
-			 gw_node->orig_node->orig,
-			 gw_node->orig_node->router->tq_avg,
-			 gw_node->orig_node->router->addr,
-			 gw_node->orig_node->router->if_incoming->net_dev->name,
-			 gw_node->orig_node->gw_flags,
-			 (down > 2048 ? down / 1024 : down),
-			 (down > 2048 ? "MBit" : "KBit"),
-			 (up > 2048 ? up / 1024 : up),
-			 (up > 2048 ? "MBit" : "KBit"));
+	curr_gw = gw_get_selected_gw_node(bat_priv);
 
-	rcu_read_unlock();
+	ret = seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %3i - %i%s/%i%s\n",
+			 (curr_gw == gw_node ? "=>" : "  "),
+			 gw_node->orig_node->orig,
+			 router->tq_avg, router->addr,
+			 router->if_incoming->net_dev->name,
+			 gw_node->orig_node->gw_flags,
+			 (down > 2048 ? down / 1024 : down),
+			 (down > 2048 ? "MBit" : "KBit"),
+			 (up > 2048 ? up / 1024 : up),
+			 (up > 2048 ? "MBit" : "KBit"));
+
+	neigh_node_free_ref(router);
+	if (curr_gw)
+		gw_node_free_ref(curr_gw);
+out:
 	return ret;
 }
 
@@ -422,10 +465,10 @@ int gw_client_seq_print_text(struct seq_file *seq, void *offset)
 		if (gw_node->deleted)
 			continue;
 
-		if (!gw_node->orig_node->router)
+		/* fails if orig_node has no router */
+		if (_write_buffer_text(bat_priv, seq, gw_node) < 0)
 			continue;
 
-		_write_buffer_text(bat_priv, seq, gw_node);
 		gw_count++;
 	}
 	rcu_read_unlock();
@@ -442,6 +485,7 @@ int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb)
 	struct iphdr *iphdr;
 	struct ipv6hdr *ipv6hdr;
 	struct udphdr *udphdr;
+	struct gw_node *curr_gw;
 	unsigned int header_len = 0;
 
 	if (atomic_read(&bat_priv->gw_mode) == GW_MODE_OFF)
@@ -506,12 +550,11 @@ int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb)
 	if (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER)
 		return -1;
 
-	rcu_read_lock();
-	if (!rcu_dereference(bat_priv->curr_gw)) {
-		rcu_read_unlock();
+	curr_gw = gw_get_selected_gw_node(bat_priv);
+	if (!curr_gw)
 		return 0;
-	}
-	rcu_read_unlock();
 
+	if (curr_gw)
+		gw_node_free_ref(curr_gw);
 	return 1;
 }
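
Nearly every hunk in this file replaces a bare orig_node->router dereference with the lookup-then-reference idiom that orig_node_get_router() (added in originator.c below) encapsulates: find the object under rcu_read_lock(), bump its refcount with atomic_inc_not_zero() before leaving the read-side section, and have the caller drop the reference when done. A generic sketch of the idiom under assumed types follows; struct obj, cur_obj and obj_get_current() are illustrative names, not batman-adv symbols.

#include <linux/rcupdate.h>
#include <linux/atomic.h>

struct obj {
	atomic_t refcount;
	/* ... payload ... */
};

static struct obj __rcu *cur_obj; /* updated elsewhere via rcu_assign_pointer() */

/* Return a referenced object, or NULL if none is set or it is dying. */
static struct obj *obj_get_current(void)
{
	struct obj *o;

	rcu_read_lock();
	o = rcu_dereference(cur_obj);
	/* A refcount already at zero means the object is on its way to
	 * being freed; refusing to bump from zero is what makes taking
	 * a reference inside the RCU section safe. */
	if (o && !atomic_inc_not_zero(&o->refcount))
		o = NULL;
	rcu_read_unlock();

	return o; /* caller must eventually drop the reference */
}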
diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h
index 2aa439124ee3..1ce8c6066da1 100644
--- a/net/batman-adv/gateway_client.h
+++ b/net/batman-adv/gateway_client.h
@@ -24,7 +24,7 @@
 
 void gw_deselect(struct bat_priv *bat_priv);
 void gw_election(struct bat_priv *bat_priv);
-void *gw_get_selected(struct bat_priv *bat_priv);
+struct orig_node *gw_get_selected_orig(struct bat_priv *bat_priv);
 void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node);
 void gw_node_update(struct bat_priv *bat_priv,
 		    struct orig_node *orig_node, uint8_t new_gwflags);
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index 34ce56c358e5..49079c254476 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -218,23 +218,13 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
 	if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
 		goto dst_unreach;
 
-	rcu_read_lock();
 	orig_node = orig_hash_find(bat_priv, icmp_packet->dst);
-
 	if (!orig_node)
-		goto unlock;
-
-	neigh_node = orig_node->router;
+		goto dst_unreach;
 
+	neigh_node = orig_node_get_router(orig_node);
 	if (!neigh_node)
-		goto unlock;
-
-	if (!atomic_inc_not_zero(&neigh_node->refcount)) {
-		neigh_node = NULL;
-		goto unlock;
-	}
-
-	rcu_read_unlock();
+		goto dst_unreach;
 
 	if (!neigh_node->if_incoming)
 		goto dst_unreach;
@@ -252,8 +242,6 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
 	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
 	goto out;
 
-unlock:
-	rcu_read_unlock();
 dst_unreach:
 	icmp_packet->msg_type = DESTINATION_UNREACHABLE;
 	bat_socket_add_packet(socket_client, icmp_packet, packet_len);
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 0b9133022d2d..5b8fe32043da 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -70,6 +70,21 @@ void neigh_node_free_ref(struct neigh_node *neigh_node)
70 call_rcu(&neigh_node->rcu, neigh_node_free_rcu); 70 call_rcu(&neigh_node->rcu, neigh_node_free_rcu);
71} 71}
72 72
73/* increases the refcounter of a found router */
74struct neigh_node *orig_node_get_router(struct orig_node *orig_node)
75{
76 struct neigh_node *router;
77
78 rcu_read_lock();
79 router = rcu_dereference(orig_node->router);
80
81 if (router && !atomic_inc_not_zero(&router->refcount))
82 router = NULL;
83
84 rcu_read_unlock();
85 return router;
86}
87
73struct neigh_node *create_neighbor(struct orig_node *orig_node, 88struct neigh_node *create_neighbor(struct orig_node *orig_node,
74 struct orig_node *orig_neigh_node, 89 struct orig_node *orig_neigh_node,
75 uint8_t *neigh, 90 uint8_t *neigh,
@@ -87,6 +102,7 @@ struct neigh_node *create_neighbor(struct orig_node *orig_node,
87 102
88 INIT_HLIST_NODE(&neigh_node->list); 103 INIT_HLIST_NODE(&neigh_node->list);
89 INIT_LIST_HEAD(&neigh_node->bonding_list); 104 INIT_LIST_HEAD(&neigh_node->bonding_list);
105 spin_lock_init(&neigh_node->tq_lock);
90 106
91 memcpy(neigh_node->addr, neigh, ETH_ALEN); 107 memcpy(neigh_node->addr, neigh, ETH_ALEN);
92 neigh_node->orig_node = orig_neigh_node; 108 neigh_node->orig_node = orig_neigh_node;
@@ -390,7 +406,7 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
390 struct hlist_node *node, *node_tmp; 406 struct hlist_node *node, *node_tmp;
391 struct hlist_head *head; 407 struct hlist_head *head;
392 struct orig_node *orig_node; 408 struct orig_node *orig_node;
393 struct neigh_node *neigh_node; 409 struct neigh_node *neigh_node, *neigh_node_tmp;
394 int batman_count = 0; 410 int batman_count = 0;
395 int last_seen_secs; 411 int last_seen_secs;
396 int last_seen_msecs; 412 int last_seen_msecs;
@@ -421,37 +437,41 @@ int orig_seq_print_text(struct seq_file *seq, void *offset)
421 437
422 rcu_read_lock(); 438 rcu_read_lock();
423 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { 439 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
424 if (!orig_node->router) 440 neigh_node = orig_node_get_router(orig_node);
441 if (!neigh_node)
425 continue; 442 continue;
426 443
427 if (orig_node->router->tq_avg == 0) 444 if (neigh_node->tq_avg == 0)
428 continue; 445 goto next;
429 446
430 last_seen_secs = jiffies_to_msecs(jiffies - 447 last_seen_secs = jiffies_to_msecs(jiffies -
431 orig_node->last_valid) / 1000; 448 orig_node->last_valid) / 1000;
432 last_seen_msecs = jiffies_to_msecs(jiffies - 449 last_seen_msecs = jiffies_to_msecs(jiffies -
433 orig_node->last_valid) % 1000; 450 orig_node->last_valid) % 1000;
434 451
435 neigh_node = orig_node->router;
436 seq_printf(seq, "%pM %4i.%03is (%3i) %pM [%10s]:", 452 seq_printf(seq, "%pM %4i.%03is (%3i) %pM [%10s]:",
437 orig_node->orig, last_seen_secs, 453 orig_node->orig, last_seen_secs,
438 last_seen_msecs, neigh_node->tq_avg, 454 last_seen_msecs, neigh_node->tq_avg,
439 neigh_node->addr, 455 neigh_node->addr,
440 neigh_node->if_incoming->net_dev->name); 456 neigh_node->if_incoming->net_dev->name);
441 457
442 hlist_for_each_entry_rcu(neigh_node, node_tmp, 458 hlist_for_each_entry_rcu(neigh_node_tmp, node_tmp,
443 &orig_node->neigh_list, list) { 459 &orig_node->neigh_list, list) {
444 seq_printf(seq, " %pM (%3i)", neigh_node->addr, 460 seq_printf(seq, " %pM (%3i)",
445 neigh_node->tq_avg); 461 neigh_node_tmp->addr,
462 neigh_node_tmp->tq_avg);
446 } 463 }
447 464
448 seq_printf(seq, "\n"); 465 seq_printf(seq, "\n");
449 batman_count++; 466 batman_count++;
467
468next:
469 neigh_node_free_ref(neigh_node);
450 } 470 }
451 rcu_read_unlock(); 471 rcu_read_unlock();
452 } 472 }
453 473
454 if ((batman_count == 0)) 474 if (batman_count == 0)
455 seq_printf(seq, "No batman nodes in range ...\n"); 475 seq_printf(seq, "No batman nodes in range ...\n");
456 476
457 return 0; 477 return 0;
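
orig_node_get_router() introduced above is the pattern the rest of this series converges on: dereference under RCU, pin with atomic_inc_not_zero(), use outside the read-side section, then release. A minimal caller sketch, assuming skb and orig_node are in scope (the drop label is hypothetical):

        struct neigh_node *router;

        router = orig_node_get_router(orig_node);
        if (!router)
                goto drop;      /* no route known for this originator */

        /* the getter's reference keeps router alive even if a
         * concurrent update_route() replaces orig_node->router */
        send_skb_packet(skb, router->if_incoming, router->addr);

        neigh_node_free_ref(router);    /* balance the getter's reference */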
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index 5cc011057da1..e1d641f27aa9 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -34,6 +34,7 @@ struct neigh_node *create_neighbor(struct orig_node *orig_node,
34 uint8_t *neigh, 34 uint8_t *neigh,
35 struct hard_iface *if_incoming); 35 struct hard_iface *if_incoming);
36void neigh_node_free_ref(struct neigh_node *neigh_node); 36void neigh_node_free_ref(struct neigh_node *neigh_node);
37struct neigh_node *orig_node_get_router(struct orig_node *orig_node);
37int orig_seq_print_text(struct seq_file *seq, void *offset); 38int orig_seq_print_text(struct seq_file *seq, void *offset);
38int orig_hash_add_if(struct hard_iface *hard_iface, int max_if_num); 39int orig_hash_add_if(struct hard_iface *hard_iface, int max_if_num);
39int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num); 40int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num);
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index c172f5d0e05a..f6c642246972 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -87,18 +87,20 @@ static void update_route(struct bat_priv *bat_priv,
87 struct neigh_node *neigh_node, 87 struct neigh_node *neigh_node,
88 unsigned char *hna_buff, int hna_buff_len) 88 unsigned char *hna_buff, int hna_buff_len)
89{ 89{
90 struct neigh_node *neigh_node_tmp; 90 struct neigh_node *curr_router;
91
92 curr_router = orig_node_get_router(orig_node);
91 93
92 /* route deleted */ 94 /* route deleted */
93 if ((orig_node->router) && (!neigh_node)) { 95 if ((curr_router) && (!neigh_node)) {
94 96
95 bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n", 97 bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n",
96 orig_node->orig); 98 orig_node->orig);
97 hna_global_del_orig(bat_priv, orig_node, 99 hna_global_del_orig(bat_priv, orig_node,
98 "originator timed out"); 100 "originator timed out");
99 101
100 /* route added */ 102 /* route added */
101 } else if ((!orig_node->router) && (neigh_node)) { 103 } else if ((!curr_router) && (neigh_node)) {
102 104
103 bat_dbg(DBG_ROUTES, bat_priv, 105 bat_dbg(DBG_ROUTES, bat_priv,
104 "Adding route towards: %pM (via %pM)\n", 106 "Adding route towards: %pM (via %pM)\n",
@@ -106,21 +108,29 @@ static void update_route(struct bat_priv *bat_priv,
106 hna_global_add_orig(bat_priv, orig_node, 108 hna_global_add_orig(bat_priv, orig_node,
107 hna_buff, hna_buff_len); 109 hna_buff, hna_buff_len);
108 110
109 /* route changed */ 111 /* route changed */
110 } else { 112 } else {
111 bat_dbg(DBG_ROUTES, bat_priv, 113 bat_dbg(DBG_ROUTES, bat_priv,
112 "Changing route towards: %pM " 114 "Changing route towards: %pM "
113 "(now via %pM - was via %pM)\n", 115 "(now via %pM - was via %pM)\n",
114 orig_node->orig, neigh_node->addr, 116 orig_node->orig, neigh_node->addr,
115 orig_node->router->addr); 117 curr_router->addr);
116 } 118 }
117 119
120 if (curr_router)
121 neigh_node_free_ref(curr_router);
122
123 /* increase refcount of new best neighbor */
118 if (neigh_node && !atomic_inc_not_zero(&neigh_node->refcount)) 124 if (neigh_node && !atomic_inc_not_zero(&neigh_node->refcount))
119 neigh_node = NULL; 125 neigh_node = NULL;
120 neigh_node_tmp = orig_node->router; 126
121 orig_node->router = neigh_node; 127 spin_lock_bh(&orig_node->neigh_list_lock);
122 if (neigh_node_tmp) 128 rcu_assign_pointer(orig_node->router, neigh_node);
123 neigh_node_free_ref(neigh_node_tmp); 129 spin_unlock_bh(&orig_node->neigh_list_lock);
130
131 /* decrease refcount of previous best neighbor */
132 if (curr_router)
133 neigh_node_free_ref(curr_router);
124} 134}
125 135
126 136
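
The two neigh_node_free_ref(curr_router) calls in the rewritten update_route() are intentional, not a double free: one balances the reference taken by orig_node_get_router(), the other drops the reference that the orig_node->router pointer itself held before rcu_assign_pointer() replaced it. An annotated sketch (the comments are explanatory, not from the patch):

        curr_router = orig_node_get_router(orig_node);  /* takes ref A */
        /* ... debug output for the deleted/added/changed route ... */
        if (curr_router)
                neigh_node_free_ref(curr_router);       /* drops ref A */

        if (neigh_node && !atomic_inc_not_zero(&neigh_node->refcount))
                neigh_node = NULL;      /* ref B: held by the pointer */

        spin_lock_bh(&orig_node->neigh_list_lock);
        rcu_assign_pointer(orig_node->router, neigh_node);
        spin_unlock_bh(&orig_node->neigh_list_lock);

        if (curr_router)                        /* drops the reference the */
                neigh_node_free_ref(curr_router); /* old router pointer held */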
@@ -128,16 +138,23 @@ void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
128 struct neigh_node *neigh_node, unsigned char *hna_buff, 138 struct neigh_node *neigh_node, unsigned char *hna_buff,
129 int hna_buff_len) 139 int hna_buff_len)
130{ 140{
141 struct neigh_node *router = NULL;
131 142
132 if (!orig_node) 143 if (!orig_node)
133 return; 144 goto out;
145
146 router = orig_node_get_router(orig_node);
134 147
135 if (orig_node->router != neigh_node) 148 if (router != neigh_node)
136 update_route(bat_priv, orig_node, neigh_node, 149 update_route(bat_priv, orig_node, neigh_node,
137 hna_buff, hna_buff_len); 150 hna_buff, hna_buff_len);
138 /* may be just HNA changed */ 151 /* may be just HNA changed */
139 else 152 else
140 update_HNA(bat_priv, orig_node, hna_buff, hna_buff_len); 153 update_HNA(bat_priv, orig_node, hna_buff, hna_buff_len);
154
155out:
156 if (router)
157 neigh_node_free_ref(router);
141} 158}
142 159
143static int is_bidirectional_neigh(struct orig_node *orig_node, 160static int is_bidirectional_neigh(struct orig_node *orig_node,
@@ -288,8 +305,8 @@ static void bonding_candidate_add(struct orig_node *orig_node,
288 struct neigh_node *neigh_node) 305 struct neigh_node *neigh_node)
289{ 306{
290 struct hlist_node *node; 307 struct hlist_node *node;
291 struct neigh_node *tmp_neigh_node; 308 struct neigh_node *tmp_neigh_node, *router = NULL;
292 uint8_t best_tq, interference_candidate = 0; 309 uint8_t interference_candidate = 0;
293 310
294 spin_lock_bh(&orig_node->neigh_list_lock); 311 spin_lock_bh(&orig_node->neigh_list_lock);
295 312
@@ -298,13 +315,12 @@ static void bonding_candidate_add(struct orig_node *orig_node,
298 neigh_node->orig_node->primary_addr)) 315 neigh_node->orig_node->primary_addr))
299 goto candidate_del; 316 goto candidate_del;
300 317
301 if (!orig_node->router) 318 router = orig_node_get_router(orig_node);
319 if (!router)
302 goto candidate_del; 320 goto candidate_del;
303 321
304 best_tq = orig_node->router->tq_avg;
305
306 /* ... and is good enough to be considered */ 322 /* ... and is good enough to be considered */
307 if (neigh_node->tq_avg < best_tq - BONDING_TQ_THRESHOLD) 323 if (neigh_node->tq_avg < router->tq_avg - BONDING_TQ_THRESHOLD)
308 goto candidate_del; 324 goto candidate_del;
309 325
310 /** 326 /**
@@ -350,7 +366,9 @@ candidate_del:
350 366
351out: 367out:
352 spin_unlock_bh(&orig_node->neigh_list_lock); 368 spin_unlock_bh(&orig_node->neigh_list_lock);
353 return; 369
370 if (router)
371 neigh_node_free_ref(router);
354} 372}
355 373
356/* copy primary address for bonding */ 374/* copy primary address for bonding */
@@ -373,6 +391,7 @@ static void update_orig(struct bat_priv *bat_priv,
373 char is_duplicate) 391 char is_duplicate)
374{ 392{
375 struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL; 393 struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
394 struct neigh_node *router = NULL;
376 struct orig_node *orig_node_tmp; 395 struct orig_node *orig_node_tmp;
377 struct hlist_node *node; 396 struct hlist_node *node;
378 int tmp_hna_buff_len; 397 int tmp_hna_buff_len;
@@ -396,10 +415,12 @@ static void update_orig(struct bat_priv *bat_priv,
396 if (is_duplicate) 415 if (is_duplicate)
397 continue; 416 continue;
398 417
418 spin_lock_bh(&tmp_neigh_node->tq_lock);
399 ring_buffer_set(tmp_neigh_node->tq_recv, 419 ring_buffer_set(tmp_neigh_node->tq_recv,
400 &tmp_neigh_node->tq_index, 0); 420 &tmp_neigh_node->tq_index, 0);
401 tmp_neigh_node->tq_avg = 421 tmp_neigh_node->tq_avg =
402 ring_buffer_avg(tmp_neigh_node->tq_recv); 422 ring_buffer_avg(tmp_neigh_node->tq_recv);
423 spin_unlock_bh(&tmp_neigh_node->tq_lock);
403 } 424 }
404 425
405 if (!neigh_node) { 426 if (!neigh_node) {
@@ -424,10 +445,12 @@ static void update_orig(struct bat_priv *bat_priv,
424 orig_node->flags = batman_packet->flags; 445 orig_node->flags = batman_packet->flags;
425 neigh_node->last_valid = jiffies; 446 neigh_node->last_valid = jiffies;
426 447
448 spin_lock_bh(&neigh_node->tq_lock);
427 ring_buffer_set(neigh_node->tq_recv, 449 ring_buffer_set(neigh_node->tq_recv,
428 &neigh_node->tq_index, 450 &neigh_node->tq_index,
429 batman_packet->tq); 451 batman_packet->tq);
430 neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv); 452 neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv);
453 spin_unlock_bh(&neigh_node->tq_lock);
431 454
432 if (!is_duplicate) { 455 if (!is_duplicate) {
433 orig_node->last_ttl = batman_packet->ttl; 456 orig_node->last_ttl = batman_packet->ttl;
@@ -441,19 +464,18 @@ static void update_orig(struct bat_priv *bat_priv,
441 464
442 /* if this neighbor already is our next hop there is nothing 465 /* if this neighbor already is our next hop there is nothing
443 * to change */ 466 * to change */
444 if (orig_node->router == neigh_node) 467 router = orig_node_get_router(orig_node);
468 if (router == neigh_node)
445 goto update_hna; 469 goto update_hna;
446 470
447 /* if this neighbor does not offer a better TQ we won't consider it */ 471 /* if this neighbor does not offer a better TQ we won't consider it */
448 if ((orig_node->router) && 472 if (router && (router->tq_avg > neigh_node->tq_avg))
449 (orig_node->router->tq_avg > neigh_node->tq_avg))
450 goto update_hna; 473 goto update_hna;
451 474
452 /* if the TQ is the same and the link not more symmetric we 475 /* if the TQ is the same and the link not more symmetric we
453 * won't consider it either */ 476 * won't consider it either */
454 if ((orig_node->router) && 477 if (router && (neigh_node->tq_avg == router->tq_avg)) {
455 (neigh_node->tq_avg == orig_node->router->tq_avg)) { 478 orig_node_tmp = router->orig_node;
456 orig_node_tmp = orig_node->router->orig_node;
457 spin_lock_bh(&orig_node_tmp->ogm_cnt_lock); 479 spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
458 bcast_own_sum_orig = 480 bcast_own_sum_orig =
459 orig_node_tmp->bcast_own_sum[if_incoming->if_num]; 481 orig_node_tmp->bcast_own_sum[if_incoming->if_num];
@@ -474,7 +496,7 @@ static void update_orig(struct bat_priv *bat_priv,
474 goto update_gw; 496 goto update_gw;
475 497
476update_hna: 498update_hna:
477 update_routes(bat_priv, orig_node, orig_node->router, 499 update_routes(bat_priv, orig_node, router,
478 hna_buff, tmp_hna_buff_len); 500 hna_buff, tmp_hna_buff_len);
479 501
480update_gw: 502update_gw:
@@ -496,6 +518,8 @@ unlock:
496out: 518out:
497 if (neigh_node) 519 if (neigh_node)
498 neigh_node_free_ref(neigh_node); 520 neigh_node_free_ref(neigh_node);
521 if (router)
522 neigh_node_free_ref(router);
499} 523}
500 524
501/* checks whether the host restarted and is in the protection time. 525/* checks whether the host restarted and is in the protection time.
@@ -603,6 +627,8 @@ void receive_bat_packet(struct ethhdr *ethhdr,
603 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); 627 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
604 struct hard_iface *hard_iface; 628 struct hard_iface *hard_iface;
605 struct orig_node *orig_neigh_node, *orig_node; 629 struct orig_node *orig_neigh_node, *orig_node;
630 struct neigh_node *router = NULL, *router_router = NULL;
631 struct neigh_node *orig_neigh_router = NULL;
606 char has_directlink_flag; 632 char has_directlink_flag;
607 char is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0; 633 char is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
608 char is_broadcast = 0, is_bidirectional, is_single_hop_neigh; 634 char is_broadcast = 0, is_bidirectional, is_single_hop_neigh;
@@ -747,14 +773,15 @@ void receive_bat_packet(struct ethhdr *ethhdr,
747 goto out; 773 goto out;
748 } 774 }
749 775
776 router = orig_node_get_router(orig_node);
777 if (router)
778 router_router = orig_node_get_router(router->orig_node);
779
750 /* avoid temporary routing loops */ 780 /* avoid temporary routing loops */
751 if ((orig_node->router) && 781 if (router && router_router &&
752 (orig_node->router->orig_node->router) && 782 (compare_eth(router->addr, batman_packet->prev_sender)) &&
753 (compare_eth(orig_node->router->addr,
754 batman_packet->prev_sender)) &&
755 !(compare_eth(batman_packet->orig, batman_packet->prev_sender)) && 783 !(compare_eth(batman_packet->orig, batman_packet->prev_sender)) &&
756 (compare_eth(orig_node->router->addr, 784 (compare_eth(router->addr, router_router->addr))) {
757 orig_node->router->orig_node->router->addr))) {
758 bat_dbg(DBG_BATMAN, bat_priv, 785 bat_dbg(DBG_BATMAN, bat_priv,
759 "Drop packet: ignoring all rebroadcast packets that " 786 "Drop packet: ignoring all rebroadcast packets that "
760 "may make me loop (sender: %pM)\n", ethhdr->h_source); 787 "may make me loop (sender: %pM)\n", ethhdr->h_source);
@@ -769,9 +796,11 @@ void receive_bat_packet(struct ethhdr *ethhdr,
769 if (!orig_neigh_node) 796 if (!orig_neigh_node)
770 goto out; 797 goto out;
771 798
799 orig_neigh_router = orig_node_get_router(orig_neigh_node);
800
772 /* drop packet if sender is not a direct neighbor and if we 801 /* drop packet if sender is not a direct neighbor and if we
773 * don't route towards it */ 802 * don't route towards it */
774 if (!is_single_hop_neigh && (!orig_neigh_node->router)) { 803 if (!is_single_hop_neigh && (!orig_neigh_router)) {
775 bat_dbg(DBG_BATMAN, bat_priv, 804 bat_dbg(DBG_BATMAN, bat_priv,
776 "Drop packet: OGM via unknown neighbor!\n"); 805 "Drop packet: OGM via unknown neighbor!\n");
777 goto out_neigh; 806 goto out_neigh;
@@ -825,6 +854,13 @@ out_neigh:
825 if ((orig_neigh_node) && (!is_single_hop_neigh)) 854 if ((orig_neigh_node) && (!is_single_hop_neigh))
826 orig_node_free_ref(orig_neigh_node); 855 orig_node_free_ref(orig_neigh_node);
827out: 856out:
857 if (router)
858 neigh_node_free_ref(router);
859 if (router_router)
860 neigh_node_free_ref(router_router);
861 if (orig_neigh_router)
862 neigh_node_free_ref(orig_neigh_router);
863
828 orig_node_free_ref(orig_node); 864 orig_node_free_ref(orig_node);
829} 865}
830 866
@@ -869,7 +905,7 @@ static int recv_my_icmp_packet(struct bat_priv *bat_priv,
869 struct sk_buff *skb, size_t icmp_len) 905 struct sk_buff *skb, size_t icmp_len)
870{ 906{
871 struct orig_node *orig_node = NULL; 907 struct orig_node *orig_node = NULL;
872 struct neigh_node *neigh_node = NULL; 908 struct neigh_node *router = NULL;
873 struct icmp_packet_rr *icmp_packet; 909 struct icmp_packet_rr *icmp_packet;
874 int ret = NET_RX_DROP; 910 int ret = NET_RX_DROP;
875 911
@@ -886,23 +922,13 @@ static int recv_my_icmp_packet(struct bat_priv *bat_priv,
886 922
887 /* answer echo request (ping) */ 923 /* answer echo request (ping) */
888 /* get routing information */ 924 /* get routing information */
889 rcu_read_lock();
890 orig_node = orig_hash_find(bat_priv, icmp_packet->orig); 925 orig_node = orig_hash_find(bat_priv, icmp_packet->orig);
891
892 if (!orig_node) 926 if (!orig_node)
893 goto unlock; 927 goto out;
894
895 neigh_node = orig_node->router;
896
897 if (!neigh_node)
898 goto unlock;
899
900 if (!atomic_inc_not_zero(&neigh_node->refcount)) {
901 neigh_node = NULL;
902 goto unlock;
903 }
904 928
905 rcu_read_unlock(); 929 router = orig_node_get_router(orig_node);
930 if (!router)
931 goto out;
906 932
907 /* create a copy of the skb, if needed, to modify it. */ 933 /* create a copy of the skb, if needed, to modify it. */
908 if (skb_cow(skb, sizeof(struct ethhdr)) < 0) 934 if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
@@ -916,15 +942,12 @@ static int recv_my_icmp_packet(struct bat_priv *bat_priv,
916 icmp_packet->msg_type = ECHO_REPLY; 942 icmp_packet->msg_type = ECHO_REPLY;
917 icmp_packet->ttl = TTL; 943 icmp_packet->ttl = TTL;
918 944
919 send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); 945 send_skb_packet(skb, router->if_incoming, router->addr);
920 ret = NET_RX_SUCCESS; 946 ret = NET_RX_SUCCESS;
921 goto out;
922 947
923unlock:
924 rcu_read_unlock();
925out: 948out:
926 if (neigh_node) 949 if (router)
927 neigh_node_free_ref(neigh_node); 950 neigh_node_free_ref(router);
928 if (orig_node) 951 if (orig_node)
929 orig_node_free_ref(orig_node); 952 orig_node_free_ref(orig_node);
930 return ret; 953 return ret;
@@ -934,7 +957,7 @@ static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
934 struct sk_buff *skb) 957 struct sk_buff *skb)
935{ 958{
936 struct orig_node *orig_node = NULL; 959 struct orig_node *orig_node = NULL;
937 struct neigh_node *neigh_node = NULL; 960 struct neigh_node *router = NULL;
938 struct icmp_packet *icmp_packet; 961 struct icmp_packet *icmp_packet;
939 int ret = NET_RX_DROP; 962 int ret = NET_RX_DROP;
940 963
@@ -952,23 +975,13 @@ static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
952 goto out; 975 goto out;
953 976
954 /* get routing information */ 977 /* get routing information */
955 rcu_read_lock();
956 orig_node = orig_hash_find(bat_priv, icmp_packet->orig); 978 orig_node = orig_hash_find(bat_priv, icmp_packet->orig);
957
958 if (!orig_node) 979 if (!orig_node)
959 goto unlock; 980 goto out;
960
961 neigh_node = orig_node->router;
962
963 if (!neigh_node)
964 goto unlock;
965
966 if (!atomic_inc_not_zero(&neigh_node->refcount)) {
967 neigh_node = NULL;
968 goto unlock;
969 }
970 981
971 rcu_read_unlock(); 982 router = orig_node_get_router(orig_node);
983 if (!router)
984 goto out;
972 985
973 /* create a copy of the skb, if needed, to modify it. */ 986 /* create a copy of the skb, if needed, to modify it. */
974 if (skb_cow(skb, sizeof(struct ethhdr)) < 0) 987 if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
@@ -982,15 +995,12 @@ static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
982 icmp_packet->msg_type = TTL_EXCEEDED; 995 icmp_packet->msg_type = TTL_EXCEEDED;
983 icmp_packet->ttl = TTL; 996 icmp_packet->ttl = TTL;
984 997
985 send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); 998 send_skb_packet(skb, router->if_incoming, router->addr);
986 ret = NET_RX_SUCCESS; 999 ret = NET_RX_SUCCESS;
987 goto out;
988 1000
989unlock:
990 rcu_read_unlock();
991out: 1001out:
992 if (neigh_node) 1002 if (router)
993 neigh_node_free_ref(neigh_node); 1003 neigh_node_free_ref(router);
994 if (orig_node) 1004 if (orig_node)
995 orig_node_free_ref(orig_node); 1005 orig_node_free_ref(orig_node);
996 return ret; 1006 return ret;
@@ -1003,7 +1013,7 @@ int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if)
1003 struct icmp_packet_rr *icmp_packet; 1013 struct icmp_packet_rr *icmp_packet;
1004 struct ethhdr *ethhdr; 1014 struct ethhdr *ethhdr;
1005 struct orig_node *orig_node = NULL; 1015 struct orig_node *orig_node = NULL;
1006 struct neigh_node *neigh_node = NULL; 1016 struct neigh_node *router = NULL;
1007 int hdr_size = sizeof(struct icmp_packet); 1017 int hdr_size = sizeof(struct icmp_packet);
1008 int ret = NET_RX_DROP; 1018 int ret = NET_RX_DROP;
1009 1019
@@ -1050,23 +1060,13 @@ int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if)
1050 return recv_icmp_ttl_exceeded(bat_priv, skb); 1060 return recv_icmp_ttl_exceeded(bat_priv, skb);
1051 1061
1052 /* get routing information */ 1062 /* get routing information */
1053 rcu_read_lock();
1054 orig_node = orig_hash_find(bat_priv, icmp_packet->dst); 1063 orig_node = orig_hash_find(bat_priv, icmp_packet->dst);
1055
1056 if (!orig_node) 1064 if (!orig_node)
1057 goto unlock; 1065 goto out;
1058
1059 neigh_node = orig_node->router;
1060
1061 if (!neigh_node)
1062 goto unlock;
1063
1064 if (!atomic_inc_not_zero(&neigh_node->refcount)) {
1065 neigh_node = NULL;
1066 goto unlock;
1067 }
1068 1066
1069 rcu_read_unlock(); 1067 router = orig_node_get_router(orig_node);
1068 if (!router)
1069 goto out;
1070 1070
1071 /* create a copy of the skb, if needed, to modify it. */ 1071 /* create a copy of the skb, if needed, to modify it. */
1072 if (skb_cow(skb, sizeof(struct ethhdr)) < 0) 1072 if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
@@ -1078,20 +1078,117 @@ int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if)
1078 icmp_packet->ttl--; 1078 icmp_packet->ttl--;
1079 1079
1080 /* route it */ 1080 /* route it */
1081 send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); 1081 send_skb_packet(skb, router->if_incoming, router->addr);
1082 ret = NET_RX_SUCCESS; 1082 ret = NET_RX_SUCCESS;
1083 goto out;
1084 1083
1085unlock:
1086 rcu_read_unlock();
1087out: 1084out:
1088 if (neigh_node) 1085 if (router)
1089 neigh_node_free_ref(neigh_node); 1086 neigh_node_free_ref(router);
1090 if (orig_node) 1087 if (orig_node)
1091 orig_node_free_ref(orig_node); 1088 orig_node_free_ref(orig_node);
1092 return ret; 1089 return ret;
1093} 1090}
1094 1091
1092/* In the bonding case, send the packets in a round
1093 * robin fashion over the remaining interfaces.
1094 *
1095 * This method rotates the bonding list and increases the
1096 * returned router's refcount. */
1097static struct neigh_node *find_bond_router(struct orig_node *primary_orig,
1098 struct hard_iface *recv_if)
1099{
1100 struct neigh_node *tmp_neigh_node;
1101 struct neigh_node *router = NULL, *first_candidate = NULL;
1102
1103 rcu_read_lock();
1104 list_for_each_entry_rcu(tmp_neigh_node, &primary_orig->bond_list,
1105 bonding_list) {
1106 if (!first_candidate)
1107 first_candidate = tmp_neigh_node;
1108
1109 /* recv_if == NULL on the first node. */
1110 if (tmp_neigh_node->if_incoming == recv_if)
1111 continue;
1112
1113 if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
1114 continue;
1115
1116 router = tmp_neigh_node;
1117 break;
1118 }
1119
1120 /* use the first candidate if nothing was found. */
1121 if (!router && first_candidate &&
1122 atomic_inc_not_zero(&first_candidate->refcount))
1123 router = first_candidate;
1124
1125 if (!router)
1126 goto out;
1127
1128 /* selected should point to the next element
1129 * after the current router */
1130 spin_lock_bh(&primary_orig->neigh_list_lock);
1131 /* this is a list_move(), which unfortunately
1132 * does not exist as an rcu version */
1133 list_del_rcu(&primary_orig->bond_list);
1134 list_add_rcu(&primary_orig->bond_list,
1135 &router->bonding_list);
1136 spin_unlock_bh(&primary_orig->neigh_list_lock);
1137
1138out:
1139 rcu_read_unlock();
1140 return router;
1141}
1142
1143/* Interface Alternating: Use the best of the
1144 * remaining candidates which are not using
1145 * this interface.
1146 *
1147 * Increases the returned router's refcount */
1148static struct neigh_node *find_ifalter_router(struct orig_node *primary_orig,
1149 struct hard_iface *recv_if)
1150{
1151 struct neigh_node *tmp_neigh_node;
1152 struct neigh_node *router = NULL, *first_candidate = NULL;
1153
1154 rcu_read_lock();
1155 list_for_each_entry_rcu(tmp_neigh_node, &primary_orig->bond_list,
1156 bonding_list) {
1157 if (!first_candidate)
1158 first_candidate = tmp_neigh_node;
1159
1160 /* recv_if == NULL on the first node. */
1161 if (tmp_neigh_node->if_incoming == recv_if)
1162 continue;
1163
1164 if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
1165 continue;
1166
1167 /* if we don't have a router yet
1168 * or this one is better, choose it. */
1169 if ((!router) ||
1170 (tmp_neigh_node->tq_avg > router->tq_avg)) {
1171 /* decrement refcount of
1172 * previously selected router */
1173 if (router)
1174 neigh_node_free_ref(router);
1175
1176 router = tmp_neigh_node;
1177 atomic_inc_not_zero(&router->refcount);
1178 }
1179
1180 neigh_node_free_ref(tmp_neigh_node);
1181 }
1182
1183 /* use the first candidate if nothing was found. */
1184 if (!router && first_candidate &&
1185 atomic_inc_not_zero(&first_candidate->refcount))
1186 router = first_candidate;
1187
1188 rcu_read_unlock();
1189 return router;
1190}
1191
1095/* find a suitable router for this originator, and use 1192/* find a suitable router for this originator, and use
1096 * bonding if possible. increases the found neighbor's 1193 * bonding if possible. increases the found neighbor's
1097 * refcount.*/ 1194 * refcount.*/
@@ -1101,14 +1198,15 @@ struct neigh_node *find_router(struct bat_priv *bat_priv,
1101{ 1198{
1102 struct orig_node *primary_orig_node; 1199 struct orig_node *primary_orig_node;
1103 struct orig_node *router_orig; 1200 struct orig_node *router_orig;
1104 struct neigh_node *router, *first_candidate, *tmp_neigh_node; 1201 struct neigh_node *router;
1105 static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0}; 1202 static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
1106 int bonding_enabled; 1203 int bonding_enabled;
1107 1204
1108 if (!orig_node) 1205 if (!orig_node)
1109 return NULL; 1206 return NULL;
1110 1207
1111 if (!orig_node->router) 1208 router = orig_node_get_router(orig_node);
1209 if (!router)
1112 return NULL; 1210 return NULL;
1113 1211
1114 /* without bonding, the first node should 1212 /* without bonding, the first node should
@@ -1117,9 +1215,8 @@ struct neigh_node *find_router(struct bat_priv *bat_priv,
1117 1215
1118 rcu_read_lock(); 1216 rcu_read_lock();
1119 /* select default router to output */ 1217 /* select default router to output */
1120 router = orig_node->router; 1218 router_orig = router->orig_node;
1121 router_orig = orig_node->router->orig_node; 1219 if (!router_orig) {
1122 if (!router_orig || !atomic_inc_not_zero(&router->refcount)) {
1123 rcu_read_unlock(); 1220 rcu_read_unlock();
1124 return NULL; 1221 return NULL;
1125 } 1222 }
@@ -1151,88 +1248,17 @@ struct neigh_node *find_router(struct bat_priv *bat_priv,
1151 if (atomic_read(&primary_orig_node->bond_candidates) < 2) 1248 if (atomic_read(&primary_orig_node->bond_candidates) < 2)
1152 goto return_router; 1249 goto return_router;
1153 1250
1154
1155 /* all nodes between should choose a candidate which 1251 /* all nodes between should choose a candidate which
1156 * is not on the interface where the packet came 1252 * is not on the interface where the packet came
1157 * in. */ 1253 * in. */
1158 1254
1159 neigh_node_free_ref(router); 1255 neigh_node_free_ref(router);
1160 first_candidate = NULL;
1161 router = NULL;
1162
1163 if (bonding_enabled) {
1164 /* in the bonding case, send the packets in a round
1165 * robin fashion over the remaining interfaces. */
1166
1167 list_for_each_entry_rcu(tmp_neigh_node,
1168 &primary_orig_node->bond_list, bonding_list) {
1169 if (!first_candidate)
1170 first_candidate = tmp_neigh_node;
1171 /* recv_if == NULL on the first node. */
1172 if (tmp_neigh_node->if_incoming != recv_if &&
1173 atomic_inc_not_zero(&tmp_neigh_node->refcount)) {
1174 router = tmp_neigh_node;
1175 break;
1176 }
1177 }
1178
1179 /* use the first candidate if nothing was found. */
1180 if (!router && first_candidate &&
1181 atomic_inc_not_zero(&first_candidate->refcount))
1182 router = first_candidate;
1183
1184 if (!router) {
1185 rcu_read_unlock();
1186 return NULL;
1187 }
1188
1189 /* selected should point to the next element
1190 * after the current router */
1191 spin_lock_bh(&primary_orig_node->neigh_list_lock);
1192 /* this is a list_move(), which unfortunately
1193 * does not exist as rcu version */
1194 list_del_rcu(&primary_orig_node->bond_list);
1195 list_add_rcu(&primary_orig_node->bond_list,
1196 &router->bonding_list);
1197 spin_unlock_bh(&primary_orig_node->neigh_list_lock);
1198
1199 } else {
1200 /* if bonding is disabled, use the best of the
1201 * remaining candidates which are not using
1202 * this interface. */
1203 list_for_each_entry_rcu(tmp_neigh_node,
1204 &primary_orig_node->bond_list, bonding_list) {
1205 if (!first_candidate)
1206 first_candidate = tmp_neigh_node;
1207
1208 /* recv_if == NULL on the first node. */
1209 if (tmp_neigh_node->if_incoming == recv_if)
1210 continue;
1211 1256
1212 if (!atomic_inc_not_zero(&tmp_neigh_node->refcount)) 1257 if (bonding_enabled)
1213 continue; 1258 router = find_bond_router(primary_orig_node, recv_if);
1214 1259 else
1215 /* if we don't have a router yet 1260 router = find_ifalter_router(primary_orig_node, recv_if);
1216 * or this one is better, choose it. */
1217 if ((!router) ||
1218 (tmp_neigh_node->tq_avg > router->tq_avg)) {
1219 /* decrement refcount of
1220 * previously selected router */
1221 if (router)
1222 neigh_node_free_ref(router);
1223
1224 router = tmp_neigh_node;
1225 atomic_inc_not_zero(&router->refcount);
1226 }
1227
1228 neigh_node_free_ref(tmp_neigh_node);
1229 }
1230 1261
1231 /* use the first candidate if nothing was found. */
1232 if (!router && first_candidate &&
1233 atomic_inc_not_zero(&first_candidate->refcount))
1234 router = first_candidate;
1235 }
1236return_router: 1262return_router:
1237 rcu_read_unlock(); 1263 rcu_read_unlock();
1238 return router; 1264 return router;
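
find_router() now delegates the two bonding policies to find_bond_router() and find_ifalter_router(). The refcounting in the interface-alternating loop is easy to misread: each candidate is pinned only for the duration of one pass, the current best holds one extra reference, and exactly one reference remains on whatever is returned. One loop pass, condensed, with explanatory comments added:

        if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
                continue;                       /* pin candidate for this pass */

        if (!router || tmp_neigh_node->tq_avg > router->tq_avg) {
                if (router)
                        neigh_node_free_ref(router);    /* unpin previous best */
                router = tmp_neigh_node;
                atomic_inc_not_zero(&router->refcount); /* ref that survives */
        }                                               /* the loop */

        neigh_node_free_ref(tmp_neigh_node);            /* drop per-pass pin */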
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index d49e54d932af..e78670c3c4b7 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -308,6 +308,7 @@ void schedule_forward_packet(struct orig_node *orig_node,
308 struct hard_iface *if_incoming) 308 struct hard_iface *if_incoming)
309{ 309{
310 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); 310 struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
311 struct neigh_node *router;
311 unsigned char in_tq, in_ttl, tq_avg = 0; 312 unsigned char in_tq, in_ttl, tq_avg = 0;
312 unsigned long send_time; 313 unsigned long send_time;
313 314
@@ -316,6 +317,8 @@ void schedule_forward_packet(struct orig_node *orig_node,
316 return; 317 return;
317 } 318 }
318 319
320 router = orig_node_get_router(orig_node);
321
319 in_tq = batman_packet->tq; 322 in_tq = batman_packet->tq;
320 in_ttl = batman_packet->ttl; 323 in_ttl = batman_packet->ttl;
321 324
@@ -324,20 +327,22 @@ void schedule_forward_packet(struct orig_node *orig_node,
324 327
325 /* rebroadcast tq of our best ranking neighbor to ensure the rebroadcast 328 /* rebroadcast tq of our best ranking neighbor to ensure the rebroadcast
326 * of our best tq value */ 329 * of our best tq value */
327 if ((orig_node->router) && (orig_node->router->tq_avg != 0)) { 330 if (router && router->tq_avg != 0) {
328 331
329 /* rebroadcast ogm of best ranking neighbor as is */ 332 /* rebroadcast ogm of best ranking neighbor as is */
330 if (!compare_eth(orig_node->router->addr, ethhdr->h_source)) { 333 if (!compare_eth(router->addr, ethhdr->h_source)) {
331 batman_packet->tq = orig_node->router->tq_avg; 334 batman_packet->tq = router->tq_avg;
332 335
333 if (orig_node->router->last_ttl) 336 if (router->last_ttl)
334 batman_packet->ttl = orig_node->router->last_ttl 337 batman_packet->ttl = router->last_ttl - 1;
335 - 1;
336 } 338 }
337 339
338 tq_avg = orig_node->router->tq_avg; 340 tq_avg = router->tq_avg;
339 } 341 }
340 342
343 if (router)
344 neigh_node_free_ref(router);
345
341 /* apply hop penalty */ 346 /* apply hop penalty */
342 batman_packet->tq = hop_penalty(batman_packet->tq, bat_priv); 347 batman_packet->tq = hop_penalty(batman_packet->tq, bat_priv);
343 348
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 9ed26140a269..f4d80ad008c2 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -43,8 +43,6 @@ static void bat_get_drvinfo(struct net_device *dev,
43static u32 bat_get_msglevel(struct net_device *dev); 43static u32 bat_get_msglevel(struct net_device *dev);
44static void bat_set_msglevel(struct net_device *dev, u32 value); 44static void bat_set_msglevel(struct net_device *dev, u32 value);
45static u32 bat_get_link(struct net_device *dev); 45static u32 bat_get_link(struct net_device *dev);
46static u32 bat_get_rx_csum(struct net_device *dev);
47static int bat_set_rx_csum(struct net_device *dev, u32 data);
48 46
49static const struct ethtool_ops bat_ethtool_ops = { 47static const struct ethtool_ops bat_ethtool_ops = {
50 .get_settings = bat_get_settings, 48 .get_settings = bat_get_settings,
@@ -52,8 +50,6 @@ static const struct ethtool_ops bat_ethtool_ops = {
52 .get_msglevel = bat_get_msglevel, 50 .get_msglevel = bat_get_msglevel,
53 .set_msglevel = bat_set_msglevel, 51 .set_msglevel = bat_set_msglevel,
54 .get_link = bat_get_link, 52 .get_link = bat_get_link,
55 .get_rx_csum = bat_get_rx_csum,
56 .set_rx_csum = bat_set_rx_csum
57}; 53};
58 54
59int my_skb_head_push(struct sk_buff *skb, unsigned int len) 55int my_skb_head_push(struct sk_buff *skb, unsigned int len)
@@ -90,10 +86,51 @@ static void softif_neigh_free_ref(struct softif_neigh *softif_neigh)
90 call_rcu(&softif_neigh->rcu, softif_neigh_free_rcu); 86 call_rcu(&softif_neigh->rcu, softif_neigh_free_rcu);
91} 87}
92 88
89static struct softif_neigh *softif_neigh_get_selected(struct bat_priv *bat_priv)
90{
91 struct softif_neigh *neigh;
92
93 rcu_read_lock();
94 neigh = rcu_dereference(bat_priv->softif_neigh);
95
96 if (neigh && !atomic_inc_not_zero(&neigh->refcount))
97 neigh = NULL;
98
99 rcu_read_unlock();
100 return neigh;
101}
102
103static void softif_neigh_select(struct bat_priv *bat_priv,
104 struct softif_neigh *new_neigh)
105{
106 struct softif_neigh *curr_neigh;
107
108 spin_lock_bh(&bat_priv->softif_neigh_lock);
109
110 if (new_neigh && !atomic_inc_not_zero(&new_neigh->refcount))
111 new_neigh = NULL;
112
113 curr_neigh = bat_priv->softif_neigh;
114 rcu_assign_pointer(bat_priv->softif_neigh, new_neigh);
115
116 if (curr_neigh)
117 softif_neigh_free_ref(curr_neigh);
118
119 spin_unlock_bh(&bat_priv->softif_neigh_lock);
120}
121
122static void softif_neigh_deselect(struct bat_priv *bat_priv)
123{
124 softif_neigh_select(bat_priv, NULL);
125}
126
93void softif_neigh_purge(struct bat_priv *bat_priv) 127void softif_neigh_purge(struct bat_priv *bat_priv)
94{ 128{
95 struct softif_neigh *softif_neigh, *softif_neigh_tmp; 129 struct softif_neigh *softif_neigh, *curr_softif_neigh;
96 struct hlist_node *node, *node_tmp; 130 struct hlist_node *node, *node_tmp;
131 char do_deselect = 0;
132
133 curr_softif_neigh = softif_neigh_get_selected(bat_priv);
97 134
98 spin_lock_bh(&bat_priv->softif_neigh_lock); 135 spin_lock_bh(&bat_priv->softif_neigh_lock);
99 136
@@ -105,22 +142,26 @@ void softif_neigh_purge(struct bat_priv *bat_priv)
105 (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE)) 142 (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE))
106 continue; 143 continue;
107 144
108 hlist_del_rcu(&softif_neigh->list); 145 if (curr_softif_neigh == softif_neigh) {
109
110 if (bat_priv->softif_neigh == softif_neigh) {
111 bat_dbg(DBG_ROUTES, bat_priv, 146 bat_dbg(DBG_ROUTES, bat_priv,
112 "Current mesh exit point '%pM' vanished " 147 "Current mesh exit point '%pM' vanished "
113 "(vid: %d).\n", 148 "(vid: %d).\n",
114 softif_neigh->addr, softif_neigh->vid); 149 softif_neigh->addr, softif_neigh->vid);
115 softif_neigh_tmp = bat_priv->softif_neigh; 150 do_deselect = 1;
116 bat_priv->softif_neigh = NULL;
117 softif_neigh_free_ref(softif_neigh_tmp);
118 } 151 }
119 152
153 hlist_del_rcu(&softif_neigh->list);
120 softif_neigh_free_ref(softif_neigh); 154 softif_neigh_free_ref(softif_neigh);
121 } 155 }
122 156
123 spin_unlock_bh(&bat_priv->softif_neigh_lock); 157 spin_unlock_bh(&bat_priv->softif_neigh_lock);
158
159 /* softif_neigh_deselect() needs to acquire the softif_neigh_lock */
160 if (do_deselect)
161 softif_neigh_deselect(bat_priv);
162
163 if (curr_softif_neigh)
164 softif_neigh_free_ref(curr_softif_neigh);
124} 165}
125 166
126static struct softif_neigh *softif_neigh_get(struct bat_priv *bat_priv, 167static struct softif_neigh *softif_neigh_get(struct bat_priv *bat_priv,
@@ -171,6 +212,7 @@ int softif_neigh_seq_print_text(struct seq_file *seq, void *offset)
171 struct bat_priv *bat_priv = netdev_priv(net_dev); 212 struct bat_priv *bat_priv = netdev_priv(net_dev);
172 struct softif_neigh *softif_neigh; 213 struct softif_neigh *softif_neigh;
173 struct hlist_node *node; 214 struct hlist_node *node;
215 struct softif_neigh *curr_softif_neigh;
174 216
175 if (!bat_priv->primary_if) { 217 if (!bat_priv->primary_if) {
176 return seq_printf(seq, "BATMAN mesh %s disabled - " 218 return seq_printf(seq, "BATMAN mesh %s disabled - "
@@ -180,14 +222,17 @@ int softif_neigh_seq_print_text(struct seq_file *seq, void *offset)
180 222
181 seq_printf(seq, "Softif neighbor list (%s)\n", net_dev->name); 223 seq_printf(seq, "Softif neighbor list (%s)\n", net_dev->name);
182 224
225 curr_softif_neigh = softif_neigh_get_selected(bat_priv);
183 rcu_read_lock(); 226 rcu_read_lock();
184 hlist_for_each_entry_rcu(softif_neigh, node, 227 hlist_for_each_entry_rcu(softif_neigh, node,
185 &bat_priv->softif_neigh_list, list) 228 &bat_priv->softif_neigh_list, list)
186 seq_printf(seq, "%s %pM (vid: %d)\n", 229 seq_printf(seq, "%s %pM (vid: %d)\n",
187 bat_priv->softif_neigh == softif_neigh 230 curr_softif_neigh == softif_neigh
188 ? "=>" : " ", softif_neigh->addr, 231 ? "=>" : " ", softif_neigh->addr,
189 softif_neigh->vid); 232 softif_neigh->vid);
190 rcu_read_unlock(); 233 rcu_read_unlock();
234 if (curr_softif_neigh)
235 softif_neigh_free_ref(curr_softif_neigh);
191 236
192 return 0; 237 return 0;
193} 238}
@@ -198,7 +243,8 @@ static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev,
198 struct bat_priv *bat_priv = netdev_priv(dev); 243 struct bat_priv *bat_priv = netdev_priv(dev);
199 struct ethhdr *ethhdr = (struct ethhdr *)skb->data; 244 struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
200 struct batman_packet *batman_packet; 245 struct batman_packet *batman_packet;
201 struct softif_neigh *softif_neigh, *softif_neigh_tmp; 246 struct softif_neigh *softif_neigh;
247 struct softif_neigh *curr_softif_neigh = NULL;
202 248
203 if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) 249 if (ntohs(ethhdr->h_proto) == ETH_P_8021Q)
204 batman_packet = (struct batman_packet *) 250 batman_packet = (struct batman_packet *)
@@ -223,7 +269,8 @@ static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev,
223 if (!softif_neigh) 269 if (!softif_neigh)
224 goto err; 270 goto err;
225 271
226 if (bat_priv->softif_neigh == softif_neigh) 272 curr_softif_neigh = softif_neigh_get_selected(bat_priv);
273 if (curr_softif_neigh == softif_neigh)
227 goto out; 274 goto out;
228 275
229 /* we got a neighbor but its mac is 'bigger' than ours */ 276 /* we got a neighbor but its mac is 'bigger' than ours */
@@ -232,38 +279,39 @@ static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev,
232 goto out; 279 goto out;
233 280
234 /* switch to new 'smallest neighbor' */ 281 /* switch to new 'smallest neighbor' */
235 if ((bat_priv->softif_neigh) && 282 if ((curr_softif_neigh) &&
236 (memcmp(softif_neigh->addr, bat_priv->softif_neigh->addr, 283 (memcmp(softif_neigh->addr, curr_softif_neigh->addr,
237 ETH_ALEN) < 0)) { 284 ETH_ALEN) < 0)) {
238 bat_dbg(DBG_ROUTES, bat_priv, 285 bat_dbg(DBG_ROUTES, bat_priv,
239 "Changing mesh exit point from %pM (vid: %d) " 286 "Changing mesh exit point from %pM (vid: %d) "
240 "to %pM (vid: %d).\n", 287 "to %pM (vid: %d).\n",
241 bat_priv->softif_neigh->addr, 288 curr_softif_neigh->addr,
242 bat_priv->softif_neigh->vid, 289 curr_softif_neigh->vid,
243 softif_neigh->addr, softif_neigh->vid); 290 softif_neigh->addr, softif_neigh->vid);
244 softif_neigh_tmp = bat_priv->softif_neigh; 291
245 bat_priv->softif_neigh = softif_neigh; 292 softif_neigh_select(bat_priv, softif_neigh);
246 softif_neigh_free_ref(softif_neigh_tmp); 293 goto out;
247 /* we need to hold the additional reference */
248 goto err;
249 } 294 }
250 295
251 /* close own batX device and use softif_neigh as exit node */ 296 /* close own batX device and use softif_neigh as exit node */
252 if ((!bat_priv->softif_neigh) && 297 if ((!curr_softif_neigh) &&
253 (memcmp(softif_neigh->addr, 298 (memcmp(softif_neigh->addr,
254 bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN) < 0)) { 299 bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN) < 0)) {
255 bat_dbg(DBG_ROUTES, bat_priv, 300 bat_dbg(DBG_ROUTES, bat_priv,
256 "Setting mesh exit point to %pM (vid: %d).\n", 301 "Setting mesh exit point to %pM (vid: %d).\n",
257 softif_neigh->addr, softif_neigh->vid); 302 softif_neigh->addr, softif_neigh->vid);
258 bat_priv->softif_neigh = softif_neigh; 303
259 /* we need to hold the additional reference */ 304 softif_neigh_select(bat_priv, softif_neigh);
260 goto err; 305 goto out;
261 } 306 }
262 307
263out: 308out:
264 softif_neigh_free_ref(softif_neigh); 309 softif_neigh_free_ref(softif_neigh);
265err: 310err:
266 kfree_skb(skb); 311 kfree_skb(skb);
312 if (curr_softif_neigh)
313 softif_neigh_free_ref(curr_softif_neigh);
314
267 return; 315 return;
268} 316}
269 317
@@ -321,6 +369,7 @@ int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
321 struct bat_priv *bat_priv = netdev_priv(soft_iface); 369 struct bat_priv *bat_priv = netdev_priv(soft_iface);
322 struct bcast_packet *bcast_packet; 370 struct bcast_packet *bcast_packet;
323 struct vlan_ethhdr *vhdr; 371 struct vlan_ethhdr *vhdr;
372 struct softif_neigh *curr_softif_neigh = NULL;
324 int data_len = skb->len, ret; 373 int data_len = skb->len, ret;
325 short vid = -1; 374 short vid = -1;
326 bool do_bcast = false; 375 bool do_bcast = false;
@@ -348,7 +397,8 @@ int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
348 * if we have another chosen mesh exit node in range 397 * if we have another chosen mesh exit node in range
349 * it will transport the packets to the mesh 398 * it will transport the packets to the mesh
350 */ 399 */
351 if ((bat_priv->softif_neigh) && (bat_priv->softif_neigh->vid == vid)) 400 curr_softif_neigh = softif_neigh_get_selected(bat_priv);
401 if ((curr_softif_neigh) && (curr_softif_neigh->vid == vid))
352 goto dropped; 402 goto dropped;
353 403
354 /* TODO: check this for locks */ 404 /* TODO: check this for locks */
@@ -410,6 +460,8 @@ dropped:
410dropped_freed: 460dropped_freed:
411 bat_priv->stats.tx_dropped++; 461 bat_priv->stats.tx_dropped++;
412end: 462end:
463 if (curr_softif_neigh)
464 softif_neigh_free_ref(curr_softif_neigh);
413 return NETDEV_TX_OK; 465 return NETDEV_TX_OK;
414} 466}
415 467
@@ -421,6 +473,7 @@ void interface_rx(struct net_device *soft_iface,
421 struct unicast_packet *unicast_packet; 473 struct unicast_packet *unicast_packet;
422 struct ethhdr *ethhdr; 474 struct ethhdr *ethhdr;
423 struct vlan_ethhdr *vhdr; 475 struct vlan_ethhdr *vhdr;
476 struct softif_neigh *curr_softif_neigh = NULL;
424 short vid = -1; 477 short vid = -1;
425 int ret; 478 int ret;
426 479
@@ -450,7 +503,8 @@ void interface_rx(struct net_device *soft_iface,
450 * if we have another chosen mesh exit node in range 503 * if we have another chosen mesh exit node in range
451 * it will transport the packets to the non-mesh network 504 * it will transport the packets to the non-mesh network
452 */ 505 */
453 if ((bat_priv->softif_neigh) && (bat_priv->softif_neigh->vid == vid)) { 506 curr_softif_neigh = softif_neigh_get_selected(bat_priv);
507 if (curr_softif_neigh && (curr_softif_neigh->vid == vid)) {
454 skb_push(skb, hdr_size); 508 skb_push(skb, hdr_size);
455 unicast_packet = (struct unicast_packet *)skb->data; 509 unicast_packet = (struct unicast_packet *)skb->data;
456 510
@@ -461,7 +515,7 @@ void interface_rx(struct net_device *soft_iface,
461 skb_reset_mac_header(skb); 515 skb_reset_mac_header(skb);
462 516
463 memcpy(unicast_packet->dest, 517 memcpy(unicast_packet->dest,
464 bat_priv->softif_neigh->addr, ETH_ALEN); 518 curr_softif_neigh->addr, ETH_ALEN);
465 ret = route_unicast_packet(skb, recv_if); 519 ret = route_unicast_packet(skb, recv_if);
466 if (ret == NET_RX_DROP) 520 if (ret == NET_RX_DROP)
467 goto dropped; 521 goto dropped;
@@ -474,7 +528,7 @@ void interface_rx(struct net_device *soft_iface,
474 goto dropped; 528 goto dropped;
475 skb->protocol = eth_type_trans(skb, soft_iface); 529 skb->protocol = eth_type_trans(skb, soft_iface);
476 530
477 /* should not be neccesary anymore as we use skb_pull_rcsum() 531 /* should not be necessary anymore as we use skb_pull_rcsum()
478 * TODO: please verify this and remove this TODO 532 * TODO: please verify this and remove this TODO
479 * -- Dec 21st 2009, Simon Wunderlich */ 533 * -- Dec 21st 2009, Simon Wunderlich */
480 534
@@ -486,11 +540,13 @@ void interface_rx(struct net_device *soft_iface,
486 soft_iface->last_rx = jiffies; 540 soft_iface->last_rx = jiffies;
487 541
488 netif_rx(skb); 542 netif_rx(skb);
489 return; 543 goto out;
490 544
491dropped: 545dropped:
492 kfree_skb(skb); 546 kfree_skb(skb);
493out: 547out:
548 if (curr_softif_neigh)
549 softif_neigh_free_ref(curr_softif_neigh);
494 return; 550 return;
495} 551}
496 552
@@ -524,6 +580,7 @@ static void interface_setup(struct net_device *dev)
524 dev->hard_start_xmit = interface_tx; 580 dev->hard_start_xmit = interface_tx;
525#endif 581#endif
526 dev->destructor = free_netdev; 582 dev->destructor = free_netdev;
583 dev->tx_queue_len = 0;
527 584
528 /** 585 /**
529 * can't call min_mtu, because the needed variables 586 * can't call min_mtu, because the needed variables
@@ -675,12 +732,3 @@ static u32 bat_get_link(struct net_device *dev)
675 return 1; 732 return 1;
676} 733}
677 734
678static u32 bat_get_rx_csum(struct net_device *dev)
679{
680 return 0;
681}
682
683static int bat_set_rx_csum(struct net_device *dev, u32 data)
684{
685 return -EOPNOTSUPP;
686}
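
The softif_neigh_purge() rework above doubles as a lock-ordering fix: the purge loop runs with softif_neigh_lock held, and softif_neigh_deselect() reaches softif_neigh_select(), which takes the same lock, so deselecting inline would self-deadlock. Hence the do_deselect flag. A condensed sketch of the resulting shape (not a verbatim copy of the hunks above):

        curr_softif_neigh = softif_neigh_get_selected(bat_priv);

        spin_lock_bh(&bat_priv->softif_neigh_lock);
        hlist_for_each_entry_safe(softif_neigh, node, node_tmp,
                                  &bat_priv->softif_neigh_list, list) {
                if (curr_softif_neigh == softif_neigh)
                        do_deselect = 1;        /* remember, deselect later */

                hlist_del_rcu(&softif_neigh->list);
                softif_neigh_free_ref(softif_neigh);
        }
        spin_unlock_bh(&bat_priv->softif_neigh_lock);

        if (do_deselect)                        /* lock released, safe now */
                softif_neigh_deselect(bat_priv);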
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 83445cf0cc9f..75123b1ae0de 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -67,7 +67,7 @@ struct hard_iface {
67struct orig_node { 67struct orig_node {
68 uint8_t orig[ETH_ALEN]; 68 uint8_t orig[ETH_ALEN];
69 uint8_t primary_addr[ETH_ALEN]; 69 uint8_t primary_addr[ETH_ALEN];
70 struct neigh_node *router; 70 struct neigh_node __rcu *router; /* rcu protected pointer */
71 unsigned long *bcast_own; 71 unsigned long *bcast_own;
72 uint8_t *bcast_own_sum; 72 uint8_t *bcast_own_sum;
73 unsigned long last_valid; 73 unsigned long last_valid;
@@ -83,7 +83,7 @@ struct orig_node {
83 uint32_t last_bcast_seqno; 83 uint32_t last_bcast_seqno;
84 struct hlist_head neigh_list; 84 struct hlist_head neigh_list;
85 struct list_head frag_list; 85 struct list_head frag_list;
86 spinlock_t neigh_list_lock; /* protects neighbor list */ 86 spinlock_t neigh_list_lock; /* protects neigh_list and router */
87 atomic_t refcount; 87 atomic_t refcount;
88 struct rcu_head rcu; 88 struct rcu_head rcu;
89 struct hlist_node hash_entry; 89 struct hlist_node hash_entry;
@@ -125,6 +125,7 @@ struct neigh_node {
125 struct rcu_head rcu; 125 struct rcu_head rcu;
126 struct orig_node *orig_node; 126 struct orig_node *orig_node;
127 struct hard_iface *if_incoming; 127 struct hard_iface *if_incoming;
128 spinlock_t tq_lock; /* protects: tq_recv, tq_index */
128}; 129};
129 130
130 131
@@ -146,7 +147,7 @@ struct bat_priv {
146 atomic_t batman_queue_left; 147 atomic_t batman_queue_left;
147 char num_ifaces; 148 char num_ifaces;
148 struct hlist_head softif_neigh_list; 149 struct hlist_head softif_neigh_list;
149 struct softif_neigh *softif_neigh; 150 struct softif_neigh __rcu *softif_neigh;
150 struct debug_log *debug_log; 151 struct debug_log *debug_log;
151 struct hard_iface *primary_if; 152 struct hard_iface *primary_if;
152 struct kobject *mesh_obj; 153 struct kobject *mesh_obj;
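
The __rcu annotations in types.h are what let sparse (make C=1) flag any access to orig_node->router or bat_priv->softif_neigh that bypasses the RCU accessors. The access pattern the annotation enforces, as used throughout this patch (new_router is a placeholder):

        /* read side */
        rcu_read_lock();
        router = rcu_dereference(orig_node->router);
        /* use router here, or take a reference before unlocking */
        rcu_read_unlock();

        /* update side, serialized by neigh_list_lock */
        spin_lock_bh(&orig_node->neigh_list_lock);
        rcu_assign_pointer(orig_node->router, new_router);
        spin_unlock_bh(&orig_node->neigh_list_lock);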
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index 19f84bd443af..d46acc815138 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -289,7 +289,7 @@ int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv)
289 289
290 /* get routing information */ 290 /* get routing information */
291 if (is_multicast_ether_addr(ethhdr->h_dest)) { 291 if (is_multicast_ether_addr(ethhdr->h_dest)) {
292 orig_node = (struct orig_node *)gw_get_selected(bat_priv); 292 orig_node = (struct orig_node *)gw_get_selected_orig(bat_priv);
293 if (orig_node) 293 if (orig_node)
294 goto find_router; 294 goto find_router;
295 } 295 }
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
index f90212f42082..d4cc4f5399f4 100644
--- a/net/batman-adv/vis.c
+++ b/net/batman-adv/vis.c
@@ -558,6 +558,7 @@ static int find_best_vis_server(struct bat_priv *bat_priv,
558 struct vis_info *info) 558 struct vis_info *info)
559{ 559{
560 struct hashtable_t *hash = bat_priv->orig_hash; 560 struct hashtable_t *hash = bat_priv->orig_hash;
561 struct neigh_node *router;
561 struct hlist_node *node; 562 struct hlist_node *node;
562 struct hlist_head *head; 563 struct hlist_head *head;
563 struct orig_node *orig_node; 564 struct orig_node *orig_node;
@@ -571,13 +572,17 @@ static int find_best_vis_server(struct bat_priv *bat_priv,
571 572
572 rcu_read_lock(); 573 rcu_read_lock();
573 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { 574 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
574 if ((orig_node) && (orig_node->router) && 575 router = orig_node_get_router(orig_node);
575 (orig_node->flags & VIS_SERVER) && 576 if (!router)
576 (orig_node->router->tq_avg > best_tq)) { 577 continue;
577 best_tq = orig_node->router->tq_avg; 578
579 if ((orig_node->flags & VIS_SERVER) &&
580 (router->tq_avg > best_tq)) {
581 best_tq = router->tq_avg;
578 memcpy(packet->target_orig, orig_node->orig, 582 memcpy(packet->target_orig, orig_node->orig,
579 ETH_ALEN); 583 ETH_ALEN);
580 } 584 }
585 neigh_node_free_ref(router);
581 } 586 }
582 rcu_read_unlock(); 587 rcu_read_unlock();
583 } 588 }
@@ -605,7 +610,7 @@ static int generate_vis_packet(struct bat_priv *bat_priv)
605 struct hlist_node *node; 610 struct hlist_node *node;
606 struct hlist_head *head; 611 struct hlist_head *head;
607 struct orig_node *orig_node; 612 struct orig_node *orig_node;
608 struct neigh_node *neigh_node; 613 struct neigh_node *router;
609 struct vis_info *info = (struct vis_info *)bat_priv->my_vis_info; 614 struct vis_info *info = (struct vis_info *)bat_priv->my_vis_info;
610 struct vis_packet *packet = (struct vis_packet *)info->skb_packet->data; 615 struct vis_packet *packet = (struct vis_packet *)info->skb_packet->data;
611 struct vis_info_entry *entry; 616 struct vis_info_entry *entry;
@@ -633,30 +638,32 @@ static int generate_vis_packet(struct bat_priv *bat_priv)
633 638
634 rcu_read_lock(); 639 rcu_read_lock();
635 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { 640 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
636 neigh_node = orig_node->router; 641 router = orig_node_get_router(orig_node);
637 642 if (!router)
638 if (!neigh_node)
639 continue; 643 continue;
640 644
641 if (!compare_eth(neigh_node->addr, orig_node->orig)) 645 if (!compare_eth(router->addr, orig_node->orig))
642 continue; 646 goto next;
643 647
644 if (neigh_node->if_incoming->if_status != IF_ACTIVE) 648 if (router->if_incoming->if_status != IF_ACTIVE)
645 continue; 649 goto next;
646 650
647 if (neigh_node->tq_avg < 1) 651 if (router->tq_avg < 1)
648 continue; 652 goto next;
649 653
650 /* fill one entry into buffer. */ 654 /* fill one entry into buffer. */
651 entry = (struct vis_info_entry *) 655 entry = (struct vis_info_entry *)
652 skb_put(info->skb_packet, sizeof(*entry)); 656 skb_put(info->skb_packet, sizeof(*entry));
653 memcpy(entry->src, 657 memcpy(entry->src,
654 neigh_node->if_incoming->net_dev->dev_addr, 658 router->if_incoming->net_dev->dev_addr,
655 ETH_ALEN); 659 ETH_ALEN);
656 memcpy(entry->dest, orig_node->orig, ETH_ALEN); 660 memcpy(entry->dest, orig_node->orig, ETH_ALEN);
657 entry->quality = neigh_node->tq_avg; 661 entry->quality = router->tq_avg;
658 packet->entries++; 662 packet->entries++;
659 663
664next:
665 neigh_node_free_ref(router);
666
660 if (vis_packet_full(info)) 667 if (vis_packet_full(info))
661 goto unlock; 668 goto unlock;
662 } 669 }
@@ -725,6 +732,7 @@ static void purge_vis_packets(struct bat_priv *bat_priv)
725static void broadcast_vis_packet(struct bat_priv *bat_priv, 732static void broadcast_vis_packet(struct bat_priv *bat_priv,
726 struct vis_info *info) 733 struct vis_info *info)
727{ 734{
735 struct neigh_node *router;
728 struct hashtable_t *hash = bat_priv->orig_hash; 736 struct hashtable_t *hash = bat_priv->orig_hash;
729 struct hlist_node *node; 737 struct hlist_node *node;
730 struct hlist_head *head; 738 struct hlist_head *head;
@@ -745,19 +753,26 @@ static void broadcast_vis_packet(struct bat_priv *bat_priv,
745 rcu_read_lock(); 753 rcu_read_lock();
746 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { 754 hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
747 /* if it's a vis server and reachable, send it. */ 755 /* if it's a vis server and reachable, send it. */
748 if ((!orig_node) || (!orig_node->router))
749 continue;
750 if (!(orig_node->flags & VIS_SERVER)) 756 if (!(orig_node->flags & VIS_SERVER))
751 continue; 757 continue;
758
759 router = orig_node_get_router(orig_node);
760 if (!router)
761 continue;
762
752 /* don't send it if we already received the packet from 763 /* don't send it if we already received the packet from
753 * this node. */ 764 * this node. */
754 if (recv_list_is_in(bat_priv, &info->recv_list, 765 if (recv_list_is_in(bat_priv, &info->recv_list,
755 orig_node->orig)) 766 orig_node->orig)) {
767 neigh_node_free_ref(router);
756 continue; 768 continue;
769 }
757 770
758 memcpy(packet->target_orig, orig_node->orig, ETH_ALEN); 771 memcpy(packet->target_orig, orig_node->orig, ETH_ALEN);
759 hard_iface = orig_node->router->if_incoming; 772 hard_iface = router->if_incoming;
760 memcpy(dstaddr, orig_node->router->addr, ETH_ALEN); 773 memcpy(dstaddr, router->addr, ETH_ALEN);
774
775 neigh_node_free_ref(router);
761 776
762 skb = skb_clone(info->skb_packet, GFP_ATOMIC); 777 skb = skb_clone(info->skb_packet, GFP_ATOMIC);
763 if (skb) 778 if (skb)
@@ -772,45 +787,29 @@ static void unicast_vis_packet(struct bat_priv *bat_priv,
772 struct vis_info *info) 787 struct vis_info *info)
773{ 788{
774 struct orig_node *orig_node; 789 struct orig_node *orig_node;
775 struct neigh_node *neigh_node = NULL; 790 struct neigh_node *router = NULL;
776 struct sk_buff *skb; 791 struct sk_buff *skb;
777 struct vis_packet *packet; 792 struct vis_packet *packet;
778 793
779 packet = (struct vis_packet *)info->skb_packet->data; 794 packet = (struct vis_packet *)info->skb_packet->data;
780 795
781 rcu_read_lock();
782 orig_node = orig_hash_find(bat_priv, packet->target_orig); 796 orig_node = orig_hash_find(bat_priv, packet->target_orig);
783
784 if (!orig_node) 797 if (!orig_node)
785 goto unlock; 798 goto out;
786 799
787 neigh_node = orig_node->router; 800 router = orig_node_get_router(orig_node);
788 801 if (!router)
789 if (!neigh_node) 802 goto out;
790 goto unlock;
791
792 if (!atomic_inc_not_zero(&neigh_node->refcount)) {
793 neigh_node = NULL;
794 goto unlock;
795 }
796
797 rcu_read_unlock();
798 803
799 skb = skb_clone(info->skb_packet, GFP_ATOMIC); 804 skb = skb_clone(info->skb_packet, GFP_ATOMIC);
800 if (skb) 805 if (skb)
801 send_skb_packet(skb, neigh_node->if_incoming, 806 send_skb_packet(skb, router->if_incoming, router->addr);
802 neigh_node->addr);
803
804 goto out;
805 807
806unlock:
807 rcu_read_unlock();
808out: 808out:
809 if (neigh_node) 809 if (router)
810 neigh_node_free_ref(neigh_node); 810 neigh_node_free_ref(router);
811 if (orig_node) 811 if (orig_node)
812 orig_node_free_ref(orig_node); 812 orig_node_free_ref(orig_node);
813 return;
814} 813}
815 814
816/* only send one vis packet. called from send_vis_packets() */ 815/* only send one vis packet. called from send_vis_packets() */
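For context: all of the vis.c hunks above replace bare orig_node->router dereferences with a counted reference taken under RCU and dropped via neigh_node_free_ref(). A minimal sketch of the accessor they switch to, assuming the usual batman-adv refcounting (the real definition belongs to originator.c, which this series also touches):

	struct neigh_node *orig_node_get_router(struct orig_node *orig_node)
	{
		struct neigh_node *router;

		rcu_read_lock();
		router = rcu_dereference(orig_node->router);

		/* a refcount already at zero means the neighbour is on its
		 * way out; report it as gone instead of resurrecting it */
		if (router && !atomic_inc_not_zero(&router->refcount))
			router = NULL;

		rcu_read_unlock();
		return router;
	}

Every successful call must be paired with neigh_node_free_ref(), which is why the loops above grow goto next / free exits in place of plain continue statements.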
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index a80bc1cdb35b..1ad4907766c7 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -1948,7 +1948,7 @@ static void hci_tx_task(unsigned long arg)
1948 read_unlock(&hci_task_lock); 1948 read_unlock(&hci_task_lock);
1949} 1949}
1950 1950
1951/* ----- HCI RX task (incoming data proccessing) ----- */ 1951/* ----- HCI RX task (incoming data processing) ----- */
1952 1952
1953/* ACL data packet */ 1953/* ACL data packet */
1954static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb) 1954static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 473e5973d8fe..47394a178bd5 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -679,7 +679,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch
679 679
680 if (opt == BT_FLUSHABLE_OFF) { 680 if (opt == BT_FLUSHABLE_OFF) {
681 struct l2cap_conn *conn = l2cap_pi(sk)->conn; 681 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
682 /* proceed futher only when we have l2cap_conn and 682 /* proceed further only when we have l2cap_conn and
683 No Flush support in the LM */ 683 No Flush support in the LM */
684 if (!conn || !lmp_no_flush_capable(conn->hcon->hdev)) { 684 if (!conn || !lmp_no_flush_capable(conn->hcon->hdev)) {
685 err = -EINVAL; 685 err = -EINVAL;
diff --git a/net/bridge/br.c b/net/bridge/br.c
index 84bbb82599b2..f20c4fd915a8 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -104,3 +104,4 @@ module_init(br_init)
104module_exit(br_deinit) 104module_exit(br_deinit)
105MODULE_LICENSE("GPL"); 105MODULE_LICENSE("GPL");
106MODULE_VERSION(BR_VERSION); 106MODULE_VERSION(BR_VERSION);
107MODULE_ALIAS_RTNL_LINK("bridge");
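The new alias is what makes module autoloading work for netlink-created bridges: MODULE_ALIAS_RTNL_LINK("bridge") expands, per include/net/rtnetlink.h, to

	MODULE_ALIAS("rtnl-link-bridge");

so a request such as ip link add name br0 type bridge lets the rtnetlink core call request_module("rtnl-link-bridge") before looking up the link ops registered in br_netlink.c below.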
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 21e5901186ea..45cfd54b06d3 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -74,6 +74,17 @@ out:
74 return NETDEV_TX_OK; 74 return NETDEV_TX_OK;
75} 75}
76 76
77static int br_dev_init(struct net_device *dev)
78{
79 struct net_bridge *br = netdev_priv(dev);
80
81 br->stats = alloc_percpu(struct br_cpu_netstats);
82 if (!br->stats)
83 return -ENOMEM;
84
85 return 0;
86}
87
77static int br_dev_open(struct net_device *dev) 88static int br_dev_open(struct net_device *dev)
78{ 89{
79 struct net_bridge *br = netdev_priv(dev); 90 struct net_bridge *br = netdev_priv(dev);
@@ -334,6 +345,7 @@ static const struct ethtool_ops br_ethtool_ops = {
334static const struct net_device_ops br_netdev_ops = { 345static const struct net_device_ops br_netdev_ops = {
335 .ndo_open = br_dev_open, 346 .ndo_open = br_dev_open,
336 .ndo_stop = br_dev_stop, 347 .ndo_stop = br_dev_stop,
348 .ndo_init = br_dev_init,
337 .ndo_start_xmit = br_dev_xmit, 349 .ndo_start_xmit = br_dev_xmit,
338 .ndo_get_stats64 = br_get_stats64, 350 .ndo_get_stats64 = br_get_stats64,
339 .ndo_set_mac_address = br_set_mac_address, 351 .ndo_set_mac_address = br_set_mac_address,
@@ -357,18 +369,47 @@ static void br_dev_free(struct net_device *dev)
357 free_netdev(dev); 369 free_netdev(dev);
358} 370}
359 371
372static struct device_type br_type = {
373 .name = "bridge",
374};
375
360void br_dev_setup(struct net_device *dev) 376void br_dev_setup(struct net_device *dev)
361{ 377{
378 struct net_bridge *br = netdev_priv(dev);
379
362 random_ether_addr(dev->dev_addr); 380 random_ether_addr(dev->dev_addr);
363 ether_setup(dev); 381 ether_setup(dev);
364 382
365 dev->netdev_ops = &br_netdev_ops; 383 dev->netdev_ops = &br_netdev_ops;
366 dev->destructor = br_dev_free; 384 dev->destructor = br_dev_free;
367 SET_ETHTOOL_OPS(dev, &br_ethtool_ops); 385 SET_ETHTOOL_OPS(dev, &br_ethtool_ops);
386 SET_NETDEV_DEVTYPE(dev, &br_type);
368 dev->tx_queue_len = 0; 387 dev->tx_queue_len = 0;
369 dev->priv_flags = IFF_EBRIDGE; 388 dev->priv_flags = IFF_EBRIDGE;
370 389
371 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | 390 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
372 NETIF_F_GSO_MASK | NETIF_F_NO_CSUM | NETIF_F_LLTX | 391 NETIF_F_GSO_MASK | NETIF_F_NO_CSUM | NETIF_F_LLTX |
373 NETIF_F_NETNS_LOCAL | NETIF_F_GSO | NETIF_F_HW_VLAN_TX; 392 NETIF_F_NETNS_LOCAL | NETIF_F_GSO | NETIF_F_HW_VLAN_TX;
393
394 br->dev = dev;
395 spin_lock_init(&br->lock);
396 INIT_LIST_HEAD(&br->port_list);
397 spin_lock_init(&br->hash_lock);
398
399 br->bridge_id.prio[0] = 0x80;
400 br->bridge_id.prio[1] = 0x00;
401
402 memcpy(br->group_addr, br_group_address, ETH_ALEN);
403
404 br->feature_mask = dev->features;
405 br->stp_enabled = BR_NO_STP;
406 br->designated_root = br->bridge_id;
407 br->bridge_max_age = br->max_age = 20 * HZ;
408 br->bridge_hello_time = br->hello_time = 2 * HZ;
409 br->bridge_forward_delay = br->forward_delay = 15 * HZ;
410 br->ageing_time = 300 * HZ;
411
412 br_netfilter_rtable_init(br);
413 br_stp_timer_init(br);
414 br_multicast_init(br);
374} 415}
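Folding the old new_bridge_dev() body into br_dev_setup() plus an .ndo_init hook matters because netlink-created bridges never go through br_add_bridge(); the core runs the hook on every creation path. A simplified sketch of the relevant register_netdevice() logic, as assumed here:

	/* inside register_netdevice(), before the device becomes visible */
	if (dev->netdev_ops->ndo_init) {
		ret = dev->netdev_ops->ndo_init(dev);	/* br_dev_init() above */
		if (ret)
			goto out;	/* registration fails cleanly */
	}

So the -ENOMEM case in br_dev_init() aborts the whole registration, and no half-initialised bridge is left behind.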
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 88485cc74dc3..e0dfbc151dd7 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -28,6 +28,7 @@
28static struct kmem_cache *br_fdb_cache __read_mostly; 28static struct kmem_cache *br_fdb_cache __read_mostly;
29static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source, 29static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
30 const unsigned char *addr); 30 const unsigned char *addr);
31static void fdb_notify(const struct net_bridge_fdb_entry *, int);
31 32
32static u32 fdb_salt __read_mostly; 33static u32 fdb_salt __read_mostly;
33 34
@@ -62,7 +63,7 @@ static inline int has_expired(const struct net_bridge *br,
62 const struct net_bridge_fdb_entry *fdb) 63 const struct net_bridge_fdb_entry *fdb)
63{ 64{
64 return !fdb->is_static && 65 return !fdb->is_static &&
65 time_before_eq(fdb->ageing_timer + hold_time(br), jiffies); 66 time_before_eq(fdb->updated + hold_time(br), jiffies);
66} 67}
67 68
68static inline int br_mac_hash(const unsigned char *mac) 69static inline int br_mac_hash(const unsigned char *mac)
@@ -81,6 +82,7 @@ static void fdb_rcu_free(struct rcu_head *head)
81 82
82static inline void fdb_delete(struct net_bridge_fdb_entry *f) 83static inline void fdb_delete(struct net_bridge_fdb_entry *f)
83{ 84{
85 fdb_notify(f, RTM_DELNEIGH);
84 hlist_del_rcu(&f->hlist); 86 hlist_del_rcu(&f->hlist);
85 call_rcu(&f->rcu, fdb_rcu_free); 87 call_rcu(&f->rcu, fdb_rcu_free);
86} 88}
@@ -140,7 +142,7 @@ void br_fdb_cleanup(unsigned long _data)
140 unsigned long this_timer; 142 unsigned long this_timer;
141 if (f->is_static) 143 if (f->is_static)
142 continue; 144 continue;
143 this_timer = f->ageing_timer + delay; 145 this_timer = f->updated + delay;
144 if (time_before_eq(this_timer, jiffies)) 146 if (time_before_eq(this_timer, jiffies))
145 fdb_delete(f); 147 fdb_delete(f);
146 else if (time_before(this_timer, next_timer)) 148 else if (time_before(this_timer, next_timer))
@@ -169,7 +171,7 @@ void br_fdb_flush(struct net_bridge *br)
169 spin_unlock_bh(&br->hash_lock); 171 spin_unlock_bh(&br->hash_lock);
170} 172}
171 173
172/* Flush all entries refering to a specific port. 174/* Flush all entries referring to a specific port.
173 * if do_all is set also flush static entries 175 * if do_all is set also flush static entries
174 */ 176 */
175void br_fdb_delete_by_port(struct net_bridge *br, 177void br_fdb_delete_by_port(struct net_bridge *br,
@@ -293,7 +295,7 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf,
293 295
294 fe->is_local = f->is_local; 296 fe->is_local = f->is_local;
295 if (!f->is_static) 297 if (!f->is_static)
296 fe->ageing_timer_value = jiffies_to_clock_t(jiffies - f->ageing_timer); 298 fe->ageing_timer_value = jiffies_to_clock_t(jiffies - f->updated);
297 ++fe; 299 ++fe;
298 ++num; 300 ++num;
299 } 301 }
@@ -305,8 +307,21 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf,
305 return num; 307 return num;
306} 308}
307 309
308static inline struct net_bridge_fdb_entry *fdb_find(struct hlist_head *head, 310static struct net_bridge_fdb_entry *fdb_find(struct hlist_head *head,
309 const unsigned char *addr) 311 const unsigned char *addr)
312{
313 struct hlist_node *h;
314 struct net_bridge_fdb_entry *fdb;
315
316 hlist_for_each_entry(fdb, h, head, hlist) {
317 if (!compare_ether_addr(fdb->addr.addr, addr))
318 return fdb;
319 }
320 return NULL;
321}
322
323static struct net_bridge_fdb_entry *fdb_find_rcu(struct hlist_head *head,
324 const unsigned char *addr)
310{ 325{
311 struct hlist_node *h; 326 struct hlist_node *h;
312 struct net_bridge_fdb_entry *fdb; 327 struct net_bridge_fdb_entry *fdb;
@@ -320,8 +335,7 @@ static inline struct net_bridge_fdb_entry *fdb_find(struct hlist_head *head,
320 335
321static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head, 336static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head,
322 struct net_bridge_port *source, 337 struct net_bridge_port *source,
323 const unsigned char *addr, 338 const unsigned char *addr)
324 int is_local)
325{ 339{
326 struct net_bridge_fdb_entry *fdb; 340 struct net_bridge_fdb_entry *fdb;
327 341
@@ -329,11 +343,11 @@ static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head,
329 if (fdb) { 343 if (fdb) {
330 memcpy(fdb->addr.addr, addr, ETH_ALEN); 344 memcpy(fdb->addr.addr, addr, ETH_ALEN);
331 fdb->dst = source; 345 fdb->dst = source;
332 fdb->is_local = is_local; 346 fdb->is_local = 0;
333 fdb->is_static = is_local; 347 fdb->is_static = 0;
334 fdb->ageing_timer = jiffies; 348 fdb->updated = fdb->used = jiffies;
335
336 hlist_add_head_rcu(&fdb->hlist, head); 349 hlist_add_head_rcu(&fdb->hlist, head);
350 fdb_notify(fdb, RTM_NEWNEIGH);
337 } 351 }
338 return fdb; 352 return fdb;
339} 353}
@@ -360,12 +374,15 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
360 fdb_delete(fdb); 374 fdb_delete(fdb);
361 } 375 }
362 376
363 if (!fdb_create(head, source, addr, 1)) 377 fdb = fdb_create(head, source, addr);
378 if (!fdb)
364 return -ENOMEM; 379 return -ENOMEM;
365 380
381 fdb->is_local = fdb->is_static = 1;
366 return 0; 382 return 0;
367} 383}
368 384
385/* Add entry for local address of interface */
369int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source, 386int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
370 const unsigned char *addr) 387 const unsigned char *addr)
371{ 388{
@@ -392,7 +409,7 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
392 source->state == BR_STATE_FORWARDING)) 409 source->state == BR_STATE_FORWARDING))
393 return; 410 return;
394 411
395 fdb = fdb_find(head, addr); 412 fdb = fdb_find_rcu(head, addr);
396 if (likely(fdb)) { 413 if (likely(fdb)) {
397 /* attempt to update an entry for a local interface */ 414 /* attempt to update an entry for a local interface */
398 if (unlikely(fdb->is_local)) { 415 if (unlikely(fdb->is_local)) {
@@ -403,15 +420,277 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
403 } else { 420 } else {
404 /* fastpath: update of existing entry */ 421 /* fastpath: update of existing entry */
405 fdb->dst = source; 422 fdb->dst = source;
406 fdb->ageing_timer = jiffies; 423 fdb->updated = jiffies;
407 } 424 }
408 } else { 425 } else {
409 spin_lock(&br->hash_lock); 426 spin_lock(&br->hash_lock);
410 if (!fdb_find(head, addr)) 427 if (likely(!fdb_find(head, addr)))
411 fdb_create(head, source, addr, 0); 428 fdb_create(head, source, addr);
429
412 /* else we lose race and someone else inserts 430 /* else we lose race and someone else inserts
413 * it first, don't bother updating 431 * it first, don't bother updating
414 */ 432 */
415 spin_unlock(&br->hash_lock); 433 spin_unlock(&br->hash_lock);
416 } 434 }
417} 435}
436
437static int fdb_to_nud(const struct net_bridge_fdb_entry *fdb)
438{
439 if (fdb->is_local)
440 return NUD_PERMANENT;
441 else if (fdb->is_static)
442 return NUD_NOARP;
443 else if (has_expired(fdb->dst->br, fdb))
444 return NUD_STALE;
445 else
446 return NUD_REACHABLE;
447}
448
449static int fdb_fill_info(struct sk_buff *skb,
450 const struct net_bridge_fdb_entry *fdb,
451 u32 pid, u32 seq, int type, unsigned int flags)
452{
453 unsigned long now = jiffies;
454 struct nda_cacheinfo ci;
455 struct nlmsghdr *nlh;
456 struct ndmsg *ndm;
457
458 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
459 if (nlh == NULL)
460 return -EMSGSIZE;
461
462
463 ndm = nlmsg_data(nlh);
464 ndm->ndm_family = AF_BRIDGE;
465 ndm->ndm_pad1 = 0;
466 ndm->ndm_pad2 = 0;
467 ndm->ndm_flags = 0;
468 ndm->ndm_type = 0;
469 ndm->ndm_ifindex = fdb->dst->dev->ifindex;
470 ndm->ndm_state = fdb_to_nud(fdb);
471
472 NLA_PUT(skb, NDA_LLADDR, ETH_ALEN, &fdb->addr);
473
474 ci.ndm_used = jiffies_to_clock_t(now - fdb->used);
475 ci.ndm_confirmed = 0;
476 ci.ndm_updated = jiffies_to_clock_t(now - fdb->updated);
477 ci.ndm_refcnt = 0;
478 NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
479
480 return nlmsg_end(skb, nlh);
481
482nla_put_failure:
483 nlmsg_cancel(skb, nlh);
484 return -EMSGSIZE;
485}
486
487static inline size_t fdb_nlmsg_size(void)
488{
489 return NLMSG_ALIGN(sizeof(struct ndmsg))
490 + nla_total_size(ETH_ALEN) /* NDA_LLADDR */
491 + nla_total_size(sizeof(struct nda_cacheinfo));
492}
493
494static void fdb_notify(const struct net_bridge_fdb_entry *fdb, int type)
495{
496 struct net *net = dev_net(fdb->dst->dev);
497 struct sk_buff *skb;
498 int err = -ENOBUFS;
499
500 skb = nlmsg_new(fdb_nlmsg_size(), GFP_ATOMIC);
501 if (skb == NULL)
502 goto errout;
503
504 err = fdb_fill_info(skb, fdb, 0, 0, type, 0);
505 if (err < 0) {
506 /* -EMSGSIZE implies BUG in fdb_nlmsg_size() */
507 WARN_ON(err == -EMSGSIZE);
508 kfree_skb(skb);
509 goto errout;
510 }
511 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
512 return;
513errout:
514 if (err < 0)
515 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
516}
517
518/* Dump information about entries, in response to GETNEIGH */
519int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
520{
521 struct net *net = sock_net(skb->sk);
522 struct net_device *dev;
523 int idx = 0;
524
525 rcu_read_lock();
526 for_each_netdev_rcu(net, dev) {
527 struct net_bridge *br = netdev_priv(dev);
528 int i;
529
530 if (!(dev->priv_flags & IFF_EBRIDGE))
531 continue;
532
533 for (i = 0; i < BR_HASH_SIZE; i++) {
534 struct hlist_node *h;
535 struct net_bridge_fdb_entry *f;
536
537 hlist_for_each_entry_rcu(f, h, &br->hash[i], hlist) {
538 if (idx < cb->args[0])
539 goto skip;
540
541 if (fdb_fill_info(skb, f,
542 NETLINK_CB(cb->skb).pid,
543 cb->nlh->nlmsg_seq,
544 RTM_NEWNEIGH,
545 NLM_F_MULTI) < 0)
546 break;
547skip:
548 ++idx;
549 }
550 }
551 }
552 rcu_read_unlock();
553
554 cb->args[0] = idx;
555
556 return skb->len;
557}
558
559/* Create new static fdb entry */
560static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
561 __u16 state)
562{
563 struct net_bridge *br = source->br;
564 struct hlist_head *head = &br->hash[br_mac_hash(addr)];
565 struct net_bridge_fdb_entry *fdb;
566
567 fdb = fdb_find(head, addr);
568 if (fdb)
569 return -EEXIST;
570
571 fdb = fdb_create(head, source, addr);
572 if (!fdb)
573 return -ENOMEM;
574
575 if (state & NUD_PERMANENT)
576 fdb->is_local = fdb->is_static = 1;
577 else if (state & NUD_NOARP)
578 fdb->is_static = 1;
579 return 0;
580}
581
582/* Add new permanent fdb entry with RTM_NEWNEIGH */
583int br_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
584{
585 struct net *net = sock_net(skb->sk);
586 struct ndmsg *ndm;
587 struct nlattr *tb[NDA_MAX+1];
588 struct net_device *dev;
589 struct net_bridge_port *p;
590 const __u8 *addr;
591 int err;
592
593 ASSERT_RTNL();
594 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
595 if (err < 0)
596 return err;
597
598 ndm = nlmsg_data(nlh);
599 if (ndm->ndm_ifindex == 0) {
600 pr_info("bridge: RTM_NEWNEIGH with invalid ifindex\n");
601 return -EINVAL;
602 }
603
604 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
605 if (dev == NULL) {
606 pr_info("bridge: RTM_NEWNEIGH with unknown ifindex\n");
607 return -ENODEV;
608 }
609
610 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
611 pr_info("bridge: RTM_NEWNEIGH with invalid address\n");
612 return -EINVAL;
613 }
614
615 addr = nla_data(tb[NDA_LLADDR]);
616 if (!is_valid_ether_addr(addr)) {
617 pr_info("bridge: RTM_NEWNEIGH with invalid ether address\n");
618 return -EINVAL;
619 }
620
621 p = br_port_get_rtnl(dev);
622 if (p == NULL) {
623 pr_info("bridge: RTM_NEWNEIGH %s not a bridge port\n",
624 dev->name);
625 return -EINVAL;
626 }
627
628 spin_lock_bh(&p->br->hash_lock);
629 err = fdb_add_entry(p, addr, ndm->ndm_state);
630 spin_unlock_bh(&p->br->hash_lock);
631
632 return err;
633}
634
635static int fdb_delete_by_addr(struct net_bridge_port *p, const u8 *addr)
636{
637 struct net_bridge *br = p->br;
638 struct hlist_head *head = &br->hash[br_mac_hash(addr)];
639 struct net_bridge_fdb_entry *fdb;
640
641 fdb = fdb_find(head, addr);
642 if (!fdb)
643 return -ENOENT;
644
645 fdb_delete(fdb);
646 return 0;
647}
648
649/* Remove neighbor entry with RTM_DELNEIGH */
650int br_fdb_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
651{
652 struct net *net = sock_net(skb->sk);
653 struct ndmsg *ndm;
654 struct net_bridge_port *p;
655 struct nlattr *llattr;
656 const __u8 *addr;
657 struct net_device *dev;
658 int err;
659
660 ASSERT_RTNL();
661 if (nlmsg_len(nlh) < sizeof(*ndm))
662 return -EINVAL;
663
664 ndm = nlmsg_data(nlh);
665 if (ndm->ndm_ifindex == 0) {
666 pr_info("bridge: RTM_DELNEIGH with invalid ifindex\n");
667 return -EINVAL;
668 }
669
670 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
671 if (dev == NULL) {
672 pr_info("bridge: RTM_DELNEIGH with unknown ifindex\n");
673 return -ENODEV;
674 }
675
676 llattr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_LLADDR);
677 if (llattr == NULL || nla_len(llattr) != ETH_ALEN) {
678 pr_info("bridge: RTM_DELNEIGH with invalid address\n");
679 return -EINVAL;
680 }
681
682 addr = nla_data(llattr);
683
684 p = br_port_get_rtnl(dev);
685 if (p == NULL) {
686 pr_info("bridge: RTM_DELNEIGH %s not a bridge port\n",
687 dev->name);
688 return -EINVAL;
689 }
690
691 spin_lock_bh(&p->br->hash_lock);
692 err = fdb_delete_by_addr(p, addr);
693 spin_unlock_bh(&p->br->hash_lock);
694
695 return err;
696}
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 718b60366dfe..7f5379c593d9 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -175,56 +175,6 @@ static void del_br(struct net_bridge *br, struct list_head *head)
175 unregister_netdevice_queue(br->dev, head); 175 unregister_netdevice_queue(br->dev, head);
176} 176}
177 177
178static struct net_device *new_bridge_dev(struct net *net, const char *name)
179{
180 struct net_bridge *br;
181 struct net_device *dev;
182
183 dev = alloc_netdev(sizeof(struct net_bridge), name,
184 br_dev_setup);
185
186 if (!dev)
187 return NULL;
188 dev_net_set(dev, net);
189
190 br = netdev_priv(dev);
191 br->dev = dev;
192
193 br->stats = alloc_percpu(struct br_cpu_netstats);
194 if (!br->stats) {
195 free_netdev(dev);
196 return NULL;
197 }
198
199 spin_lock_init(&br->lock);
200 INIT_LIST_HEAD(&br->port_list);
201 spin_lock_init(&br->hash_lock);
202
203 br->bridge_id.prio[0] = 0x80;
204 br->bridge_id.prio[1] = 0x00;
205
206 memcpy(br->group_addr, br_group_address, ETH_ALEN);
207
208 br->feature_mask = dev->features;
209 br->stp_enabled = BR_NO_STP;
210 br->designated_root = br->bridge_id;
211 br->root_path_cost = 0;
212 br->root_port = 0;
213 br->bridge_max_age = br->max_age = 20 * HZ;
214 br->bridge_hello_time = br->hello_time = 2 * HZ;
215 br->bridge_forward_delay = br->forward_delay = 15 * HZ;
216 br->topology_change = 0;
217 br->topology_change_detected = 0;
218 br->ageing_time = 300 * HZ;
219
220 br_netfilter_rtable_init(br);
221
222 br_stp_timer_init(br);
223 br_multicast_init(br);
224
225 return dev;
226}
227
228/* find an available port number */ 178/* find an available port number */
229static int find_portno(struct net_bridge *br) 179static int find_portno(struct net_bridge *br)
230{ 180{
@@ -277,42 +227,19 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br,
277 return p; 227 return p;
278} 228}
279 229
280static struct device_type br_type = {
281 .name = "bridge",
282};
283
284int br_add_bridge(struct net *net, const char *name) 230int br_add_bridge(struct net *net, const char *name)
285{ 231{
286 struct net_device *dev; 232 struct net_device *dev;
287 int ret;
288 233
289 dev = new_bridge_dev(net, name); 234 dev = alloc_netdev(sizeof(struct net_bridge), name,
235 br_dev_setup);
236
290 if (!dev) 237 if (!dev)
291 return -ENOMEM; 238 return -ENOMEM;
292 239
293 rtnl_lock(); 240 dev_net_set(dev, net);
294 if (strchr(dev->name, '%')) {
295 ret = dev_alloc_name(dev, dev->name);
296 if (ret < 0)
297 goto out_free;
298 }
299
300 SET_NETDEV_DEVTYPE(dev, &br_type);
301
302 ret = register_netdevice(dev);
303 if (ret)
304 goto out_free;
305
306 ret = br_sysfs_addbr(dev);
307 if (ret)
308 unregister_netdevice(dev);
309 out:
310 rtnl_unlock();
311 return ret;
312 241
313out_free: 242 return register_netdev(dev);
314 free_netdev(dev);
315 goto out;
316} 243}
317 244
318int br_del_bridge(struct net *net, const char *name) 245int br_del_bridge(struct net *net, const char *name)
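The big win here is that br_add_bridge() collapses to alloc plus register: register_netdev() already serialises on the rtnl lock and name allocation for "name%d" templates is handled inside register_netdevice(), while everything else moved into br_dev_setup(), br_dev_init() and the NETDEV_REGISTER notifier. For reference, register_netdev() in this era is roughly:

	int register_netdev(struct net_device *dev)
	{
		int err;

		rtnl_lock();
		err = register_netdevice(dev);	/* resolves "%d" name templates */
		rtnl_unlock();
		return err;
	}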
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index e2160792e1bc..785932d7ad32 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -98,9 +98,10 @@ int br_handle_frame_finish(struct sk_buff *skb)
98 } 98 }
99 99
100 if (skb) { 100 if (skb) {
101 if (dst) 101 if (dst) {
102 dst->used = jiffies;
102 br_forward(dst->dst, skb, skb2); 103 br_forward(dst->dst, skb, skb2);
103 else 104 } else
104 br_flood_forward(br, skb, skb2); 105 br_flood_forward(br, skb, skb2);
105 } 106 }
106 107
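This one-line dst->used refresh is the forwarding-side half of the timestamp split made in br_private.h: each entry now carries two stamps with different jobs, only one of which ages the entry. As assumed from the hunks above:

	/* fdb->updated - bumped in br_fdb_update() when the source address is
	 *                (re)learned; drives has_expired() and br_fdb_cleanup()
	 * fdb->used    - bumped here on forwarding; exported to userspace as
	 *                ci.ndm_used by fdb_fill_info()
	 */

so a dump can show an entry that is still fresh for aging purposes yet idle on the forwarding path, and vice versa.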
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
index cb43312b846e..7222fe1d5460 100644
--- a/net/bridge/br_ioctl.c
+++ b/net/bridge/br_ioctl.c
@@ -106,7 +106,7 @@ static int add_del_if(struct net_bridge *br, int ifindex, int isadd)
106/* 106/*
107 * Legacy ioctl's through SIOCDEVPRIVATE 107 * Legacy ioctl's through SIOCDEVPRIVATE
108 * This interface is deprecated because it was too difficult to 108 * This interface is deprecated because it was too difficult to
109 * to do the translation for 32/64bit ioctl compatability. 109 * to do the translation for 32/64bit ioctl compatibility.
110 */ 110 */
111static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 111static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
112{ 112{
@@ -181,40 +181,19 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
181 if (!capable(CAP_NET_ADMIN)) 181 if (!capable(CAP_NET_ADMIN))
182 return -EPERM; 182 return -EPERM;
183 183
184 spin_lock_bh(&br->lock); 184 return br_set_forward_delay(br, args[1]);
185 br->bridge_forward_delay = clock_t_to_jiffies(args[1]);
186 if (br_is_root_bridge(br))
187 br->forward_delay = br->bridge_forward_delay;
188 spin_unlock_bh(&br->lock);
189 return 0;
190 185
191 case BRCTL_SET_BRIDGE_HELLO_TIME: 186 case BRCTL_SET_BRIDGE_HELLO_TIME:
192 {
193 unsigned long t = clock_t_to_jiffies(args[1]);
194 if (!capable(CAP_NET_ADMIN)) 187 if (!capable(CAP_NET_ADMIN))
195 return -EPERM; 188 return -EPERM;
196 189
197 if (t < HZ) 190 return br_set_hello_time(br, args[1]);
198 return -EINVAL;
199
200 spin_lock_bh(&br->lock);
201 br->bridge_hello_time = t;
202 if (br_is_root_bridge(br))
203 br->hello_time = br->bridge_hello_time;
204 spin_unlock_bh(&br->lock);
205 return 0;
206 }
207 191
208 case BRCTL_SET_BRIDGE_MAX_AGE: 192 case BRCTL_SET_BRIDGE_MAX_AGE:
209 if (!capable(CAP_NET_ADMIN)) 193 if (!capable(CAP_NET_ADMIN))
210 return -EPERM; 194 return -EPERM;
211 195
212 spin_lock_bh(&br->lock); 196 return br_set_max_age(br, args[1]);
213 br->bridge_max_age = clock_t_to_jiffies(args[1]);
214 if (br_is_root_bridge(br))
215 br->max_age = br->bridge_max_age;
216 spin_unlock_bh(&br->lock);
217 return 0;
218 197
219 case BRCTL_SET_AGEING_TIME: 198 case BRCTL_SET_AGEING_TIME:
220 if (!capable(CAP_NET_ADMIN)) 199 if (!capable(CAP_NET_ADMIN))
@@ -275,19 +254,16 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
275 case BRCTL_SET_PORT_PRIORITY: 254 case BRCTL_SET_PORT_PRIORITY:
276 { 255 {
277 struct net_bridge_port *p; 256 struct net_bridge_port *p;
278 int ret = 0; 257 int ret;
279 258
280 if (!capable(CAP_NET_ADMIN)) 259 if (!capable(CAP_NET_ADMIN))
281 return -EPERM; 260 return -EPERM;
282 261
283 if (args[2] >= (1<<(16-BR_PORT_BITS)))
284 return -ERANGE;
285
286 spin_lock_bh(&br->lock); 262 spin_lock_bh(&br->lock);
287 if ((p = br_get_port(br, args[1])) == NULL) 263 if ((p = br_get_port(br, args[1])) == NULL)
288 ret = -EINVAL; 264 ret = -EINVAL;
289 else 265 else
290 br_stp_set_port_priority(p, args[2]); 266 ret = br_stp_set_port_priority(p, args[2]);
291 spin_unlock_bh(&br->lock); 267 spin_unlock_bh(&br->lock);
292 return ret; 268 return ret;
293 } 269 }
@@ -295,15 +271,17 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
295 case BRCTL_SET_PATH_COST: 271 case BRCTL_SET_PATH_COST:
296 { 272 {
297 struct net_bridge_port *p; 273 struct net_bridge_port *p;
298 int ret = 0; 274 int ret;
299 275
300 if (!capable(CAP_NET_ADMIN)) 276 if (!capable(CAP_NET_ADMIN))
301 return -EPERM; 277 return -EPERM;
302 278
279 spin_lock_bh(&br->lock);
303 if ((p = br_get_port(br, args[1])) == NULL) 280 if ((p = br_get_port(br, args[1])) == NULL)
304 ret = -EINVAL; 281 ret = -EINVAL;
305 else 282 else
306 br_stp_set_path_cost(p, args[2]); 283 ret = br_stp_set_path_cost(p, args[2]);
284 spin_unlock_bh(&br->lock);
307 285
308 return ret; 286 return ret;
309 } 287 }
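Because the BRCTL_* cases now return whatever the shared br_set_*() and br_stp_set_*() helpers return, the deprecated ioctl path inherits the same -ERANGE checking as sysfs. A hypothetical userspace sketch of the call that now lands in br_set_forward_delay(); the helper name set_fwd_delay() is made up, and the 1/USER_HZ unit assumption mirrors what brctl passes:

	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <net/if.h>
	#include <linux/sockios.h>
	#include <linux/if_bridge.h>

	/* fd: any socket, e.g. socket(AF_INET, SOCK_DGRAM, 0) */
	static int set_fwd_delay(int fd, const char *brname, long seconds)
	{
		unsigned long args[4] = { BRCTL_SET_BRIDGE_FORWARD_DELAY,
					  seconds * sysconf(_SC_CLK_TCK), 0, 0 };
		struct ifreq ifr;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, brname, IFNAMSIZ - 1);
		ifr.ifr_data = (char *)args;
		return ioctl(fd, SIOCDEVPRIVATE, &ifr);	/* old_dev_ioctl() */
	}

With STP enabled, out-of-range values now fail with errno == ERANGE instead of being stored unchecked.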
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 59660c909a7c..2f14eafdeeab 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -413,7 +413,7 @@ out:
413 413
414#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 414#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
415static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br, 415static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
416 struct in6_addr *group) 416 const struct in6_addr *group)
417{ 417{
418 struct sk_buff *skb; 418 struct sk_buff *skb;
419 struct ipv6hdr *ip6h; 419 struct ipv6hdr *ip6h;
@@ -1115,7 +1115,7 @@ static int br_ip4_multicast_query(struct net_bridge *br,
1115 struct net_bridge_port *port, 1115 struct net_bridge_port *port,
1116 struct sk_buff *skb) 1116 struct sk_buff *skb)
1117{ 1117{
1118 struct iphdr *iph = ip_hdr(skb); 1118 const struct iphdr *iph = ip_hdr(skb);
1119 struct igmphdr *ih = igmp_hdr(skb); 1119 struct igmphdr *ih = igmp_hdr(skb);
1120 struct net_bridge_mdb_entry *mp; 1120 struct net_bridge_mdb_entry *mp;
1121 struct igmpv3_query *ih3; 1121 struct igmpv3_query *ih3;
@@ -1190,7 +1190,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
1190 struct net_bridge_port *port, 1190 struct net_bridge_port *port,
1191 struct sk_buff *skb) 1191 struct sk_buff *skb)
1192{ 1192{
1193 struct ipv6hdr *ip6h = ipv6_hdr(skb); 1193 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
1194 struct mld_msg *mld = (struct mld_msg *) icmp6_hdr(skb); 1194 struct mld_msg *mld = (struct mld_msg *) icmp6_hdr(skb);
1195 struct net_bridge_mdb_entry *mp; 1195 struct net_bridge_mdb_entry *mp;
1196 struct mld2_query *mld2q; 1196 struct mld2_query *mld2q;
@@ -1198,7 +1198,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
1198 struct net_bridge_port_group __rcu **pp; 1198 struct net_bridge_port_group __rcu **pp;
1199 unsigned long max_delay; 1199 unsigned long max_delay;
1200 unsigned long now = jiffies; 1200 unsigned long now = jiffies;
1201 struct in6_addr *group = NULL; 1201 const struct in6_addr *group = NULL;
1202 int err = 0; 1202 int err = 0;
1203 1203
1204 spin_lock(&br->multicast_lock); 1204 spin_lock(&br->multicast_lock);
@@ -1356,7 +1356,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
1356 struct sk_buff *skb) 1356 struct sk_buff *skb)
1357{ 1357{
1358 struct sk_buff *skb2 = skb; 1358 struct sk_buff *skb2 = skb;
1359 struct iphdr *iph; 1359 const struct iphdr *iph;
1360 struct igmphdr *ih; 1360 struct igmphdr *ih;
1361 unsigned len; 1361 unsigned len;
1362 unsigned offset; 1362 unsigned offset;
@@ -1452,7 +1452,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
1452 struct sk_buff *skb) 1452 struct sk_buff *skb)
1453{ 1453{
1454 struct sk_buff *skb2; 1454 struct sk_buff *skb2;
1455 struct ipv6hdr *ip6h; 1455 const struct ipv6hdr *ip6h;
1456 struct icmp6hdr *icmp6h; 1456 struct icmp6hdr *icmp6h;
1457 u8 nexthdr; 1457 u8 nexthdr;
1458 unsigned len; 1458 unsigned len;
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 008ff6c4eecf..5614907525e1 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -219,7 +219,7 @@ static inline void nf_bridge_update_protocol(struct sk_buff *skb)
219static int br_parse_ip_options(struct sk_buff *skb) 219static int br_parse_ip_options(struct sk_buff *skb)
220{ 220{
221 struct ip_options *opt; 221 struct ip_options *opt;
222 struct iphdr *iph; 222 const struct iphdr *iph;
223 struct net_device *dev = skb->dev; 223 struct net_device *dev = skb->dev;
224 u32 len; 224 u32 len;
225 225
@@ -249,11 +249,9 @@ static int br_parse_ip_options(struct sk_buff *skb)
249 goto drop; 249 goto drop;
250 } 250 }
251 251
252 /* Zero out the CB buffer if no options present */ 252 memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
253 if (iph->ihl == 5) { 253 if (iph->ihl == 5)
254 memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
255 return 0; 254 return 0;
256 }
257 255
258 opt->optlen = iph->ihl*4 - sizeof(struct iphdr); 256 opt->optlen = iph->ihl*4 - sizeof(struct iphdr);
259 if (ip_options_compile(dev_net(dev), opt, skb)) 257 if (ip_options_compile(dev_net(dev), opt, skb))
@@ -556,7 +554,7 @@ static unsigned int br_nf_pre_routing_ipv6(unsigned int hook,
556 const struct net_device *out, 554 const struct net_device *out,
557 int (*okfn)(struct sk_buff *)) 555 int (*okfn)(struct sk_buff *))
558{ 556{
559 struct ipv6hdr *hdr; 557 const struct ipv6hdr *hdr;
560 u32 pkt_len; 558 u32 pkt_len;
561 559
562 if (skb->len < sizeof(struct ipv6hdr)) 560 if (skb->len < sizeof(struct ipv6hdr))
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index f8bf4c7f842c..134a2ff6b98b 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -12,9 +12,11 @@
12 12
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/slab.h> 14#include <linux/slab.h>
15#include <linux/etherdevice.h>
15#include <net/rtnetlink.h> 16#include <net/rtnetlink.h>
16#include <net/net_namespace.h> 17#include <net/net_namespace.h>
17#include <net/sock.h> 18#include <net/sock.h>
19
18#include "br_private.h" 20#include "br_private.h"
19 21
20static inline size_t br_nlmsg_size(void) 22static inline size_t br_nlmsg_size(void)
@@ -188,20 +190,61 @@ static int br_rtm_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
188 return 0; 190 return 0;
189} 191}
190 192
193static int br_validate(struct nlattr *tb[], struct nlattr *data[])
194{
195 if (tb[IFLA_ADDRESS]) {
196 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
197 return -EINVAL;
198 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
199 return -EADDRNOTAVAIL;
200 }
201
202 return 0;
203}
204
205static struct rtnl_link_ops br_link_ops __read_mostly = {
206 .kind = "bridge",
207 .priv_size = sizeof(struct net_bridge),
208 .setup = br_dev_setup,
209 .validate = br_validate,
210};
191 211
192int __init br_netlink_init(void) 212int __init br_netlink_init(void)
193{ 213{
194 if (__rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, br_dump_ifinfo)) 214 int err;
195 return -ENOBUFS;
196 215
197 /* Only the first call to __rtnl_register can fail */ 216 err = rtnl_link_register(&br_link_ops);
198 __rtnl_register(PF_BRIDGE, RTM_SETLINK, br_rtm_setlink, NULL); 217 if (err < 0)
218 goto err1;
219
220 err = __rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, br_dump_ifinfo);
221 if (err)
222 goto err2;
223 err = __rtnl_register(PF_BRIDGE, RTM_SETLINK, br_rtm_setlink, NULL);
224 if (err)
225 goto err3;
226 err = __rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, br_fdb_add, NULL);
227 if (err)
228 goto err3;
229 err = __rtnl_register(PF_BRIDGE, RTM_DELNEIGH, br_fdb_delete, NULL);
230 if (err)
231 goto err3;
232 err = __rtnl_register(PF_BRIDGE, RTM_GETNEIGH, NULL, br_fdb_dump);
233 if (err)
234 goto err3;
199 235
200 return 0; 236 return 0;
237
238err3:
239 rtnl_unregister_all(PF_BRIDGE);
240err2:
241 rtnl_link_unregister(&br_link_ops);
242err1:
243 return err;
201} 244}
202 245
203void __exit br_netlink_fini(void) 246void __exit br_netlink_fini(void)
204{ 247{
248 rtnl_link_unregister(&br_link_ops);
205 rtnl_unregister_all(PF_BRIDGE); 249 rtnl_unregister_all(PF_BRIDGE);
206} 250}
207
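For orientation, the new registration plugs br_link_ops into rtnetlink's generic link-creation path. A heavily trimmed sketch of what the core does with it (the function names are real, the flow is simplified):

	/* inside rtnl_newlink(), for RTM_NEWLINK carrying IFLA_LINKINFO */
	ops = rtnl_link_ops_get(kind);			/* kind == "bridge" */
	if (!ops) {
		request_module("rtnl-link-%s", kind);	/* hits the br.c alias */
		ops = rtnl_link_ops_get(kind);
	}
	if (ops->validate)
		err = ops->validate(tb, data);		/* br_validate() */
	dev = rtnl_create_link(...);			/* alloc_netdev() + ops->setup() */
	err = register_netdevice(dev);			/* br_dev_init(), NETDEV_REGISTER */

Deletion needs no explicit .dellink here: rtnl_link_register() fills in unregister_netdevice_queue() as the default, so ip link del br0 works as well.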
diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c
index 7d337c9b6082..606b323e8a0c 100644
--- a/net/bridge/br_notify.c
+++ b/net/bridge/br_notify.c
@@ -36,6 +36,12 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
36 struct net_bridge *br; 36 struct net_bridge *br;
37 int err; 37 int err;
38 38
39 /* register of bridge completed, add sysfs entries */
40 if ((dev->priv_flags & IFF_EBRIDGE) && event == NETDEV_REGISTER) {
41 br_sysfs_addbr(dev);
42 return NOTIFY_DONE;
43 }
44
39 /* not a port of a bridge */ 45 /* not a port of a bridge */
40 p = br_port_get_rtnl(dev); 46 p = br_port_get_rtnl(dev);
41 if (!p) 47 if (!p)
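This NETDEV_REGISTER branch is what keeps sysfs in step now that br_add_bridge() no longer calls br_sysfs_addbr() directly; both the ioctl and netlink creation paths funnel through the notifier chain. The assumed ordering:

	register_netdevice(dev)
	    -> dev->netdev_ops->ndo_init(dev)		/* br_dev_init() */
	    -> call_netdevice_notifiers(NETDEV_REGISTER, dev)
	        -> br_device_event()			/* this file */
	            -> br_sysfs_addbr(dev)		/* IFF_EBRIDGE set in br_dev_setup() */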
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 387013d33745..e2a40343aa09 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -64,7 +64,8 @@ struct net_bridge_fdb_entry
64 struct net_bridge_port *dst; 64 struct net_bridge_port *dst;
65 65
66 struct rcu_head rcu; 66 struct rcu_head rcu;
67 unsigned long ageing_timer; 67 unsigned long updated;
68 unsigned long used;
68 mac_addr addr; 69 mac_addr addr;
69 unsigned char is_local; 70 unsigned char is_local;
70 unsigned char is_static; 71 unsigned char is_static;
@@ -353,6 +354,9 @@ extern int br_fdb_insert(struct net_bridge *br,
353extern void br_fdb_update(struct net_bridge *br, 354extern void br_fdb_update(struct net_bridge *br,
354 struct net_bridge_port *source, 355 struct net_bridge_port *source,
355 const unsigned char *addr); 356 const unsigned char *addr);
357extern int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb);
358extern int br_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
359extern int br_fdb_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
356 360
357/* br_forward.c */ 361/* br_forward.c */
358extern void br_deliver(const struct net_bridge_port *to, 362extern void br_deliver(const struct net_bridge_port *to,
@@ -491,6 +495,11 @@ extern struct net_bridge_port *br_get_port(struct net_bridge *br,
491extern void br_init_port(struct net_bridge_port *p); 495extern void br_init_port(struct net_bridge_port *p);
492extern void br_become_designated_port(struct net_bridge_port *p); 496extern void br_become_designated_port(struct net_bridge_port *p);
493 497
498extern int br_set_forward_delay(struct net_bridge *br, unsigned long x);
499extern int br_set_hello_time(struct net_bridge *br, unsigned long x);
500extern int br_set_max_age(struct net_bridge *br, unsigned long x);
501
502
494/* br_stp_if.c */ 503/* br_stp_if.c */
495extern void br_stp_enable_bridge(struct net_bridge *br); 504extern void br_stp_enable_bridge(struct net_bridge *br);
496extern void br_stp_disable_bridge(struct net_bridge *br); 505extern void br_stp_disable_bridge(struct net_bridge *br);
@@ -501,10 +510,10 @@ extern bool br_stp_recalculate_bridge_id(struct net_bridge *br);
501extern void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *a); 510extern void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *a);
502extern void br_stp_set_bridge_priority(struct net_bridge *br, 511extern void br_stp_set_bridge_priority(struct net_bridge *br,
503 u16 newprio); 512 u16 newprio);
504extern void br_stp_set_port_priority(struct net_bridge_port *p, 513extern int br_stp_set_port_priority(struct net_bridge_port *p,
505 u8 newprio); 514 unsigned long newprio);
506extern void br_stp_set_path_cost(struct net_bridge_port *p, 515extern int br_stp_set_path_cost(struct net_bridge_port *p,
507 u32 path_cost); 516 unsigned long path_cost);
508extern ssize_t br_show_bridge_id(char *buf, const struct bridge_id *id); 517extern ssize_t br_show_bridge_id(char *buf, const struct bridge_id *id);
509 518
510/* br_stp_bpdu.c */ 519/* br_stp_bpdu.c */
diff --git a/net/bridge/br_private_stp.h b/net/bridge/br_private_stp.h
index 8b650f7fbfa0..642ef47a867e 100644
--- a/net/bridge/br_private_stp.h
+++ b/net/bridge/br_private_stp.h
@@ -16,6 +16,19 @@
16#define BPDU_TYPE_CONFIG 0 16#define BPDU_TYPE_CONFIG 0
17#define BPDU_TYPE_TCN 0x80 17#define BPDU_TYPE_TCN 0x80
18 18
19/* IEEE 802.1D-1998 timer values */
20#define BR_MIN_HELLO_TIME (1*HZ)
21#define BR_MAX_HELLO_TIME (10*HZ)
22
23#define BR_MIN_FORWARD_DELAY (2*HZ)
24#define BR_MAX_FORWARD_DELAY (30*HZ)
25
26#define BR_MIN_MAX_AGE (6*HZ)
27#define BR_MAX_MAX_AGE (40*HZ)
28
29#define BR_MIN_PATH_COST 1
30#define BR_MAX_PATH_COST 65535
31
19struct br_config_bpdu 32struct br_config_bpdu
20{ 33{
21 unsigned topology_change:1; 34 unsigned topology_change:1;
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index 7370d14f634d..bb4383e84de9 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -484,3 +484,51 @@ void br_received_tcn_bpdu(struct net_bridge_port *p)
484 br_topology_change_acknowledge(p); 484 br_topology_change_acknowledge(p);
485 } 485 }
486} 486}
487
488/* Change bridge STP parameter */
489int br_set_hello_time(struct net_bridge *br, unsigned long val)
490{
491 unsigned long t = clock_t_to_jiffies(val);
492
493 if (t < BR_MIN_HELLO_TIME || t > BR_MAX_HELLO_TIME)
494 return -ERANGE;
495
496 spin_lock_bh(&br->lock);
497 br->bridge_hello_time = t;
498 if (br_is_root_bridge(br))
499 br->hello_time = br->bridge_hello_time;
500 spin_unlock_bh(&br->lock);
501 return 0;
502}
503
504int br_set_max_age(struct net_bridge *br, unsigned long val)
505{
506 unsigned long t = clock_t_to_jiffies(val);
507
508 if (t < BR_MIN_MAX_AGE || t > BR_MAX_MAX_AGE)
509 return -ERANGE;
510
511 spin_lock_bh(&br->lock);
512 br->bridge_max_age = t;
513 if (br_is_root_bridge(br))
514 br->max_age = br->bridge_max_age;
515 spin_unlock_bh(&br->lock);
516 return 0;
517
518}
519
520int br_set_forward_delay(struct net_bridge *br, unsigned long val)
521{
522 unsigned long t = clock_t_to_jiffies(val);
523
524 if (br->stp_enabled != BR_NO_STP &&
525 (t < BR_MIN_FORWARD_DELAY || t > BR_MAX_FORWARD_DELAY))
526 return -ERANGE;
527
528 spin_lock_bh(&br->lock);
529 br->bridge_forward_delay = t;
530 if (br_is_root_bridge(br))
531 br->forward_delay = br->bridge_forward_delay;
532 spin_unlock_bh(&br->lock);
533 return 0;
534}
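A worked example of the new range checks, assuming the common USER_HZ == 100 and a HZ == 250 kernel. Both brctl and the sysfs store path pass times in 1/USER_HZ ticks, so writing 1500 (15 s) gives:

	t = clock_t_to_jiffies(1500);	/* 1500 * 250 / 100 = 3750 jiffies = 15 s */
	/* BR_MIN_FORWARD_DELAY = 2*HZ = 500 <= 3750 <= 30*HZ = 7500: accepted */

while writing 50 (0.5 s) yields 125 jiffies and, with STP enabled, -ERANGE. The forward-delay check is deliberately skipped when stp_enabled == BR_NO_STP, preserving the old ability to zero the delay on non-STP bridges.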
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 9b61d09de9b9..6f615b8192f4 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -20,7 +20,7 @@
20 20
21 21
22/* Port id is composed of priority and port number. 22/* Port id is composed of priority and port number.
23 * NB: least significant bits of priority are dropped to 23 * NB: some bits of priority are dropped to
24 * make room for more ports. 24 * make room for more ports.
25 */ 25 */
26static inline port_id br_make_port_id(__u8 priority, __u16 port_no) 26static inline port_id br_make_port_id(__u8 priority, __u16 port_no)
@@ -29,6 +29,8 @@ static inline port_id br_make_port_id(__u8 priority, __u16 port_no)
29 | (port_no & ((1<<BR_PORT_BITS)-1)); 29 | (port_no & ((1<<BR_PORT_BITS)-1));
30} 30}
31 31
32#define BR_MAX_PORT_PRIORITY ((u16)~0 >> BR_PORT_BITS)
33
32/* called under bridge lock */ 34/* called under bridge lock */
33void br_init_port(struct net_bridge_port *p) 35void br_init_port(struct net_bridge_port *p)
34{ 36{
@@ -255,10 +257,14 @@ void br_stp_set_bridge_priority(struct net_bridge *br, u16 newprio)
255} 257}
256 258
257/* called under bridge lock */ 259/* called under bridge lock */
258void br_stp_set_port_priority(struct net_bridge_port *p, u8 newprio) 260int br_stp_set_port_priority(struct net_bridge_port *p, unsigned long newprio)
259{ 261{
260 port_id new_port_id = br_make_port_id(newprio, p->port_no); 262 port_id new_port_id;
263
264 if (newprio > BR_MAX_PORT_PRIORITY)
265 return -ERANGE;
261 266
267 new_port_id = br_make_port_id(newprio, p->port_no);
262 if (br_is_designated_port(p)) 268 if (br_is_designated_port(p))
263 p->designated_port = new_port_id; 269 p->designated_port = new_port_id;
264 270
@@ -269,14 +275,21 @@ void br_stp_set_port_priority(struct net_bridge_port *p, u8 newprio)
269 br_become_designated_port(p); 275 br_become_designated_port(p);
270 br_port_state_selection(p->br); 276 br_port_state_selection(p->br);
271 } 277 }
278
279 return 0;
272} 280}
273 281
274/* called under bridge lock */ 282/* called under bridge lock */
275void br_stp_set_path_cost(struct net_bridge_port *p, u32 path_cost) 283int br_stp_set_path_cost(struct net_bridge_port *p, unsigned long path_cost)
276{ 284{
285 if (path_cost < BR_MIN_PATH_COST ||
286 path_cost > BR_MAX_PATH_COST)
287 return -ERANGE;
288
277 p->path_cost = path_cost; 289 p->path_cost = path_cost;
278 br_configuration_update(p->br); 290 br_configuration_update(p->br);
279 br_port_state_selection(p->br); 291 br_port_state_selection(p->br);
292 return 0;
280} 293}
281 294
282ssize_t br_show_bridge_id(char *buf, const struct bridge_id *id) 295ssize_t br_show_bridge_id(char *buf, const struct bridge_id *id)
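The new bound makes the priority/port-number split explicit. With BR_PORT_BITS == 10 (its value in br_private.h, assumed unchanged by this series):

	BR_MAX_PORT_PRIORITY == ((u16)~0 >> 10) == 63

	/* br_make_port_id() packs:
	 *   port_id = ((u16)priority << 10) | (port_no & 0x3ff)
	 * i.e. a 6-bit priority above a 10-bit port number */

Previously the ioctl and sysfs callers each open-coded the same v >= (1 << (16 - BR_PORT_BITS)) test; centralising it in br_stp_set_port_priority() keeps every entry point consistent and turns silent truncation through the old u8 argument into a clean -ERANGE.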
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index 5c1e5559ebba..68b893ea8c3a 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -43,9 +43,7 @@ static ssize_t store_bridge_parm(struct device *d,
43 if (endp == buf) 43 if (endp == buf)
44 return -EINVAL; 44 return -EINVAL;
45 45
46 spin_lock_bh(&br->lock);
47 err = (*set)(br, val); 46 err = (*set)(br, val);
48 spin_unlock_bh(&br->lock);
49 return err ? err : len; 47 return err ? err : len;
50} 48}
51 49
@@ -57,20 +55,11 @@ static ssize_t show_forward_delay(struct device *d,
57 return sprintf(buf, "%lu\n", jiffies_to_clock_t(br->forward_delay)); 55 return sprintf(buf, "%lu\n", jiffies_to_clock_t(br->forward_delay));
58} 56}
59 57
60static int set_forward_delay(struct net_bridge *br, unsigned long val)
61{
62 unsigned long delay = clock_t_to_jiffies(val);
63 br->forward_delay = delay;
64 if (br_is_root_bridge(br))
65 br->bridge_forward_delay = delay;
66 return 0;
67}
68
69static ssize_t store_forward_delay(struct device *d, 58static ssize_t store_forward_delay(struct device *d,
70 struct device_attribute *attr, 59 struct device_attribute *attr,
71 const char *buf, size_t len) 60 const char *buf, size_t len)
72{ 61{
73 return store_bridge_parm(d, buf, len, set_forward_delay); 62 return store_bridge_parm(d, buf, len, br_set_forward_delay);
74} 63}
75static DEVICE_ATTR(forward_delay, S_IRUGO | S_IWUSR, 64static DEVICE_ATTR(forward_delay, S_IRUGO | S_IWUSR,
76 show_forward_delay, store_forward_delay); 65 show_forward_delay, store_forward_delay);
@@ -82,24 +71,11 @@ static ssize_t show_hello_time(struct device *d, struct device_attribute *attr,
82 jiffies_to_clock_t(to_bridge(d)->hello_time)); 71 jiffies_to_clock_t(to_bridge(d)->hello_time));
83} 72}
84 73
85static int set_hello_time(struct net_bridge *br, unsigned long val)
86{
87 unsigned long t = clock_t_to_jiffies(val);
88
89 if (t < HZ)
90 return -EINVAL;
91
92 br->hello_time = t;
93 if (br_is_root_bridge(br))
94 br->bridge_hello_time = t;
95 return 0;
96}
97
98static ssize_t store_hello_time(struct device *d, 74static ssize_t store_hello_time(struct device *d,
99 struct device_attribute *attr, const char *buf, 75 struct device_attribute *attr, const char *buf,
100 size_t len) 76 size_t len)
101{ 77{
102 return store_bridge_parm(d, buf, len, set_hello_time); 78 return store_bridge_parm(d, buf, len, br_set_hello_time);
103} 79}
104static DEVICE_ATTR(hello_time, S_IRUGO | S_IWUSR, show_hello_time, 80static DEVICE_ATTR(hello_time, S_IRUGO | S_IWUSR, show_hello_time,
105 store_hello_time); 81 store_hello_time);
@@ -111,19 +87,10 @@ static ssize_t show_max_age(struct device *d, struct device_attribute *attr,
111 jiffies_to_clock_t(to_bridge(d)->max_age)); 87 jiffies_to_clock_t(to_bridge(d)->max_age));
112} 88}
113 89
114static int set_max_age(struct net_bridge *br, unsigned long val)
115{
116 unsigned long t = clock_t_to_jiffies(val);
117 br->max_age = t;
118 if (br_is_root_bridge(br))
119 br->bridge_max_age = t;
120 return 0;
121}
122
123static ssize_t store_max_age(struct device *d, struct device_attribute *attr, 90static ssize_t store_max_age(struct device *d, struct device_attribute *attr,
124 const char *buf, size_t len) 91 const char *buf, size_t len)
125{ 92{
126 return store_bridge_parm(d, buf, len, set_max_age); 93 return store_bridge_parm(d, buf, len, br_set_max_age);
127} 94}
128static DEVICE_ATTR(max_age, S_IRUGO | S_IWUSR, show_max_age, store_max_age); 95static DEVICE_ATTR(max_age, S_IRUGO | S_IWUSR, show_max_age, store_max_age);
129 96
diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
index fd5799c9bc8d..6229b62749e8 100644
--- a/net/bridge/br_sysfs_if.c
+++ b/net/bridge/br_sysfs_if.c
@@ -23,7 +23,7 @@
23struct brport_attribute { 23struct brport_attribute {
24 struct attribute attr; 24 struct attribute attr;
25 ssize_t (*show)(struct net_bridge_port *, char *); 25 ssize_t (*show)(struct net_bridge_port *, char *);
26 ssize_t (*store)(struct net_bridge_port *, unsigned long); 26 int (*store)(struct net_bridge_port *, unsigned long);
27}; 27};
28 28
29#define BRPORT_ATTR(_name,_mode,_show,_store) \ 29#define BRPORT_ATTR(_name,_mode,_show,_store) \
@@ -38,27 +38,17 @@ static ssize_t show_path_cost(struct net_bridge_port *p, char *buf)
38{ 38{
39 return sprintf(buf, "%d\n", p->path_cost); 39 return sprintf(buf, "%d\n", p->path_cost);
40} 40}
41static ssize_t store_path_cost(struct net_bridge_port *p, unsigned long v) 41
42{
43 br_stp_set_path_cost(p, v);
44 return 0;
45}
46static BRPORT_ATTR(path_cost, S_IRUGO | S_IWUSR, 42static BRPORT_ATTR(path_cost, S_IRUGO | S_IWUSR,
47 show_path_cost, store_path_cost); 43 show_path_cost, br_stp_set_path_cost);
48 44
49static ssize_t show_priority(struct net_bridge_port *p, char *buf) 45static ssize_t show_priority(struct net_bridge_port *p, char *buf)
50{ 46{
51 return sprintf(buf, "%d\n", p->priority); 47 return sprintf(buf, "%d\n", p->priority);
52} 48}
53static ssize_t store_priority(struct net_bridge_port *p, unsigned long v) 49
54{
55 if (v >= (1<<(16-BR_PORT_BITS)))
56 return -ERANGE;
57 br_stp_set_port_priority(p, v);
58 return 0;
59}
60static BRPORT_ATTR(priority, S_IRUGO | S_IWUSR, 50static BRPORT_ATTR(priority, S_IRUGO | S_IWUSR,
61 show_priority, store_priority); 51 show_priority, br_stp_set_port_priority);
62 52
63static ssize_t show_designated_root(struct net_bridge_port *p, char *buf) 53static ssize_t show_designated_root(struct net_bridge_port *p, char *buf)
64{ 54{
@@ -136,7 +126,7 @@ static ssize_t show_hold_timer(struct net_bridge_port *p,
136} 126}
137static BRPORT_ATTR(hold_timer, S_IRUGO, show_hold_timer, NULL); 127static BRPORT_ATTR(hold_timer, S_IRUGO, show_hold_timer, NULL);
138 128
139static ssize_t store_flush(struct net_bridge_port *p, unsigned long v) 129static int store_flush(struct net_bridge_port *p, unsigned long v)
140{ 130{
141 br_fdb_delete_by_port(p->br, p, 0); // Don't delete local entry 131 br_fdb_delete_by_port(p->br, p, 0); // Don't delete local entry
142 return 0; 132 return 0;
@@ -148,7 +138,7 @@ static ssize_t show_hairpin_mode(struct net_bridge_port *p, char *buf)
148 int hairpin_mode = (p->flags & BR_HAIRPIN_MODE) ? 1 : 0; 138 int hairpin_mode = (p->flags & BR_HAIRPIN_MODE) ? 1 : 0;
149 return sprintf(buf, "%d\n", hairpin_mode); 139 return sprintf(buf, "%d\n", hairpin_mode);
150} 140}
151static ssize_t store_hairpin_mode(struct net_bridge_port *p, unsigned long v) 141static int store_hairpin_mode(struct net_bridge_port *p, unsigned long v)
152{ 142{
153 if (v) 143 if (v)
154 p->flags |= BR_HAIRPIN_MODE; 144 p->flags |= BR_HAIRPIN_MODE;
@@ -165,7 +155,7 @@ static ssize_t show_multicast_router(struct net_bridge_port *p, char *buf)
165 return sprintf(buf, "%d\n", p->multicast_router); 155 return sprintf(buf, "%d\n", p->multicast_router);
166} 156}
167 157
168static ssize_t store_multicast_router(struct net_bridge_port *p, 158static int store_multicast_router(struct net_bridge_port *p,
169 unsigned long v) 159 unsigned long v)
170{ 160{
171 return br_multicast_set_port_router(p, v); 161 return br_multicast_set_port_router(p, v);
diff --git a/net/caif/caif_config_util.c b/net/caif/caif_config_util.c
index d522d8c1703e..9b63e4e3910e 100644
--- a/net/caif/caif_config_util.c
+++ b/net/caif/caif_config_util.c
@@ -10,9 +10,9 @@
10#include <net/caif/cfcnfg.h> 10#include <net/caif/cfcnfg.h>
11#include <net/caif/caif_dev.h> 11#include <net/caif/caif_dev.h>
12 12
13int connect_req_to_link_param(struct cfcnfg *cnfg, 13int caif_connect_req_to_link_param(struct cfcnfg *cnfg,
14 struct caif_connect_request *s, 14 struct caif_connect_request *s,
15 struct cfctrl_link_param *l) 15 struct cfctrl_link_param *l)
16{ 16{
17 struct dev_info *dev_info; 17 struct dev_info *dev_info;
18 enum cfcnfg_phy_preference pref; 18 enum cfcnfg_phy_preference pref;
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index a42a408306e4..75e00d59eb49 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -120,25 +120,12 @@ static int transmit(struct cflayer *layer, struct cfpkt *pkt)
120{ 120{
121 struct caif_device_entry *caifd = 121 struct caif_device_entry *caifd =
122 container_of(layer, struct caif_device_entry, layer); 122 container_of(layer, struct caif_device_entry, layer);
123 struct sk_buff *skb, *skb2; 123 struct sk_buff *skb;
124 int ret = -EINVAL; 124
125 skb = cfpkt_tonative(pkt); 125 skb = cfpkt_tonative(pkt);
126 skb->dev = caifd->netdev; 126 skb->dev = caifd->netdev;
127 /* 127
128 * Don't allow SKB to be destroyed upon error, but signal resend 128 dev_queue_xmit(skb);
129 * notification to clients. We can't rely on the return value as
130 * congestion (NET_XMIT_CN) sometimes drops the packet, sometimes don't.
131 */
132 if (netif_queue_stopped(caifd->netdev))
133 return -EAGAIN;
134 skb2 = skb_get(skb);
135
136 ret = dev_queue_xmit(skb2);
137
138 if (!ret)
139 kfree_skb(skb);
140 else
141 return -EAGAIN;
142 129
143 return 0; 130 return 0;
144} 131}
@@ -146,9 +133,7 @@ static int transmit(struct cflayer *layer, struct cfpkt *pkt)
146static int modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl) 133static int modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
147{ 134{
148 struct caif_device_entry *caifd; 135 struct caif_device_entry *caifd;
149 struct caif_dev_common *caifdev;
150 caifd = container_of(layr, struct caif_device_entry, layer); 136 caifd = container_of(layr, struct caif_device_entry, layer);
151 caifdev = netdev_priv(caifd->netdev);
152 if (ctrl == _CAIF_MODEMCMD_PHYIF_USEFULL) { 137 if (ctrl == _CAIF_MODEMCMD_PHYIF_USEFULL) {
153 atomic_set(&caifd->in_use, 1); 138 atomic_set(&caifd->in_use, 1);
154 wake_up_interruptible(&caifd->event); 139 wake_up_interruptible(&caifd->event);
@@ -167,10 +152,8 @@ static int modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
167static int receive(struct sk_buff *skb, struct net_device *dev, 152static int receive(struct sk_buff *skb, struct net_device *dev,
168 struct packet_type *pkttype, struct net_device *orig_dev) 153 struct packet_type *pkttype, struct net_device *orig_dev)
169{ 154{
170 struct net *net;
171 struct cfpkt *pkt; 155 struct cfpkt *pkt;
172 struct caif_device_entry *caifd; 156 struct caif_device_entry *caifd;
173 net = dev_net(dev);
174 pkt = cfpkt_fromnative(CAIF_DIR_IN, skb); 157 pkt = cfpkt_fromnative(CAIF_DIR_IN, skb);
175 caifd = caif_get(dev); 158 caifd = caif_get(dev);
176 if (!caifd || !caifd->layer.up || !caifd->layer.up->receive) 159 if (!caifd || !caifd->layer.up || !caifd->layer.up->receive)
@@ -208,7 +191,6 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
208 struct caif_device_entry *caifd = NULL; 191 struct caif_device_entry *caifd = NULL;
209 struct caif_dev_common *caifdev; 192 struct caif_dev_common *caifdev;
210 enum cfcnfg_phy_preference pref; 193 enum cfcnfg_phy_preference pref;
211 int res = -EINVAL;
212 enum cfcnfg_phy_type phy_type; 194 enum cfcnfg_phy_type phy_type;
213 195
214 if (dev->type != ARPHRD_CAIF) 196 if (dev->type != ARPHRD_CAIF)
@@ -223,7 +205,6 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
223 caifdev = netdev_priv(dev); 205 caifdev = netdev_priv(dev);
224 caifdev->flowctrl = dev_flowctrl; 206 caifdev->flowctrl = dev_flowctrl;
225 atomic_set(&caifd->state, what); 207 atomic_set(&caifd->state, what);
226 res = 0;
227 break; 208 break;
228 209
229 case NETDEV_UP: 210 case NETDEV_UP:
@@ -257,7 +238,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
257 break; 238 break;
258 } 239 }
259 dev_hold(dev); 240 dev_hold(dev);
260 cfcnfg_add_phy_layer(get_caif_conf(), 241 cfcnfg_add_phy_layer(cfg,
261 phy_type, 242 phy_type,
262 dev, 243 dev,
263 &caifd->layer, 244 &caifd->layer,
@@ -287,7 +268,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
287 _CAIF_CTRLCMD_PHYIF_DOWN_IND, 268 _CAIF_CTRLCMD_PHYIF_DOWN_IND,
288 caifd->layer.id); 269 caifd->layer.id);
289 might_sleep(); 270 might_sleep();
290 res = wait_event_interruptible_timeout(caifd->event, 271 wait_event_interruptible_timeout(caifd->event,
291 atomic_read(&caifd->in_use) == 0, 272 atomic_read(&caifd->in_use) == 0,
292 TIMEOUT); 273 TIMEOUT);
293 break; 274 break;
@@ -300,7 +281,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
300 if (atomic_read(&caifd->in_use)) 281 if (atomic_read(&caifd->in_use))
301 netdev_warn(dev, 282 netdev_warn(dev,
302 "Unregistering an active CAIF device\n"); 283 "Unregistering an active CAIF device\n");
303 cfcnfg_del_phy_layer(get_caif_conf(), &caifd->layer); 284 cfcnfg_del_phy_layer(cfg, &caifd->layer);
304 dev_put(dev); 285 dev_put(dev);
305 atomic_set(&caifd->state, what); 286 atomic_set(&caifd->state, what);
306 break; 287 break;
@@ -322,24 +303,18 @@ static struct notifier_block caif_device_notifier = {
322 .priority = 0, 303 .priority = 0,
323}; 304};
324 305
325
326struct cfcnfg *get_caif_conf(void)
327{
328 return cfg;
329}
330EXPORT_SYMBOL(get_caif_conf);
331
332int caif_connect_client(struct caif_connect_request *conn_req, 306int caif_connect_client(struct caif_connect_request *conn_req,
333 struct cflayer *client_layer, int *ifindex, 307 struct cflayer *client_layer, int *ifindex,
334 int *headroom, int *tailroom) 308 int *headroom, int *tailroom)
335{ 309{
336 struct cfctrl_link_param param; 310 struct cfctrl_link_param param;
337 int ret; 311 int ret;
338 ret = connect_req_to_link_param(get_caif_conf(), conn_req, &param); 312
313 ret = caif_connect_req_to_link_param(cfg, conn_req, &param);
339 if (ret) 314 if (ret)
340 return ret; 315 return ret;
341 /* Hook up the adaptation layer. */ 316 /* Hook up the adaptation layer. */
342 return cfcnfg_add_adaptation_layer(get_caif_conf(), &param, 317 return cfcnfg_add_adaptation_layer(cfg, &param,
343 client_layer, ifindex, 318 client_layer, ifindex,
344 headroom, tailroom); 319 headroom, tailroom);
345} 320}
@@ -347,16 +322,10 @@ EXPORT_SYMBOL(caif_connect_client);
347 322
348int caif_disconnect_client(struct cflayer *adap_layer) 323int caif_disconnect_client(struct cflayer *adap_layer)
349{ 324{
350 return cfcnfg_disconn_adapt_layer(get_caif_conf(), adap_layer); 325 return cfcnfg_disconn_adapt_layer(cfg, adap_layer);
351} 326}
352EXPORT_SYMBOL(caif_disconnect_client); 327EXPORT_SYMBOL(caif_disconnect_client);
353 328
354void caif_release_client(struct cflayer *adap_layer)
355{
356 cfcnfg_release_adap_layer(adap_layer);
357}
358EXPORT_SYMBOL(caif_release_client);
359
360/* Per-namespace Caif devices handling */ 329/* Per-namespace Caif devices handling */
361static int caif_init_net(struct net *net) 330static int caif_init_net(struct net *net)
362{ 331{
@@ -369,12 +338,11 @@ static int caif_init_net(struct net *net)
369static void caif_exit_net(struct net *net) 338static void caif_exit_net(struct net *net)
370{ 339{
371 struct net_device *dev; 340 struct net_device *dev;
372 int res;
373 rtnl_lock(); 341 rtnl_lock();
374 for_each_netdev(net, dev) { 342 for_each_netdev(net, dev) {
375 if (dev->type != ARPHRD_CAIF) 343 if (dev->type != ARPHRD_CAIF)
376 continue; 344 continue;
377 res = dev_close(dev); 345 dev_close(dev);
378 caif_device_destroy(dev); 346 caif_device_destroy(dev);
379 } 347 }
380 rtnl_unlock(); 348 rtnl_unlock();
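
The caif_dev.c hunks above delete the skb_get()/retry dance from the transmit path: when the underlying netdev queue is stopped, the layer now simply reports -EAGAIN and leaves any waiting to the caller. A minimal sketch of that contract, assuming the caller owns the skb until the handoff (the function name is illustrative, not from the patch):

/* Fail fast instead of cloning and retrying inside the driver glue. */
static int example_transmit(struct net_device *dev, struct sk_buff *skb)
{
	if (netif_queue_stopped(dev))
		return -EAGAIN;		/* caller may wait for flow-on and retry */

	/* dev_queue_xmit() consumes the skb on success and on error alike,
	 * which is why no kfree_skb() follows here. */
	return dev_queue_xmit(skb);
}
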
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 8184c031d028..20212424e2e8 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -519,43 +519,14 @@ static int transmit_skb(struct sk_buff *skb, struct caifsock *cf_sk,
519 int noblock, long timeo) 519 int noblock, long timeo)
520{ 520{
521 struct cfpkt *pkt; 521 struct cfpkt *pkt;
522 int ret, loopcnt = 0;
523 522
524 pkt = cfpkt_fromnative(CAIF_DIR_OUT, skb); 523 pkt = cfpkt_fromnative(CAIF_DIR_OUT, skb);
525 memset(cfpkt_info(pkt), 0, sizeof(struct caif_payload_info)); 524 memset(cfpkt_info(pkt), 0, sizeof(struct caif_payload_info));
526 do {
527
528 ret = -ETIMEDOUT;
529 525
530 /* Slight paranoia, probably not needed. */ 526 if (cf_sk->layer.dn == NULL)
531 if (unlikely(loopcnt++ > 1000)) { 527 return -EINVAL;
532 pr_warn("transmit retries failed, error = %d\n", ret);
533 break;
534 }
535 528
536 if (cf_sk->layer.dn != NULL) 529 return cf_sk->layer.dn->transmit(cf_sk->layer.dn, pkt);
537 ret = cf_sk->layer.dn->transmit(cf_sk->layer.dn, pkt);
538 if (likely(ret >= 0))
539 break;
540 /* if transmit return -EAGAIN, then retry */
541 if (noblock && ret == -EAGAIN)
542 break;
543 timeo = caif_wait_for_flow_on(cf_sk, 0, timeo, &ret);
544 if (signal_pending(current)) {
545 ret = sock_intr_errno(timeo);
546 break;
547 }
548 if (ret)
549 break;
550 if (cf_sk->sk.sk_state != CAIF_CONNECTED ||
551 sock_flag(&cf_sk->sk, SOCK_DEAD) ||
552 (cf_sk->sk.sk_shutdown & RCV_SHUTDOWN)) {
553 ret = -EPIPE;
554 cf_sk->sk.sk_err = EPIPE;
555 break;
556 }
557 } while (ret == -EAGAIN);
558 return ret;
559} 530}
560 531
561/* Copied from af_unix:unix_dgram_sendmsg, and adapted to CAIF */ 532/* Copied from af_unix:unix_dgram_sendmsg, and adapted to CAIF */
@@ -852,7 +823,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
852 sock->state = SS_CONNECTING; 823 sock->state = SS_CONNECTING;
853 sk->sk_state = CAIF_CONNECTING; 824 sk->sk_state = CAIF_CONNECTING;
854 825
855 /* Check priority value comming from socket */ 826 /* Check priority value coming from socket */
856 /* if priority value is out of range it will be adjusted */ 827 /* if priority value is out of range it will be adjusted */
857 if (cf_sk->sk.sk_priority > CAIF_PRIO_MAX) 828 if (cf_sk->sk.sk_priority > CAIF_PRIO_MAX)
858 cf_sk->conn_req.priority = CAIF_PRIO_MAX; 829 cf_sk->conn_req.priority = CAIF_PRIO_MAX;
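
With the retry loop gone, transmit_skb() is a straight pass-through: it validates the downstream layer once and returns whatever that layer reports, so an -EAGAIN from a stopped device queue surfaces directly to the sendmsg path. A generic sketch of the check-then-delegate shape (the helper name is illustrative):

static int xmit_via_dn(struct cflayer *layer, struct cfpkt *pkt)
{
	if (layer->dn == NULL || layer->dn->transmit == NULL)
		return -EINVAL;		/* nothing hooked up below us yet */

	return layer->dn->transmit(layer->dn, pkt);
}
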
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index f1f98d967d8a..25c0b198e285 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
@@ -253,7 +253,7 @@ static void cfcnfg_linkdestroy_rsp(struct cflayer *layer, u8 channel_id)
253{ 253{
254} 254}
255 255
256int protohead[CFCTRL_SRV_MASK] = { 256static const int protohead[CFCTRL_SRV_MASK] = {
257 [CFCTRL_SRV_VEI] = 4, 257 [CFCTRL_SRV_VEI] = 4,
258 [CFCTRL_SRV_DATAGRAM] = 7, 258 [CFCTRL_SRV_DATAGRAM] = 7,
259 [CFCTRL_SRV_UTIL] = 4, 259 [CFCTRL_SRV_UTIL] = 4,
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index 3cd8f978e309..397a2c099e2c 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -58,7 +58,8 @@ struct cflayer *cfctrl_create(void)
58 return &this->serv.layer; 58 return &this->serv.layer;
59} 59}
60 60
61static bool param_eq(struct cfctrl_link_param *p1, struct cfctrl_link_param *p2) 61static bool param_eq(const struct cfctrl_link_param *p1,
62 const struct cfctrl_link_param *p2)
62{ 63{
63 bool eq = 64 bool eq =
64 p1->linktype == p2->linktype && 65 p1->linktype == p2->linktype &&
@@ -100,8 +101,8 @@ static bool param_eq(struct cfctrl_link_param *p1, struct cfctrl_link_param *p2)
100 return false; 101 return false;
101} 102}
102 103
103bool cfctrl_req_eq(struct cfctrl_request_info *r1, 104static bool cfctrl_req_eq(const struct cfctrl_request_info *r1,
104 struct cfctrl_request_info *r2) 105 const struct cfctrl_request_info *r2)
105{ 106{
106 if (r1->cmd != r2->cmd) 107 if (r1->cmd != r2->cmd)
107 return false; 108 return false;
@@ -112,7 +113,7 @@ bool cfctrl_req_eq(struct cfctrl_request_info *r1,
112} 113}
113 114
114/* Insert request at the end */ 115/* Insert request at the end */
115void cfctrl_insert_req(struct cfctrl *ctrl, 116static void cfctrl_insert_req(struct cfctrl *ctrl,
116 struct cfctrl_request_info *req) 117 struct cfctrl_request_info *req)
117{ 118{
118 spin_lock(&ctrl->info_list_lock); 119 spin_lock(&ctrl->info_list_lock);
@@ -123,8 +124,8 @@ void cfctrl_insert_req(struct cfctrl *ctrl,
123} 124}
124 125
125/* Compare and remove request */ 126/* Compare and remove request */
126struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl, 127static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
127 struct cfctrl_request_info *req) 128 struct cfctrl_request_info *req)
128{ 129{
129 struct cfctrl_request_info *p, *tmp, *first; 130 struct cfctrl_request_info *p, *tmp, *first;
130 131
@@ -154,16 +155,6 @@ struct cfctrl_rsp *cfctrl_get_respfuncs(struct cflayer *layer)
154 return &this->res; 155 return &this->res;
155} 156}
156 157
157void cfctrl_set_dnlayer(struct cflayer *this, struct cflayer *dn)
158{
159 this->dn = dn;
160}
161
162void cfctrl_set_uplayer(struct cflayer *this, struct cflayer *up)
163{
164 this->up = up;
165}
166
167static void init_info(struct caif_payload_info *info, struct cfctrl *cfctrl) 158static void init_info(struct caif_payload_info *info, struct cfctrl *cfctrl)
168{ 159{
169 info->hdr_len = 0; 160 info->hdr_len = 0;
@@ -304,58 +295,6 @@ int cfctrl_linkdown_req(struct cflayer *layer, u8 channelid,
304 return ret; 295 return ret;
305} 296}
306 297
307void cfctrl_sleep_req(struct cflayer *layer)
308{
309 int ret;
310 struct cfctrl *cfctrl = container_obj(layer);
311 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
312 if (!pkt) {
313 pr_warn("Out of memory\n");
314 return;
315 }
316 cfpkt_addbdy(pkt, CFCTRL_CMD_SLEEP);
317 init_info(cfpkt_info(pkt), cfctrl);
318 ret =
319 cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
320 if (ret < 0)
321 cfpkt_destroy(pkt);
322}
323
324void cfctrl_wake_req(struct cflayer *layer)
325{
326 int ret;
327 struct cfctrl *cfctrl = container_obj(layer);
328 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
329 if (!pkt) {
330 pr_warn("Out of memory\n");
331 return;
332 }
333 cfpkt_addbdy(pkt, CFCTRL_CMD_WAKE);
334 init_info(cfpkt_info(pkt), cfctrl);
335 ret =
336 cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
337 if (ret < 0)
338 cfpkt_destroy(pkt);
339}
340
341void cfctrl_getstartreason_req(struct cflayer *layer)
342{
343 int ret;
344 struct cfctrl *cfctrl = container_obj(layer);
345 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
346 if (!pkt) {
347 pr_warn("Out of memory\n");
348 return;
349 }
350 cfpkt_addbdy(pkt, CFCTRL_CMD_START_REASON);
351 init_info(cfpkt_info(pkt), cfctrl);
352 ret =
353 cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
354 if (ret < 0)
355 cfpkt_destroy(pkt);
356}
357
358
359void cfctrl_cancel_req(struct cflayer *layr, struct cflayer *adap_layer) 298void cfctrl_cancel_req(struct cflayer *layr, struct cflayer *adap_layer)
360{ 299{
361 struct cfctrl_request_info *p, *tmp; 300 struct cfctrl_request_info *p, *tmp;
diff --git a/net/caif/cfdgml.c b/net/caif/cfdgml.c
index 27dab26ad3b8..0382dec84fdc 100644
--- a/net/caif/cfdgml.c
+++ b/net/caif/cfdgml.c
@@ -13,6 +13,7 @@
13#include <net/caif/cfsrvl.h> 13#include <net/caif/cfsrvl.h>
14#include <net/caif/cfpkt.h> 14#include <net/caif/cfpkt.h>
15 15
16
16#define container_obj(layr) ((struct cfsrvl *) layr) 17#define container_obj(layr) ((struct cfsrvl *) layr)
17 18
18#define DGM_CMD_BIT 0x80 19#define DGM_CMD_BIT 0x80
@@ -83,6 +84,7 @@ static int cfdgml_receive(struct cflayer *layr, struct cfpkt *pkt)
83 84
84static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt) 85static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt)
85{ 86{
87 u8 packet_type;
86 u32 zero = 0; 88 u32 zero = 0;
87 struct caif_payload_info *info; 89 struct caif_payload_info *info;
88 struct cfsrvl *service = container_obj(layr); 90 struct cfsrvl *service = container_obj(layr);
@@ -94,7 +96,9 @@ static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt)
94 if (cfpkt_getlen(pkt) > DGM_MTU) 96 if (cfpkt_getlen(pkt) > DGM_MTU)
95 return -EMSGSIZE; 97 return -EMSGSIZE;
96 98
97 cfpkt_add_head(pkt, &zero, 4); 99 cfpkt_add_head(pkt, &zero, 3);
100 packet_type = 0x08; /* B9 set - UNCLASSIFIED */
101 cfpkt_add_head(pkt, &packet_type, 1);
98 102
99 /* Add info for MUX-layer to route the packet out. */ 103 /* Add info for MUX-layer to route the packet out. */
100 info = cfpkt_info(pkt); 104 info = cfpkt_info(pkt);
@@ -104,10 +108,5 @@ static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt)
104 */ 108 */
105 info->hdr_len = 4; 109 info->hdr_len = 4;
106 info->dev_info = &service->dev_info; 110 info->dev_info = &service->dev_info;
107 ret = layr->dn->transmit(layr->dn, pkt); 111 return layr->dn->transmit(layr->dn, pkt);
108 if (ret < 0) {
109 u32 tmp32;
110 cfpkt_extr_head(pkt, &tmp32, 4);
111 }
112 return ret;
113} 112}
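
The datagram header stays 4 bytes, but its first byte is now an explicit packet-type field instead of zero. Since cfpkt_add_head() prepends, the byte added last ends up first on the wire; a sketch of the resulting layout (the helper name is illustrative):

static void dgm_push_header(struct cfpkt *pkt)
{
	u32 zero = 0;
	u8 packet_type = 0x08;			/* bit B9 set: UNCLASSIFIED */

	cfpkt_add_head(pkt, &zero, 3);		/* three zero bytes */
	cfpkt_add_head(pkt, &packet_type, 1);	/* wire layout: type, 0, 0, 0 */
}
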
diff --git a/net/caif/cffrml.c b/net/caif/cffrml.c
index a445043931ae..2423fed8e26c 100644
--- a/net/caif/cffrml.c
+++ b/net/caif/cffrml.c
@@ -120,7 +120,6 @@ static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt)
120 int tmp; 120 int tmp;
121 u16 chks; 121 u16 chks;
122 u16 len; 122 u16 len;
123 int ret;
124 struct cffrml *this = container_obj(layr); 123 struct cffrml *this = container_obj(layr);
125 if (this->dofcs) { 124 if (this->dofcs) {
126 chks = cfpkt_iterate(pkt, cffrml_checksum, 0xffff); 125 chks = cfpkt_iterate(pkt, cffrml_checksum, 0xffff);
@@ -137,12 +136,7 @@ static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt)
137 pr_err("Packet is erroneous!\n"); 136 pr_err("Packet is erroneous!\n");
138 return -EPROTO; 137 return -EPROTO;
139 } 138 }
140 ret = layr->dn->transmit(layr->dn, pkt); 139 return layr->dn->transmit(layr->dn, pkt);
141 if (ret < 0) {
142 /* Remove header on faulty packet. */
143 cfpkt_extr_head(pkt, &tmp, 2);
144 }
145 return ret;
146} 140}
147 141
148static void cffrml_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, 142static void cffrml_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
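
This hunk, like the matching ones in cfdgml, cfmuxl, cfserl, cfutill, cfveil and cfvidl, drops the cfpkt_extr_head() rollback on transmit failure. The ownership rule the stack now follows is that a packet handed to the layer below is consumed on the error path too, so touching it afterwards would risk a use-after-free. A sketch of that rule, with an illustrative helper:

static int push_and_send(struct cflayer *layr, struct cfpkt *pkt, u8 hdr)
{
	cfpkt_add_head(pkt, &hdr, 1);
	return layr->dn->transmit(layr->dn, pkt);
	/* no rollback: on failure pkt may already have been freed below */
}
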
diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c
index 46f34b2e0478..fc2497468571 100644
--- a/net/caif/cfmuxl.c
+++ b/net/caif/cfmuxl.c
@@ -71,41 +71,6 @@ int cfmuxl_set_uplayer(struct cflayer *layr, struct cflayer *up, u8 linkid)
71 return 0; 71 return 0;
72} 72}
73 73
74bool cfmuxl_is_phy_inuse(struct cflayer *layr, u8 phyid)
75{
76 struct list_head *node;
77 struct cflayer *layer;
78 struct cfmuxl *muxl = container_obj(layr);
79 bool match = false;
80 spin_lock(&muxl->receive_lock);
81
82 list_for_each(node, &muxl->srvl_list) {
83 layer = list_entry(node, struct cflayer, node);
84 if (cfsrvl_phyid_match(layer, phyid)) {
85 match = true;
86 break;
87 }
88
89 }
90 spin_unlock(&muxl->receive_lock);
91 return match;
92}
93
94u8 cfmuxl_get_phyid(struct cflayer *layr, u8 channel_id)
95{
96 struct cflayer *up;
97 int phyid;
98 struct cfmuxl *muxl = container_obj(layr);
99 spin_lock(&muxl->receive_lock);
100 up = get_up(muxl, channel_id);
101 if (up != NULL)
102 phyid = cfsrvl_getphyid(up);
103 else
104 phyid = 0;
105 spin_unlock(&muxl->receive_lock);
106 return phyid;
107}
108
109int cfmuxl_set_dnlayer(struct cflayer *layr, struct cflayer *dn, u8 phyid) 74int cfmuxl_set_dnlayer(struct cflayer *layr, struct cflayer *dn, u8 phyid)
110{ 75{
111 struct cfmuxl *muxl = (struct cfmuxl *) layr; 76 struct cfmuxl *muxl = (struct cfmuxl *) layr;
@@ -219,12 +184,12 @@ static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt)
219 184
220static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt) 185static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt)
221{ 186{
222 int ret;
223 struct cfmuxl *muxl = container_obj(layr); 187 struct cfmuxl *muxl = container_obj(layr);
224 u8 linkid; 188 u8 linkid;
225 struct cflayer *dn; 189 struct cflayer *dn;
226 struct caif_payload_info *info = cfpkt_info(pkt); 190 struct caif_payload_info *info = cfpkt_info(pkt);
227 dn = get_dn(muxl, cfpkt_info(pkt)->dev_info); 191 BUG_ON(!info);
192 dn = get_dn(muxl, info->dev_info);
228 if (dn == NULL) { 193 if (dn == NULL) {
229 pr_warn("Send data on unknown phy ID = %d (0x%x)\n", 194 pr_warn("Send data on unknown phy ID = %d (0x%x)\n",
230 info->dev_info->id, info->dev_info->id); 195 info->dev_info->id, info->dev_info->id);
@@ -233,20 +198,16 @@ static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt)
233 info->hdr_len += 1; 198 info->hdr_len += 1;
234 linkid = info->channel_id; 199 linkid = info->channel_id;
235 cfpkt_add_head(pkt, &linkid, 1); 200 cfpkt_add_head(pkt, &linkid, 1);
236 ret = dn->transmit(dn, pkt); 201 return dn->transmit(dn, pkt);
237 /* Remove MUX protocol header upon error. */
238 if (ret < 0)
239 cfpkt_extr_head(pkt, &linkid, 1);
240 return ret;
241} 202}
242 203
243static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, 204static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
244 int phyid) 205 int phyid)
245{ 206{
246 struct cfmuxl *muxl = container_obj(layr); 207 struct cfmuxl *muxl = container_obj(layr);
247 struct list_head *node; 208 struct list_head *node, *next;
248 struct cflayer *layer; 209 struct cflayer *layer;
249 list_for_each(node, &muxl->srvl_list) { 210 list_for_each_safe(node, next, &muxl->srvl_list) {
250 layer = list_entry(node, struct cflayer, node); 211 layer = list_entry(node, struct cflayer, node);
251 if (cfsrvl_phyid_match(layer, phyid)) 212 if (cfsrvl_phyid_match(layer, phyid))
252 layer->ctrlcmd(layer, ctrl, phyid); 213 layer->ctrlcmd(layer, ctrl, phyid);
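
Plain list_for_each() reads node->next after the loop body runs, so a ctrlcmd handler that unlinks its own layer would leave the walk chasing freed memory; the _safe variant caches the successor first. An equivalent sketch using the entry-based form:

struct cflayer *layer, *next;

list_for_each_entry_safe(layer, next, &muxl->srvl_list, node) {
	/* safe even if the handler removes 'layer' from srvl_list */
	if (cfsrvl_phyid_match(layer, phyid))
		layer->ctrlcmd(layer, ctrl, phyid);
}
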
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index d7e865e2ff65..20c6cb3522e0 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -42,22 +42,22 @@ struct cfpkt_priv_data {
42 bool erronous; 42 bool erronous;
43}; 43};
44 44
45inline struct cfpkt_priv_data *cfpkt_priv(struct cfpkt *pkt) 45static inline struct cfpkt_priv_data *cfpkt_priv(struct cfpkt *pkt)
46{ 46{
47 return (struct cfpkt_priv_data *) pkt->skb.cb; 47 return (struct cfpkt_priv_data *) pkt->skb.cb;
48} 48}
49 49
50inline bool is_erronous(struct cfpkt *pkt) 50static inline bool is_erronous(struct cfpkt *pkt)
51{ 51{
52 return cfpkt_priv(pkt)->erronous; 52 return cfpkt_priv(pkt)->erronous;
53} 53}
54 54
55inline struct sk_buff *pkt_to_skb(struct cfpkt *pkt) 55static inline struct sk_buff *pkt_to_skb(struct cfpkt *pkt)
56{ 56{
57 return &pkt->skb; 57 return &pkt->skb;
58} 58}
59 59
60inline struct cfpkt *skb_to_pkt(struct sk_buff *skb) 60static inline struct cfpkt *skb_to_pkt(struct sk_buff *skb)
61{ 61{
62 return (struct cfpkt *) skb; 62 return (struct cfpkt *) skb;
63} 63}
@@ -317,17 +317,6 @@ int cfpkt_setlen(struct cfpkt *pkt, u16 len)
317} 317}
318EXPORT_SYMBOL(cfpkt_setlen); 318EXPORT_SYMBOL(cfpkt_setlen);
319 319
320struct cfpkt *cfpkt_create_uplink(const unsigned char *data, unsigned int len)
321{
322 struct cfpkt *pkt = cfpkt_create_pfx(len + PKT_POSTFIX, PKT_PREFIX);
323 if (!pkt)
324 return NULL;
325 if (unlikely(data != NULL))
326 cfpkt_add_body(pkt, data, len);
327 return pkt;
328}
329EXPORT_SYMBOL(cfpkt_create_uplink);
330
331struct cfpkt *cfpkt_append(struct cfpkt *dstpkt, 320struct cfpkt *cfpkt_append(struct cfpkt *dstpkt,
332 struct cfpkt *addpkt, 321 struct cfpkt *addpkt,
333 u16 expectlen) 322 u16 expectlen)
@@ -408,169 +397,12 @@ struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos)
408} 397}
409EXPORT_SYMBOL(cfpkt_split); 398EXPORT_SYMBOL(cfpkt_split);
410 399
411char *cfpkt_log_pkt(struct cfpkt *pkt, char *buf, int buflen) 400bool cfpkt_erroneous(struct cfpkt *pkt)
412{
413 struct sk_buff *skb = pkt_to_skb(pkt);
414 char *p = buf;
415 int i;
416
417 /*
418 * Sanity check buffer length, it needs to be at least as large as
419 * the header info: ~=50+ bytes
420 */
421 if (buflen < 50)
422 return NULL;
423
424 snprintf(buf, buflen, "%s: pkt:%p len:%ld(%ld+%ld) {%ld,%ld} data: [",
425 is_erronous(pkt) ? "ERRONOUS-SKB" :
426 (skb->data_len != 0 ? "COMPLEX-SKB" : "SKB"),
427 skb,
428 (long) skb->len,
429 (long) (skb_tail_pointer(skb) - skb->data),
430 (long) skb->data_len,
431 (long) (skb->data - skb->head),
432 (long) (skb_tail_pointer(skb) - skb->head));
433 p = buf + strlen(buf);
434
435 for (i = 0; i < skb_tail_pointer(skb) - skb->data && i < 300; i++) {
436 if (p > buf + buflen - 10) {
437 sprintf(p, "...");
438 p = buf + strlen(buf);
439 break;
440 }
441 sprintf(p, "%02x,", skb->data[i]);
442 p = buf + strlen(buf);
443 }
444 sprintf(p, "]\n");
445 return buf;
446}
447EXPORT_SYMBOL(cfpkt_log_pkt);
448
449int cfpkt_raw_append(struct cfpkt *pkt, void **buf, unsigned int buflen)
450{
451 struct sk_buff *skb = pkt_to_skb(pkt);
452 struct sk_buff *lastskb;
453
454 caif_assert(buf != NULL);
455 if (unlikely(is_erronous(pkt)))
456 return -EPROTO;
457 /* Make sure SKB is writable */
458 if (unlikely(skb_cow_data(skb, 0, &lastskb) < 0)) {
459 PKT_ERROR(pkt, "skb_cow_data failed\n");
460 return -EPROTO;
461 }
462
463 if (unlikely(skb_linearize(skb) != 0)) {
464 PKT_ERROR(pkt, "linearize failed\n");
465 return -EPROTO;
466 }
467
468 if (unlikely(skb_tailroom(skb) < buflen)) {
469 PKT_ERROR(pkt, "buffer too short - failed\n");
470 return -EPROTO;
471 }
472
473 *buf = skb_put(skb, buflen);
474 return 1;
475}
476EXPORT_SYMBOL(cfpkt_raw_append);
477
478int cfpkt_raw_extract(struct cfpkt *pkt, void **buf, unsigned int buflen)
479{
480 struct sk_buff *skb = pkt_to_skb(pkt);
481
482 caif_assert(buf != NULL);
483 if (unlikely(is_erronous(pkt)))
484 return -EPROTO;
485
486 if (unlikely(buflen > skb->len)) {
487 PKT_ERROR(pkt, "buflen too large - failed\n");
488 return -EPROTO;
489 }
490
491 if (unlikely(buflen > skb_headlen(skb))) {
492 if (unlikely(skb_linearize(skb) != 0)) {
493 PKT_ERROR(pkt, "linearize failed\n");
494 return -EPROTO;
495 }
496 }
497
498 *buf = skb->data;
499 skb_pull(skb, buflen);
500
501 return 1;
502}
503EXPORT_SYMBOL(cfpkt_raw_extract);
504
505inline bool cfpkt_erroneous(struct cfpkt *pkt)
506{ 401{
507 return cfpkt_priv(pkt)->erronous; 402 return cfpkt_priv(pkt)->erronous;
508} 403}
509EXPORT_SYMBOL(cfpkt_erroneous); 404EXPORT_SYMBOL(cfpkt_erroneous);
510 405
511struct cfpktq *cfpktq_create(void)
512{
513 struct cfpktq *q = kmalloc(sizeof(struct cfpktq), GFP_ATOMIC);
514 if (!q)
515 return NULL;
516 skb_queue_head_init(&q->head);
517 atomic_set(&q->count, 0);
518 spin_lock_init(&q->lock);
519 return q;
520}
521EXPORT_SYMBOL(cfpktq_create);
522
523void cfpkt_queue(struct cfpktq *pktq, struct cfpkt *pkt, unsigned short prio)
524{
525 atomic_inc(&pktq->count);
526 spin_lock(&pktq->lock);
527 skb_queue_tail(&pktq->head, pkt_to_skb(pkt));
528 spin_unlock(&pktq->lock);
529
530}
531EXPORT_SYMBOL(cfpkt_queue);
532
533struct cfpkt *cfpkt_qpeek(struct cfpktq *pktq)
534{
535 struct cfpkt *tmp;
536 spin_lock(&pktq->lock);
537 tmp = skb_to_pkt(skb_peek(&pktq->head));
538 spin_unlock(&pktq->lock);
539 return tmp;
540}
541EXPORT_SYMBOL(cfpkt_qpeek);
542
543struct cfpkt *cfpkt_dequeue(struct cfpktq *pktq)
544{
545 struct cfpkt *pkt;
546 spin_lock(&pktq->lock);
547 pkt = skb_to_pkt(skb_dequeue(&pktq->head));
548 if (pkt) {
549 atomic_dec(&pktq->count);
550 caif_assert(atomic_read(&pktq->count) >= 0);
551 }
552 spin_unlock(&pktq->lock);
553 return pkt;
554}
555EXPORT_SYMBOL(cfpkt_dequeue);
556
557int cfpkt_qcount(struct cfpktq *pktq)
558{
559 return atomic_read(&pktq->count);
560}
561EXPORT_SYMBOL(cfpkt_qcount);
562
563struct cfpkt *cfpkt_clone_release(struct cfpkt *pkt)
564{
565 struct cfpkt *clone;
566 clone = skb_to_pkt(skb_clone(pkt_to_skb(pkt), GFP_ATOMIC));
567 /* Free original packet. */
568 cfpkt_destroy(pkt);
569 if (!clone)
570 return NULL;
571 return clone;
572}
573EXPORT_SYMBOL(cfpkt_clone_release);
574 406
575struct caif_payload_info *cfpkt_info(struct cfpkt *pkt) 407struct caif_payload_info *cfpkt_info(struct cfpkt *pkt)
576{ 408{
diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c
index 8303fe3ebf89..2715c84cfa87 100644
--- a/net/caif/cfserl.c
+++ b/net/caif/cfserl.c
@@ -179,15 +179,10 @@ static int cfserl_receive(struct cflayer *l, struct cfpkt *newpkt)
179static int cfserl_transmit(struct cflayer *layer, struct cfpkt *newpkt) 179static int cfserl_transmit(struct cflayer *layer, struct cfpkt *newpkt)
180{ 180{
181 struct cfserl *layr = container_obj(layer); 181 struct cfserl *layr = container_obj(layer);
182 int ret;
183 u8 tmp8 = CFSERL_STX; 182 u8 tmp8 = CFSERL_STX;
184 if (layr->usestx) 183 if (layr->usestx)
185 cfpkt_add_head(newpkt, &tmp8, 1); 184 cfpkt_add_head(newpkt, &tmp8, 1);
186 ret = layer->dn->transmit(layer->dn, newpkt); 185 return layer->dn->transmit(layer->dn, newpkt);
187 if (ret < 0)
188 cfpkt_extr_head(newpkt, &tmp8, 1);
189
190 return ret;
191} 186}
192 187
193static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, 188static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c
index ab5e542526bf..24ba392f203b 100644
--- a/net/caif/cfsrvl.c
+++ b/net/caif/cfsrvl.c
@@ -151,12 +151,7 @@ static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
151 return -EINVAL; 151 return -EINVAL;
152} 152}
153 153
154void cfservl_destroy(struct cflayer *layer) 154static void cfsrvl_release(struct kref *kref)
155{
156 kfree(layer);
157}
158
159void cfsrvl_release(struct kref *kref)
160{ 155{
161 struct cfsrvl *service = container_of(kref, struct cfsrvl, ref); 156 struct cfsrvl *service = container_of(kref, struct cfsrvl, ref);
162 kfree(service); 157 kfree(service);
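
cfsrvl_release() can become static because nothing calls it directly any more; it is reachable only through kref_put(). A sketch of the matching put helper, which consequently has to live in the same file (the real accessor may differ):

static inline void cfsrvl_put(struct cfsrvl *service)
{
	kref_put(&service->ref, cfsrvl_release);
}
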
diff --git a/net/caif/cfutill.c b/net/caif/cfutill.c
index 315c0d601368..98e027db18ed 100644
--- a/net/caif/cfutill.c
+++ b/net/caif/cfutill.c
@@ -100,10 +100,5 @@ static int cfutill_transmit(struct cflayer *layr, struct cfpkt *pkt)
100 */ 100 */
101 info->hdr_len = 1; 101 info->hdr_len = 1;
102 info->dev_info = &service->dev_info; 102 info->dev_info = &service->dev_info;
103 ret = layr->dn->transmit(layr->dn, pkt); 103 return layr->dn->transmit(layr->dn, pkt);
104 if (ret < 0) {
105 u32 tmp32;
106 cfpkt_extr_head(pkt, &tmp32, 4);
107 }
108 return ret;
109} 104}
diff --git a/net/caif/cfveil.c b/net/caif/cfveil.c
index c3b1dec4acf6..1a588cd818ea 100644
--- a/net/caif/cfveil.c
+++ b/net/caif/cfveil.c
@@ -96,8 +96,5 @@ static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt)
96 info->channel_id = service->layer.id; 96 info->channel_id = service->layer.id;
97 info->hdr_len = 1; 97 info->hdr_len = 1;
98 info->dev_info = &service->dev_info; 98 info->dev_info = &service->dev_info;
99 ret = layr->dn->transmit(layr->dn, pkt); 99 return layr->dn->transmit(layr->dn, pkt);
100 if (ret < 0)
101 cfpkt_extr_head(pkt, &tmp, 1);
102 return ret;
103} 100}
diff --git a/net/caif/cfvidl.c b/net/caif/cfvidl.c
index bf6fef2a0eff..b2f5989ad455 100644
--- a/net/caif/cfvidl.c
+++ b/net/caif/cfvidl.c
@@ -60,8 +60,5 @@ static int cfvidl_transmit(struct cflayer *layr, struct cfpkt *pkt)
60 info = cfpkt_info(pkt); 60 info = cfpkt_info(pkt);
61 info->channel_id = service->layer.id; 61 info->channel_id = service->layer.id;
62 info->dev_info = &service->dev_info; 62 info->dev_info = &service->dev_info;
63 ret = layr->dn->transmit(layr->dn, pkt); 63 return layr->dn->transmit(layr->dn, pkt);
64 if (ret < 0)
65 cfpkt_extr_head(pkt, &videoheader, 4);
66 return ret;
67} 64}
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 733d66f1b05a..a8dcaa49675a 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -85,7 +85,7 @@ static struct kmem_cache *rcv_cache __read_mostly;
85 85
86/* table of registered CAN protocols */ 86/* table of registered CAN protocols */
87static struct can_proto *proto_tab[CAN_NPROTO] __read_mostly; 87static struct can_proto *proto_tab[CAN_NPROTO] __read_mostly;
88static DEFINE_SPINLOCK(proto_tab_lock); 88static DEFINE_MUTEX(proto_tab_lock);
89 89
90struct timer_list can_stattimer; /* timer for statistics update */ 90struct timer_list can_stattimer; /* timer for statistics update */
91struct s_stats can_stats; /* packet statistics */ 91struct s_stats can_stats; /* packet statistics */
@@ -115,6 +115,19 @@ static void can_sock_destruct(struct sock *sk)
115 skb_queue_purge(&sk->sk_receive_queue); 115 skb_queue_purge(&sk->sk_receive_queue);
116} 116}
117 117
118static struct can_proto *can_try_module_get(int protocol)
119{
120 struct can_proto *cp;
121
122 rcu_read_lock();
123 cp = rcu_dereference(proto_tab[protocol]);
124 if (cp && !try_module_get(cp->prot->owner))
125 cp = NULL;
126 rcu_read_unlock();
127
128 return cp;
129}
130
118static int can_create(struct net *net, struct socket *sock, int protocol, 131static int can_create(struct net *net, struct socket *sock, int protocol,
119 int kern) 132 int kern)
120{ 133{
@@ -130,9 +143,12 @@ static int can_create(struct net *net, struct socket *sock, int protocol,
130 if (!net_eq(net, &init_net)) 143 if (!net_eq(net, &init_net))
131 return -EAFNOSUPPORT; 144 return -EAFNOSUPPORT;
132 145
146 cp = can_try_module_get(protocol);
147
133#ifdef CONFIG_MODULES 148#ifdef CONFIG_MODULES
134 /* try to load protocol module kernel is modular */ 149 if (!cp) {
135 if (!proto_tab[protocol]) { 150 /* try to load protocol module if kernel is modular */
151
136 err = request_module("can-proto-%d", protocol); 152 err = request_module("can-proto-%d", protocol);
137 153
138 /* 154 /*
@@ -143,22 +159,18 @@ static int can_create(struct net *net, struct socket *sock, int protocol,
143 if (err && printk_ratelimit()) 159 if (err && printk_ratelimit())
144 printk(KERN_ERR "can: request_module " 160 printk(KERN_ERR "can: request_module "
145 "(can-proto-%d) failed.\n", protocol); 161 "(can-proto-%d) failed.\n", protocol);
162
163 cp = can_try_module_get(protocol);
146 } 164 }
147#endif 165#endif
148 166
149 spin_lock(&proto_tab_lock);
150 cp = proto_tab[protocol];
151 if (cp && !try_module_get(cp->prot->owner))
152 cp = NULL;
153 spin_unlock(&proto_tab_lock);
154
155 /* check for available protocol and correct usage */ 167 /* check for available protocol and correct usage */
156 168
157 if (!cp) 169 if (!cp)
158 return -EPROTONOSUPPORT; 170 return -EPROTONOSUPPORT;
159 171
160 if (cp->type != sock->type) { 172 if (cp->type != sock->type) {
161 err = -EPROTONOSUPPORT; 173 err = -EPROTOTYPE;
162 goto errout; 174 goto errout;
163 } 175 }
164 176
@@ -694,15 +706,16 @@ int can_proto_register(struct can_proto *cp)
694 if (err < 0) 706 if (err < 0)
695 return err; 707 return err;
696 708
697 spin_lock(&proto_tab_lock); 709 mutex_lock(&proto_tab_lock);
710
698 if (proto_tab[proto]) { 711 if (proto_tab[proto]) {
699 printk(KERN_ERR "can: protocol %d already registered\n", 712 printk(KERN_ERR "can: protocol %d already registered\n",
700 proto); 713 proto);
701 err = -EBUSY; 714 err = -EBUSY;
702 } else 715 } else
703 proto_tab[proto] = cp; 716 rcu_assign_pointer(proto_tab[proto], cp);
704 717
705 spin_unlock(&proto_tab_lock); 718 mutex_unlock(&proto_tab_lock);
706 719
707 if (err < 0) 720 if (err < 0)
708 proto_unregister(cp->prot); 721 proto_unregister(cp->prot);
@@ -719,13 +732,12 @@ void can_proto_unregister(struct can_proto *cp)
719{ 732{
720 int proto = cp->protocol; 733 int proto = cp->protocol;
721 734
722 spin_lock(&proto_tab_lock); 735 mutex_lock(&proto_tab_lock);
723 if (!proto_tab[proto]) { 736 BUG_ON(proto_tab[proto] != cp);
724 printk(KERN_ERR "BUG: can: protocol %d is not registered\n", 737 rcu_assign_pointer(proto_tab[proto], NULL);
725 proto); 738 mutex_unlock(&proto_tab_lock);
726 } 739
727 proto_tab[proto] = NULL; 740 synchronize_rcu();
728 spin_unlock(&proto_tab_lock);
729 741
730 proto_unregister(cp->prot); 742 proto_unregister(cp->prot);
731} 743}
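
The af_can.c conversion makes proto_tab an RCU-protected array: socket creation reads it locklessly and pins the owning module before leaving the read-side section, while register/unregister serialize on a mutex (they run in process context, so sleeping is fine) and wait out a grace period before the protocol may go away. The create path also returns the more precise -EPROTOTYPE when the protocol exists but the socket type does not match. A sketch of both sides of the scheme:

/* reader: no lock, but the module must be pinned inside the RCU section */
static struct can_proto *lookup_proto(int protocol)
{
	struct can_proto *cp;

	rcu_read_lock();
	cp = rcu_dereference(proto_tab[protocol]);
	if (cp && !try_module_get(cp->prot->owner))
		cp = NULL;			/* module already unloading */
	rcu_read_unlock();

	return cp;
}

/* writer: the mutex serializes updates, a grace period precedes teardown */
static void unpublish_proto(int proto)
{
	mutex_lock(&proto_tab_lock);
	rcu_assign_pointer(proto_tab[proto], NULL);
	mutex_unlock(&proto_tab_lock);

	synchronize_rcu();	/* no reader can still see the old pointer */
}
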
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 871a0ad51025..57b1aed79014 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -387,7 +387,7 @@ static void bcm_tx_timeout_tsklet(unsigned long data)
387} 387}
388 388
389/* 389/*
390 * bcm_tx_timeout_handler - performes cyclic CAN frame transmissions 390 * bcm_tx_timeout_handler - performs cyclic CAN frame transmissions
391 */ 391 */
392static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer) 392static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
393{ 393{
diff --git a/net/ceph/Kconfig b/net/ceph/Kconfig
index ad424049b0cf..be683f2d401f 100644
--- a/net/ceph/Kconfig
+++ b/net/ceph/Kconfig
@@ -4,6 +4,7 @@ config CEPH_LIB
4 select LIBCRC32C 4 select LIBCRC32C
5 select CRYPTO_AES 5 select CRYPTO_AES
6 select CRYPTO 6 select CRYPTO
7 select KEYS
7 default n 8 default n
8 help 9 help
9 Choose Y or M here to include cephlib, which provides the 10 Choose Y or M here to include cephlib, which provides the
diff --git a/net/ceph/auth.c b/net/ceph/auth.c
index 549c1f43e1d5..b4bf4ac090f1 100644
--- a/net/ceph/auth.c
+++ b/net/ceph/auth.c
@@ -35,12 +35,12 @@ static int ceph_auth_init_protocol(struct ceph_auth_client *ac, int protocol)
35/* 35/*
36 * setup, teardown. 36 * setup, teardown.
37 */ 37 */
38struct ceph_auth_client *ceph_auth_init(const char *name, const char *secret) 38struct ceph_auth_client *ceph_auth_init(const char *name, const struct ceph_crypto_key *key)
39{ 39{
40 struct ceph_auth_client *ac; 40 struct ceph_auth_client *ac;
41 int ret; 41 int ret;
42 42
43 dout("auth_init name '%s' secret '%s'\n", name, secret); 43 dout("auth_init name '%s'\n", name);
44 44
45 ret = -ENOMEM; 45 ret = -ENOMEM;
46 ac = kzalloc(sizeof(*ac), GFP_NOFS); 46 ac = kzalloc(sizeof(*ac), GFP_NOFS);
@@ -52,8 +52,8 @@ struct ceph_auth_client *ceph_auth_init(const char *name, const char *secret)
52 ac->name = name; 52 ac->name = name;
53 else 53 else
54 ac->name = CEPH_AUTH_NAME_DEFAULT; 54 ac->name = CEPH_AUTH_NAME_DEFAULT;
55 dout("auth_init name %s secret %s\n", ac->name, secret); 55 dout("auth_init name %s\n", ac->name);
56 ac->secret = secret; 56 ac->key = key;
57 return ac; 57 return ac;
58 58
59out: 59out:
diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
index 7fd5dfcf6e18..1587dc6010c6 100644
--- a/net/ceph/auth_x.c
+++ b/net/ceph/auth_x.c
@@ -662,14 +662,16 @@ int ceph_x_init(struct ceph_auth_client *ac)
662 goto out; 662 goto out;
663 663
664 ret = -EINVAL; 664 ret = -EINVAL;
665 if (!ac->secret) { 665 if (!ac->key) {
666 pr_err("no secret set (for auth_x protocol)\n"); 666 pr_err("no secret set (for auth_x protocol)\n");
667 goto out_nomem; 667 goto out_nomem;
668 } 668 }
669 669
670 ret = ceph_crypto_key_unarmor(&xi->secret, ac->secret); 670 ret = ceph_crypto_key_clone(&xi->secret, ac->key);
671 if (ret) 671 if (ret < 0) {
672 pr_err("cannot clone key: %d\n", ret);
672 goto out_nomem; 673 goto out_nomem;
674 }
673 675
674 xi->starting = true; 676 xi->starting = true;
675 xi->ticket_handlers = RB_ROOT; 677 xi->ticket_handlers = RB_ROOT;
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index 95f96ab94bba..132963abc266 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -5,6 +5,8 @@
5#include <linux/fs.h> 5#include <linux/fs.h>
6#include <linux/inet.h> 6#include <linux/inet.h>
7#include <linux/in6.h> 7#include <linux/in6.h>
8#include <linux/key.h>
9#include <keys/ceph-type.h>
8#include <linux/module.h> 10#include <linux/module.h>
9#include <linux/mount.h> 11#include <linux/mount.h>
10#include <linux/parser.h> 12#include <linux/parser.h>
@@ -20,6 +22,7 @@
20#include <linux/ceph/decode.h> 22#include <linux/ceph/decode.h>
21#include <linux/ceph/mon_client.h> 23#include <linux/ceph/mon_client.h>
22#include <linux/ceph/auth.h> 24#include <linux/ceph/auth.h>
25#include "crypto.h"
23 26
24 27
25 28
@@ -117,9 +120,29 @@ int ceph_compare_options(struct ceph_options *new_opt,
117 if (ret) 120 if (ret)
118 return ret; 121 return ret;
119 122
120 ret = strcmp_null(opt1->secret, opt2->secret); 123 if (opt1->key && !opt2->key)
121 if (ret) 124 return -1;
122 return ret; 125 if (!opt1->key && opt2->key)
126 return 1;
127 if (opt1->key && opt2->key) {
128 if (opt1->key->type != opt2->key->type)
129 return -1;
130 if (opt1->key->created.tv_sec != opt2->key->created.tv_sec)
131 return -1;
132 if (opt1->key->created.tv_nsec != opt2->key->created.tv_nsec)
133 return -1;
134 if (opt1->key->len != opt2->key->len)
135 return -1;
136 if (opt1->key->key && !opt2->key->key)
137 return -1;
138 if (!opt1->key->key && opt2->key->key)
139 return 1;
140 if (opt1->key->key && opt2->key->key) {
141 ret = memcmp(opt1->key->key, opt2->key->key, opt1->key->len);
142 if (ret)
143 return ret;
144 }
145 }
123 146
124 /* any matching mon ip implies a match */ 147 /* any matching mon ip implies a match */
125 for (i = 0; i < opt1->num_mon; i++) { 148 for (i = 0; i < opt1->num_mon; i++) {
@@ -176,6 +199,7 @@ enum {
176 Opt_fsid, 199 Opt_fsid,
177 Opt_name, 200 Opt_name,
178 Opt_secret, 201 Opt_secret,
202 Opt_key,
179 Opt_ip, 203 Opt_ip,
180 Opt_last_string, 204 Opt_last_string,
181 /* string args above */ 205 /* string args above */
@@ -192,6 +216,7 @@ static match_table_t opt_tokens = {
192 {Opt_fsid, "fsid=%s"}, 216 {Opt_fsid, "fsid=%s"},
193 {Opt_name, "name=%s"}, 217 {Opt_name, "name=%s"},
194 {Opt_secret, "secret=%s"}, 218 {Opt_secret, "secret=%s"},
219 {Opt_key, "key=%s"},
195 {Opt_ip, "ip=%s"}, 220 {Opt_ip, "ip=%s"},
196 /* string args above */ 221 /* string args above */
197 {Opt_noshare, "noshare"}, 222 {Opt_noshare, "noshare"},
@@ -203,11 +228,56 @@ void ceph_destroy_options(struct ceph_options *opt)
203{ 228{
204 dout("destroy_options %p\n", opt); 229 dout("destroy_options %p\n", opt);
205 kfree(opt->name); 230 kfree(opt->name);
206 kfree(opt->secret); 231 if (opt->key) {
232 ceph_crypto_key_destroy(opt->key);
233 kfree(opt->key);
234 }
207 kfree(opt); 235 kfree(opt);
208} 236}
209EXPORT_SYMBOL(ceph_destroy_options); 237EXPORT_SYMBOL(ceph_destroy_options);
210 238
239/* get secret from key store */
240static int get_secret(struct ceph_crypto_key *dst, const char *name) {
241 struct key *ukey;
242 int key_err;
243 int err = 0;
244 struct ceph_crypto_key *ckey;
245
246 ukey = request_key(&key_type_ceph, name, NULL);
247 if (!ukey || IS_ERR(ukey)) {
248 /* request_key errors don't map nicely to mount(2)
249 errors; don't even try, but still printk */
250 key_err = PTR_ERR(ukey);
251 switch (key_err) {
252 case -ENOKEY:
253 pr_warning("ceph: Mount failed due to key not found: %s\n", name);
254 break;
255 case -EKEYEXPIRED:
256 pr_warning("ceph: Mount failed due to expired key: %s\n", name);
257 break;
258 case -EKEYREVOKED:
259 pr_warning("ceph: Mount failed due to revoked key: %s\n", name);
260 break;
261 default:
262 pr_warning("ceph: Mount failed due to unknown key error"
263 " %d: %s\n", key_err, name);
264 }
265 err = -EPERM;
266 goto out;
267 }
268
269 ckey = ukey->payload.data;
270 err = ceph_crypto_key_clone(dst, ckey);
271 if (err)
272 goto out_key;
273 /* pass through, err is 0 */
274
275out_key:
276 key_put(ukey);
277out:
278 return err;
279}
280
211int ceph_parse_options(struct ceph_options **popt, char *options, 281int ceph_parse_options(struct ceph_options **popt, char *options,
212 const char *dev_name, const char *dev_name_end, 282 const char *dev_name, const char *dev_name_end,
213 int (*parse_extra_token)(char *c, void *private), 283 int (*parse_extra_token)(char *c, void *private),
@@ -295,9 +365,24 @@ int ceph_parse_options(struct ceph_options **popt, char *options,
295 GFP_KERNEL); 365 GFP_KERNEL);
296 break; 366 break;
297 case Opt_secret: 367 case Opt_secret:
298 opt->secret = kstrndup(argstr[0].from, 368 opt->key = kzalloc(sizeof(*opt->key), GFP_KERNEL);
299 argstr[0].to-argstr[0].from, 369 if (!opt->key) {
300 GFP_KERNEL); 370 err = -ENOMEM;
371 goto out;
372 }
373 err = ceph_crypto_key_unarmor(opt->key, argstr[0].from);
374 if (err < 0)
375 goto out;
376 break;
377 case Opt_key:
378 opt->key = kzalloc(sizeof(*opt->key), GFP_KERNEL);
379 if (!opt->key) {
380 err = -ENOMEM;
381 goto out;
382 }
383 err = get_secret(opt->key, argstr[0].from);
384 if (err < 0)
385 goto out;
301 break; 386 break;
302 387
303 /* misc */ 388 /* misc */
@@ -394,8 +479,8 @@ void ceph_destroy_client(struct ceph_client *client)
394 ceph_osdc_stop(&client->osdc); 479 ceph_osdc_stop(&client->osdc);
395 480
396 /* 481 /*
397 * make sure mds and osd connections close out before destroying 482 * make sure osd connections close out before destroying the
398 * the auth module, which is needed to free those connections' 483 * auth module, which is needed to free those connections'
399 * ceph_authorizers. 484 * ceph_authorizers.
400 */ 485 */
401 ceph_msgr_flush(); 486 ceph_msgr_flush();
@@ -496,10 +581,14 @@ static int __init init_ceph_lib(void)
496 if (ret < 0) 581 if (ret < 0)
497 goto out; 582 goto out;
498 583
499 ret = ceph_msgr_init(); 584 ret = ceph_crypto_init();
500 if (ret < 0) 585 if (ret < 0)
501 goto out_debugfs; 586 goto out_debugfs;
502 587
588 ret = ceph_msgr_init();
589 if (ret < 0)
590 goto out_crypto;
591
503 pr_info("loaded (mon/osd proto %d/%d, osdmap %d/%d %d/%d)\n", 592 pr_info("loaded (mon/osd proto %d/%d, osdmap %d/%d %d/%d)\n",
504 CEPH_MONC_PROTOCOL, CEPH_OSDC_PROTOCOL, 593 CEPH_MONC_PROTOCOL, CEPH_OSDC_PROTOCOL,
505 CEPH_OSDMAP_VERSION, CEPH_OSDMAP_VERSION_EXT, 594 CEPH_OSDMAP_VERSION, CEPH_OSDMAP_VERSION_EXT,
@@ -507,6 +596,8 @@ static int __init init_ceph_lib(void)
507 596
508 return 0; 597 return 0;
509 598
599out_crypto:
600 ceph_crypto_shutdown();
510out_debugfs: 601out_debugfs:
511 ceph_debugfs_cleanup(); 602 ceph_debugfs_cleanup();
512out: 603out:
@@ -517,6 +608,7 @@ static void __exit exit_ceph_lib(void)
517{ 608{
518 dout("exit_ceph_lib\n"); 609 dout("exit_ceph_lib\n");
519 ceph_msgr_exit(); 610 ceph_msgr_exit();
611 ceph_crypto_shutdown();
520 ceph_debugfs_cleanup(); 612 ceph_debugfs_cleanup();
521} 613}
522 614
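
The secret comparison in ceph_compare_options() is now field-by-field over the parsed key rather than a strcmp() of the armored string. The open-coded chain above could be expressed as a small predicate; ceph_crypto_key_equal() below is hypothetical, not part of the patch:

static bool ceph_crypto_key_equal(const struct ceph_crypto_key *a,
				  const struct ceph_crypto_key *b)
{
	return a->type == b->type &&
	       a->created.tv_sec == b->created.tv_sec &&
	       a->created.tv_nsec == b->created.tv_nsec &&
	       a->len == b->len &&
	       ((!a->key && !b->key) ||
		(a->key && b->key && memcmp(a->key, b->key, a->len) == 0));
}
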
diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
index 7b505b0c983f..5a8009c9e0cd 100644
--- a/net/ceph/crypto.c
+++ b/net/ceph/crypto.c
@@ -5,10 +5,23 @@
5#include <linux/scatterlist.h> 5#include <linux/scatterlist.h>
6#include <linux/slab.h> 6#include <linux/slab.h>
7#include <crypto/hash.h> 7#include <crypto/hash.h>
8#include <linux/key-type.h>
8 9
10#include <keys/ceph-type.h>
9#include <linux/ceph/decode.h> 11#include <linux/ceph/decode.h>
10#include "crypto.h" 12#include "crypto.h"
11 13
14int ceph_crypto_key_clone(struct ceph_crypto_key *dst,
15 const struct ceph_crypto_key *src)
16{
17 memcpy(dst, src, sizeof(struct ceph_crypto_key));
18 dst->key = kmalloc(src->len, GFP_NOFS);
19 if (!dst->key)
20 return -ENOMEM;
21 memcpy(dst->key, src->key, src->len);
22 return 0;
23}
24
12int ceph_crypto_key_encode(struct ceph_crypto_key *key, void **p, void *end) 25int ceph_crypto_key_encode(struct ceph_crypto_key *key, void **p, void *end)
13{ 26{
14 if (*p + sizeof(u16) + sizeof(key->created) + 27 if (*p + sizeof(u16) + sizeof(key->created) +
@@ -410,3 +423,63 @@ int ceph_encrypt2(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
410 return -EINVAL; 423 return -EINVAL;
411 } 424 }
412} 425}
426
427int ceph_key_instantiate(struct key *key, const void *data, size_t datalen)
428{
429 struct ceph_crypto_key *ckey;
430 int ret;
431 void *p;
432
433 ret = -EINVAL;
434 if (datalen <= 0 || datalen > 32767 || !data)
435 goto err;
436
437 ret = key_payload_reserve(key, datalen);
438 if (ret < 0)
439 goto err;
440
441 ret = -ENOMEM;
442 ckey = kmalloc(sizeof(*ckey), GFP_KERNEL);
443 if (!ckey)
444 goto err;
445
446 /* TODO ceph_crypto_key_decode should really take const input */
447 p = (void*)data;
448 ret = ceph_crypto_key_decode(ckey, &p, (char*)data+datalen);
449 if (ret < 0)
450 goto err_ckey;
451
452 key->payload.data = ckey;
453 return 0;
454
455err_ckey:
456 kfree(ckey);
457err:
458 return ret;
459}
460
461int ceph_key_match(const struct key *key, const void *description)
462{
463 return strcmp(key->description, description) == 0;
464}
465
466void ceph_key_destroy(struct key *key) {
467 struct ceph_crypto_key *ckey = key->payload.data;
468
469 ceph_crypto_key_destroy(ckey);
470}
471
472struct key_type key_type_ceph = {
473 .name = "ceph",
474 .instantiate = ceph_key_instantiate,
475 .match = ceph_key_match,
476 .destroy = ceph_key_destroy,
477};
478
479int ceph_crypto_init(void) {
480 return register_key_type(&key_type_ceph);
481}
482
483void ceph_crypto_shutdown(void) {
484 unregister_key_type(&key_type_ceph);
485}
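
With key_type_ceph registered, userspace can stash the secret in the kernel keyring and reference it by description through the new key= option, instead of passing the armored secret on the command line. A userspace sketch assuming libkeyutils; the description "client.admin" and the payload contents are examples, and the payload must be the binary format ceph_crypto_key_decode() expects:

#include <keyutils.h>
#include <stdio.h>

int main(void)
{
	static const unsigned char payload[] = { 0x01 /* binary key blob */ };
	key_serial_t k;

	k = add_key("ceph", "client.admin", payload, sizeof(payload),
		    KEY_SPEC_USER_KEYRING);
	if (k < 0)
		perror("add_key");

	return 0;
}
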
diff --git a/net/ceph/crypto.h b/net/ceph/crypto.h
index f9eccace592b..1919d1550d75 100644
--- a/net/ceph/crypto.h
+++ b/net/ceph/crypto.h
@@ -19,6 +19,8 @@ static inline void ceph_crypto_key_destroy(struct ceph_crypto_key *key)
19 kfree(key->key); 19 kfree(key->key);
20} 20}
21 21
22extern int ceph_crypto_key_clone(struct ceph_crypto_key *dst,
23 const struct ceph_crypto_key *src);
22extern int ceph_crypto_key_encode(struct ceph_crypto_key *key, 24extern int ceph_crypto_key_encode(struct ceph_crypto_key *key,
23 void **p, void *end); 25 void **p, void *end);
24extern int ceph_crypto_key_decode(struct ceph_crypto_key *key, 26extern int ceph_crypto_key_decode(struct ceph_crypto_key *key,
@@ -40,6 +42,8 @@ extern int ceph_encrypt2(struct ceph_crypto_key *secret,
40 void *dst, size_t *dst_len, 42 void *dst, size_t *dst_len,
41 const void *src1, size_t src1_len, 43 const void *src1, size_t src1_len,
42 const void *src2, size_t src2_len); 44 const void *src2, size_t src2_len);
45extern int ceph_crypto_init(void);
46extern void ceph_crypto_shutdown(void);
43 47
44/* armor.c */ 48/* armor.c */
45extern int ceph_armor(char *dst, const char *src, const char *end); 49extern int ceph_armor(char *dst, const char *src, const char *end);
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index 8a079399174a..cbe31fa45508 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -759,7 +759,7 @@ int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl)
759 759
760 /* authentication */ 760 /* authentication */
761 monc->auth = ceph_auth_init(cl->options->name, 761 monc->auth = ceph_auth_init(cl->options->name,
762 cl->options->secret); 762 cl->options->key);
763 if (IS_ERR(monc->auth)) 763 if (IS_ERR(monc->auth))
764 return PTR_ERR(monc->auth); 764 return PTR_ERR(monc->auth);
765 monc->auth->want_keys = 765 monc->auth->want_keys =
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 02212ed50852..50af02737a3d 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -837,8 +837,7 @@ static void __unregister_request(struct ceph_osd_client *osdc,
837 dout("moving osd to %p lru\n", req->r_osd); 837 dout("moving osd to %p lru\n", req->r_osd);
838 __move_osd_to_lru(osdc, req->r_osd); 838 __move_osd_to_lru(osdc, req->r_osd);
839 } 839 }
840 if (list_empty(&req->r_osd_item) && 840 if (list_empty(&req->r_linger_item))
841 list_empty(&req->r_linger_item))
842 req->r_osd = NULL; 841 req->r_osd = NULL;
843 } 842 }
844 843
@@ -883,7 +882,8 @@ static void __unregister_linger_request(struct ceph_osd_client *osdc,
883 dout("moving osd to %p lru\n", req->r_osd); 882 dout("moving osd to %p lru\n", req->r_osd);
884 __move_osd_to_lru(osdc, req->r_osd); 883 __move_osd_to_lru(osdc, req->r_osd);
885 } 884 }
886 req->r_osd = NULL; 885 if (list_empty(&req->r_osd_item))
886 req->r_osd = NULL;
887 } 887 }
888} 888}
889 889
@@ -917,7 +917,7 @@ EXPORT_SYMBOL(ceph_osdc_set_request_linger);
917/* 917/*
918 * Pick an osd (the first 'up' osd in the pg), allocate the osd struct 918 * Pick an osd (the first 'up' osd in the pg), allocate the osd struct
919 * (as needed), and set the request r_osd appropriately. If there is 919 * (as needed), and set the request r_osd appropriately. If there is
920 * no up osd, set r_osd to NULL. Move the request to the appropiate list 920 * no up osd, set r_osd to NULL. Move the request to the appropriate list
921 * (unsent, homeless) or leave on in-flight lru. 921 * (unsent, homeless) or leave on in-flight lru.
922 * 922 *
923 * Return 0 if unchanged, 1 if changed, or negative on error. 923 * Return 0 if unchanged, 1 if changed, or negative on error.
@@ -1602,11 +1602,11 @@ void handle_watch_notify(struct ceph_osd_client *osdc, struct ceph_msg *msg)
1602 cookie, ver, event); 1602 cookie, ver, event);
1603 if (event) { 1603 if (event) {
1604 event_work = kmalloc(sizeof(*event_work), GFP_NOIO); 1604 event_work = kmalloc(sizeof(*event_work), GFP_NOIO);
1605 INIT_WORK(&event_work->work, do_event_work);
1606 if (!event_work) { 1605 if (!event_work) {
1607 dout("ERROR: could not allocate event_work\n"); 1606 dout("ERROR: could not allocate event_work\n");
1608 goto done_err; 1607 goto done_err;
1609 } 1608 }
1609 INIT_WORK(&event_work->work, do_event_work);
1610 event_work->event = event; 1610 event_work->event = event;
1611 event_work->ver = ver; 1611 event_work->ver = ver;
1612 event_work->notify_id = notify_id; 1612 event_work->notify_id = notify_id;
@@ -1672,7 +1672,7 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc,
1672 if (req->r_sent == 0) { 1672 if (req->r_sent == 0) {
1673 rc = __map_request(osdc, req); 1673 rc = __map_request(osdc, req);
1674 if (rc < 0) 1674 if (rc < 0)
1675 return rc; 1675 goto out_unlock;
1676 if (req->r_osd == NULL) { 1676 if (req->r_osd == NULL) {
1677 dout("send_request %p no up osds in pg\n", req); 1677 dout("send_request %p no up osds in pg\n", req);
1678 ceph_monc_request_next_osdmap(&osdc->client->monc); 1678 ceph_monc_request_next_osdmap(&osdc->client->monc);
@@ -1689,6 +1689,8 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc,
1689 } 1689 }
1690 } 1690 }
1691 } 1691 }
1692
1693out_unlock:
1692 mutex_unlock(&osdc->request_mutex); 1694 mutex_unlock(&osdc->request_mutex);
1693 up_read(&osdc->map_sem); 1695 up_read(&osdc->map_sem);
1694 return rc; 1696 return rc;
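
The ceph_osdc_start_request() fix is a classic unlock-on-error-path bug: the early return after a failed __map_request() left both request_mutex and map_sem held. The handle_watch_notify() hunk is the same class of mistake in miniature, moving INIT_WORK() after the allocation check so a failed kmalloc() is not dereferenced. A condensed sketch of the corrected shape (function name illustrative, middle elided):

static int start_one(struct ceph_osd_client *osdc,
		     struct ceph_osd_request *req)
{
	int rc;

	down_read(&osdc->map_sem);
	mutex_lock(&osdc->request_mutex);

	rc = __map_request(osdc, req);
	if (rc < 0)
		goto out_unlock;	/* a bare return here leaks both locks */

	/* map succeeded: send or queue the request (elided in this sketch) */

out_unlock:
	mutex_unlock(&osdc->request_mutex);
	up_read(&osdc->map_sem);
	return rc;
}
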
diff --git a/net/core/dev.c b/net/core/dev.c
index 3da9fb06d47a..379c993ff421 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2091,7 +2091,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2091 u32 features; 2091 u32 features;
2092 2092
2093 /* 2093 /*
2094 * If device doesnt need skb->dst, release it right now while 2094 * If device doesn't need skb->dst, release it right now while
2095 * its hot in this cpu cache 2095 * its hot in this cpu cache
2096 */ 2096 */
2097 if (dev->priv_flags & IFF_XMIT_DST_RELEASE) 2097 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
@@ -2151,7 +2151,7 @@ gso:
2151 nskb->next = NULL; 2151 nskb->next = NULL;
2152 2152
2153 /* 2153 /*
2154 * If device doesnt need nskb->dst, release it right now while 2154 * If device doesn't need nskb->dst, release it right now while
2155 * its hot in this cpu cache 2155 * its hot in this cpu cache
2156 */ 2156 */
2157 if (dev->priv_flags & IFF_XMIT_DST_RELEASE) 2157 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
@@ -2502,8 +2502,8 @@ static inline void ____napi_schedule(struct softnet_data *sd,
2502__u32 __skb_get_rxhash(struct sk_buff *skb) 2502__u32 __skb_get_rxhash(struct sk_buff *skb)
2503{ 2503{
2504 int nhoff, hash = 0, poff; 2504 int nhoff, hash = 0, poff;
2505 struct ipv6hdr *ip6; 2505 const struct ipv6hdr *ip6;
2506 struct iphdr *ip; 2506 const struct iphdr *ip;
2507 u8 ip_proto; 2507 u8 ip_proto;
2508 u32 addr1, addr2, ihl; 2508 u32 addr1, addr2, ihl;
2509 union { 2509 union {
@@ -2518,7 +2518,7 @@ __u32 __skb_get_rxhash(struct sk_buff *skb)
2518 if (!pskb_may_pull(skb, sizeof(*ip) + nhoff)) 2518 if (!pskb_may_pull(skb, sizeof(*ip) + nhoff))
2519 goto done; 2519 goto done;
2520 2520
2521 ip = (struct iphdr *) (skb->data + nhoff); 2521 ip = (const struct iphdr *) (skb->data + nhoff);
2522 if (ip->frag_off & htons(IP_MF | IP_OFFSET)) 2522 if (ip->frag_off & htons(IP_MF | IP_OFFSET))
2523 ip_proto = 0; 2523 ip_proto = 0;
2524 else 2524 else
@@ -2531,7 +2531,7 @@ __u32 __skb_get_rxhash(struct sk_buff *skb)
2531 if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff)) 2531 if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff))
2532 goto done; 2532 goto done;
2533 2533
2534 ip6 = (struct ipv6hdr *) (skb->data + nhoff); 2534 ip6 = (const struct ipv6hdr *) (skb->data + nhoff);
2535 ip_proto = ip6->nexthdr; 2535 ip_proto = ip6->nexthdr;
2536 addr1 = (__force u32) ip6->saddr.s6_addr32[3]; 2536 addr1 = (__force u32) ip6->saddr.s6_addr32[3];
2537 addr2 = (__force u32) ip6->daddr.s6_addr32[3]; 2537 addr2 = (__force u32) ip6->daddr.s6_addr32[3];
@@ -2970,8 +2970,8 @@ EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
2970 * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions 2970 * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions
2971 * a compare and 2 stores extra right now if we dont have it on 2971 * a compare and 2 stores extra right now if we dont have it on
2972 * but have CONFIG_NET_CLS_ACT 2972 * but have CONFIG_NET_CLS_ACT
2973 * NOTE: This doesnt stop any functionality; if you dont have 2973 * NOTE: This doesn't stop any functionality; if you don't have
2974 * the ingress scheduler, you just cant add policies on ingress. 2974 * the ingress scheduler, you just can't add policies on ingress.
2975 * 2975 *
2976 */ 2976 */
2977static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq) 2977static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
@@ -3130,6 +3130,12 @@ another_round:
3130 3130
3131 __this_cpu_inc(softnet_data.processed); 3131 __this_cpu_inc(softnet_data.processed);
3132 3132
3133 if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
3134 skb = vlan_untag(skb);
3135 if (unlikely(!skb))
3136 goto out;
3137 }
3138
3133#ifdef CONFIG_NET_CLS_ACT 3139#ifdef CONFIG_NET_CLS_ACT
3134 if (skb->tc_verd & TC_NCLS) { 3140 if (skb->tc_verd & TC_NCLS) {
3135 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd); 3141 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
@@ -3177,7 +3183,7 @@ ncls:
3177 ret = deliver_skb(skb, pt_prev, orig_dev); 3183 ret = deliver_skb(skb, pt_prev, orig_dev);
3178 pt_prev = NULL; 3184 pt_prev = NULL;
3179 } 3185 }
3180 if (vlan_hwaccel_do_receive(&skb)) { 3186 if (vlan_do_receive(&skb)) {
3181 ret = __netif_receive_skb(skb); 3187 ret = __netif_receive_skb(skb);
3182 goto out; 3188 goto out;
3183 } else if (unlikely(!skb)) 3189 } else if (unlikely(!skb))
@@ -3800,7 +3806,7 @@ static void net_rx_action(struct softirq_action *h)
3800 * with netpoll's poll_napi(). Only the entity which 3806 * with netpoll's poll_napi(). Only the entity which
3801 * obtains the lock and sees NAPI_STATE_SCHED set will 3807 * obtains the lock and sees NAPI_STATE_SCHED set will
3802 * actually make the ->poll() call. Therefore we avoid 3808 * actually make the ->poll() call. Therefore we avoid
3803 * accidently calling ->poll() when NAPI is not scheduled. 3809 * accidentally calling ->poll() when NAPI is not scheduled.
3804 */ 3810 */
3805 work = 0; 3811 work = 0;
3806 if (test_bit(NAPI_STATE_SCHED, &n->state)) { 3812 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
@@ -5203,11 +5209,15 @@ u32 netdev_fix_features(struct net_device *dev, u32 features)
5203 } 5209 }
5204 5210
5205 /* TSO requires that SG is present as well. */ 5211 /* TSO requires that SG is present as well. */
5206 if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) { 5212 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
5207 netdev_info(dev, "Dropping NETIF_F_TSO since no SG feature.\n"); 5213 netdev_info(dev, "Dropping TSO features since no SG feature.\n");
5208 features &= ~NETIF_F_TSO; 5214 features &= ~NETIF_F_ALL_TSO;
5209 } 5215 }
5210 5216
5217 /* TSO ECN requires that TSO is present as well. */
5218 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
5219 features &= ~NETIF_F_TSO_ECN;
5220
5211 /* Software GSO depends on SG. */ 5221 /* Software GSO depends on SG. */
5212 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) { 5222 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
5213 netdev_info(dev, "Dropping NETIF_F_GSO since no SG feature.\n"); 5223 netdev_info(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
@@ -5236,11 +5246,13 @@ u32 netdev_fix_features(struct net_device *dev, u32 features)
5236} 5246}
5237EXPORT_SYMBOL(netdev_fix_features); 5247EXPORT_SYMBOL(netdev_fix_features);
5238 5248
5239void netdev_update_features(struct net_device *dev) 5249int __netdev_update_features(struct net_device *dev)
5240{ 5250{
5241 u32 features; 5251 u32 features;
5242 int err = 0; 5252 int err = 0;
5243 5253
5254 ASSERT_RTNL();
5255
5244 features = netdev_get_wanted_features(dev); 5256 features = netdev_get_wanted_features(dev);
5245 5257
5246 if (dev->netdev_ops->ndo_fix_features) 5258 if (dev->netdev_ops->ndo_fix_features)
@@ -5250,7 +5262,7 @@ void netdev_update_features(struct net_device *dev)
5250 features = netdev_fix_features(dev, features); 5262 features = netdev_fix_features(dev, features);
5251 5263
5252 if (dev->features == features) 5264 if (dev->features == features)
5253 return; 5265 return 0;
5254 5266
5255 netdev_info(dev, "Features changed: 0x%08x -> 0x%08x\n", 5267 netdev_info(dev, "Features changed: 0x%08x -> 0x%08x\n",
5256 dev->features, features); 5268 dev->features, features);
@@ -5258,12 +5270,23 @@ void netdev_update_features(struct net_device *dev)
5258 if (dev->netdev_ops->ndo_set_features) 5270 if (dev->netdev_ops->ndo_set_features)
5259 err = dev->netdev_ops->ndo_set_features(dev, features); 5271 err = dev->netdev_ops->ndo_set_features(dev, features);
5260 5272
5261 if (!err) 5273 if (unlikely(err < 0)) {
5262 dev->features = features;
5263 else if (err < 0)
5264 netdev_err(dev, 5274 netdev_err(dev,
5265 "set_features() failed (%d); wanted 0x%08x, left 0x%08x\n", 5275 "set_features() failed (%d); wanted 0x%08x, left 0x%08x\n",
5266 err, features, dev->features); 5276 err, features, dev->features);
5277 return -1;
5278 }
5279
5280 if (!err)
5281 dev->features = features;
5282
5283 return 1;
5284}
5285
5286void netdev_update_features(struct net_device *dev)
5287{
5288 if (__netdev_update_features(dev))
5289 netdev_features_change(dev);
5267} 5290}
5268EXPORT_SYMBOL(netdev_update_features); 5291EXPORT_SYMBOL(netdev_update_features);
5269 5292
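The refactor above splits the RTNL-only worker from the notifier: __netdev_update_features() returns 1 when dev->features actually changed, 0 when nothing changed, and -1 on driver failure, while the netdev_update_features() wrapper fires netdev_features_change() only on a real change. A minimal sketch of wrapper-side use from a driver, assuming this 2.6.39-era API; example_dev_reconfigure() is hypothetical.

/* Sketch only: a driver toggling LRO from some configuration path. */
static void example_dev_reconfigure(struct net_device *dev, bool lro_ok)
{
	rtnl_lock();                    /* __netdev_update_features asserts RTNL */
	if (lro_ok)
		dev->wanted_features |= NETIF_F_LRO;
	else
		dev->wanted_features &= ~NETIF_F_LRO;
	netdev_update_features(dev);    /* notifies only if dev->features changed */
	rtnl_unlock();
}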
@@ -5414,6 +5437,14 @@ int register_netdevice(struct net_device *dev)
5414 dev->features &= ~NETIF_F_GSO; 5437 dev->features &= ~NETIF_F_GSO;
5415 } 5438 }
5416 5439
5440 /* Turn on no cache copy if HW is doing checksum */
5441 dev->hw_features |= NETIF_F_NOCACHE_COPY;
5442 if ((dev->features & NETIF_F_ALL_CSUM) &&
5443 !(dev->features & NETIF_F_NO_CSUM)) {
5444 dev->wanted_features |= NETIF_F_NOCACHE_COPY;
5445 dev->features |= NETIF_F_NOCACHE_COPY;
5446 }
5447
5417 /* Enable GRO and NETIF_F_HIGHDMA for vlans by default, 5448 /* Enable GRO and NETIF_F_HIGHDMA for vlans by default,
5418 * vlan_dev_init() will do the dev->features check, so these features 5449 * vlan_dev_init() will do the dev->features check, so these features
5419 * are enabled only if supported by underlying device. 5450 * are enabled only if supported by underlying device.
@@ -5430,7 +5461,7 @@ int register_netdevice(struct net_device *dev)
5430 goto err_uninit; 5461 goto err_uninit;
5431 dev->reg_state = NETREG_REGISTERED; 5462 dev->reg_state = NETREG_REGISTERED;
5432 5463
5433 netdev_update_features(dev); 5464 __netdev_update_features(dev);
5434 5465
5435 /* 5466 /*
5436 * Default initial state at registry is that the 5467 * Default initial state at registry is that the
@@ -6171,6 +6202,10 @@ u32 netdev_increment_features(u32 all, u32 one, u32 mask)
6171 } 6202 }
6172 } 6203 }
6173 6204
 6205 /* If one device can't do no-cache copy, don't do it for all */
6206 if (!(one & NETIF_F_NOCACHE_COPY))
6207 all &= ~NETIF_F_NOCACHE_COPY;
6208
6174 one |= NETIF_F_ALL_CSUM; 6209 one |= NETIF_F_ALL_CSUM;
6175 6210
6176 one |= all & NETIF_F_ONE_FOR_ALL; 6211 one |= all & NETIF_F_ONE_FOR_ALL;
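The added clause above makes NETIF_F_NOCACHE_COPY an all-or-nothing capability when a master (bridge/bond) folds slave features together: a single slave lacking the bit clears it for the aggregate. An illustrative user-space model of that folding rule, again with a placeholder flag value.

#include <stdio.h>

#define EX_NOCACHE_COPY (1u << 0)   /* placeholder, not the kernel's value */

static unsigned int fold(unsigned int all, unsigned int one)
{
	if (!(one & EX_NOCACHE_COPY))   /* one member can't: disable for all */
		all &= ~EX_NOCACHE_COPY;
	return all;
}

int main(void)
{
	unsigned int all = EX_NOCACHE_COPY;
	unsigned int members[] = { EX_NOCACHE_COPY, 0 /* lacks the bit */ };

	for (unsigned int i = 0; i < 2; i++)
		all = fold(all, members[i]);
	printf("aggregate nocache-copy: %s\n",
	       (all & EX_NOCACHE_COPY) ? "on" : "off");   /* prints: off */
	return 0;
}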
@@ -6336,7 +6371,7 @@ static void __net_exit default_device_exit(struct net *net)
6336 if (dev->rtnl_link_ops) 6371 if (dev->rtnl_link_ops)
6337 continue; 6372 continue;
6338 6373
6339 /* Push remaing network devices to init_net */ 6374 /* Push remaining network devices to init_net */
6340 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex); 6375 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
6341 err = dev_change_net_namespace(dev, &init_net, fb_name); 6376 err = dev_change_net_namespace(dev, &init_net, fb_name);
6342 if (err) { 6377 if (err) {
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 74ead9eca126..d8b1a8d85a96 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -21,6 +21,8 @@
21#include <linux/uaccess.h> 21#include <linux/uaccess.h>
22#include <linux/vmalloc.h> 22#include <linux/vmalloc.h>
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/rtnetlink.h>
25#include <linux/sched.h>
24 26
25/* 27/*
 26 * Some useful ethtool_ops methods that are device independent. 28 * Some useful ethtool_ops methods that are device independent.
@@ -317,7 +319,7 @@ static int ethtool_set_features(struct net_device *dev, void __user *useraddr)
317 319
318 dev->wanted_features &= ~features[0].valid; 320 dev->wanted_features &= ~features[0].valid;
319 dev->wanted_features |= features[0].valid & features[0].requested; 321 dev->wanted_features |= features[0].valid & features[0].requested;
320 netdev_update_features(dev); 322 __netdev_update_features(dev);
321 323
322 if ((dev->wanted_features ^ dev->features) & features[0].valid) 324 if ((dev->wanted_features ^ dev->features) & features[0].valid)
323 ret |= ETHTOOL_F_WISH; 325 ret |= ETHTOOL_F_WISH;
@@ -359,7 +361,7 @@ static const char netdev_features_strings[ETHTOOL_DEV_FEATURE_WORDS * 32][ETH_GS
359 /* NETIF_F_NTUPLE */ "rx-ntuple-filter", 361 /* NETIF_F_NTUPLE */ "rx-ntuple-filter",
360 /* NETIF_F_RXHASH */ "rx-hashing", 362 /* NETIF_F_RXHASH */ "rx-hashing",
361 /* NETIF_F_RXCSUM */ "rx-checksum", 363 /* NETIF_F_RXCSUM */ "rx-checksum",
362 "", 364 /* NETIF_F_NOCACHE_COPY */ "tx-nocache-copy"
363 "", 365 "",
364}; 366};
365 367
@@ -499,7 +501,7 @@ static int ethtool_set_one_feature(struct net_device *dev,
499 else 501 else
500 dev->wanted_features &= ~mask; 502 dev->wanted_features &= ~mask;
501 503
502 netdev_update_features(dev); 504 __netdev_update_features(dev);
503 return 0; 505 return 0;
504 } 506 }
505 507
@@ -544,14 +546,14 @@ int __ethtool_set_flags(struct net_device *dev, u32 data)
544 } 546 }
545 547
546 /* allow changing only bits set in hw_features */ 548 /* allow changing only bits set in hw_features */
547 changed = (data ^ dev->wanted_features) & flags_dup_features; 549 changed = (data ^ dev->features) & flags_dup_features;
548 if (changed & ~dev->hw_features) 550 if (changed & ~dev->hw_features)
549 return (changed & dev->hw_features) ? -EINVAL : -EOPNOTSUPP; 551 return (changed & dev->hw_features) ? -EINVAL : -EOPNOTSUPP;
550 552
551 dev->wanted_features = 553 dev->wanted_features =
552 (dev->wanted_features & ~changed) | data; 554 (dev->wanted_features & ~changed) | (data & dev->hw_features);
553 555
554 netdev_update_features(dev); 556 __netdev_update_features(dev);
555 557
556 return 0; 558 return 0;
557} 559}
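The fix above computes changed against the live dev->features rather than wanted_features, and masks the requested bits with hw_features before merging them in. The XOR-and-mask arithmetic, modeled with placeholder values:

#include <stdio.h>

int main(void)
{
	unsigned int features = 0x5;  /* placeholder: live device state    */
	unsigned int hw       = 0x7;  /* placeholder: changeable bits      */
	unsigned int request  = 0x3;  /* placeholder: ETHTOOL_SFLAGS data  */
	unsigned int changed  = (request ^ features) & hw;

	/* 0x6: bit 1 would be turned on, bit 2 turned off */
	printf("bits toggled: 0x%x\n", changed);
	return 0;
}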
@@ -908,6 +910,9 @@ static noinline_for_stack int ethtool_set_rx_ntuple(struct net_device *dev,
908 struct ethtool_rx_ntuple_flow_spec_container *fsc = NULL; 910 struct ethtool_rx_ntuple_flow_spec_container *fsc = NULL;
909 int ret; 911 int ret;
910 912
913 if (!ops->set_rx_ntuple)
914 return -EOPNOTSUPP;
915
911 if (!(dev->features & NETIF_F_NTUPLE)) 916 if (!(dev->features & NETIF_F_NTUPLE))
912 return -EINVAL; 917 return -EINVAL;
913 918
@@ -1441,6 +1446,35 @@ static int ethtool_set_ringparam(struct net_device *dev, void __user *useraddr)
1441 return dev->ethtool_ops->set_ringparam(dev, &ringparam); 1446 return dev->ethtool_ops->set_ringparam(dev, &ringparam);
1442} 1447}
1443 1448
1449static noinline_for_stack int ethtool_get_channels(struct net_device *dev,
1450 void __user *useraddr)
1451{
1452 struct ethtool_channels channels = { .cmd = ETHTOOL_GCHANNELS };
1453
1454 if (!dev->ethtool_ops->get_channels)
1455 return -EOPNOTSUPP;
1456
1457 dev->ethtool_ops->get_channels(dev, &channels);
1458
1459 if (copy_to_user(useraddr, &channels, sizeof(channels)))
1460 return -EFAULT;
1461 return 0;
1462}
1463
1464static noinline_for_stack int ethtool_set_channels(struct net_device *dev,
1465 void __user *useraddr)
1466{
1467 struct ethtool_channels channels;
1468
1469 if (!dev->ethtool_ops->set_channels)
1470 return -EOPNOTSUPP;
1471
1472 if (copy_from_user(&channels, useraddr, sizeof(channels)))
1473 return -EFAULT;
1474
1475 return dev->ethtool_ops->set_channels(dev, &channels);
1476}
1477
1444static int ethtool_get_pauseparam(struct net_device *dev, void __user *useraddr) 1478static int ethtool_get_pauseparam(struct net_device *dev, void __user *useraddr)
1445{ 1479{
1446 struct ethtool_pauseparam pauseparam = { ETHTOOL_GPAUSEPARAM }; 1480 struct ethtool_pauseparam pauseparam = { ETHTOOL_GPAUSEPARAM };
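The two handlers above implement the new ETHTOOL_GCHANNELS/ETHTOOL_SCHANNELS commands, dispatched from dev_ethtool() further down. A user-space sketch of querying them through the classic SIOCETHTOOL ioctl; "eth0" is a placeholder interface, and the call fails with EOPNOTSUPP where the driver has no get_channels method.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_channels ch = { .cmd = ETHTOOL_GCHANNELS };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);  /* placeholder name */
	ifr.ifr_data = (char *)&ch;

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("rx %u tx %u combined %u (max combined %u)\n",
		       ch.rx_count, ch.tx_count, ch.combined_count,
		       ch.max_combined);
	else
		perror("ETHTOOL_GCHANNELS");
	close(fd);
	return 0;
}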
@@ -1618,14 +1652,64 @@ out:
1618static int ethtool_phys_id(struct net_device *dev, void __user *useraddr) 1652static int ethtool_phys_id(struct net_device *dev, void __user *useraddr)
1619{ 1653{
1620 struct ethtool_value id; 1654 struct ethtool_value id;
1655 static bool busy;
1656 int rc;
1621 1657
1622 if (!dev->ethtool_ops->phys_id) 1658 if (!dev->ethtool_ops->set_phys_id && !dev->ethtool_ops->phys_id)
1623 return -EOPNOTSUPP; 1659 return -EOPNOTSUPP;
1624 1660
1661 if (busy)
1662 return -EBUSY;
1663
1625 if (copy_from_user(&id, useraddr, sizeof(id))) 1664 if (copy_from_user(&id, useraddr, sizeof(id)))
1626 return -EFAULT; 1665 return -EFAULT;
1627 1666
1628 return dev->ethtool_ops->phys_id(dev, id.data); 1667 if (!dev->ethtool_ops->set_phys_id)
1668 /* Do it the old way */
1669 return dev->ethtool_ops->phys_id(dev, id.data);
1670
1671 rc = dev->ethtool_ops->set_phys_id(dev, ETHTOOL_ID_ACTIVE);
1672 if (rc < 0)
1673 return rc;
1674
1675 /* Drop the RTNL lock while waiting, but prevent reentry or
1676 * removal of the device.
1677 */
1678 busy = true;
1679 dev_hold(dev);
1680 rtnl_unlock();
1681
1682 if (rc == 0) {
1683 /* Driver will handle this itself */
1684 schedule_timeout_interruptible(
1685 id.data ? (id.data * HZ) : MAX_SCHEDULE_TIMEOUT);
1686 } else {
1687 /* Driver expects to be called at twice the frequency in rc */
1688 int n = rc * 2, i, interval = HZ / n;
1689
1690 /* Count down seconds */
1691 do {
1692 /* Count down iterations per second */
1693 i = n;
1694 do {
1695 rtnl_lock();
1696 rc = dev->ethtool_ops->set_phys_id(dev,
1697 (i & 1) ? ETHTOOL_ID_OFF : ETHTOOL_ID_ON);
1698 rtnl_unlock();
1699 if (rc)
1700 break;
1701 schedule_timeout_interruptible(interval);
1702 } while (!signal_pending(current) && --i != 0);
1703 } while (!signal_pending(current) &&
1704 (id.data == 0 || --id.data != 0));
1705 }
1706
1707 rtnl_lock();
1708 dev_put(dev);
1709 busy = false;
1710
1711 (void)dev->ethtool_ops->set_phys_id(dev, ETHTOOL_ID_INACTIVE);
1712 return rc;
1629} 1713}
1630 1714
1631static int ethtool_get_stats(struct net_device *dev, void __user *useraddr) 1715static int ethtool_get_stats(struct net_device *dev, void __user *useraddr)
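In the rewritten identify loop above, a driver returning rc > 0 from set_phys_id(dev, ETHTOOL_ID_ACTIVE) asks the core to call back 2*rc times per second (rc = 2 gives a 4 Hz ON/OFF cadence, i.e. two blinks per second), rc == 0 means the driver blinks autonomously, and the file-scope busy flag serializes requests while RTNL is dropped. A hedged sketch of the driver side of this contract; the example_led*() helpers are hypothetical, not a real driver.

/* Kernel-style sketch of a driver's set_phys_id implementation. */
static int example_set_phys_id(struct net_device *dev,
			       enum ethtool_phys_id_state state)
{
	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 2;                  /* core calls back 2*2 = 4 times/sec */
	case ETHTOOL_ID_ON:
		example_led(dev, true);    /* hypothetical LED helper */
		return 0;
	case ETHTOOL_ID_OFF:
		example_led(dev, false);   /* hypothetical LED helper */
		return 0;
	case ETHTOOL_ID_INACTIVE:
		example_led_restore(dev);  /* hypothetical: back to normal mode */
		return 0;
	}
	return -EINVAL;
}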
@@ -1953,6 +2037,12 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
1953 case ETHTOOL_SGRO: 2037 case ETHTOOL_SGRO:
1954 rc = ethtool_set_one_feature(dev, useraddr, ethcmd); 2038 rc = ethtool_set_one_feature(dev, useraddr, ethcmd);
1955 break; 2039 break;
2040 case ETHTOOL_GCHANNELS:
2041 rc = ethtool_get_channels(dev, useraddr);
2042 break;
2043 case ETHTOOL_SCHANNELS:
2044 rc = ethtool_set_channels(dev, useraddr);
2045 break;
1956 default: 2046 default:
1957 rc = -EOPNOTSUPP; 2047 rc = -EOPNOTSUPP;
1958 } 2048 }
diff --git a/net/core/filter.c b/net/core/filter.c
index 232b1873bb28..afb8afb066bb 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -425,7 +425,7 @@ EXPORT_SYMBOL(sk_run_filter);
 425 * As we don't want to clear mem[] array for each packet going through 425 * As we don't want to clear mem[] array for each packet going through
 426 * sk_run_filter(), we check that a filter loaded by user never tries to read 426 * sk_run_filter(), we check that a filter loaded by user never tries to read
427 * a cell if not previously written, and we check all branches to be sure 427 * a cell if not previously written, and we check all branches to be sure
428 * a malicious user doesnt try to abuse us. 428 * a malicious user doesn't try to abuse us.
429 */ 429 */
430static int check_load_and_stores(struct sock_filter *filter, int flen) 430static int check_load_and_stores(struct sock_filter *filter, int flen)
431{ 431{
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 01a1101b5936..a7b342131869 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -129,7 +129,7 @@ static void linkwatch_schedule_work(int urgent)
129 if (!cancel_delayed_work(&linkwatch_work)) 129 if (!cancel_delayed_work(&linkwatch_work))
130 return; 130 return;
131 131
132 /* Otherwise we reschedule it again for immediate exection. */ 132 /* Otherwise we reschedule it again for immediate execution. */
133 schedule_delayed_work(&linkwatch_work, 0); 133 schedule_delayed_work(&linkwatch_work, 0);
134} 134}
135 135
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 3f860261c5ee..1abb50841046 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -216,11 +216,14 @@ static void net_free(struct net *net)
216 kmem_cache_free(net_cachep, net); 216 kmem_cache_free(net_cachep, net);
217} 217}
218 218
219static struct net *net_create(void) 219struct net *copy_net_ns(unsigned long flags, struct net *old_net)
220{ 220{
221 struct net *net; 221 struct net *net;
222 int rv; 222 int rv;
223 223
224 if (!(flags & CLONE_NEWNET))
225 return get_net(old_net);
226
224 net = net_alloc(); 227 net = net_alloc();
225 if (!net) 228 if (!net)
226 return ERR_PTR(-ENOMEM); 229 return ERR_PTR(-ENOMEM);
@@ -239,13 +242,6 @@ static struct net *net_create(void)
239 return net; 242 return net;
240} 243}
241 244
242struct net *copy_net_ns(unsigned long flags, struct net *old_net)
243{
244 if (!(flags & CLONE_NEWNET))
245 return get_net(old_net);
246 return net_create();
247}
248
249static DEFINE_SPINLOCK(cleanup_list_lock); 245static DEFINE_SPINLOCK(cleanup_list_lock);
250static LIST_HEAD(cleanup_list); /* Must hold cleanup_list_lock to touch */ 246static LIST_HEAD(cleanup_list); /* Must hold cleanup_list_lock to touch */
251 247
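With net_create() folded into copy_net_ns() above, the CLONE_NEWNET check and the namespace allocation now live in one function. A user-space sketch exercising that path via unshare(2); it needs CAP_SYS_ADMIN and a kernel built with network-namespace support.

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	if (unshare(CLONE_NEWNET))  /* kernel side lands in copy_net_ns() */
		perror("unshare(CLONE_NEWNET)");
	else
		puts("now in a fresh network namespace (only loopback exists)");
	return 0;
}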
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 06be2431753e..46d9c3a4de2f 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -539,7 +539,7 @@ int __netpoll_rx(struct sk_buff *skb)
539{ 539{
540 int proto, len, ulen; 540 int proto, len, ulen;
541 int hits = 0; 541 int hits = 0;
542 struct iphdr *iph; 542 const struct iphdr *iph;
543 struct udphdr *uh; 543 struct udphdr *uh;
544 struct netpoll_info *npinfo = skb->dev->npinfo; 544 struct netpoll_info *npinfo = skb->dev->npinfo;
545 struct netpoll *np, *tmp; 545 struct netpoll *np, *tmp;
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index aeeece72b72f..2fa6fee1b46f 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2514,7 +2514,6 @@ static int pktgen_output_ipsec(struct sk_buff *skb, struct pktgen_dev *pkt_dev)
2514{ 2514{
2515 struct xfrm_state *x = pkt_dev->flows[pkt_dev->curfl].x; 2515 struct xfrm_state *x = pkt_dev->flows[pkt_dev->curfl].x;
2516 int err = 0; 2516 int err = 0;
2517 struct iphdr *iph;
2518 2517
2519 if (!x) 2518 if (!x)
2520 return 0; 2519 return 0;
@@ -2524,7 +2523,6 @@ static int pktgen_output_ipsec(struct sk_buff *skb, struct pktgen_dev *pkt_dev)
2524 return 0; 2523 return 0;
2525 2524
2526 spin_lock(&x->lock); 2525 spin_lock(&x->lock);
2527 iph = ip_hdr(skb);
2528 2526
2529 err = x->outer_mode->output(x, skb); 2527 err = x->outer_mode->output(x, skb);
2530 if (err) 2528 if (err)
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 49f7ea5b4c75..d7c4bb4b1820 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -196,7 +196,7 @@ EXPORT_SYMBOL_GPL(__rtnl_register);
196 * as failure of this function is very unlikely, it can only happen due 196 * as failure of this function is very unlikely, it can only happen due
197 * to lack of memory when allocating the chain to store all message 197 * to lack of memory when allocating the chain to store all message
198 * handlers for a protocol. Meant for use in init functions where lack 198 * handlers for a protocol. Meant for use in init functions where lack
199 * of memory implies no sense in continueing. 199 * of memory implies no sense in continuing.
200 */ 200 */
201void rtnl_register(int protocol, int msgtype, 201void rtnl_register(int protocol, int msgtype,
202 rtnl_doit_func doit, rtnl_dumpit_func dumpit) 202 rtnl_doit_func doit, rtnl_dumpit_func dumpit)
@@ -1440,7 +1440,7 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
1440errout: 1440errout:
1441 if (err < 0 && modified && net_ratelimit()) 1441 if (err < 0 && modified && net_ratelimit())
1442 printk(KERN_WARNING "A link change request failed with " 1442 printk(KERN_WARNING "A link change request failed with "
1443 "some changes comitted already. Interface %s may " 1443 "some changes committed already. Interface %s may "
1444 "have been left with an inconsistent configuration, " 1444 "have been left with an inconsistent configuration, "
1445 "please check.\n", dev->name); 1445 "please check.\n", dev->name);
1446 1446
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 801dd08908f9..7ebeed0a877c 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2267,7 +2267,7 @@ EXPORT_SYMBOL(skb_prepare_seq_read);
2267 * of bytes already consumed and the next call to 2267 * of bytes already consumed and the next call to
2268 * skb_seq_read() will return the remaining part of the block. 2268 * skb_seq_read() will return the remaining part of the block.
2269 * 2269 *
2270 * Note 1: The size of each block of data returned can be arbitary, 2270 * Note 1: The size of each block of data returned can be arbitrary,
 2271 * this limitation is the cost for zerocopy sequential 2271 * this limitation is the cost for zerocopy sequential
2272 * reads of potentially non linear data. 2272 * reads of potentially non linear data.
2273 * 2273 *
diff --git a/net/core/sock.c b/net/core/sock.c
index 7dfed792434d..6e819780c232 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -215,7 +215,7 @@ __u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
215__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX; 215__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
216__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX; 216__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
217 217
218/* Maximal space eaten by iovec or ancilliary data plus some space */ 218/* Maximal space eaten by iovec or ancillary data plus some space */
219int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512); 219int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
220EXPORT_SYMBOL(sysctl_optmem_max); 220EXPORT_SYMBOL(sysctl_optmem_max);
221 221
@@ -1175,7 +1175,7 @@ static void __sk_free(struct sock *sk)
1175void sk_free(struct sock *sk) 1175void sk_free(struct sock *sk)
1176{ 1176{
1177 /* 1177 /*
1178 * We substract one from sk_wmem_alloc and can know if 1178 * We subtract one from sk_wmem_alloc and can know if
1179 * some packets are still in some tx queue. 1179 * some packets are still in some tx queue.
1180 * If not null, sock_wfree() will call __sk_free(sk) later 1180 * If not null, sock_wfree() will call __sk_free(sk) later
1181 */ 1181 */
@@ -1185,10 +1185,10 @@ void sk_free(struct sock *sk)
1185EXPORT_SYMBOL(sk_free); 1185EXPORT_SYMBOL(sk_free);
1186 1186
1187/* 1187/*
1188 * Last sock_put should drop referrence to sk->sk_net. It has already 1188 * Last sock_put should drop reference to sk->sk_net. It has already
1189 * been dropped in sk_change_net. Taking referrence to stopping namespace 1189 * been dropped in sk_change_net. Taking reference to stopping namespace
1190 * is not an option. 1190 * is not an option.
1191 * Take referrence to a socket to remove it from hash _alive_ and after that 1191 * Take reference to a socket to remove it from hash _alive_ and after that
1192 * destroy it in the context of init_net. 1192 * destroy it in the context of init_net.
1193 */ 1193 */
1194void sk_release_kernel(struct sock *sk) 1194void sk_release_kernel(struct sock *sk)
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index de1b7e37ad5b..73add2373247 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -54,8 +54,8 @@ static void dccp_v6_hash(struct sock *sk)
54 54
55/* add pseudo-header to DCCP checksum stored in skb->csum */ 55/* add pseudo-header to DCCP checksum stored in skb->csum */
56static inline __sum16 dccp_v6_csum_finish(struct sk_buff *skb, 56static inline __sum16 dccp_v6_csum_finish(struct sk_buff *skb,
57 struct in6_addr *saddr, 57 const struct in6_addr *saddr,
58 struct in6_addr *daddr) 58 const struct in6_addr *daddr)
59{ 59{
60 return csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_DCCP, skb->csum); 60 return csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_DCCP, skb->csum);
61} 61}
@@ -87,7 +87,7 @@ static inline __u32 dccp_v6_init_sequence(struct sk_buff *skb)
87static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, 87static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
88 u8 type, u8 code, int offset, __be32 info) 88 u8 type, u8 code, int offset, __be32 info)
89{ 89{
90 struct ipv6hdr *hdr = (struct ipv6hdr *)skb->data; 90 const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
91 const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset); 91 const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset);
92 struct dccp_sock *dp; 92 struct dccp_sock *dp;
93 struct ipv6_pinfo *np; 93 struct ipv6_pinfo *np;
@@ -296,7 +296,7 @@ static void dccp_v6_reqsk_destructor(struct request_sock *req)
296 296
297static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb) 297static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
298{ 298{
299 struct ipv6hdr *rxip6h; 299 const struct ipv6hdr *rxip6h;
300 struct sk_buff *skb; 300 struct sk_buff *skb;
301 struct flowi6 fl6; 301 struct flowi6 fl6;
302 struct net *net = dev_net(skb_dst(rxskb)->dev); 302 struct net *net = dev_net(skb_dst(rxskb)->dev);
diff --git a/net/dccp/output.c b/net/dccp/output.c
index 784d30210543..136d41cbcd02 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -143,7 +143,7 @@ static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
143} 143}
144 144
145/** 145/**
146 * dccp_determine_ccmps - Find out about CCID-specfic packet-size limits 146 * dccp_determine_ccmps - Find out about CCID-specific packet-size limits
147 * We only consider the HC-sender CCID for setting the CCMPS (RFC 4340, 14.), 147 * We only consider the HC-sender CCID for setting the CCMPS (RFC 4340, 14.),
148 * since the RX CCID is restricted to feedback packets (Acks), which are small 148 * since the RX CCID is restricted to feedback packets (Acks), which are small
149 * in comparison with the data traffic. A value of 0 means "no current CCMPS". 149 * in comparison with the data traffic. A value of 0 means "no current CCMPS".
diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c
index 99d8d3a40998..bd0a52dd1d40 100644
--- a/net/decnet/dn_table.c
+++ b/net/decnet/dn_table.c
@@ -123,11 +123,11 @@ static inline void dn_rebuild_zone(struct dn_zone *dz,
123 struct dn_fib_node **old_ht, 123 struct dn_fib_node **old_ht,
124 int old_divisor) 124 int old_divisor)
125{ 125{
126 int i;
127 struct dn_fib_node *f, **fp, *next; 126 struct dn_fib_node *f, **fp, *next;
127 int i;
128 128
129 for(i = 0; i < old_divisor; i++) { 129 for(i = 0; i < old_divisor; i++) {
130 for(f = old_ht[i]; f; f = f->fn_next) { 130 for(f = old_ht[i]; f; f = next) {
131 next = f->fn_next; 131 next = f->fn_next;
132 for(fp = dn_chain_p(f->fn_key, dz); 132 for(fp = dn_chain_p(f->fn_key, dz);
133 *fp && dn_key_leq((*fp)->fn_key, f->fn_key); 133 *fp && dn_key_leq((*fp)->fn_key, f->fn_key);
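The dn_rebuild_zone() change above fixes a use-after-relink bug: the inner loop re-chains f into the new hash bucket, so advancing with f->fn_next afterwards walks the wrong list; saving next before the move is the standard safe-traversal idiom. A self-contained model of the same pattern:

#include <stdio.h>

struct node { int key; struct node *next; };

/* Move every node from 'src' to the front of 'dst'. 'next' is saved
 * before the node is relinked, mirroring the fix above. */
static void rebuild(struct node **src, struct node **dst)
{
	struct node *f, *next;

	for (f = *src; f; f = next) {  /* not f = f->next: that reads relinked state */
		next = f->next;
		f->next = *dst;
		*dst = f;
	}
	*src = NULL;
}

int main(void)
{
	struct node b = { 2, NULL }, a = { 1, &b };
	struct node *src = &a, *dst = NULL;

	rebuild(&src, &dst);
	for (struct node *n = dst; n; n = n->next)
		printf("%d ", n->key);   /* prints: 2 1 */
	printf("\n");
	return 0;
}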
diff --git a/net/dsa/mv88e6131.c b/net/dsa/mv88e6131.c
index bb2b41bc854e..3da418894efc 100644
--- a/net/dsa/mv88e6131.c
+++ b/net/dsa/mv88e6131.c
@@ -14,6 +14,13 @@
14#include "dsa_priv.h" 14#include "dsa_priv.h"
15#include "mv88e6xxx.h" 15#include "mv88e6xxx.h"
16 16
17/*
18 * Switch product IDs
19 */
20#define ID_6085 0x04a0
21#define ID_6095 0x0950
22#define ID_6131 0x1060
23
17static char *mv88e6131_probe(struct mii_bus *bus, int sw_addr) 24static char *mv88e6131_probe(struct mii_bus *bus, int sw_addr)
18{ 25{
19 int ret; 26 int ret;
@@ -21,9 +28,11 @@ static char *mv88e6131_probe(struct mii_bus *bus, int sw_addr)
21 ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03); 28 ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03);
22 if (ret >= 0) { 29 if (ret >= 0) {
23 ret &= 0xfff0; 30 ret &= 0xfff0;
24 if (ret == 0x0950) 31 if (ret == ID_6085)
32 return "Marvell 88E6085";
33 if (ret == ID_6095)
25 return "Marvell 88E6095/88E6095F"; 34 return "Marvell 88E6095/88E6095F";
26 if (ret == 0x1060) 35 if (ret == ID_6131)
27 return "Marvell 88E6131"; 36 return "Marvell 88E6131";
28 } 37 }
29 38
@@ -124,7 +133,7 @@ static int mv88e6131_setup_global(struct dsa_switch *ds)
124 * Ignore removed tag data on doubly tagged packets, disable 133 * Ignore removed tag data on doubly tagged packets, disable
125 * flow control messages, force flow control priority to the 134 * flow control messages, force flow control priority to the
126 * highest, and send all special multicast frames to the CPU 135 * highest, and send all special multicast frames to the CPU
127 * port at the higest priority. 136 * port at the highest priority.
128 */ 137 */
129 REG_WRITE(REG_GLOBAL2, 0x05, 0x00ff); 138 REG_WRITE(REG_GLOBAL2, 0x05, 0x00ff);
130 139
@@ -164,6 +173,7 @@ static int mv88e6131_setup_global(struct dsa_switch *ds)
164 173
165static int mv88e6131_setup_port(struct dsa_switch *ds, int p) 174static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
166{ 175{
176 struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
167 int addr = REG_PORT(p); 177 int addr = REG_PORT(p);
168 u16 val; 178 u16 val;
169 179
@@ -171,10 +181,13 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
171 * MAC Forcing register: don't force link, speed, duplex 181 * MAC Forcing register: don't force link, speed, duplex
172 * or flow control state to any particular values on physical 182 * or flow control state to any particular values on physical
173 * ports, but force the CPU port and all DSA ports to 1000 Mb/s 183 * ports, but force the CPU port and all DSA ports to 1000 Mb/s
174 * full duplex. 184 * (100 Mb/s on 6085) full duplex.
175 */ 185 */
176 if (dsa_is_cpu_port(ds, p) || ds->dsa_port_mask & (1 << p)) 186 if (dsa_is_cpu_port(ds, p) || ds->dsa_port_mask & (1 << p))
177 REG_WRITE(addr, 0x01, 0x003e); 187 if (ps->id == ID_6085)
188 REG_WRITE(addr, 0x01, 0x003d); /* 100 Mb/s */
189 else
190 REG_WRITE(addr, 0x01, 0x003e); /* 1000 Mb/s */
178 else 191 else
179 REG_WRITE(addr, 0x01, 0x0003); 192 REG_WRITE(addr, 0x01, 0x0003);
180 193
@@ -286,6 +299,8 @@ static int mv88e6131_setup(struct dsa_switch *ds)
286 mv88e6xxx_ppu_state_init(ds); 299 mv88e6xxx_ppu_state_init(ds);
287 mutex_init(&ps->stats_mutex); 300 mutex_init(&ps->stats_mutex);
288 301
302 ps->id = REG_READ(REG_PORT(0), 0x03) & 0xfff0;
303
289 ret = mv88e6131_switch_reset(ds); 304 ret = mv88e6131_switch_reset(ds);
290 if (ret < 0) 305 if (ret < 0)
291 return ret; 306 return ret;
diff --git a/net/dsa/mv88e6xxx.h b/net/dsa/mv88e6xxx.h
index eb0e0aaa9f1b..61156ca26a0d 100644
--- a/net/dsa/mv88e6xxx.h
+++ b/net/dsa/mv88e6xxx.h
@@ -39,6 +39,8 @@ struct mv88e6xxx_priv_state {
39 * Hold this mutex over snapshot + dump sequences. 39 * Hold this mutex over snapshot + dump sequences.
40 */ 40 */
41 struct mutex stats_mutex; 41 struct mutex stats_mutex;
42
43 int id; /* switch product id */
42}; 44};
43 45
44struct mv88e6xxx_hw_stat { 46struct mv88e6xxx_hw_stat {
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 64ca2a6fa0d4..0a47b6c37038 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -288,7 +288,6 @@ static const struct ethtool_ops dsa_slave_ethtool_ops = {
288 .get_drvinfo = dsa_slave_get_drvinfo, 288 .get_drvinfo = dsa_slave_get_drvinfo,
289 .nway_reset = dsa_slave_nway_reset, 289 .nway_reset = dsa_slave_nway_reset,
290 .get_link = dsa_slave_get_link, 290 .get_link = dsa_slave_get_link,
291 .set_sg = ethtool_op_set_sg,
292 .get_strings = dsa_slave_get_strings, 291 .get_strings = dsa_slave_get_strings,
293 .get_ethtool_stats = dsa_slave_get_ethtool_stats, 292 .get_ethtool_stats = dsa_slave_get_ethtool_stats,
294 .get_sset_count = dsa_slave_get_sset_count, 293 .get_sset_count = dsa_slave_get_sset_count,
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index 116d3fd3d669..a1d9f3787dd5 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -935,7 +935,6 @@ static void aun_data_available(struct sock *sk, int slen)
935 struct sk_buff *skb; 935 struct sk_buff *skb;
936 unsigned char *data; 936 unsigned char *data;
937 struct aunhdr *ah; 937 struct aunhdr *ah;
938 struct iphdr *ip;
939 size_t len; 938 size_t len;
940 939
941 while ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL) { 940 while ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL) {
@@ -949,7 +948,6 @@ static void aun_data_available(struct sock *sk, int slen)
949 data = skb_transport_header(skb) + sizeof(struct udphdr); 948 data = skb_transport_header(skb) + sizeof(struct udphdr);
950 ah = (struct aunhdr *)data; 949 ah = (struct aunhdr *)data;
951 len = skb->len - sizeof(struct udphdr); 950 len = skb->len - sizeof(struct udphdr);
952 ip = ip_hdr(skb);
953 951
954 switch (ah->code) 952 switch (ah->code)
955 { 953 {
@@ -962,12 +960,6 @@ static void aun_data_available(struct sock *sk, int slen)
962 case 4: 960 case 4:
963 aun_tx_ack(ah->handle, ECTYPE_TRANSMIT_NOT_LISTENING); 961 aun_tx_ack(ah->handle, ECTYPE_TRANSMIT_NOT_LISTENING);
964 break; 962 break;
965#if 0
966 /* This isn't quite right yet. */
967 case 5:
968 aun_send_response(ip->saddr, ah->handle, 6, ah->cb);
969 break;
970#endif
971 default: 963 default:
972 printk(KERN_DEBUG "unknown AUN packet (type %d)\n", data[0]); 964 printk(KERN_DEBUG "unknown AUN packet (type %d)\n", data[0]);
973 } 965 }
diff --git a/net/ieee802154/Makefile b/net/ieee802154/Makefile
index ce2d33582859..5761185f884e 100644
--- a/net/ieee802154/Makefile
+++ b/net/ieee802154/Makefile
@@ -1,5 +1,3 @@
1obj-$(CONFIG_IEEE802154) += ieee802154.o af_802154.o 1obj-$(CONFIG_IEEE802154) += ieee802154.o af_802154.o
2ieee802154-y := netlink.o nl-mac.o nl-phy.o nl_policy.o wpan-class.o 2ieee802154-y := netlink.o nl-mac.o nl-phy.o nl_policy.o wpan-class.o
3af_802154-y := af_ieee802154.o raw.o dgram.o 3af_802154-y := af_ieee802154.o raw.o dgram.o
4
5ccflags-y += -Wall -DDEBUG
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 807d83c02ef6..cae75ef21fea 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1186,7 +1186,7 @@ EXPORT_SYMBOL(inet_sk_rebuild_header);
1186 1186
1187static int inet_gso_send_check(struct sk_buff *skb) 1187static int inet_gso_send_check(struct sk_buff *skb)
1188{ 1188{
1189 struct iphdr *iph; 1189 const struct iphdr *iph;
1190 const struct net_protocol *ops; 1190 const struct net_protocol *ops;
1191 int proto; 1191 int proto;
1192 int ihl; 1192 int ihl;
@@ -1293,7 +1293,7 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
1293 const struct net_protocol *ops; 1293 const struct net_protocol *ops;
1294 struct sk_buff **pp = NULL; 1294 struct sk_buff **pp = NULL;
1295 struct sk_buff *p; 1295 struct sk_buff *p;
1296 struct iphdr *iph; 1296 const struct iphdr *iph;
1297 unsigned int hlen; 1297 unsigned int hlen;
1298 unsigned int off; 1298 unsigned int off;
1299 unsigned int id; 1299 unsigned int id;
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 4286fd3cc0e2..c1f4154552fc 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -73,7 +73,7 @@ static inline struct scatterlist *ah_req_sg(struct crypto_ahash *ahash,
73 * into IP header for icv calculation. Options are already checked 73 * into IP header for icv calculation. Options are already checked
74 * for validity, so paranoia is not required. */ 74 * for validity, so paranoia is not required. */
75 75
76static int ip_clear_mutable_options(struct iphdr *iph, __be32 *daddr) 76static int ip_clear_mutable_options(const struct iphdr *iph, __be32 *daddr)
77{ 77{
78 unsigned char * optptr = (unsigned char*)(iph+1); 78 unsigned char * optptr = (unsigned char*)(iph+1);
79 int l = iph->ihl*4 - sizeof(struct iphdr); 79 int l = iph->ihl*4 - sizeof(struct iphdr);
@@ -396,7 +396,7 @@ out:
396static void ah4_err(struct sk_buff *skb, u32 info) 396static void ah4_err(struct sk_buff *skb, u32 info)
397{ 397{
398 struct net *net = dev_net(skb->dev); 398 struct net *net = dev_net(skb->dev);
399 struct iphdr *iph = (struct iphdr *)skb->data; 399 const struct iphdr *iph = (const struct iphdr *)skb->data;
400 struct ip_auth_hdr *ah = (struct ip_auth_hdr *)(skb->data+(iph->ihl<<2)); 400 struct ip_auth_hdr *ah = (struct ip_auth_hdr *)(skb->data+(iph->ihl<<2));
401 struct xfrm_state *x; 401 struct xfrm_state *x;
402 402
@@ -404,7 +404,8 @@ static void ah4_err(struct sk_buff *skb, u32 info)
404 icmp_hdr(skb)->code != ICMP_FRAG_NEEDED) 404 icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
405 return; 405 return;
406 406
407 x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, ah->spi, IPPROTO_AH, AF_INET); 407 x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
408 ah->spi, IPPROTO_AH, AF_INET);
408 if (!x) 409 if (!x)
409 return; 410 return;
410 printk(KERN_DEBUG "pmtu discovery on SA AH/%08x/%08x\n", 411 printk(KERN_DEBUG "pmtu discovery on SA AH/%08x/%08x\n",
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 094e150c6260..a0af7ea87870 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -112,7 +112,7 @@ int cipso_v4_rbm_strictvalid = 1;
112/* The maximum number of category ranges permitted in the ranged category tag 112/* The maximum number of category ranges permitted in the ranged category tag
113 * (tag #5). You may note that the IETF draft states that the maximum number 113 * (tag #5). You may note that the IETF draft states that the maximum number
114 * of category ranges is 7, but if the low end of the last category range is 114 * of category ranges is 7, but if the low end of the last category range is
115 * zero then it is possibile to fit 8 category ranges because the zero should 115 * zero then it is possible to fit 8 category ranges because the zero should
116 * be omitted. */ 116 * be omitted. */
117#define CIPSO_V4_TAG_RNG_CAT_MAX 8 117#define CIPSO_V4_TAG_RNG_CAT_MAX 8
118 118
@@ -438,7 +438,7 @@ cache_add_failure:
438 * 438 *
439 * Description: 439 * Description:
440 * Search the DOI definition list for a DOI definition with a DOI value that 440 * Search the DOI definition list for a DOI definition with a DOI value that
441 * matches @doi. The caller is responsibile for calling rcu_read_[un]lock(). 441 * matches @doi. The caller is responsible for calling rcu_read_[un]lock().
442 * Returns a pointer to the DOI definition on success and NULL on failure. 442 * Returns a pointer to the DOI definition on success and NULL on failure.
443 */ 443 */
444static struct cipso_v4_doi *cipso_v4_doi_search(u32 doi) 444static struct cipso_v4_doi *cipso_v4_doi_search(u32 doi)
@@ -1293,7 +1293,7 @@ static int cipso_v4_gentag_rbm(const struct cipso_v4_doi *doi_def,
1293 return ret_val; 1293 return ret_val;
1294 1294
1295 /* This will send packets using the "optimized" format when 1295 /* This will send packets using the "optimized" format when
1296 * possibile as specified in section 3.4.2.6 of the 1296 * possible as specified in section 3.4.2.6 of the
1297 * CIPSO draft. */ 1297 * CIPSO draft. */
1298 if (cipso_v4_rbm_optfmt && ret_val > 0 && ret_val <= 10) 1298 if (cipso_v4_rbm_optfmt && ret_val > 0 && ret_val <= 10)
1299 tag_len = 14; 1299 tag_len = 14;
@@ -1752,7 +1752,7 @@ validate_return:
1752} 1752}
1753 1753
1754/** 1754/**
1755 * cipso_v4_error - Send the correct reponse for a bad packet 1755 * cipso_v4_error - Send the correct response for a bad packet
1756 * @skb: the packet 1756 * @skb: the packet
1757 * @error: the error code 1757 * @error: the error code
1758 * @gateway: CIPSO gateway flag 1758 * @gateway: CIPSO gateway flag
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 5345b0bee6df..acf553f95b5b 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1203,6 +1203,7 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
1203 break; 1203 break;
1204 /* fall through */ 1204 /* fall through */
1205 case NETDEV_NOTIFY_PEERS: 1205 case NETDEV_NOTIFY_PEERS:
1206 case NETDEV_BONDING_FAILOVER:
1206 /* Send gratuitous ARP to notify of link change */ 1207 /* Send gratuitous ARP to notify of link change */
1207 inetdev_send_gratuitous_arp(dev, in_dev); 1208 inetdev_send_gratuitous_arp(dev, in_dev);
1208 break; 1209 break;
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 03f994bcf7de..a5b413416da3 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -276,7 +276,7 @@ error:
276 276
277static int esp_input_done2(struct sk_buff *skb, int err) 277static int esp_input_done2(struct sk_buff *skb, int err)
278{ 278{
279 struct iphdr *iph; 279 const struct iphdr *iph;
280 struct xfrm_state *x = xfrm_input_state(skb); 280 struct xfrm_state *x = xfrm_input_state(skb);
281 struct esp_data *esp = x->data; 281 struct esp_data *esp = x->data;
282 struct crypto_aead *aead = esp->aead; 282 struct crypto_aead *aead = esp->aead;
@@ -484,7 +484,7 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
484static void esp4_err(struct sk_buff *skb, u32 info) 484static void esp4_err(struct sk_buff *skb, u32 info)
485{ 485{
486 struct net *net = dev_net(skb->dev); 486 struct net *net = dev_net(skb->dev);
487 struct iphdr *iph = (struct iphdr *)skb->data; 487 const struct iphdr *iph = (const struct iphdr *)skb->data;
488 struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2)); 488 struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2));
489 struct xfrm_state *x; 489 struct xfrm_state *x;
490 490
@@ -492,7 +492,8 @@ static void esp4_err(struct sk_buff *skb, u32 info)
492 icmp_hdr(skb)->code != ICMP_FRAG_NEEDED) 492 icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
493 return; 493 return;
494 494
495 x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, esph->spi, IPPROTO_ESP, AF_INET); 495 x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
496 esph->spi, IPPROTO_ESP, AF_INET);
496 if (!x) 497 if (!x)
497 return; 498 return;
498 NETDEBUG(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%08x\n", 499 NETDEBUG(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%08x\n",
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 451088330bbb..22524716fe70 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -44,6 +44,7 @@
44#include <net/arp.h> 44#include <net/arp.h>
45#include <net/ip_fib.h> 45#include <net/ip_fib.h>
46#include <net/rtnetlink.h> 46#include <net/rtnetlink.h>
47#include <net/xfrm.h>
47 48
48#ifndef CONFIG_IP_MULTIPLE_TABLES 49#ifndef CONFIG_IP_MULTIPLE_TABLES
49 50
@@ -188,9 +189,9 @@ EXPORT_SYMBOL(inet_dev_addr_type);
188 * - check, that packet arrived from expected physical interface. 189 * - check, that packet arrived from expected physical interface.
189 * called with rcu_read_lock() 190 * called with rcu_read_lock()
190 */ 191 */
191int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif, 192int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, u8 tos,
192 struct net_device *dev, __be32 *spec_dst, 193 int oif, struct net_device *dev, __be32 *spec_dst,
193 u32 *itag, u32 mark) 194 u32 *itag)
194{ 195{
195 struct in_device *in_dev; 196 struct in_device *in_dev;
196 struct flowi4 fl4; 197 struct flowi4 fl4;
@@ -202,7 +203,6 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
202 203
203 fl4.flowi4_oif = 0; 204 fl4.flowi4_oif = 0;
204 fl4.flowi4_iif = oif; 205 fl4.flowi4_iif = oif;
205 fl4.flowi4_mark = mark;
206 fl4.daddr = src; 206 fl4.daddr = src;
207 fl4.saddr = dst; 207 fl4.saddr = dst;
208 fl4.flowi4_tos = tos; 208 fl4.flowi4_tos = tos;
@@ -212,10 +212,12 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
212 in_dev = __in_dev_get_rcu(dev); 212 in_dev = __in_dev_get_rcu(dev);
213 if (in_dev) { 213 if (in_dev) {
214 no_addr = in_dev->ifa_list == NULL; 214 no_addr = in_dev->ifa_list == NULL;
215 rpf = IN_DEV_RPFILTER(in_dev); 215
216 /* Ignore rp_filter for packets protected by IPsec. */
217 rpf = secpath_exists(skb) ? 0 : IN_DEV_RPFILTER(in_dev);
218
216 accept_local = IN_DEV_ACCEPT_LOCAL(in_dev); 219 accept_local = IN_DEV_ACCEPT_LOCAL(in_dev);
217 if (mark && !IN_DEV_SRC_VMARK(in_dev)) 220 fl4.flowi4_mark = IN_DEV_SRC_VMARK(in_dev) ? skb->mark : 0;
218 fl4.flowi4_mark = 0;
219 } 221 }
220 222
221 if (in_dev == NULL) 223 if (in_dev == NULL)
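The hunk above changes two things: IPsec-protected packets, which legitimately fail a naive reverse-path check after decapsulation, now bypass rp_filter entirely, and the flow mark is taken from the skb only when src_vmark is enabled on the device. A condensed user-space model of the first rule, with the kernel predicates stubbed as plain integers:

#include <stdio.h>

/* Stand-ins for secpath_exists(skb) and IN_DEV_RPFILTER(in_dev). */
static int rpf_enabled(int has_secpath, int dev_rpfilter)
{
	return has_secpath ? 0 : dev_rpfilter;
}

int main(void)
{
	printf("plain packet, rp_filter=1 -> %d\n", rpf_enabled(0, 1)); /* 1 */
	printf("IPsec packet, rp_filter=1 -> %d\n", rpf_enabled(1, 1)); /* 0 */
	return 0;
}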
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index b92c86f6e9b3..9ac481a10d37 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -12,7 +12,7 @@
12 * 12 *
13 * Hans Liss <hans.liss@its.uu.se> Uppsala Universitet 13 * Hans Liss <hans.liss@its.uu.se> Uppsala Universitet
14 * 14 *
15 * This work is based on the LPC-trie which is originally descibed in: 15 * This work is based on the LPC-trie which is originally described in:
16 * 16 *
17 * An experimental study of compression methods for dynamic tries 17 * An experimental study of compression methods for dynamic tries
18 * Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002. 18 * Stefan Nilsson and Matti Tikkanen. Algorithmica, 33(1):19-33, 2002.
@@ -126,7 +126,7 @@ struct tnode {
126 struct work_struct work; 126 struct work_struct work;
127 struct tnode *tnode_free; 127 struct tnode *tnode_free;
128 }; 128 };
129 struct rt_trie_node *child[0]; 129 struct rt_trie_node __rcu *child[0];
130}; 130};
131 131
132#ifdef CONFIG_IP_FIB_TRIE_STATS 132#ifdef CONFIG_IP_FIB_TRIE_STATS
@@ -151,7 +151,7 @@ struct trie_stat {
151}; 151};
152 152
153struct trie { 153struct trie {
154 struct rt_trie_node *trie; 154 struct rt_trie_node __rcu *trie;
155#ifdef CONFIG_IP_FIB_TRIE_STATS 155#ifdef CONFIG_IP_FIB_TRIE_STATS
156 struct trie_use_stats stats; 156 struct trie_use_stats stats;
157#endif 157#endif
@@ -177,16 +177,29 @@ static const int sync_pages = 128;
177static struct kmem_cache *fn_alias_kmem __read_mostly; 177static struct kmem_cache *fn_alias_kmem __read_mostly;
178static struct kmem_cache *trie_leaf_kmem __read_mostly; 178static struct kmem_cache *trie_leaf_kmem __read_mostly;
179 179
180static inline struct tnode *node_parent(struct rt_trie_node *node) 180/*
181 * caller must hold RTNL
182 */
183static inline struct tnode *node_parent(const struct rt_trie_node *node)
181{ 184{
182 return (struct tnode *)(node->parent & ~NODE_TYPE_MASK); 185 unsigned long parent;
186
187 parent = rcu_dereference_index_check(node->parent, lockdep_rtnl_is_held());
188
189 return (struct tnode *)(parent & ~NODE_TYPE_MASK);
183} 190}
184 191
185static inline struct tnode *node_parent_rcu(struct rt_trie_node *node) 192/*
193 * caller must hold RCU read lock or RTNL
194 */
195static inline struct tnode *node_parent_rcu(const struct rt_trie_node *node)
186{ 196{
187 struct tnode *ret = node_parent(node); 197 unsigned long parent;
198
199 parent = rcu_dereference_index_check(node->parent, rcu_read_lock_held() ||
200 lockdep_rtnl_is_held());
188 201
189 return rcu_dereference_rtnl(ret); 202 return (struct tnode *)(parent & ~NODE_TYPE_MASK);
190} 203}
191 204
192/* Same as rcu_assign_pointer 205/* Same as rcu_assign_pointer
@@ -198,18 +211,24 @@ static inline void node_set_parent(struct rt_trie_node *node, struct tnode *ptr)
198 node->parent = (unsigned long)ptr | NODE_TYPE(node); 211 node->parent = (unsigned long)ptr | NODE_TYPE(node);
199} 212}
200 213
201static inline struct rt_trie_node *tnode_get_child(struct tnode *tn, unsigned int i) 214/*
215 * caller must hold RTNL
216 */
217static inline struct rt_trie_node *tnode_get_child(const struct tnode *tn, unsigned int i)
202{ 218{
203 BUG_ON(i >= 1U << tn->bits); 219 BUG_ON(i >= 1U << tn->bits);
204 220
205 return tn->child[i]; 221 return rtnl_dereference(tn->child[i]);
206} 222}
207 223
208static inline struct rt_trie_node *tnode_get_child_rcu(struct tnode *tn, unsigned int i) 224/*
225 * caller must hold RCU read lock or RTNL
226 */
227static inline struct rt_trie_node *tnode_get_child_rcu(const struct tnode *tn, unsigned int i)
209{ 228{
210 struct rt_trie_node *ret = tnode_get_child(tn, i); 229 BUG_ON(i >= 1U << tn->bits);
211 230
212 return rcu_dereference_rtnl(ret); 231 return rcu_dereference_rtnl(tn->child[i]);
213} 232}
214 233
215static inline int tnode_child_length(const struct tnode *tn) 234static inline int tnode_child_length(const struct tnode *tn)
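The accessors above pair each __rcu-annotated field with the dereference primitive matching its locking contract: rtnl_dereference() for paths that hold RTNL, rcu_dereference_rtnl() for readers under either an RCU read-side critical section or RTNL, so sparse and lockdep can flag a plain dereference. A kernel-style sketch of the pattern (not buildable standalone; struct foo and the example_* names are illustrative):

#include <linux/rtnetlink.h>

struct foo;

struct example {
	struct foo __rcu *ptr;
};

/* updater side: RTNL held, no RCU read lock needed */
static struct foo *example_get_locked(struct example *e)
{
	return rtnl_dereference(e->ptr);      /* lockdep-checks RTNL */
}

/* reader side: valid under rcu_read_lock() or RTNL */
static struct foo *example_get_any(struct example *e)
{
	return rcu_dereference_rtnl(e->ptr);  /* RCU read lock or RTNL */
}

This is the same contract the annotations added to trie->trie and tnode->child[] enforce throughout the rest of this file.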
@@ -487,7 +506,7 @@ static inline void put_child(struct trie *t, struct tnode *tn, int i,
487static void tnode_put_child_reorg(struct tnode *tn, int i, struct rt_trie_node *n, 506static void tnode_put_child_reorg(struct tnode *tn, int i, struct rt_trie_node *n,
488 int wasfull) 507 int wasfull)
489{ 508{
490 struct rt_trie_node *chi = tn->child[i]; 509 struct rt_trie_node *chi = rtnl_dereference(tn->child[i]);
491 int isfull; 510 int isfull;
492 511
493 BUG_ON(i >= 1<<tn->bits); 512 BUG_ON(i >= 1<<tn->bits);
@@ -665,7 +684,7 @@ one_child:
665 for (i = 0; i < tnode_child_length(tn); i++) { 684 for (i = 0; i < tnode_child_length(tn); i++) {
666 struct rt_trie_node *n; 685 struct rt_trie_node *n;
667 686
668 n = tn->child[i]; 687 n = rtnl_dereference(tn->child[i]);
669 if (!n) 688 if (!n)
670 continue; 689 continue;
671 690
@@ -679,6 +698,20 @@ one_child:
679 return (struct rt_trie_node *) tn; 698 return (struct rt_trie_node *) tn;
680} 699}
681 700
701
702static void tnode_clean_free(struct tnode *tn)
703{
704 int i;
705 struct tnode *tofree;
706
707 for (i = 0; i < tnode_child_length(tn); i++) {
708 tofree = (struct tnode *)rtnl_dereference(tn->child[i]);
709 if (tofree)
710 tnode_free(tofree);
711 }
712 tnode_free(tn);
713}
714
682static struct tnode *inflate(struct trie *t, struct tnode *tn) 715static struct tnode *inflate(struct trie *t, struct tnode *tn)
683{ 716{
684 struct tnode *oldtnode = tn; 717 struct tnode *oldtnode = tn;
@@ -755,8 +788,8 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn)
755 inode = (struct tnode *) node; 788 inode = (struct tnode *) node;
756 789
757 if (inode->bits == 1) { 790 if (inode->bits == 1) {
758 put_child(t, tn, 2*i, inode->child[0]); 791 put_child(t, tn, 2*i, rtnl_dereference(inode->child[0]));
759 put_child(t, tn, 2*i+1, inode->child[1]); 792 put_child(t, tn, 2*i+1, rtnl_dereference(inode->child[1]));
760 793
761 tnode_free_safe(inode); 794 tnode_free_safe(inode);
762 continue; 795 continue;
@@ -797,8 +830,8 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn)
797 830
798 size = tnode_child_length(left); 831 size = tnode_child_length(left);
799 for (j = 0; j < size; j++) { 832 for (j = 0; j < size; j++) {
800 put_child(t, left, j, inode->child[j]); 833 put_child(t, left, j, rtnl_dereference(inode->child[j]));
801 put_child(t, right, j, inode->child[j + size]); 834 put_child(t, right, j, rtnl_dereference(inode->child[j + size]));
802 } 835 }
803 put_child(t, tn, 2*i, resize(t, left)); 836 put_child(t, tn, 2*i, resize(t, left));
804 put_child(t, tn, 2*i+1, resize(t, right)); 837 put_child(t, tn, 2*i+1, resize(t, right));
@@ -808,18 +841,8 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn)
808 tnode_free_safe(oldtnode); 841 tnode_free_safe(oldtnode);
809 return tn; 842 return tn;
810nomem: 843nomem:
811 { 844 tnode_clean_free(tn);
812 int size = tnode_child_length(tn); 845 return ERR_PTR(-ENOMEM);
813 int j;
814
815 for (j = 0; j < size; j++)
816 if (tn->child[j])
817 tnode_free((struct tnode *)tn->child[j]);
818
819 tnode_free(tn);
820
821 return ERR_PTR(-ENOMEM);
822 }
823} 846}
824 847
825static struct tnode *halve(struct trie *t, struct tnode *tn) 848static struct tnode *halve(struct trie *t, struct tnode *tn)
@@ -890,18 +913,8 @@ static struct tnode *halve(struct trie *t, struct tnode *tn)
890 tnode_free_safe(oldtnode); 913 tnode_free_safe(oldtnode);
891 return tn; 914 return tn;
892nomem: 915nomem:
893 { 916 tnode_clean_free(tn);
894 int size = tnode_child_length(tn); 917 return ERR_PTR(-ENOMEM);
895 int j;
896
897 for (j = 0; j < size; j++)
898 if (tn->child[j])
899 tnode_free((struct tnode *)tn->child[j]);
900
901 tnode_free(tn);
902
903 return ERR_PTR(-ENOMEM);
904 }
905} 918}
906 919
907/* readside must use rcu_read_lock currently dump routines 920/* readside must use rcu_read_lock currently dump routines
@@ -1033,7 +1046,7 @@ static struct list_head *fib_insert_node(struct trie *t, u32 key, int plen)
1033 t_key cindex; 1046 t_key cindex;
1034 1047
1035 pos = 0; 1048 pos = 0;
1036 n = t->trie; 1049 n = rtnl_dereference(t->trie);
1037 1050
1038 /* If we point to NULL, stop. Either the tree is empty and we should 1051 /* If we point to NULL, stop. Either the tree is empty and we should
1039 * just put a new leaf in if, or we have reached an empty child slot, 1052 * just put a new leaf in if, or we have reached an empty child slot,
@@ -1319,6 +1332,9 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
1319 } 1332 }
1320 } 1333 }
1321 1334
1335 if (!plen)
1336 tb->tb_num_default++;
1337
1322 list_add_tail_rcu(&new_fa->fa_list, 1338 list_add_tail_rcu(&new_fa->fa_list,
1323 (fa ? &fa->fa_list : fa_head)); 1339 (fa ? &fa->fa_list : fa_head));
1324 1340
@@ -1684,6 +1700,9 @@ int fib_table_delete(struct fib_table *tb, struct fib_config *cfg)
1684 1700
1685 list_del_rcu(&fa->fa_list); 1701 list_del_rcu(&fa->fa_list);
1686 1702
1703 if (!plen)
1704 tb->tb_num_default--;
1705
1687 if (list_empty(fa_head)) { 1706 if (list_empty(fa_head)) {
1688 hlist_del_rcu(&li->hlist); 1707 hlist_del_rcu(&li->hlist);
1689 free_leaf_info(li); 1708 free_leaf_info(li);
@@ -1756,7 +1775,7 @@ static struct leaf *leaf_walk_rcu(struct tnode *p, struct rt_trie_node *c)
1756 continue; 1775 continue;
1757 1776
1758 if (IS_LEAF(c)) { 1777 if (IS_LEAF(c)) {
1759 prefetch(p->child[idx]); 1778 prefetch(rcu_dereference_rtnl(p->child[idx]));
1760 return (struct leaf *) c; 1779 return (struct leaf *) c;
1761 } 1780 }
1762 1781
@@ -1974,6 +1993,7 @@ struct fib_table *fib_trie_table(u32 id)
1974 1993
1975 tb->tb_id = id; 1994 tb->tb_id = id;
1976 tb->tb_default = -1; 1995 tb->tb_default = -1;
1996 tb->tb_num_default = 0;
1977 1997
1978 t = (struct trie *) tb->tb_data; 1998 t = (struct trie *) tb->tb_data;
1979 memset(t, 0, sizeof(*t)); 1999 memset(t, 0, sizeof(*t));
@@ -2272,7 +2292,7 @@ static void *fib_trie_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2272 2292
2273 /* walk rest of this hash chain */ 2293 /* walk rest of this hash chain */
2274 h = tb->tb_id & (FIB_TABLE_HASHSZ - 1); 2294 h = tb->tb_id & (FIB_TABLE_HASHSZ - 1);
2275 while ( (tb_node = rcu_dereference(tb->tb_hlist.next)) ) { 2295 while ((tb_node = rcu_dereference(hlist_next_rcu(&tb->tb_hlist)))) {
2276 tb = hlist_entry(tb_node, struct fib_table, tb_hlist); 2296 tb = hlist_entry(tb_node, struct fib_table, tb_hlist);
2277 n = fib_trie_get_first(iter, (struct trie *) tb->tb_data); 2297 n = fib_trie_get_first(iter, (struct trie *) tb->tb_data);
2278 if (n) 2298 if (n)
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index a91dc1611081..74e35e5736e2 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -373,7 +373,7 @@ out_unlock:
373} 373}
374 374
375static struct rtable *icmp_route_lookup(struct net *net, struct sk_buff *skb_in, 375static struct rtable *icmp_route_lookup(struct net *net, struct sk_buff *skb_in,
376 struct iphdr *iph, 376 const struct iphdr *iph,
377 __be32 saddr, u8 tos, 377 __be32 saddr, u8 tos,
378 int type, int code, 378 int type, int code,
379 struct icmp_bxm *param) 379 struct icmp_bxm *param)
@@ -637,7 +637,7 @@ EXPORT_SYMBOL(icmp_send);
637 637
638static void icmp_unreach(struct sk_buff *skb) 638static void icmp_unreach(struct sk_buff *skb)
639{ 639{
640 struct iphdr *iph; 640 const struct iphdr *iph;
641 struct icmphdr *icmph; 641 struct icmphdr *icmph;
642 int hash, protocol; 642 int hash, protocol;
643 const struct net_protocol *ipprot; 643 const struct net_protocol *ipprot;
@@ -656,7 +656,7 @@ static void icmp_unreach(struct sk_buff *skb)
656 goto out_err; 656 goto out_err;
657 657
658 icmph = icmp_hdr(skb); 658 icmph = icmp_hdr(skb);
659 iph = (struct iphdr *)skb->data; 659 iph = (const struct iphdr *)skb->data;
660 660
661 if (iph->ihl < 5) /* Mangled header, drop. */ 661 if (iph->ihl < 5) /* Mangled header, drop. */
662 goto out_err; 662 goto out_err;
@@ -704,7 +704,7 @@ static void icmp_unreach(struct sk_buff *skb)
704 */ 704 */
705 705
706 /* 706 /*
707 * Check the other end isnt violating RFC 1122. Some routers send 707 * Check the other end isn't violating RFC 1122. Some routers send
708 * bogus responses to broadcast frames. If you see this message 708 * bogus responses to broadcast frames. If you see this message
709 * first check your netmask matches at both ends, if it does then 709 * first check your netmask matches at both ends, if it does then
710 * get the other vendor to fix their kit. 710 * get the other vendor to fix their kit.
@@ -729,7 +729,7 @@ static void icmp_unreach(struct sk_buff *skb)
729 if (!pskb_may_pull(skb, iph->ihl * 4 + 8)) 729 if (!pskb_may_pull(skb, iph->ihl * 4 + 8))
730 goto out; 730 goto out;
731 731
732 iph = (struct iphdr *)skb->data; 732 iph = (const struct iphdr *)skb->data;
733 protocol = iph->protocol; 733 protocol = iph->protocol;
734 734
735 /* 735 /*
@@ -758,7 +758,7 @@ out_err:
758 758
759static void icmp_redirect(struct sk_buff *skb) 759static void icmp_redirect(struct sk_buff *skb)
760{ 760{
761 struct iphdr *iph; 761 const struct iphdr *iph;
762 762
763 if (skb->len < sizeof(struct iphdr)) 763 if (skb->len < sizeof(struct iphdr))
764 goto out_err; 764 goto out_err;
@@ -769,7 +769,7 @@ static void icmp_redirect(struct sk_buff *skb)
769 if (!pskb_may_pull(skb, sizeof(struct iphdr))) 769 if (!pskb_may_pull(skb, sizeof(struct iphdr)))
770 goto out; 770 goto out;
771 771
772 iph = (struct iphdr *)skb->data; 772 iph = (const struct iphdr *)skb->data;
773 773
774 switch (icmp_hdr(skb)->code & 7) { 774 switch (icmp_hdr(skb)->code & 7) {
775 case ICMP_REDIR_NET: 775 case ICMP_REDIR_NET:
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 6c0b7f4a3d7d..8514db54a7f4 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -73,7 +73,7 @@ int inet_csk_bind_conflict(const struct sock *sk,
73 !sk2->sk_bound_dev_if || 73 !sk2->sk_bound_dev_if ||
74 sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) { 74 sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
75 if (!reuse || !sk2->sk_reuse || 75 if (!reuse || !sk2->sk_reuse ||
76 ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) { 76 sk2->sk_state == TCP_LISTEN) {
77 const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2); 77 const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
78 if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) || 78 if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
79 sk2_rcv_saddr == sk_rcv_saddr(sk)) 79 sk2_rcv_saddr == sk_rcv_saddr(sk))
@@ -122,8 +122,7 @@ again:
122 (tb->num_owners < smallest_size || smallest_size == -1)) { 122 (tb->num_owners < smallest_size || smallest_size == -1)) {
123 smallest_size = tb->num_owners; 123 smallest_size = tb->num_owners;
124 smallest_rover = rover; 124 smallest_rover = rover;
125 if (atomic_read(&hashinfo->bsockets) > (high - low) + 1 && 125 if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) {
126 !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) {
127 spin_unlock(&head->lock); 126 spin_unlock(&head->lock);
128 snum = smallest_rover; 127 snum = smallest_rover;
129 goto have_snum; 128 goto have_snum;
@@ -356,20 +355,14 @@ struct dst_entry *inet_csk_route_req(struct sock *sk,
356 struct rtable *rt; 355 struct rtable *rt;
357 const struct inet_request_sock *ireq = inet_rsk(req); 356 const struct inet_request_sock *ireq = inet_rsk(req);
358 struct ip_options *opt = inet_rsk(req)->opt; 357 struct ip_options *opt = inet_rsk(req)->opt;
359 struct flowi4 fl4 = {
360 .flowi4_oif = sk->sk_bound_dev_if,
361 .flowi4_mark = sk->sk_mark,
362 .daddr = ((opt && opt->srr) ?
363 opt->faddr : ireq->rmt_addr),
364 .saddr = ireq->loc_addr,
365 .flowi4_tos = RT_CONN_FLAGS(sk),
366 .flowi4_proto = sk->sk_protocol,
367 .flowi4_flags = inet_sk_flowi_flags(sk),
368 .fl4_sport = inet_sk(sk)->inet_sport,
369 .fl4_dport = ireq->rmt_port,
370 };
371 struct net *net = sock_net(sk); 358 struct net *net = sock_net(sk);
359 struct flowi4 fl4;
372 360
361 flowi4_init_output(&fl4, sk->sk_bound_dev_if, sk->sk_mark,
362 RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
363 sk->sk_protocol, inet_sk_flowi_flags(sk),
364 (opt && opt->srr) ? opt->faddr : ireq->rmt_addr,
365 ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
373 security_req_classify_flow(req, flowi4_to_flowi(&fl4)); 366 security_req_classify_flow(req, flowi4_to_flowi(&fl4));
374 rt = ip_route_output_flow(net, &fl4, sk); 367 rt = ip_route_output_flow(net, &fl4, sk);
375 if (IS_ERR(rt)) 368 if (IS_ERR(rt))
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 2ada17129fce..6ffe94ca5bc9 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -124,7 +124,7 @@ static int inet_csk_diag_fill(struct sock *sk,
124 124
125#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) 125#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
126 if (r->idiag_family == AF_INET6) { 126 if (r->idiag_family == AF_INET6) {
127 struct ipv6_pinfo *np = inet6_sk(sk); 127 const struct ipv6_pinfo *np = inet6_sk(sk);
128 128
129 ipv6_addr_copy((struct in6_addr *)r->id.idiag_src, 129 ipv6_addr_copy((struct in6_addr *)r->id.idiag_src,
130 &np->rcv_saddr); 130 &np->rcv_saddr);
diff --git a/net/ipv4/inet_lro.c b/net/ipv4/inet_lro.c
index 47038cb6c138..85a0f75dae64 100644
--- a/net/ipv4/inet_lro.c
+++ b/net/ipv4/inet_lro.c
@@ -51,8 +51,8 @@ MODULE_DESCRIPTION("Large Receive Offload (ipv4 / tcp)");
51 * Basic tcp checks whether packet is suitable for LRO 51 * Basic tcp checks whether packet is suitable for LRO
52 */ 52 */
53 53
54static int lro_tcp_ip_check(struct iphdr *iph, struct tcphdr *tcph, 54static int lro_tcp_ip_check(const struct iphdr *iph, const struct tcphdr *tcph,
55 int len, struct net_lro_desc *lro_desc) 55 int len, const struct net_lro_desc *lro_desc)
56{ 56{
57 /* check ip header: don't aggregate padded frames */ 57 /* check ip header: don't aggregate padded frames */
58 if (ntohs(iph->tot_len) != len) 58 if (ntohs(iph->tot_len) != len)
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index dd1b20eca1a2..9df4e635fb5f 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -354,7 +354,8 @@ static void inetpeer_free_rcu(struct rcu_head *head)
354} 354}
355 355
356/* May be called with local BH enabled. */ 356/* May be called with local BH enabled. */
357static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base) 357static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base,
358 struct inet_peer __rcu **stack[PEER_MAXDEPTH])
358{ 359{
359 int do_free; 360 int do_free;
360 361
@@ -368,7 +369,6 @@ static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base)
368 * We use refcnt=-1 to alert lockless readers this entry is deleted. 369 * We use refcnt=-1 to alert lockless readers this entry is deleted.
369 */ 370 */
370 if (atomic_cmpxchg(&p->refcnt, 1, -1) == 1) { 371 if (atomic_cmpxchg(&p->refcnt, 1, -1) == 1) {
371 struct inet_peer __rcu **stack[PEER_MAXDEPTH];
372 struct inet_peer __rcu ***stackptr, ***delp; 372 struct inet_peer __rcu ***stackptr, ***delp;
373 if (lookup(&p->daddr, stack, base) != p) 373 if (lookup(&p->daddr, stack, base) != p)
374 BUG(); 374 BUG();
@@ -422,7 +422,7 @@ static struct inet_peer_base *peer_to_base(struct inet_peer *p)
422} 422}
423 423
424/* May be called with local BH enabled. */ 424/* May be called with local BH enabled. */
425static int cleanup_once(unsigned long ttl) 425static int cleanup_once(unsigned long ttl, struct inet_peer __rcu **stack[PEER_MAXDEPTH])
426{ 426{
427 struct inet_peer *p = NULL; 427 struct inet_peer *p = NULL;
428 428
@@ -454,7 +454,7 @@ static int cleanup_once(unsigned long ttl)
454 * happen because of entry limits in route cache. */ 454 * happen because of entry limits in route cache. */
455 return -1; 455 return -1;
456 456
457 unlink_from_pool(p, peer_to_base(p)); 457 unlink_from_pool(p, peer_to_base(p), stack);
458 return 0; 458 return 0;
459} 459}
460 460
@@ -524,7 +524,7 @@ struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
524 524
525 if (base->total >= inet_peer_threshold) 525 if (base->total >= inet_peer_threshold)
526 /* Remove one less-recently-used entry. */ 526 /* Remove one less-recently-used entry. */
527 cleanup_once(0); 527 cleanup_once(0, stack);
528 528
529 return p; 529 return p;
530} 530}
@@ -540,6 +540,7 @@ static void peer_check_expire(unsigned long dummy)
540{ 540{
541 unsigned long now = jiffies; 541 unsigned long now = jiffies;
542 int ttl, total; 542 int ttl, total;
543 struct inet_peer __rcu **stack[PEER_MAXDEPTH];
543 544
544 total = compute_total(); 545 total = compute_total();
545 if (total >= inet_peer_threshold) 546 if (total >= inet_peer_threshold)
@@ -548,7 +549,7 @@ static void peer_check_expire(unsigned long dummy)
548 ttl = inet_peer_maxttl 549 ttl = inet_peer_maxttl
549 - (inet_peer_maxttl - inet_peer_minttl) / HZ * 550 - (inet_peer_maxttl - inet_peer_minttl) / HZ *
550 total / inet_peer_threshold * HZ; 551 total / inet_peer_threshold * HZ;
551 while (!cleanup_once(ttl)) { 552 while (!cleanup_once(ttl, stack)) {
552 if (jiffies != now) 553 if (jiffies != now)
553 break; 554 break;
554 } 555 }
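Hoisting the PEER_MAXDEPTH lookup stack out of unlink_from_pool() trims a sizable array from a leaf stack frame and lets peer_check_expire() reuse one allocation across its entire eviction loop. The resulting call pattern, condensed from the hunks above:

	struct inet_peer __rcu **stack[PEER_MAXDEPTH];	/* allocated once */
	unsigned long now = jiffies;

	/* every cleanup_once() reuses the caller's stack instead of
	 * carving out its own on each iteration */
	while (!cleanup_once(ttl, stack)) {
		if (jiffies != now)
			break;		/* spent our jiffy; stop evicting */
	}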
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index da5941f18c3c..24efd353279a 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -462,7 +462,7 @@ static void ipgre_err(struct sk_buff *skb, u32 info)
 462 by themselves??? 462 by themselves???
463 */ 463 */
464 464
465 struct iphdr *iph = (struct iphdr *)skb->data; 465 const struct iphdr *iph = (const struct iphdr *)skb->data;
466 __be16 *p = (__be16*)(skb->data+(iph->ihl<<2)); 466 __be16 *p = (__be16*)(skb->data+(iph->ihl<<2));
467 int grehlen = (iph->ihl<<2) + 4; 467 int grehlen = (iph->ihl<<2) + 4;
468 const int type = icmp_hdr(skb)->type; 468 const int type = icmp_hdr(skb)->type;
@@ -534,7 +534,7 @@ out:
534 rcu_read_unlock(); 534 rcu_read_unlock();
535} 535}
536 536
537static inline void ipgre_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb) 537static inline void ipgre_ecn_decapsulate(const struct iphdr *iph, struct sk_buff *skb)
538{ 538{
539 if (INET_ECN_is_ce(iph->tos)) { 539 if (INET_ECN_is_ce(iph->tos)) {
540 if (skb->protocol == htons(ETH_P_IP)) { 540 if (skb->protocol == htons(ETH_P_IP)) {
@@ -546,19 +546,19 @@ static inline void ipgre_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb)
546} 546}
547 547
548static inline u8 548static inline u8
549ipgre_ecn_encapsulate(u8 tos, struct iphdr *old_iph, struct sk_buff *skb) 549ipgre_ecn_encapsulate(u8 tos, const struct iphdr *old_iph, struct sk_buff *skb)
550{ 550{
551 u8 inner = 0; 551 u8 inner = 0;
552 if (skb->protocol == htons(ETH_P_IP)) 552 if (skb->protocol == htons(ETH_P_IP))
553 inner = old_iph->tos; 553 inner = old_iph->tos;
554 else if (skb->protocol == htons(ETH_P_IPV6)) 554 else if (skb->protocol == htons(ETH_P_IPV6))
555 inner = ipv6_get_dsfield((struct ipv6hdr *)old_iph); 555 inner = ipv6_get_dsfield((const struct ipv6hdr *)old_iph);
556 return INET_ECN_encapsulate(tos, inner); 556 return INET_ECN_encapsulate(tos, inner);
557} 557}
558 558
559static int ipgre_rcv(struct sk_buff *skb) 559static int ipgre_rcv(struct sk_buff *skb)
560{ 560{
561 struct iphdr *iph; 561 const struct iphdr *iph;
562 u8 *h; 562 u8 *h;
563 __be16 flags; 563 __be16 flags;
564 __sum16 csum = 0; 564 __sum16 csum = 0;
@@ -697,8 +697,8 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
697{ 697{
698 struct ip_tunnel *tunnel = netdev_priv(dev); 698 struct ip_tunnel *tunnel = netdev_priv(dev);
699 struct pcpu_tstats *tstats; 699 struct pcpu_tstats *tstats;
700 struct iphdr *old_iph = ip_hdr(skb); 700 const struct iphdr *old_iph = ip_hdr(skb);
701 struct iphdr *tiph; 701 const struct iphdr *tiph;
702 u8 tos; 702 u8 tos;
703 __be16 df; 703 __be16 df;
704 struct rtable *rt; /* Route to the other host */ 704 struct rtable *rt; /* Route to the other host */
@@ -714,7 +714,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
714 714
715 if (dev->header_ops && dev->type == ARPHRD_IPGRE) { 715 if (dev->header_ops && dev->type == ARPHRD_IPGRE) {
716 gre_hlen = 0; 716 gre_hlen = 0;
717 tiph = (struct iphdr *)skb->data; 717 tiph = (const struct iphdr *)skb->data;
718 } else { 718 } else {
719 gre_hlen = tunnel->hlen; 719 gre_hlen = tunnel->hlen;
720 tiph = &tunnel->parms.iph; 720 tiph = &tunnel->parms.iph;
@@ -735,14 +735,14 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
735 } 735 }
736#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 736#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
737 else if (skb->protocol == htons(ETH_P_IPV6)) { 737 else if (skb->protocol == htons(ETH_P_IPV6)) {
738 struct in6_addr *addr6; 738 const struct in6_addr *addr6;
739 int addr_type; 739 int addr_type;
740 struct neighbour *neigh = skb_dst(skb)->neighbour; 740 struct neighbour *neigh = skb_dst(skb)->neighbour;
741 741
742 if (neigh == NULL) 742 if (neigh == NULL)
743 goto tx_error; 743 goto tx_error;
744 744
745 addr6 = (struct in6_addr *)&neigh->primary_key; 745 addr6 = (const struct in6_addr *)&neigh->primary_key;
746 addr_type = ipv6_addr_type(addr6); 746 addr_type = ipv6_addr_type(addr6);
747 747
748 if (addr_type == IPV6_ADDR_ANY) { 748 if (addr_type == IPV6_ADDR_ANY) {
@@ -766,7 +766,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
766 if (skb->protocol == htons(ETH_P_IP)) 766 if (skb->protocol == htons(ETH_P_IP))
767 tos = old_iph->tos; 767 tos = old_iph->tos;
768 else if (skb->protocol == htons(ETH_P_IPV6)) 768 else if (skb->protocol == htons(ETH_P_IPV6))
769 tos = ipv6_get_dsfield((struct ipv6hdr *)old_iph); 769 tos = ipv6_get_dsfield((const struct ipv6hdr *)old_iph);
770 } 770 }
771 771
772 rt = ip_route_output_gre(dev_net(dev), dst, tiph->saddr, 772 rt = ip_route_output_gre(dev_net(dev), dst, tiph->saddr,
@@ -881,7 +881,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
881 iph->ttl = old_iph->ttl; 881 iph->ttl = old_iph->ttl;
882#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 882#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
883 else if (skb->protocol == htons(ETH_P_IPV6)) 883 else if (skb->protocol == htons(ETH_P_IPV6))
884 iph->ttl = ((struct ipv6hdr *)old_iph)->hop_limit; 884 iph->ttl = ((const struct ipv6hdr *)old_iph)->hop_limit;
885#endif 885#endif
886 else 886 else
887 iph->ttl = ip4_dst_hoplimit(&rt->dst); 887 iph->ttl = ip4_dst_hoplimit(&rt->dst);
@@ -927,7 +927,7 @@ static int ipgre_tunnel_bind_dev(struct net_device *dev)
927{ 927{
928 struct net_device *tdev = NULL; 928 struct net_device *tdev = NULL;
929 struct ip_tunnel *tunnel; 929 struct ip_tunnel *tunnel;
930 struct iphdr *iph; 930 const struct iphdr *iph;
931 int hlen = LL_MAX_HEADER; 931 int hlen = LL_MAX_HEADER;
932 int mtu = ETH_DATA_LEN; 932 int mtu = ETH_DATA_LEN;
933 int addend = sizeof(struct iphdr) + 4; 933 int addend = sizeof(struct iphdr) + 4;
@@ -1180,7 +1180,7 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
1180 1180
1181static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr) 1181static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
1182{ 1182{
1183 struct iphdr *iph = (struct iphdr *) skb_mac_header(skb); 1183 const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb);
1184 memcpy(haddr, &iph->saddr, 4); 1184 memcpy(haddr, &iph->saddr, 4);
1185 return 4; 1185 return 4;
1186} 1186}
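The ip_gre.c changes are mechanical constification: every read-only view of an IP header becomes const struct iphdr *. The benefit is compiler-enforced immutability on the receive and error paths, e.g. (illustrative only):

	const struct iphdr *iph = (const struct iphdr *)skb->data;
	u8 tos = iph->tos;	/* reads compile exactly as before */
	iph->ttl--;		/* now rejected at compile time:
				 * assignment to read-only object */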
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index d7b2b0987a3b..c8f48efc5fd3 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -268,7 +268,7 @@ int ip_local_deliver(struct sk_buff *skb)
268static inline int ip_rcv_options(struct sk_buff *skb) 268static inline int ip_rcv_options(struct sk_buff *skb)
269{ 269{
270 struct ip_options *opt; 270 struct ip_options *opt;
271 struct iphdr *iph; 271 const struct iphdr *iph;
272 struct net_device *dev = skb->dev; 272 struct net_device *dev = skb->dev;
273 273
274 /* It looks as overkill, because not all 274 /* It looks as overkill, because not all
@@ -374,7 +374,7 @@ drop:
374 */ 374 */
375int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) 375int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
376{ 376{
377 struct iphdr *iph; 377 const struct iphdr *iph;
378 u32 len; 378 u32 len;
379 379
380 /* When the interface is in promisc. mode, drop all the crap 380 /* When the interface is in promisc. mode, drop all the crap
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 28a736f3442f..2391b24e8251 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -329,7 +329,7 @@ int ip_options_compile(struct net *net,
329 pp_ptr = optptr + 2; 329 pp_ptr = optptr + 2;
330 goto error; 330 goto error;
331 } 331 }
332 if (skb) { 332 if (rt) {
333 memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4); 333 memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4);
334 opt->is_changed = 1; 334 opt->is_changed = 1;
335 } 335 }
@@ -371,7 +371,7 @@ int ip_options_compile(struct net *net,
371 goto error; 371 goto error;
372 } 372 }
373 opt->ts = optptr - iph; 373 opt->ts = optptr - iph;
374 if (skb) { 374 if (rt) {
375 memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4); 375 memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4);
376 timeptr = (__be32*)&optptr[optptr[2]+3]; 376 timeptr = (__be32*)&optptr[optptr[2]+3];
377 } 377 }
@@ -603,7 +603,7 @@ int ip_options_rcv_srr(struct sk_buff *skb)
603 unsigned long orefdst; 603 unsigned long orefdst;
604 int err; 604 int err;
605 605
606 if (!opt->srr) 606 if (!opt->srr || !rt)
607 return 0; 607 return 0;
608 608
609 if (skb->pkt_type != PACKET_HOST) 609 if (skb->pkt_type != PACKET_HOST)
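All three ip_options.c hunks fix the same latent NULL dereference: the code previously tested skb before touching rt->rt_spec_dst, but having an skb does not guarantee an attached route, so the guard must be on rt itself (and ip_options_rcv_srr() must bail out early when there is no route). The corrected pattern in isolation (sketch):

	struct rtable *rt = skb ? skb_rtable(skb) : NULL;

	/* a non-NULL skb does not guarantee a route; only record the
	 * route-specific destination when one actually exists */
	if (rt) {
		memcpy(&optptr[optptr[2] - 1], &rt->rt_spec_dst, 4);
		opt->is_changed = 1;
	}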
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 67f241b97649..bdad3d60aa82 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -603,7 +603,7 @@ slow_path:
603 /* IF: it doesn't fit, use 'mtu' - the data space left */ 603 /* IF: it doesn't fit, use 'mtu' - the data space left */
604 if (len > mtu) 604 if (len > mtu)
605 len = mtu; 605 len = mtu;
606 /* IF: we are not sending upto and including the packet end 606 /* IF: we are not sending up to and including the packet end
607 then align the next start on an eight byte boundary */ 607 then align the next start on an eight byte boundary */
608 if (len < left) { 608 if (len < left) {
609 len &= ~7; 609 len &= ~7;
@@ -1474,16 +1474,14 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *ar
1474 } 1474 }
1475 1475
1476 { 1476 {
1477 struct flowi4 fl4 = { 1477 struct flowi4 fl4;
1478 .flowi4_oif = arg->bound_dev_if, 1478
1479 .daddr = daddr, 1479 flowi4_init_output(&fl4, arg->bound_dev_if, 0,
1480 .saddr = rt->rt_spec_dst, 1480 RT_TOS(ip_hdr(skb)->tos),
1481 .flowi4_tos = RT_TOS(ip_hdr(skb)->tos), 1481 RT_SCOPE_UNIVERSE, sk->sk_protocol,
1482 .fl4_sport = tcp_hdr(skb)->dest, 1482 ip_reply_arg_flowi_flags(arg),
1483 .fl4_dport = tcp_hdr(skb)->source, 1483 daddr, rt->rt_spec_dst,
1484 .flowi4_proto = sk->sk_protocol, 1484 tcp_hdr(skb)->source, tcp_hdr(skb)->dest);
1485 .flowi4_flags = ip_reply_arg_flowi_flags(arg),
1486 };
1487 security_skb_classify_flow(skb, flowi4_to_flowi(&fl4)); 1485 security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
1488 rt = ip_route_output_key(sock_net(sk), &fl4); 1486 rt = ip_route_output_key(sock_net(sk), &fl4);
1489 if (IS_ERR(rt)) 1487 if (IS_ERR(rt))
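Same flowi4_init_output() conversion as in inet_csk_route_req(), with one detail worth annotating: since this builds a reply, the TCP ports swap roles relative to the incoming segment. An annotated restatement of the call above (comments are editorial, assuming the argument order sketched earlier):

	flowi4_init_output(&fl4, arg->bound_dev_if, 0,
			   RT_TOS(ip_hdr(skb)->tos), RT_SCOPE_UNIVERSE,
			   sk->sk_protocol, ip_reply_arg_flowi_flags(arg),
			   daddr,			/* their source addr   */
			   rt->rt_spec_dst,		/* our reply source    */
			   tcp_hdr(skb)->source,	/* their sport -> dport */
			   tcp_hdr(skb)->dest);		/* their dport -> sport */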
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 3948c86e59ca..9640900309bb 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -131,7 +131,7 @@ static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
131static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb) 131static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
132{ 132{
133 struct sockaddr_in sin; 133 struct sockaddr_in sin;
134 struct iphdr *iph = ip_hdr(skb); 134 const struct iphdr *iph = ip_hdr(skb);
135 __be16 *ports = (__be16 *)skb_transport_header(skb); 135 __be16 *ports = (__be16 *)skb_transport_header(skb);
136 136
137 if (skb_transport_offset(skb) + 4 > skb->len) 137 if (skb_transport_offset(skb) + 4 > skb->len)
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index 629067571f02..c857f6f49b03 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -27,7 +27,7 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
27{ 27{
28 struct net *net = dev_net(skb->dev); 28 struct net *net = dev_net(skb->dev);
29 __be32 spi; 29 __be32 spi;
30 struct iphdr *iph = (struct iphdr *)skb->data; 30 const struct iphdr *iph = (const struct iphdr *)skb->data;
31 struct ip_comp_hdr *ipch = (struct ip_comp_hdr *)(skb->data+(iph->ihl<<2)); 31 struct ip_comp_hdr *ipch = (struct ip_comp_hdr *)(skb->data+(iph->ihl<<2));
32 struct xfrm_state *x; 32 struct xfrm_state *x;
33 33
@@ -36,7 +36,7 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
36 return; 36 return;
37 37
38 spi = htonl(ntohs(ipch->cpi)); 38 spi = htonl(ntohs(ipch->cpi));
39 x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, 39 x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
40 spi, IPPROTO_COMP, AF_INET); 40 spi, IPPROTO_COMP, AF_INET);
41 if (!x) 41 if (!x)
42 return; 42 return;
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 2b097752426b..cbff2ecccf3d 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -1444,7 +1444,7 @@ static int __init ip_auto_config(void)
1444 root_server_addr = addr; 1444 root_server_addr = addr;
1445 1445
1446 /* 1446 /*
1447 * Use defaults whereever applicable. 1447 * Use defaults wherever applicable.
1448 */ 1448 */
1449 if (ic_defaults() < 0) 1449 if (ic_defaults() < 0)
1450 return -1; 1450 return -1;
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index bfc17c5914e7..ef16377ec73f 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -319,7 +319,7 @@ static int ipip_err(struct sk_buff *skb, u32 info)
319 8 bytes of packet payload. It means, that precise relaying of 319 8 bytes of packet payload. It means, that precise relaying of
320 ICMP in the real Internet is absolutely infeasible. 320 ICMP in the real Internet is absolutely infeasible.
321 */ 321 */
322 struct iphdr *iph = (struct iphdr *)skb->data; 322 const struct iphdr *iph = (const struct iphdr *)skb->data;
323 const int type = icmp_hdr(skb)->type; 323 const int type = icmp_hdr(skb)->type;
324 const int code = icmp_hdr(skb)->code; 324 const int code = icmp_hdr(skb)->code;
325 struct ip_tunnel *t; 325 struct ip_tunnel *t;
@@ -433,12 +433,12 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
433{ 433{
434 struct ip_tunnel *tunnel = netdev_priv(dev); 434 struct ip_tunnel *tunnel = netdev_priv(dev);
435 struct pcpu_tstats *tstats; 435 struct pcpu_tstats *tstats;
436 struct iphdr *tiph = &tunnel->parms.iph; 436 const struct iphdr *tiph = &tunnel->parms.iph;
437 u8 tos = tunnel->parms.iph.tos; 437 u8 tos = tunnel->parms.iph.tos;
438 __be16 df = tiph->frag_off; 438 __be16 df = tiph->frag_off;
439 struct rtable *rt; /* Route to the other host */ 439 struct rtable *rt; /* Route to the other host */
440 struct net_device *tdev; /* Device to other host */ 440 struct net_device *tdev; /* Device to other host */
441 struct iphdr *old_iph = ip_hdr(skb); 441 const struct iphdr *old_iph = ip_hdr(skb);
442 struct iphdr *iph; /* Our new IP header */ 442 struct iphdr *iph; /* Our new IP header */
443 unsigned int max_headroom; /* The extra header space needed */ 443 unsigned int max_headroom; /* The extra header space needed */
444 __be32 dst = tiph->daddr; 444 __be32 dst = tiph->daddr;
@@ -572,7 +572,7 @@ static void ipip_tunnel_bind_dev(struct net_device *dev)
572{ 572{
573 struct net_device *tdev = NULL; 573 struct net_device *tdev = NULL;
574 struct ip_tunnel *tunnel; 574 struct ip_tunnel *tunnel;
575 struct iphdr *iph; 575 const struct iphdr *iph;
576 576
577 tunnel = netdev_priv(dev); 577 tunnel = netdev_priv(dev);
578 iph = &tunnel->parms.iph; 578 iph = &tunnel->parms.iph;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 1f62eaeb6de4..c81b9b661d26 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1549,7 +1549,7 @@ static struct notifier_block ip_mr_notifier = {
1549static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr) 1549static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
1550{ 1550{
1551 struct iphdr *iph; 1551 struct iphdr *iph;
1552 struct iphdr *old_iph = ip_hdr(skb); 1552 const struct iphdr *old_iph = ip_hdr(skb);
1553 1553
1554 skb_push(skb, sizeof(struct iphdr)); 1554 skb_push(skb, sizeof(struct iphdr));
1555 skb->transport_header = skb->network_header; 1555 skb->transport_header = skb->network_header;
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index f3c0b549b8e1..4614babdc45f 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -221,9 +221,10 @@ static __sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook,
221 return csum; 221 return csum;
222} 222}
223 223
224static int nf_ip_route(struct dst_entry **dst, struct flowi *fl) 224static int nf_ip_route(struct net *net, struct dst_entry **dst,
225 struct flowi *fl, bool strict __always_unused)
225{ 226{
226 struct rtable *rt = ip_route_output_key(&init_net, &fl->u.ip4); 227 struct rtable *rt = ip_route_output_key(net, &fl->u.ip4);
227 if (IS_ERR(rt)) 228 if (IS_ERR(rt))
228 return PTR_ERR(rt); 229 return PTR_ERR(rt);
229 *dst = &rt->dst; 230 *dst = &rt->dst;
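nf_ip_route() used to resolve every lookup in init_net; the new signature threads the caller's namespace through (the strict flag exists for parity with the IPv6 side and is unused here, hence __always_unused). A hypothetical caller, sketched under the assumption that it derives the namespace from the packet's device:

	struct flowi fl;		/* prepared by the caller */
	struct dst_entry *dst;

	/* route in the packet's own namespace, not init_net */
	if (nf_ip_route(dev_net(skb->dev), &dst, &fl, false) == 0)
		skb_dst_set(skb, dst);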
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 4b5d457c2d76..fd7a3f68917f 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -76,7 +76,7 @@ static inline int arp_devaddr_compare(const struct arpt_devaddr_info *ap,
76} 76}
77 77
78/* 78/*
79 * Unfortunatly, _b and _mask are not aligned to an int (or long int) 79 * Unfortunately, _b and _mask are not aligned to an int (or long int)
 80 * Some arches don't care, unrolling the loop is a win on them. 80 * Some arches don't care, unrolling the loop is a win on them.
 81 * For other arches, we only have a 16bit alignment. 81 * For other arches, we only have a 16bit alignment.
82 */ 82 */
@@ -260,6 +260,7 @@ unsigned int arpt_do_table(struct sk_buff *skb,
260 void *table_base; 260 void *table_base;
261 const struct xt_table_info *private; 261 const struct xt_table_info *private;
262 struct xt_action_param acpar; 262 struct xt_action_param acpar;
263 unsigned int addend;
263 264
264 if (!pskb_may_pull(skb, arp_hdr_len(skb->dev))) 265 if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
265 return NF_DROP; 266 return NF_DROP;
@@ -267,7 +268,8 @@ unsigned int arpt_do_table(struct sk_buff *skb,
267 indev = in ? in->name : nulldevname; 268 indev = in ? in->name : nulldevname;
268 outdev = out ? out->name : nulldevname; 269 outdev = out ? out->name : nulldevname;
269 270
270 xt_info_rdlock_bh(); 271 local_bh_disable();
272 addend = xt_write_recseq_begin();
271 private = table->private; 273 private = table->private;
272 table_base = private->entries[smp_processor_id()]; 274 table_base = private->entries[smp_processor_id()];
273 275
@@ -338,7 +340,8 @@ unsigned int arpt_do_table(struct sk_buff *skb,
338 /* Verdict */ 340 /* Verdict */
339 break; 341 break;
340 } while (!acpar.hotdrop); 342 } while (!acpar.hotdrop);
341 xt_info_rdunlock_bh(); 343 xt_write_recseq_end(addend);
344 local_bh_enable();
342 345
343 if (acpar.hotdrop) 346 if (acpar.hotdrop)
344 return NF_DROP; 347 return NF_DROP;
@@ -712,7 +715,7 @@ static void get_counters(const struct xt_table_info *t,
712 unsigned int i; 715 unsigned int i;
713 716
714 for_each_possible_cpu(cpu) { 717 for_each_possible_cpu(cpu) {
715 seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock; 718 seqcount_t *s = &per_cpu(xt_recseq, cpu);
716 719
717 i = 0; 720 i = 0;
718 xt_entry_foreach(iter, t->entries[cpu], t->size) { 721 xt_entry_foreach(iter, t->entries[cpu], t->size) {
@@ -720,10 +723,10 @@ static void get_counters(const struct xt_table_info *t,
720 unsigned int start; 723 unsigned int start;
721 724
722 do { 725 do {
723 start = read_seqbegin(lock); 726 start = read_seqcount_begin(s);
724 bcnt = iter->counters.bcnt; 727 bcnt = iter->counters.bcnt;
725 pcnt = iter->counters.pcnt; 728 pcnt = iter->counters.pcnt;
726 } while (read_seqretry(lock, start)); 729 } while (read_seqcount_retry(s, start));
727 730
728 ADD_COUNTER(counters[i], bcnt, pcnt); 731 ADD_COUNTER(counters[i], bcnt, pcnt);
729 ++i; 732 ++i;
@@ -1115,6 +1118,7 @@ static int do_add_counters(struct net *net, const void __user *user,
1115 int ret = 0; 1118 int ret = 0;
1116 void *loc_cpu_entry; 1119 void *loc_cpu_entry;
1117 struct arpt_entry *iter; 1120 struct arpt_entry *iter;
1121 unsigned int addend;
1118#ifdef CONFIG_COMPAT 1122#ifdef CONFIG_COMPAT
1119 struct compat_xt_counters_info compat_tmp; 1123 struct compat_xt_counters_info compat_tmp;
1120 1124
@@ -1171,12 +1175,12 @@ static int do_add_counters(struct net *net, const void __user *user,
1171 /* Choose the copy that is on our node */ 1175 /* Choose the copy that is on our node */
1172 curcpu = smp_processor_id(); 1176 curcpu = smp_processor_id();
1173 loc_cpu_entry = private->entries[curcpu]; 1177 loc_cpu_entry = private->entries[curcpu];
1174 xt_info_wrlock(curcpu); 1178 addend = xt_write_recseq_begin();
1175 xt_entry_foreach(iter, loc_cpu_entry, private->size) { 1179 xt_entry_foreach(iter, loc_cpu_entry, private->size) {
1176 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt); 1180 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
1177 ++i; 1181 ++i;
1178 } 1182 }
1179 xt_info_wrunlock(curcpu); 1183 xt_write_recseq_end(addend);
1180 unlock_up_free: 1184 unlock_up_free:
1181 local_bh_enable(); 1185 local_bh_enable();
1182 xt_table_unlock(t); 1186 xt_table_unlock(t);
@@ -1874,7 +1878,7 @@ static int __init arp_tables_init(void)
1874 if (ret < 0) 1878 if (ret < 0)
1875 goto err1; 1879 goto err1;
1876 1880
1877 /* Noone else will be downing sem now, so we won't sleep */ 1881 /* No one else will be downing sem now, so we won't sleep */
1878 ret = xt_register_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg)); 1882 ret = xt_register_targets(arpt_builtin_tg, ARRAY_SIZE(arpt_builtin_tg));
1879 if (ret < 0) 1883 if (ret < 0)
1880 goto err2; 1884 goto err2;
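The locking rework in this file (and in ip_tables.c below) replaces the per-cpu xt_info_locks seqlock with a bare per-cpu seqcount_t, xt_recseq. Packet processing no longer takes a read lock at all; instead each writer bumps the sequence, and re-entrancy (iptables invoked from within iptables) is handled by incrementing only when the low bit is clear. A sketch of how the begin/end primitives can be built, consistent with the call sites above (an assumption, not the verbatim header):

	DECLARE_PER_CPU(seqcount_t, xt_recseq);

	static inline unsigned int xt_write_recseq_begin(void)
	{
		unsigned int addend;

		/* low bit set => this CPU is already inside a write
		 * section; add 0 so the nested section is a no-op */
		addend = (__this_cpu_read(xt_recseq.sequence) + 1) & 1;
		__this_cpu_add(xt_recseq.sequence, addend);
		smp_wmb();
		return addend;	/* handed back to ..._end() */
	}

	static inline void xt_write_recseq_end(unsigned int addend)
	{
		smp_wmb();
		__this_cpu_add(xt_recseq.sequence, addend);
	}

The pairing with local_bh_disable()/local_bh_enable() in the hunks is load-bearing: the sequence is per-cpu, so the writer must not migrate or be interrupted by another softirq user mid-section.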
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index ffcea0d1678e..764743843503 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -68,15 +68,6 @@ void *ipt_alloc_initial_table(const struct xt_table *info)
68} 68}
69EXPORT_SYMBOL_GPL(ipt_alloc_initial_table); 69EXPORT_SYMBOL_GPL(ipt_alloc_initial_table);
70 70
71/*
72 We keep a set of rules for each CPU, so we can avoid write-locking
73 them in the softirq when updating the counters and therefore
74 only need to read-lock in the softirq; doing a write_lock_bh() in user
75 context stops packets coming through and allows user context to read
76 the counters or update the rules.
77
78 Hence the start of any table is given by get_table() below. */
79
80/* Returns whether matches rule or not. */ 71/* Returns whether matches rule or not. */
81/* Performance critical - called for every packet */ 72/* Performance critical - called for every packet */
82static inline bool 73static inline bool
@@ -311,6 +302,7 @@ ipt_do_table(struct sk_buff *skb,
311 unsigned int *stackptr, origptr, cpu; 302 unsigned int *stackptr, origptr, cpu;
312 const struct xt_table_info *private; 303 const struct xt_table_info *private;
313 struct xt_action_param acpar; 304 struct xt_action_param acpar;
305 unsigned int addend;
314 306
315 /* Initialization */ 307 /* Initialization */
316 ip = ip_hdr(skb); 308 ip = ip_hdr(skb);
@@ -331,7 +323,8 @@ ipt_do_table(struct sk_buff *skb,
331 acpar.hooknum = hook; 323 acpar.hooknum = hook;
332 324
333 IP_NF_ASSERT(table->valid_hooks & (1 << hook)); 325 IP_NF_ASSERT(table->valid_hooks & (1 << hook));
334 xt_info_rdlock_bh(); 326 local_bh_disable();
327 addend = xt_write_recseq_begin();
335 private = table->private; 328 private = table->private;
336 cpu = smp_processor_id(); 329 cpu = smp_processor_id();
337 table_base = private->entries[cpu]; 330 table_base = private->entries[cpu];
@@ -430,7 +423,9 @@ ipt_do_table(struct sk_buff *skb,
430 pr_debug("Exiting %s; resetting sp from %u to %u\n", 423 pr_debug("Exiting %s; resetting sp from %u to %u\n",
431 __func__, *stackptr, origptr); 424 __func__, *stackptr, origptr);
432 *stackptr = origptr; 425 *stackptr = origptr;
433 xt_info_rdunlock_bh(); 426 xt_write_recseq_end(addend);
427 local_bh_enable();
428
434#ifdef DEBUG_ALLOW_ALL 429#ifdef DEBUG_ALLOW_ALL
435 return NF_ACCEPT; 430 return NF_ACCEPT;
436#else 431#else
@@ -886,7 +881,7 @@ get_counters(const struct xt_table_info *t,
886 unsigned int i; 881 unsigned int i;
887 882
888 for_each_possible_cpu(cpu) { 883 for_each_possible_cpu(cpu) {
889 seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock; 884 seqcount_t *s = &per_cpu(xt_recseq, cpu);
890 885
891 i = 0; 886 i = 0;
892 xt_entry_foreach(iter, t->entries[cpu], t->size) { 887 xt_entry_foreach(iter, t->entries[cpu], t->size) {
@@ -894,10 +889,10 @@ get_counters(const struct xt_table_info *t,
894 unsigned int start; 889 unsigned int start;
895 890
896 do { 891 do {
897 start = read_seqbegin(lock); 892 start = read_seqcount_begin(s);
898 bcnt = iter->counters.bcnt; 893 bcnt = iter->counters.bcnt;
899 pcnt = iter->counters.pcnt; 894 pcnt = iter->counters.pcnt;
900 } while (read_seqretry(lock, start)); 895 } while (read_seqcount_retry(s, start));
901 896
902 ADD_COUNTER(counters[i], bcnt, pcnt); 897 ADD_COUNTER(counters[i], bcnt, pcnt);
903 ++i; /* macro does multi eval of i */ 898 ++i; /* macro does multi eval of i */
@@ -1312,6 +1307,7 @@ do_add_counters(struct net *net, const void __user *user,
1312 int ret = 0; 1307 int ret = 0;
1313 void *loc_cpu_entry; 1308 void *loc_cpu_entry;
1314 struct ipt_entry *iter; 1309 struct ipt_entry *iter;
1310 unsigned int addend;
1315#ifdef CONFIG_COMPAT 1311#ifdef CONFIG_COMPAT
1316 struct compat_xt_counters_info compat_tmp; 1312 struct compat_xt_counters_info compat_tmp;
1317 1313
@@ -1368,12 +1364,12 @@ do_add_counters(struct net *net, const void __user *user,
1368 /* Choose the copy that is on our node */ 1364 /* Choose the copy that is on our node */
1369 curcpu = smp_processor_id(); 1365 curcpu = smp_processor_id();
1370 loc_cpu_entry = private->entries[curcpu]; 1366 loc_cpu_entry = private->entries[curcpu];
1371 xt_info_wrlock(curcpu); 1367 addend = xt_write_recseq_begin();
1372 xt_entry_foreach(iter, loc_cpu_entry, private->size) { 1368 xt_entry_foreach(iter, loc_cpu_entry, private->size) {
1373 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt); 1369 ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
1374 ++i; 1370 ++i;
1375 } 1371 }
1376 xt_info_wrunlock(curcpu); 1372 xt_write_recseq_end(addend);
1377 unlock_up_free: 1373 unlock_up_free:
1378 local_bh_enable(); 1374 local_bh_enable();
1379 xt_table_unlock(t); 1375 xt_table_unlock(t);
@@ -2233,7 +2229,7 @@ static int __init ip_tables_init(void)
2233 if (ret < 0) 2229 if (ret < 0)
2234 goto err1; 2230 goto err1;
2235 2231
2236 /* Noone else will be downing sem now, so we won't sleep */ 2232 /* No one else will be downing sem now, so we won't sleep */
2237 ret = xt_register_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg)); 2233 ret = xt_register_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
2238 if (ret < 0) 2234 if (ret < 0)
2239 goto err2; 2235 goto err2;
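The reader side of the same rework: counter snapshots no longer take any lock. The retry loop guarantees that the (bcnt, pcnt) pair comes from a single consistent update — if a packet bumped the per-cpu sequence mid-read, the snapshot is discarded and retried, and readers never stall packet processing. Annotated restatement of the loop in get_counters():

	seqcount_t *s = &per_cpu(xt_recseq, cpu);
	u64 bcnt, pcnt;
	unsigned int start;

	do {
		start = read_seqcount_begin(s);	/* sample the sequence */
		bcnt  = iter->counters.bcnt;
		pcnt  = iter->counters.pcnt;
	} while (read_seqcount_retry(s, start)); /* a writer raced us: retry */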
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index 21bcf471b25a..9c71b2755ce3 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -521,7 +521,7 @@ int nf_nat_protocol_register(const struct nf_nat_protocol *proto)
521} 521}
522EXPORT_SYMBOL(nf_nat_protocol_register); 522EXPORT_SYMBOL(nf_nat_protocol_register);
523 523
524/* Noone stores the protocol anywhere; simply delete it. */ 524/* No one stores the protocol anywhere; simply delete it. */
525void nf_nat_protocol_unregister(const struct nf_nat_protocol *proto) 525void nf_nat_protocol_unregister(const struct nf_nat_protocol *proto)
526{ 526{
527 spin_lock_bh(&nf_nat_lock); 527 spin_lock_bh(&nf_nat_lock);
@@ -532,7 +532,7 @@ void nf_nat_protocol_unregister(const struct nf_nat_protocol *proto)
532} 532}
533EXPORT_SYMBOL(nf_nat_protocol_unregister); 533EXPORT_SYMBOL(nf_nat_protocol_unregister);
534 534
535/* Noone using conntrack by the time this called. */ 535/* No one using conntrack by the time this called. */
536static void nf_nat_cleanup_conntrack(struct nf_conn *ct) 536static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
537{ 537{
538 struct nf_conn_nat *nat = nf_ct_ext_find(ct, NF_CT_EXT_NAT); 538 struct nf_conn_nat *nat = nf_ct_ext_find(ct, NF_CT_EXT_NAT);
diff --git a/net/ipv4/netfilter/nf_nat_helper.c b/net/ipv4/netfilter/nf_nat_helper.c
index 31427fb57aa8..99cfa28b6d38 100644
--- a/net/ipv4/netfilter/nf_nat_helper.c
+++ b/net/ipv4/netfilter/nf_nat_helper.c
@@ -153,7 +153,7 @@ void nf_nat_set_seq_adjust(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
153} 153}
154EXPORT_SYMBOL_GPL(nf_nat_set_seq_adjust); 154EXPORT_SYMBOL_GPL(nf_nat_set_seq_adjust);
155 155
156static void nf_nat_csum(struct sk_buff *skb, struct iphdr *iph, void *data, 156static void nf_nat_csum(struct sk_buff *skb, const struct iphdr *iph, void *data,
157 int datalen, __sum16 *check, int oldlen) 157 int datalen, __sum16 *check, int oldlen)
158{ 158{
159 struct rtable *rt = skb_rtable(skb); 159 struct rtable *rt = skb_rtable(skb);
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 2d3c72e5bbbf..abf14dbcb3b9 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -154,7 +154,7 @@ static __inline__ int icmp_filter(struct sock *sk, struct sk_buff *skb)
154 * RFC 1122: SHOULD pass TOS value up to the transport layer. 154 * RFC 1122: SHOULD pass TOS value up to the transport layer.
 155 * -> It does. And not only TOS, but the whole IP header. 155 * -> It does. And not only TOS, but the whole IP header.
156 */ 156 */
157static int raw_v4_input(struct sk_buff *skb, struct iphdr *iph, int hash) 157static int raw_v4_input(struct sk_buff *skb, const struct iphdr *iph, int hash)
158{ 158{
159 struct sock *sk; 159 struct sock *sk;
160 struct hlist_head *head; 160 struct hlist_head *head;
@@ -247,7 +247,7 @@ static void raw_err(struct sock *sk, struct sk_buff *skb, u32 info)
247 } 247 }
248 248
249 if (inet->recverr) { 249 if (inet->recverr) {
250 struct iphdr *iph = (struct iphdr *)skb->data; 250 const struct iphdr *iph = (const struct iphdr *)skb->data;
251 u8 *payload = skb->data + (iph->ihl << 2); 251 u8 *payload = skb->data + (iph->ihl << 2);
252 252
253 if (inet->hdrincl) 253 if (inet->hdrincl)
@@ -265,7 +265,7 @@ void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info)
265{ 265{
266 int hash; 266 int hash;
267 struct sock *raw_sk; 267 struct sock *raw_sk;
268 struct iphdr *iph; 268 const struct iphdr *iph;
269 struct net *net; 269 struct net *net;
270 270
271 hash = protocol & (RAW_HTABLE_SIZE - 1); 271 hash = protocol & (RAW_HTABLE_SIZE - 1);
@@ -273,7 +273,7 @@ void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info)
273 read_lock(&raw_v4_hashinfo.lock); 273 read_lock(&raw_v4_hashinfo.lock);
274 raw_sk = sk_head(&raw_v4_hashinfo.ht[hash]); 274 raw_sk = sk_head(&raw_v4_hashinfo.ht[hash]);
275 if (raw_sk != NULL) { 275 if (raw_sk != NULL) {
276 iph = (struct iphdr *)skb->data; 276 iph = (const struct iphdr *)skb->data;
277 net = dev_net(skb->dev); 277 net = dev_net(skb->dev);
278 278
279 while ((raw_sk = __raw_v4_lookup(net, raw_sk, protocol, 279 while ((raw_sk = __raw_v4_lookup(net, raw_sk, protocol,
@@ -281,7 +281,7 @@ void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info)
281 skb->dev->ifindex)) != NULL) { 281 skb->dev->ifindex)) != NULL) {
282 raw_err(raw_sk, skb, info); 282 raw_err(raw_sk, skb, info);
283 raw_sk = sk_next(raw_sk); 283 raw_sk = sk_next(raw_sk);
284 iph = (struct iphdr *)skb->data; 284 iph = (const struct iphdr *)skb->data;
285 } 285 }
286 } 286 }
287 read_unlock(&raw_v4_hashinfo.lock); 287 read_unlock(&raw_v4_hashinfo.lock);
@@ -548,17 +548,13 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
548 } 548 }
549 549
550 { 550 {
551 struct flowi4 fl4 = { 551 struct flowi4 fl4;
552 .flowi4_oif = ipc.oif, 552
553 .flowi4_mark = sk->sk_mark, 553 flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
554 .daddr = daddr, 554 RT_SCOPE_UNIVERSE,
555 .saddr = saddr, 555 inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
556 .flowi4_tos = tos, 556 FLOWI_FLAG_CAN_SLEEP, daddr, saddr, 0, 0);
557 .flowi4_proto = (inet->hdrincl ? 557
558 IPPROTO_RAW :
559 sk->sk_protocol),
560 .flowi4_flags = FLOWI_FLAG_CAN_SLEEP,
561 };
562 if (!inet->hdrincl) { 558 if (!inet->hdrincl) {
563 err = raw_probe_proto_opt(&fl4, msg); 559 err = raw_probe_proto_opt(&fl4, msg);
564 if (err) 560 if (err)
@@ -622,7 +618,7 @@ do_confirm:
622static void raw_close(struct sock *sk, long timeout) 618static void raw_close(struct sock *sk, long timeout)
623{ 619{
624 /* 620 /*
625 * Raw sockets may have direct kernel refereneces. Kill them. 621 * Raw sockets may have direct kernel references. Kill them.
626 */ 622 */
627 ip_ra_control(sk, 0, NULL); 623 ip_ra_control(sk, 0, NULL);
628 624
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 4b0c81180804..f4b7f806afd8 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -821,7 +821,7 @@ static int has_noalias(const struct rtable *head, const struct rtable *rth)
821} 821}
822 822
823/* 823/*
824 * Pertubation of rt_genid by a small quantity [1..256] 824 * Perturbation of rt_genid by a small quantity [1..256]
825 * Using 8 bits of shuffling ensure we can call rt_cache_invalidate() 825 * Using 8 bits of shuffling ensure we can call rt_cache_invalidate()
826 * many times (2^24) without giving recent rt_genid. 826 * many times (2^24) without giving recent rt_genid.
 827 * Jenkins hash is strong enough that little changes of rt_genid are OK. 827 * Jenkins hash is strong enough that little changes of rt_genid are OK.
@@ -1191,7 +1191,7 @@ restart:
1191#endif 1191#endif
1192 /* 1192 /*
1193 * Since lookup is lockfree, we must make sure 1193 * Since lookup is lockfree, we must make sure
1194 * previous writes to rt are comitted to memory 1194 * previous writes to rt are committed to memory
1195 * before making rt visible to other CPUS. 1195 * before making rt visible to other CPUS.
1196 */ 1196 */
1197 rcu_assign_pointer(rt_hash_table[hash].chain, rt); 1197 rcu_assign_pointer(rt_hash_table[hash].chain, rt);
@@ -1507,7 +1507,7 @@ static inline unsigned short guess_mtu(unsigned short old_mtu)
1507 return 68; 1507 return 68;
1508} 1508}
1509 1509
1510unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph, 1510unsigned short ip_rt_frag_needed(struct net *net, const struct iphdr *iph,
1511 unsigned short new_mtu, 1511 unsigned short new_mtu,
1512 struct net_device *dev) 1512 struct net_device *dev)
1513{ 1513{
@@ -1871,8 +1871,8 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1871 goto e_inval; 1871 goto e_inval;
1872 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK); 1872 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
1873 } else { 1873 } else {
1874 err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst, 1874 err = fib_validate_source(skb, saddr, 0, tos, 0, dev, &spec_dst,
1875 &itag, 0); 1875 &itag);
1876 if (err < 0) 1876 if (err < 0)
1877 goto e_err; 1877 goto e_err;
1878 } 1878 }
@@ -1891,6 +1891,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1891#ifdef CONFIG_IP_ROUTE_CLASSID 1891#ifdef CONFIG_IP_ROUTE_CLASSID
1892 rth->dst.tclassid = itag; 1892 rth->dst.tclassid = itag;
1893#endif 1893#endif
1894 rth->rt_route_iif = dev->ifindex;
1894 rth->rt_iif = dev->ifindex; 1895 rth->rt_iif = dev->ifindex;
1895 rth->dst.dev = init_net.loopback_dev; 1896 rth->dst.dev = init_net.loopback_dev;
1896 dev_hold(rth->dst.dev); 1897 dev_hold(rth->dst.dev);
@@ -1980,8 +1981,8 @@ static int __mkroute_input(struct sk_buff *skb,
1980 } 1981 }
1981 1982
1982 1983
1983 err = fib_validate_source(saddr, daddr, tos, FIB_RES_OIF(*res), 1984 err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
1984 in_dev->dev, &spec_dst, &itag, skb->mark); 1985 in_dev->dev, &spec_dst, &itag);
1985 if (err < 0) { 1986 if (err < 0) {
1986 ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr, 1987 ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
1987 saddr); 1988 saddr);
@@ -2026,6 +2027,7 @@ static int __mkroute_input(struct sk_buff *skb,
2026 rth->rt_key_src = saddr; 2027 rth->rt_key_src = saddr;
2027 rth->rt_src = saddr; 2028 rth->rt_src = saddr;
2028 rth->rt_gateway = daddr; 2029 rth->rt_gateway = daddr;
2030 rth->rt_route_iif = in_dev->dev->ifindex;
2029 rth->rt_iif = in_dev->dev->ifindex; 2031 rth->rt_iif = in_dev->dev->ifindex;
2030 rth->dst.dev = (out_dev)->dev; 2032 rth->dst.dev = (out_dev)->dev;
2031 dev_hold(rth->dst.dev); 2033 dev_hold(rth->dst.dev);
@@ -2148,9 +2150,9 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2148 goto brd_input; 2150 goto brd_input;
2149 2151
2150 if (res.type == RTN_LOCAL) { 2152 if (res.type == RTN_LOCAL) {
2151 err = fib_validate_source(saddr, daddr, tos, 2153 err = fib_validate_source(skb, saddr, daddr, tos,
2152 net->loopback_dev->ifindex, 2154 net->loopback_dev->ifindex,
2153 dev, &spec_dst, &itag, skb->mark); 2155 dev, &spec_dst, &itag);
2154 if (err < 0) 2156 if (err < 0)
2155 goto martian_source_keep_err; 2157 goto martian_source_keep_err;
2156 if (err) 2158 if (err)
@@ -2174,8 +2176,8 @@ brd_input:
2174 if (ipv4_is_zeronet(saddr)) 2176 if (ipv4_is_zeronet(saddr))
2175 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK); 2177 spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
2176 else { 2178 else {
2177 err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst, 2179 err = fib_validate_source(skb, saddr, 0, tos, 0, dev, &spec_dst,
2178 &itag, skb->mark); 2180 &itag);
2179 if (err < 0) 2181 if (err < 0)
2180 goto martian_source_keep_err; 2182 goto martian_source_keep_err;
2181 if (err) 2183 if (err)
@@ -2202,6 +2204,7 @@ local_input:
2202#ifdef CONFIG_IP_ROUTE_CLASSID 2204#ifdef CONFIG_IP_ROUTE_CLASSID
2203 rth->dst.tclassid = itag; 2205 rth->dst.tclassid = itag;
2204#endif 2206#endif
2207 rth->rt_route_iif = dev->ifindex;
2205 rth->rt_iif = dev->ifindex; 2208 rth->rt_iif = dev->ifindex;
2206 rth->dst.dev = net->loopback_dev; 2209 rth->dst.dev = net->loopback_dev;
2207 dev_hold(rth->dst.dev); 2210 dev_hold(rth->dst.dev);
@@ -2401,7 +2404,8 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
2401 rth->rt_mark = oldflp4->flowi4_mark; 2404 rth->rt_mark = oldflp4->flowi4_mark;
2402 rth->rt_dst = fl4->daddr; 2405 rth->rt_dst = fl4->daddr;
2403 rth->rt_src = fl4->saddr; 2406 rth->rt_src = fl4->saddr;
2404 rth->rt_iif = 0; 2407 rth->rt_route_iif = 0;
2408 rth->rt_iif = oldflp4->flowi4_oif ? : dev_out->ifindex;
2405 /* get references to the devices that are to be hold by the routing 2409 /* get references to the devices that are to be hold by the routing
2406 cache entry */ 2410 cache entry */
2407 rth->dst.dev = dev_out; 2411 rth->dst.dev = dev_out;
@@ -2611,7 +2615,9 @@ static struct rtable *ip_route_output_slow(struct net *net,
2611 fib_select_multipath(&res); 2615 fib_select_multipath(&res);
2612 else 2616 else
2613#endif 2617#endif
2614 if (!res.prefixlen && res.type == RTN_UNICAST && !fl4.flowi4_oif) 2618 if (!res.prefixlen &&
2619 res.table->tb_num_default > 1 &&
2620 res.type == RTN_UNICAST && !fl4.flowi4_oif)
2615 fib_select_default(&res); 2621 fib_select_default(&res);
2616 2622
2617 if (!fl4.saddr) 2623 if (!fl4.saddr)
@@ -2716,6 +2722,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
2716 rt->rt_key_dst = ort->rt_key_dst; 2722 rt->rt_key_dst = ort->rt_key_dst;
2717 rt->rt_key_src = ort->rt_key_src; 2723 rt->rt_key_src = ort->rt_key_src;
2718 rt->rt_tos = ort->rt_tos; 2724 rt->rt_tos = ort->rt_tos;
2725 rt->rt_route_iif = ort->rt_route_iif;
2719 rt->rt_iif = ort->rt_iif; 2726 rt->rt_iif = ort->rt_iif;
2720 rt->rt_oif = ort->rt_oif; 2727 rt->rt_oif = ort->rt_oif;
2721 rt->rt_mark = ort->rt_mark; 2728 rt->rt_mark = ort->rt_mark;
@@ -2725,7 +2732,6 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
2725 rt->rt_type = ort->rt_type; 2732 rt->rt_type = ort->rt_type;
2726 rt->rt_dst = ort->rt_dst; 2733 rt->rt_dst = ort->rt_dst;
2727 rt->rt_src = ort->rt_src; 2734 rt->rt_src = ort->rt_src;
2728 rt->rt_iif = ort->rt_iif;
2729 rt->rt_gateway = ort->rt_gateway; 2735 rt->rt_gateway = ort->rt_gateway;
2730 rt->rt_spec_dst = ort->rt_spec_dst; 2736 rt->rt_spec_dst = ort->rt_spec_dst;
2731 rt->peer = ort->peer; 2737 rt->peer = ort->peer;
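The route.c hunks split what used to be a single rt_iif into two fields: rt_route_iif records the device a packet actually arrived on (0 for output routes), while rt_iif may now carry the output interface for locally generated traffic. Condensed from the hunks above:

	/* input routes (ip_route_input_mc, __mkroute_input, local_input) */
	rth->rt_route_iif = dev->ifindex;	/* real ingress device */
	rth->rt_iif       = dev->ifindex;

	/* output routes (__mkroute_output) */
	rth->rt_route_iif = 0;			/* no ingress device */
	rth->rt_iif       = oldflp4->flowi4_oif ? : dev_out->ifindex;

fib_validate_source() likewise now takes the skb itself rather than a pre-extracted mark, presumably so the validation path can consult whatever per-packet state it needs; and the ip_route_output_slow() hunk additionally skips default-route selection unless the table actually holds more than one default (tb_num_default > 1).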
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 8b44c6d2a79b..71e029691908 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -345,17 +345,13 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
345 * no easy way to do this. 345 * no easy way to do this.
346 */ 346 */
347 { 347 {
348 struct flowi4 fl4 = { 348 struct flowi4 fl4;
349 .flowi4_mark = sk->sk_mark, 349
350 .daddr = ((opt && opt->srr) ? 350 flowi4_init_output(&fl4, 0, sk->sk_mark, RT_CONN_FLAGS(sk),
351 opt->faddr : ireq->rmt_addr), 351 RT_SCOPE_UNIVERSE, IPPROTO_TCP,
352 .saddr = ireq->loc_addr, 352 inet_sk_flowi_flags(sk),
353 .flowi4_tos = RT_CONN_FLAGS(sk), 353 (opt && opt->srr) ? opt->faddr : ireq->rmt_addr,
354 .flowi4_proto = IPPROTO_TCP, 354 ireq->loc_addr, th->source, th->dest);
355 .flowi4_flags = inet_sk_flowi_flags(sk),
356 .fl4_sport = th->dest,
357 .fl4_dport = th->source,
358 };
359 security_req_classify_flow(req, flowi4_to_flowi(&fl4)); 355 security_req_classify_flow(req, flowi4_to_flowi(&fl4));
360 rt = ip_route_output_key(sock_net(sk), &fl4); 356 rt = ip_route_output_key(sock_net(sk), &fl4);
361 if (IS_ERR(rt)) { 357 if (IS_ERR(rt)) {
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 1a456652086b..321e6e84dbcc 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -311,7 +311,6 @@ static struct ctl_table ipv4_table[] = {
311 .mode = 0644, 311 .mode = 0644,
312 .proc_handler = proc_do_large_bitmap, 312 .proc_handler = proc_do_large_bitmap,
313 }, 313 },
314#ifdef CONFIG_IP_MULTICAST
315 { 314 {
316 .procname = "igmp_max_memberships", 315 .procname = "igmp_max_memberships",
317 .data = &sysctl_igmp_max_memberships, 316 .data = &sysctl_igmp_max_memberships,
@@ -319,8 +318,6 @@ static struct ctl_table ipv4_table[] = {
319 .mode = 0644, 318 .mode = 0644,
320 .proc_handler = proc_dointvec 319 .proc_handler = proc_dointvec
321 }, 320 },
322
323#endif
324 { 321 {
325 .procname = "igmp_max_msf", 322 .procname = "igmp_max_msf",
326 .data = &sysctl_igmp_max_msf, 323 .data = &sysctl_igmp_max_msf,
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index b22d45010545..054a59d21eb0 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -999,7 +999,8 @@ new_segment:
999 /* We have some space in skb head. Superb! */ 999 /* We have some space in skb head. Superb! */
1000 if (copy > skb_tailroom(skb)) 1000 if (copy > skb_tailroom(skb))
1001 copy = skb_tailroom(skb); 1001 copy = skb_tailroom(skb);
1002 if ((err = skb_add_data(skb, from, copy)) != 0) 1002 err = skb_add_data_nocache(sk, skb, from, copy);
1003 if (err)
1003 goto do_fault; 1004 goto do_fault;
1004 } else { 1005 } else {
1005 int merge = 0; 1006 int merge = 0;
@@ -1042,8 +1043,8 @@ new_segment:
1042 1043
1043 /* Time to copy data. We are close to 1044 /* Time to copy data. We are close to
1044 * the end! */ 1045 * the end! */
1045 err = skb_copy_to_page(sk, from, skb, page, 1046 err = skb_copy_to_page_nocache(sk, from, skb,
1046 off, copy); 1047 page, off, copy);
1047 if (err) { 1048 if (err) {
1048 /* If this page was new, give it to the 1049 /* If this page was new, give it to the
1049 * socket so it does not get leaked. 1050 * socket so it does not get leaked.
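tcp_sendmsg() switches to the _nocache copy helpers. The rationale: when the NIC will checksum (and DMA) the payload, the CPU never reads that data again, so copying from userspace with non-temporal stores avoids evicting useful cache lines. A plausible core for the helper these wrap — a sketch only, with NETIF_F_NOCACHE_COPY as an assumed capability flag rather than a quote from the header:

	static inline int skb_do_copy_data_nocache(struct sock *sk,
						   struct sk_buff *skb,
						   char __user *from, char *to,
						   int copy, int offset)
	{
		if (skb->ip_summed == CHECKSUM_NONE) {
			/* must checksum anyway: fold it into the copy */
			int err = 0;
			__wsum csum = csum_and_copy_from_user(from, to,
							      copy, 0, &err);
			if (err)
				return err;
			skb->csum = csum_block_add(skb->csum, csum, offset);
		} else if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) {
			/* hardware checksums: bypass the CPU cache */
			if (__copy_from_user_nocache(to, from, copy))
				return -EFAULT;
		} else if (copy_from_user(to, from, copy)) {
			return -EFAULT;
		}
		return 0;
	}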
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index f7e6c2c2d2bb..edf18bd74b87 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -279,7 +279,7 @@ EXPORT_SYMBOL(tcp_v4_connect);
279/* 279/*
280 * This routine does path mtu discovery as defined in RFC1191. 280 * This routine does path mtu discovery as defined in RFC1191.
281 */ 281 */
282static void do_pmtu_discovery(struct sock *sk, struct iphdr *iph, u32 mtu) 282static void do_pmtu_discovery(struct sock *sk, const struct iphdr *iph, u32 mtu)
283{ 283{
284 struct dst_entry *dst; 284 struct dst_entry *dst;
285 struct inet_sock *inet = inet_sk(sk); 285 struct inet_sock *inet = inet_sk(sk);
@@ -341,7 +341,7 @@ static void do_pmtu_discovery(struct sock *sk, struct iphdr *iph, u32 mtu)
341 341
342void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) 342void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
343{ 343{
344 struct iphdr *iph = (struct iphdr *)icmp_skb->data; 344 const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
345 struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2)); 345 struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
346 struct inet_connection_sock *icsk; 346 struct inet_connection_sock *icsk;
347 struct tcp_sock *tp; 347 struct tcp_sock *tp;
@@ -2527,7 +2527,7 @@ void tcp4_proc_exit(void)
2527 2527
2528struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb) 2528struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2529{ 2529{
2530 struct iphdr *iph = skb_gro_network_header(skb); 2530 const struct iphdr *iph = skb_gro_network_header(skb);
2531 2531
2532 switch (skb->ip_summed) { 2532 switch (skb->ip_summed) {
2533 case CHECKSUM_COMPLETE: 2533 case CHECKSUM_COMPLETE:
@@ -2548,7 +2548,7 @@ struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2548 2548
2549int tcp4_gro_complete(struct sk_buff *skb) 2549int tcp4_gro_complete(struct sk_buff *skb)
2550{ 2550{
2551 struct iphdr *iph = ip_hdr(skb); 2551 const struct iphdr *iph = ip_hdr(skb);
2552 struct tcphdr *th = tcp_hdr(skb); 2552 struct tcphdr *th = tcp_hdr(skb);
2553 2553
2554 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb), 2554 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
diff --git a/net/ipv4/tcp_lp.c b/net/ipv4/tcp_lp.c
index 656d431c99ad..72f7218b03f5 100644
--- a/net/ipv4/tcp_lp.c
+++ b/net/ipv4/tcp_lp.c
@@ -12,7 +12,7 @@
12 * within cong_avoid. 12 * within cong_avoid.
 13 * o Error correcting in remote HZ, therefore remote HZ will be kept 13 * o Error correcting in remote HZ, therefore remote HZ will be kept
14 * on checking and updating. 14 * on checking and updating.
15 * o Handling calculation of One-Way-Delay (OWD) within rtt_sample, sicne 15 * o Handling calculation of One-Way-Delay (OWD) within rtt_sample, since
 16 * OWD has a similar meaning to RTT. Also correct the buggy formula. 16 * OWD has a similar meaning to RTT. Also correct the buggy formula.
17 * o Handle reaction for Early Congestion Indication (ECI) within 17 * o Handle reaction for Early Congestion Indication (ECI) within
18 * pkts_acked, as mentioned within pseudo code. 18 * pkts_acked, as mentioned within pseudo code.
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 8b0d0167e44a..17388c7f49c4 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -73,7 +73,7 @@ static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
73 tcp_advance_send_head(sk, skb); 73 tcp_advance_send_head(sk, skb);
74 tp->snd_nxt = TCP_SKB_CB(skb)->end_seq; 74 tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
75 75
76 /* Don't override Nagle indefinately with F-RTO */ 76 /* Don't override Nagle indefinitely with F-RTO */
77 if (tp->frto_counter == 2) 77 if (tp->frto_counter == 2)
78 tp->frto_counter = 3; 78 tp->frto_counter = 3;
79 79
diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
index dc7f43179c9a..05c3b6f0e8e1 100644
--- a/net/ipv4/tcp_yeah.c
+++ b/net/ipv4/tcp_yeah.c
@@ -20,7 +20,7 @@
20#define TCP_YEAH_DELTA 3 //log minimum fraction of cwnd to be removed on loss 20#define TCP_YEAH_DELTA 3 //log minimum fraction of cwnd to be removed on loss
21#define TCP_YEAH_EPSILON 1 //log maximum fraction to be removed on early decongestion 21#define TCP_YEAH_EPSILON 1 //log maximum fraction to be removed on early decongestion
22#define TCP_YEAH_PHY 8 //lin maximum delta from base 22#define TCP_YEAH_PHY 8 //lin maximum delta from base
23#define TCP_YEAH_RHO 16 //lin minumum number of consecutive rtt to consider competition on loss 23#define TCP_YEAH_RHO 16 //lin minimum number of consecutive rtt to consider competition on loss
 24#define TCP_YEAH_ZETA 50 //lin minimum number of state switches to reset reno_count 24#define TCP_YEAH_ZETA 50 //lin minimum number of state switches to reset reno_count
25 25
26#define TCP_SCALABLE_AI_CNT 100U 26#define TCP_SCALABLE_AI_CNT 100U
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 588f47af5faf..bc0dab2593e0 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -189,7 +189,7 @@ static int udp_lib_lport_inuse2(struct net *net, __u16 num,
189 * @sk: socket struct in question 189 * @sk: socket struct in question
190 * @snum: port number to look up 190 * @snum: port number to look up
191 * @saddr_comp: AF-dependent comparison of bound local IP addresses 191 * @saddr_comp: AF-dependent comparison of bound local IP addresses
192 * @hash2_nulladdr: AF-dependant hash value in secondary hash chains, 192 * @hash2_nulladdr: AF-dependent hash value in secondary hash chains,
193 * with NULL address 193 * with NULL address
194 */ 194 */
195int udp_lib_get_port(struct sock *sk, unsigned short snum, 195int udp_lib_get_port(struct sock *sk, unsigned short snum,
@@ -578,7 +578,7 @@ found:
578void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable) 578void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
579{ 579{
580 struct inet_sock *inet; 580 struct inet_sock *inet;
581 struct iphdr *iph = (struct iphdr *)skb->data; 581 const struct iphdr *iph = (const struct iphdr *)skb->data;
582 struct udphdr *uh = (struct udphdr *)(skb->data+(iph->ihl<<2)); 582 struct udphdr *uh = (struct udphdr *)(skb->data+(iph->ihl<<2));
583 const int type = icmp_hdr(skb)->type; 583 const int type = icmp_hdr(skb)->type;
584 const int code = icmp_hdr(skb)->code; 584 const int code = icmp_hdr(skb)->code;
@@ -909,20 +909,14 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
909 rt = (struct rtable *)sk_dst_check(sk, 0); 909 rt = (struct rtable *)sk_dst_check(sk, 0);
910 910
911 if (rt == NULL) { 911 if (rt == NULL) {
912 struct flowi4 fl4 = { 912 struct flowi4 fl4;
913 .flowi4_oif = ipc.oif,
914 .flowi4_mark = sk->sk_mark,
915 .daddr = faddr,
916 .saddr = saddr,
917 .flowi4_tos = tos,
918 .flowi4_proto = sk->sk_protocol,
919 .flowi4_flags = (inet_sk_flowi_flags(sk) |
920 FLOWI_FLAG_CAN_SLEEP),
921 .fl4_sport = inet->inet_sport,
922 .fl4_dport = dport,
923 };
924 struct net *net = sock_net(sk); 913 struct net *net = sock_net(sk);
925 914
915 flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
916 RT_SCOPE_UNIVERSE, sk->sk_protocol,
917 inet_sk_flowi_flags(sk)|FLOWI_FLAG_CAN_SLEEP,
918 faddr, saddr, dport, inet->inet_sport);
919
926 security_sk_classify_flow(sk, flowi4_to_flowi(&fl4)); 920 security_sk_classify_flow(sk, flowi4_to_flowi(&fl4));
927 rt = ip_route_output_flow(net, &fl4, sk); 921 rt = ip_route_output_flow(net, &fl4, sk);
928 if (IS_ERR(rt)) { 922 if (IS_ERR(rt)) {
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 13e0e7f659ff..59b1340fb3bf 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -74,6 +74,7 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
74 rt->rt_key_dst = fl4->daddr; 74 rt->rt_key_dst = fl4->daddr;
75 rt->rt_key_src = fl4->saddr; 75 rt->rt_key_src = fl4->saddr;
76 rt->rt_tos = fl4->flowi4_tos; 76 rt->rt_tos = fl4->flowi4_tos;
77 rt->rt_route_iif = fl4->flowi4_iif;
77 rt->rt_iif = fl4->flowi4_iif; 78 rt->rt_iif = fl4->flowi4_iif;
78 rt->rt_oif = fl4->flowi4_oif; 79 rt->rt_oif = fl4->flowi4_oif;
79 rt->rt_mark = fl4->flowi4_mark; 80 rt->rt_mark = fl4->flowi4_mark;
@@ -101,7 +102,7 @@ static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
101static void 102static void
102_decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse) 103_decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
103{ 104{
104 struct iphdr *iph = ip_hdr(skb); 105 const struct iphdr *iph = ip_hdr(skb);
105 u8 *xprth = skb_network_header(skb) + iph->ihl * 4; 106 u8 *xprth = skb_network_header(skb) + iph->ihl * 4;
106 struct flowi4 *fl4 = &fl->u.ip4; 107 struct flowi4 *fl4 = &fl->u.ip4;
107 108
diff --git a/net/ipv4/xfrm4_state.c b/net/ipv4/xfrm4_state.c
index 1717c64628d1..ea983ae96ae6 100644
--- a/net/ipv4/xfrm4_state.c
+++ b/net/ipv4/xfrm4_state.c
@@ -55,7 +55,7 @@ xfrm4_init_temprop(struct xfrm_state *x, const struct xfrm_tmpl *tmpl,
55 55
56int xfrm4_extract_header(struct sk_buff *skb) 56int xfrm4_extract_header(struct sk_buff *skb)
57{ 57{
58 struct iphdr *iph = ip_hdr(skb); 58 const struct iphdr *iph = ip_hdr(skb);
59 59
60 XFRM_MODE_SKB_CB(skb)->ihl = sizeof(*iph); 60 XFRM_MODE_SKB_CB(skb)->ihl = sizeof(*iph);
61 XFRM_MODE_SKB_CB(skb)->id = iph->id; 61 XFRM_MODE_SKB_CB(skb)->id = iph->id;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 3daaf3c7703c..c663a3b70924 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -825,6 +825,8 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
825 dst_release(&rt->dst); 825 dst_release(&rt->dst);
826 } 826 }
827 827
828 /* clean up prefsrc entries */
829 rt6_remove_prefsrc(ifp);
828out: 830out:
829 in6_ifa_put(ifp); 831 in6_ifa_put(ifp);
830} 832}
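The added rt6_remove_prefsrc() call closes a stale-reference window: IPv6 routes may carry a preferred source address, and deleting that address has to clear the annotation, or later lookups would keep handing out an address that no longer exists. A sketch of the per-route callback such a FIB walk might use (the function name and field layout here are illustrative assumptions):

	static int fib6_clear_prefsrc(struct rt6_info *rt, void *arg)
	{
		const struct in6_addr *addr = arg;	/* address being deleted */

		if (rt->rt6i_prefsrc.plen &&
		    ipv6_addr_equal(addr, &rt->rt6i_prefsrc.addr))
			rt->rt6i_prefsrc.plen = 0;	/* drop stale prefsrc */

		return 0;	/* keep walking; don't delete the route itself */
	}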
@@ -1084,7 +1086,7 @@ static int ipv6_get_saddr_eval(struct net *net,
1084 case IPV6_SADDR_RULE_PRIVACY: 1086 case IPV6_SADDR_RULE_PRIVACY:
1085 { 1087 {
1086 /* Rule 7: Prefer public address 1088 /* Rule 7: Prefer public address
1087 * Note: prefer temprary address if use_tempaddr >= 2 1089 * Note: prefer temporary address if use_tempaddr >= 2
1088 */ 1090 */
1089 int preftmp = dst->prefs & (IPV6_PREFER_SRC_PUBLIC|IPV6_PREFER_SRC_TMP) ? 1091 int preftmp = dst->prefs & (IPV6_PREFER_SRC_PUBLIC|IPV6_PREFER_SRC_TMP) ?
1090 !!(dst->prefs & IPV6_PREFER_SRC_TMP) : 1092 !!(dst->prefs & IPV6_PREFER_SRC_TMP) :
@@ -1281,7 +1283,7 @@ static int ipv6_count_addresses(struct inet6_dev *idev)
 	return cnt;
 }
 
-int ipv6_chk_addr(struct net *net, struct in6_addr *addr,
+int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
 		  struct net_device *dev, int strict)
 {
 	struct inet6_ifaddr *ifp;
@@ -1324,7 +1326,7 @@ static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
 	return false;
 }
 
-int ipv6_chk_prefix(struct in6_addr *addr, struct net_device *dev)
+int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev)
 {
 	struct inet6_dev *idev;
 	struct inet6_ifaddr *ifa;
@@ -1455,7 +1457,7 @@ void addrconf_dad_failure(struct inet6_ifaddr *ifp)
 
 /* Join to solicited addr multicast group. */
 
-void addrconf_join_solict(struct net_device *dev, struct in6_addr *addr)
+void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr)
 {
 	struct in6_addr maddr;
 
@@ -1466,7 +1468,7 @@ void addrconf_join_solict(struct net_device *dev, struct in6_addr *addr)
 	ipv6_dev_mc_inc(dev, &maddr);
 }
 
-void addrconf_leave_solict(struct inet6_dev *idev, struct in6_addr *addr)
+void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
 {
 	struct in6_addr maddr;
 
@@ -1968,7 +1970,7 @@ ok:
 					 * to the stored lifetime since we'll
 					 * be updating the timestamp below,
 					 * else we'll set it back to the
-					 * minumum.
+					 * minimum.
 					 */
 					if (prefered_lft != ifp->prefered_lft) {
 						valid_lft = stored_lft;
@@ -2111,7 +2113,7 @@ err_exit:
 /*
  *	Manual configuration of address on an interface
  */
-static int inet6_addr_add(struct net *net, int ifindex, struct in6_addr *pfx,
+static int inet6_addr_add(struct net *net, int ifindex, const struct in6_addr *pfx,
 			  unsigned int plen, __u8 ifa_flags, __u32 prefered_lft,
 			  __u32 valid_lft)
 {
@@ -2185,7 +2187,7 @@ static int inet6_addr_add(struct net *net, int ifindex, struct in6_addr *pfx,
 	return PTR_ERR(ifp);
 }
 
-static int inet6_addr_del(struct net *net, int ifindex, struct in6_addr *pfx,
+static int inet6_addr_del(struct net *net, int ifindex, const struct in6_addr *pfx,
 			  unsigned int plen)
 {
 	struct inet6_ifaddr *ifp;
@@ -2348,7 +2350,7 @@ static void init_loopback(struct net_device *dev)
 	add_addr(idev, &in6addr_loopback, 128, IFA_HOST);
 }
 
-static void addrconf_add_linklocal(struct inet6_dev *idev, struct in6_addr *addr)
+static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr *addr)
 {
 	struct inet6_ifaddr * ifp;
 	u32 addr_flags = IFA_F_PERMANENT;
@@ -3119,7 +3121,7 @@ void if6_proc_exit(void)
 
 #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
 /* Check if address is a home address configured on any interface. */
-int ipv6_chk_home_addr(struct net *net, struct in6_addr *addr)
+int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr)
 {
 	int ret = 0;
 	struct inet6_ifaddr *ifp = NULL;
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 4b13d5d8890e..b7919f901fbf 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -740,7 +740,7 @@ static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto)
 
 static int ipv6_gso_send_check(struct sk_buff *skb)
 {
-	struct ipv6hdr *ipv6h;
+	const struct ipv6hdr *ipv6h;
 	const struct inet6_protocol *ops;
 	int err = -EINVAL;
 
@@ -1113,7 +1113,7 @@ static int __init inet6_init(void)
 	/*
 	 *	ipngwg API draft makes clear that the correct semantics
 	 *	for TCP and UDP is to consider one TCP and UDP instance
-	 *	in a host availiable by both INET and INET6 APIs and
+	 *	in a host available by both INET and INET6 APIs and
 	 *	able to communicate via both network protocols.
 	 */
 
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c
index 0e5e943446f0..674255f5e6b7 100644
--- a/net/ipv6/anycast.c
+++ b/net/ipv6/anycast.c
@@ -44,7 +44,7 @@
 
 #include <net/checksum.h>
 
-static int ipv6_dev_ac_dec(struct net_device *dev, struct in6_addr *addr);
+static int ipv6_dev_ac_dec(struct net_device *dev, const struct in6_addr *addr);
 
 /* Big ac list lock for all the sockets */
 static DEFINE_RWLOCK(ipv6_sk_ac_lock);
@@ -54,7 +54,7 @@ static DEFINE_RWLOCK(ipv6_sk_ac_lock);
  *	socket join an anycast group
  */
 
-int ipv6_sock_ac_join(struct sock *sk, int ifindex, struct in6_addr *addr)
+int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
 {
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct net_device *dev = NULL;
@@ -145,7 +145,7 @@ error:
 /*
  *	socket leave an anycast group
  */
-int ipv6_sock_ac_drop(struct sock *sk, int ifindex, struct in6_addr *addr)
+int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
 {
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct net_device *dev;
@@ -252,7 +252,7 @@ static void aca_put(struct ifacaddr6 *ac)
 /*
  *	device anycast group inc (add if not found)
  */
-int ipv6_dev_ac_inc(struct net_device *dev, struct in6_addr *addr)
+int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr)
 {
 	struct ifacaddr6 *aca;
 	struct inet6_dev *idev;
@@ -324,7 +324,7 @@ out:
 /*
  *	device anycast group decrement
  */
-int __ipv6_dev_ac_dec(struct inet6_dev *idev, struct in6_addr *addr)
+int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr)
 {
 	struct ifacaddr6 *aca, *prev_aca;
 
@@ -358,7 +358,7 @@ int __ipv6_dev_ac_dec(struct inet6_dev *idev, struct in6_addr *addr)
 }
 
 /* called with rcu_read_lock() */
-static int ipv6_dev_ac_dec(struct net_device *dev, struct in6_addr *addr)
+static int ipv6_dev_ac_dec(struct net_device *dev, const struct in6_addr *addr)
 {
 	struct inet6_dev *idev = __in6_dev_get(dev);
 
@@ -371,7 +371,7 @@ static int ipv6_dev_ac_dec(struct net_device *dev, struct in6_addr *addr)
  *	check if the interface has this anycast address
  *	called with rcu_read_lock()
  */
-static int ipv6_chk_acast_dev(struct net_device *dev, struct in6_addr *addr)
+static int ipv6_chk_acast_dev(struct net_device *dev, const struct in6_addr *addr)
 {
 	struct inet6_dev *idev;
 	struct ifacaddr6 *aca;
@@ -392,7 +392,7 @@ static int ipv6_chk_acast_dev(struct net_device *dev, struct in6_addr *addr)
  *	check if given interface (or any, if dev==0) has this anycast address
  */
 int ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
-			struct in6_addr *addr)
+			const struct in6_addr *addr)
 {
 	int found = 0;
 
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 5aa8ec88f194..e97b4b7ca2f2 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -430,7 +430,7 @@ static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 		    u8 type, u8 code, int offset, __be32 info)
 {
 	struct net *net = dev_net(skb->dev);
-	struct ipv6hdr *iph = (struct ipv6hdr*)skb->data;
+	const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
 	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset);
 	struct xfrm_state *x;
 
@@ -438,7 +438,8 @@ static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	    type != ICMPV6_PKT_TOOBIG)
 		return;
 
-	x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, esph->spi, IPPROTO_ESP, AF_INET6);
+	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
+			      esph->spi, IPPROTO_ESP, AF_INET6);
 	if (!x)
 		return;
 	printk(KERN_DEBUG "pmtu discovery on SA ESP/%08x/%pI6\n",
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 83cb4f9add81..11900417b1cc 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -372,7 +372,7 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
 	struct ipv6hdr *hdr = ipv6_hdr(skb);
 	struct sock *sk;
 	struct ipv6_pinfo *np;
-	struct in6_addr *saddr = NULL;
+	const struct in6_addr *saddr = NULL;
 	struct dst_entry *dst;
 	struct icmp6hdr tmp_hdr;
 	struct flowi6 fl6;
@@ -521,7 +521,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
 	struct sock *sk;
 	struct inet6_dev *idev;
 	struct ipv6_pinfo *np;
-	struct in6_addr *saddr = NULL;
+	const struct in6_addr *saddr = NULL;
 	struct icmp6hdr *icmph = icmp6_hdr(skb);
 	struct icmp6hdr tmp_hdr;
 	struct flowi6 fl6;
@@ -645,8 +645,8 @@ static int icmpv6_rcv(struct sk_buff *skb)
 {
 	struct net_device *dev = skb->dev;
 	struct inet6_dev *idev = __in6_dev_get(dev);
-	struct in6_addr *saddr, *daddr;
-	struct ipv6hdr *orig_hdr;
+	const struct in6_addr *saddr, *daddr;
+	const struct ipv6hdr *orig_hdr;
 	struct icmp6hdr *hdr;
 	u8 type;
 
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 166054650466..f2c5b0fc0f21 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -44,7 +44,7 @@ int inet6_csk_bind_conflict(const struct sock *sk,
 		     !sk2->sk_bound_dev_if ||
 		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if) &&
 		    (!sk->sk_reuse || !sk2->sk_reuse ||
-		     ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) &&
+		     sk2->sk_state == TCP_LISTEN) &&
 		     ipv6_rcv_saddr_equal(sk, sk2))
 			break;
 	}
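The bind-conflict change above is easy to misread: the TCPF_* constants are state bitmasks, each defined as 1 << TCP_<STATE>, so the old test matched sockets in either LISTEN or CLOSE with a single AND. The new test conflicts on LISTEN only, so a closed-but-lingering socket no longer shields a reusable address. A toy sketch of the mask idiom (definitions per include/net/tcp_states.h; handle_listen_or_close() is a placeholder):

	#define TCPF_LISTEN	(1 << TCP_LISTEN)
	#define TCPF_CLOSE	(1 << TCP_CLOSE)

	/* one branch tests membership in a whole set of states */
	if ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		handle_listen_or_close();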
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 7548905e79e1..dd88df0a5d7f 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -134,9 +134,9 @@ static __inline__ u32 fib6_new_sernum(void)
 # define BITOP_BE32_SWIZZLE	0
 #endif
 
-static __inline__ __be32 addr_bit_set(void *token, int fn_bit)
+static __inline__ __be32 addr_bit_set(const void *token, int fn_bit)
 {
-	__be32 *addr = token;
+	const __be32 *addr = token;
 	/*
 	 * Here,
 	 * 	1 << ((~fn_bit ^ BITOP_BE32_SWIZZLE) & 0x1f)
@@ -822,7 +822,7 @@ st_failure:
 
 struct lookup_args {
 	int		offset;		/* key offset on rt6_info	*/
-	struct in6_addr	*addr;		/* search key			*/
+	const struct in6_addr	*addr;	/* search key			*/
 };
 
 static struct fib6_node * fib6_lookup_1(struct fib6_node *root,
@@ -881,8 +881,8 @@ static struct fib6_node * fib6_lookup_1(struct fib6_node *root,
 	return NULL;
 }
 
-struct fib6_node * fib6_lookup(struct fib6_node *root, struct in6_addr *daddr,
-			       struct in6_addr *saddr)
+struct fib6_node * fib6_lookup(struct fib6_node *root, const struct in6_addr *daddr,
+			       const struct in6_addr *saddr)
 {
 	struct fib6_node *fn;
 	struct lookup_args args[] = {
@@ -916,7 +916,7 @@ struct fib6_node * fib6_lookup(struct fib6_node *root, struct in6_addr *daddr,
 
 
 static struct fib6_node * fib6_locate_1(struct fib6_node *root,
-					struct in6_addr *addr,
+					const struct in6_addr *addr,
 					int plen, int offset)
 {
 	struct fib6_node *fn;
@@ -946,8 +946,8 @@ static struct fib6_node * fib6_locate_1(struct fib6_node *root,
 }
 
 struct fib6_node * fib6_locate(struct fib6_node *root,
-			       struct in6_addr *daddr, int dst_len,
-			       struct in6_addr *saddr, int src_len)
+			       const struct in6_addr *daddr, int dst_len,
+			       const struct in6_addr *saddr, int src_len)
 {
 	struct fib6_node *fn;
 
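For orientation, the fib6 trie these lookup helpers walk tests one key bit per node: addr_bit_set(token, fn_bit) extracts bit fn_bit of the 128-bit address, counted from the most significant bit of the first byte; the BITOP_BE32_SWIZZLE trick in the hunk above does exactly that on 32-bit words without byte swapping. A byte-wise sketch of the same semantics (illustrative, not the kernel implementation):

	#include <stdint.h>

	static int addr_bit_set(const uint8_t *addr, int fn_bit)
	{
		/* byte fn_bit / 8, bit 7 - (fn_bit % 8) within that byte */
		return (addr[fn_bit >> 3] >> (7 - (fn_bit & 7))) & 1;
	}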
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index a83e9209cecc..027c7ff6f1e5 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -57,7 +57,7 @@ inline int ip6_rcv_finish( struct sk_buff *skb)
 
 int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
 {
-	struct ipv6hdr *hdr;
+	const struct ipv6hdr *hdr;
 	u32 pkt_len;
 	struct inet6_dev *idev;
 	struct net *net = dev_net(skb->dev);
@@ -186,7 +186,7 @@ resubmit:
 		int ret;
 
 		if (ipprot->flags & INET6_PROTO_FINAL) {
-			struct ipv6hdr *hdr;
+			const struct ipv6hdr *hdr;
 
 			/* Free reference early: we don't need it any more,
 			   and it may hold ip_conntrack module loaded
@@ -242,7 +242,7 @@ int ip6_input(struct sk_buff *skb)
 
 int ip6_mc_input(struct sk_buff *skb)
 {
-	struct ipv6hdr *hdr;
+	const struct ipv6hdr *hdr;
 	int deliver;
 
 	IP6_UPD_PO_STATS_BH(dev_net(skb_dst(skb)->dev),
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 18208876aa8a..4cfbb24b9e04 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -779,7 +779,7 @@ slow_path:
 	/* IF: it doesn't fit, use 'mtu' - the data space left */
 	if (len > mtu)
 		len = mtu;
-	/* IF: we are not sending upto and including the packet end
+	/* IF: we are not sending up to and including the packet end
 	   then align the next start on an eight byte boundary */
 	if (len < left)	{
 		len &= ~7;
@@ -869,9 +869,9 @@ fail:
 	return err;
 }
 
-static inline int ip6_rt_check(struct rt6key *rt_key,
-			       struct in6_addr *fl_addr,
-			       struct in6_addr *addr_cache)
+static inline int ip6_rt_check(const struct rt6key *rt_key,
+			       const struct in6_addr *fl_addr,
+			       const struct in6_addr *addr_cache)
 {
 	return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
 		(addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache));
@@ -879,7 +879,7 @@ static inline int ip6_rt_check(struct rt6key *rt_key,
 
 static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
 					  struct dst_entry *dst,
-					  struct flowi6 *fl6)
+					  const struct flowi6 *fl6)
 {
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct rt6_info *rt = (struct rt6_info *)dst;
@@ -930,10 +930,10 @@ static int ip6_dst_lookup_tail(struct sock *sk,
 		goto out_err_release;
 
 	if (ipv6_addr_any(&fl6->saddr)) {
-		err = ipv6_dev_get_saddr(net, ip6_dst_idev(*dst)->dev,
-					 &fl6->daddr,
+		struct rt6_info *rt = (struct rt6_info *) *dst;
+		err = ip6_route_get_saddr(net, rt, &fl6->daddr,
 					 sk ? inet6_sk(sk)->srcprefs : 0,
 					 &fl6->saddr);
 		if (err)
 			goto out_err_release;
 	}
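A note on the slow_path hunk above: the len &= ~7 that follows the corrected comment is dictated by the wire format, since the IPv6 fragment header encodes offsets in 8-octet units (RFC 2460, sec. 4.5), so every fragment except the last must carry a multiple of 8 payload bytes. A toy calculation, plain C rather than kernel code:

	unsigned int left = 2000, mtu = 1300;
	unsigned int len = left > mtu ? mtu : left;	/* 1300 */
	if (len < left)
		len &= ~7U;	/* 1296: not the final fragment, round down */
	/* the remaining 704 bytes go out unrounded in the last fragment */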
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index c1b1bd312df2..9dd0e964b8bd 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -162,7 +162,7 @@ static inline void ip6_tnl_dst_store(struct ip6_tnl *t, struct dst_entry *dst)
 	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
 
 static struct ip6_tnl *
-ip6_tnl_lookup(struct net *net, struct in6_addr *remote, struct in6_addr *local)
+ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_addr *local)
 {
 	unsigned int h0 = HASH(remote);
 	unsigned int h1 = HASH(local);
@@ -194,10 +194,10 @@ ip6_tnl_lookup(struct net *net, struct in6_addr *remote, struct in6_addr *local)
  **/
 
 static struct ip6_tnl __rcu **
-ip6_tnl_bucket(struct ip6_tnl_net *ip6n, struct ip6_tnl_parm *p)
+ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct ip6_tnl_parm *p)
 {
-	struct in6_addr *remote = &p->raddr;
-	struct in6_addr *local = &p->laddr;
+	const struct in6_addr *remote = &p->raddr;
+	const struct in6_addr *local = &p->laddr;
 	unsigned h = 0;
 	int prio = 0;
 
@@ -321,8 +321,8 @@ failed:
 static struct ip6_tnl *ip6_tnl_locate(struct net *net,
 		struct ip6_tnl_parm *p, int create)
 {
-	struct in6_addr *remote = &p->raddr;
-	struct in6_addr *local = &p->laddr;
+	const struct in6_addr *remote = &p->raddr;
+	const struct in6_addr *local = &p->laddr;
 	struct ip6_tnl __rcu **tp;
 	struct ip6_tnl *t;
 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
@@ -374,7 +374,7 @@ ip6_tnl_dev_uninit(struct net_device *dev)
 static __u16
 parse_tlv_tnl_enc_lim(struct sk_buff *skb, __u8 * raw)
 {
-	struct ipv6hdr *ipv6h = (struct ipv6hdr *) raw;
+	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) raw;
 	__u8 nexthdr = ipv6h->nexthdr;
 	__u16 off = sizeof (*ipv6h);
 
@@ -435,7 +435,7 @@ static int
 ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
 	    u8 *type, u8 *code, int *msg, __u32 *info, int offset)
 {
-	struct ipv6hdr *ipv6h = (struct ipv6hdr *) skb->data;
+	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) skb->data;
 	struct ip6_tnl *t;
 	int rel_msg = 0;
 	u8 rel_type = ICMPV6_DEST_UNREACH;
@@ -535,7 +535,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	__u32 rel_info = ntohl(info);
 	int err;
 	struct sk_buff *skb2;
-	struct iphdr *eiph;
+	const struct iphdr *eiph;
 	struct rtable *rt;
 
 	err = ip6_tnl_err(skb, IPPROTO_IPIP, opt, &rel_type, &rel_code,
@@ -669,8 +669,8 @@ ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	return 0;
 }
 
-static void ip4ip6_dscp_ecn_decapsulate(struct ip6_tnl *t,
-					struct ipv6hdr *ipv6h,
+static void ip4ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
+					const struct ipv6hdr *ipv6h,
 					struct sk_buff *skb)
 {
 	__u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK;
@@ -682,8 +682,8 @@ static void ip4ip6_dscp_ecn_decapsulate(struct ip6_tnl *t,
 		IP_ECN_set_ce(ip_hdr(skb));
 }
 
-static void ip6ip6_dscp_ecn_decapsulate(struct ip6_tnl *t,
-					struct ipv6hdr *ipv6h,
+static void ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
+					const struct ipv6hdr *ipv6h,
 					struct sk_buff *skb)
 {
 	if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
@@ -726,12 +726,12 @@ static inline int ip6_tnl_rcv_ctl(struct ip6_tnl *t)
 
 static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
 		       __u8 ipproto,
-		       void (*dscp_ecn_decapsulate)(struct ip6_tnl *t,
-						    struct ipv6hdr *ipv6h,
+		       void (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
+						    const struct ipv6hdr *ipv6h,
 						    struct sk_buff *skb))
 {
 	struct ip6_tnl *t;
-	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
 
 	rcu_read_lock();
 
@@ -828,7 +828,7 @@ static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
  **/
 
 static inline int
-ip6_tnl_addr_conflict(struct ip6_tnl *t, struct ipv6hdr *hdr)
+ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
 {
 	return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
 }
@@ -1005,7 +1005,7 @@ static inline int
 ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ip6_tnl *t = netdev_priv(dev);
-	struct iphdr  *iph = ip_hdr(skb);
+	const struct iphdr  *iph = ip_hdr(skb);
 	int encap_limit = -1;
 	struct flowi6 fl6;
 	__u8 dsfield;
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 29e48593bf22..82a809901f8e 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -989,8 +989,8 @@ static int mif6_add(struct net *net, struct mr6_table *mrt,
 }
 
 static struct mfc6_cache *ip6mr_cache_find(struct mr6_table *mrt,
-					   struct in6_addr *origin,
-					   struct in6_addr *mcastgrp)
+					   const struct in6_addr *origin,
+					   const struct in6_addr *mcastgrp)
 {
 	int line = MFC6_HASH(mcastgrp, origin);
 	struct mfc6_cache *c;
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index 85cccd6ed0b7..bba658d9a03c 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -55,7 +55,7 @@ static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 {
 	struct net *net = dev_net(skb->dev);
 	__be32 spi;
-	struct ipv6hdr *iph = (struct ipv6hdr*)skb->data;
+	const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
 	struct ip_comp_hdr *ipcomph =
 		(struct ip_comp_hdr *)(skb->data + offset);
 	struct xfrm_state *x;
@@ -64,7 +64,8 @@ static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 		return;
 
 	spi = htonl(ntohs(ipcomph->cpi));
-	x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, spi, IPPROTO_COMP, AF_INET6);
+	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
+			      spi, IPPROTO_COMP, AF_INET6);
 	if (!x)
 		return;
 
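The lookup in ipcomp6_err() above also shows the IPComp quirk worth knowing: compression state is identified on the wire by a 16-bit CPI, while the xfrm state table is keyed by 32-bit SPIs, so the CPI is zero-extended without ever leaving network byte order:

	__be16 cpi = ipcomph->cpi;	/* e.g. 0x1234 on the wire */
	__be32 spi = htonl(ntohs(cpi));	/* becomes SPI 0x00001234, still
					 * network order, high bits zero */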
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 76b893771e6e..ff62e33ead07 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -92,16 +92,16 @@ static void mld_gq_timer_expire(unsigned long data);
 static void mld_ifc_timer_expire(unsigned long data);
 static void mld_ifc_event(struct inet6_dev *idev);
 static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
-static void mld_del_delrec(struct inet6_dev *idev, struct in6_addr *addr);
+static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *addr);
 static void mld_clear_delrec(struct inet6_dev *idev);
 static int sf_setstate(struct ifmcaddr6 *pmc);
 static void sf_markstate(struct ifmcaddr6 *pmc);
 static void ip6_mc_clear_src(struct ifmcaddr6 *pmc);
-static int ip6_mc_del_src(struct inet6_dev *idev, struct in6_addr *pmca,
-			  int sfmode, int sfcount, struct in6_addr *psfsrc,
+static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
+			  int sfmode, int sfcount, const struct in6_addr *psfsrc,
 			  int delta);
-static int ip6_mc_add_src(struct inet6_dev *idev, struct in6_addr *pmca,
-			  int sfmode, int sfcount, struct in6_addr *psfsrc,
+static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
+			  int sfmode, int sfcount, const struct in6_addr *psfsrc,
 			  int delta);
 static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
 			    struct inet6_dev *idev);
@@ -250,7 +250,7 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
 
 /* called with rcu_read_lock() */
 static struct inet6_dev *ip6_mc_find_dev_rcu(struct net *net,
-					     struct in6_addr *group,
+					     const struct in6_addr *group,
 					     int ifindex)
 {
 	struct net_device *dev = NULL;
@@ -451,7 +451,7 @@ done:
 
 int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf)
 {
-	struct in6_addr *group;
+	const struct in6_addr *group;
 	struct ipv6_mc_socklist *pmc;
 	struct inet6_dev *idev;
 	struct ipv6_pinfo *inet6 = inet6_sk(sk);
@@ -542,7 +542,7 @@ int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
 		  struct group_filter __user *optval, int __user *optlen)
 {
 	int err, i, count, copycount;
-	struct in6_addr *group;
+	const struct in6_addr *group;
 	struct ipv6_mc_socklist *pmc;
 	struct inet6_dev *idev;
 	struct ipv6_pinfo *inet6 = inet6_sk(sk);
@@ -752,7 +752,7 @@ static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
 	spin_unlock_bh(&idev->mc_lock);
 }
 
-static void mld_del_delrec(struct inet6_dev *idev, struct in6_addr *pmca)
+static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *pmca)
 {
 	struct ifmcaddr6 *pmc, *pmc_prev;
 	struct ip6_sf_list *psf, *psf_next;
@@ -1052,7 +1052,7 @@ static void igmp6_group_queried(struct ifmcaddr6 *ma, unsigned long resptime)
 
 /* mark EXCLUDE-mode sources */
 static int mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs,
-	struct in6_addr *srcs)
+	const struct in6_addr *srcs)
 {
 	struct ip6_sf_list *psf;
 	int i, scount;
@@ -1080,7 +1080,7 @@ static int mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs,
 }
 
 static int mld_marksources(struct ifmcaddr6 *pmc, int nsrcs,
-	struct in6_addr *srcs)
+	const struct in6_addr *srcs)
 {
 	struct ip6_sf_list *psf;
 	int i, scount;
@@ -1115,7 +1115,7 @@ int igmp6_event_query(struct sk_buff *skb)
1115{ 1115{
1116 struct mld2_query *mlh2 = NULL; 1116 struct mld2_query *mlh2 = NULL;
1117 struct ifmcaddr6 *ma; 1117 struct ifmcaddr6 *ma;
1118 struct in6_addr *group; 1118 const struct in6_addr *group;
1119 unsigned long max_delay; 1119 unsigned long max_delay;
1120 struct inet6_dev *idev; 1120 struct inet6_dev *idev;
1121 struct mld_msg *mld; 1121 struct mld_msg *mld;
@@ -1821,7 +1821,7 @@ err_out:
 }
 
 static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode,
-	struct in6_addr *psfsrc)
+	const struct in6_addr *psfsrc)
 {
 	struct ip6_sf_list *psf, *psf_prev;
 	int rv = 0;
@@ -1857,8 +1857,8 @@ static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode,
 	return rv;
 }
 
-static int ip6_mc_del_src(struct inet6_dev *idev, struct in6_addr *pmca,
-			  int sfmode, int sfcount, struct in6_addr *psfsrc,
+static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
+			  int sfmode, int sfcount, const struct in6_addr *psfsrc,
 			  int delta)
 {
 	struct ifmcaddr6 *pmc;
@@ -1918,7 +1918,7 @@ static int ip6_mc_del_src(struct inet6_dev *idev, struct in6_addr *pmca,
  * Add multicast single-source filter to the interface list
  */
 static int ip6_mc_add1_src(struct ifmcaddr6 *pmc, int sfmode,
-	struct in6_addr *psfsrc, int delta)
+	const struct in6_addr *psfsrc, int delta)
 {
 	struct ip6_sf_list *psf, *psf_prev;
 
@@ -2021,8 +2021,8 @@ static int sf_setstate(struct ifmcaddr6 *pmc)
 /*
  * Add multicast source filter list to the interface list
  */
-static int ip6_mc_add_src(struct inet6_dev *idev, struct in6_addr *pmca,
-			  int sfmode, int sfcount, struct in6_addr *psfsrc,
+static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
+			  int sfmode, int sfcount, const struct in6_addr *psfsrc,
 			  int delta)
 {
 	struct ifmcaddr6 *pmc;
diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c
index 9b210482fb05..43242e6e6103 100644
--- a/net/ipv6/mip6.c
+++ b/net/ipv6/mip6.c
@@ -126,7 +126,7 @@ static struct mip6_report_rate_limiter mip6_report_rl = {
 
 static int mip6_destopt_input(struct xfrm_state *x, struct sk_buff *skb)
 {
-	struct ipv6hdr *iph = ipv6_hdr(skb);
+	const struct ipv6hdr *iph = ipv6_hdr(skb);
 	struct ipv6_destopt_hdr *destopt = (struct ipv6_destopt_hdr *)skb->data;
 	int err = destopt->nexthdr;
 
@@ -181,8 +181,8 @@ static int mip6_destopt_output(struct xfrm_state *x, struct sk_buff *skb)
 }
 
 static inline int mip6_report_rl_allow(struct timeval *stamp,
-				       struct in6_addr *dst,
-				       struct in6_addr *src, int iif)
+				       const struct in6_addr *dst,
+				       const struct in6_addr *src, int iif)
 {
 	int allow = 0;
 
@@ -349,7 +349,7 @@ static const struct xfrm_type mip6_destopt_type =
 
 static int mip6_rthdr_input(struct xfrm_state *x, struct sk_buff *skb)
 {
-	struct ipv6hdr *iph = ipv6_hdr(skb);
+	const struct ipv6hdr *iph = ipv6_hdr(skb);
 	struct rt2_hdr *rt2 = (struct rt2_hdr *)skb->data;
 	int err = rt2->rt_hdr.nexthdr;
 
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 92f952d093db..69aacd18e066 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -324,7 +324,7 @@ static inline u8 *ndisc_opt_addr_data(struct nd_opt_hdr *p,
 	return lladdr + prepad;
 }
 
-int ndisc_mc_map(struct in6_addr *addr, char *buf, struct net_device *dev, int dir)
+int ndisc_mc_map(const struct in6_addr *addr, char *buf, struct net_device *dev, int dir)
 {
 	switch (dev->type) {
 	case ARPHRD_ETHER:
@@ -611,6 +611,29 @@ static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
 		      inc_opt ? ND_OPT_TARGET_LL_ADDR : 0);
 }
 
+static void ndisc_send_unsol_na(struct net_device *dev)
+{
+	struct inet6_dev *idev;
+	struct inet6_ifaddr *ifa;
+	struct in6_addr mcaddr;
+
+	idev = in6_dev_get(dev);
+	if (!idev)
+		return;
+
+	read_lock_bh(&idev->lock);
+	list_for_each_entry(ifa, &idev->addr_list, if_list) {
+		addrconf_addr_solict_mult(&ifa->addr, &mcaddr);
+		ndisc_send_na(dev, NULL, &mcaddr, &ifa->addr,
+			      /*router=*/ !!idev->cnf.forwarding,
+			      /*solicited=*/ false, /*override=*/ true,
+			      /*inc_opt=*/ true);
+	}
+	read_unlock_bh(&idev->lock);
+
+	in6_dev_put(idev);
+}
+
 void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,
 		   const struct in6_addr *solicit,
 		   const struct in6_addr *daddr, const struct in6_addr *saddr)
@@ -725,8 +748,8 @@ static int pndisc_is_router(const void *pkey,
 static void ndisc_recv_ns(struct sk_buff *skb)
 {
 	struct nd_msg *msg = (struct nd_msg *)skb_transport_header(skb);
-	struct in6_addr *saddr = &ipv6_hdr(skb)->saddr;
-	struct in6_addr *daddr = &ipv6_hdr(skb)->daddr;
+	const struct in6_addr *saddr = &ipv6_hdr(skb)->saddr;
+	const struct in6_addr *daddr = &ipv6_hdr(skb)->daddr;
 	u8 *lladdr = NULL;
 	u32 ndoptlen = skb->tail - (skb->transport_header +
 				    offsetof(struct nd_msg, opt));
@@ -901,8 +924,8 @@ out:
 static void ndisc_recv_na(struct sk_buff *skb)
 {
 	struct nd_msg *msg = (struct nd_msg *)skb_transport_header(skb);
-	struct in6_addr *saddr = &ipv6_hdr(skb)->saddr;
-	struct in6_addr *daddr = &ipv6_hdr(skb)->daddr;
+	const struct in6_addr *saddr = &ipv6_hdr(skb)->saddr;
+	const struct in6_addr *daddr = &ipv6_hdr(skb)->daddr;
 	u8 *lladdr = NULL;
 	u32 ndoptlen = skb->tail - (skb->transport_header +
 				    offsetof(struct nd_msg, opt));
@@ -945,9 +968,10 @@ static void ndisc_recv_na(struct sk_buff *skb)
 	}
 	ifp = ipv6_get_ifaddr(dev_net(dev), &msg->target, dev, 1);
 	if (ifp) {
-		if (ifp->flags & IFA_F_TENTATIVE) {
-			addrconf_dad_failure(ifp);
-			return;
+		if (skb->pkt_type != PACKET_LOOPBACK
+		    && (ifp->flags & IFA_F_TENTATIVE)) {
+			addrconf_dad_failure(ifp);
+			return;
 		}
 		/* What should we make now? The advertisement
 		   is invalid, but ndisc specs say nothing
@@ -1014,7 +1038,7 @@ static void ndisc_recv_rs(struct sk_buff *skb)
 	unsigned long ndoptlen = skb->len - sizeof(*rs_msg);
 	struct neighbour *neigh;
 	struct inet6_dev *idev;
-	struct in6_addr *saddr = &ipv6_hdr(skb)->saddr;
+	const struct in6_addr *saddr = &ipv6_hdr(skb)->saddr;
 	struct ndisc_options ndopts;
 	u8 *lladdr = NULL;
 
@@ -1411,8 +1435,8 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
 {
 	struct inet6_dev *in6_dev;
 	struct icmp6hdr *icmph;
-	struct in6_addr *dest;
-	struct in6_addr *target;	/* new first hop to destination */
+	const struct in6_addr *dest;
+	const struct in6_addr *target;	/* new first hop to destination */
 	struct neighbour *neigh;
 	int on_link = 0;
 	struct ndisc_options ndopts;
@@ -1445,7 +1469,7 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
 	}
 
 	icmph = icmp6_hdr(skb);
-	target = (struct in6_addr *) (icmph + 1);
+	target = (const struct in6_addr *) (icmph + 1);
 	dest = target + 1;
 
 	if (ipv6_addr_is_multicast(dest)) {
@@ -1722,6 +1746,10 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event,
 		neigh_ifdown(&nd_tbl, dev);
 		fib6_run_gc(~0UL, net);
 		break;
+	case NETDEV_NOTIFY_PEERS:
+	case NETDEV_BONDING_FAILOVER:
+		ndisc_send_unsol_na(dev);
+		break;
 	default:
 		break;
 	}
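The new ndisc_send_unsol_na(), wired to NETDEV_NOTIFY_PEERS and NETDEV_BONDING_FAILOVER in the hunk just above, is the IPv6 counterpart of a gratuitous ARP: after a failover it re-announces every address on the interface so switches and neighbors relearn the new port. Each announcement goes to the address's solicited-node group, which addrconf_addr_solict_mult() derives as ff02::1:ffXX:XXXX from the low 24 bits of the unicast address (RFC 4291, sec. 2.7.1). A byte-wise sketch of that mapping (illustrative; the kernel builds it from 32-bit words):

	static void solicited_node_mcast(const struct in6_addr *addr,
					 struct in6_addr *mc)
	{
		memset(mc, 0, sizeof(*mc));
		mc->s6_addr[0]  = 0xff;			/* ff02::1:ff00:0/104 */
		mc->s6_addr[1]  = 0x02;
		mc->s6_addr[11] = 0x01;
		mc->s6_addr[12] = 0xff;
		mc->s6_addr[13] = addr->s6_addr[13];	/* low 24 bits of the */
		mc->s6_addr[14] = addr->s6_addr[14];	/* unicast address    */
		mc->s6_addr[15] = addr->s6_addr[15];
	}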
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index 39aaca2b4fd2..30fcee465448 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -13,7 +13,7 @@
 int ip6_route_me_harder(struct sk_buff *skb)
 {
 	struct net *net = dev_net(skb_dst(skb)->dev);
-	struct ipv6hdr *iph = ipv6_hdr(skb);
+	const struct ipv6hdr *iph = ipv6_hdr(skb);
 	struct dst_entry *dst;
 	struct flowi6 fl6 = {
 		.flowi6_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0,
@@ -67,7 +67,7 @@ static void nf_ip6_saveroute(const struct sk_buff *skb,
 	struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);
 
 	if (entry->hook == NF_INET_LOCAL_OUT) {
-		struct ipv6hdr *iph = ipv6_hdr(skb);
+		const struct ipv6hdr *iph = ipv6_hdr(skb);
 
 		rt_info->daddr = iph->daddr;
 		rt_info->saddr = iph->saddr;
@@ -81,7 +81,7 @@ static int nf_ip6_reroute(struct sk_buff *skb,
 	struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);
 
 	if (entry->hook == NF_INET_LOCAL_OUT) {
-		struct ipv6hdr *iph = ipv6_hdr(skb);
+		const struct ipv6hdr *iph = ipv6_hdr(skb);
 		if (!ipv6_addr_equal(&iph->daddr, &rt_info->daddr) ||
 		    !ipv6_addr_equal(&iph->saddr, &rt_info->saddr) ||
 		    skb->mark != rt_info->mark)
@@ -90,16 +90,25 @@ static int nf_ip6_reroute(struct sk_buff *skb,
 		return 0;
 }
 
-static int nf_ip6_route(struct dst_entry **dst, struct flowi *fl)
+static int nf_ip6_route(struct net *net, struct dst_entry **dst,
+			struct flowi *fl, bool strict)
 {
-	*dst = ip6_route_output(&init_net, NULL, &fl->u.ip6);
+	static const struct ipv6_pinfo fake_pinfo;
+	static const struct inet_sock fake_sk = {
+		/* makes ip6_route_output set RT6_LOOKUP_F_IFACE: */
+		.sk.sk_bound_dev_if = 1,
+		.pinet6 = (struct ipv6_pinfo *) &fake_pinfo,
+	};
+	const void *sk = strict ? &fake_sk : NULL;
+
+	*dst = ip6_route_output(net, sk, &fl->u.ip6);
 	return (*dst)->error;
 }
 
 __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
 			unsigned int dataoff, u_int8_t protocol)
 {
-	struct ipv6hdr *ip6h = ipv6_hdr(skb);
+	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
 	__sum16 csum = 0;
 
 	switch (skb->ip_summed) {
@@ -133,7 +142,7 @@ static __sum16 nf_ip6_checksum_partial(struct sk_buff *skb, unsigned int hook,
 				   unsigned int dataoff, unsigned int len,
 				   u_int8_t protocol)
 {
-	struct ipv6hdr *ip6h = ipv6_hdr(skb);
+	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
 	__wsum hsum;
 	__sum16 csum = 0;
 
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 0b2af9b85cec..94874b0bdcdc 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -340,6 +340,7 @@ ip6t_do_table(struct sk_buff *skb,
 	unsigned int *stackptr, origptr, cpu;
 	const struct xt_table_info *private;
 	struct xt_action_param acpar;
+	unsigned int addend;
 
 	/* Initialization */
 	indev = in ? in->name : nulldevname;
@@ -358,7 +359,8 @@ ip6t_do_table(struct sk_buff *skb,
 
 	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
 
-	xt_info_rdlock_bh();
+	local_bh_disable();
+	addend = xt_write_recseq_begin();
 	private = table->private;
 	cpu        = smp_processor_id();
 	table_base = private->entries[cpu];
@@ -442,7 +444,9 @@ ip6t_do_table(struct sk_buff *skb,
 	} while (!acpar.hotdrop);
 
 	*stackptr = origptr;
-	xt_info_rdunlock_bh();
+
+	xt_write_recseq_end(addend);
+	local_bh_enable();
 
 #ifdef DEBUG_ALLOW_ALL
 	return NF_ACCEPT;
@@ -899,7 +903,7 @@ get_counters(const struct xt_table_info *t,
 	unsigned int i;
 
 	for_each_possible_cpu(cpu) {
-		seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+		seqcount_t *s = &per_cpu(xt_recseq, cpu);
 
 		i = 0;
 		xt_entry_foreach(iter, t->entries[cpu], t->size) {
@@ -907,10 +911,10 @@ get_counters(const struct xt_table_info *t,
 			unsigned int start;
 
 			do {
-				start = read_seqbegin(lock);
+				start = read_seqcount_begin(s);
 				bcnt = iter->counters.bcnt;
 				pcnt = iter->counters.pcnt;
-			} while (read_seqretry(lock, start));
+			} while (read_seqcount_retry(s, start));
 
 			ADD_COUNTER(counters[i], bcnt, pcnt);
 			++i;
@@ -1325,6 +1329,7 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
 	int ret = 0;
 	const void *loc_cpu_entry;
 	struct ip6t_entry *iter;
+	unsigned int addend;
 #ifdef CONFIG_COMPAT
 	struct compat_xt_counters_info compat_tmp;
 
@@ -1381,13 +1386,13 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
 	i = 0;
 	/* Choose the copy that is on our node */
 	curcpu = smp_processor_id();
-	xt_info_wrlock(curcpu);
+	addend = xt_write_recseq_begin();
 	loc_cpu_entry = private->entries[curcpu];
 	xt_entry_foreach(iter, loc_cpu_entry, private->size) {
 		ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
 		++i;
 	}
-	xt_info_wrunlock(curcpu);
+	xt_write_recseq_end(addend);
 
 unlock_up_free:
 	local_bh_enable();
@@ -1578,7 +1583,6 @@ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
 			       struct xt_table_info *newinfo, unsigned char *base)
 {
 	struct xt_entry_target *t;
-	struct xt_target *target;
 	struct ip6t_entry *de;
 	unsigned int origsize;
 	int ret, h;
@@ -1600,7 +1604,6 @@ compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
 	}
 	de->target_offset = e->target_offset - (origsize - *size);
 	t = compat_ip6t_get_target(e);
-	target = t->u.kernel.target;
 	xt_compat_target_from_user(t, dstptr, size);
 
 	de->next_offset = e->next_offset - (origsize - *size);
@@ -2248,7 +2251,7 @@ static int __init ip6_tables_init(void)
 	if (ret < 0)
 		goto err1;
 
-	/* Noone else will be downing sem now, so we won't sleep */
+	/* No one else will be downing sem now, so we won't sleep */
 	ret = xt_register_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
 	if (ret < 0)
 		goto err2;
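The ip6_tables hunks above retire the per-cpu xt_info_locks seqlock in favour of a bare per-cpu seqcount, xt_recseq: the packet path acts as each cpu's writer, and the counter readers in get_counters() simply retry while the sequence is odd or has moved. Roughly what the begin/end helpers do (a sketch of the x_tables.h helpers of this era; the addend trick keeps a nested entry into the table, e.g. via the REJECT target, from leaving the count stuck at odd):

	unsigned int addend;

	local_bh_disable();
	addend = (__this_cpu_read(xt_recseq.sequence) + 1) & 1;	/* 0 if nested */
	__this_cpu_add(xt_recseq.sequence, addend);	/* odd: writer active */
	smp_wmb();
	/* ... walk the ruleset, bump per-cpu byte/packet counters ... */
	smp_wmb();
	__this_cpu_add(xt_recseq.sequence, addend);	/* even again */
	local_bh_enable();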
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c
index 679a0a3b7b3c..00d19173db7e 100644
--- a/net/ipv6/netfilter/ip6table_mangle.c
+++ b/net/ipv6/netfilter/ip6table_mangle.c
@@ -64,7 +64,8 @@ ip6t_mangle_out(struct sk_buff *skb, const struct net_device *out)
 	    (memcmp(&ipv6_hdr(skb)->saddr, &saddr, sizeof(saddr)) ||
 	     memcmp(&ipv6_hdr(skb)->daddr, &daddr, sizeof(daddr)) ||
 	     skb->mark != mark ||
-	     ipv6_hdr(skb)->hop_limit != hop_limit))
+	     ipv6_hdr(skb)->hop_limit != hop_limit ||
+	     flowlabel != *((u_int32_t *)ipv6_hdr(skb))))
 		return ip6_route_me_harder(skb) == 0 ? ret : NF_DROP;
 
 	return ret;
diff --git a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
index 97c5b21b9674..cdd6d045e42e 100644
--- a/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
+++ b/net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
@@ -71,7 +71,7 @@ static unsigned int ipv6_defrag(unsigned int hooknum,
 	if (reasm == NULL)
 		return NF_STOLEN;
 
-	/* error occured or not fragmented */
+	/* error occurred or not fragmented */
 	if (reasm == skb)
 		return NF_ACCEPT;
 
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 4a1c3b46c56b..e5e5425fe7d0 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -67,8 +67,8 @@ static struct raw_hashinfo raw_v6_hashinfo = {
 };
 
 static struct sock *__raw_v6_lookup(struct net *net, struct sock *sk,
-		unsigned short num, struct in6_addr *loc_addr,
-		struct in6_addr *rmt_addr, int dif)
+		unsigned short num, const struct in6_addr *loc_addr,
+		const struct in6_addr *rmt_addr, int dif)
 {
 	struct hlist_node *node;
 	int is_multicast = ipv6_addr_is_multicast(loc_addr);
@@ -154,8 +154,8 @@ EXPORT_SYMBOL(rawv6_mh_filter_unregister);
  */
 static int ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
 {
-	struct in6_addr *saddr;
-	struct in6_addr *daddr;
+	const struct in6_addr *saddr;
+	const struct in6_addr *daddr;
 	struct sock *sk;
 	int delivered = 0;
 	__u8 hash;
@@ -348,7 +348,7 @@ void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
 {
 	struct sock *sk;
 	int hash;
-	struct in6_addr *saddr, *daddr;
+	const struct in6_addr *saddr, *daddr;
 	struct net *net;
 
 	hash = nexthdr & (RAW_HTABLE_SIZE - 1);
@@ -357,7 +357,7 @@ void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
 	sk = sk_head(&raw_v6_hashinfo.ht[hash]);
 	if (sk != NULL) {
 		/* Note: ipv6_hdr(skb) != skb->data */
-		struct ipv6hdr *ip6h = (struct ipv6hdr *)skb->data;
+		const struct ipv6hdr *ip6h = (const struct ipv6hdr *)skb->data;
 		saddr = &ip6h->saddr;
 		daddr = &ip6h->daddr;
 		net = dev_net(skb->dev);
@@ -1231,7 +1231,7 @@ struct proto rawv6_prot = {
 static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
 {
 	struct ipv6_pinfo *np = inet6_sk(sp);
-	struct in6_addr *dest, *src;
+	const struct in6_addr *dest, *src;
 	__u16 destp, srcp;
 
 	dest = &np->daddr;
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 07beeb06f752..7b954e2539d0 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -224,7 +224,7 @@ out:
 }
 
 static __inline__ struct frag_queue *
-fq_find(struct net *net, __be32 id, struct in6_addr *src, struct in6_addr *dst)
+fq_find(struct net *net, __be32 id, const struct in6_addr *src, const struct in6_addr *dst)
 {
 	struct inet_frag_queue *q;
 	struct ip6_create_arg arg;
@@ -535,7 +535,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
 {
 	struct frag_hdr *fhdr;
 	struct frag_queue *fq;
-	struct ipv6hdr *hdr = ipv6_hdr(skb);
+	const struct ipv6hdr *hdr = ipv6_hdr(skb);
 	struct net *net = dev_net(skb_dst(skb)->dev);
 
 	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 843406f14d7b..852fc28ca818 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -89,12 +89,12 @@ static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
 
 #ifdef CONFIG_IPV6_ROUTE_INFO
 static struct rt6_info *rt6_add_route_info(struct net *net,
-					   struct in6_addr *prefix, int prefixlen,
-					   struct in6_addr *gwaddr, int ifindex,
+					   const struct in6_addr *prefix, int prefixlen,
+					   const struct in6_addr *gwaddr, int ifindex,
 					   unsigned pref);
 static struct rt6_info *rt6_get_route_info(struct net *net,
-					   struct in6_addr *prefix, int prefixlen,
-					   struct in6_addr *gwaddr, int ifindex);
+					   const struct in6_addr *prefix, int prefixlen,
+					   const struct in6_addr *gwaddr, int ifindex);
 #endif
 
 static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
@@ -283,7 +283,7 @@ static __inline__ int rt6_check_expired(const struct rt6_info *rt)
 		time_after(jiffies, rt->rt6i_expires);
 }
 
-static inline int rt6_need_strict(struct in6_addr *daddr)
+static inline int rt6_need_strict(const struct in6_addr *daddr)
 {
 	return ipv6_addr_type(daddr) &
 		(IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
@@ -295,7 +295,7 @@ static inline int rt6_need_strict(struct in6_addr *daddr)
295 295
296static inline struct rt6_info *rt6_device_match(struct net *net, 296static inline struct rt6_info *rt6_device_match(struct net *net,
297 struct rt6_info *rt, 297 struct rt6_info *rt,
298 struct in6_addr *saddr, 298 const struct in6_addr *saddr,
299 int oif, 299 int oif,
300 int flags) 300 int flags)
301{ 301{
@@ -507,7 +507,7 @@ static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
507 507
508#ifdef CONFIG_IPV6_ROUTE_INFO 508#ifdef CONFIG_IPV6_ROUTE_INFO
509int rt6_route_rcv(struct net_device *dev, u8 *opt, int len, 509int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
510 struct in6_addr *gwaddr) 510 const struct in6_addr *gwaddr)
511{ 511{
512 struct net *net = dev_net(dev); 512 struct net *net = dev_net(dev);
513 struct route_info *rinfo = (struct route_info *) opt; 513 struct route_info *rinfo = (struct route_info *) opt;
@@ -670,8 +670,8 @@ int ip6_ins_rt(struct rt6_info *rt)
670 return __ip6_ins_rt(rt, &info); 670 return __ip6_ins_rt(rt, &info);
671} 671}
672 672
673static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, struct in6_addr *daddr, 673static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, const struct in6_addr *daddr,
674 struct in6_addr *saddr) 674 const struct in6_addr *saddr)
675{ 675{
676 struct rt6_info *rt; 676 struct rt6_info *rt;
677 677
@@ -739,7 +739,7 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, struct in6_addr *dad
739 return rt; 739 return rt;
740} 740}
741 741
742static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort, struct in6_addr *daddr) 742static struct rt6_info *rt6_alloc_clone(struct rt6_info *ort, const struct in6_addr *daddr)
743{ 743{
744 struct rt6_info *rt = ip6_rt_copy(ort); 744 struct rt6_info *rt = ip6_rt_copy(ort);
745 if (rt) { 745 if (rt) {
@@ -830,7 +830,7 @@ static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *
830 830
831void ip6_route_input(struct sk_buff *skb) 831void ip6_route_input(struct sk_buff *skb)
832{ 832{
833 struct ipv6hdr *iph = ipv6_hdr(skb); 833 const struct ipv6hdr *iph = ipv6_hdr(skb);
834 struct net *net = dev_net(skb->dev); 834 struct net *net = dev_net(skb->dev);
835 int flags = RT6_LOOKUP_F_HAS_SADDR; 835 int flags = RT6_LOOKUP_F_HAS_SADDR;
836 struct flowi6 fl6 = { 836 struct flowi6 fl6 = {
@@ -1272,7 +1272,7 @@ int ip6_route_add(struct fib6_config *cfg)
1272 } 1272 }
1273 1273
1274 if (cfg->fc_flags & RTF_GATEWAY) { 1274 if (cfg->fc_flags & RTF_GATEWAY) {
1275 struct in6_addr *gw_addr; 1275 const struct in6_addr *gw_addr;
1276 int gwa_type; 1276 int gwa_type;
1277 1277
1278 gw_addr = &cfg->fc_gateway; 1278 gw_addr = &cfg->fc_gateway;
@@ -1325,6 +1325,16 @@ int ip6_route_add(struct fib6_config *cfg)
1325 if (dev == NULL) 1325 if (dev == NULL)
1326 goto out; 1326 goto out;
1327 1327
1328 if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
1329 if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
1330 err = -EINVAL;
1331 goto out;
1332 }
1333 ipv6_addr_copy(&rt->rt6i_prefsrc.addr, &cfg->fc_prefsrc);
1334 rt->rt6i_prefsrc.plen = 128;
1335 } else
1336 rt->rt6i_prefsrc.plen = 0;
1337
1328 if (cfg->fc_flags & (RTF_GATEWAY | RTF_NONEXTHOP)) { 1338 if (cfg->fc_flags & (RTF_GATEWAY | RTF_NONEXTHOP)) {
1329 rt->rt6i_nexthop = __neigh_lookup_errno(&nd_tbl, &rt->rt6i_gateway, dev); 1339 rt->rt6i_nexthop = __neigh_lookup_errno(&nd_tbl, &rt->rt6i_gateway, dev);
1330 if (IS_ERR(rt->rt6i_nexthop)) { 1340 if (IS_ERR(rt->rt6i_nexthop)) {
@@ -1502,9 +1512,9 @@ out:
1502 return rt; 1512 return rt;
1503}; 1513};
1504 1514
1505static struct rt6_info *ip6_route_redirect(struct in6_addr *dest, 1515static struct rt6_info *ip6_route_redirect(const struct in6_addr *dest,
1506 struct in6_addr *src, 1516 const struct in6_addr *src,
1507 struct in6_addr *gateway, 1517 const struct in6_addr *gateway,
1508 struct net_device *dev) 1518 struct net_device *dev)
1509{ 1519{
1510 int flags = RT6_LOOKUP_F_HAS_SADDR; 1520 int flags = RT6_LOOKUP_F_HAS_SADDR;
@@ -1526,8 +1536,8 @@ static struct rt6_info *ip6_route_redirect(struct in6_addr *dest,
1526 flags, __ip6_route_redirect); 1536 flags, __ip6_route_redirect);
1527} 1537}
1528 1538
1529void rt6_redirect(struct in6_addr *dest, struct in6_addr *src, 1539void rt6_redirect(const struct in6_addr *dest, const struct in6_addr *src,
1530 struct in6_addr *saddr, 1540 const struct in6_addr *saddr,
1531 struct neighbour *neigh, u8 *lladdr, int on_link) 1541 struct neighbour *neigh, u8 *lladdr, int on_link)
1532{ 1542{
1533 struct rt6_info *rt, *nrt = NULL; 1543 struct rt6_info *rt, *nrt = NULL;
@@ -1601,7 +1611,7 @@ out:
1601 * i.e. Path MTU discovery 1611 * i.e. Path MTU discovery
1602 */ 1612 */
1603 1613
1604static void rt6_do_pmtu_disc(struct in6_addr *daddr, struct in6_addr *saddr, 1614static void rt6_do_pmtu_disc(const struct in6_addr *daddr, const struct in6_addr *saddr,
1605 struct net *net, u32 pmtu, int ifindex) 1615 struct net *net, u32 pmtu, int ifindex)
1606{ 1616{
1607 struct rt6_info *rt, *nrt; 1617 struct rt6_info *rt, *nrt;
@@ -1686,7 +1696,7 @@ out:
1686 dst_release(&rt->dst); 1696 dst_release(&rt->dst);
1687} 1697}
1688 1698
1689void rt6_pmtu_discovery(struct in6_addr *daddr, struct in6_addr *saddr, 1699void rt6_pmtu_discovery(const struct in6_addr *daddr, const struct in6_addr *saddr,
1690 struct net_device *dev, u32 pmtu) 1700 struct net_device *dev, u32 pmtu)
1691{ 1701{
1692 struct net *net = dev_net(dev); 1702 struct net *net = dev_net(dev);
@@ -1746,8 +1756,8 @@ static struct rt6_info * ip6_rt_copy(struct rt6_info *ort)
1746 1756
1747#ifdef CONFIG_IPV6_ROUTE_INFO 1757#ifdef CONFIG_IPV6_ROUTE_INFO
1748static struct rt6_info *rt6_get_route_info(struct net *net, 1758static struct rt6_info *rt6_get_route_info(struct net *net,
1749 struct in6_addr *prefix, int prefixlen, 1759 const struct in6_addr *prefix, int prefixlen,
1750 struct in6_addr *gwaddr, int ifindex) 1760 const struct in6_addr *gwaddr, int ifindex)
1751{ 1761{
1752 struct fib6_node *fn; 1762 struct fib6_node *fn;
1753 struct rt6_info *rt = NULL; 1763 struct rt6_info *rt = NULL;
@@ -1778,8 +1788,8 @@ out:
1778} 1788}
1779 1789
1780static struct rt6_info *rt6_add_route_info(struct net *net, 1790static struct rt6_info *rt6_add_route_info(struct net *net,
1781 struct in6_addr *prefix, int prefixlen, 1791 const struct in6_addr *prefix, int prefixlen,
1782 struct in6_addr *gwaddr, int ifindex, 1792 const struct in6_addr *gwaddr, int ifindex,
1783 unsigned pref) 1793 unsigned pref)
1784{ 1794{
1785 struct fib6_config cfg = { 1795 struct fib6_config cfg = {
@@ -1807,7 +1817,7 @@ static struct rt6_info *rt6_add_route_info(struct net *net,
1807} 1817}
1808#endif 1818#endif
1809 1819
1810struct rt6_info *rt6_get_dflt_router(struct in6_addr *addr, struct net_device *dev) 1820struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_device *dev)
1811{ 1821{
1812 struct rt6_info *rt; 1822 struct rt6_info *rt;
1813 struct fib6_table *table; 1823 struct fib6_table *table;
@@ -1829,7 +1839,7 @@ struct rt6_info *rt6_get_dflt_router(struct in6_addr *addr, struct net_device *d
1829 return rt; 1839 return rt;
1830} 1840}
1831 1841
1832struct rt6_info *rt6_add_dflt_router(struct in6_addr *gwaddr, 1842struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
1833 struct net_device *dev, 1843 struct net_device *dev,
1834 unsigned int pref) 1844 unsigned int pref)
1835{ 1845{
@@ -2037,6 +2047,55 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
2037 return rt; 2047 return rt;
2038} 2048}
2039 2049
2050int ip6_route_get_saddr(struct net *net,
2051 struct rt6_info *rt,
2052 const struct in6_addr *daddr,
2053 unsigned int prefs,
2054 struct in6_addr *saddr)
2055{
2056 struct inet6_dev *idev = ip6_dst_idev((struct dst_entry*)rt);
2057 int err = 0;
2058 if (rt->rt6i_prefsrc.plen)
2059 ipv6_addr_copy(saddr, &rt->rt6i_prefsrc.addr);
2060 else
2061 err = ipv6_dev_get_saddr(net, idev ? idev->dev : NULL,
2062 daddr, prefs, saddr);
2063 return err;
2064}
2065
2066/* remove deleted ip from prefsrc entries */
2067struct arg_dev_net_ip {
2068 struct net_device *dev;
2069 struct net *net;
2070 struct in6_addr *addr;
2071};
2072
2073static int fib6_remove_prefsrc(struct rt6_info *rt, void *arg)
2074{
2075 struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev;
2076 struct net *net = ((struct arg_dev_net_ip *)arg)->net;
2077 struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;
2078
2079 if (((void *)rt->rt6i_dev == dev || dev == NULL) &&
2080 rt != net->ipv6.ip6_null_entry &&
2081 ipv6_addr_equal(addr, &rt->rt6i_prefsrc.addr)) {
2082 /* remove prefsrc entry */
2083 rt->rt6i_prefsrc.plen = 0;
2084 }
2085 return 0;
2086}
2087
2088void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
2089{
2090 struct net *net = dev_net(ifp->idev->dev);
2091 struct arg_dev_net_ip adni = {
2092 .dev = ifp->idev->dev,
2093 .net = net,
2094 .addr = &ifp->addr,
2095 };
2096 fib6_clean_all(net, fib6_remove_prefsrc, 0, &adni);
2097}
2098
2040struct arg_dev_net { 2099struct arg_dev_net {
2041 struct net_device *dev; 2100 struct net_device *dev;
2042 struct net *net; 2101 struct net *net;
@@ -2183,6 +2242,9 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
2183 nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen); 2242 nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
2184 } 2243 }
2185 2244
2245 if (tb[RTA_PREFSRC])
2246 nla_memcpy(&cfg->fc_prefsrc, tb[RTA_PREFSRC], 16);
2247
2186 if (tb[RTA_OIF]) 2248 if (tb[RTA_OIF])
2187 cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]); 2249 cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
2188 2250
@@ -2325,13 +2387,17 @@ static int rt6_fill_node(struct net *net,
2325#endif 2387#endif
2326 NLA_PUT_U32(skb, RTA_IIF, iif); 2388 NLA_PUT_U32(skb, RTA_IIF, iif);
2327 } else if (dst) { 2389 } else if (dst) {
2328 struct inet6_dev *idev = ip6_dst_idev(&rt->dst);
2329 struct in6_addr saddr_buf; 2390 struct in6_addr saddr_buf;
2330 if (ipv6_dev_get_saddr(net, idev ? idev->dev : NULL, 2391 if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0)
2331 dst, 0, &saddr_buf) == 0)
2332 NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf); 2392 NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
2333 } 2393 }
2334 2394
2395 if (rt->rt6i_prefsrc.plen) {
2396 struct in6_addr saddr_buf;
2397 ipv6_addr_copy(&saddr_buf, &rt->rt6i_prefsrc.addr);
2398 NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
2399 }
2400
2335 if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0) 2401 if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
2336 goto nla_put_failure; 2402 goto nla_put_failure;
2337 2403
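Beyond the const churn, route.c grows per-route preferred-source support: ip6_route_add() validates cfg->fc_prefsrc against the egress device and stores it with plen 128, rtm_to_fib6_config() and rt6_fill_node() round-trip the RTA_PREFSRC netlink attribute, rt6_remove_prefsrc() clears the setting when the address is deleted, and the new ip6_route_get_saddr() prefers the stored address over ordinary source selection. A simplified userspace sketch of that selection order (types and names invented; fallback_saddr() stands in for ipv6_dev_get_saddr()):

    #include <string.h>
    #include <netinet/in.h>

    struct rt_prefsrc {
            int plen;                 /* 128 once RTA_PREFSRC is set */
            struct in6_addr addr;
    };

    /* Stand-in for ipv6_dev_get_saddr(); the real code runs full
     * source address selection, here we just zero the result. */
    static int fallback_saddr(const struct in6_addr *dst,
                              struct in6_addr *src)
    {
            (void)dst;
            memset(src, 0, sizeof(*src));
            return 0;
    }

    static int route_get_saddr(const struct rt_prefsrc *p,
                               const struct in6_addr *dst,
                               struct in6_addr *src)
    {
            if (p->plen) {            /* route-level preferred source wins */
                    *src = p->addr;
                    return 0;
            }
            return fallback_saddr(dst, src);
    }

From userspace this is what iproute2's "src" keyword maps to, e.g. "ip -6 route add 2001:db8::/64 dev eth0 src 2001:db8::1", assuming an iproute2 new enough to emit RTA_PREFSRC for IPv6 routes.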
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 43b33373adb2..34d896426701 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -452,7 +452,7 @@ out:
452} 452}
453 453
454static int 454static int
455isatap_chksrc(struct sk_buff *skb, struct iphdr *iph, struct ip_tunnel *t) 455isatap_chksrc(struct sk_buff *skb, const struct iphdr *iph, struct ip_tunnel *t)
456{ 456{
457 struct ip_tunnel_prl_entry *p; 457 struct ip_tunnel_prl_entry *p;
458 int ok = 1; 458 int ok = 1;
@@ -465,7 +465,8 @@ isatap_chksrc(struct sk_buff *skb, struct iphdr *iph, struct ip_tunnel *t)
465 else 465 else
466 skb->ndisc_nodetype = NDISC_NODETYPE_NODEFAULT; 466 skb->ndisc_nodetype = NDISC_NODETYPE_NODEFAULT;
467 } else { 467 } else {
468 struct in6_addr *addr6 = &ipv6_hdr(skb)->saddr; 468 const struct in6_addr *addr6 = &ipv6_hdr(skb)->saddr;
469
469 if (ipv6_addr_is_isatap(addr6) && 470 if (ipv6_addr_is_isatap(addr6) &&
470 (addr6->s6_addr32[3] == iph->saddr) && 471 (addr6->s6_addr32[3] == iph->saddr) &&
471 ipv6_chk_prefix(addr6, t->dev)) 472 ipv6_chk_prefix(addr6, t->dev))
@@ -499,7 +500,7 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
499 8 bytes of packet payload. It means, that precise relaying of 500 8 bytes of packet payload. It means, that precise relaying of
500 ICMP in the real Internet is absolutely infeasible. 501 ICMP in the real Internet is absolutely infeasible.
501 */ 502 */
502 struct iphdr *iph = (struct iphdr*)skb->data; 503 const struct iphdr *iph = (const struct iphdr *)skb->data;
503 const int type = icmp_hdr(skb)->type; 504 const int type = icmp_hdr(skb)->type;
504 const int code = icmp_hdr(skb)->code; 505 const int code = icmp_hdr(skb)->code;
505 struct ip_tunnel *t; 506 struct ip_tunnel *t;
@@ -557,7 +558,7 @@ out:
557 return err; 558 return err;
558} 559}
559 560
560static inline void ipip6_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb) 561static inline void ipip6_ecn_decapsulate(const struct iphdr *iph, struct sk_buff *skb)
561{ 562{
562 if (INET_ECN_is_ce(iph->tos)) 563 if (INET_ECN_is_ce(iph->tos))
563 IP6_ECN_set_ce(ipv6_hdr(skb)); 564 IP6_ECN_set_ce(ipv6_hdr(skb));
@@ -565,7 +566,7 @@ static inline void ipip6_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb)
565 566
566static int ipip6_rcv(struct sk_buff *skb) 567static int ipip6_rcv(struct sk_buff *skb)
567{ 568{
568 struct iphdr *iph; 569 const struct iphdr *iph;
569 struct ip_tunnel *tunnel; 570 struct ip_tunnel *tunnel;
570 571
571 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) 572 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
@@ -621,7 +622,7 @@ out:
621 * comes from 6rd / 6to4 (RFC 3056) addr space. 622 * comes from 6rd / 6to4 (RFC 3056) addr space.
622 */ 623 */
623static inline 624static inline
624__be32 try_6rd(struct in6_addr *v6dst, struct ip_tunnel *tunnel) 625__be32 try_6rd(const struct in6_addr *v6dst, struct ip_tunnel *tunnel)
625{ 626{
626 __be32 dst = 0; 627 __be32 dst = 0;
627 628
@@ -664,8 +665,8 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
664{ 665{
665 struct ip_tunnel *tunnel = netdev_priv(dev); 666 struct ip_tunnel *tunnel = netdev_priv(dev);
666 struct pcpu_tstats *tstats; 667 struct pcpu_tstats *tstats;
667 struct iphdr *tiph = &tunnel->parms.iph; 668 const struct iphdr *tiph = &tunnel->parms.iph;
668 struct ipv6hdr *iph6 = ipv6_hdr(skb); 669 const struct ipv6hdr *iph6 = ipv6_hdr(skb);
669 u8 tos = tunnel->parms.iph.tos; 670 u8 tos = tunnel->parms.iph.tos;
670 __be16 df = tiph->frag_off; 671 __be16 df = tiph->frag_off;
671 struct rtable *rt; /* Route to the other host */ 672 struct rtable *rt; /* Route to the other host */
@@ -674,7 +675,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
674 unsigned int max_headroom; /* The extra header space needed */ 675 unsigned int max_headroom; /* The extra header space needed */
675 __be32 dst = tiph->daddr; 676 __be32 dst = tiph->daddr;
676 int mtu; 677 int mtu;
677 struct in6_addr *addr6; 678 const struct in6_addr *addr6;
678 int addr_type; 679 int addr_type;
679 680
680 if (skb->protocol != htons(ETH_P_IPV6)) 681 if (skb->protocol != htons(ETH_P_IPV6))
@@ -693,7 +694,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
693 goto tx_error; 694 goto tx_error;
694 } 695 }
695 696
696 addr6 = (struct in6_addr*)&neigh->primary_key; 697 addr6 = (const struct in6_addr*)&neigh->primary_key;
697 addr_type = ipv6_addr_type(addr6); 698 addr_type = ipv6_addr_type(addr6);
698 699
699 if ((addr_type & IPV6_ADDR_UNICAST) && 700 if ((addr_type & IPV6_ADDR_UNICAST) &&
@@ -718,7 +719,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
718 goto tx_error; 719 goto tx_error;
719 } 720 }
720 721
721 addr6 = (struct in6_addr*)&neigh->primary_key; 722 addr6 = (const struct in6_addr*)&neigh->primary_key;
722 addr_type = ipv6_addr_type(addr6); 723 addr_type = ipv6_addr_type(addr6);
723 724
724 if (addr_type == IPV6_ADDR_ANY) { 725 if (addr_type == IPV6_ADDR_ANY) {
@@ -849,7 +850,7 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev)
849{ 850{
850 struct net_device *tdev = NULL; 851 struct net_device *tdev = NULL;
851 struct ip_tunnel *tunnel; 852 struct ip_tunnel *tunnel;
852 struct iphdr *iph; 853 const struct iphdr *iph;
853 854
854 tunnel = netdev_priv(dev); 855 tunnel = netdev_priv(dev);
855 iph = &tunnel->parms.iph; 856 iph = &tunnel->parms.iph;
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 352c26081f5d..8b9644a8b697 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -66,7 +66,7 @@ static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
66static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS], 66static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS],
67 ipv6_cookie_scratch); 67 ipv6_cookie_scratch);
68 68
69static u32 cookie_hash(struct in6_addr *saddr, struct in6_addr *daddr, 69static u32 cookie_hash(const struct in6_addr *saddr, const struct in6_addr *daddr,
70 __be16 sport, __be16 dport, u32 count, int c) 70 __be16 sport, __be16 dport, u32 count, int c)
71{ 71{
72 __u32 *tmp = __get_cpu_var(ipv6_cookie_scratch); 72 __u32 *tmp = __get_cpu_var(ipv6_cookie_scratch);
@@ -86,7 +86,8 @@ static u32 cookie_hash(struct in6_addr *saddr, struct in6_addr *daddr,
86 return tmp[17]; 86 return tmp[17];
87} 87}
88 88
89static __u32 secure_tcp_syn_cookie(struct in6_addr *saddr, struct in6_addr *daddr, 89static __u32 secure_tcp_syn_cookie(const struct in6_addr *saddr,
90 const struct in6_addr *daddr,
90 __be16 sport, __be16 dport, __u32 sseq, 91 __be16 sport, __be16 dport, __u32 sseq,
91 __u32 count, __u32 data) 92 __u32 count, __u32 data)
92{ 93{
@@ -96,8 +97,8 @@ static __u32 secure_tcp_syn_cookie(struct in6_addr *saddr, struct in6_addr *dadd
96 & COOKIEMASK)); 97 & COOKIEMASK));
97} 98}
98 99
99static __u32 check_tcp_syn_cookie(__u32 cookie, struct in6_addr *saddr, 100static __u32 check_tcp_syn_cookie(__u32 cookie, const struct in6_addr *saddr,
100 struct in6_addr *daddr, __be16 sport, 101 const struct in6_addr *daddr, __be16 sport,
101 __be16 dport, __u32 sseq, __u32 count, 102 __be16 dport, __u32 sseq, __u32 count,
102 __u32 maxdiff) 103 __u32 maxdiff)
103{ 104{
@@ -116,7 +117,7 @@ static __u32 check_tcp_syn_cookie(__u32 cookie, struct in6_addr *saddr,
116 117
117__u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp) 118__u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
118{ 119{
119 struct ipv6hdr *iph = ipv6_hdr(skb); 120 const struct ipv6hdr *iph = ipv6_hdr(skb);
120 const struct tcphdr *th = tcp_hdr(skb); 121 const struct tcphdr *th = tcp_hdr(skb);
121 int mssind; 122 int mssind;
122 const __u16 mss = *mssp; 123 const __u16 mss = *mssp;
@@ -138,7 +139,7 @@ __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
138 139
139static inline int cookie_check(struct sk_buff *skb, __u32 cookie) 140static inline int cookie_check(struct sk_buff *skb, __u32 cookie)
140{ 141{
141 struct ipv6hdr *iph = ipv6_hdr(skb); 142 const struct ipv6hdr *iph = ipv6_hdr(skb);
142 const struct tcphdr *th = tcp_hdr(skb); 143 const struct tcphdr *th = tcp_hdr(skb);
143 __u32 seq = ntohl(th->seq) - 1; 144 __u32 seq = ntohl(th->seq) - 1;
144 __u32 mssind = check_tcp_syn_cookie(cookie, &iph->saddr, &iph->daddr, 145 __u32 mssind = check_tcp_syn_cookie(cookie, &iph->saddr, &iph->daddr,
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 2b0c186862c8..cb7658aceb6c 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -76,8 +76,8 @@ static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
76 76
77static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb); 77static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
78static void __tcp_v6_send_check(struct sk_buff *skb, 78static void __tcp_v6_send_check(struct sk_buff *skb,
79 struct in6_addr *saddr, 79 const struct in6_addr *saddr,
80 struct in6_addr *daddr); 80 const struct in6_addr *daddr);
81 81
82static const struct inet_connection_sock_af_ops ipv6_mapped; 82static const struct inet_connection_sock_af_ops ipv6_mapped;
83static const struct inet_connection_sock_af_ops ipv6_specific; 83static const struct inet_connection_sock_af_ops ipv6_specific;
@@ -86,7 +86,7 @@ static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
86static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific; 86static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
87#else 87#else
88static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk, 88static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
89 struct in6_addr *addr) 89 const struct in6_addr *addr)
90{ 90{
91 return NULL; 91 return NULL;
92} 92}
@@ -106,8 +106,8 @@ static void tcp_v6_hash(struct sock *sk)
106} 106}
107 107
108static __inline__ __sum16 tcp_v6_check(int len, 108static __inline__ __sum16 tcp_v6_check(int len,
109 struct in6_addr *saddr, 109 const struct in6_addr *saddr,
110 struct in6_addr *daddr, 110 const struct in6_addr *daddr,
111 __wsum base) 111 __wsum base)
112{ 112{
113 return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base); 113 return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
@@ -331,7 +331,7 @@ failure:
331static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, 331static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
332 u8 type, u8 code, int offset, __be32 info) 332 u8 type, u8 code, int offset, __be32 info)
333{ 333{
334 struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data; 334 const struct ipv6hdr *hdr = (const struct ipv6hdr*)skb->data;
335 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset); 335 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
336 struct ipv6_pinfo *np; 336 struct ipv6_pinfo *np;
337 struct sock *sk; 337 struct sock *sk;
@@ -503,6 +503,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
503 dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false); 503 dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
504 if (IS_ERR(dst)) { 504 if (IS_ERR(dst)) {
505 err = PTR_ERR(dst); 505 err = PTR_ERR(dst);
506 dst = NULL;
506 goto done; 507 goto done;
507 } 508 }
508 skb = tcp_make_synack(sk, dst, req, rvp); 509 skb = tcp_make_synack(sk, dst, req, rvp);
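The one-line "dst = NULL" above matters because ip6_dst_lookup_flow() reports failure as an ERR_PTR-encoded pointer; without the reset, the shared done: label would hand that encoded errno to the dst release path. The idiom, sketched with a minimal ERR_PTR emulation (illustration only, not the kernel macros):

    #include <errno.h>
    #include <stddef.h>

    static inline void *ERR_PTR(long err) { return (void *)err; }
    static inline long PTR_ERR(const void *p) { return (long)p; }
    static inline int IS_ERR(const void *p)
    {
            return (unsigned long)p >= (unsigned long)-4095;
    }

    static void release(void *p) { (void)p; }

    static int use_lookup(void *(*lookup)(void))
    {
            int err = 0;
            void *dst = lookup();

            if (IS_ERR(dst)) {
                    err = PTR_ERR(dst);
                    dst = NULL;       /* poison before the shared exit */
                    goto done;
            }
            /* ... use dst ... */
    done:
            if (dst)
                    release(dst);     /* never sees an ERR_PTR now */
            return err;
    }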
@@ -550,7 +551,7 @@ static void tcp_v6_reqsk_destructor(struct request_sock *req)
550 551
551#ifdef CONFIG_TCP_MD5SIG 552#ifdef CONFIG_TCP_MD5SIG
552static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk, 553static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
553 struct in6_addr *addr) 554 const struct in6_addr *addr)
554{ 555{
555 struct tcp_sock *tp = tcp_sk(sk); 556 struct tcp_sock *tp = tcp_sk(sk);
556 int i; 557 int i;
@@ -579,7 +580,7 @@ static struct tcp_md5sig_key *tcp_v6_reqsk_md5_lookup(struct sock *sk,
579 return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr); 580 return tcp_v6_md5_do_lookup(sk, &inet6_rsk(req)->rmt_addr);
580} 581}
581 582
582static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer, 583static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer,
583 char *newkey, u8 newkeylen) 584 char *newkey, u8 newkeylen)
584{ 585{
585 /* Add key to the list */ 586 /* Add key to the list */
@@ -644,7 +645,7 @@ static int tcp_v6_md5_add_func(struct sock *sk, struct sock *addr_sk,
644 newkey, newkeylen); 645 newkey, newkeylen);
645} 646}
646 647
647static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer) 648static int tcp_v6_md5_do_del(struct sock *sk, const struct in6_addr *peer)
648{ 649{
649 struct tcp_sock *tp = tcp_sk(sk); 650 struct tcp_sock *tp = tcp_sk(sk);
650 int i; 651 int i;
@@ -752,8 +753,8 @@ static int tcp_v6_parse_md5_keys (struct sock *sk, char __user *optval,
752} 753}
753 754
754static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp, 755static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
755 struct in6_addr *daddr, 756 const struct in6_addr *daddr,
756 struct in6_addr *saddr, int nbytes) 757 const struct in6_addr *saddr, int nbytes)
757{ 758{
758 struct tcp6_pseudohdr *bp; 759 struct tcp6_pseudohdr *bp;
759 struct scatterlist sg; 760 struct scatterlist sg;
@@ -770,7 +771,7 @@ static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
770} 771}
771 772
772static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key, 773static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
773 struct in6_addr *daddr, struct in6_addr *saddr, 774 const struct in6_addr *daddr, struct in6_addr *saddr,
774 struct tcphdr *th) 775 struct tcphdr *th)
775{ 776{
776 struct tcp_md5sig_pool *hp; 777 struct tcp_md5sig_pool *hp;
@@ -806,7 +807,7 @@ static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
806 struct sock *sk, struct request_sock *req, 807 struct sock *sk, struct request_sock *req,
807 struct sk_buff *skb) 808 struct sk_buff *skb)
808{ 809{
809 struct in6_addr *saddr, *daddr; 810 const struct in6_addr *saddr, *daddr;
810 struct tcp_md5sig_pool *hp; 811 struct tcp_md5sig_pool *hp;
811 struct hash_desc *desc; 812 struct hash_desc *desc;
812 struct tcphdr *th = tcp_hdr(skb); 813 struct tcphdr *th = tcp_hdr(skb);
@@ -818,7 +819,7 @@ static int tcp_v6_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
818 saddr = &inet6_rsk(req)->loc_addr; 819 saddr = &inet6_rsk(req)->loc_addr;
819 daddr = &inet6_rsk(req)->rmt_addr; 820 daddr = &inet6_rsk(req)->rmt_addr;
820 } else { 821 } else {
821 struct ipv6hdr *ip6h = ipv6_hdr(skb); 822 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
822 saddr = &ip6h->saddr; 823 saddr = &ip6h->saddr;
823 daddr = &ip6h->daddr; 824 daddr = &ip6h->daddr;
824 } 825 }
@@ -856,7 +857,7 @@ static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
856{ 857{
857 __u8 *hash_location = NULL; 858 __u8 *hash_location = NULL;
858 struct tcp_md5sig_key *hash_expected; 859 struct tcp_md5sig_key *hash_expected;
859 struct ipv6hdr *ip6h = ipv6_hdr(skb); 860 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
860 struct tcphdr *th = tcp_hdr(skb); 861 struct tcphdr *th = tcp_hdr(skb);
861 int genhash; 862 int genhash;
862 u8 newhash[16]; 863 u8 newhash[16];
@@ -914,7 +915,7 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
914#endif 915#endif
915 916
916static void __tcp_v6_send_check(struct sk_buff *skb, 917static void __tcp_v6_send_check(struct sk_buff *skb,
917 struct in6_addr *saddr, struct in6_addr *daddr) 918 const struct in6_addr *saddr, const struct in6_addr *daddr)
918{ 919{
919 struct tcphdr *th = tcp_hdr(skb); 920 struct tcphdr *th = tcp_hdr(skb);
920 921
@@ -938,7 +939,7 @@ static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
938 939
939static int tcp_v6_gso_send_check(struct sk_buff *skb) 940static int tcp_v6_gso_send_check(struct sk_buff *skb)
940{ 941{
941 struct ipv6hdr *ipv6h; 942 const struct ipv6hdr *ipv6h;
942 struct tcphdr *th; 943 struct tcphdr *th;
943 944
944 if (!pskb_may_pull(skb, sizeof(*th))) 945 if (!pskb_may_pull(skb, sizeof(*th)))
@@ -956,7 +957,7 @@ static int tcp_v6_gso_send_check(struct sk_buff *skb)
956static struct sk_buff **tcp6_gro_receive(struct sk_buff **head, 957static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
957 struct sk_buff *skb) 958 struct sk_buff *skb)
958{ 959{
959 struct ipv6hdr *iph = skb_gro_network_header(skb); 960 const struct ipv6hdr *iph = skb_gro_network_header(skb);
960 961
961 switch (skb->ip_summed) { 962 switch (skb->ip_summed) {
962 case CHECKSUM_COMPLETE: 963 case CHECKSUM_COMPLETE:
@@ -977,7 +978,7 @@ static struct sk_buff **tcp6_gro_receive(struct sk_buff **head,
977 978
978static int tcp6_gro_complete(struct sk_buff *skb) 979static int tcp6_gro_complete(struct sk_buff *skb)
979{ 980{
980 struct ipv6hdr *iph = ipv6_hdr(skb); 981 const struct ipv6hdr *iph = ipv6_hdr(skb);
981 struct tcphdr *th = tcp_hdr(skb); 982 struct tcphdr *th = tcp_hdr(skb);
982 983
983 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb), 984 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
@@ -1621,6 +1622,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1621 opt_skb = skb_clone(skb, GFP_ATOMIC); 1622 opt_skb = skb_clone(skb, GFP_ATOMIC);
1622 1623
1623 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ 1624 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1625 sock_rps_save_rxhash(sk, skb->rxhash);
1624 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) 1626 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
1625 goto reset; 1627 goto reset;
1626 if (opt_skb) 1628 if (opt_skb)
@@ -1648,7 +1650,8 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1648 __kfree_skb(opt_skb); 1650 __kfree_skb(opt_skb);
1649 return 0; 1651 return 0;
1650 } 1652 }
1651 } 1653 } else
1654 sock_rps_save_rxhash(sk, skb->rxhash);
1652 1655
1653 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) 1656 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len))
1654 goto reset; 1657 goto reset;
@@ -1699,7 +1702,7 @@ ipv6_pktoptions:
1699static int tcp_v6_rcv(struct sk_buff *skb) 1702static int tcp_v6_rcv(struct sk_buff *skb)
1700{ 1703{
1701 struct tcphdr *th; 1704 struct tcphdr *th;
1702 struct ipv6hdr *hdr; 1705 const struct ipv6hdr *hdr;
1703 struct sock *sk; 1706 struct sock *sk;
1704 int ret; 1707 int ret;
1705 struct net *net = dev_net(skb->dev); 1708 struct net *net = dev_net(skb->dev);
@@ -2025,8 +2028,8 @@ static void get_openreq6(struct seq_file *seq,
2025 struct sock *sk, struct request_sock *req, int i, int uid) 2028 struct sock *sk, struct request_sock *req, int i, int uid)
2026{ 2029{
2027 int ttd = req->expires - jiffies; 2030 int ttd = req->expires - jiffies;
2028 struct in6_addr *src = &inet6_rsk(req)->loc_addr; 2031 const struct in6_addr *src = &inet6_rsk(req)->loc_addr;
2029 struct in6_addr *dest = &inet6_rsk(req)->rmt_addr; 2032 const struct in6_addr *dest = &inet6_rsk(req)->rmt_addr;
2030 2033
2031 if (ttd < 0) 2034 if (ttd < 0)
2032 ttd = 0; 2035 ttd = 0;
@@ -2054,7 +2057,7 @@ static void get_openreq6(struct seq_file *seq,
2054 2057
2055static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i) 2058static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
2056{ 2059{
2057 struct in6_addr *dest, *src; 2060 const struct in6_addr *dest, *src;
2058 __u16 destp, srcp; 2061 __u16 destp, srcp;
2059 int timer_active; 2062 int timer_active;
2060 unsigned long timer_expires; 2063 unsigned long timer_expires;
@@ -2111,7 +2114,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
2111static void get_timewait6_sock(struct seq_file *seq, 2114static void get_timewait6_sock(struct seq_file *seq,
2112 struct inet_timewait_sock *tw, int i) 2115 struct inet_timewait_sock *tw, int i)
2113{ 2116{
2114 struct in6_addr *dest, *src; 2117 const struct in6_addr *dest, *src;
2115 __u16 destp, srcp; 2118 __u16 destp, srcp;
2116 struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw); 2119 struct inet6_timewait_sock *tw6 = inet6_twsk((struct sock *)tw);
2117 int ttd = tw->tw_ttd - jiffies; 2120 int ttd = tw->tw_ttd - jiffies;
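The sock_rps_save_rxhash() calls added to tcp_v6_do_rcv() mirror the IPv4 path: the incoming flow's hash is recorded both on the ESTABLISHED fast path and, via the new else branch, for other states, so Receive Flow Steering can steer later packets of the flow to the CPU where the consuming thread runs. A stripped-down sketch of what such a hook does (invented type; the real helper also updates the global RFS table):

    struct sock_like { unsigned int rxhash; };

    static void save_rxhash(struct sock_like *sk, unsigned int rxhash)
    {
            if (sk->rxhash != rxhash)  /* avoid dirtying a hot cacheline */
                    sk->rxhash = rxhash;
    }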
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index d7037c006e13..1bdc5f053db8 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -311,7 +311,7 @@ static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
311 struct udp_table *udptable) 311 struct udp_table *udptable)
312{ 312{
313 struct sock *sk; 313 struct sock *sk;
314 struct ipv6hdr *iph = ipv6_hdr(skb); 314 const struct ipv6hdr *iph = ipv6_hdr(skb);
315 315
316 if (unlikely(sk = skb_steal_sock(skb))) 316 if (unlikely(sk = skb_steal_sock(skb)))
317 return sk; 317 return sk;
@@ -463,9 +463,9 @@ void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
463 struct udp_table *udptable) 463 struct udp_table *udptable)
464{ 464{
465 struct ipv6_pinfo *np; 465 struct ipv6_pinfo *np;
466 struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data; 466 const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
467 struct in6_addr *saddr = &hdr->saddr; 467 const struct in6_addr *saddr = &hdr->saddr;
468 struct in6_addr *daddr = &hdr->daddr; 468 const struct in6_addr *daddr = &hdr->daddr;
469 struct udphdr *uh = (struct udphdr*)(skb->data+offset); 469 struct udphdr *uh = (struct udphdr*)(skb->data+offset);
470 struct sock *sk; 470 struct sock *sk;
471 int err; 471 int err;
@@ -505,6 +505,9 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
505 int rc; 505 int rc;
506 int is_udplite = IS_UDPLITE(sk); 506 int is_udplite = IS_UDPLITE(sk);
507 507
508 if (!ipv6_addr_any(&inet6_sk(sk)->daddr))
509 sock_rps_save_rxhash(sk, skb->rxhash);
510
508 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) 511 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
509 goto drop; 512 goto drop;
510 513
@@ -550,8 +553,8 @@ drop_no_sk_drops_inc:
550} 553}
551 554
552static struct sock *udp_v6_mcast_next(struct net *net, struct sock *sk, 555static struct sock *udp_v6_mcast_next(struct net *net, struct sock *sk,
553 __be16 loc_port, struct in6_addr *loc_addr, 556 __be16 loc_port, const struct in6_addr *loc_addr,
554 __be16 rmt_port, struct in6_addr *rmt_addr, 557 __be16 rmt_port, const struct in6_addr *rmt_addr,
555 int dif) 558 int dif)
556{ 559{
557 struct hlist_nulls_node *node; 560 struct hlist_nulls_node *node;
@@ -630,7 +633,7 @@ drop:
630 * so we don't need to lock the hashes. 633 * so we don't need to lock the hashes.
631 */ 634 */
632static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb, 635static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
633 struct in6_addr *saddr, struct in6_addr *daddr, 636 const struct in6_addr *saddr, const struct in6_addr *daddr,
634 struct udp_table *udptable) 637 struct udp_table *udptable)
635{ 638{
636 struct sock *sk, *stack[256 / sizeof(struct sock *)]; 639 struct sock *sk, *stack[256 / sizeof(struct sock *)];
@@ -713,7 +716,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
713 struct net *net = dev_net(skb->dev); 716 struct net *net = dev_net(skb->dev);
714 struct sock *sk; 717 struct sock *sk;
715 struct udphdr *uh; 718 struct udphdr *uh;
716 struct in6_addr *saddr, *daddr; 719 const struct in6_addr *saddr, *daddr;
717 u32 ulen = 0; 720 u32 ulen = 0;
718 721
719 if (!pskb_may_pull(skb, sizeof(struct udphdr))) 722 if (!pskb_may_pull(skb, sizeof(struct udphdr)))
@@ -1275,7 +1278,7 @@ int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
1275 1278
1276static int udp6_ufo_send_check(struct sk_buff *skb) 1279static int udp6_ufo_send_check(struct sk_buff *skb)
1277{ 1280{
1278 struct ipv6hdr *ipv6h; 1281 const struct ipv6hdr *ipv6h;
1279 struct udphdr *uh; 1282 struct udphdr *uh;
1280 1283
1281 if (!pskb_may_pull(skb, sizeof(*uh))) 1284 if (!pskb_may_pull(skb, sizeof(*uh)))
@@ -1379,7 +1382,7 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
1379{ 1382{
1380 struct inet_sock *inet = inet_sk(sp); 1383 struct inet_sock *inet = inet_sk(sp);
1381 struct ipv6_pinfo *np = inet6_sk(sp); 1384 struct ipv6_pinfo *np = inet6_sk(sp);
1382 struct in6_addr *dest, *src; 1385 const struct in6_addr *dest, *src;
1383 __u16 destp, srcp; 1386 __u16 destp, srcp;
1384 1387
1385 dest = &np->daddr; 1388 dest = &np->daddr;
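Note the guard on the UDP side: the hash is saved only when inet6_sk(sk)->daddr is non-any, i.e. for connected sockets. An unconnected UDP socket receives from arbitrary peers, so a single cached flow hash would thrash rather than steer. A rough userspace equivalent of the !ipv6_addr_any() test (illustration, not the kernel helper):

    #include <string.h>
    #include <netinet/in.h>

    static int is_connected(const struct in6_addr *daddr)
    {
            static const struct in6_addr any = IN6ADDR_ANY_INIT;
            return memcmp(daddr, &any, sizeof(any)) != 0;
    }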
diff --git a/net/ipv6/xfrm6_mode_beet.c b/net/ipv6/xfrm6_mode_beet.c
index bbd48b101bae..3437d7d4eed6 100644
--- a/net/ipv6/xfrm6_mode_beet.c
+++ b/net/ipv6/xfrm6_mode_beet.c
@@ -41,10 +41,8 @@ static int xfrm6_beet_output(struct xfrm_state *x, struct sk_buff *skb)
41{ 41{
42 struct ipv6hdr *top_iph; 42 struct ipv6hdr *top_iph;
43 struct ip_beet_phdr *ph; 43 struct ip_beet_phdr *ph;
44 struct iphdr *iphv4;
45 int optlen, hdr_len; 44 int optlen, hdr_len;
46 45
47 iphv4 = ip_hdr(skb);
48 hdr_len = 0; 46 hdr_len = 0;
49 optlen = XFRM_MODE_SKB_CB(skb)->optlen; 47 optlen = XFRM_MODE_SKB_CB(skb)->optlen;
50 if (unlikely(optlen)) 48 if (unlikely(optlen))
diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c
index 645cb968d450..4d6edff0498f 100644
--- a/net/ipv6/xfrm6_mode_tunnel.c
+++ b/net/ipv6/xfrm6_mode_tunnel.c
@@ -20,7 +20,7 @@
20 20
21static inline void ipip6_ecn_decapsulate(struct sk_buff *skb) 21static inline void ipip6_ecn_decapsulate(struct sk_buff *skb)
22{ 22{
23 struct ipv6hdr *outer_iph = ipv6_hdr(skb); 23 const struct ipv6hdr *outer_iph = ipv6_hdr(skb);
24 struct ipv6hdr *inner_iph = ipipv6_hdr(skb); 24 struct ipv6hdr *inner_iph = ipipv6_hdr(skb);
25 25
26 if (INET_ECN_is_ce(ipv6_get_dsfield(outer_iph))) 26 if (INET_ECN_is_ce(ipv6_get_dsfield(outer_iph)))
@@ -55,8 +55,8 @@ static int xfrm6_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
55 dsfield &= ~INET_ECN_MASK; 55 dsfield &= ~INET_ECN_MASK;
56 ipv6_change_dsfield(top_iph, 0, dsfield); 56 ipv6_change_dsfield(top_iph, 0, dsfield);
57 top_iph->hop_limit = ip6_dst_hoplimit(dst->child); 57 top_iph->hop_limit = ip6_dst_hoplimit(dst->child);
58 ipv6_addr_copy(&top_iph->saddr, (struct in6_addr *)&x->props.saddr); 58 ipv6_addr_copy(&top_iph->saddr, (const struct in6_addr *)&x->props.saddr);
59 ipv6_addr_copy(&top_iph->daddr, (struct in6_addr *)&x->id.daddr); 59 ipv6_addr_copy(&top_iph->daddr, (const struct in6_addr *)&x->id.daddr);
60 return 0; 60 return 0;
61} 61}
62 62
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 05e34c8ec913..d879f7efbd10 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -124,7 +124,7 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
124 struct flowi6 *fl6 = &fl->u.ip6; 124 struct flowi6 *fl6 = &fl->u.ip6;
125 int onlyproto = 0; 125 int onlyproto = 0;
126 u16 offset = skb_network_header_len(skb); 126 u16 offset = skb_network_header_len(skb);
127 struct ipv6hdr *hdr = ipv6_hdr(skb); 127 const struct ipv6hdr *hdr = ipv6_hdr(skb);
128 struct ipv6_opt_hdr *exthdr; 128 struct ipv6_opt_hdr *exthdr;
129 const unsigned char *nh = skb_network_header(skb); 129 const unsigned char *nh = skb_network_header(skb);
130 u8 nexthdr = nh[IP6CB(skb)->nhoff]; 130 u8 nexthdr = nh[IP6CB(skb)->nhoff];
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 2969cad408de..a6770a04e3bd 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -68,7 +68,7 @@ static DEFINE_SPINLOCK(xfrm6_tunnel_spi_lock);
68 68
69static struct kmem_cache *xfrm6_tunnel_spi_kmem __read_mostly; 69static struct kmem_cache *xfrm6_tunnel_spi_kmem __read_mostly;
70 70
71static inline unsigned xfrm6_tunnel_spi_hash_byaddr(xfrm_address_t *addr) 71static inline unsigned xfrm6_tunnel_spi_hash_byaddr(const xfrm_address_t *addr)
72{ 72{
73 unsigned h; 73 unsigned h;
74 74
@@ -85,7 +85,7 @@ static inline unsigned xfrm6_tunnel_spi_hash_byspi(u32 spi)
85 return spi % XFRM6_TUNNEL_SPI_BYSPI_HSIZE; 85 return spi % XFRM6_TUNNEL_SPI_BYSPI_HSIZE;
86} 86}
87 87
88static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(struct net *net, xfrm_address_t *saddr) 88static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr)
89{ 89{
90 struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net); 90 struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net);
91 struct xfrm6_tunnel_spi *x6spi; 91 struct xfrm6_tunnel_spi *x6spi;
@@ -101,7 +101,7 @@ static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(struct net *net, xfrm_
101 return NULL; 101 return NULL;
102} 102}
103 103
104__be32 xfrm6_tunnel_spi_lookup(struct net *net, xfrm_address_t *saddr) 104__be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr)
105{ 105{
106 struct xfrm6_tunnel_spi *x6spi; 106 struct xfrm6_tunnel_spi *x6spi;
107 u32 spi; 107 u32 spi;
@@ -237,10 +237,10 @@ static int xfrm6_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
237static int xfrm6_tunnel_rcv(struct sk_buff *skb) 237static int xfrm6_tunnel_rcv(struct sk_buff *skb)
238{ 238{
239 struct net *net = dev_net(skb->dev); 239 struct net *net = dev_net(skb->dev);
240 struct ipv6hdr *iph = ipv6_hdr(skb); 240 const struct ipv6hdr *iph = ipv6_hdr(skb);
241 __be32 spi; 241 __be32 spi;
242 242
243 spi = xfrm6_tunnel_spi_lookup(net, (xfrm_address_t *)&iph->saddr); 243 spi = xfrm6_tunnel_spi_lookup(net, (const xfrm_address_t *)&iph->saddr);
244 return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi) > 0 ? : 0; 244 return xfrm6_rcv_spi(skb, IPPROTO_IPV6, spi) > 0 ? : 0;
245} 245}
246 246
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index c9890e25cd4c..cc616974a447 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -1297,8 +1297,7 @@ static int irda_sendmsg(struct kiocb *iocb, struct socket *sock,
1297 /* Note : socket.c set MSG_EOR on SEQPACKET sockets */ 1297 /* Note : socket.c set MSG_EOR on SEQPACKET sockets */
1298 if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_EOR | MSG_CMSG_COMPAT | 1298 if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_EOR | MSG_CMSG_COMPAT |
1299 MSG_NOSIGNAL)) { 1299 MSG_NOSIGNAL)) {
1300 err = -EINVAL; 1300 return -EINVAL;
1301 goto out;
1302 } 1301 }
1303 1302
1304 lock_sock(sk); 1303 lock_sock(sk);
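The af_irda change is a locking fix, not a cleanup: the flags check runs before lock_sock(), but the old "goto out" landed on an exit label that calls release_sock(), unlocking a lock that was never taken. Returning directly keeps the rule that only post-lock failures may use the unlocking label, as in this compressed sketch (stub functions, for illustration only):

    #include <errno.h>

    static void lock_sock(void *sk)    { (void)sk; }
    static void release_sock(void *sk) { (void)sk; }

    static int sendmsg_like(void *sk, int bad_flags)
    {
            int err = 0;

            if (bad_flags)
                    return -EINVAL;   /* lock not held: plain return */

            lock_sock(sk);
            /* ... failures below would set err and goto out ... */
            release_sock(sk);         /* reached only with lock held */
            return err;
    }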
diff --git a/net/irda/irlap.c b/net/irda/irlap.c
index 783c5f367d29..005b424494a0 100644
--- a/net/irda/irlap.c
+++ b/net/irda/irlap.c
@@ -165,7 +165,7 @@ struct irlap_cb *irlap_open(struct net_device *dev, struct qos_info *qos,
165 165
166 irlap_apply_default_connection_parameters(self); 166 irlap_apply_default_connection_parameters(self);
167 167
168 self->N3 = 3; /* # connections attemts to try before giving up */ 168 self->N3 = 3; /* # connections attempts to try before giving up */
169 169
170 self->state = LAP_NDM; 170 self->state = LAP_NDM;
171 171
diff --git a/net/irda/irlap_event.c b/net/irda/irlap_event.c
index d434c8880745..ccd214f9d196 100644
--- a/net/irda/irlap_event.c
+++ b/net/irda/irlap_event.c
@@ -708,7 +708,7 @@ static int irlap_state_reply(struct irlap_cb *self, IRLAP_EVENT event,
708 708
709 self->frame_sent = TRUE; 709 self->frame_sent = TRUE;
710 } 710 }
711 /* Readjust our timer to accomodate devices 711 /* Readjust our timer to accommodate devices
712 * doing faster or slower discovery than us... 712 * doing faster or slower discovery than us...
713 * Jean II */ 713 * Jean II */
714 irlap_start_query_timer(self, info->S, info->s); 714 irlap_start_query_timer(self, info->S, info->s);
@@ -931,7 +931,7 @@ static int irlap_state_setup(struct irlap_cb *self, IRLAP_EVENT event,
931 irlap_send_rr_frame(self, CMD_FRAME); 931 irlap_send_rr_frame(self, CMD_FRAME);
932 932
933 /* The timer is set to half the normal timer to quickly 933 /* The timer is set to half the normal timer to quickly
934 * detect a failure to negociate the new connection 934 * detect a failure to negotiate the new connection
935 * parameters. IrLAP 6.11.3.2, note 3. 935 * parameters. IrLAP 6.11.3.2, note 3.
936 * Note that currently we don't process this failure 936 * Note that currently we don't process this failure
937 * properly, as we should do a quick disconnect. 937 * properly, as we should do a quick disconnect.
@@ -1052,7 +1052,7 @@ static int irlap_state_xmit_p(struct irlap_cb *self, IRLAP_EVENT event,
1052 return -EPROTO; 1052 return -EPROTO;
1053 } 1053 }
1054 1054
1055 /* Substract space used by this skb */ 1055 /* Subtract space used by this skb */
1056 self->bytes_left -= skb->len; 1056 self->bytes_left -= skb->len;
1057#else /* CONFIG_IRDA_DYNAMIC_WINDOW */ 1057#else /* CONFIG_IRDA_DYNAMIC_WINDOW */
1058 /* Window has been adjusted for the max packet 1058 /* Window has been adjusted for the max packet
@@ -1808,7 +1808,7 @@ static int irlap_state_xmit_s(struct irlap_cb *self, IRLAP_EVENT event,
1808 1808
1809 return -EPROTO; /* Try again later */ 1809 return -EPROTO; /* Try again later */
1810 } 1810 }
1811 /* Substract space used by this skb */ 1811 /* Subtract space used by this skb */
1812 self->bytes_left -= skb->len; 1812 self->bytes_left -= skb->len;
1813#else /* CONFIG_IRDA_DYNAMIC_WINDOW */ 1813#else /* CONFIG_IRDA_DYNAMIC_WINDOW */
1814 /* Window has been adjusted for the max packet 1814 /* Window has been adjusted for the max packet
@@ -2227,8 +2227,6 @@ static int irlap_state_nrm_s(struct irlap_cb *self, IRLAP_EVENT event,
2227static int irlap_state_sclose(struct irlap_cb *self, IRLAP_EVENT event, 2227static int irlap_state_sclose(struct irlap_cb *self, IRLAP_EVENT event,
2228 struct sk_buff *skb, struct irlap_info *info) 2228 struct sk_buff *skb, struct irlap_info *info)
2229{ 2229{
2230 int ret = 0;
2231
2232 IRDA_DEBUG(1, "%s()\n", __func__); 2230 IRDA_DEBUG(1, "%s()\n", __func__);
2233 2231
2234 IRDA_ASSERT(self != NULL, return -ENODEV;); 2232 IRDA_ASSERT(self != NULL, return -ENODEV;);
@@ -2289,7 +2287,6 @@ static int irlap_state_sclose(struct irlap_cb *self, IRLAP_EVENT event,
2289 IRDA_DEBUG(1, "%s(), Unknown event %d, (%s)\n", __func__, 2287 IRDA_DEBUG(1, "%s(), Unknown event %d, (%s)\n", __func__,
2290 event, irlap_event[event]); 2288 event, irlap_event[event]);
2291 2289
2292 ret = -EINVAL;
2293 break; 2290 break;
2294 } 2291 }
2295 2292
diff --git a/net/irda/irlap_frame.c b/net/irda/irlap_frame.c
index 688222cbf55b..8c004161a843 100644
--- a/net/irda/irlap_frame.c
+++ b/net/irda/irlap_frame.c
@@ -848,7 +848,7 @@ void irlap_send_data_primary_poll(struct irlap_cb *self, struct sk_buff *skb)
848 * though IrLAP is currently sending the *last* frame of the 848 * though IrLAP is currently sending the *last* frame of the
849 * tx-window, the driver most likely has only just started 849 * tx-window, the driver most likely has only just started
850 * sending the *first* frame of the same tx-window. 850 * sending the *first* frame of the same tx-window.
851 * I.e. we are always at the very begining of or Tx window. 851 * I.e. we are always at the very beginning of or Tx window.
852 * Now, we are supposed to set the final timer from the end 852 * Now, we are supposed to set the final timer from the end
853 * of our tx-window to let the other peer reply. So, we need 853 * of our tx-window to let the other peer reply. So, we need
854 * to add extra time to compensate for the fact that we 854 * to add extra time to compensate for the fact that we
diff --git a/net/irda/irlmp_event.c b/net/irda/irlmp_event.c
index c1fb5db81042..9505a7d06f1a 100644
--- a/net/irda/irlmp_event.c
+++ b/net/irda/irlmp_event.c
@@ -498,7 +498,7 @@ static int irlmp_state_disconnected(struct lsap_cb *self, IRLMP_EVENT event,
498 switch (event) { 498 switch (event) {
499#ifdef CONFIG_IRDA_ULTRA 499#ifdef CONFIG_IRDA_ULTRA
500 case LM_UDATA_INDICATION: 500 case LM_UDATA_INDICATION:
501 /* This is most bizzare. Those packets are aka unreliable 501 /* This is most bizarre. Those packets are aka unreliable
502 * connected, aka IrLPT or SOCK_DGRAM/IRDAPROTO_UNITDATA. 502 * connected, aka IrLPT or SOCK_DGRAM/IRDAPROTO_UNITDATA.
503 * Why do we pass them as Ultra ??? Jean II */ 503 * Why do we pass them as Ultra ??? Jean II */
504 irlmp_connless_data_indication(self, skb); 504 irlmp_connless_data_indication(self, skb);
diff --git a/net/irda/irnet/irnet.h b/net/irda/irnet/irnet.h
index 0d82ff5aeff1..979ecb2435a7 100644
--- a/net/irda/irnet/irnet.h
+++ b/net/irda/irnet/irnet.h
@@ -73,7 +73,7 @@
73 * Infinite thanks to those brave souls for providing the infrastructure 73 * Infinite thanks to those brave souls for providing the infrastructure
74 * upon which IrNET is built. 74 * upon which IrNET is built.
75 * 75 *
76 * Thanks to all my collegues in HP for helping me. In particular, 76 * Thanks to all my colleagues in HP for helping me. In particular,
77 * thanks to Salil Pradhan and Bill Serra for W2k testing... 77 * thanks to Salil Pradhan and Bill Serra for W2k testing...
78 * Thanks to Luiz Magalhaes for irnetd and much testing... 78 * Thanks to Luiz Magalhaes for irnetd and much testing...
79 * 79 *
diff --git a/net/irda/irproc.c b/net/irda/irproc.c
index 318766e5dbdf..b9ac598e2116 100644
--- a/net/irda/irproc.c
+++ b/net/irda/irproc.c
@@ -65,15 +65,14 @@ static const struct irda_entry irda_dirs[] = {
65void __init irda_proc_register(void) 65void __init irda_proc_register(void)
66{ 66{
67 int i; 67 int i;
68 struct proc_dir_entry *d;
69 68
70 proc_irda = proc_mkdir("irda", init_net.proc_net); 69 proc_irda = proc_mkdir("irda", init_net.proc_net);
71 if (proc_irda == NULL) 70 if (proc_irda == NULL)
72 return; 71 return;
73 72
74 for (i = 0; i < ARRAY_SIZE(irda_dirs); i++) 73 for (i = 0; i < ARRAY_SIZE(irda_dirs); i++)
75 d = proc_create(irda_dirs[i].name, 0, proc_irda, 74 (void) proc_create(irda_dirs[i].name, 0, proc_irda,
76 irda_dirs[i].fops); 75 irda_dirs[i].fops);
77} 76}
78 77
79/* 78/*
diff --git a/net/irda/irqueue.c b/net/irda/irqueue.c
index 849aaf0dabb5..9715e6e5900b 100644
--- a/net/irda/irqueue.c
+++ b/net/irda/irqueue.c
@@ -40,7 +40,7 @@
40 * o the hash function for ints is pathetic (but could be changed) 40 * o the hash function for ints is pathetic (but could be changed)
41 * o locking is sometime suspicious (especially during enumeration) 41 * o locking is sometime suspicious (especially during enumeration)
42 * o most users have only a few elements (== overhead) 42 * o most users have only a few elements (== overhead)
43 * o most users never use seach, so don't benefit from hashing 43 * o most users never use search, so don't benefit from hashing
44 * Problem already fixed : 44 * Problem already fixed :
45 * o not 64 bit compliant (most users do hashv = (int) self) 45 * o not 64 bit compliant (most users do hashv = (int) self)
46 * o hashbin_remove() is broken => use hashbin_remove_this() 46 * o hashbin_remove() is broken => use hashbin_remove_this()
diff --git a/net/irda/irttp.c b/net/irda/irttp.c
index f6054f9ccbe3..9d9af4606970 100644
--- a/net/irda/irttp.c
+++ b/net/irda/irttp.c
@@ -1193,7 +1193,7 @@ EXPORT_SYMBOL(irttp_connect_request);
1193/* 1193/*
1194 * Function irttp_connect_confirm (handle, qos, skb) 1194 * Function irttp_connect_confirm (handle, qos, skb)
1195 * 1195 *
1196 * Sevice user confirms TSAP connection with peer. 1196 * Service user confirms TSAP connection with peer.
1197 * 1197 *
1198 */ 1198 */
1199static void irttp_connect_confirm(void *instance, void *sap, 1199static void irttp_connect_confirm(void *instance, void *sap,
diff --git a/net/irda/qos.c b/net/irda/qos.c
index 2b00974e5bae..1b51bcf42394 100644
--- a/net/irda/qos.c
+++ b/net/irda/qos.c
@@ -39,16 +39,16 @@
39#include <net/irda/irlap_frame.h> 39#include <net/irda/irlap_frame.h>
40 40
41/* 41/*
42 * Maximum values of the baud rate we negociate with the other end. 42 * Maximum values of the baud rate we negotiate with the other end.
43 * Most often, you don't have to change that, because Linux-IrDA will 43 * Most often, you don't have to change that, because Linux-IrDA will
44 * use the maximum offered by the link layer, which usually works fine. 44 * use the maximum offered by the link layer, which usually works fine.
45 * In some very rare cases, you may want to limit it to lower speeds... 45 * In some very rare cases, you may want to limit it to lower speeds...
46 */ 46 */
47int sysctl_max_baud_rate = 16000000; 47int sysctl_max_baud_rate = 16000000;
48/* 48/*
49 * Maximum value of the lap disconnect timer we negociate with the other end. 49 * Maximum value of the lap disconnect timer we negotiate with the other end.
50 * Most often, the value below represent the best compromise, but some user 50 * Most often, the value below represent the best compromise, but some user
51 * may want to keep the LAP alive longuer or shorter in case of link failure. 51 * may want to keep the LAP alive longer or shorter in case of link failure.
52 * Remember that the threshold time (early warning) is fixed to 3s... 52 * Remember that the threshold time (early warning) is fixed to 3s...
53 */ 53 */
54int sysctl_max_noreply_time = 12; 54int sysctl_max_noreply_time = 12;
@@ -411,7 +411,7 @@ static void irlap_adjust_qos_settings(struct qos_info *qos)
411 * Fix tx data size according to user limits - Jean II 411 * Fix tx data size according to user limits - Jean II
412 */ 412 */
413 if (qos->data_size.value > sysctl_max_tx_data_size) 413 if (qos->data_size.value > sysctl_max_tx_data_size)
414 /* Allow non discrete adjustement to avoid loosing capacity */ 414 /* Allow non discrete adjustement to avoid losing capacity */
415 qos->data_size.value = sysctl_max_tx_data_size; 415 qos->data_size.value = sysctl_max_tx_data_size;
416 /* 416 /*
417 * Override Tx window if user request it. - Jean II 417 * Override Tx window if user request it. - Jean II
diff --git a/net/irda/timer.c b/net/irda/timer.c
index 0335ba0cc593..f418cb2ad49c 100644
--- a/net/irda/timer.c
+++ b/net/irda/timer.c
@@ -59,7 +59,7 @@ void irlap_start_query_timer(struct irlap_cb *self, int S, int s)
59 * slot time, plus add some extra time to properly receive the last 59 * slot time, plus add some extra time to properly receive the last
60 * discovery packet (which is longer due to extra discovery info), 60 * discovery packet (which is longer due to extra discovery info),
61 * to avoid messing with for incomming connections requests and 61 * to avoid messing with for incomming connections requests and
62 * to accomodate devices that perform discovery slower than us. 62 * to accommodate devices that perform discovery slower than us.
63 * Jean II */ 63 * Jean II */
64 timeout = ((sysctl_slot_timeout * HZ / 1000) * (S - s) 64 timeout = ((sysctl_slot_timeout * HZ / 1000) * (S - s)
65 + XIDEXTRA_TIMEOUT + SMALLBUSY_TIMEOUT); 65 + XIDEXTRA_TIMEOUT + SMALLBUSY_TIMEOUT);
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 9637e45744fa..986b2a5e8769 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -250,7 +250,7 @@ static struct device *af_iucv_dev;
250 * PRMDATA[0..6] socket data (max 7 bytes); 250 * PRMDATA[0..6] socket data (max 7 bytes);
251 * PRMDATA[7] socket data length value (len is 0xff - PRMDATA[7]) 251 * PRMDATA[7] socket data length value (len is 0xff - PRMDATA[7])
252 * 252 *
253 * The socket data length is computed by substracting the socket data length 253 * The socket data length is computed by subtracting the socket data length
254 * value from 0xFF. 254 * value from 0xFF.
255 * If the socket data len is greater 7, then PRMDATA can be used for special 255 * If the socket data len is greater 7, then PRMDATA can be used for special
256 * notifications (see iucv_sock_shutdown); and further, 256 * notifications (see iucv_sock_shutdown); and further,
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index 1ee5dab3cfae..8f156bd86be7 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -735,7 +735,7 @@ static void iucv_cleanup_queue(void)
735 struct iucv_irq_list *p, *n; 735 struct iucv_irq_list *p, *n;
736 736
737 /* 737 /*
738 * When a path is severed, the pathid can be reused immediatly 738 * When a path is severed, the pathid can be reused immediately
739 * on a iucv connect or a connection pending interrupt. Remove 739 * on a iucv connect or a connection pending interrupt. Remove
740 * all entries from the task queue that refer to a stale pathid 740 * all entries from the task queue that refer to a stale pathid
741 * (iucv_path_table[ix] == NULL). Only then do the iucv connect 741 * (iucv_path_table[ix] == NULL). Only then do the iucv connect
@@ -807,7 +807,7 @@ void iucv_unregister(struct iucv_handler *handler, int smp)
807 spin_lock_bh(&iucv_table_lock); 807 spin_lock_bh(&iucv_table_lock);
808 /* Remove handler from the iucv_handler_list. */ 808 /* Remove handler from the iucv_handler_list. */
809 list_del_init(&handler->list); 809 list_del_init(&handler->list);
810 /* Sever all pathids still refering to the handler. */ 810 /* Sever all pathids still referring to the handler. */
811 list_for_each_entry_safe(p, n, &handler->paths, list) { 811 list_for_each_entry_safe(p, n, &handler->paths, list) {
812 iucv_sever_pathid(p->pathid, NULL); 812 iucv_sever_pathid(p->pathid, NULL);
813 iucv_path_table[p->pathid] = NULL; 813 iucv_path_table[p->pathid] = NULL;
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 7db86ffcf070..d62401c25684 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -712,7 +712,7 @@ static unsigned int pfkey_sockaddr_fill(const xfrm_address_t *xaddr, __be16 port
712 sin6->sin6_family = AF_INET6; 712 sin6->sin6_family = AF_INET6;
713 sin6->sin6_port = port; 713 sin6->sin6_port = port;
714 sin6->sin6_flowinfo = 0; 714 sin6->sin6_flowinfo = 0;
715 ipv6_addr_copy(&sin6->sin6_addr, (struct in6_addr *)xaddr->a6); 715 ipv6_addr_copy(&sin6->sin6_addr, (const struct in6_addr *)xaddr->a6);
716 sin6->sin6_scope_id = 0; 716 sin6->sin6_scope_id = 0;
717 return 128; 717 return 128;
718 } 718 }
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index 4c1e540732d7..93a41a09458b 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -795,11 +795,12 @@ int l2tp_nl_register_ops(enum l2tp_pwtype pw_type, const struct l2tp_nl_cmd_ops
 		goto out;
 
 	l2tp_nl_cmd_ops[pw_type] = ops;
+	ret = 0;
 
 out:
 	genl_unlock();
 err:
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL_GPL(l2tp_nl_register_ops);
 
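[Editor's note: the fix above is the common goto-based error-path idiom: seed ret with the failure code, overwrite it only on success, and return it from a single exit label. A standalone sketch of the pattern (hypothetical names, not the l2tp code):]

    #include <errno.h>
    #include <pthread.h>

    static pthread_mutex_t ops_lock = PTHREAD_MUTEX_INITIALIZER;
    static const void *registered_ops;

    /* Register ops exactly once; every path funnels through "out",
     * so the lock is always released and ret is always meaningful. */
    int register_ops_once(const void *ops)
    {
        int ret = -EBUSY;

        pthread_mutex_lock(&ops_lock);
        if (registered_ops)
            goto out;           /* ret still holds -EBUSY */
        registered_ops = ops;
        ret = 0;                /* success is set explicitly */
    out:
        pthread_mutex_unlock(&ops_lock);
        return ret;
    }
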
diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c
index 058f1e9a9128..903242111317 100644
--- a/net/llc/llc_input.c
+++ b/net/llc/llc_input.c
@@ -121,8 +121,7 @@ static inline int llc_fixup_skb(struct sk_buff *skb)
 	s32 data_size = ntohs(pdulen) - llc_len;
 
 	if (data_size < 0 ||
-	    ((skb_tail_pointer(skb) -
-	     (u8 *)pdu) - llc_len) < data_size)
+	    !pskb_may_pull(skb, data_size))
 		return 0;
 	if (unlikely(pskb_trim_rcsum(skb, data_size)))
 		return 0;
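[Editor's note: pskb_may_pull() asks whether the given number of bytes of a possibly paged skb are really available, linearizing them if necessary; the open-coded tail-pointer arithmetic it replaces only worked for linear buffers. A kernel-style sketch of the idiom, not a standalone program, with illustrative names:]

    /* Sketch only: validate that an skb carries enough bytes before
     * trusting a length taken from the packet itself. */
    static int example_fixup(struct sk_buff *skb, unsigned int hdr_len,
                             unsigned int payload_len)
    {
        /* Returns 0 when the claimed payload exceeds the data actually
         * present, including data living in paged fragments. */
        if (!pskb_may_pull(skb, hdr_len + payload_len))
            return 0;

        /* Safe to trim data and checksum to the advertised size now. */
        return !pskb_trim_rcsum(skb, hdr_len + payload_len);
    }
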
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 8d6d6e3d95da..a77849970914 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -97,7 +97,7 @@ struct ieee80211_bss {
 	size_t supp_rates_len;
 
 	/*
-	 * During assocation, we save an ERP value from a probe response so
+	 * During association, we save an ERP value from a probe response so
 	 * that we can feed ERP info to the driver when handling the
 	 * association completes. these fields probably won't be up-to-date
 	 * otherwise, you probably don't want to use them.
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 7776ae5a8f15..35c715adaae2 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -629,7 +629,7 @@ void mesh_path_discard_frame(struct sk_buff *skb,
  *
  * @mpath: mesh path whose queue has to be freed
  *
- * Locking: the function must me called withing a rcu_read_lock region
+ * Locking: the function must me called within a rcu_read_lock region
  */
 void mesh_path_flush_pending(struct mesh_path *mpath)
 {
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index dbdebeda097f..c06aa3ac6b9d 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -259,7 +259,7 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
 		}
 	}
 
-	/* try to sample up to half of the availble rates during each interval */
+	/* try to sample up to half of the available rates during each interval */
 	mi->sample_count *= 4;
 
 	cur_prob = 0;
diff --git a/net/mac80211/rc80211_pid.h b/net/mac80211/rc80211_pid.h
index 6510f8ee738e..19111c7bf454 100644
--- a/net/mac80211/rc80211_pid.h
+++ b/net/mac80211/rc80211_pid.h
@@ -77,7 +77,7 @@ union rc_pid_event_data {
 };
 
 struct rc_pid_event {
-	/* The time when the event occured */
+	/* The time when the event occurred */
 	unsigned long timestamp;
 
 	/* Event ID number */
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 1f06b31e21c1..a864890e4d03 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -382,7 +382,7 @@ static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
  * specs were sane enough this time around to require padding each A-MSDU
  * subframe to a length that is a multiple of four.
  *
- * Padding like Atheros hardware adds which is inbetween the 802.11 header and
+ * Padding like Atheros hardware adds which is between the 802.11 header and
  * the payload is not supported, the driver is required to move the 802.11
  * header to be directly in front of the payload in that case.
  */
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 7c5c6da01bea..a03d8a312875 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -47,9 +47,9 @@
  * Station entries are added by mac80211 when you establish a link with a
  * peer. This means different things for the different type of interfaces
  * we support. For a regular station this mean we add the AP sta when we
- * receive an assocation response from the AP. For IBSS this occurs when
+ * receive an association response from the AP. For IBSS this occurs when
  * get to know about a peer on the same IBSS. For WDS we add the sta for
- * the peer imediately upon device open. When using AP mode we add stations
+ * the peer immediately upon device open. When using AP mode we add stations
  * for each respective station upon request from userspace through nl80211.
  *
  * In order to remove a STA info structure, various sta_info_destroy_*()
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index af1a7f8c8675..aa0adcbf3a93 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -176,7 +176,7 @@ struct sta_ampdu_mlme {
 /**
  * enum plink_state - state of a mesh peer link finite state machine
  *
- * @PLINK_LISTEN: initial state, considered the implicit state of non existant
+ * @PLINK_LISTEN: initial state, considered the implicit state of non existent
  *	mesh peer links
  * @PLINK_OPN_SNT: mesh plink open frame has been sent to this mesh peer
  * @PLINK_OPN_RCVD: mesh plink open frame has been received from this mesh peer
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index c3f988aa1152..32bff6d86cb2 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -652,7 +652,6 @@ comment "Xtables matches"
 config NETFILTER_XT_MATCH_ADDRTYPE
 	tristate '"addrtype" address type match support'
 	depends on NETFILTER_ADVANCED
-	depends on (IPV6 || IPV6=n)
 	---help---
 	  This option allows you to match what routing thinks of an address,
 	  eg. UNICAST, LOCAL, BROADCAST, ...
diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c
index bca96990218d..a113ff066928 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ip.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ip.c
@@ -338,8 +338,7 @@ bitmap_ip_head(struct ip_set *set, struct sk_buff *skb)
 	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip));
 	if (map->netmask != 32)
 		NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, map->netmask);
-	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
-		      htonl(atomic_read(&set->ref) - 1));
+	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
 	NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
 		      htonl(sizeof(*map) + map->memsize));
 	if (with_timeout(map->timeout))
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
index 5e790172deff..00a33242e90c 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
@@ -434,8 +434,7 @@ bitmap_ipmac_head(struct ip_set *set, struct sk_buff *skb)
 		goto nla_put_failure;
 	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, htonl(map->first_ip));
 	NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip));
-	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
-		      htonl(atomic_read(&set->ref) - 1));
+	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
 	NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
 		      htonl(sizeof(*map)
 			    + (map->last_ip - map->first_ip + 1) * map->dsize));
diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c
index 165f09b1a9cb..6b38eb8f6ed8 100644
--- a/net/netfilter/ipset/ip_set_bitmap_port.c
+++ b/net/netfilter/ipset/ip_set_bitmap_port.c
@@ -320,8 +320,7 @@ bitmap_port_head(struct ip_set *set, struct sk_buff *skb)
 		goto nla_put_failure;
 	NLA_PUT_NET16(skb, IPSET_ATTR_PORT, htons(map->first_port));
 	NLA_PUT_NET16(skb, IPSET_ATTR_PORT_TO, htons(map->last_port));
-	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
-		      htonl(atomic_read(&set->ref) - 1));
+	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
 	NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
 		      htonl(sizeof(*map) + map->memsize));
 	if (with_timeout(map->timeout))
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index d6b48230a540..9152e69a162d 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -26,6 +26,7 @@
 
 static LIST_HEAD(ip_set_type_list);	/* all registered set types */
 static DEFINE_MUTEX(ip_set_type_mutex);	/* protects ip_set_type_list */
+static DEFINE_RWLOCK(ip_set_ref_lock);	/* protects the set refs */
 
 static struct ip_set **ip_set_list;	/* all individual sets */
 static ip_set_id_t ip_set_max = CONFIG_IP_SET_MAX; /* max number of sets */
@@ -301,13 +302,18 @@ EXPORT_SYMBOL_GPL(ip_set_get_ipaddr6);
 static inline void
 __ip_set_get(ip_set_id_t index)
 {
-	atomic_inc(&ip_set_list[index]->ref);
+	write_lock_bh(&ip_set_ref_lock);
+	ip_set_list[index]->ref++;
+	write_unlock_bh(&ip_set_ref_lock);
 }
 
 static inline void
 __ip_set_put(ip_set_id_t index)
 {
-	atomic_dec(&ip_set_list[index]->ref);
+	write_lock_bh(&ip_set_ref_lock);
+	BUG_ON(ip_set_list[index]->ref == 0);
+	ip_set_list[index]->ref--;
+	write_unlock_bh(&ip_set_ref_lock);
 }
 
 /*
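
[Editor's note: the change above swaps an atomic_t for a plain integer guarded by a reader/writer lock, so that "check the count and act on it" sequences can be made atomic as a whole. A runnable userspace analogue with pthreads; all names are illustrative:]

    #include <assert.h>
    #include <pthread.h>

    static pthread_rwlock_t ref_lock = PTHREAD_RWLOCK_INITIALIZER;
    static unsigned int ref;            /* protected by ref_lock */

    static void obj_get(void)
    {
        pthread_rwlock_wrlock(&ref_lock);
        ref++;
        pthread_rwlock_unlock(&ref_lock);
    }

    static void obj_put(void)
    {
        pthread_rwlock_wrlock(&ref_lock);
        assert(ref != 0);               /* mirrors the BUG_ON above */
        ref--;
        pthread_rwlock_unlock(&ref_lock);
    }

    /* Readers can test the counter and rely on it not changing for
     * the duration of the read-side critical section. */
    static int obj_busy(void)
    {
        int busy;

        pthread_rwlock_rdlock(&ref_lock);
        busy = ref != 0;
        pthread_rwlock_unlock(&ref_lock);
        return busy;
    }
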
@@ -324,7 +330,7 @@ ip_set_test(ip_set_id_t index, const struct sk_buff *skb,
 	struct ip_set *set = ip_set_list[index];
 	int ret = 0;
 
-	BUG_ON(set == NULL || atomic_read(&set->ref) == 0);
+	BUG_ON(set == NULL);
 	pr_debug("set %s, index %u\n", set->name, index);
 
 	if (dim < set->type->dimension ||
@@ -356,7 +362,7 @@ ip_set_add(ip_set_id_t index, const struct sk_buff *skb,
 	struct ip_set *set = ip_set_list[index];
 	int ret;
 
-	BUG_ON(set == NULL || atomic_read(&set->ref) == 0);
+	BUG_ON(set == NULL);
 	pr_debug("set %s, index %u\n", set->name, index);
 
 	if (dim < set->type->dimension ||
@@ -378,7 +384,7 @@ ip_set_del(ip_set_id_t index, const struct sk_buff *skb,
 	struct ip_set *set = ip_set_list[index];
 	int ret = 0;
 
-	BUG_ON(set == NULL || atomic_read(&set->ref) == 0);
+	BUG_ON(set == NULL);
 	pr_debug("set %s, index %u\n", set->name, index);
 
 	if (dim < set->type->dimension ||
@@ -397,7 +403,6 @@ EXPORT_SYMBOL_GPL(ip_set_del);
  * Find set by name, reference it once. The reference makes sure the
  * thing pointed to, does not go away under our feet.
  *
- * The nfnl mutex must already be activated.
  */
 ip_set_id_t
 ip_set_get_byname(const char *name, struct ip_set **set)
@@ -423,15 +428,12 @@ EXPORT_SYMBOL_GPL(ip_set_get_byname);
  * reference count by 1. The caller shall not assume the index
  * to be valid, after calling this function.
  *
- * The nfnl mutex must already be activated.
  */
 void
 ip_set_put_byindex(ip_set_id_t index)
 {
-	if (ip_set_list[index] != NULL) {
-		BUG_ON(atomic_read(&ip_set_list[index]->ref) == 0);
+	if (ip_set_list[index] != NULL)
 		__ip_set_put(index);
-	}
 }
 EXPORT_SYMBOL_GPL(ip_set_put_byindex);
 
@@ -441,7 +443,6 @@ EXPORT_SYMBOL_GPL(ip_set_put_byindex);
  * can't be destroyed. The set cannot be renamed due to
  * the referencing either.
  *
- * The nfnl mutex must already be activated.
  */
 const char *
 ip_set_name_byindex(ip_set_id_t index)
@@ -449,7 +450,7 @@ ip_set_name_byindex(ip_set_id_t index)
 	const struct ip_set *set = ip_set_list[index];
 
 	BUG_ON(set == NULL);
-	BUG_ON(atomic_read(&set->ref) == 0);
+	BUG_ON(set->ref == 0);
 
 	/* Referenced, so it's safe */
 	return set->name;
@@ -515,10 +516,7 @@ void
 ip_set_nfnl_put(ip_set_id_t index)
 {
 	nfnl_lock();
-	if (ip_set_list[index] != NULL) {
-		BUG_ON(atomic_read(&ip_set_list[index]->ref) == 0);
-		__ip_set_put(index);
-	}
+	ip_set_put_byindex(index);
 	nfnl_unlock();
 }
 EXPORT_SYMBOL_GPL(ip_set_nfnl_put);
@@ -526,7 +524,7 @@ EXPORT_SYMBOL_GPL(ip_set_nfnl_put);
 /*
  * Communication protocol with userspace over netlink.
  *
- * We already locked by nfnl_lock.
+ * The commands are serialized by the nfnl mutex.
  */
 
 static inline bool
@@ -657,7 +655,6 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb,
 		return -ENOMEM;
 	rwlock_init(&set->lock);
 	strlcpy(set->name, name, IPSET_MAXNAMELEN);
-	atomic_set(&set->ref, 0);
 	set->family = family;
 
 	/*
@@ -690,8 +687,8 @@ ip_set_create(struct sock *ctnl, struct sk_buff *skb,
 
 	/*
 	 * Here, we have a valid, constructed set and we are protected
-	 * by nfnl_lock. Find the first free index in ip_set_list and
-	 * check clashing.
+	 * by the nfnl mutex. Find the first free index in ip_set_list
+	 * and check clashing.
 	 */
 	if ((ret = find_free_id(set->name, &index, &clash)) != 0) {
 		/* If this is the same set and requested, ignore error */
@@ -751,31 +748,51 @@ ip_set_destroy(struct sock *ctnl, struct sk_buff *skb,
 		      const struct nlattr * const attr[])
 {
 	ip_set_id_t i;
+	int ret = 0;
 
 	if (unlikely(protocol_failed(attr)))
 		return -IPSET_ERR_PROTOCOL;
 
-	/* References are protected by the nfnl mutex */
+	/* Commands are serialized and references are
+	 * protected by the ip_set_ref_lock.
+	 * External systems (i.e. xt_set) must call
+	 * ip_set_put|get_nfnl_* functions, that way we
+	 * can safely check references here.
+	 *
+	 * list:set timer can only decrement the reference
+	 * counter, so if it's already zero, we can proceed
+	 * without holding the lock.
+	 */
+	read_lock_bh(&ip_set_ref_lock);
 	if (!attr[IPSET_ATTR_SETNAME]) {
 		for (i = 0; i < ip_set_max; i++) {
-			if (ip_set_list[i] != NULL &&
-			    (atomic_read(&ip_set_list[i]->ref)))
-				return -IPSET_ERR_BUSY;
+			if (ip_set_list[i] != NULL && ip_set_list[i]->ref) {
+				ret = -IPSET_ERR_BUSY;
+				goto out;
+			}
 		}
+		read_unlock_bh(&ip_set_ref_lock);
 		for (i = 0; i < ip_set_max; i++) {
 			if (ip_set_list[i] != NULL)
 				ip_set_destroy_set(i);
 		}
 	} else {
 		i = find_set_id(nla_data(attr[IPSET_ATTR_SETNAME]));
-		if (i == IPSET_INVALID_ID)
-			return -ENOENT;
-		else if (atomic_read(&ip_set_list[i]->ref))
-			return -IPSET_ERR_BUSY;
+		if (i == IPSET_INVALID_ID) {
+			ret = -ENOENT;
+			goto out;
+		} else if (ip_set_list[i]->ref) {
+			ret = -IPSET_ERR_BUSY;
+			goto out;
+		}
+		read_unlock_bh(&ip_set_ref_lock);
 
 		ip_set_destroy_set(i);
 	}
 	return 0;
+out:
+	read_unlock_bh(&ip_set_ref_lock);
+	return ret;
 }
 
 /* Flush sets */
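
[Editor's note: the destroy path above shows the race the new rwlock closes: the busy check and the decision to destroy must happen under the same read lock, otherwise a reference taken between the check and the teardown would be lost. A condensed userspace sketch of that check-then-act shape; names are illustrative and the externs stand in for the ipset state:]

    #include <errno.h>
    #include <pthread.h>

    extern pthread_rwlock_t ref_lock;   /* same lock the counter uses */
    extern unsigned int ref;
    extern void destroy_object(void);

    int try_destroy(void)
    {
        pthread_rwlock_rdlock(&ref_lock);
        if (ref != 0) {
            pthread_rwlock_unlock(&ref_lock);
            return -EBUSY;              /* someone still holds a reference */
        }
        /* New references are only taken under the write lock and the
         * command path is serialized, so dropping the lock here is safe. */
        pthread_rwlock_unlock(&ref_lock);
        destroy_object();
        return 0;
    }
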
@@ -834,6 +851,7 @@ ip_set_rename(struct sock *ctnl, struct sk_buff *skb,
 	struct ip_set *set;
 	const char *name2;
 	ip_set_id_t i;
+	int ret = 0;
 
 	if (unlikely(protocol_failed(attr) ||
 		     attr[IPSET_ATTR_SETNAME] == NULL ||
@@ -843,25 +861,33 @@ ip_set_rename(struct sock *ctnl, struct sk_buff *skb,
 	set = find_set(nla_data(attr[IPSET_ATTR_SETNAME]));
 	if (set == NULL)
 		return -ENOENT;
-	if (atomic_read(&set->ref) != 0)
-		return -IPSET_ERR_REFERENCED;
+
+	read_lock_bh(&ip_set_ref_lock);
+	if (set->ref != 0) {
+		ret = -IPSET_ERR_REFERENCED;
+		goto out;
+	}
 
 	name2 = nla_data(attr[IPSET_ATTR_SETNAME2]);
 	for (i = 0; i < ip_set_max; i++) {
 		if (ip_set_list[i] != NULL &&
-		    STREQ(ip_set_list[i]->name, name2))
-			return -IPSET_ERR_EXIST_SETNAME2;
+		    STREQ(ip_set_list[i]->name, name2)) {
+			ret = -IPSET_ERR_EXIST_SETNAME2;
+			goto out;
+		}
 	}
 	strncpy(set->name, name2, IPSET_MAXNAMELEN);
 
-	return 0;
+out:
+	read_unlock_bh(&ip_set_ref_lock);
+	return ret;
 }
 
 /* Swap two sets so that name/index points to the other.
  * References and set names are also swapped.
  *
- * We are protected by the nfnl mutex and references are
- * manipulated only by holding the mutex. The kernel interfaces
+ * The commands are serialized by the nfnl mutex and references are
+ * protected by the ip_set_ref_lock. The kernel interfaces
  * do not hold the mutex but the pointer settings are atomic
  * so the ip_set_list always contains valid pointers to the sets.
  */
@@ -874,7 +900,6 @@ ip_set_swap(struct sock *ctnl, struct sk_buff *skb,
 	struct ip_set *from, *to;
 	ip_set_id_t from_id, to_id;
 	char from_name[IPSET_MAXNAMELEN];
-	u32 from_ref;
 
 	if (unlikely(protocol_failed(attr) ||
 		     attr[IPSET_ATTR_SETNAME] == NULL ||
@@ -893,23 +918,21 @@ ip_set_swap(struct sock *ctnl, struct sk_buff *skb,
 	to = ip_set_list[to_id];
 
 	/* Features must not change.
-	 * Not an artifical restriction anymore, as we must prevent
+	 * Not an artificial restriction anymore, as we must prevent
 	 * possible loops created by swapping in setlist type of sets. */
 	if (!(from->type->features == to->type->features &&
 	      from->type->family == to->type->family))
 		return -IPSET_ERR_TYPE_MISMATCH;
 
-	/* No magic here: ref munging protected by the nfnl_lock */
 	strncpy(from_name, from->name, IPSET_MAXNAMELEN);
-	from_ref = atomic_read(&from->ref);
-
 	strncpy(from->name, to->name, IPSET_MAXNAMELEN);
-	atomic_set(&from->ref, atomic_read(&to->ref));
 	strncpy(to->name, from_name, IPSET_MAXNAMELEN);
-	atomic_set(&to->ref, from_ref);
 
+	write_lock_bh(&ip_set_ref_lock);
+	swap(from->ref, to->ref);
 	ip_set_list[from_id] = to;
 	ip_set_list[to_id] = from;
+	write_unlock_bh(&ip_set_ref_lock);
 
 	return 0;
 }
@@ -926,7 +949,7 @@ ip_set_dump_done(struct netlink_callback *cb)
 {
 	if (cb->args[2]) {
 		pr_debug("release set %s\n", ip_set_list[cb->args[1]]->name);
-		__ip_set_put((ip_set_id_t) cb->args[1]);
+		ip_set_put_byindex((ip_set_id_t) cb->args[1]);
 	}
 	return 0;
 }
@@ -1068,7 +1091,7 @@ release_refcount:
 	/* If there was an error or set is done, release set */
 	if (ret || !cb->args[2]) {
 		pr_debug("release set %s\n", ip_set_list[index]->name);
-		__ip_set_put(index);
+		ip_set_put_byindex(index);
 	}
 
 	/* If we dump all sets, continue with dumping last ones */
diff --git a/net/netfilter/ipset/ip_set_getport.c b/net/netfilter/ipset/ip_set_getport.c
index 8d5227212686..757143b2240a 100644
--- a/net/netfilter/ipset/ip_set_getport.c
+++ b/net/netfilter/ipset/ip_set_getport.c
@@ -11,6 +11,7 @@
 #include <linux/skbuff.h>
 #include <linux/icmp.h>
 #include <linux/icmpv6.h>
+#include <linux/sctp.h>
 #include <linux/netfilter_ipv6/ip6_tables.h>
 #include <net/ip.h>
 #include <net/ipv6.h>
@@ -35,7 +36,20 @@ get_port(const struct sk_buff *skb, int protocol, unsigned int protooff,
 		*port = src ? th->source : th->dest;
 		break;
 	}
-	case IPPROTO_UDP: {
+	case IPPROTO_SCTP: {
+		sctp_sctphdr_t _sh;
+		const sctp_sctphdr_t *sh;
+
+		sh = skb_header_pointer(skb, protooff, sizeof(_sh), &_sh);
+		if (sh == NULL)
+			/* No choice either */
+			return false;
+
+		*port = src ? sh->source : sh->dest;
+		break;
+	}
+	case IPPROTO_UDP:
+	case IPPROTO_UDPLITE: {
 		struct udphdr _udph;
 		const struct udphdr *uh;
 
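[Editor's note: skb_header_pointer() copies the requested header into the caller's stack buffer when it is not linearly accessible and returns NULL if the packet is too short, which is why each case above can dereference the result without further checks. A kernel-style sketch of the idiom for a UDP header; not a standalone program, helper name is illustrative:]

    /* Sketch only: extract a 16-bit port from an skb at a known offset. */
    static bool example_get_port(const struct sk_buff *skb,
                                 unsigned int protooff, bool src, __be16 *port)
    {
        struct udphdr _udph;
        const struct udphdr *uh;

        /* Returns a pointer into the skb when the bytes are linear,
         * otherwise copies them into _udph; NULL means a truncated packet. */
        uh = skb_header_pointer(skb, protooff, sizeof(_udph), &_udph);
        if (uh == NULL)
            return false;

        *port = src ? uh->source : uh->dest;
        return true;
    }
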
diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c
index b9214145d357..14281b6b8074 100644
--- a/net/netfilter/ipset/ip_set_hash_ipport.c
+++ b/net/netfilter/ipset/ip_set_hash_ipport.c
@@ -491,7 +491,7 @@ static struct ip_set_type hash_ipport_type __read_mostly = {
 	.features = IPSET_TYPE_IP | IPSET_TYPE_PORT,
 	.dimension = IPSET_DIM_TWO,
 	.family = AF_UNSPEC,
-	.revision = 0,
+	.revision = 1,
 	.create = hash_ipport_create,
 	.create_policy = {
 		[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c
index 4642872df6e1..401c8a2531db 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportip.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportip.c
@@ -509,7 +509,7 @@ static struct ip_set_type hash_ipportip_type __read_mostly = {
 	.features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2,
 	.dimension = IPSET_DIM_THREE,
 	.family = AF_UNSPEC,
-	.revision = 0,
+	.revision = 1,
 	.create = hash_ipportip_create,
 	.create_policy = {
 		[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c
index 2cb84a54b7ad..4743e5402522 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c
@@ -574,7 +574,7 @@ static struct ip_set_type hash_ipportnet_type __read_mostly = {
 	.features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2,
 	.dimension = IPSET_DIM_THREE,
 	.family = AF_UNSPEC,
-	.revision = 0,
+	.revision = 1,
 	.create = hash_ipportnet_create,
 	.create_policy = {
 		[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c
index 8598676f2a05..d2a40362dd3a 100644
--- a/net/netfilter/ipset/ip_set_hash_netport.c
+++ b/net/netfilter/ipset/ip_set_hash_netport.c
@@ -526,7 +526,7 @@ static struct ip_set_type hash_netport_type __read_mostly = {
 	.features = IPSET_TYPE_IP | IPSET_TYPE_PORT,
 	.dimension = IPSET_DIM_TWO,
 	.family = AF_UNSPEC,
-	.revision = 0,
+	.revision = 1,
 	.create = hash_netport_create,
 	.create_policy = {
 		[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index a47c32982f06..e9159e99fc4b 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -43,14 +43,19 @@ struct list_set {
 static inline struct set_elem *
 list_set_elem(const struct list_set *map, u32 id)
 {
-	return (struct set_elem *)((char *)map->members + id * map->dsize);
+	return (struct set_elem *)((void *)map->members + id * map->dsize);
+}
+
+static inline struct set_telem *
+list_set_telem(const struct list_set *map, u32 id)
+{
+	return (struct set_telem *)((void *)map->members + id * map->dsize);
 }
 
 static inline bool
 list_set_timeout(const struct list_set *map, u32 id)
 {
-	const struct set_telem *elem =
-		(const struct set_telem *) list_set_elem(map, id);
+	const struct set_telem *elem = list_set_telem(map, id);
 
 	return ip_set_timeout_test(elem->timeout);
 }
@@ -58,19 +63,11 @@ list_set_timeout(const struct list_set *map, u32 id)
 static inline bool
 list_set_expired(const struct list_set *map, u32 id)
 {
-	const struct set_telem *elem =
-		(const struct set_telem *) list_set_elem(map, id);
+	const struct set_telem *elem = list_set_telem(map, id);
 
 	return ip_set_timeout_expired(elem->timeout);
 }
 
-static inline int
-list_set_exist(const struct set_telem *elem)
-{
-	return elem->id != IPSET_INVALID_ID &&
-	       !ip_set_timeout_expired(elem->timeout);
-}
-
 /* Set list without and with timeout */
 
 static int
@@ -146,11 +143,11 @@ list_elem_tadd(struct list_set *map, u32 i, ip_set_id_t id,
 	struct set_telem *e;
 
 	for (; i < map->size; i++) {
-		e = (struct set_telem *)list_set_elem(map, i);
+		e = list_set_telem(map, i);
 		swap(e->id, id);
+		swap(e->timeout, timeout);
 		if (e->id == IPSET_INVALID_ID)
 			break;
-		swap(e->timeout, timeout);
 	}
 }
 
@@ -164,7 +161,7 @@ list_set_add(struct list_set *map, u32 i, ip_set_id_t id,
 	/* Last element replaced: e.g. add new,before,last */
 	ip_set_put_byindex(e->id);
 	if (with_timeout(map->timeout))
-		list_elem_tadd(map, i, id, timeout);
+		list_elem_tadd(map, i, id, ip_set_timeout_set(timeout));
 	else
 		list_elem_add(map, i, id);
 
@@ -172,11 +169,11 @@ list_set_add(struct list_set *map, u32 i, ip_set_id_t id,
 }
 
 static int
-list_set_del(struct list_set *map, ip_set_id_t id, u32 i)
+list_set_del(struct list_set *map, u32 i)
 {
 	struct set_elem *a = list_set_elem(map, i), *b;
 
-	ip_set_put_byindex(id);
+	ip_set_put_byindex(a->id);
 
 	for (; i < map->size - 1; i++) {
 		b = list_set_elem(map, i + 1);
@@ -308,11 +305,11 @@ list_set_uadt(struct ip_set *set, struct nlattr *tb[],
 			    (before == 0 ||
 			     (before > 0 &&
 			      next_id_eq(map, i, refid))))
-				ret = list_set_del(map, id, i);
+				ret = list_set_del(map, i);
 			else if (before < 0 &&
 				 elem->id == refid &&
 				 next_id_eq(map, i, id))
-				ret = list_set_del(map, id, i + 1);
+				ret = list_set_del(map, i + 1);
 		}
 		break;
 	default:
@@ -369,8 +366,7 @@ list_set_head(struct ip_set *set, struct sk_buff *skb)
 	NLA_PUT_NET32(skb, IPSET_ATTR_SIZE, htonl(map->size));
 	if (with_timeout(map->timeout))
 		NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout));
-	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES,
-		      htonl(atomic_read(&set->ref) - 1));
+	NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1));
 	NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE,
 		      htonl(sizeof(*map) + map->size * map->dsize));
 	ipset_nest_end(skb, nested);
@@ -461,16 +457,13 @@ list_set_gc(unsigned long ul_set)
 	struct set_telem *e;
 	u32 i;
 
-	/* We run parallel with other readers (test element)
-	 * but adding/deleting new entries is locked out */
-	read_lock_bh(&set->lock);
-	for (i = map->size - 1; i >= 0; i--) {
-		e = (struct set_telem *) list_set_elem(map, i);
-		if (e->id != IPSET_INVALID_ID &&
-		    list_set_expired(map, i))
-			list_set_del(map, e->id, i);
+	write_lock_bh(&set->lock);
+	for (i = 0; i < map->size; i++) {
+		e = list_set_telem(map, i);
+		if (e->id != IPSET_INVALID_ID && list_set_expired(map, i))
+			list_set_del(map, i);
 	}
-	read_unlock_bh(&set->lock);
+	write_unlock_bh(&set->lock);
 
 	map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
 	add_timer(&map->gc);
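[Editor's note: the garbage collector above now deletes expired entries itself, so it must take the set's lock for writing; the earlier read-locked scan could race with its own deletions. A runnable userspace analogue of a timer-driven sweep under a write lock; types and names are illustrative:]

    #include <pthread.h>
    #include <stdbool.h>
    #include <time.h>

    struct entry {
        bool valid;
        time_t expires;
    };

    /* Sweep expired slots; deletion mutates the table, so readers
     * must be excluded for the whole pass. */
    static void gc_sweep(struct entry *tab, unsigned int size,
                         pthread_rwlock_t *lock)
    {
        time_t now = time(NULL);
        unsigned int i;

        pthread_rwlock_wrlock(lock);
        for (i = 0; i < size; i++) {
            if (tab[i].valid && tab[i].expires <= now)
                tab[i].valid = false;   /* stands in for list_set_del() */
        }
        pthread_rwlock_unlock(lock);
    }
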
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index f289306cbf12..c97bd45975be 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -595,7 +595,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
 			atomic_inc(&dest->inactconns);
 	} else {
 		/* It is a persistent connection/template, so increase
-		   the peristent connection counter */
+		   the persistent connection counter */
 		atomic_inc(&dest->persistconns);
 	}
 
@@ -657,7 +657,7 @@ static inline void ip_vs_unbind_dest(struct ip_vs_conn *cp)
 		}
 	} else {
 		/* It is a persistent connection/template, so decrease
-		   the peristent connection counter */
+		   the persistent connection counter */
 		atomic_dec(&dest->persistconns);
 	}
 
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 33733c8872e7..9930f340908a 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -1984,9 +1984,6 @@ static const struct file_operations ip_vs_info_fops = {
 	.release = seq_release_private,
 };
 
-#endif
-
-#ifdef CONFIG_PROC_FS
 static int ip_vs_stats_show(struct seq_file *seq, void *v)
 {
 	struct net *net = seq_file_single_net(seq);
@@ -3120,7 +3117,7 @@ nla_put_failure:
 static int ip_vs_genl_dump_daemons(struct sk_buff *skb,
 				   struct netlink_callback *cb)
 {
-	struct net *net = skb_net(skb);
+	struct net *net = skb_sknet(skb);
 	struct netns_ipvs *ipvs = net_ipvs(net);
 
 	mutex_lock(&__ip_vs_mutex);
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
index f276df9896b3..87e40ea77a95 100644
--- a/net/netfilter/ipvs/ip_vs_lblc.c
+++ b/net/netfilter/ipvs/ip_vs_lblc.c
@@ -131,7 +131,7 @@ static inline void ip_vs_lblc_free(struct ip_vs_lblc_entry *en)
 {
 	list_del(&en->list);
 	/*
-	 * We don't kfree dest because it is refered either by its service
+	 * We don't kfree dest because it is referred either by its service
 	 * or the trash dest list.
 	 */
 	atomic_dec(&en->dest->refcnt);
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index cb1c9913d38b..90f618ab6dda 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -152,7 +152,7 @@ static void ip_vs_dest_set_eraseall(struct ip_vs_dest_set *set)
 	write_lock(&set->lock);
 	list_for_each_entry_safe(e, ep, &set->list, list) {
 		/*
-		 * We don't kfree dest because it is refered either
+		 * We don't kfree dest because it is referred either
 		 * by its service or by the trash dest list.
 		 */
 		atomic_dec(&e->dest->refcnt);
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
index b027ccc49f43..d12ed53ec95f 100644
--- a/net/netfilter/ipvs/ip_vs_proto_sctp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
@@ -566,7 +566,7 @@ static struct ipvs_sctp_nextstate
 	 * SHUTDOWN sent from the client, waitinf for SHUT ACK from the server
 	 */
 	/*
-	 * We recieved the data chuck, keep the state unchanged. I assume
+	 * We received the data chuck, keep the state unchanged. I assume
 	 * that still data chuncks can be received by both the peers in
 	 * SHUDOWN state
 	 */
@@ -633,7 +633,7 @@ static struct ipvs_sctp_nextstate
 	 * SHUTDOWN sent from the server, waitinf for SHUTDOWN ACK from client
 	 */
 	/*
-	 * We recieved the data chuck, keep the state unchanged. I assume
+	 * We received the data chuck, keep the state unchanged. I assume
 	 * that still data chuncks can be received by both the peers in
 	 * SHUDOWN state
 	 */
@@ -701,7 +701,7 @@ static struct ipvs_sctp_nextstate
 	 * SHUTDOWN ACK from the client, awaiting for SHUTDOWN COM from server
 	 */
 	/*
-	 * We recieved the data chuck, keep the state unchanged. I assume
+	 * We received the data chuck, keep the state unchanged. I assume
 	 * that still data chuncks can be received by both the peers in
 	 * SHUDOWN state
 	 */
@@ -771,7 +771,7 @@ static struct ipvs_sctp_nextstate
 	 * SHUTDOWN ACK from the server, awaiting for SHUTDOWN COM from client
 	 */
 	/*
-	 * We recieved the data chuck, keep the state unchanged. I assume
+	 * We received the data chuck, keep the state unchanged. I assume
 	 * that still data chuncks can be received by both the peers in
 	 * SHUDOWN state
 	 */
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 941286ca911d..2e1c11f78419 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -453,7 +453,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 	   REJECT will give spurious warnings here. */
 	/* NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 1); */
 
-	/* No external references means noone else could have
+	/* No external references means no one else could have
 	   confirmed us. */
 	NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
 	pr_debug("Confirming conntrack %p\n", ct);
@@ -901,7 +901,7 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
 	ret = l3proto->get_l4proto(skb, skb_network_offset(skb),
 				   &dataoff, &protonum);
 	if (ret <= 0) {
-		pr_debug("not prepared to track yet or error occured\n");
+		pr_debug("not prepared to track yet or error occurred\n");
 		NF_CT_STAT_INC_ATOMIC(net, error);
 		NF_CT_STAT_INC_ATOMIC(net, invalid);
 		ret = -ret;
diff --git a/net/netfilter/nf_conntrack_h323_asn1.c b/net/netfilter/nf_conntrack_h323_asn1.c
index 867882313e49..bcd5ed6b7130 100644
--- a/net/netfilter/nf_conntrack_h323_asn1.c
+++ b/net/netfilter/nf_conntrack_h323_asn1.c
@@ -631,7 +631,7 @@ static int decode_seqof(bitstr_t *bs, const struct field_t *f,
 		CHECK_BOUND(bs, 2);
 		count = *bs->cur++;
 		count <<= 8;
-		count = *bs->cur++;
+		count += *bs->cur++;
 		break;
 	case SEMI:
 		BYTE_ALIGN(bs);
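[Editor's note: the one-character fix above repairs a classic big-endian length decode: the second byte must be accumulated with +=, or the high byte shifted in the line before is discarded. A runnable demonstration:]

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const uint8_t buf[2] = { 0x01, 0x02 };  /* big-endian 0x0102 */
        unsigned int count;

        count = buf[0];
        count <<= 8;
        count = buf[1];         /* buggy: overwrites, yields 0x0002 */
        printf("buggy: %#x\n", count);

        count = buf[0];
        count <<= 8;
        count += buf[1];        /* fixed: accumulates, yields 0x0102 */
        printf("fixed: %#x\n", count);
        return 0;
    }
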
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index 533a183e6661..18b2ce5c8ced 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -731,10 +731,10 @@ static int callforward_do_filter(const union nf_inet_addr *src,
 
 		memset(&fl2, 0, sizeof(fl2));
 		fl2.daddr = dst->ip;
-		if (!afinfo->route((struct dst_entry **)&rt1,
-				   flowi4_to_flowi(&fl1))) {
-			if (!afinfo->route((struct dst_entry **)&rt2,
-					   flowi4_to_flowi(&fl2))) {
+		if (!afinfo->route(&init_net, (struct dst_entry **)&rt1,
+				   flowi4_to_flowi(&fl1), false)) {
+			if (!afinfo->route(&init_net, (struct dst_entry **)&rt2,
+					   flowi4_to_flowi(&fl2), false)) {
 				if (rt1->rt_gateway == rt2->rt_gateway &&
 				    rt1->dst.dev == rt2->dst.dev)
 					ret = 1;
@@ -755,10 +755,10 @@ static int callforward_do_filter(const union nf_inet_addr *src,
 
 		memset(&fl2, 0, sizeof(fl2));
 		ipv6_addr_copy(&fl2.daddr, &dst->in6);
-		if (!afinfo->route((struct dst_entry **)&rt1,
-				   flowi6_to_flowi(&fl1))) {
-			if (!afinfo->route((struct dst_entry **)&rt2,
-					   flowi6_to_flowi(&fl2))) {
+		if (!afinfo->route(&init_net, (struct dst_entry **)&rt1,
+				   flowi6_to_flowi(&fl1), false)) {
+			if (!afinfo->route(&init_net, (struct dst_entry **)&rt2,
+					   flowi6_to_flowi(&fl2), false)) {
 				if (!memcmp(&rt1->rt6i_gateway, &rt2->rt6i_gateway,
 					    sizeof(rt1->rt6i_gateway)) &&
 				    rt1->dst.dev == rt2->dst.dev)
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index 9ae57c57c50e..2e664a69d7db 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -98,7 +98,7 @@ static const char * const dccp_state_names[] = {
 #define sIV CT_DCCP_INVALID
 
 /*
- * DCCP state transistion table
+ * DCCP state transition table
  *
  * The assumption is the same as for TCP tracking:
  *
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 6f4ee70f460b..6772b1154654 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -107,9 +107,9 @@ static const u8 sctp_conntracks[2][9][SCTP_CONNTRACK_MAX] = {
 /* abort        */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
 /* shutdown     */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA},
 /* shutdown_ack */ {sSA, sCL, sCW, sCE, sES, sSA, sSA, sSA},
-/* error        */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Cant have Stale cookie*/
+/* error        */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Can't have Stale cookie*/
 /* cookie_echo  */ {sCL, sCL, sCE, sCE, sES, sSS, sSR, sSA},/* 5.2.4 - Big TODO */
-/* cookie_ack   */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Cant come in orig dir */
+/* cookie_ack   */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Can't come in orig dir */
 /* shutdown_comp*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sCL}
 	},
 	{
@@ -121,7 +121,7 @@ static const u8 sctp_conntracks[2][9][SCTP_CONNTRACK_MAX] = {
 /* shutdown     */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA},
 /* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA},
 /* error        */ {sIV, sCL, sCW, sCL, sES, sSS, sSR, sSA},
-/* cookie_echo  */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Cant come in reply dir */
+/* cookie_echo  */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Can't come in reply dir */
 /* cookie_ack   */ {sIV, sCL, sCW, sES, sES, sSS, sSR, sSA},
 /* shutdown_comp*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sCL}
 	}
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index bcf47eb518ef..237cc1981b89 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -707,7 +707,7 @@ static const char *ct_sdp_header_search(const char *dptr, const char *limit,
 }
 
 /* Locate a SDP header (optionally a substring within the header value),
- * optionally stopping at the first occurence of the term header, parse
+ * optionally stopping at the first occurrence of the term header, parse
  * it and return the offset and length of the data we're interested in.
  */
 int ct_sip_get_sdp_header(const struct nf_conn *ct, const char *dptr,
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 0ae142825881..05e9feb101c3 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -245,7 +245,7 @@ static int ct_seq_show(struct seq_file *s, void *v)
 	ret = 0;
 release:
 	nf_ct_put(ct);
-	return 0;
+	return ret;
 }
 
 static const struct seq_operations ct_seq_ops = {
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index 5ab22e2bbd7d..5b466cd1272f 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -134,7 +134,7 @@ static int __nf_queue(struct sk_buff *skb,
 	const struct nf_afinfo *afinfo;
 	const struct nf_queue_handler *qh;
 
-	/* QUEUE == DROP if noone is waiting, to be safe. */
+	/* QUEUE == DROP if no one is waiting, to be safe. */
 	rcu_read_lock();
 
 	qh = rcu_dereference(queue_handler[pf]);
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 985e9b76c916..e0ee010935e7 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -381,7 +381,6 @@ __build_packet_message(struct nfulnl_instance *inst,
 	struct nfulnl_msg_packet_hdr pmsg;
 	struct nlmsghdr *nlh;
 	struct nfgenmsg *nfmsg;
-	__be32 tmp_uint;
 	sk_buff_data_t old_tail = inst->skb->tail;
 
 	nlh = NLMSG_PUT(inst->skb, 0, 0,
@@ -428,7 +427,6 @@ __build_packet_message(struct nfulnl_instance *inst,
 	}
 
 	if (outdev) {
-		tmp_uint = htonl(outdev->ifindex);
 #ifndef CONFIG_BRIDGE_NETFILTER
 		NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV,
 			     htonl(outdev->ifindex));
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index a9adf4c6b299..52959efca858 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -762,8 +762,8 @@ void xt_compat_unlock(u_int8_t af)
 EXPORT_SYMBOL_GPL(xt_compat_unlock);
 #endif
 
-DEFINE_PER_CPU(struct xt_info_lock, xt_info_locks);
-EXPORT_PER_CPU_SYMBOL_GPL(xt_info_locks);
+DEFINE_PER_CPU(seqcount_t, xt_recseq);
+EXPORT_PER_CPU_SYMBOL_GPL(xt_recseq);
 
 static int xt_jumpstack_alloc(struct xt_table_info *i)
 {
@@ -1362,10 +1362,7 @@ static int __init xt_init(void)
 	int rv;
 
 	for_each_possible_cpu(i) {
-		struct xt_info_lock *lock = &per_cpu(xt_info_locks, i);
-
-		seqlock_init(&lock->lock);
-		lock->readers = 0;
+		seqcount_init(&per_cpu(xt_recseq, i));
 	}
 
 	xt = kmalloc(sizeof(struct xt_af) * NFPROTO_NUMPROTO, GFP_KERNEL);
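[Editor's note: replacing the per-CPU seqlock-plus-reader-count with a bare seqcount_t leaves writers to provide their own mutual exclusion (table replacement is already serialized in x_tables) while readers simply retry on a torn read. A kernel-style sketch of generic seqcount usage, not the xt_recseq wrappers themselves; names are illustrative and stats_seq is assumed to be set up once with seqcount_init():]

    /* Sketch only: protect a small statistics pair with a seqcount.
     * Writers are assumed to be serialized externally. */
    static seqcount_t stats_seq;        /* seqcount_init(&stats_seq) at init */
    static u64 pkts, bytes;

    static void stats_update(u64 len)
    {
        write_seqcount_begin(&stats_seq);
        pkts++;
        bytes += len;
        write_seqcount_end(&stats_seq);
    }

    static void stats_read(u64 *p, u64 *b)
    {
        unsigned int seq;

        do {                            /* retry if a writer interleaved */
            seq = read_seqcount_begin(&stats_seq);
            *p = pkts;
            *b = bytes;
        } while (read_seqcount_retry(&stats_seq, seq));
    }
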
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index 6e6b46cb1db9..9e63b43faeed 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -166,7 +166,7 @@ static u_int32_t tcpmss_reverse_mtu(const struct sk_buff *skb,
 	rcu_read_lock();
 	ai = nf_get_afinfo(family);
 	if (ai != NULL)
-		ai->route((struct dst_entry **)&rt, &fl);
+		ai->route(&init_net, (struct dst_entry **)&rt, &fl, false);
 	rcu_read_unlock();
 
 	if (rt != NULL) {
diff --git a/net/netfilter/xt_addrtype.c b/net/netfilter/xt_addrtype.c
index 2220b85e9519..b77d383cec78 100644
--- a/net/netfilter/xt_addrtype.c
+++ b/net/netfilter/xt_addrtype.c
@@ -32,11 +32,32 @@ MODULE_ALIAS("ipt_addrtype");
 MODULE_ALIAS("ip6t_addrtype");
 
 #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
-static u32 xt_addrtype_rt6_to_type(const struct rt6_info *rt)
+static u32 match_lookup_rt6(struct net *net, const struct net_device *dev,
+			    const struct in6_addr *addr)
 {
+	const struct nf_afinfo *afinfo;
+	struct flowi6 flow;
+	struct rt6_info *rt;
 	u32 ret;
+	int route_err;
 
-	if (!rt)
+	memset(&flow, 0, sizeof(flow));
+	ipv6_addr_copy(&flow.daddr, addr);
+	if (dev)
+		flow.flowi6_oif = dev->ifindex;
+
+	rcu_read_lock();
+
+	afinfo = nf_get_afinfo(NFPROTO_IPV6);
+	if (afinfo != NULL)
+		route_err = afinfo->route(net, (struct dst_entry **)&rt,
+					  flowi6_to_flowi(&flow), !!dev);
+	else
+		route_err = 1;
+
+	rcu_read_unlock();
+
+	if (route_err)
 		return XT_ADDRTYPE_UNREACHABLE;
 
 	if (rt->rt6i_flags & RTF_REJECT)
@@ -48,6 +69,9 @@ static u32 xt_addrtype_rt6_to_type(const struct rt6_info *rt)
 		ret |= XT_ADDRTYPE_LOCAL;
 	if (rt->rt6i_flags & RTF_ANYCAST)
 		ret |= XT_ADDRTYPE_ANYCAST;
+
+
+	dst_release(&rt->dst);
 	return ret;
 }
 
@@ -65,18 +89,8 @@ static bool match_type6(struct net *net, const struct net_device *dev,
 		return false;
 
 	if ((XT_ADDRTYPE_LOCAL | XT_ADDRTYPE_ANYCAST |
-	     XT_ADDRTYPE_UNREACHABLE) & mask) {
-		struct rt6_info *rt;
-		u32 type;
-		int ifindex = dev ? dev->ifindex : 0;
-
-		rt = rt6_lookup(net, addr, NULL, ifindex, !!dev);
-
-		type = xt_addrtype_rt6_to_type(rt);
-
-		dst_release(&rt->dst);
-		return !!(mask & type);
-	}
+	     XT_ADDRTYPE_UNREACHABLE) & mask)
+		return !!(mask & match_lookup_rt6(net, dev, addr));
 	return true;
 }
 
diff --git a/net/netfilter/xt_conntrack.c b/net/netfilter/xt_conntrack.c
index 2c0086a4751e..481a86fdc409 100644
--- a/net/netfilter/xt_conntrack.c
+++ b/net/netfilter/xt_conntrack.c
@@ -195,7 +195,7 @@ conntrack_mt(const struct sk_buff *skb, struct xt_action_param *par,
 		return info->match_flags & XT_CONNTRACK_STATE;
 	if ((info->match_flags & XT_CONNTRACK_DIRECTION) &&
 	    (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) ^
-	    !!(info->invert_flags & XT_CONNTRACK_DIRECTION))
+	    !(info->invert_flags & XT_CONNTRACK_DIRECTION))
 		return false;
 
 	if (info->match_flags & XT_CONNTRACK_ORIGSRC)
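[Editor's note: the match above XORs a direction test with the state of an invert flag; whether that flag is collapsed with !! or negated with ! flips the overall sense, which is exactly what the one-character fix changes. A runnable illustration of why the operand to ^ must first be reduced to 0/1; the flag value is made up:]

    #include <stdbool.h>
    #include <stdio.h>

    #define FLAG_INVERT 0x4     /* illustrative flag value */

    int main(void)
    {
        int flags = FLAG_INVERT;
        bool dir_original = true;

        /* (flags & FLAG_INVERT) is 0x4, not 1, so it must be collapsed
         * to a boolean before it can take part in a logical XOR. */
        printf("raw  ^: %d\n", dir_original ^ (flags & FLAG_INVERT));  /* 5: nonsense */
        printf("norm ^: %d\n", dir_original ^ !!(flags & FLAG_INVERT)); /* 0 */
        printf("neg  ^: %d\n", dir_original ^ !(flags & FLAG_INVERT));  /* 1 */
        return 0;
    }
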
diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c
index 5f14c8462e30..bae5756b1626 100644
--- a/net/netlabel/netlabel_cipso_v4.c
+++ b/net/netlabel/netlabel_cipso_v4.c
@@ -422,7 +422,6 @@ static int netlbl_cipsov4_add(struct sk_buff *skb, struct genl_info *info)
 
 {
 	int ret_val = -EINVAL;
-	const char *type_str = "(unknown)";
 	struct netlbl_audit audit_info;
 
 	if (!info->attrs[NLBL_CIPSOV4_A_DOI] ||
@@ -432,15 +431,12 @@ static int netlbl_cipsov4_add(struct sk_buff *skb, struct genl_info *info)
 	netlbl_netlink_auditinfo(skb, &audit_info);
 	switch (nla_get_u32(info->attrs[NLBL_CIPSOV4_A_MTYPE])) {
 	case CIPSO_V4_MAP_TRANS:
-		type_str = "trans";
 		ret_val = netlbl_cipsov4_add_std(info, &audit_info);
 		break;
 	case CIPSO_V4_MAP_PASS:
-		type_str = "pass";
 		ret_val = netlbl_cipsov4_add_pass(info, &audit_info);
 		break;
 	case CIPSO_V4_MAP_LOCAL:
-		type_str = "local";
 		ret_val = netlbl_cipsov4_add_local(info, &audit_info);
 		break;
 	}
diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c
index d37b7f80fa37..de0d8e4cbfb6 100644
--- a/net/netlabel/netlabel_domainhash.c
+++ b/net/netlabel/netlabel_domainhash.c
@@ -109,7 +109,7 @@ static void netlbl_domhsh_free_entry(struct rcu_head *entry)
109 * 109 *
110 * Description: 110 * Description:
111 * This is the hashing function for the domain hash table, it returns the 111 * This is the hashing function for the domain hash table, it returns the
112 * correct bucket number for the domain. The caller is responsibile for 112 * correct bucket number for the domain. The caller is responsible for
113 * ensuring that the hash table is protected with either a RCU read lock or the 113 * ensuring that the hash table is protected with either a RCU read lock or the
114 * hash table lock. 114 * hash table lock.
115 * 115 *
@@ -134,7 +134,7 @@ static u32 netlbl_domhsh_hash(const char *key)
134 * 134 *
135 * Description: 135 * Description:
136 * Searches the domain hash table and returns a pointer to the hash table 136 * Searches the domain hash table and returns a pointer to the hash table
137 * entry if found, otherwise NULL is returned. The caller is responsibile for 137 * entry if found, otherwise NULL is returned. The caller is responsible for
138 * ensuring that the hash table is protected with either a RCU read lock or the 138 * ensuring that the hash table is protected with either a RCU read lock or the
139 * hash table lock. 139 * hash table lock.
140 * 140 *
@@ -165,7 +165,7 @@ static struct netlbl_dom_map *netlbl_domhsh_search(const char *domain)
165 * Searches the domain hash table and returns a pointer to the hash table 165 * Searches the domain hash table and returns a pointer to the hash table
166 * entry if an exact match is found, if an exact match is not present in the 166 * entry if an exact match is found, if an exact match is not present in the
167 * hash table then the default entry is returned if valid otherwise NULL is 167 * hash table then the default entry is returned if valid otherwise NULL is
168 * returned. The caller is responsibile ensuring that the hash table is 168 * returned. The caller is responsible ensuring that the hash table is
169 * protected with either a RCU read lock or the hash table lock. 169 * protected with either a RCU read lock or the hash table lock.
170 * 170 *
171 */ 171 */
@@ -193,7 +193,7 @@ static struct netlbl_dom_map *netlbl_domhsh_search_def(const char *domain)
193 * 193 *
194 * Description: 194 * Description:
195 * Generate an audit record for adding a new NetLabel/LSM mapping entry with 195 * Generate an audit record for adding a new NetLabel/LSM mapping entry with
196 * the given information. Caller is responsibile for holding the necessary 196 * the given information. Caller is responsible for holding the necessary
197 * locks. 197 * locks.
198 * 198 *
199 */ 199 */
@@ -605,7 +605,7 @@ int netlbl_domhsh_remove_default(struct netlbl_audit *audit_info)
605 * 605 *
606 * Description: 606 * Description:
607 * Look through the domain hash table searching for an entry to match @domain, 607 * Look through the domain hash table searching for an entry to match @domain,
608 * return a pointer to a copy of the entry or NULL. The caller is responsibile 608 * return a pointer to a copy of the entry or NULL. The caller is responsible
609 * for ensuring that rcu_read_[un]lock() is called. 609 * for ensuring that rcu_read_[un]lock() is called.
610 * 610 *
611 */ 611 */
diff --git a/net/netlabel/netlabel_mgmt.c b/net/netlabel/netlabel_mgmt.c
index 998e85e895d0..4f251b19fbcc 100644
--- a/net/netlabel/netlabel_mgmt.c
+++ b/net/netlabel/netlabel_mgmt.c
@@ -259,7 +259,7 @@ add_failure:
259 * 259 *
260 * Description: 260 * Description:
261 * This function is a helper function used by the LISTALL and LISTDEF command 261 * This function is a helper function used by the LISTALL and LISTDEF command
262 * handlers. The caller is responsibile for ensuring that the RCU read lock 262 * handlers. The caller is responsible for ensuring that the RCU read lock
263 * is held. Returns zero on success, negative values on failure. 263 * is held. Returns zero on success, negative values on failure.
264 * 264 *
265 */ 265 */
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 06cb02796a0e..732152f718e0 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -591,7 +591,6 @@ static int nr_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
591 return -EINVAL; 591 return -EINVAL;
592 } 592 }
593 if ((dev = nr_dev_get(&addr->fsa_ax25.sax25_call)) == NULL) { 593 if ((dev = nr_dev_get(&addr->fsa_ax25.sax25_call)) == NULL) {
594 SOCK_DEBUG(sk, "NET/ROM: bind failed: invalid node callsign\n");
595 release_sock(sk); 594 release_sock(sk);
596 return -EADDRNOTAVAIL; 595 return -EADDRNOTAVAIL;
597 } 596 }
@@ -632,7 +631,7 @@ static int nr_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
632 sock_reset_flag(sk, SOCK_ZAPPED); 631 sock_reset_flag(sk, SOCK_ZAPPED);
633 dev_put(dev); 632 dev_put(dev);
634 release_sock(sk); 633 release_sock(sk);
635 SOCK_DEBUG(sk, "NET/ROM: socket is bound\n"); 634
636 return 0; 635 return 0;
637} 636}
638 637
@@ -1082,8 +1081,6 @@ static int nr_sendmsg(struct kiocb *iocb, struct socket *sock,
1082 sax.sax25_call = nr->dest_addr; 1081 sax.sax25_call = nr->dest_addr;
1083 } 1082 }
1084 1083
1085 SOCK_DEBUG(sk, "NET/ROM: sendto: Addresses built.\n");
1086
1087 /* Build a packet - the conventional user limit is 236 bytes. We can 1084 /* Build a packet - the conventional user limit is 236 bytes. We can
1088 do ludicrously large NetROM frames but must not overflow */ 1085 do ludicrously large NetROM frames but must not overflow */
1089 if (len > 65536) { 1086 if (len > 65536) {
@@ -1091,7 +1088,6 @@ static int nr_sendmsg(struct kiocb *iocb, struct socket *sock,
1091 goto out; 1088 goto out;
1092 } 1089 }
1093 1090
1094 SOCK_DEBUG(sk, "NET/ROM: sendto: building packet.\n");
1095 size = len + NR_NETWORK_LEN + NR_TRANSPORT_LEN; 1091 size = len + NR_NETWORK_LEN + NR_TRANSPORT_LEN;
1096 1092
1097 if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL) 1093 if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL)
@@ -1105,7 +1101,6 @@ static int nr_sendmsg(struct kiocb *iocb, struct socket *sock,
1105 */ 1101 */
1106 1102
1107 asmptr = skb_push(skb, NR_TRANSPORT_LEN); 1103 asmptr = skb_push(skb, NR_TRANSPORT_LEN);
1108 SOCK_DEBUG(sk, "Building NET/ROM Header.\n");
1109 1104
1110 /* Build a NET/ROM Transport header */ 1105 /* Build a NET/ROM Transport header */
1111 1106
@@ -1114,15 +1109,12 @@ static int nr_sendmsg(struct kiocb *iocb, struct socket *sock,
1114 *asmptr++ = 0; /* To be filled in later */ 1109 *asmptr++ = 0; /* To be filled in later */
1115 *asmptr++ = 0; /* Ditto */ 1110 *asmptr++ = 0; /* Ditto */
1116 *asmptr++ = NR_INFO; 1111 *asmptr++ = NR_INFO;
1117 SOCK_DEBUG(sk, "Built header.\n");
1118 1112
1119 /* 1113 /*
1120 * Put the data on the end 1114 * Put the data on the end
1121 */ 1115 */
1122 skb_put(skb, len); 1116 skb_put(skb, len);
1123 1117
1124 SOCK_DEBUG(sk, "NET/ROM: Appending user data\n");
1125
1126 /* User data follows immediately after the NET/ROM transport header */ 1118 /* User data follows immediately after the NET/ROM transport header */
1127 if (memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len)) { 1119 if (memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len)) {
1128 kfree_skb(skb); 1120 kfree_skb(skb);
@@ -1130,8 +1122,6 @@ static int nr_sendmsg(struct kiocb *iocb, struct socket *sock,
1130 goto out; 1122 goto out;
1131 } 1123 }
1132 1124
1133 SOCK_DEBUG(sk, "NET/ROM: Transmitting buffer\n");
1134
1135 if (sk->sk_state != TCP_ESTABLISHED) { 1125 if (sk->sk_state != TCP_ESTABLISHED) {
1136 kfree_skb(skb); 1126 kfree_skb(skb);
1137 err = -ENOTCONN; 1127 err = -ENOTCONN;
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index b1adafab377c..8c5bfcef92cb 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -52,7 +52,7 @@ static int pn_socket_release(struct socket *sock)
52 52
53static struct { 53static struct {
54 struct hlist_head hlist[PN_HASHSIZE]; 54 struct hlist_head hlist[PN_HASHSIZE];
55 spinlock_t lock; 55 struct mutex lock;
56} pnsocks; 56} pnsocks;
57 57
58void __init pn_sock_init(void) 58void __init pn_sock_init(void)
@@ -61,7 +61,7 @@ void __init pn_sock_init(void)
61 61
62 for (i = 0; i < PN_HASHSIZE; i++) 62 for (i = 0; i < PN_HASHSIZE; i++)
63 INIT_HLIST_HEAD(pnsocks.hlist + i); 63 INIT_HLIST_HEAD(pnsocks.hlist + i);
64 spin_lock_init(&pnsocks.lock); 64 mutex_init(&pnsocks.lock);
65} 65}
66 66
67static struct hlist_head *pn_hash_list(u16 obj) 67static struct hlist_head *pn_hash_list(u16 obj)
@@ -82,9 +82,8 @@ struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn)
82 u8 res = spn->spn_resource; 82 u8 res = spn->spn_resource;
83 struct hlist_head *hlist = pn_hash_list(obj); 83 struct hlist_head *hlist = pn_hash_list(obj);
84 84
85 spin_lock_bh(&pnsocks.lock); 85 rcu_read_lock();
86 86 sk_for_each_rcu(sknode, node, hlist) {
87 sk_for_each(sknode, node, hlist) {
88 struct pn_sock *pn = pn_sk(sknode); 87 struct pn_sock *pn = pn_sk(sknode);
89 BUG_ON(!pn->sobject); /* unbound socket */ 88 BUG_ON(!pn->sobject); /* unbound socket */
90 89
@@ -107,8 +106,7 @@ struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn)
107 sock_hold(sknode); 106 sock_hold(sknode);
108 break; 107 break;
109 } 108 }
110 109 rcu_read_unlock();
111 spin_unlock_bh(&pnsocks.lock);
112 110
113 return rval; 111 return rval;
114} 112}
@@ -119,7 +117,7 @@ void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb)
119 struct hlist_head *hlist = pnsocks.hlist; 117 struct hlist_head *hlist = pnsocks.hlist;
120 unsigned h; 118 unsigned h;
121 119
122 spin_lock(&pnsocks.lock); 120 rcu_read_lock();
123 for (h = 0; h < PN_HASHSIZE; h++) { 121 for (h = 0; h < PN_HASHSIZE; h++) {
124 struct hlist_node *node; 122 struct hlist_node *node;
125 struct sock *sknode; 123 struct sock *sknode;
@@ -140,25 +138,26 @@ void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb)
140 } 138 }
141 hlist++; 139 hlist++;
142 } 140 }
143 spin_unlock(&pnsocks.lock); 141 rcu_read_unlock();
144} 142}
145 143
146void pn_sock_hash(struct sock *sk) 144void pn_sock_hash(struct sock *sk)
147{ 145{
148 struct hlist_head *hlist = pn_hash_list(pn_sk(sk)->sobject); 146 struct hlist_head *hlist = pn_hash_list(pn_sk(sk)->sobject);
149 147
150 spin_lock_bh(&pnsocks.lock); 148 mutex_lock(&pnsocks.lock);
151 sk_add_node(sk, hlist); 149 sk_add_node_rcu(sk, hlist);
152 spin_unlock_bh(&pnsocks.lock); 150 mutex_unlock(&pnsocks.lock);
153} 151}
154EXPORT_SYMBOL(pn_sock_hash); 152EXPORT_SYMBOL(pn_sock_hash);
155 153
156void pn_sock_unhash(struct sock *sk) 154void pn_sock_unhash(struct sock *sk)
157{ 155{
158 spin_lock_bh(&pnsocks.lock); 156 mutex_lock(&pnsocks.lock);
159 sk_del_node_init(sk); 157 sk_del_node_init_rcu(sk);
160 spin_unlock_bh(&pnsocks.lock); 158 mutex_unlock(&pnsocks.lock);
161 pn_sock_unbind_all_res(sk); 159 pn_sock_unbind_all_res(sk);
160 synchronize_rcu();
162} 161}
163EXPORT_SYMBOL(pn_sock_unhash); 162EXPORT_SYMBOL(pn_sock_unhash);
164 163
@@ -548,7 +547,7 @@ static struct sock *pn_sock_get_idx(struct seq_file *seq, loff_t pos)
548 unsigned h; 547 unsigned h;
549 548
550 for (h = 0; h < PN_HASHSIZE; h++) { 549 for (h = 0; h < PN_HASHSIZE; h++) {
551 sk_for_each(sknode, node, hlist) { 550 sk_for_each_rcu(sknode, node, hlist) {
552 if (!net_eq(net, sock_net(sknode))) 551 if (!net_eq(net, sock_net(sknode)))
553 continue; 552 continue;
554 if (!pos) 553 if (!pos)
@@ -572,9 +571,9 @@ static struct sock *pn_sock_get_next(struct seq_file *seq, struct sock *sk)
572} 571}
573 572
574static void *pn_sock_seq_start(struct seq_file *seq, loff_t *pos) 573static void *pn_sock_seq_start(struct seq_file *seq, loff_t *pos)
575 __acquires(pnsocks.lock) 574 __acquires(rcu)
576{ 575{
577 spin_lock_bh(&pnsocks.lock); 576 rcu_read_lock();
578 return *pos ? pn_sock_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; 577 return *pos ? pn_sock_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
579} 578}
580 579
@@ -591,9 +590,9 @@ static void *pn_sock_seq_next(struct seq_file *seq, void *v, loff_t *pos)
591} 590}
592 591
593static void pn_sock_seq_stop(struct seq_file *seq, void *v) 592static void pn_sock_seq_stop(struct seq_file *seq, void *v)
594 __releases(pnsocks.lock) 593 __releases(rcu)
595{ 594{
596 spin_unlock_bh(&pnsocks.lock); 595 rcu_read_unlock();
597} 596}
598 597
599static int pn_sock_seq_show(struct seq_file *seq, void *v) 598static int pn_sock_seq_show(struct seq_file *seq, void *v)
@@ -721,13 +720,11 @@ void pn_sock_unbind_all_res(struct sock *sk)
721 } 720 }
722 mutex_unlock(&resource_mutex); 721 mutex_unlock(&resource_mutex);
723 722
724 if (match == 0)
725 return;
726 synchronize_rcu();
727 while (match > 0) { 723 while (match > 0) {
728 sock_put(sk); 724 __sock_put(sk);
729 match--; 725 match--;
730 } 726 }
727 /* Caller is responsible for RCU sync before final sock_put() */
731} 728}
732 729
733#ifdef CONFIG_PROC_FS 730#ifdef CONFIG_PROC_FS
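Taken together, the socket.c changes above are the standard spinlock-to-RCU conversion for a hash table: lookups and the /proc walker run under rcu_read_lock() over the _rcu list primitives, while hash/unhash serialize on a mutex (they already run in process context) and unhash waits out a grace period before a reader can see a freed socket. A condensed sketch of that recipe, with hypothetical table and helper names:

	static DEFINE_MUTEX(tbl_lock);		/* serializes writers only */
	static struct hlist_head tbl[16];

	static void obj_hash(struct sock *sk, struct hlist_head *head)
	{
		mutex_lock(&tbl_lock);
		sk_add_node_rcu(sk, head);	/* publish to RCU readers */
		mutex_unlock(&tbl_lock);
	}

	static struct sock *obj_lookup(struct hlist_head *head, u16 key)
	{
		struct hlist_node *node;
		struct sock *sk, *found = NULL;

		rcu_read_lock();
		sk_for_each_rcu(sk, node, head) {
			if (pn_sk(sk)->sobject == key) {
				sock_hold(sk);	/* pin before leaving the read side */
				found = sk;
				break;
			}
		}
		rcu_read_unlock();
		return found;
	}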
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index c47a511f203d..7c4dce8fa5e6 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -355,7 +355,7 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
355 * 355 *
356 * Conceptually, we have two counters: 356 * Conceptually, we have two counters:
357 * - send credits: this tells us how many WRs we're allowed 357 * - send credits: this tells us how many WRs we're allowed
358 * to submit without overruning the reciever's queue. For 358 * to submit without overruning the receiver's queue. For
359 * each SEND WR we post, we decrement this by one. 359 * each SEND WR we post, we decrement this by one.
360 * 360 *
361 * - posted credits: this tells us how many WRs we recently 361 * - posted credits: this tells us how many WRs we recently
diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
index 712cf2d1f28e..3a60a15d1b4a 100644
--- a/net/rds/iw_cm.c
+++ b/net/rds/iw_cm.c
@@ -181,7 +181,7 @@ static int rds_iw_init_qp_attrs(struct ib_qp_init_attr *attr,
181 unsigned int send_size, recv_size; 181 unsigned int send_size, recv_size;
182 int ret; 182 int ret;
183 183
184 /* The offset of 1 is to accomodate the additional ACK WR. */ 184 /* The offset of 1 is to accommodate the additional ACK WR. */
185 send_size = min_t(unsigned int, rds_iwdev->max_wrs, rds_iw_sysctl_max_send_wr + 1); 185 send_size = min_t(unsigned int, rds_iwdev->max_wrs, rds_iw_sysctl_max_send_wr + 1);
186 recv_size = min_t(unsigned int, rds_iwdev->max_wrs, rds_iw_sysctl_max_recv_wr + 1); 186 recv_size = min_t(unsigned int, rds_iwdev->max_wrs, rds_iw_sysctl_max_recv_wr + 1);
187 rds_iw_ring_resize(send_ring, send_size - 1); 187 rds_iw_ring_resize(send_ring, send_size - 1);
diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c
index 59509e9a9e72..6deaa77495e3 100644
--- a/net/rds/iw_rdma.c
+++ b/net/rds/iw_rdma.c
@@ -122,7 +122,7 @@ static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwd
122#else 122#else
123 /* FIXME - needs to compare the local and remote 123 /* FIXME - needs to compare the local and remote
124 * ipaddr/port tuple, but the ipaddr is the only 124 * ipaddr/port tuple, but the ipaddr is the only
125 * available infomation in the rds_sock (as the rest are 125 * available information in the rds_sock (as the rest are
126 * zero'ed. It doesn't appear to be properly populated 126 * zero'ed. It doesn't appear to be properly populated
127 * during connection setup... 127 * during connection setup...
128 */ 128 */
diff --git a/net/rds/iw_send.c b/net/rds/iw_send.c
index 6280ea020d4e..545d8ee3efb1 100644
--- a/net/rds/iw_send.c
+++ b/net/rds/iw_send.c
@@ -307,7 +307,7 @@ void rds_iw_send_cq_comp_handler(struct ib_cq *cq, void *context)
307 * 307 *
308 * Conceptually, we have two counters: 308 * Conceptually, we have two counters:
309 * - send credits: this tells us how many WRs we're allowed 309 * - send credits: this tells us how many WRs we're allowed
310 * to submit without overruning the reciever's queue. For 310 * to submit without overruning the receiver's queue. For
311 * each SEND WR we post, we decrement this by one. 311 * each SEND WR we post, we decrement this by one.
312 * 312 *
313 * - posted credits: this tells us how many WRs we recently 313 * - posted credits: this tells us how many WRs we recently
diff --git a/net/rds/send.c b/net/rds/send.c
index 35b9c2e9caf1..d58ae5f9339e 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -116,7 +116,7 @@ static void release_in_xmit(struct rds_connection *conn)
116} 116}
117 117
118/* 118/*
119 * We're making the concious trade-off here to only send one message 119 * We're making the conscious trade-off here to only send one message
120 * down the connection at a time. 120 * down the connection at a time.
121 * Pro: 121 * Pro:
122 * - tx queueing is a simple fifo list 122 * - tx queueing is a simple fifo list
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index a80aef6e3d1f..f9ea925ad9cb 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -682,10 +682,8 @@ static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
682 if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS) 682 if ((unsigned int) addr->srose_ndigis > ROSE_MAX_DIGIS)
683 return -EINVAL; 683 return -EINVAL;
684 684
685 if ((dev = rose_dev_get(&addr->srose_addr)) == NULL) { 685 if ((dev = rose_dev_get(&addr->srose_addr)) == NULL)
686 SOCK_DEBUG(sk, "ROSE: bind failed: invalid address\n");
687 return -EADDRNOTAVAIL; 686 return -EADDRNOTAVAIL;
688 }
689 687
690 source = &addr->srose_call; 688 source = &addr->srose_call;
691 689
@@ -716,7 +714,7 @@ static int rose_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
716 rose_insert_socket(sk); 714 rose_insert_socket(sk);
717 715
718 sock_reset_flag(sk, SOCK_ZAPPED); 716 sock_reset_flag(sk, SOCK_ZAPPED);
719 SOCK_DEBUG(sk, "ROSE: socket is bound\n"); 717
720 return 0; 718 return 0;
721} 719}
722 720
@@ -1109,10 +1107,7 @@ static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
1109 srose.srose_digis[n] = rose->dest_digis[n]; 1107 srose.srose_digis[n] = rose->dest_digis[n];
1110 } 1108 }
1111 1109
1112 SOCK_DEBUG(sk, "ROSE: sendto: Addresses built.\n");
1113
1114 /* Build a packet */ 1110 /* Build a packet */
1115 SOCK_DEBUG(sk, "ROSE: sendto: building packet.\n");
1116 /* Sanity check the packet size */ 1111 /* Sanity check the packet size */
1117 if (len > 65535) 1112 if (len > 65535)
1118 return -EMSGSIZE; 1113 return -EMSGSIZE;
@@ -1127,7 +1122,6 @@ static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
1127 /* 1122 /*
1128 * Put the data on the end 1123 * Put the data on the end
1129 */ 1124 */
1130 SOCK_DEBUG(sk, "ROSE: Appending user data\n");
1131 1125
1132 skb_reset_transport_header(skb); 1126 skb_reset_transport_header(skb);
1133 skb_put(skb, len); 1127 skb_put(skb, len);
@@ -1152,8 +1146,6 @@ static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
1152 */ 1146 */
1153 asmptr = skb_push(skb, ROSE_MIN_LEN); 1147 asmptr = skb_push(skb, ROSE_MIN_LEN);
1154 1148
1155 SOCK_DEBUG(sk, "ROSE: Building Network Header.\n");
1156
1157 /* Build a ROSE Network header */ 1149 /* Build a ROSE Network header */
1158 asmptr[0] = ((rose->lci >> 8) & 0x0F) | ROSE_GFI; 1150 asmptr[0] = ((rose->lci >> 8) & 0x0F) | ROSE_GFI;
1159 asmptr[1] = (rose->lci >> 0) & 0xFF; 1151 asmptr[1] = (rose->lci >> 0) & 0xFF;
@@ -1162,10 +1154,6 @@ static int rose_sendmsg(struct kiocb *iocb, struct socket *sock,
1162 if (qbit) 1154 if (qbit)
1163 asmptr[0] |= ROSE_Q_BIT; 1155 asmptr[0] |= ROSE_Q_BIT;
1164 1156
1165 SOCK_DEBUG(sk, "ROSE: Built header.\n");
1166
1167 SOCK_DEBUG(sk, "ROSE: Transmitting buffer\n");
1168
1169 if (sk->sk_state != TCP_ESTABLISHED) { 1157 if (sk->sk_state != TCP_ESTABLISHED) {
1170 kfree_skb(skb); 1158 kfree_skb(skb);
1171 return -ENOTCONN; 1159 return -ENOTCONN;
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index 08dcd2f29cdc..479cae57d187 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -587,7 +587,7 @@ static int rose_clear_routes(void)
587 587
588/* 588/*
589 * Check that the device given is a valid AX.25 interface that is "up". 589 * Check that the device given is a valid AX.25 interface that is "up".
590 * called whith RTNL 590 * called with RTNL
591 */ 591 */
592static struct net_device *rose_ax25_dev_find(char *devname) 592static struct net_device *rose_ax25_dev_find(char *devname)
593{ 593{
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index a7a5583d4f68..aeaa2110b699 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -239,6 +239,17 @@ config NET_SCH_CHOKE
239 To compile this code as a module, choose M here: the 239 To compile this code as a module, choose M here: the
240 module will be called sch_choke. 240 module will be called sch_choke.
241 241
242config NET_SCH_QFQ
243 tristate "Quick Fair Queueing scheduler (QFQ)"
244 help
245 Say Y here if you want to use the Quick Fair Queueing Scheduler (QFQ)
246 packet scheduling algorithm.
247
248 To compile this driver as a module, choose M here: the module
249 will be called sch_qfq.
250
251 If unsure, say N.
252
242config NET_SCH_INGRESS 253config NET_SCH_INGRESS
243 tristate "Ingress Qdisc" 254 tristate "Ingress Qdisc"
244 depends on NET_CLS_ACT 255 depends on NET_CLS_ACT
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 2e77b8dba22e..dc5889c0a15a 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -35,6 +35,7 @@ obj-$(CONFIG_NET_SCH_NETEM) += sch_netem.o
35obj-$(CONFIG_NET_SCH_DRR) += sch_drr.o 35obj-$(CONFIG_NET_SCH_DRR) += sch_drr.o
36obj-$(CONFIG_NET_SCH_MQPRIO) += sch_mqprio.o 36obj-$(CONFIG_NET_SCH_MQPRIO) += sch_mqprio.o
37obj-$(CONFIG_NET_SCH_CHOKE) += sch_choke.o 37obj-$(CONFIG_NET_SCH_CHOKE) += sch_choke.o
38obj-$(CONFIG_NET_SCH_QFQ) += sch_qfq.o
38 39
39obj-$(CONFIG_NET_CLS_U32) += cls_u32.o 40obj-$(CONFIG_NET_CLS_U32) += cls_u32.o
40obj-$(CONFIG_NET_CLS_ROUTE4) += cls_route.o 41obj-$(CONFIG_NET_CLS_ROUTE4) += cls_route.o
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 15873e14cb54..14b42f4ad791 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -999,7 +999,7 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
999 switch (n->nlmsg_type) { 999 switch (n->nlmsg_type) {
1000 case RTM_NEWACTION: 1000 case RTM_NEWACTION:
1001 /* we are going to assume all other flags 1001 /* we are going to assume all other flags
1002 * imply create only if it doesnt exist 1002 * imply create only if it doesn't exist
1003 * Note that CREATE | EXCL implies that 1003 * Note that CREATE | EXCL implies that
1004 * but since we want avoid ambiguity (eg when flags 1004 * but since we want avoid ambiguity (eg when flags
1005 * is zero) then just set this 1005 * is zero) then just set this
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 50c7c06c019d..7affe9a92757 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -161,7 +161,7 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
161 } 161 }
162 if (offset > 0 && offset > skb->len) { 162 if (offset > 0 && offset > skb->len) {
163 pr_info("tc filter pedit" 163 pr_info("tc filter pedit"
164 " offset %d cant exceed pkt length %d\n", 164 " offset %d can't exceed pkt length %d\n",
165 offset, skb->len); 165 offset, skb->len);
166 goto bad; 166 goto bad;
167 } 167 }
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index a4de67eca824..49130e8abff0 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -47,7 +47,7 @@
47 * on the meta type. Obviously, the length of the data must also 47 * on the meta type. Obviously, the length of the data must also
48 * be provided for non-numeric types. 48 * be provided for non-numeric types.
49 * 49 *
50 * Additionaly, type dependant modifiers such as shift operators 50 * Additionally, type dependent modifiers such as shift operators
 51 * or mask may be applied to extend the functionaliy. As of now, 51 * or mask may be applied to extend the functionaliy. As of now,
 52 * the variable length type supports shifting the byte string to 52 * the variable length type supports shifting the byte string to
 53 * the right, eating up any number of octets and thus supporting 53 * the right, eating up any number of octets and thus supporting
53 * the right, eating up any number of octets and thus supporting 53 * the right, eating up any number of octets and thus supporting
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index e1429a85091f..29b942ce9e82 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -183,7 +183,7 @@ static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
183 * filters in qdisc and in inner nodes (if higher filter points to the inner 183 * filters in qdisc and in inner nodes (if higher filter points to the inner
184 * node). If we end up with classid MAJOR:0 we enqueue the skb into special 184 * node). If we end up with classid MAJOR:0 we enqueue the skb into special
185 * internal fifo (direct). These packets then go directly thru. If we still 185 * internal fifo (direct). These packets then go directly thru. If we still
186 * have no valid leaf we try to use MAJOR:default leaf. It still unsuccessfull 186 * have no valid leaf we try to use MAJOR:default leaf. It still unsuccessful
187 * then finish and return direct queue. 187 * then finish and return direct queue.
188 */ 188 */
189#define HTB_DIRECT ((struct htb_class *)-1L) 189#define HTB_DIRECT ((struct htb_class *)-1L)
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index edbbf7ad6623..69c35f6cd13f 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -160,7 +160,7 @@ static bool loss_4state(struct netem_sched_data *q)
160 u32 rnd = net_random(); 160 u32 rnd = net_random();
161 161
162 /* 162 /*
163 * Makes a comparision between rnd and the transition 163 * Makes a comparison between rnd and the transition
164 * probabilities outgoing from the current state, then decides the 164 * probabilities outgoing from the current state, then decides the
165 * next state and if the next packet has to be transmitted or lost. 165 * next state and if the next packet has to be transmitted or lost.
166 * The four states correspond to: 166 * The four states correspond to:
@@ -212,9 +212,9 @@ static bool loss_4state(struct netem_sched_data *q)
212 * Generates losses according to the Gilbert-Elliot loss model or 212 * Generates losses according to the Gilbert-Elliot loss model or
213 * its special cases (Gilbert or Simple Gilbert) 213 * its special cases (Gilbert or Simple Gilbert)
214 * 214 *
215 * Makes a comparision between random number and the transition 215 * Makes a comparison between random number and the transition
216 * probabilities outgoing from the current state, then decides the 216 * probabilities outgoing from the current state, then decides the
217 * next state. A second random number is extracted and the comparision 217 * next state. A second random number is extracted and the comparison
218 * with the loss probability of the current state decides if the next 218 * with the loss probability of the current state decides if the next
219 * packet will be transmitted or lost. 219 * packet will be transmitted or lost.
220 */ 220 */
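The comment block above describes a two-draw procedure: one random number picks the next state from the outgoing transition probabilities, and a second is compared with the loss probability of the state just entered. A reduced two-state (Gilbert) sketch of that logic, with assumed field names and probabilities scaled so that ~0U stands for 1.0, as netem does:

	struct gilbert {
		bool bad;		/* current state */
		u32 p_gb, p_bg;		/* good->bad, bad->good transition prob. */
		u32 loss_good, loss_bad;
	};

	static bool gilbert_lose(struct gilbert *g)
	{
		u32 r1 = net_random();	/* first draw: state transition */
		u32 r2 = net_random();	/* second draw: loss decision */

		if (!g->bad && r1 < g->p_gb)
			g->bad = true;
		else if (g->bad && r1 < g->p_bg)
			g->bad = false;

		return r2 < (g->bad ? g->loss_bad : g->loss_good);
	}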
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
new file mode 100644
index 000000000000..103343408593
--- /dev/null
+++ b/net/sched/sch_qfq.c
@@ -0,0 +1,1137 @@
1/*
2 * net/sched/sch_qfq.c Quick Fair Queueing Scheduler.
3 *
4 * Copyright (c) 2009 Fabio Checconi, Luigi Rizzo, and Paolo Valente.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 */
10
11#include <linux/module.h>
12#include <linux/init.h>
13#include <linux/bitops.h>
14#include <linux/errno.h>
15#include <linux/netdevice.h>
16#include <linux/pkt_sched.h>
17#include <net/sch_generic.h>
18#include <net/pkt_sched.h>
19#include <net/pkt_cls.h>
20
21
22/* Quick Fair Queueing
23 ===================
24
25 Sources:
26
27 Fabio Checconi, Luigi Rizzo, and Paolo Valente: "QFQ: Efficient
28 Packet Scheduling with Tight Bandwidth Distribution Guarantees."
29
30 See also:
31 http://retis.sssup.it/~fabio/linux/qfq/
32 */
33
34/*
35
36 Virtual time computations.
37
38 S, F and V are all computed in fixed point arithmetic with
39 FRAC_BITS decimal bits.
40
41 QFQ_MAX_INDEX is the maximum index allowed for a group. We need
42 one bit per index.
43 QFQ_MAX_WSHIFT is the maximum power of two supported as a weight.
44
45 The layout of the bits is as below:
46
47 [ MTU_SHIFT ][ FRAC_BITS ]
48 [ MAX_INDEX ][ MIN_SLOT_SHIFT ]
49 ^.__grp->index = 0
50 *.__grp->slot_shift
51
52 where MIN_SLOT_SHIFT is derived by difference from the others.
53
54 The max group index corresponds to Lmax/w_min, where
55 Lmax=1<<MTU_SHIFT, w_min = 1 .
56 From this, and knowing how many groups (MAX_INDEX) we want,
57 we can derive the shift corresponding to each group.
58
59 Because we often need to compute
60 F = S + len/w_i and V = V + len/wsum
61 instead of storing w_i store the value
62 inv_w = (1<<FRAC_BITS)/w_i
63 so we can do F = S + len * inv_w * wsum.
64 We use W_TOT in the formulas so we can easily move between
65 static and adaptive weight sum.
66
67 The per-scheduler-instance data contain all the data structures
68 for the scheduler: bitmaps and bucket lists.
69
70 */
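/*
 * A worked instance of the arithmetic above (values assumed for
 * illustration): with FRAC_BITS = 30, a class of weight w = 4
 * stores inv_w = ONE_FP/4 = 1 << 28, so serving a 1000-byte packet
 * advances its finish time by len * inv_w = 1000 << 28, i.e. 250
 * virtual time units once the FRAC_BITS scaling is divided out.
 */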
71
72/*
73 * Maximum number of consecutive slots occupied by backlogged classes
74 * inside a group.
75 */
76#define QFQ_MAX_SLOTS 32
77
78/*
79 * Shifts used for class<->group mapping. We allow class weights that are
80 * in the range [1, 2^MAX_WSHIFT], and we try to map each class i to the
81 * group with the smallest index that can support the L_i / r_i configured
82 * for the class.
83 *
84 * grp->index is the index of the group; and grp->slot_shift
85 * is the shift for the corresponding (scaled) sigma_i.
86 */
87#define QFQ_MAX_INDEX 19
88#define QFQ_MAX_WSHIFT 16
89
90#define QFQ_MAX_WEIGHT (1<<QFQ_MAX_WSHIFT)
91#define QFQ_MAX_WSUM (2*QFQ_MAX_WEIGHT)
92
93#define FRAC_BITS 30 /* fixed point arithmetic */
94#define ONE_FP (1UL << FRAC_BITS)
95#define IWSUM (ONE_FP/QFQ_MAX_WSUM)
96
97#define QFQ_MTU_SHIFT 11
98#define QFQ_MIN_SLOT_SHIFT (FRAC_BITS + QFQ_MTU_SHIFT - QFQ_MAX_INDEX)
99
100/*
101 * Possible group states. These values are used as indexes for the bitmaps
102 * array of struct qfq_queue.
103 */
104enum qfq_state { ER, IR, EB, IB, QFQ_MAX_STATE };
105
106struct qfq_group;
107
108struct qfq_class {
109 struct Qdisc_class_common common;
110
111 unsigned int refcnt;
112 unsigned int filter_cnt;
113
114 struct gnet_stats_basic_packed bstats;
115 struct gnet_stats_queue qstats;
116 struct gnet_stats_rate_est rate_est;
117 struct Qdisc *qdisc;
118
119 struct hlist_node next; /* Link for the slot list. */
120 u64 S, F; /* flow timestamps (exact) */
121
122 /* group we belong to. In principle we would need the index,
123 * which is log_2(lmax/weight), but we never reference it
124 * directly, only the group.
125 */
126 struct qfq_group *grp;
127
128 /* these are copied from the flowset. */
129 u32 inv_w; /* ONE_FP/weight */
130 u32 lmax; /* Max packet size for this flow. */
131};
132
133struct qfq_group {
134 u64 S, F; /* group timestamps (approx). */
135 unsigned int slot_shift; /* Slot shift. */
136 unsigned int index; /* Group index. */
137 unsigned int front; /* Index of the front slot. */
138 unsigned long full_slots; /* non-empty slots */
139
140 /* Array of RR lists of active classes. */
141 struct hlist_head slots[QFQ_MAX_SLOTS];
142};
143
144struct qfq_sched {
145 struct tcf_proto *filter_list;
146 struct Qdisc_class_hash clhash;
147
148 u64 V; /* Precise virtual time. */
149 u32 wsum; /* weight sum */
150
151 unsigned long bitmaps[QFQ_MAX_STATE]; /* Group bitmaps. */
152 struct qfq_group groups[QFQ_MAX_INDEX + 1]; /* The groups. */
153};
154
155static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
156{
157 struct qfq_sched *q = qdisc_priv(sch);
158 struct Qdisc_class_common *clc;
159
160 clc = qdisc_class_find(&q->clhash, classid);
161 if (clc == NULL)
162 return NULL;
163 return container_of(clc, struct qfq_class, common);
164}
165
166static void qfq_purge_queue(struct qfq_class *cl)
167{
168 unsigned int len = cl->qdisc->q.qlen;
169
170 qdisc_reset(cl->qdisc);
171 qdisc_tree_decrease_qlen(cl->qdisc, len);
172}
173
174static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = {
175 [TCA_QFQ_WEIGHT] = { .type = NLA_U32 },
176 [TCA_QFQ_LMAX] = { .type = NLA_U32 },
177};
178
179/*
180 * Calculate a flow index, given its weight and maximum packet length.
181 * index = log_2(maxlen/weight) but we need to apply the scaling.
182 * This is used only once at flow creation.
183 */
184static int qfq_calc_index(u32 inv_w, unsigned int maxlen)
185{
186 u64 slot_size = (u64)maxlen * inv_w;
187 unsigned long size_map;
188 int index = 0;
189
190 size_map = slot_size >> QFQ_MIN_SLOT_SHIFT;
191 if (!size_map)
192 goto out;
193
194 index = __fls(size_map) + 1; /* basically a log_2 */
195 index -= !(slot_size - (1ULL << (index + QFQ_MIN_SLOT_SHIFT - 1)));
196
197 if (index < 0)
198 index = 0;
199out:
200 pr_debug("qfq calc_index: W = %lu, L = %u, I = %d\n",
201 (unsigned long) ONE_FP/inv_w, maxlen, index);
202
203 return index;
204}
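/*
 * Worked example (assumed inputs): weight = 1 gives inv_w = ONE_FP,
 * so for maxlen = 1500 slot_size = 1500ULL << 30 and size_map =
 * slot_size >> QFQ_MIN_SLOT_SHIFT = 1500 << 8. __fls() of that is
 * 18, hence index = 19 = QFQ_MAX_INDEX: a minimum-weight, full-MTU
 * flow lands in the last group, matching the Lmax/w_min bound in
 * the comment at the top of this file.
 */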
205
206static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
207 struct nlattr **tca, unsigned long *arg)
208{
209 struct qfq_sched *q = qdisc_priv(sch);
210 struct qfq_class *cl = (struct qfq_class *)*arg;
211 struct nlattr *tb[TCA_QFQ_MAX + 1];
212 u32 weight, lmax, inv_w;
213 int i, err;
214
215 if (tca[TCA_OPTIONS] == NULL) {
216 pr_notice("qfq: no options\n");
217 return -EINVAL;
218 }
219
220 err = nla_parse_nested(tb, TCA_QFQ_MAX, tca[TCA_OPTIONS], qfq_policy);
221 if (err < 0)
222 return err;
223
224 if (tb[TCA_QFQ_WEIGHT]) {
225 weight = nla_get_u32(tb[TCA_QFQ_WEIGHT]);
226 if (!weight || weight > (1UL << QFQ_MAX_WSHIFT)) {
227 pr_notice("qfq: invalid weight %u\n", weight);
228 return -EINVAL;
229 }
230 } else
231 weight = 1;
232
233 inv_w = ONE_FP / weight;
234 weight = ONE_FP / inv_w;
235 if (q->wsum + weight > QFQ_MAX_WSUM) {
236 pr_notice("qfq: total weight out of range (%u + %u)\n",
237 weight, q->wsum);
238 return -EINVAL;
239 }
240
241 if (tb[TCA_QFQ_LMAX]) {
242 lmax = nla_get_u32(tb[TCA_QFQ_LMAX]);
243 if (!lmax || lmax > (1UL << QFQ_MTU_SHIFT)) {
244 pr_notice("qfq: invalid max length %u\n", lmax);
245 return -EINVAL;
246 }
247 } else
248 lmax = 1UL << QFQ_MTU_SHIFT;
249
250 if (cl != NULL) {
251 if (tca[TCA_RATE]) {
252 err = gen_replace_estimator(&cl->bstats, &cl->rate_est,
253 qdisc_root_sleeping_lock(sch),
254 tca[TCA_RATE]);
255 if (err)
256 return err;
257 }
258
259 sch_tree_lock(sch);
260 if (tb[TCA_QFQ_WEIGHT]) {
261 q->wsum = weight - ONE_FP / cl->inv_w;
262 cl->inv_w = inv_w;
263 }
264 sch_tree_unlock(sch);
265
266 return 0;
267 }
268
269 cl = kzalloc(sizeof(struct qfq_class), GFP_KERNEL);
270 if (cl == NULL)
271 return -ENOBUFS;
272
273 cl->refcnt = 1;
274 cl->common.classid = classid;
275 cl->lmax = lmax;
276 cl->inv_w = inv_w;
277 i = qfq_calc_index(cl->inv_w, cl->lmax);
278
279 cl->grp = &q->groups[i];
280 q->wsum += weight;
281
282 cl->qdisc = qdisc_create_dflt(sch->dev_queue,
283 &pfifo_qdisc_ops, classid);
284 if (cl->qdisc == NULL)
285 cl->qdisc = &noop_qdisc;
286
287 if (tca[TCA_RATE]) {
288 err = gen_new_estimator(&cl->bstats, &cl->rate_est,
289 qdisc_root_sleeping_lock(sch),
290 tca[TCA_RATE]);
291 if (err) {
292 qdisc_destroy(cl->qdisc);
293 kfree(cl);
294 return err;
295 }
296 }
297
298 sch_tree_lock(sch);
299 qdisc_class_hash_insert(&q->clhash, &cl->common);
300 sch_tree_unlock(sch);
301
302 qdisc_class_hash_grow(sch, &q->clhash);
303
304 *arg = (unsigned long)cl;
305 return 0;
306}
307
308static void qfq_destroy_class(struct Qdisc *sch, struct qfq_class *cl)
309{
310 struct qfq_sched *q = qdisc_priv(sch);
311
312 if (cl->inv_w) {
313 q->wsum -= ONE_FP / cl->inv_w;
314 cl->inv_w = 0;
315 }
316
317 gen_kill_estimator(&cl->bstats, &cl->rate_est);
318 qdisc_destroy(cl->qdisc);
319 kfree(cl);
320}
321
322static int qfq_delete_class(struct Qdisc *sch, unsigned long arg)
323{
324 struct qfq_sched *q = qdisc_priv(sch);
325 struct qfq_class *cl = (struct qfq_class *)arg;
326
327 if (cl->filter_cnt > 0)
328 return -EBUSY;
329
330 sch_tree_lock(sch);
331
332 qfq_purge_queue(cl);
333 qdisc_class_hash_remove(&q->clhash, &cl->common);
334
335 BUG_ON(--cl->refcnt == 0);
336 /*
337 * This shouldn't happen: we "hold" one cops->get() when called
338 * from tc_ctl_tclass; the destroy method is done from cops->put().
339 */
340
341 sch_tree_unlock(sch);
342 return 0;
343}
344
345static unsigned long qfq_get_class(struct Qdisc *sch, u32 classid)
346{
347 struct qfq_class *cl = qfq_find_class(sch, classid);
348
349 if (cl != NULL)
350 cl->refcnt++;
351
352 return (unsigned long)cl;
353}
354
355static void qfq_put_class(struct Qdisc *sch, unsigned long arg)
356{
357 struct qfq_class *cl = (struct qfq_class *)arg;
358
359 if (--cl->refcnt == 0)
360 qfq_destroy_class(sch, cl);
361}
362
363static struct tcf_proto **qfq_tcf_chain(struct Qdisc *sch, unsigned long cl)
364{
365 struct qfq_sched *q = qdisc_priv(sch);
366
367 if (cl)
368 return NULL;
369
370 return &q->filter_list;
371}
372
373static unsigned long qfq_bind_tcf(struct Qdisc *sch, unsigned long parent,
374 u32 classid)
375{
376 struct qfq_class *cl = qfq_find_class(sch, classid);
377
378 if (cl != NULL)
379 cl->filter_cnt++;
380
381 return (unsigned long)cl;
382}
383
384static void qfq_unbind_tcf(struct Qdisc *sch, unsigned long arg)
385{
386 struct qfq_class *cl = (struct qfq_class *)arg;
387
388 cl->filter_cnt--;
389}
390
391static int qfq_graft_class(struct Qdisc *sch, unsigned long arg,
392 struct Qdisc *new, struct Qdisc **old)
393{
394 struct qfq_class *cl = (struct qfq_class *)arg;
395
396 if (new == NULL) {
397 new = qdisc_create_dflt(sch->dev_queue,
398 &pfifo_qdisc_ops, cl->common.classid);
399 if (new == NULL)
400 new = &noop_qdisc;
401 }
402
403 sch_tree_lock(sch);
404 qfq_purge_queue(cl);
405 *old = cl->qdisc;
406 cl->qdisc = new;
407 sch_tree_unlock(sch);
408 return 0;
409}
410
411static struct Qdisc *qfq_class_leaf(struct Qdisc *sch, unsigned long arg)
412{
413 struct qfq_class *cl = (struct qfq_class *)arg;
414
415 return cl->qdisc;
416}
417
418static int qfq_dump_class(struct Qdisc *sch, unsigned long arg,
419 struct sk_buff *skb, struct tcmsg *tcm)
420{
421 struct qfq_class *cl = (struct qfq_class *)arg;
422 struct nlattr *nest;
423
424 tcm->tcm_parent = TC_H_ROOT;
425 tcm->tcm_handle = cl->common.classid;
426 tcm->tcm_info = cl->qdisc->handle;
427
428 nest = nla_nest_start(skb, TCA_OPTIONS);
429 if (nest == NULL)
430 goto nla_put_failure;
431 NLA_PUT_U32(skb, TCA_QFQ_WEIGHT, ONE_FP/cl->inv_w);
432 NLA_PUT_U32(skb, TCA_QFQ_LMAX, cl->lmax);
433 return nla_nest_end(skb, nest);
434
435nla_put_failure:
436 nla_nest_cancel(skb, nest);
437 return -EMSGSIZE;
438}
439
440static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
441 struct gnet_dump *d)
442{
443 struct qfq_class *cl = (struct qfq_class *)arg;
444 struct tc_qfq_stats xstats;
445
446 memset(&xstats, 0, sizeof(xstats));
447 cl->qdisc->qstats.qlen = cl->qdisc->q.qlen;
448
449 xstats.weight = ONE_FP/cl->inv_w;
450 xstats.lmax = cl->lmax;
451
452 if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
453 gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
454 gnet_stats_copy_queue(d, &cl->qdisc->qstats) < 0)
455 return -1;
456
457 return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
458}
459
460static void qfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
461{
462 struct qfq_sched *q = qdisc_priv(sch);
463 struct qfq_class *cl;
464 struct hlist_node *n;
465 unsigned int i;
466
467 if (arg->stop)
468 return;
469
470 for (i = 0; i < q->clhash.hashsize; i++) {
471 hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode) {
472 if (arg->count < arg->skip) {
473 arg->count++;
474 continue;
475 }
476 if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
477 arg->stop = 1;
478 return;
479 }
480 arg->count++;
481 }
482 }
483}
484
485static struct qfq_class *qfq_classify(struct sk_buff *skb, struct Qdisc *sch,
486 int *qerr)
487{
488 struct qfq_sched *q = qdisc_priv(sch);
489 struct qfq_class *cl;
490 struct tcf_result res;
491 int result;
492
493 if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) {
494 pr_debug("qfq_classify: found %d\n", skb->priority);
495 cl = qfq_find_class(sch, skb->priority);
496 if (cl != NULL)
497 return cl;
498 }
499
500 *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
501 result = tc_classify(skb, q->filter_list, &res);
502 if (result >= 0) {
503#ifdef CONFIG_NET_CLS_ACT
504 switch (result) {
505 case TC_ACT_QUEUED:
506 case TC_ACT_STOLEN:
507 *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
508 case TC_ACT_SHOT:
509 return NULL;
510 }
511#endif
512 cl = (struct qfq_class *)res.class;
513 if (cl == NULL)
514 cl = qfq_find_class(sch, res.classid);
515 return cl;
516 }
517
518 return NULL;
519}
520
521/* Generic comparison function, handling wraparound. */
522static inline int qfq_gt(u64 a, u64 b)
523{
524 return (s64)(a - b) > 0;
525}
526
527/* Round a precise timestamp to its slotted value. */
528static inline u64 qfq_round_down(u64 ts, unsigned int shift)
529{
530 return ts & ~((1ULL << shift) - 1);
531}
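/*
 * Examples (assumed values): qfq_gt(2, ~0ULL) is true, because the
 * u64 subtraction wraps to 3 and is then read as a positive s64 --
 * this is what keeps the comparison safe across timestamp
 * wraparound. qfq_round_down(0x12345, 8) clears the low 8 bits and
 * returns 0x12300.
 */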
532
533/* return the pointer to the group with lowest index in the bitmap */
534static inline struct qfq_group *qfq_ffs(struct qfq_sched *q,
535 unsigned long bitmap)
536{
537 int index = __ffs(bitmap);
538 return &q->groups[index];
539}
540/* Calculate a mask to mimic what would be ffs_from(). */
541static inline unsigned long mask_from(unsigned long bitmap, int from)
542{
543 return bitmap & ~((1UL << from) - 1);
544}
545
546/*
547 * The state computation relies on ER=0, IR=1, EB=2, IB=3
548 * First compute eligibility comparing grp->S, q->V,
549 * then check if someone is blocking us and possibly add EB
550 */
551static int qfq_calc_state(struct qfq_sched *q, const struct qfq_group *grp)
552{
553 /* if S > V we are not eligible */
554 unsigned int state = qfq_gt(grp->S, q->V);
555 unsigned long mask = mask_from(q->bitmaps[ER], grp->index);
556 struct qfq_group *next;
557
558 if (mask) {
559 next = qfq_ffs(q, mask);
560 if (qfq_gt(grp->F, next->F))
561 state |= EB;
562 }
563
564 return state;
565}
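/*
 * Example of the encoding: eligibility supplies bit 0 (S > V gives
 * IR = 1, otherwise ER = 0); if some ER group at an index >= ours
 * finishes earlier than we do, bit 1 is OR-ed in, turning ER into
 * EB = 2 and IR into IB = 3.
 */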
566
567
568/*
569 * In principle
570 * q->bitmaps[dst] |= q->bitmaps[src] & mask;
571 * q->bitmaps[src] &= ~mask;
572 * but we should make sure that src != dst
573 */
574static inline void qfq_move_groups(struct qfq_sched *q, unsigned long mask,
575 int src, int dst)
576{
577 q->bitmaps[dst] |= q->bitmaps[src] & mask;
578 q->bitmaps[src] &= ~mask;
579}
580
581static void qfq_unblock_groups(struct qfq_sched *q, int index, u64 old_F)
582{
583 unsigned long mask = mask_from(q->bitmaps[ER], index + 1);
584 struct qfq_group *next;
585
586 if (mask) {
587 next = qfq_ffs(q, mask);
588 if (!qfq_gt(next->F, old_F))
589 return;
590 }
591
592 mask = (1UL << index) - 1;
593 qfq_move_groups(q, mask, EB, ER);
594 qfq_move_groups(q, mask, IB, IR);
595}
596
597/*
598 * perhaps
599 *
600 old_V ^= q->V;
601 old_V >>= QFQ_MIN_SLOT_SHIFT;
602 if (old_V) {
603 ...
604 }
605 *
606 */
607static void qfq_make_eligible(struct qfq_sched *q, u64 old_V)
608{
609 unsigned long vslot = q->V >> QFQ_MIN_SLOT_SHIFT;
610 unsigned long old_vslot = old_V >> QFQ_MIN_SLOT_SHIFT;
611
612 if (vslot != old_vslot) {
613 unsigned long mask = (1UL << fls(vslot ^ old_vslot)) - 1;
614 qfq_move_groups(q, mask, IR, ER);
615 qfq_move_groups(q, mask, IB, EB);
616 }
617}
618
619
620/*
621 * XXX we should make sure that slot becomes less than 32.
622 * This is guaranteed by the input values.
623 * roundedS is always cl->S rounded on grp->slot_shift bits.
624 */
625static void qfq_slot_insert(struct qfq_group *grp, struct qfq_class *cl,
626 u64 roundedS)
627{
628 u64 slot = (roundedS - grp->S) >> grp->slot_shift;
629 unsigned int i = (grp->front + slot) % QFQ_MAX_SLOTS;
630
631 hlist_add_head(&cl->next, &grp->slots[i]);
632 __set_bit(slot, &grp->full_slots);
633}
634
635/* Maybe introduce hlist_first_entry?? */
636static struct qfq_class *qfq_slot_head(struct qfq_group *grp)
637{
638 return hlist_entry(grp->slots[grp->front].first,
639 struct qfq_class, next);
640}
641
642/*
643 * remove the entry from the slot
644 */
645static void qfq_front_slot_remove(struct qfq_group *grp)
646{
647 struct qfq_class *cl = qfq_slot_head(grp);
648
649 BUG_ON(!cl);
650 hlist_del(&cl->next);
651 if (hlist_empty(&grp->slots[grp->front]))
652 __clear_bit(0, &grp->full_slots);
653}
654
655/*
656 * Returns the first full queue in a group. As a side effect,
657 * adjust the bucket list so the first non-empty bucket is at
658 * position 0 in full_slots.
659 */
660static struct qfq_class *qfq_slot_scan(struct qfq_group *grp)
661{
662 unsigned int i;
663
664 pr_debug("qfq slot_scan: grp %u full %#lx\n",
665 grp->index, grp->full_slots);
666
667 if (grp->full_slots == 0)
668 return NULL;
669
670 i = __ffs(grp->full_slots); /* zero based */
671 if (i > 0) {
672 grp->front = (grp->front + i) % QFQ_MAX_SLOTS;
673 grp->full_slots >>= i;
674 }
675
676 return qfq_slot_head(grp);
677}
678
679/*
680 * adjust the bucket list. When the start time of a group decreases,
681 * we move the index down (modulo QFQ_MAX_SLOTS) so we don't need to
682 * move the objects. The mask of occupied slots must be shifted
683 * because we use ffs() to find the first non-empty slot.
684 * This covers decreases in the group's start time, but what about
685 * increases of the start time ?
686 * Here too we should make sure that i is less than 32
687 */
688static void qfq_slot_rotate(struct qfq_group *grp, u64 roundedS)
689{
690 unsigned int i = (grp->S - roundedS) >> grp->slot_shift;
691
692 grp->full_slots <<= i;
693 grp->front = (grp->front - i) % QFQ_MAX_SLOTS;
694}
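/*
 * Example (assumed state): with QFQ_MAX_SLOTS = 32, front = 3 and a
 * start time two slots earlier (i = 2), front becomes (3 - 2) % 32
 * = 1 and full_slots is shifted left by 2, so every occupied bucket
 * keeps its distance from the new front. The modulo stays exact
 * even on unsigned underflow because 32 divides 2^32.
 */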
695
696static void qfq_update_eligible(struct qfq_sched *q, u64 old_V)
697{
698 struct qfq_group *grp;
699 unsigned long ineligible;
700
701 ineligible = q->bitmaps[IR] | q->bitmaps[IB];
702 if (ineligible) {
703 if (!q->bitmaps[ER]) {
704 grp = qfq_ffs(q, ineligible);
705 if (qfq_gt(grp->S, q->V))
706 q->V = grp->S;
707 }
708 qfq_make_eligible(q, old_V);
709 }
710}
711
712/* What is length of next packet in queue (0 if queue is empty) */
713static unsigned int qdisc_peek_len(struct Qdisc *sch)
714{
715 struct sk_buff *skb;
716
717 skb = sch->ops->peek(sch);
718 return skb ? qdisc_pkt_len(skb) : 0;
719}
720
721/*
722 * Updates the class, returns true if also the group needs to be updated.
723 */
724static bool qfq_update_class(struct qfq_group *grp, struct qfq_class *cl)
725{
726 unsigned int len = qdisc_peek_len(cl->qdisc);
727
728 cl->S = cl->F;
729 if (!len)
730 qfq_front_slot_remove(grp); /* queue is empty */
731 else {
732 u64 roundedS;
733
734 cl->F = cl->S + (u64)len * cl->inv_w;
735 roundedS = qfq_round_down(cl->S, grp->slot_shift);
736 if (roundedS == grp->S)
737 return false;
738
739 qfq_front_slot_remove(grp);
740 qfq_slot_insert(grp, cl, roundedS);
741 }
742
743 return true;
744}
745
746static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
747{
748 struct qfq_sched *q = qdisc_priv(sch);
749 struct qfq_group *grp;
750 struct qfq_class *cl;
751 struct sk_buff *skb;
752 unsigned int len;
753 u64 old_V;
754
755 if (!q->bitmaps[ER])
756 return NULL;
757
758 grp = qfq_ffs(q, q->bitmaps[ER]);
759
760 cl = qfq_slot_head(grp);
761 skb = qdisc_dequeue_peeked(cl->qdisc);
762 if (!skb) {
763 WARN_ONCE(1, "qfq_dequeue: non-workconserving leaf\n");
764 return NULL;
765 }
766
767 sch->q.qlen--;
768 qdisc_bstats_update(sch, skb);
769
770 old_V = q->V;
771 len = qdisc_pkt_len(skb);
772 q->V += (u64)len * IWSUM;
773 pr_debug("qfq dequeue: len %u F %lld now %lld\n",
774 len, (unsigned long long) cl->F, (unsigned long long) q->V);
775
776 if (qfq_update_class(grp, cl)) {
777 u64 old_F = grp->F;
778
779 cl = qfq_slot_scan(grp);
780 if (!cl)
781 __clear_bit(grp->index, &q->bitmaps[ER]);
782 else {
783 u64 roundedS = qfq_round_down(cl->S, grp->slot_shift);
784 unsigned int s;
785
786 if (grp->S == roundedS)
787 goto skip_unblock;
788 grp->S = roundedS;
789 grp->F = roundedS + (2ULL << grp->slot_shift);
790 __clear_bit(grp->index, &q->bitmaps[ER]);
791 s = qfq_calc_state(q, grp);
792 __set_bit(grp->index, &q->bitmaps[s]);
793 }
794
795 qfq_unblock_groups(q, grp->index, old_F);
796 }
797
798skip_unblock:
799 qfq_update_eligible(q, old_V);
800
801 return skb;
802}
803
804/*
805 * Assign a reasonable start time for a new flow k in group i.
806 * Admissible values for \hat(F) are multiples of \sigma_i
807 * no greater than V+\sigma_i . Larger values mean that
808 * we had a wraparound so we consider the timestamp to be stale.
809 *
810 * If F is not stale and F >= V then we set S = F.
811 * Otherwise we should assign S = V, but this may violate
812 * the ordering in ER. So, if we have groups in ER, set S to
813 * the F_j of the first group j which would be blocking us.
814 * We are guaranteed not to move S backward because
815 * otherwise our group i would still be blocked.
816 */
817static void qfq_update_start(struct qfq_sched *q, struct qfq_class *cl)
818{
819 unsigned long mask;
820 uint32_t limit, roundedF;
821 int slot_shift = cl->grp->slot_shift;
822
823 roundedF = qfq_round_down(cl->F, slot_shift);
824 limit = qfq_round_down(q->V, slot_shift) + (1UL << slot_shift);
825
826 if (!qfq_gt(cl->F, q->V) || qfq_gt(roundedF, limit)) {
827 /* timestamp was stale */
828 mask = mask_from(q->bitmaps[ER], cl->grp->index);
829 if (mask) {
830 struct qfq_group *next = qfq_ffs(q, mask);
831 if (qfq_gt(roundedF, next->F)) {
832 cl->S = next->F;
833 return;
834 }
835 }
836 cl->S = q->V;
837 } else /* timestamp is not stale */
838 cl->S = cl->F;
839}
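/*
 * Small-number illustration (assumed values): slot_shift = 2 and
 * V = 20 give limit = 20 + 4 = 24. A flow waking up with F = 30
 * has roundedF = 28 > limit, i.e. a stale timestamp: it restarts
 * at S = V = 20, unless an ER group with F = 26 exists, in which
 * case S = 26 so the ER ordering is not violated.
 */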
840
841static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
842{
843 struct qfq_sched *q = qdisc_priv(sch);
844 struct qfq_group *grp;
845 struct qfq_class *cl;
846 int err;
847 u64 roundedS;
848 int s;
849
850 cl = qfq_classify(skb, sch, &err);
851 if (cl == NULL) {
852 if (err & __NET_XMIT_BYPASS)
853 sch->qstats.drops++;
854 kfree_skb(skb);
855 return err;
856 }
857 pr_debug("qfq_enqueue: cl = %x\n", cl->common.classid);
858
859 err = qdisc_enqueue(skb, cl->qdisc);
860 if (unlikely(err != NET_XMIT_SUCCESS)) {
861 pr_debug("qfq_enqueue: enqueue failed %d\n", err);
862 if (net_xmit_drop_count(err)) {
863 cl->qstats.drops++;
864 sch->qstats.drops++;
865 }
866 return err;
867 }
868
869 bstats_update(&cl->bstats, skb);
870 ++sch->q.qlen;
871
872 /* If the new skb is not the head of queue, then done here. */
873 if (cl->qdisc->q.qlen != 1)
874 return err;
875
876 /* If reach this point, queue q was idle */
877 grp = cl->grp;
878 qfq_update_start(q, cl);
879
880 /* compute new finish time and rounded start. */
881 cl->F = cl->S + (u64)qdisc_pkt_len(skb) * cl->inv_w;
882 roundedS = qfq_round_down(cl->S, grp->slot_shift);
883
884 /*
885 * insert cl in the correct bucket.
886 * If cl->S >= grp->S we don't need to adjust the
887 * bucket list and simply go to the insertion phase.
888 * Otherwise grp->S is decreasing, we must make room
889 * in the bucket list, and also recompute the group state.
890 * Finally, if there were no flows in this group and nobody
891 * was in ER make sure to adjust V.
892 */
893 if (grp->full_slots) {
894 if (!qfq_gt(grp->S, cl->S))
895 goto skip_update;
896
897 /* create a slot for this cl->S */
898 qfq_slot_rotate(grp, roundedS);
899 /* group was surely ineligible, remove */
900 __clear_bit(grp->index, &q->bitmaps[IR]);
901 __clear_bit(grp->index, &q->bitmaps[IB]);
902 } else if (!q->bitmaps[ER] && qfq_gt(roundedS, q->V))
903 q->V = roundedS;
904
905 grp->S = roundedS;
906 grp->F = roundedS + (2ULL << grp->slot_shift);
907 s = qfq_calc_state(q, grp);
908 __set_bit(grp->index, &q->bitmaps[s]);
909
910 pr_debug("qfq enqueue: new state %d %#lx S %lld F %lld V %lld\n",
911 s, q->bitmaps[s],
912 (unsigned long long) cl->S,
913 (unsigned long long) cl->F,
914 (unsigned long long) q->V);
915
916skip_update:
917 qfq_slot_insert(grp, cl, roundedS);
918
919 return err;
920}
921
922
923static void qfq_slot_remove(struct qfq_sched *q, struct qfq_group *grp,
924 struct qfq_class *cl)
925{
926 unsigned int i, offset;
927 u64 roundedS;
928
929 roundedS = qfq_round_down(cl->S, grp->slot_shift);
930 offset = (roundedS - grp->S) >> grp->slot_shift;
931 i = (grp->front + offset) % QFQ_MAX_SLOTS;
932
933 hlist_del(&cl->next);
934 if (hlist_empty(&grp->slots[i]))
935 __clear_bit(offset, &grp->full_slots);
936}
937
938/*
939 * called to forcibly destroy a queue.
940 * If the queue is not in the front bucket, or if it has
941 * other queues in the front bucket, we can simply remove
942 * the queue with no other side effects.
943 * Otherwise we must propagate the event up.
944 */
945static void qfq_deactivate_class(struct qfq_sched *q, struct qfq_class *cl)
946{
947 struct qfq_group *grp = cl->grp;
948 unsigned long mask;
949 u64 roundedS;
950 int s;
951
952 cl->F = cl->S;
953 qfq_slot_remove(q, grp, cl);
954
955 if (!grp->full_slots) {
956 __clear_bit(grp->index, &q->bitmaps[IR]);
957 __clear_bit(grp->index, &q->bitmaps[EB]);
958 __clear_bit(grp->index, &q->bitmaps[IB]);
959
960 if (test_bit(grp->index, &q->bitmaps[ER]) &&
961 !(q->bitmaps[ER] & ~((1UL << grp->index) - 1))) {
962 mask = q->bitmaps[ER] & ((1UL << grp->index) - 1);
963 if (mask)
964 mask = ~((1UL << __fls(mask)) - 1);
965 else
966 mask = ~0UL;
967 qfq_move_groups(q, mask, EB, ER);
968 qfq_move_groups(q, mask, IB, IR);
969 }
970 __clear_bit(grp->index, &q->bitmaps[ER]);
971 } else if (hlist_empty(&grp->slots[grp->front])) {
972 cl = qfq_slot_scan(grp);
973 roundedS = qfq_round_down(cl->S, grp->slot_shift);
974 if (grp->S != roundedS) {
975 __clear_bit(grp->index, &q->bitmaps[ER]);
976 __clear_bit(grp->index, &q->bitmaps[IR]);
977 __clear_bit(grp->index, &q->bitmaps[EB]);
978 __clear_bit(grp->index, &q->bitmaps[IB]);
979 grp->S = roundedS;
980 grp->F = roundedS + (2ULL << grp->slot_shift);
981 s = qfq_calc_state(q, grp);
982 __set_bit(grp->index, &q->bitmaps[s]);
983 }
984 }
985
986 qfq_update_eligible(q, q->V);
987}
988
989static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg)
990{
991 struct qfq_sched *q = qdisc_priv(sch);
992 struct qfq_class *cl = (struct qfq_class *)arg;
993
994 if (cl->qdisc->q.qlen == 0)
995 qfq_deactivate_class(q, cl);
996}
997
998static unsigned int qfq_drop(struct Qdisc *sch)
999{
1000 struct qfq_sched *q = qdisc_priv(sch);
1001 struct qfq_group *grp;
1002 unsigned int i, j, len;
1003
1004 for (i = 0; i <= QFQ_MAX_INDEX; i++) {
1005 grp = &q->groups[i];
1006 for (j = 0; j < QFQ_MAX_SLOTS; j++) {
1007 struct qfq_class *cl;
1008 struct hlist_node *n;
1009
1010 hlist_for_each_entry(cl, n, &grp->slots[j], next) {
1011
1012 if (!cl->qdisc->ops->drop)
1013 continue;
1014
1015 len = cl->qdisc->ops->drop(cl->qdisc);
1016 if (len > 0) {
1017 sch->q.qlen--;
1018 if (!cl->qdisc->q.qlen)
1019 qfq_deactivate_class(q, cl);
1020
1021 return len;
1022 }
1023 }
1024 }
1025 }
1026
1027 return 0;
1028}
1029
1030static int qfq_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
1031{
1032 struct qfq_sched *q = qdisc_priv(sch);
1033 struct qfq_group *grp;
1034 int i, j, err;
1035
1036 err = qdisc_class_hash_init(&q->clhash);
1037 if (err < 0)
1038 return err;
1039
1040 for (i = 0; i <= QFQ_MAX_INDEX; i++) {
1041 grp = &q->groups[i];
1042 grp->index = i;
1043 grp->slot_shift = QFQ_MTU_SHIFT + FRAC_BITS
1044 - (QFQ_MAX_INDEX - i);
1045 for (j = 0; j < QFQ_MAX_SLOTS; j++)
1046 INIT_HLIST_HEAD(&grp->slots[j]);
1047 }
1048
1049 return 0;
1050}
1051
1052static void qfq_reset_qdisc(struct Qdisc *sch)
1053{
1054 struct qfq_sched *q = qdisc_priv(sch);
1055 struct qfq_group *grp;
1056 struct qfq_class *cl;
1057 struct hlist_node *n, *tmp;
1058 unsigned int i, j;
1059
1060 for (i = 0; i <= QFQ_MAX_INDEX; i++) {
1061 grp = &q->groups[i];
1062 for (j = 0; j < QFQ_MAX_SLOTS; j++) {
1063 hlist_for_each_entry_safe(cl, n, tmp,
1064 &grp->slots[j], next) {
1065 qfq_deactivate_class(q, cl);
1066 }
1067 }
1068 }
1069
1070 for (i = 0; i < q->clhash.hashsize; i++) {
1071 hlist_for_each_entry(cl, n, &q->clhash.hash[i], common.hnode)
1072 qdisc_reset(cl->qdisc);
1073 }
1074 sch->q.qlen = 0;
1075}
1076
1077static void qfq_destroy_qdisc(struct Qdisc *sch)
1078{
1079 struct qfq_sched *q = qdisc_priv(sch);
1080 struct qfq_class *cl;
1081 struct hlist_node *n, *next;
1082 unsigned int i;
1083
1084 tcf_destroy_chain(&q->filter_list);
1085
1086 for (i = 0; i < q->clhash.hashsize; i++) {
1087 hlist_for_each_entry_safe(cl, n, next, &q->clhash.hash[i],
1088 common.hnode) {
1089 qfq_destroy_class(sch, cl);
1090 }
1091 }
1092 qdisc_class_hash_destroy(&q->clhash);
1093}
1094
1095static const struct Qdisc_class_ops qfq_class_ops = {
1096 .change = qfq_change_class,
1097 .delete = qfq_delete_class,
1098 .get = qfq_get_class,
1099 .put = qfq_put_class,
1100 .tcf_chain = qfq_tcf_chain,
1101 .bind_tcf = qfq_bind_tcf,
1102 .unbind_tcf = qfq_unbind_tcf,
1103 .graft = qfq_graft_class,
1104 .leaf = qfq_class_leaf,
1105 .qlen_notify = qfq_qlen_notify,
1106 .dump = qfq_dump_class,
1107 .dump_stats = qfq_dump_class_stats,
1108 .walk = qfq_walk,
1109};
1110
1111static struct Qdisc_ops qfq_qdisc_ops __read_mostly = {
1112 .cl_ops = &qfq_class_ops,
1113 .id = "qfq",
1114 .priv_size = sizeof(struct qfq_sched),
1115 .enqueue = qfq_enqueue,
1116 .dequeue = qfq_dequeue,
1117 .peek = qdisc_peek_dequeued,
1118 .drop = qfq_drop,
1119 .init = qfq_init_qdisc,
1120 .reset = qfq_reset_qdisc,
1121 .destroy = qfq_destroy_qdisc,
1122 .owner = THIS_MODULE,
1123};
1124
1125static int __init qfq_init(void)
1126{
1127 return register_qdisc(&qfq_qdisc_ops);
1128}
1129
1130static void __exit qfq_exit(void)
1131{
1132 unregister_qdisc(&qfq_qdisc_ops);
1133}
1134
1135module_init(qfq_init);
1136module_exit(qfq_exit);
1137MODULE_LICENSE("GPL");
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index c2e628dfaacc..7ef87f9eb675 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -169,7 +169,7 @@ static unsigned int sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
169 } 169 }
170 case htons(ETH_P_IPV6): 170 case htons(ETH_P_IPV6):
171 { 171 {
172 struct ipv6hdr *iph; 172 const struct ipv6hdr *iph;
173 int poff; 173 int poff;
174 174
175 if (!pskb_network_may_pull(skb, sizeof(*iph))) 175 if (!pskb_network_may_pull(skb, sizeof(*iph)))
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 6b04287913cd..1a21c571aa03 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -569,6 +569,8 @@ void sctp_assoc_rm_peer(struct sctp_association *asoc,
569 sctp_assoc_set_primary(asoc, transport); 569 sctp_assoc_set_primary(asoc, transport);
570 if (asoc->peer.active_path == peer) 570 if (asoc->peer.active_path == peer)
571 asoc->peer.active_path = transport; 571 asoc->peer.active_path = transport;
572 if (asoc->peer.retran_path == peer)
573 asoc->peer.retran_path = transport;
572 if (asoc->peer.last_data_from == peer) 574 if (asoc->peer.last_data_from == peer)
573 asoc->peer.last_data_from = transport; 575 asoc->peer.last_data_from = transport;
574 576
@@ -1323,6 +1325,8 @@ void sctp_assoc_update_retran_path(struct sctp_association *asoc)
1323 1325
1324 if (t) 1326 if (t)
1325 asoc->peer.retran_path = t; 1327 asoc->peer.retran_path = t;
1328 else
1329 t = asoc->peer.retran_path;
1326 1330
1327 SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_update_retran_path:association" 1331 SCTP_DEBUG_PRINTK_IPADDR("sctp_assoc_update_retran_path:association"
1328 " %p addr: ", 1332 " %p addr: ",
@@ -1593,7 +1597,7 @@ void sctp_assoc_clean_asconf_ack_cache(const struct sctp_association *asoc)
1593 struct sctp_chunk *ack; 1597 struct sctp_chunk *ack;
1594 struct sctp_chunk *tmp; 1598 struct sctp_chunk *tmp;
1595 1599
1596 /* We can remove all the entries from the queue upto 1600 /* We can remove all the entries from the queue up to
1597 * the "Peer-Sequence-Number". 1601 * the "Peer-Sequence-Number".
1598 */ 1602 */
1599 list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list, 1603 list_for_each_entry_safe(ack, tmp, &asoc->asconf_ack_list,
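The two associola.c hunks above close the same hole from both ends: sctp_assoc_rm_peer() now re-points retran_path when the removed peer owned it, and sctp_assoc_update_retran_path() keeps the current path instead of installing NULL when no candidate transport is found. A minimal sketch of the fallback shape, with invented names:

#include <stdio.h>

struct path { const char *name; };

/* Prefer the freshly selected candidate, but never NULL the pointer out. */
static struct path *pick_retran(struct path *candidate, struct path *current)
{
	return candidate ? candidate : current;
}

int main(void)
{
	struct path cur = { "current" };

	printf("%s\n", pick_retran(NULL, &cur)->name);	/* -> "current" */
	return 0;
}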
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index ddbbf7c81fa1..865e68fef21c 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -113,7 +113,7 @@ struct sctp_shared_key *sctp_auth_shkey_create(__u16 key_id, gfp_t gfp)
113 return new; 113 return new;
114} 114}
115 115
116/* Free the shared key stucture */ 116/* Free the shared key structure */
117static void sctp_auth_shkey_free(struct sctp_shared_key *sh_key) 117static void sctp_auth_shkey_free(struct sctp_shared_key *sh_key)
118{ 118{
119 BUG_ON(!list_empty(&sh_key->key_list)); 119 BUG_ON(!list_empty(&sh_key->key_list));
@@ -122,7 +122,7 @@ static void sctp_auth_shkey_free(struct sctp_shared_key *sh_key)
122 kfree(sh_key); 122 kfree(sh_key);
123} 123}
124 124
125/* Destory the entire key list. This is done during the 125/* Destroy the entire key list. This is done during the
126 * association and endpoint free process. 126 * association and endpoint free process.
127 */ 127 */
128void sctp_auth_destroy_keys(struct list_head *keys) 128void sctp_auth_destroy_keys(struct list_head *keys)
@@ -324,7 +324,7 @@ static struct sctp_auth_bytes *sctp_auth_asoc_create_secret(
324 if (!peer_key_vector || !local_key_vector) 324 if (!peer_key_vector || !local_key_vector)
325 goto out; 325 goto out;
326 326
327 /* Figure out the order in wich the key_vectors will be 327 /* Figure out the order in which the key_vectors will be
328 * added to the endpoint shared key. 328 * added to the endpoint shared key.
329 * SCTP-AUTH, Section 6.1: 329 * SCTP-AUTH, Section 6.1:
330 * This is performed by selecting the numerically smaller key 330 * This is performed by selecting the numerically smaller key
diff --git a/net/sctp/debug.c b/net/sctp/debug.c
index bf24fa697de2..ec997cfe0a7e 100644
--- a/net/sctp/debug.c
+++ b/net/sctp/debug.c
@@ -98,7 +98,6 @@ const char *sctp_cname(const sctp_subtype_t cid)
98 98
99/* These are printable forms of the states. */ 99/* These are printable forms of the states. */
100const char *const sctp_state_tbl[SCTP_STATE_NUM_STATES] = { 100const char *const sctp_state_tbl[SCTP_STATE_NUM_STATES] = {
101 "STATE_EMPTY",
102 "STATE_CLOSED", 101 "STATE_CLOSED",
103 "STATE_COOKIE_WAIT", 102 "STATE_COOKIE_WAIT",
104 "STATE_COOKIE_ECHOED", 103 "STATE_COOKIE_ECHOED",
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index e10acc01c75f..c8cc24e282c3 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -325,6 +325,7 @@ static struct sctp_association *__sctp_endpoint_lookup_assoc(
325 struct sctp_transport **transport) 325 struct sctp_transport **transport)
326{ 326{
327 struct sctp_association *asoc = NULL; 327 struct sctp_association *asoc = NULL;
328 struct sctp_association *tmp;
328 struct sctp_transport *t = NULL; 329 struct sctp_transport *t = NULL;
329 struct sctp_hashbucket *head; 330 struct sctp_hashbucket *head;
330 struct sctp_ep_common *epb; 331 struct sctp_ep_common *epb;
@@ -333,25 +334,32 @@ static struct sctp_association *__sctp_endpoint_lookup_assoc(
333 int rport; 334 int rport;
334 335
335 *transport = NULL; 336 *transport = NULL;
337
338 /* If the local port is not set, there can't be any associations
339 * on this endpoint.
340 */
341 if (!ep->base.bind_addr.port)
342 goto out;
343
336 rport = ntohs(paddr->v4.sin_port); 344 rport = ntohs(paddr->v4.sin_port);
337 345
338 hash = sctp_assoc_hashfn(ep->base.bind_addr.port, rport); 346 hash = sctp_assoc_hashfn(ep->base.bind_addr.port, rport);
339 head = &sctp_assoc_hashtable[hash]; 347 head = &sctp_assoc_hashtable[hash];
340 read_lock(&head->lock); 348 read_lock(&head->lock);
341 sctp_for_each_hentry(epb, node, &head->chain) { 349 sctp_for_each_hentry(epb, node, &head->chain) {
342 asoc = sctp_assoc(epb); 350 tmp = sctp_assoc(epb);
343 if (asoc->ep != ep || rport != asoc->peer.port) 351 if (tmp->ep != ep || rport != tmp->peer.port)
344 goto next; 352 continue;
345 353
346 t = sctp_assoc_lookup_paddr(asoc, paddr); 354 t = sctp_assoc_lookup_paddr(tmp, paddr);
347 if (t) { 355 if (t) {
356 asoc = tmp;
348 *transport = t; 357 *transport = t;
349 break; 358 break;
350 } 359 }
351next:
352 asoc = NULL;
353 } 360 }
354 read_unlock(&head->lock); 361 read_unlock(&head->lock);
362out:
355 return asoc; 363 return asoc;
356} 364}
357 365
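The lookup rewrite above fixes a scan-pointer leak: iterating with the result variable itself meant that when no association matched, asoc could still hold the last hash-bucket entry examined, which the old code papered over with the next:/asoc = NULL dance. Scanning with a temporary and assigning only on a confirmed match removes the failure mode outright; a standalone rendering with invented types:

#include <stdio.h>

struct assoc { int port; struct assoc *next; };

static struct assoc *lookup(struct assoc *head, int rport)
{
	struct assoc *found = NULL;
	struct assoc *tmp;

	for (tmp = head; tmp; tmp = tmp->next) {
		if (tmp->port != rport)
			continue;	/* 'found' is never clobbered */
		found = tmp;		/* assign only on a real match */
		break;
	}
	return found;
}

int main(void)
{
	struct assoc b = { 9000, NULL }, a = { 8000, &b };

	printf("%p\n", (void *)lookup(&a, 1234));	/* (nil), not &b */
	return 0;
}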
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 826661be73e7..741ed1648838 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -565,7 +565,7 @@ void sctp_err_finish(struct sock *sk, struct sctp_association *asoc)
565 */ 565 */
566void sctp_v4_err(struct sk_buff *skb, __u32 info) 566void sctp_v4_err(struct sk_buff *skb, __u32 info)
567{ 567{
568 struct iphdr *iph = (struct iphdr *)skb->data; 568 const struct iphdr *iph = (const struct iphdr *)skb->data;
569 const int ihlen = iph->ihl * 4; 569 const int ihlen = iph->ihl * 4;
570 const int type = icmp_hdr(skb)->type; 570 const int type = icmp_hdr(skb)->type;
571 const int code = icmp_hdr(skb)->code; 571 const int code = icmp_hdr(skb)->code;
@@ -661,7 +661,6 @@ static int sctp_rcv_ootb(struct sk_buff *skb)
661{ 661{
662 sctp_chunkhdr_t *ch; 662 sctp_chunkhdr_t *ch;
663 __u8 *ch_end; 663 __u8 *ch_end;
664 sctp_errhdr_t *err;
665 664
666 ch = (sctp_chunkhdr_t *) skb->data; 665 ch = (sctp_chunkhdr_t *) skb->data;
667 666
@@ -697,20 +696,6 @@ static int sctp_rcv_ootb(struct sk_buff *skb)
697 if (SCTP_CID_INIT == ch->type && (void *)ch != skb->data) 696 if (SCTP_CID_INIT == ch->type && (void *)ch != skb->data)
698 goto discard; 697 goto discard;
699 698
700 /* RFC 8.4, 7) If the packet contains a "Stale cookie" ERROR
701 * or a COOKIE ACK the SCTP Packet should be silently
702 * discarded.
703 */
704 if (SCTP_CID_COOKIE_ACK == ch->type)
705 goto discard;
706
707 if (SCTP_CID_ERROR == ch->type) {
708 sctp_walk_errors(err, ch) {
709 if (SCTP_ERROR_STALE_COOKIE == err->cause)
710 goto discard;
711 }
712 }
713
714 ch = (sctp_chunkhdr_t *) ch_end; 699 ch = (sctp_chunkhdr_t *) ch_end;
715 } while (ch_end < skb_tail_pointer(skb)); 700 } while (ch_end < skb_tail_pointer(skb));
716 701
@@ -1017,7 +1002,7 @@ static struct sctp_association *__sctp_rcv_asconf_lookup(
1017 /* Skip over the ADDIP header and find the Address parameter */ 1002 /* Skip over the ADDIP header and find the Address parameter */
1018 param = (union sctp_addr_param *)(asconf + 1); 1003 param = (union sctp_addr_param *)(asconf + 1);
1019 1004
1020 af = sctp_get_af_specific(param_type2af(param->v4.param_hdr.type)); 1005 af = sctp_get_af_specific(param_type2af(param->p.type));
1021 if (unlikely(!af)) 1006 if (unlikely(!af))
1022 return NULL; 1007 return NULL;
1023 1008
@@ -1034,7 +1019,7 @@ static struct sctp_association *__sctp_rcv_asconf_lookup(
1034* association. 1019* association.
1035* 1020*
1036* This means that any chunks that can help us identify the association need 1021* This means that any chunks that can help us identify the association need
1037* to be looked at to find this assocation. 1022* to be looked at to find this association.
1038*/ 1023*/
1039static struct sctp_association *__sctp_rcv_walk_lookup(struct sk_buff *skb, 1024static struct sctp_association *__sctp_rcv_walk_lookup(struct sk_buff *skb,
1040 const union sctp_addr *laddr, 1025 const union sctp_addr *laddr,
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 865ce7ba4e14..321f175055bf 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -531,7 +531,7 @@ static int sctp_v6_is_any(const union sctp_addr *addr)
531static int sctp_v6_available(union sctp_addr *addr, struct sctp_sock *sp) 531static int sctp_v6_available(union sctp_addr *addr, struct sctp_sock *sp)
532{ 532{
533 int type; 533 int type;
534 struct in6_addr *in6 = (struct in6_addr *)&addr->v6.sin6_addr; 534 const struct in6_addr *in6 = (const struct in6_addr *)&addr->v6.sin6_addr;
535 535
536 type = ipv6_addr_type(in6); 536 type = ipv6_addr_type(in6);
537 if (IPV6_ADDR_ANY == type) 537 if (IPV6_ADDR_ANY == type)
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 60600d337a3a..b4f3cf06d8da 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -510,7 +510,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
510 sh->checksum = sctp_end_cksum(crc32); 510 sh->checksum = sctp_end_cksum(crc32);
511 } else { 511 } else {
512 if (dst->dev->features & NETIF_F_SCTP_CSUM) { 512 if (dst->dev->features & NETIF_F_SCTP_CSUM) {
513 /* no need to seed psuedo checksum for SCTP */ 513 /* no need to seed pseudo checksum for SCTP */
514 nskb->ip_summed = CHECKSUM_PARTIAL; 514 nskb->ip_summed = CHECKSUM_PARTIAL;
515 nskb->csum_start = (skb_transport_header(nskb) - 515 nskb->csum_start = (skb_transport_header(nskb) -
516 nskb->head); 516 nskb->head);
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 26dc005113a0..1c88c8911dc5 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -131,7 +131,8 @@ static inline int sctp_cacc_skip_3_1_d(struct sctp_transport *primary,
131static inline int sctp_cacc_skip_3_1_f(struct sctp_transport *transport, 131static inline int sctp_cacc_skip_3_1_f(struct sctp_transport *transport,
132 int count_of_newacks) 132 int count_of_newacks)
133{ 133{
134 if (count_of_newacks < 2 && !transport->cacc.cacc_saw_newack) 134 if (count_of_newacks < 2 &&
135 (transport && !transport->cacc.cacc_saw_newack))
135 return 1; 136 return 1;
136 return 0; 137 return 0;
137} 138}
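The sctp_cacc_skip_3_1_f() change above is a plain NULL guard: transport is only dereferenced behind the new transport && test, so a missing transport now simply fails the condition instead of oopsing. A toy model with invented types:

#include <stdbool.h>
#include <stdio.h>

struct toy_transport { bool saw_newack; };

static int skip_3_1_f(const struct toy_transport *t, int count_of_newacks)
{
	/* Dereference t only after the NULL check. */
	if (count_of_newacks < 2 && (t && !t->saw_newack))
		return 1;
	return 0;
}

int main(void)
{
	printf("%d %d\n", skip_3_1_f(NULL, 1), skip_3_1_f(NULL, 3));
	return 0;
}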
@@ -177,13 +178,13 @@ static inline int sctp_cacc_skip_3_2(struct sctp_transport *primary, __u32 tsn)
177 * 3) If the missing report count for TSN t is to be 178 * 3) If the missing report count for TSN t is to be
178 * incremented according to [RFC2960] and 179 * incremented according to [RFC2960] and
179 * [SCTP_STEWART-2002], and CHANGEOVER_ACTIVE is set, 180 * [SCTP_STEWART-2002], and CHANGEOVER_ACTIVE is set,
180 * then the sender MUST futher execute steps 3.1 and 181 * then the sender MUST further execute steps 3.1 and
181 * 3.2 to determine if the missing report count for 182 * 3.2 to determine if the missing report count for
182 * TSN t SHOULD NOT be incremented. 183 * TSN t SHOULD NOT be incremented.
183 * 184 *
184 * 3.3) If 3.1 and 3.2 do not dictate that the missing 185 * 3.3) If 3.1 and 3.2 do not dictate that the missing
185 * report count for t should not be incremented, then 186 * report count for t should not be incremented, then
186 * the sender SOULD increment missing report count for 187 * the sender SHOULD increment missing report count for
187 * t (according to [RFC2960] and [SCTP_STEWART_2002]). 188 * t (according to [RFC2960] and [SCTP_STEWART_2002]).
188 */ 189 */
189static inline int sctp_cacc_skip(struct sctp_transport *primary, 190static inline int sctp_cacc_skip(struct sctp_transport *primary,
@@ -319,7 +320,6 @@ int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
319 * chunk. 320 * chunk.
320 */ 321 */
321 switch (q->asoc->state) { 322 switch (q->asoc->state) {
322 case SCTP_STATE_EMPTY:
323 case SCTP_STATE_CLOSED: 323 case SCTP_STATE_CLOSED:
324 case SCTP_STATE_SHUTDOWN_PENDING: 324 case SCTP_STATE_SHUTDOWN_PENDING:
325 case SCTP_STATE_SHUTDOWN_SENT: 325 case SCTP_STATE_SHUTDOWN_SENT:
@@ -577,6 +577,13 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
577 * try to send as much as possible. 577 * try to send as much as possible.
578 */ 578 */
579 list_for_each_entry_safe(chunk, chunk1, lqueue, transmitted_list) { 579 list_for_each_entry_safe(chunk, chunk1, lqueue, transmitted_list) {
580 /* If the chunk is abandoned, move it to abandoned list. */
581 if (sctp_chunk_abandoned(chunk)) {
582 list_del_init(&chunk->transmitted_list);
583 sctp_insert_list(&q->abandoned,
584 &chunk->transmitted_list);
585 continue;
586 }
580 587
581 /* Make sure that Gap Acked TSNs are not retransmitted. A 588 /* Make sure that Gap Acked TSNs are not retransmitted. A
582 * simple approach is just to move such TSNs out of the 589 * simple approach is just to move such TSNs out of the
@@ -618,9 +625,12 @@ redo:
618 625
619 /* If we are retransmitting, we should only 626 /* If we are retransmitting, we should only
620 * send a single packet. 627 * send a single packet.
628 * Otherwise, try appending this chunk again.
621 */ 629 */
622 if (rtx_timeout || fast_rtx) 630 if (rtx_timeout || fast_rtx)
623 done = 1; 631 done = 1;
632 else
633 goto redo;
624 634
625 /* Bundle next chunk in the next round. */ 635 /* Bundle next chunk in the next round. */
626 break; 636 break;
@@ -843,7 +853,7 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
843 case SCTP_CID_ECN_CWR: 853 case SCTP_CID_ECN_CWR:
844 case SCTP_CID_ASCONF_ACK: 854 case SCTP_CID_ASCONF_ACK:
845 one_packet = 1; 855 one_packet = 1;
846 /* Fall throught */ 856 /* Fall through */
847 857
848 case SCTP_CID_SACK: 858 case SCTP_CID_SACK:
849 case SCTP_CID_HEARTBEAT: 859 case SCTP_CID_HEARTBEAT:
@@ -1683,8 +1693,9 @@ static void sctp_mark_missing(struct sctp_outq *q,
1683 /* SFR-CACC may require us to skip marking 1693 /* SFR-CACC may require us to skip marking
1684 * this chunk as missing. 1694 * this chunk as missing.
1685 */ 1695 */
1686 if (!transport || !sctp_cacc_skip(primary, transport, 1696 if (!transport || !sctp_cacc_skip(primary,
1687 count_of_newacks, tsn)) { 1697 chunk->transport,
1698 count_of_newacks, tsn)) {
1688 chunk->tsn_missing_report++; 1699 chunk->tsn_missing_report++;
1689 1700
1690 SCTP_DEBUG_PRINTK( 1701 SCTP_DEBUG_PRINTK(
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index b3434cc7d0cf..58eb27fed4b4 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1075,20 +1075,28 @@ nodata:
1075 1075
1076/* Make a HEARTBEAT chunk. */ 1076/* Make a HEARTBEAT chunk. */
1077struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc, 1077struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc,
1078 const struct sctp_transport *transport, 1078 const struct sctp_transport *transport)
1079 const void *payload, const size_t paylen)
1080{ 1079{
1081 struct sctp_chunk *retval = sctp_make_chunk(asoc, SCTP_CID_HEARTBEAT, 1080 struct sctp_chunk *retval;
1082 0, paylen); 1081 sctp_sender_hb_info_t hbinfo;
1082
1083 retval = sctp_make_chunk(asoc, SCTP_CID_HEARTBEAT, 0, sizeof(hbinfo));
1083 1084
1084 if (!retval) 1085 if (!retval)
1085 goto nodata; 1086 goto nodata;
1086 1087
1088 hbinfo.param_hdr.type = SCTP_PARAM_HEARTBEAT_INFO;
1089 hbinfo.param_hdr.length = htons(sizeof(sctp_sender_hb_info_t));
1090 hbinfo.daddr = transport->ipaddr;
1091 hbinfo.sent_at = jiffies;
1092 hbinfo.hb_nonce = transport->hb_nonce;
1093
1087 /* Cast away the 'const', as this is just telling the chunk 1094 /* Cast away the 'const', as this is just telling the chunk
1088 * what transport it belongs to. 1095 * what transport it belongs to.
1089 */ 1096 */
1090 retval->transport = (struct sctp_transport *) transport; 1097 retval->transport = (struct sctp_transport *) transport;
1091 retval->subh.hbs_hdr = sctp_addto_chunk(retval, paylen, payload); 1098 retval->subh.hbs_hdr = sctp_addto_chunk(retval, sizeof(hbinfo),
1099 &hbinfo);
1092 1100
1093nodata: 1101nodata:
1094 return retval; 1102 return retval;
@@ -2242,14 +2250,17 @@ int sctp_verify_init(const struct sctp_association *asoc,
2242 * Returns 0 on failure, else success. 2250 * Returns 0 on failure, else success.
2243 * FIXME: This is an association method. 2251 * FIXME: This is an association method.
2244 */ 2252 */
2245int sctp_process_init(struct sctp_association *asoc, sctp_cid_t cid, 2253int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
2246 const union sctp_addr *peer_addr, 2254 const union sctp_addr *peer_addr,
2247 sctp_init_chunk_t *peer_init, gfp_t gfp) 2255 sctp_init_chunk_t *peer_init, gfp_t gfp)
2248{ 2256{
2249 union sctp_params param; 2257 union sctp_params param;
2250 struct sctp_transport *transport; 2258 struct sctp_transport *transport;
2251 struct list_head *pos, *temp; 2259 struct list_head *pos, *temp;
2260 struct sctp_af *af;
2261 union sctp_addr addr;
2252 char *cookie; 2262 char *cookie;
2263 int src_match = 0;
2253 2264
2254 /* We must include the address that the INIT packet came from. 2265 /* We must include the address that the INIT packet came from.
2255 * This is the only address that matters for an INIT packet. 2266 * This is the only address that matters for an INIT packet.
@@ -2261,18 +2272,31 @@ int sctp_process_init(struct sctp_association *asoc, sctp_cid_t cid,
2261 * added as the primary transport. The source address seems to 2272 * added as the primary transport. The source address seems to
2262 * be a better choice than any of the embedded addresses. 2273 * be a better choice than any of the embedded addresses.
2263 */ 2274 */
2264 if (peer_addr) { 2275 if(!sctp_assoc_add_peer(asoc, peer_addr, gfp, SCTP_ACTIVE))
2265 if(!sctp_assoc_add_peer(asoc, peer_addr, gfp, SCTP_ACTIVE)) 2276 goto nomem;
2266 goto nomem; 2277
2267 } 2278 if (sctp_cmp_addr_exact(sctp_source(chunk), peer_addr))
2279 src_match = 1;
2268 2280
2269 /* Process the initialization parameters. */ 2281 /* Process the initialization parameters. */
2270 sctp_walk_params(param, peer_init, init_hdr.params) { 2282 sctp_walk_params(param, peer_init, init_hdr.params) {
2283 if (!src_match && (param.p->type == SCTP_PARAM_IPV4_ADDRESS ||
2284 param.p->type == SCTP_PARAM_IPV6_ADDRESS)) {
2285 af = sctp_get_af_specific(param_type2af(param.p->type));
2286 af->from_addr_param(&addr, param.addr,
2287 chunk->sctp_hdr->source, 0);
2288 if (sctp_cmp_addr_exact(sctp_source(chunk), &addr))
2289 src_match = 1;
2290 }
2271 2291
2272 if (!sctp_process_param(asoc, param, peer_addr, gfp)) 2292 if (!sctp_process_param(asoc, param, peer_addr, gfp))
2273 goto clean_up; 2293 goto clean_up;
2274 } 2294 }
2275 2295
2296 /* source address of chunk may not match any valid address */
2297 if (!src_match)
2298 goto clean_up;
2299
2276 /* AUTH: After processing the parameters, make sure that we 2300 /* AUTH: After processing the parameters, make sure that we
2277 * have all the required info to potentially do authentications. 2301 * have all the required info to potentially do authentications.
2278 */ 2302 */
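The src_match logic added to sctp_process_init() above enforces that the INIT's actual source address is either the peer address already recorded or one of the addresses embedded in the INIT's address parameters; otherwise the new clean_up path rejects the association. A compilable toy of the matching loop, with invented types and 192.0.2.0/24 documentation addresses:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct toy_addr { unsigned char b[4]; };

static bool addr_eq(const struct toy_addr *x, const struct toy_addr *y)
{
	return memcmp(x->b, y->b, sizeof(x->b)) == 0;
}

int main(void)
{
	struct toy_addr src = { { 192, 0, 2, 1 } };
	struct toy_addr params[] = { { { 198, 51, 100, 7 } },
				     { { 192, 0, 2, 1 } } };
	bool src_match = false;
	size_t i;

	for (i = 0; i < sizeof(params) / sizeof(params[0]); i++)
		if (addr_eq(&src, &params[i]))
			src_match = true;

	/* Mirrors the new clean_up path: reject when nothing matched. */
	printf("%s\n", src_match ? "accept INIT" : "clean up");
	return 0;
}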
@@ -2923,7 +2947,7 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
2923 asconf_param->param_hdr.type != SCTP_PARAM_SET_PRIMARY) 2947 asconf_param->param_hdr.type != SCTP_PARAM_SET_PRIMARY)
2924 return SCTP_ERROR_UNKNOWN_PARAM; 2948 return SCTP_ERROR_UNKNOWN_PARAM;
2925 2949
2926 switch (addr_param->v4.param_hdr.type) { 2950 switch (addr_param->p.type) {
2927 case SCTP_PARAM_IPV6_ADDRESS: 2951 case SCTP_PARAM_IPV6_ADDRESS:
2928 if (!asoc->peer.ipv6_address) 2952 if (!asoc->peer.ipv6_address)
2929 return SCTP_ERROR_DNS_FAILED; 2953 return SCTP_ERROR_DNS_FAILED;
@@ -2936,7 +2960,7 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
2936 return SCTP_ERROR_DNS_FAILED; 2960 return SCTP_ERROR_DNS_FAILED;
2937 } 2961 }
2938 2962
2939 af = sctp_get_af_specific(param_type2af(addr_param->v4.param_hdr.type)); 2963 af = sctp_get_af_specific(param_type2af(addr_param->p.type));
2940 if (unlikely(!af)) 2964 if (unlikely(!af))
2941 return SCTP_ERROR_DNS_FAILED; 2965 return SCTP_ERROR_DNS_FAILED;
2942 2966
@@ -3100,7 +3124,7 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
3100 /* Skip the address parameter and store a pointer to the first 3124 /* Skip the address parameter and store a pointer to the first
3101 * asconf parameter. 3125 * asconf parameter.
3102 */ 3126 */
3103 length = ntohs(addr_param->v4.param_hdr.length); 3127 length = ntohs(addr_param->p.length);
3104 asconf_param = (sctp_addip_param_t *)((void *)addr_param + length); 3128 asconf_param = (sctp_addip_param_t *)((void *)addr_param + length);
3105 chunk_len -= length; 3129 chunk_len -= length;
3106 3130
@@ -3177,7 +3201,7 @@ static void sctp_asconf_param_success(struct sctp_association *asoc,
3177 ((void *)asconf_param + sizeof(sctp_addip_param_t)); 3201 ((void *)asconf_param + sizeof(sctp_addip_param_t));
3178 3202
3179 /* We have checked the packet before, so we do not check again. */ 3203 /* We have checked the packet before, so we do not check again. */
3180 af = sctp_get_af_specific(param_type2af(addr_param->v4.param_hdr.type)); 3204 af = sctp_get_af_specific(param_type2af(addr_param->p.type));
3181 af->from_addr_param(&addr, addr_param, htons(bp->port), 0); 3205 af->from_addr_param(&addr, addr_param, htons(bp->port), 0);
3182 3206
3183 switch (asconf_param->param_hdr.type) { 3207 switch (asconf_param->param_hdr.type) {
@@ -3193,11 +3217,8 @@ static void sctp_asconf_param_success(struct sctp_association *asoc,
3193 local_bh_enable(); 3217 local_bh_enable();
3194 list_for_each_entry(transport, &asoc->peer.transport_addr_list, 3218 list_for_each_entry(transport, &asoc->peer.transport_addr_list,
3195 transports) { 3219 transports) {
3196 if (transport->state == SCTP_ACTIVE)
3197 continue;
3198 dst_release(transport->dst); 3220 dst_release(transport->dst);
3199 sctp_transport_route(transport, NULL, 3221 transport->dst = NULL;
3200 sctp_sk(asoc->base.sk));
3201 } 3222 }
3202 break; 3223 break;
3203 case SCTP_PARAM_DEL_IP: 3224 case SCTP_PARAM_DEL_IP:
@@ -3207,8 +3228,7 @@ static void sctp_asconf_param_success(struct sctp_association *asoc,
3207 list_for_each_entry(transport, &asoc->peer.transport_addr_list, 3228 list_for_each_entry(transport, &asoc->peer.transport_addr_list,
3208 transports) { 3229 transports) {
3209 dst_release(transport->dst); 3230 dst_release(transport->dst);
3210 sctp_transport_route(transport, NULL, 3231 transport->dst = NULL;
3211 sctp_sk(asoc->base.sk));
3212 } 3232 }
3213 break; 3233 break;
3214 default: 3234 default:
@@ -3304,7 +3324,7 @@ int sctp_process_asconf_ack(struct sctp_association *asoc,
3304 /* Skip the address parameter in the last asconf sent and store a 3324 /* Skip the address parameter in the last asconf sent and store a
3305 * pointer to the first asconf parameter. 3325 * pointer to the first asconf parameter.
3306 */ 3326 */
3307 length = ntohs(addr_param->v4.param_hdr.length); 3327 length = ntohs(addr_param->p.length);
3308 asconf_param = (sctp_addip_param_t *)((void *)addr_param + length); 3328 asconf_param = (sctp_addip_param_t *)((void *)addr_param + length);
3309 asconf_len -= length; 3329 asconf_len -= length;
3310 3330
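Several hunks above replace addr_param->v4.param_hdr.type (and .length) with addr_param->p.type: every member of the address-parameter union starts with the same parameter header, so the generic p spelling reads the very same bytes while no longer suggesting the parameter is IPv4. A toy rendering of the union layout, with invented types:

#include <stdio.h>

struct toy_hdr { unsigned short type, length; };
struct toy_v4 { struct toy_hdr hdr; unsigned int addr; };
union toy_param { struct toy_hdr p; struct toy_v4 v4; };

int main(void)
{
	union toy_param param;

	param.v4.hdr.type = 5;
	param.v4.hdr.length = 8;
	param.v4.addr = 0;

	/* Both spellings alias the same leading header. */
	printf("%u %u\n", param.p.type, param.v4.hdr.type);
	return 0;
}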
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index b21b218d564f..d612ca1ca6c0 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -482,7 +482,7 @@ static void sctp_do_8_2_transport_strike(struct sctp_association *asoc,
482 * If the timer was a heartbeat, we only increment error counts 482 * If the timer was a heartbeat, we only increment error counts
483 * when we already have an outstanding HEARTBEAT that has not 483 * when we already have an outstanding HEARTBEAT that has not
484 * been acknowledged. 484 * been acknowledged.
485 * Additionaly, some tranport states inhibit error increments. 485 * Additionally, some transport states inhibit error increments.
486 */ 486 */
487 if (!is_hb) { 487 if (!is_hb) {
488 asoc->overall_error_count++; 488 asoc->overall_error_count++;
@@ -595,8 +595,7 @@ static int sctp_cmd_process_init(sctp_cmd_seq_t *commands,
595 * fail during INIT processing (due to malloc problems), 595 * fail during INIT processing (due to malloc problems),
596 * just return the error and stop processing the stack. 596 * just return the error and stop processing the stack.
597 */ 597 */
598 if (!sctp_process_init(asoc, chunk->chunk_hdr->type, 598 if (!sctp_process_init(asoc, chunk, sctp_source(chunk), peer_init, gfp))
599 sctp_source(chunk), peer_init, gfp))
600 error = -ENOMEM; 599 error = -ENOMEM;
601 else 600 else
602 error = 0; 601 error = 0;
@@ -1415,12 +1414,6 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
1415 SCTP_RTXR_T3_RTX); 1414 SCTP_RTXR_T3_RTX);
1416 break; 1415 break;
1417 1416
1418 case SCTP_CMD_TRANSMIT:
1419 /* Kick start transmission. */
1420 error = sctp_outq_uncork(&asoc->outqueue);
1421 local_cork = 0;
1422 break;
1423
1424 case SCTP_CMD_ECN_CE: 1417 case SCTP_CMD_ECN_CE:
1425 /* Do delayed CE processing. */ 1418 /* Do delayed CE processing. */
1426 sctp_do_ecn_ce_work(asoc, cmd->obj.u32); 1419 sctp_do_ecn_ce_work(asoc, cmd->obj.u32);
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 4b4eb7c96bbd..7f4a4f8368ee 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -393,8 +393,7 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
393 goto nomem_init; 393 goto nomem_init;
394 394
395 /* The call, sctp_process_init(), can fail on memory allocation. */ 395 /* The call, sctp_process_init(), can fail on memory allocation. */
396 if (!sctp_process_init(new_asoc, chunk->chunk_hdr->type, 396 if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk),
397 sctp_source(chunk),
398 (sctp_init_chunk_t *)chunk->chunk_hdr, 397 (sctp_init_chunk_t *)chunk->chunk_hdr,
399 GFP_ATOMIC)) 398 GFP_ATOMIC))
400 goto nomem_init; 399 goto nomem_init;
@@ -551,7 +550,7 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep,
551 * 550 *
552 * This means that if we only want to abort associations 551 * This means that if we only want to abort associations
553 * in an authenticated way (i.e AUTH+ABORT), then we 552 * in an authenticated way (i.e AUTH+ABORT), then we
554 * can't destroy this association just becuase the packet 553 * can't destroy this association just because the packet
555 * was malformed. 554 * was malformed.
556 */ 555 */
557 if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc)) 556 if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc))
@@ -725,7 +724,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep,
725 */ 724 */
726 peer_init = &chunk->subh.cookie_hdr->c.peer_init[0]; 725 peer_init = &chunk->subh.cookie_hdr->c.peer_init[0];
727 726
728 if (!sctp_process_init(new_asoc, chunk->chunk_hdr->type, 727 if (!sctp_process_init(new_asoc, chunk,
729 &chunk->subh.cookie_hdr->c.peer_addr, 728 &chunk->subh.cookie_hdr->c.peer_addr,
730 peer_init, GFP_ATOMIC)) 729 peer_init, GFP_ATOMIC))
731 goto nomem_init; 730 goto nomem_init;
@@ -942,18 +941,9 @@ static sctp_disposition_t sctp_sf_heartbeat(const struct sctp_endpoint *ep,
942{ 941{
943 struct sctp_transport *transport = (struct sctp_transport *) arg; 942 struct sctp_transport *transport = (struct sctp_transport *) arg;
944 struct sctp_chunk *reply; 943 struct sctp_chunk *reply;
945 sctp_sender_hb_info_t hbinfo;
946 size_t paylen = 0;
947
948 hbinfo.param_hdr.type = SCTP_PARAM_HEARTBEAT_INFO;
949 hbinfo.param_hdr.length = htons(sizeof(sctp_sender_hb_info_t));
950 hbinfo.daddr = transport->ipaddr;
951 hbinfo.sent_at = jiffies;
952 hbinfo.hb_nonce = transport->hb_nonce;
953 944
954 /* Send a heartbeat to our peer. */ 945 /* Send a heartbeat to our peer. */
955 paylen = sizeof(sctp_sender_hb_info_t); 946 reply = sctp_make_heartbeat(asoc, transport);
956 reply = sctp_make_heartbeat(asoc, transport, &hbinfo, paylen);
957 if (!reply) 947 if (!reply)
958 return SCTP_DISPOSITION_NOMEM; 948 return SCTP_DISPOSITION_NOMEM;
959 949
@@ -1464,8 +1454,7 @@ static sctp_disposition_t sctp_sf_do_unexpected_init(
1464 * Verification Tag and Peers Verification tag into a reserved 1454 * Verification Tag and Peers Verification tag into a reserved
1465 * place (local tie-tag and per tie-tag) within the state cookie. 1455 * place (local tie-tag and per tie-tag) within the state cookie.
1466 */ 1456 */
1467 if (!sctp_process_init(new_asoc, chunk->chunk_hdr->type, 1457 if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk),
1468 sctp_source(chunk),
1469 (sctp_init_chunk_t *)chunk->chunk_hdr, 1458 (sctp_init_chunk_t *)chunk->chunk_hdr,
1470 GFP_ATOMIC)) 1459 GFP_ATOMIC))
1471 goto nomem; 1460 goto nomem;
@@ -1546,7 +1535,7 @@ cleanup:
1546} 1535}
1547 1536
1548/* 1537/*
1549 * Handle simultanous INIT. 1538 * Handle simultaneous INIT.
1550 * This means we started an INIT and then we got an INIT request from 1539 * This means we started an INIT and then we got an INIT request from
1551 * our peer. 1540 * our peer.
1552 * 1541 *
@@ -1694,8 +1683,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(const struct sctp_endpoint *ep,
1694 */ 1683 */
1695 peer_init = &chunk->subh.cookie_hdr->c.peer_init[0]; 1684 peer_init = &chunk->subh.cookie_hdr->c.peer_init[0];
1696 1685
1697 if (!sctp_process_init(new_asoc, chunk->chunk_hdr->type, 1686 if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk), peer_init,
1698 sctp_source(chunk), peer_init,
1699 GFP_ATOMIC)) 1687 GFP_ATOMIC))
1700 goto nomem; 1688 goto nomem;
1701 1689
@@ -1780,8 +1768,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_b(const struct sctp_endpoint *ep,
1780 * side effects--it is safe to run them here. 1768 * side effects--it is safe to run them here.
1781 */ 1769 */
1782 peer_init = &chunk->subh.cookie_hdr->c.peer_init[0]; 1770 peer_init = &chunk->subh.cookie_hdr->c.peer_init[0];
1783 if (!sctp_process_init(new_asoc, chunk->chunk_hdr->type, 1771 if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk), peer_init,
1784 sctp_source(chunk), peer_init,
1785 GFP_ATOMIC)) 1772 GFP_ATOMIC))
1786 goto nomem; 1773 goto nomem;
1787 1774
@@ -2079,7 +2066,7 @@ sctp_disposition_t sctp_sf_shutdown_pending_abort(
2079 * RFC 2960, Section 3.3.7 2066 * RFC 2960, Section 3.3.7
2080 * If an endpoint receives an ABORT with a format error or for an 2067 * If an endpoint receives an ABORT with a format error or for an
2081 * association that doesn't exist, it MUST silently discard it. 2068 * association that doesn't exist, it MUST silently discard it.
2082 * Becasue the length is "invalid", we can't really discard just 2069 * Because the length is "invalid", we can't really discard just
2083 * as we do not know its true length. So, to be safe, discard the 2070 * as we do not know its true length. So, to be safe, discard the
2084 * packet. 2071 * packet.
2085 */ 2072 */
@@ -2120,7 +2107,7 @@ sctp_disposition_t sctp_sf_shutdown_sent_abort(const struct sctp_endpoint *ep,
2120 * RFC 2960, Section 3.3.7 2107 * RFC 2960, Section 3.3.7
2121 * If an endpoint receives an ABORT with a format error or for an 2108 * If an endpoint receives an ABORT with a format error or for an
2122 * association that doesn't exist, it MUST silently discard it. 2109 * association that doesn't exist, it MUST silently discard it.
2123 * Becasue the length is "invalid", we can't really discard just 2110 * Because the length is "invalid", we can't really discard just
2124 * as we do not know its true length. So, to be safe, discard the 2111 * as we do not know its true length. So, to be safe, discard the
2125 * packet. 2112 * packet.
2126 */ 2113 */
@@ -2381,7 +2368,7 @@ sctp_disposition_t sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
2381 * RFC 2960, Section 3.3.7 2368 * RFC 2960, Section 3.3.7
2382 * If an endpoint receives an ABORT with a format error or for an 2369 * If an endpoint receives an ABORT with a format error or for an
2383 * association that doesn't exist, it MUST silently discard it. 2370 * association that doesn't exist, it MUST silently discard it.
2384 * Becasue the length is "invalid", we can't really discard just 2371 * Because the length is "invalid", we can't really discard just
2385 * as we do not know its true length. So, to be safe, discard the 2372 * as we do not know its true length. So, to be safe, discard the
2386 * packet. 2373 * packet.
2387 */ 2374 */
@@ -2412,8 +2399,15 @@ static sctp_disposition_t __sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
2412 2399
2413 /* See if we have an error cause code in the chunk. */ 2400 /* See if we have an error cause code in the chunk. */
2414 len = ntohs(chunk->chunk_hdr->length); 2401 len = ntohs(chunk->chunk_hdr->length);
2415 if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr)) 2402 if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr)) {
2403
2404 sctp_errhdr_t *err;
2405 sctp_walk_errors(err, chunk->chunk_hdr);
2406 if ((void *)err != (void *)chunk->chunk_end)
2407 return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
2408
2416 error = ((sctp_errhdr_t *)chunk->skb->data)->cause; 2409 error = ((sctp_errhdr_t *)chunk->skb->data)->cause;
2410 }
2417 2411
2418 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNRESET)); 2412 sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNRESET));
2419 /* ASSOC_FAILED will DELETE_TCB. */ 2413 /* ASSOC_FAILED will DELETE_TCB. */
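This hunk and the sctp_sf_operr_notify() one further down apply the same sanity rule: after sctp_walk_errors() finishes, the cursor must sit exactly on chunk_end; if an embedded cause length made it stop short or overshoot, the chunk is discarded (or reported as a parameter-length violation) rather than parsed. A standalone toy of that bounds-checked TLV walk, with invented formats:

#include <stdio.h>

struct tlv { unsigned char type, len; };	/* len covers the whole TLV */

static int walk_ok(const unsigned char *p, const unsigned char *end)
{
	while (p + sizeof(struct tlv) <= end) {
		const struct tlv *t = (const struct tlv *)p;

		if (t->len < sizeof(struct tlv))
			return 0;		/* degenerate length */
		p += t->len;
	}
	return p == end;			/* must land exactly on the end */
}

int main(void)
{
	unsigned char good[] = { 1, 4, 0, 0, 2, 2 };
	unsigned char bad[]  = { 1, 5, 0, 0, 2, 2 };	/* bogus length leaves a 1-byte tail */

	printf("good=%d bad=%d\n",
	       walk_ok(good, good + sizeof(good)),
	       walk_ok(bad,  bad  + sizeof(bad)));
	return 0;
}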
@@ -2448,7 +2442,7 @@ sctp_disposition_t sctp_sf_cookie_wait_abort(const struct sctp_endpoint *ep,
2448 * RFC 2960, Section 3.3.7 2442 * RFC 2960, Section 3.3.7
2449 * If an endpoint receives an ABORT with a format error or for an 2443 * If an endpoint receives an ABORT with a format error or for an
2450 * association that doesn't exist, it MUST silently discard it. 2444 * association that doesn't exist, it MUST silently discard it.
2451 * Becasue the length is "invalid", we can't really discard just 2445 * Because the length is "invalid", we can't really discard just
2452 * as we do not know its true length. So, to be safe, discard the 2446 * as we do not know its true length. So, to be safe, discard the
2453 * packet. 2447 * packet.
2454 */ 2448 */
@@ -3204,6 +3198,7 @@ sctp_disposition_t sctp_sf_operr_notify(const struct sctp_endpoint *ep,
3204 sctp_cmd_seq_t *commands) 3198 sctp_cmd_seq_t *commands)
3205{ 3199{
3206 struct sctp_chunk *chunk = arg; 3200 struct sctp_chunk *chunk = arg;
3201 sctp_errhdr_t *err;
3207 3202
3208 if (!sctp_vtag_verify(chunk, asoc)) 3203 if (!sctp_vtag_verify(chunk, asoc))
3209 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 3204 return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
@@ -3212,6 +3207,10 @@ sctp_disposition_t sctp_sf_operr_notify(const struct sctp_endpoint *ep,
3212 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_operr_chunk_t))) 3207 if (!sctp_chunk_length_valid(chunk, sizeof(sctp_operr_chunk_t)))
3213 return sctp_sf_violation_chunklen(ep, asoc, type, arg, 3208 return sctp_sf_violation_chunklen(ep, asoc, type, arg,
3214 commands); 3209 commands);
3210 sctp_walk_errors(err, chunk->chunk_hdr);
3211 if ((void *)err != (void *)chunk->chunk_end)
3212 return sctp_sf_violation_paramlen(ep, asoc, type, arg,
3213 (void *)err, commands);
3215 3214
3216 sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_OPERR, 3215 sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_OPERR,
3217 SCTP_CHUNK(chunk)); 3216 SCTP_CHUNK(chunk));
@@ -3320,8 +3319,10 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
3320 struct sctp_chunk *chunk = arg; 3319 struct sctp_chunk *chunk = arg;
3321 struct sk_buff *skb = chunk->skb; 3320 struct sk_buff *skb = chunk->skb;
3322 sctp_chunkhdr_t *ch; 3321 sctp_chunkhdr_t *ch;
3322 sctp_errhdr_t *err;
3323 __u8 *ch_end; 3323 __u8 *ch_end;
3324 int ootb_shut_ack = 0; 3324 int ootb_shut_ack = 0;
3325 int ootb_cookie_ack = 0;
3325 3326
3326 SCTP_INC_STATS(SCTP_MIB_OUTOFBLUES); 3327 SCTP_INC_STATS(SCTP_MIB_OUTOFBLUES);
3327 3328
@@ -3346,6 +3347,23 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
3346 if (SCTP_CID_ABORT == ch->type) 3347 if (SCTP_CID_ABORT == ch->type)
3347 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 3348 return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
3348 3349
3350 /* RFC 8.4, 7) If the packet contains a "Stale cookie" ERROR
3351 * or a COOKIE ACK the SCTP Packet should be silently
3352 * discarded.
3353 */
3354
3355 if (SCTP_CID_COOKIE_ACK == ch->type)
3356 ootb_cookie_ack = 1;
3357
3358 if (SCTP_CID_ERROR == ch->type) {
3359 sctp_walk_errors(err, ch) {
3360 if (SCTP_ERROR_STALE_COOKIE == err->cause) {
3361 ootb_cookie_ack = 1;
3362 break;
3363 }
3364 }
3365 }
3366
3349 /* Report violation if chunk len overflows */ 3367 /* Report violation if chunk len overflows */
3350 ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length)); 3368 ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
3351 if (ch_end > skb_tail_pointer(skb)) 3369 if (ch_end > skb_tail_pointer(skb))
@@ -3357,6 +3375,8 @@ sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep,
3357 3375
3358 if (ootb_shut_ack) 3376 if (ootb_shut_ack)
3359 return sctp_sf_shut_8_4_5(ep, asoc, type, arg, commands); 3377 return sctp_sf_shut_8_4_5(ep, asoc, type, arg, commands);
3378 else if (ootb_cookie_ack)
3379 return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
3360 else 3380 else
3361 return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands); 3381 return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
3362} 3382}
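The flag-and-fall-through structure above restores, at the right layer, the rule removed from sctp_rcv_ootb() in the net/sctp/input.c hunk earlier: an OOTB packet carrying a COOKIE ACK, or an ERROR chunk with a stale-cookie cause, is silently discarded rather than answered with an ABORT. A compilable toy of the verdict logic; the struct layout is invented, while the chunk-type and cause values match RFC 4960:

#include <stdbool.h>
#include <stdio.h>

enum { CID_ERROR = 9, CID_COOKIE_ACK = 11, CAUSE_STALE_COOKIE = 3 };

struct toy_chunk { int type; int cause; };

static const char *ootb_verdict(const struct toy_chunk *c, int n)
{
	bool cookie_ack = false;
	int i;

	for (i = 0; i < n; i++)
		if (c[i].type == CID_COOKIE_ACK ||
		    (c[i].type == CID_ERROR && c[i].cause == CAUSE_STALE_COOKIE))
			cookie_ack = true;

	return cookie_ack ? "discard" : "abort";
}

int main(void)
{
	struct toy_chunk pkt[] = { { CID_ERROR, CAUSE_STALE_COOKIE } };

	printf("%s\n", ootb_verdict(pkt, 1));	/* -> discard */
	return 0;
}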
@@ -3855,7 +3875,7 @@ gen_shutdown:
3855} 3875}
3856 3876
3857/* 3877/*
3858 * SCTP-AUTH Section 6.3 Receving authenticated chukns 3878 * SCTP-AUTH Section 6.3 Receiving authenticated chunks
3859 * 3879 *
3860 * The receiver MUST use the HMAC algorithm indicated in the HMAC 3880 * The receiver MUST use the HMAC algorithm indicated in the HMAC
3861 * Identifier field. If this algorithm was not specified by the 3881 * Identifier field. If this algorithm was not specified by the
@@ -4231,7 +4251,7 @@ static sctp_disposition_t sctp_sf_abort_violation(
4231 * 4251 *
4232 * This means that if we only want to abort associations 4252 * This means that if we only want to abort associations
4233 * in an authenticated way (i.e AUTH+ABORT), then we 4253 * in an authenticated way (i.e AUTH+ABORT), then we
4234 * can't destroy this association just becuase the packet 4254 * can't destroy this association just because the packet
4235 * was malformed. 4255 * was malformed.
4236 */ 4256 */
4237 if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc)) 4257 if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc))
@@ -4343,8 +4363,9 @@ static sctp_disposition_t sctp_sf_violation_chunklen(
4343 4363
4344/* 4364/*
4345 * Handle a protocol violation when the parameter length is invalid. 4365 * Handle a protocol violation when the parameter length is invalid.
4346 * "Invalid" length is identified as smaller than the minimal length a 4366 * If the length is smaller than the minimum length of a given parameter,
4347 * given parameter can be. 4367 * or the accumulated length of multiple parameters exceeds the end of the chunk,
4368 * the length is considered invalid.
4348 */ 4369 */
4349static sctp_disposition_t sctp_sf_violation_paramlen( 4370static sctp_disposition_t sctp_sf_violation_paramlen(
4350 const struct sctp_endpoint *ep, 4371 const struct sctp_endpoint *ep,
@@ -4402,9 +4423,9 @@ static sctp_disposition_t sctp_sf_violation_ctsn(
4402} 4423}
4403 4424
4404/* Handle protocol violation of an invalid chunk bundling. For example, 4425/* Handle protocol violation of an invalid chunk bundling. For example,
4405 * when we have an association and we recieve bundled INIT-ACK, or 4426 * when we have an association and we receive bundled INIT-ACK, or
4406 * SHUTDOWN-COMPLETE, our peer is clearly violating the "MUST NOT bundle" 4427 * SHUTDOWN-COMPLETE, our peer is clearly violating the "MUST NOT bundle"
4407 * statement from the specs. Additinally, there might be an attacker 4428 * statement from the specs. Additionally, there might be an attacker
4408 * on the path and we may not want to continue this communication. 4429 * on the path and we may not want to continue this communication.
4409 */ 4430 */
4410static sctp_disposition_t sctp_sf_violation_chunk( 4431static sctp_disposition_t sctp_sf_violation_chunk(
@@ -5056,6 +5077,30 @@ sctp_disposition_t sctp_sf_ignore_primitive(
5056 ***************************************************************************/ 5077 ***************************************************************************/
5057 5078
5058/* 5079/*
5080 * When the SCTP stack has no more user data to send or retransmit, this
5081 * notification is given to the user. Also, at the time when a user app
5082 * subscribes to this event, if there is no data to be sent or
5083 * retransmitted, the stack will immediately send up this notification.
5084 */
5085sctp_disposition_t sctp_sf_do_no_pending_tsn(
5086 const struct sctp_endpoint *ep,
5087 const struct sctp_association *asoc,
5088 const sctp_subtype_t type,
5089 void *arg,
5090 sctp_cmd_seq_t *commands)
5091{
5092 struct sctp_ulpevent *event;
5093
5094 event = sctp_ulpevent_make_sender_dry_event(asoc, GFP_ATOMIC);
5095 if (!event)
5096 return SCTP_DISPOSITION_NOMEM;
5097
5098 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(event));
5099
5100 return SCTP_DISPOSITION_CONSUME;
5101}
5102
5103/*
5059 * Start the shutdown negotiation. 5104 * Start the shutdown negotiation.
5060 * 5105 *
5061 * From Section 9.2: 5106 * From Section 9.2:
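sctp_sf_do_no_pending_tsn() above is what turns the NO_PENDING_TSN event into a SENDER DRY notification in the ESTABLISHED state (see the sm_statetable.c change below). From user space the event has to be opted into; a hedged sketch, assuming an lksctp-tools sctp.h whose struct sctp_event_subscribe already carries the sctp_sender_dry_event field:

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>	/* lksctp-tools */

/* Ask the stack to deliver SCTP_SENDER_DRY_EVENT notifications on sd. */
static int subscribe_sender_dry(int sd)
{
	struct sctp_event_subscribe ev;

	memset(&ev, 0, sizeof(ev));
	ev.sctp_sender_dry_event = 1;

	return setsockopt(sd, IPPROTO_SCTP, SCTP_EVENTS, &ev, sizeof(ev));
}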
diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c
index 546d4387fb3c..0338dc6fdc9d 100644
--- a/net/sctp/sm_statetable.c
+++ b/net/sctp/sm_statetable.c
@@ -107,8 +107,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
107#define TYPE_SCTP_FUNC(func) {.fn = func, .name = #func} 107#define TYPE_SCTP_FUNC(func) {.fn = func, .name = #func}
108 108
109#define TYPE_SCTP_DATA { \ 109#define TYPE_SCTP_DATA { \
110 /* SCTP_STATE_EMPTY */ \
111 TYPE_SCTP_FUNC(sctp_sf_ootb), \
112 /* SCTP_STATE_CLOSED */ \ 110 /* SCTP_STATE_CLOSED */ \
113 TYPE_SCTP_FUNC(sctp_sf_ootb), \ 111 TYPE_SCTP_FUNC(sctp_sf_ootb), \
114 /* SCTP_STATE_COOKIE_WAIT */ \ 112 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -128,8 +126,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
128} /* TYPE_SCTP_DATA */ 126} /* TYPE_SCTP_DATA */
129 127
130#define TYPE_SCTP_INIT { \ 128#define TYPE_SCTP_INIT { \
131 /* SCTP_STATE_EMPTY */ \
132 TYPE_SCTP_FUNC(sctp_sf_bug), \
133 /* SCTP_STATE_CLOSED */ \ 129 /* SCTP_STATE_CLOSED */ \
134 TYPE_SCTP_FUNC(sctp_sf_do_5_1B_init), \ 130 TYPE_SCTP_FUNC(sctp_sf_do_5_1B_init), \
135 /* SCTP_STATE_COOKIE_WAIT */ \ 131 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -149,8 +145,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
149} /* TYPE_SCTP_INIT */ 145} /* TYPE_SCTP_INIT */
150 146
151#define TYPE_SCTP_INIT_ACK { \ 147#define TYPE_SCTP_INIT_ACK { \
152 /* SCTP_STATE_EMPTY */ \
153 TYPE_SCTP_FUNC(sctp_sf_ootb), \
154 /* SCTP_STATE_CLOSED */ \ 148 /* SCTP_STATE_CLOSED */ \
155 TYPE_SCTP_FUNC(sctp_sf_do_5_2_3_initack), \ 149 TYPE_SCTP_FUNC(sctp_sf_do_5_2_3_initack), \
156 /* SCTP_STATE_COOKIE_WAIT */ \ 150 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -170,8 +164,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
170} /* TYPE_SCTP_INIT_ACK */ 164} /* TYPE_SCTP_INIT_ACK */
171 165
172#define TYPE_SCTP_SACK { \ 166#define TYPE_SCTP_SACK { \
173 /* SCTP_STATE_EMPTY */ \
174 TYPE_SCTP_FUNC(sctp_sf_ootb), \
175 /* SCTP_STATE_CLOSED */ \ 167 /* SCTP_STATE_CLOSED */ \
176 TYPE_SCTP_FUNC(sctp_sf_ootb), \ 168 TYPE_SCTP_FUNC(sctp_sf_ootb), \
177 /* SCTP_STATE_COOKIE_WAIT */ \ 169 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -191,8 +183,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
191} /* TYPE_SCTP_SACK */ 183} /* TYPE_SCTP_SACK */
192 184
193#define TYPE_SCTP_HEARTBEAT { \ 185#define TYPE_SCTP_HEARTBEAT { \
194 /* SCTP_STATE_EMPTY */ \
195 TYPE_SCTP_FUNC(sctp_sf_ootb), \
196 /* SCTP_STATE_CLOSED */ \ 186 /* SCTP_STATE_CLOSED */ \
197 TYPE_SCTP_FUNC(sctp_sf_ootb), \ 187 TYPE_SCTP_FUNC(sctp_sf_ootb), \
198 /* SCTP_STATE_COOKIE_WAIT */ \ 188 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -213,8 +203,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
213} /* TYPE_SCTP_HEARTBEAT */ 203} /* TYPE_SCTP_HEARTBEAT */
214 204
215#define TYPE_SCTP_HEARTBEAT_ACK { \ 205#define TYPE_SCTP_HEARTBEAT_ACK { \
216 /* SCTP_STATE_EMPTY */ \
217 TYPE_SCTP_FUNC(sctp_sf_ootb), \
218 /* SCTP_STATE_CLOSED */ \ 206 /* SCTP_STATE_CLOSED */ \
219 TYPE_SCTP_FUNC(sctp_sf_ootb), \ 207 TYPE_SCTP_FUNC(sctp_sf_ootb), \
220 /* SCTP_STATE_COOKIE_WAIT */ \ 208 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -234,8 +222,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
234} /* TYPE_SCTP_HEARTBEAT_ACK */ 222} /* TYPE_SCTP_HEARTBEAT_ACK */
235 223
236#define TYPE_SCTP_ABORT { \ 224#define TYPE_SCTP_ABORT { \
237 /* SCTP_STATE_EMPTY */ \
238 TYPE_SCTP_FUNC(sctp_sf_ootb), \
239 /* SCTP_STATE_CLOSED */ \ 225 /* SCTP_STATE_CLOSED */ \
240 TYPE_SCTP_FUNC(sctp_sf_pdiscard), \ 226 TYPE_SCTP_FUNC(sctp_sf_pdiscard), \
241 /* SCTP_STATE_COOKIE_WAIT */ \ 227 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -255,8 +241,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
255} /* TYPE_SCTP_ABORT */ 241} /* TYPE_SCTP_ABORT */
256 242
257#define TYPE_SCTP_SHUTDOWN { \ 243#define TYPE_SCTP_SHUTDOWN { \
258 /* SCTP_STATE_EMPTY */ \
259 TYPE_SCTP_FUNC(sctp_sf_ootb), \
260 /* SCTP_STATE_CLOSED */ \ 244 /* SCTP_STATE_CLOSED */ \
261 TYPE_SCTP_FUNC(sctp_sf_ootb), \ 245 TYPE_SCTP_FUNC(sctp_sf_ootb), \
262 /* SCTP_STATE_COOKIE_WAIT */ \ 246 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -276,8 +260,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
276} /* TYPE_SCTP_SHUTDOWN */ 260} /* TYPE_SCTP_SHUTDOWN */
277 261
278#define TYPE_SCTP_SHUTDOWN_ACK { \ 262#define TYPE_SCTP_SHUTDOWN_ACK { \
279 /* SCTP_STATE_EMPTY */ \
280 TYPE_SCTP_FUNC(sctp_sf_ootb), \
281 /* SCTP_STATE_CLOSED */ \ 263 /* SCTP_STATE_CLOSED */ \
282 TYPE_SCTP_FUNC(sctp_sf_ootb), \ 264 TYPE_SCTP_FUNC(sctp_sf_ootb), \
283 /* SCTP_STATE_COOKIE_WAIT */ \ 265 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -297,8 +279,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
297} /* TYPE_SCTP_SHUTDOWN_ACK */ 279} /* TYPE_SCTP_SHUTDOWN_ACK */
298 280
299#define TYPE_SCTP_ERROR { \ 281#define TYPE_SCTP_ERROR { \
300 /* SCTP_STATE_EMPTY */ \
301 TYPE_SCTP_FUNC(sctp_sf_ootb), \
302 /* SCTP_STATE_CLOSED */ \ 282 /* SCTP_STATE_CLOSED */ \
303 TYPE_SCTP_FUNC(sctp_sf_ootb), \ 283 TYPE_SCTP_FUNC(sctp_sf_ootb), \
304 /* SCTP_STATE_COOKIE_WAIT */ \ 284 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -318,8 +298,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
318} /* TYPE_SCTP_ERROR */ 298} /* TYPE_SCTP_ERROR */
319 299
320#define TYPE_SCTP_COOKIE_ECHO { \ 300#define TYPE_SCTP_COOKIE_ECHO { \
321 /* SCTP_STATE_EMPTY */ \
322 TYPE_SCTP_FUNC(sctp_sf_bug), \
323 /* SCTP_STATE_CLOSED */ \ 301 /* SCTP_STATE_CLOSED */ \
324 TYPE_SCTP_FUNC(sctp_sf_do_5_1D_ce), \ 302 TYPE_SCTP_FUNC(sctp_sf_do_5_1D_ce), \
325 /* SCTP_STATE_COOKIE_WAIT */ \ 303 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -339,8 +317,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
339} /* TYPE_SCTP_COOKIE_ECHO */ 317} /* TYPE_SCTP_COOKIE_ECHO */
340 318
341#define TYPE_SCTP_COOKIE_ACK { \ 319#define TYPE_SCTP_COOKIE_ACK { \
342 /* SCTP_STATE_EMPTY */ \
343 TYPE_SCTP_FUNC(sctp_sf_ootb), \
344 /* SCTP_STATE_CLOSED */ \ 320 /* SCTP_STATE_CLOSED */ \
345 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ 321 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
346 /* SCTP_STATE_COOKIE_WAIT */ \ 322 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -360,8 +336,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
360} /* TYPE_SCTP_COOKIE_ACK */ 336} /* TYPE_SCTP_COOKIE_ACK */
361 337
362#define TYPE_SCTP_ECN_ECNE { \ 338#define TYPE_SCTP_ECN_ECNE { \
363 /* SCTP_STATE_EMPTY */ \
364 TYPE_SCTP_FUNC(sctp_sf_ootb), \
365 /* SCTP_STATE_CLOSED */ \ 339 /* SCTP_STATE_CLOSED */ \
366 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ 340 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
367 /* SCTP_STATE_COOKIE_WAIT */ \ 341 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -381,8 +355,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
381} /* TYPE_SCTP_ECN_ECNE */ 355} /* TYPE_SCTP_ECN_ECNE */
382 356
383#define TYPE_SCTP_ECN_CWR { \ 357#define TYPE_SCTP_ECN_CWR { \
384 /* SCTP_STATE_EMPTY */ \
385 TYPE_SCTP_FUNC(sctp_sf_ootb), \
386 /* SCTP_STATE_CLOSED */ \ 358 /* SCTP_STATE_CLOSED */ \
387 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ 359 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
388 /* SCTP_STATE_COOKIE_WAIT */ \ 360 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -402,8 +374,6 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type,
402} /* TYPE_SCTP_ECN_CWR */ 374} /* TYPE_SCTP_ECN_CWR */
403 375
404#define TYPE_SCTP_SHUTDOWN_COMPLETE { \ 376#define TYPE_SCTP_SHUTDOWN_COMPLETE { \
405 /* SCTP_STATE_EMPTY */ \
406 TYPE_SCTP_FUNC(sctp_sf_ootb), \
407 /* SCTP_STATE_CLOSED */ \ 377 /* SCTP_STATE_CLOSED */ \
408 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ 378 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
409 /* SCTP_STATE_COOKIE_WAIT */ \ 379 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -446,8 +416,6 @@ static const sctp_sm_table_entry_t chunk_event_table[SCTP_NUM_BASE_CHUNK_TYPES][
446}; /* state_fn_t chunk_event_table[][] */ 416}; /* state_fn_t chunk_event_table[][] */
447 417
448#define TYPE_SCTP_ASCONF { \ 418#define TYPE_SCTP_ASCONF { \
449 /* SCTP_STATE_EMPTY */ \
450 TYPE_SCTP_FUNC(sctp_sf_ootb), \
451 /* SCTP_STATE_CLOSED */ \ 419 /* SCTP_STATE_CLOSED */ \
452 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ 420 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
453 /* SCTP_STATE_COOKIE_WAIT */ \ 421 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -467,8 +435,6 @@ static const sctp_sm_table_entry_t chunk_event_table[SCTP_NUM_BASE_CHUNK_TYPES][
467} /* TYPE_SCTP_ASCONF */ 435} /* TYPE_SCTP_ASCONF */
468 436
469#define TYPE_SCTP_ASCONF_ACK { \ 437#define TYPE_SCTP_ASCONF_ACK { \
470 /* SCTP_STATE_EMPTY */ \
471 TYPE_SCTP_FUNC(sctp_sf_ootb), \
472 /* SCTP_STATE_CLOSED */ \ 438 /* SCTP_STATE_CLOSED */ \
473 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \ 439 TYPE_SCTP_FUNC(sctp_sf_discard_chunk), \
474 /* SCTP_STATE_COOKIE_WAIT */ \ 440 /* SCTP_STATE_COOKIE_WAIT */ \
@@ -496,8 +462,6 @@ static const sctp_sm_table_entry_t addip_chunk_event_table[SCTP_NUM_ADDIP_CHUNK_
 }; /*state_fn_t addip_chunk_event_table[][] */
 
 #define TYPE_SCTP_FWD_TSN { \
-        /* SCTP_STATE_EMPTY */ \
-        TYPE_SCTP_FUNC(sctp_sf_ootb), \
         /* SCTP_STATE_CLOSED */ \
         TYPE_SCTP_FUNC(sctp_sf_ootb), \
         /* SCTP_STATE_COOKIE_WAIT */ \
@@ -524,8 +488,6 @@ static const sctp_sm_table_entry_t prsctp_chunk_event_table[SCTP_NUM_PRSCTP_CHUN
 }; /*state_fn_t prsctp_chunk_event_table[][] */
 
 #define TYPE_SCTP_AUTH { \
-        /* SCTP_STATE_EMPTY */ \
-        TYPE_SCTP_FUNC(sctp_sf_ootb), \
         /* SCTP_STATE_CLOSED */ \
         TYPE_SCTP_FUNC(sctp_sf_ootb), \
         /* SCTP_STATE_COOKIE_WAIT */ \
@@ -553,8 +515,6 @@ static const sctp_sm_table_entry_t auth_chunk_event_table[SCTP_NUM_AUTH_CHUNK_TY
 
 static const sctp_sm_table_entry_t
 chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = {
-        /* SCTP_STATE_EMPTY */
-        TYPE_SCTP_FUNC(sctp_sf_ootb),
         /* SCTP_STATE_CLOSED */
         TYPE_SCTP_FUNC(sctp_sf_ootb),
         /* SCTP_STATE_COOKIE_WAIT */
@@ -575,8 +535,6 @@ chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = {
 
 
 #define TYPE_SCTP_PRIMITIVE_ASSOCIATE { \
-        /* SCTP_STATE_EMPTY */ \
-        TYPE_SCTP_FUNC(sctp_sf_bug), \
         /* SCTP_STATE_CLOSED */ \
         TYPE_SCTP_FUNC(sctp_sf_do_prm_asoc), \
         /* SCTP_STATE_COOKIE_WAIT */ \
@@ -596,8 +554,6 @@ chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = {
 } /* TYPE_SCTP_PRIMITIVE_ASSOCIATE */
 
 #define TYPE_SCTP_PRIMITIVE_SHUTDOWN { \
-        /* SCTP_STATE_EMPTY */ \
-        TYPE_SCTP_FUNC(sctp_sf_bug), \
         /* SCTP_STATE_CLOSED */ \
         TYPE_SCTP_FUNC(sctp_sf_error_closed), \
         /* SCTP_STATE_COOKIE_WAIT */ \
@@ -617,8 +573,6 @@ chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = {
 } /* TYPE_SCTP_PRIMITIVE_SHUTDOWN */
 
 #define TYPE_SCTP_PRIMITIVE_ABORT { \
-        /* SCTP_STATE_EMPTY */ \
-        TYPE_SCTP_FUNC(sctp_sf_bug), \
         /* SCTP_STATE_CLOSED */ \
         TYPE_SCTP_FUNC(sctp_sf_error_closed), \
         /* SCTP_STATE_COOKIE_WAIT */ \
@@ -638,8 +592,6 @@ chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = {
 } /* TYPE_SCTP_PRIMITIVE_ABORT */
 
 #define TYPE_SCTP_PRIMITIVE_SEND { \
-        /* SCTP_STATE_EMPTY */ \
-        TYPE_SCTP_FUNC(sctp_sf_bug), \
         /* SCTP_STATE_CLOSED */ \
         TYPE_SCTP_FUNC(sctp_sf_error_closed), \
         /* SCTP_STATE_COOKIE_WAIT */ \
@@ -659,8 +611,6 @@ chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = {
 } /* TYPE_SCTP_PRIMITIVE_SEND */
 
 #define TYPE_SCTP_PRIMITIVE_REQUESTHEARTBEAT { \
-        /* SCTP_STATE_EMPTY */ \
-        TYPE_SCTP_FUNC(sctp_sf_bug), \
         /* SCTP_STATE_CLOSED */ \
         TYPE_SCTP_FUNC(sctp_sf_error_closed), \
         /* SCTP_STATE_COOKIE_WAIT */ \
@@ -680,8 +630,6 @@ chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = {
 } /* TYPE_SCTP_PRIMITIVE_REQUESTHEARTBEAT */
 
 #define TYPE_SCTP_PRIMITIVE_ASCONF { \
-        /* SCTP_STATE_EMPTY */ \
-        TYPE_SCTP_FUNC(sctp_sf_bug), \
         /* SCTP_STATE_CLOSED */ \
         TYPE_SCTP_FUNC(sctp_sf_error_closed), \
         /* SCTP_STATE_COOKIE_WAIT */ \
@@ -713,8 +661,6 @@ static const sctp_sm_table_entry_t primitive_event_table[SCTP_NUM_PRIMITIVE_TYPE
 };
 
 #define TYPE_SCTP_OTHER_NO_PENDING_TSN { \
-        /* SCTP_STATE_EMPTY */ \
-        TYPE_SCTP_FUNC(sctp_sf_bug), \
         /* SCTP_STATE_CLOSED */ \
         TYPE_SCTP_FUNC(sctp_sf_ignore_other), \
         /* SCTP_STATE_COOKIE_WAIT */ \
@@ -722,7 +668,7 @@ static const sctp_sm_table_entry_t primitive_event_table[SCTP_NUM_PRIMITIVE_TYPE
         /* SCTP_STATE_COOKIE_ECHOED */ \
         TYPE_SCTP_FUNC(sctp_sf_ignore_other), \
         /* SCTP_STATE_ESTABLISHED */ \
-        TYPE_SCTP_FUNC(sctp_sf_ignore_other), \
+        TYPE_SCTP_FUNC(sctp_sf_do_no_pending_tsn), \
         /* SCTP_STATE_SHUTDOWN_PENDING */ \
         TYPE_SCTP_FUNC(sctp_sf_do_9_2_start_shutdown), \
         /* SCTP_STATE_SHUTDOWN_SENT */ \
@@ -734,8 +680,6 @@ static const sctp_sm_table_entry_t primitive_event_table[SCTP_NUM_PRIMITIVE_TYPE
 }
 
 #define TYPE_SCTP_OTHER_ICMP_PROTO_UNREACH { \
-        /* SCTP_STATE_EMPTY */ \
-        TYPE_SCTP_FUNC(sctp_sf_bug), \
         /* SCTP_STATE_CLOSED */ \
         TYPE_SCTP_FUNC(sctp_sf_ignore_other), \
         /* SCTP_STATE_COOKIE_WAIT */ \
@@ -760,8 +704,6 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_
 };
 
 #define TYPE_SCTP_EVENT_TIMEOUT_NONE { \
-        /* SCTP_STATE_EMPTY */ \
-        TYPE_SCTP_FUNC(sctp_sf_bug), \
         /* SCTP_STATE_CLOSED */ \
         TYPE_SCTP_FUNC(sctp_sf_bug), \
         /* SCTP_STATE_COOKIE_WAIT */ \
@@ -781,8 +723,6 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_
 }
 
 #define TYPE_SCTP_EVENT_TIMEOUT_T1_COOKIE { \
-        /* SCTP_STATE_EMPTY */ \
-        TYPE_SCTP_FUNC(sctp_sf_bug), \
         /* SCTP_STATE_CLOSED */ \
         TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
         /* SCTP_STATE_COOKIE_WAIT */ \
@@ -802,8 +742,6 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_
 }
 
 #define TYPE_SCTP_EVENT_TIMEOUT_T1_INIT { \
-        /* SCTP_STATE_EMPTY */ \
-        TYPE_SCTP_FUNC(sctp_sf_bug), \
         /* SCTP_STATE_CLOSED */ \
         TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
         /* SCTP_STATE_COOKIE_WAIT */ \
@@ -823,8 +761,6 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_
 }
 
 #define TYPE_SCTP_EVENT_TIMEOUT_T2_SHUTDOWN { \
-        /* SCTP_STATE_EMPTY */ \
-        TYPE_SCTP_FUNC(sctp_sf_bug), \
         /* SCTP_STATE_CLOSED */ \
         TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
         /* SCTP_STATE_COOKIE_WAIT */ \
@@ -844,8 +780,6 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_
 }
 
 #define TYPE_SCTP_EVENT_TIMEOUT_T3_RTX { \
-        /* SCTP_STATE_EMPTY */ \
-        TYPE_SCTP_FUNC(sctp_sf_bug), \
         /* SCTP_STATE_CLOSED */ \
         TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
         /* SCTP_STATE_COOKIE_WAIT */ \
@@ -865,8 +799,6 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_
 }
 
 #define TYPE_SCTP_EVENT_TIMEOUT_T4_RTO { \
-        /* SCTP_STATE_EMPTY */ \
-        TYPE_SCTP_FUNC(sctp_sf_bug), \
         /* SCTP_STATE_CLOSED */ \
         TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
         /* SCTP_STATE_COOKIE_WAIT */ \
@@ -886,8 +818,6 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_
 }
 
 #define TYPE_SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD { \
-        /* SCTP_STATE_EMPTY */ \
-        TYPE_SCTP_FUNC(sctp_sf_bug), \
         /* SCTP_STATE_CLOSED */ \
         TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
         /* SCTP_STATE_COOKIE_WAIT */ \
@@ -907,8 +837,6 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_
 }
 
 #define TYPE_SCTP_EVENT_TIMEOUT_HEARTBEAT { \
-        /* SCTP_STATE_EMPTY */ \
-        TYPE_SCTP_FUNC(sctp_sf_bug), \
         /* SCTP_STATE_CLOSED */ \
         TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
         /* SCTP_STATE_COOKIE_WAIT */ \
@@ -928,8 +856,6 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_
 }
 
 #define TYPE_SCTP_EVENT_TIMEOUT_SACK { \
-        /* SCTP_STATE_EMPTY */ \
-        TYPE_SCTP_FUNC(sctp_sf_bug), \
         /* SCTP_STATE_CLOSED */ \
         TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
         /* SCTP_STATE_COOKIE_WAIT */ \
@@ -949,8 +875,6 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_
 }
 
 #define TYPE_SCTP_EVENT_TIMEOUT_AUTOCLOSE { \
-        /* SCTP_STATE_EMPTY */ \
-        TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
         /* SCTP_STATE_CLOSED */ \
         TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
         /* SCTP_STATE_COOKIE_WAIT */ \
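
Every table above is indexed positionally by sctp_state_t, so deleting the
unused SCTP_STATE_EMPTY value from the enum removes one row from each event
table; the repetitive hunks are the mechanical fallout. A minimal standalone
sketch of the same dispatch pattern (all names here are invented for
illustration, not the kernel's; the kernel tables rely on entry order, which
is why every macro must drop its row):

#include <stdio.h>

typedef enum {
        DEMO_CLOSED,
        DEMO_COOKIE_WAIT,
        DEMO_ESTABLISHED,
        DEMO_NUM_STATES         /* table size tracks the enum automatically */
} demo_state_t;

typedef int (*demo_handler_t)(void);

static int demo_ootb(void)    { puts("out of the blue"); return -1; }
static int demo_deliver(void) { puts("deliver");         return 0;  }

/* One row per state; designated initializers keep rows and states in sync. */
static const demo_handler_t demo_event_table[DEMO_NUM_STATES] = {
        [DEMO_CLOSED]      = demo_ootb,
        [DEMO_COOKIE_WAIT] = demo_ootb,
        [DEMO_ESTABLISHED] = demo_deliver,
};

int main(void)
{
        return demo_event_table[DEMO_ESTABLISHED]();
}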
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 3951a10605bc..f694ee116746 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -658,11 +658,15 @@ static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt)
                         goto err_bindx_rem;
                 }
 
-                if (sa_addr->v4.sin_port != htons(bp->port)) {
+                if (sa_addr->v4.sin_port &&
+                    sa_addr->v4.sin_port != htons(bp->port)) {
                         retval = -EINVAL;
                         goto err_bindx_rem;
                 }
 
+                if (!sa_addr->v4.sin_port)
+                        sa_addr->v4.sin_port = htons(bp->port);
+
                 /* FIXME - There is probably a need to check if sk->sk_saddr and
                  * sk->sk_rcv_addr are currently set to one of the addresses to
                  * be removed. This is something which needs to be looked into
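
The hunk above lets sctp_bindx() callers pass a zero port when removing an
address: port 0 is now taken to mean the socket's own bound port instead of
being rejected as a mismatch. A hedged userspace sketch (assumes
lksctp-tools headers; remove_addr is an invented helper, error handling
trimmed):

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

static int remove_addr(int sd, in_addr_t addr)
{
        struct sockaddr_in sin;

        memset(&sin, 0, sizeof(sin));
        sin.sin_family = AF_INET;
        sin.sin_addr.s_addr = addr;
        sin.sin_port = 0;       /* 0 == "whatever port this socket is bound to" */

        return sctp_bindx(sd, (struct sockaddr *)&sin, 1, SCTP_BINDX_REM_ADDR);
}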
@@ -1193,7 +1197,7 @@ out_free:
  * an endpoint that is multi-homed. Much like sctp_bindx() this call
  * allows a caller to specify multiple addresses at which a peer can be
  * reached. The way the SCTP stack uses the list of addresses to set up
- * the association is implementation dependant. This function only
+ * the association is implementation dependent. This function only
  * specifies that the stack will try to make use of all the addresses in
  * the list when needed.
  *
@@ -3215,14 +3219,9 @@ static int sctp_setsockopt_hmac_ident(struct sock *sk,
         if (optlen < sizeof(struct sctp_hmacalgo))
                 return -EINVAL;
 
-        hmacs = kmalloc(optlen, GFP_KERNEL);
-        if (!hmacs)
-                return -ENOMEM;
-
-        if (copy_from_user(hmacs, optval, optlen)) {
-                err = -EFAULT;
-                goto out;
-        }
+        hmacs = memdup_user(optval, optlen);
+        if (IS_ERR(hmacs))
+                return PTR_ERR(hmacs);
 
         idents = hmacs->shmac_num_idents;
         if (idents == 0 || idents > SCTP_AUTH_NUM_HMACS ||
@@ -3257,14 +3256,9 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
         if (optlen <= sizeof(struct sctp_authkey))
                 return -EINVAL;
 
-        authkey = kmalloc(optlen, GFP_KERNEL);
-        if (!authkey)
-                return -ENOMEM;
-
-        if (copy_from_user(authkey, optval, optlen)) {
-                ret = -EFAULT;
-                goto out;
-        }
+        authkey = memdup_user(optval, optlen);
+        if (IS_ERR(authkey))
+                return PTR_ERR(authkey);
 
         if (authkey->sca_keylength > optlen - sizeof(struct sctp_authkey)) {
                 ret = -EINVAL;
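
Both conversions above collapse the open-coded kmalloc()+copy_from_user()
pair into memdup_user(), which allocates and copies in one step and reports
failure through the returned pointer (the ERR_PTR()/IS_ERR()/PTR_ERR()
convention). A runnable userspace sketch of that convention (all names
invented for the example):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_ERRNO 4095

static inline void *err_ptr(long err)   { return (void *)err; }
static inline long  ptr_err(const void *p) { return (long)p;  }
static inline int   is_err(const void *p)
{
        /* errors live in the last page of the address space */
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

/* One call replaces the allocate + copy + error-cleanup dance. */
static void *memdup(const void *src, size_t len)
{
        void *p = malloc(len);

        if (!p)
                return err_ptr(-ENOMEM);
        memcpy(p, src, len);
        return p;
}

int main(void)
{
        char *copy = memdup("hmac-sha1", 10);

        if (is_err(copy))
                return (int)-ptr_err(copy);
        puts(copy);
        free(copy);
        return 0;
}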
@@ -5283,6 +5277,55 @@ static int sctp_getsockopt_assoc_number(struct sock *sk, int len,
         return 0;
 }
 
+/*
+ * 8.2.6. Get the Current Identifiers of Associations
+ *        (SCTP_GET_ASSOC_ID_LIST)
+ *
+ * This option gets the current list of SCTP association identifiers of
+ * the SCTP associations handled by a one-to-many style socket.
+ */
+static int sctp_getsockopt_assoc_ids(struct sock *sk, int len,
+                                     char __user *optval, int __user *optlen)
+{
+        struct sctp_sock *sp = sctp_sk(sk);
+        struct sctp_association *asoc;
+        struct sctp_assoc_ids *ids;
+        u32 num = 0;
+
+        if (sctp_style(sk, TCP))
+                return -EOPNOTSUPP;
+
+        if (len < sizeof(struct sctp_assoc_ids))
+                return -EINVAL;
+
+        list_for_each_entry(asoc, &(sp->ep->asocs), asocs) {
+                num++;
+        }
+
+        if (len < sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num)
+                return -EINVAL;
+
+        len = sizeof(struct sctp_assoc_ids) + sizeof(sctp_assoc_t) * num;
+
+        ids = kmalloc(len, GFP_KERNEL);
+        if (unlikely(!ids))
+                return -ENOMEM;
+
+        ids->gaids_number_of_ids = num;
+        num = 0;
+        list_for_each_entry(asoc, &(sp->ep->asocs), asocs) {
+                ids->gaids_assoc_id[num++] = asoc->assoc_id;
+        }
+
+        if (put_user(len, optlen) || copy_to_user(optval, ids, len)) {
+                kfree(ids);
+                return -EFAULT;
+        }
+
+        kfree(ids);
+        return 0;
+}
+
 SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
                                 char __user *optval, int __user *optlen)
 {
@@ -5415,6 +5458,9 @@ SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
         case SCTP_GET_ASSOC_NUMBER:
                 retval = sctp_getsockopt_assoc_number(sk, len, optval, optlen);
                 break;
+        case SCTP_GET_ASSOC_ID_LIST:
+                retval = sctp_getsockopt_assoc_ids(sk, len, optval, optlen);
+                break;
         default:
                 retval = -ENOPROTOOPT;
                 break;
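
A hedged userspace sketch of the new option: size the buffer from
SCTP_GET_ASSOC_NUMBER first, then fetch the id list (assumes lksctp-tools
headers; get_assoc_ids is an invented helper, and the count can race with
association setup/teardown, so a robust caller would retry on EINVAL):

#include <stdlib.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

static struct sctp_assoc_ids *get_assoc_ids(int sd)
{
        struct sctp_assoc_ids *ids;
        __u32 num = 0;
        socklen_t len = sizeof(num);

        if (getsockopt(sd, IPPROTO_SCTP, SCTP_GET_ASSOC_NUMBER, &num, &len) < 0)
                return NULL;

        len = sizeof(*ids) + num * sizeof(sctp_assoc_t);
        ids = malloc(len);
        if (!ids)
                return NULL;

        if (getsockopt(sd, IPPROTO_SCTP, SCTP_GET_ASSOC_ID_LIST, ids, &len) < 0) {
                free(ids);
                return NULL;
        }
        return ids;     /* ids->gaids_number_of_ids entries; caller frees */
}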
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index aa72e89c3ee1..c962c6062aab 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -554,7 +554,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_send_failed(
         memcpy(&ssf->ssf_info, &chunk->sinfo, sizeof(struct sctp_sndrcvinfo));
 
         /* Per TSVWG discussion with Randy. Allow the application to
-         * ressemble a fragmented message.
+         * resemble a fragmented message.
          */
         ssf->ssf_info.sinfo_flags = chunk->chunk_hdr->flags;
 
@@ -843,7 +843,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_authkey(
         ak = (struct sctp_authkey_event *)
                 skb_put(skb, sizeof(struct sctp_authkey_event));
 
-        ak->auth_type = SCTP_AUTHENTICATION_INDICATION;
+        ak->auth_type = SCTP_AUTHENTICATION_EVENT;
         ak->auth_flags = 0;
         ak->auth_length = sizeof(struct sctp_authkey_event);
 
@@ -862,6 +862,34 @@ fail:
         return NULL;
 }
 
+/*
+ * Socket Extensions for SCTP
+ * 6.3.10. SCTP_SENDER_DRY_EVENT
+ */
+struct sctp_ulpevent *sctp_ulpevent_make_sender_dry_event(
+        const struct sctp_association *asoc, gfp_t gfp)
+{
+        struct sctp_ulpevent *event;
+        struct sctp_sender_dry_event *sdry;
+        struct sk_buff *skb;
+
+        event = sctp_ulpevent_new(sizeof(struct sctp_sender_dry_event),
+                                  MSG_NOTIFICATION, gfp);
+        if (!event)
+                return NULL;
+
+        skb = sctp_event2skb(event);
+        sdry = (struct sctp_sender_dry_event *)
+                skb_put(skb, sizeof(struct sctp_sender_dry_event));
+
+        sdry->sender_dry_type = SCTP_SENDER_DRY_EVENT;
+        sdry->sender_dry_flags = 0;
+        sdry->sender_dry_length = sizeof(struct sctp_sender_dry_event);
+        sctp_ulpevent_set_owner(event, asoc);
+        sdry->sender_dry_assoc_id = sctp_assoc2id(asoc);
+
+        return event;
+}
 
 /* Return the notification type, assuming this is a notification
  * event.
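
The new ulpevent above backs the SCTP_SENDER_DRY_EVENT notification, raised
when the association has no more queued or outstanding user data. A hedged
sketch of how an application would opt in (assumes lksctp headers new enough
to carry the sctp_sender_dry_event subscription flag):

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

static int subscribe_sender_dry(int sd)
{
        struct sctp_event_subscribe ev;

        memset(&ev, 0, sizeof(ev));
        ev.sctp_sender_dry_event = 1;   /* deliver SCTP_SENDER_DRY_EVENT */

        return setsockopt(sd, IPPROTO_SCTP, SCTP_EVENTS, &ev, sizeof(ev));
}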
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 17678189d054..f2d1de7f2ffb 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -240,7 +240,7 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
         } else {
                 /*
                  * If fragment interleave is enabled, we
-                 * can queue this to the recieve queue instead
+                 * can queue this to the receive queue instead
                  * of the lobby.
                  */
                 if (sctp_sk(sk)->frag_interleave)
diff --git a/net/socket.c b/net/socket.c
index 5212447c86e7..d25f5a9d6fa2 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2643,13 +2643,13 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
                 return -EFAULT;
 
         if (convert_in) {
-                /* We expect there to be holes between fs.m_u and
+                /* We expect there to be holes between fs.m_ext and
                  * fs.ring_cookie and at the end of fs, but nowhere else.
                  */
-                BUILD_BUG_ON(offsetof(struct compat_ethtool_rxnfc, fs.m_u) +
-                             sizeof(compat_rxnfc->fs.m_u) !=
-                             offsetof(struct ethtool_rxnfc, fs.m_u) +
-                             sizeof(rxnfc->fs.m_u));
+                BUILD_BUG_ON(offsetof(struct compat_ethtool_rxnfc, fs.m_ext) +
+                             sizeof(compat_rxnfc->fs.m_ext) !=
+                             offsetof(struct ethtool_rxnfc, fs.m_ext) +
+                             sizeof(rxnfc->fs.m_ext));
                 BUILD_BUG_ON(
                         offsetof(struct compat_ethtool_rxnfc, fs.location) -
                         offsetof(struct compat_ethtool_rxnfc, fs.ring_cookie) !=
@@ -2657,7 +2657,7 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
                         offsetof(struct ethtool_rxnfc, fs.ring_cookie));
 
                 if (copy_in_user(rxnfc, compat_rxnfc,
-                                 (void *)(&rxnfc->fs.m_u + 1) -
+                                 (void *)(&rxnfc->fs.m_ext + 1) -
                                  (void *)rxnfc) ||
                     copy_in_user(&rxnfc->fs.ring_cookie,
                                  &compat_rxnfc->fs.ring_cookie,
@@ -2674,7 +2674,7 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
 
         if (convert_out) {
                 if (copy_in_user(compat_rxnfc, rxnfc,
-                                 (const void *)(&rxnfc->fs.m_u + 1) -
+                                 (const void *)(&rxnfc->fs.m_ext + 1) -
                                  (const void *)rxnfc) ||
                     copy_in_user(&compat_rxnfc->fs.ring_cookie,
                                  &rxnfc->fs.ring_cookie,
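
The BUILD_BUG_ON()s above pin the compat conversion to the real struct
layout: the prefix copy is only valid while offsetof()+sizeof() of the last
copied member agree between the native and compat structs, so the checks now
name fs.m_ext, the new last member before the hole. The same compile-time
technique in plain C11 (struct names invented for the sketch):

#include <stddef.h>

struct native_rule { unsigned int cmd; unsigned long long ring_cookie; };
struct compat_rule { unsigned int cmd; unsigned long long ring_cookie; };

/* Refuses to compile if a prefix copy up to ring_cookie would diverge. */
_Static_assert(offsetof(struct native_rule, ring_cookie) +
               sizeof(((struct native_rule *)0)->ring_cookie) ==
               offsetof(struct compat_rule, ring_cookie) +
               sizeof(((struct compat_rule *)0)->ring_cookie),
               "native/compat prefix layouts diverge");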
@@ -2986,7 +2986,7 @@ out:
 
 /* Since old style bridge ioctl's endup using SIOCDEVPRIVATE
  * for some operations; this forces use of the newer bridge-utils that
- * use compatiable ioctls
+ * use compatible ioctls
  */
 static int old_bridge_ioctl(compat_ulong_t __user *argp)
 {
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 9022f0a6503e..0a9a2ec2e469 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -427,7 +427,7 @@ static int
 context_derive_keys_rc4(struct krb5_ctx *ctx)
 {
         struct crypto_hash *hmac;
-        static const char sigkeyconstant[] = "signaturekey";
+        char sigkeyconstant[] = "signaturekey";
         int slen = strlen(sigkeyconstant) + 1; /* include null terminator */
         struct hash_desc desc;
         struct scatterlist sg[1];
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index bcdae78fdfc6..8d0f7d3c71c8 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -1101,7 +1101,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
 
         /* credential is:
          * version(==1), proc(0,1,2,3), seq, service (1,2,3), handle
-         * at least 5 u32s, and is preceeded by length, so that makes 6.
+         * at least 5 u32s, and is preceded by length, so that makes 6.
          */
 
         if (argv->iov_len < 5 * 4)
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 1e336a06d3e6..bf005d3c65ef 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -504,7 +504,7 @@ static int xs_nospace(struct rpc_task *task)
  *    EAGAIN:  The socket was blocked, please call again later to
  *             complete the request
  * ENOTCONN:   Caller needs to invoke connect logic then call again
- *    other:   Some other error occured, the request was not sent
+ *    other:   Some other error occurred, the request was not sent
  */
 static int xs_udp_send_request(struct rpc_task *task)
 {
@@ -590,7 +590,7 @@ static inline void xs_encode_tcp_record_marker(struct xdr_buf *buf)
  *    EAGAIN:  The socket was blocked, please call again later to
  *             complete the request
  * ENOTCONN:   Caller needs to invoke connect logic then call again
- *    other:   Some other error occured, the request was not sent
+ *    other:   Some other error occurred, the request was not sent
  *
  * XXX: In the case of soft timeouts, should we eventually give up
  *      if sendmsg is not able to make progress?
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 43639ff1cbec..ebf338f7b14e 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -2471,7 +2471,7 @@ exit:
  * A pending message being re-assembled must store certain values
  * to handle subsequent fragments correctly. The following functions
  * help storing these values in unused, available fields in the
- * pending message. This makes dynamic memory allocation unecessary.
+ * pending message. This makes dynamic memory allocation unnecessary.
  */
 
 static void set_long_msg_seqno(struct sk_buff *buf, u32 seqno)
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index c9fa6dfcf287..80025a1b3bfd 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -160,7 +160,7 @@ void tipc_named_withdraw(struct publication *publ)
 
         buf = named_prepare_buf(WITHDRAWAL, ITEM_SIZE, 0);
         if (!buf) {
-                warn("Withdrawl distribution failure\n");
+                warn("Withdrawal distribution failure\n");
                 return;
         }
 
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 1663e1a2efdd..3a43a8304768 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -207,7 +207,7 @@ static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned *hashp)
         /*
          * This may look like an off by one error but it is a bit more
          * subtle. 108 is the longest valid AF_UNIX path for a binding.
-         * sun_path[108] doesnt as such exist. However in kernel space
+         * sun_path[108] doesn't as such exist. However in kernel space
          * we are guaranteed that it is a valid memory location in our
          * kernel address buffer.
          */
diff --git a/net/wanrouter/wanproc.c b/net/wanrouter/wanproc.c
index 11f25c7a7a05..f346395314ba 100644
--- a/net/wanrouter/wanproc.c
+++ b/net/wanrouter/wanproc.c
@@ -51,7 +51,7 @@
 
 /*
  * Structures for interfacing with the /proc filesystem.
- * Router creates its own directory /proc/net/router with the folowing
+ * Router creates its own directory /proc/net/router with the following
  * entries:
  *   config   device configuration
  *   status   global device statistics
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 2714379ce2d6..58d69959ab28 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -812,7 +812,7 @@ static void handle_channel(struct wiphy *wiphy,
         if (r) {
                 /*
                  * We will disable all channels that do not match our
-                 * recieved regulatory rule unless the hint is coming
+                 * received regulatory rule unless the hint is coming
                  * from a Country IE and the Country IE had no information
                  * about a band. The IEEE 802.11 spec allows for an AP
                  * to send only a subset of the regulatory rules allowed,
@@ -841,7 +841,7 @@ static void handle_channel(struct wiphy *wiphy,
             request_wiphy && request_wiphy == wiphy &&
             request_wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY) {
                 /*
-                 * This gaurantees the driver's requested regulatory domain
+                 * This guarantees the driver's requested regulatory domain
                  * will always be used as a base for further regulatory
                  * settings
                  */
diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c
index 406207515b5e..f77e4e75f914 100644
--- a/net/x25/x25_facilities.c
+++ b/net/x25/x25_facilities.c
@@ -31,7 +31,7 @@
  * x25_parse_facilities - Parse facilities from skb into the facilities structs
  *
  * @skb: sk_buff to parse
- * @facilities: Regular facilites, updated as facilities are found
+ * @facilities: Regular facilities, updated as facilities are found
  * @dte_facs: ITU DTE facilities, updated as DTE facilities are found
  * @vc_fac_mask: mask is updated with all facilities found
 *
diff --git a/net/x25/x25_forward.c b/net/x25/x25_forward.c
index 25a810793968..c541b622ae16 100644
--- a/net/x25/x25_forward.c
+++ b/net/x25/x25_forward.c
@@ -31,7 +31,7 @@ int x25_forward_call(struct x25_address *dest_addr, struct x25_neigh *from,
                 goto out_no_route;
 
         if ((neigh_new = x25_get_neigh(rt->dev)) == NULL) {
-                /* This shouldnt happen, if it occurs somehow
+                /* This shouldn't happen, if it occurs somehow
                  * do something sensible
                  */
                 goto out_put_route;
@@ -45,7 +45,7 @@ int x25_forward_call(struct x25_address *dest_addr, struct x25_neigh *from,
         }
 
         /* Remote end sending a call request on an already
-         * established LCI? It shouldnt happen, just in case..
+         * established LCI? It shouldn't happen, just in case..
          */
         read_lock_bh(&x25_forward_list_lock);
         list_for_each(entry, &x25_forward_list) {
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index dd78536d40de..d70f85eb7864 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1036,15 +1036,15 @@ static struct xfrm_state *__find_acq_core(struct net *net, struct xfrm_mark *m,
 
         case AF_INET6:
                 ipv6_addr_copy((struct in6_addr *)x->sel.daddr.a6,
-                               (struct in6_addr *)daddr);
+                               (const struct in6_addr *)daddr);
                 ipv6_addr_copy((struct in6_addr *)x->sel.saddr.a6,
-                               (struct in6_addr *)saddr);
+                               (const struct in6_addr *)saddr);
                 x->sel.prefixlen_d = 128;
                 x->sel.prefixlen_s = 128;
                 ipv6_addr_copy((struct in6_addr *)x->props.saddr.a6,
-                               (struct in6_addr *)saddr);
+                               (const struct in6_addr *)saddr);
                 ipv6_addr_copy((struct in6_addr *)x->id.daddr.a6,
-                               (struct in6_addr *)daddr);
+                               (const struct in6_addr *)daddr);
                 break;
         }
 
@@ -2092,8 +2092,8 @@ static void xfrm_audit_helper_sainfo(struct xfrm_state *x,
 static void xfrm_audit_helper_pktinfo(struct sk_buff *skb, u16 family,
                                       struct audit_buffer *audit_buf)
 {
-        struct iphdr *iph4;
-        struct ipv6hdr *iph6;
+        const struct iphdr *iph4;
+        const struct ipv6hdr *iph6;
 
         switch (family) {
         case AF_INET:
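
The casts above track the constification of ipv6_addr_copy()'s source
parameter: once the copy helper takes a const pointer, callers cast their
untyped addresses to the const-qualified form so no qualifier is silently
dropped. The pattern in isolation (a minimal sketch; struct addr6 and
addr6_copy are invented stand-ins):

#include <string.h>

struct addr6 { unsigned char b[16]; };

static inline void addr6_copy(struct addr6 *dst, const struct addr6 *src)
{
        memcpy(dst, src, sizeof(*dst));
}

void set_daddr(struct addr6 *dst, const void *raw)
{
        /* cast to the const-qualified type the helper now expects */
        addr6_copy(dst, (const struct addr6 *)raw);
}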
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 3d15d3e1b2c4..5d1d60d3ca83 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -894,7 +894,7 @@ static int build_spdinfo(struct sk_buff *skb, struct net *net,
         u32 *f;
 
         nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0);
-        if (nlh == NULL) /* shouldnt really happen ... */
+        if (nlh == NULL) /* shouldn't really happen ... */
                 return -EMSGSIZE;
 
         f = nlmsg_data(nlh);
@@ -954,7 +954,7 @@ static int build_sadinfo(struct sk_buff *skb, struct net *net,
         u32 *f;
 
         nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0);
-        if (nlh == NULL) /* shouldnt really happen ... */
+        if (nlh == NULL) /* shouldn't really happen ... */
                 return -EMSGSIZE;
 
         f = nlmsg_data(nlh);
@@ -1361,7 +1361,7 @@ static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
         if (!xp)
                 return err;
 
-        /* shouldnt excl be based on nlh flags??
+        /* shouldn't excl be based on nlh flags??
          * Aha! this is anti-netlink really i.e more pfkey derived
          * in netlink excl is a flag and you wouldnt need
          * a type XFRM_MSG_UPDPOLICY - JHS */